diff --git a/.github/workflows/grovedb.yml b/.github/workflows/grovedb.yml index af4242def..5ad86e747 100644 --- a/.github/workflows/grovedb.yml +++ b/.github/workflows/grovedb.yml @@ -19,14 +19,22 @@ jobs: access_token: ${{ github.token }} - uses: actions/checkout@v2 + with: + submodules: recursive - name: Setup Rust uses: actions-rs/toolchain@v1 with: toolchain: stable + target: wasm32-unknown-unknown - name: Enable Rust cache uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: "false" + + - name: Setup Trunk + uses: jetli/trunk-action@v0.5.0 - run: cargo test --workspace --all-features @@ -42,6 +50,8 @@ jobs: - name: Check out repo uses: actions/checkout@v2 + with: + submodules: recursive - name: Setup Rust uses: actions-rs/toolchain@v1 @@ -49,9 +59,15 @@ jobs: toolchain: stable default: true components: clippy + target: wasm32-unknown-unknown - name: Enable Rust cache uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: "false" + + - name: Setup Trunk + uses: jetli/trunk-action@v0.5.0 - uses: actions-rs/clippy-check@v1 with: @@ -78,6 +94,8 @@ jobs: - name: Enable Rust cache uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: "false" - run: exit `cargo +nightly fmt --check | wc -l` @@ -100,6 +118,8 @@ jobs: - name: Enable Rust cache uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: "false" - run: cargo check diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml deleted file mode 100644 index bac693cbf..000000000 --- a/.github/workflows/nodejs.yml +++ /dev/null @@ -1,64 +0,0 @@ -on: - workflow_dispatch: - pull_request: - branches: - - master - -name: Node.JS binding - -jobs: - test: - name: Tests - runs-on: ubuntu-latest - steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - - uses: actions/checkout@v2 - - - name: Setup Node.JS - uses: actions/setup-node@v2 - with: - node-version: '16' - - - name: Setup Rust toolchain - uses: 
actions-rs/toolchain@v1 - with: - toolchain: stable - default: true - - - name: Install NPM deps - run: npm ci - - - name: Run tests - run: npm test - - linting: - name: Linting - runs-on: ubuntu-latest - steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - - uses: actions/checkout@v2 - - - name: Setup Node.JS - uses: actions/setup-node@v2 - with: - node-version: '16' - - - name: Setup Rust toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - default: true - - - name: Install NPM deps - run: npm ci - - - name: Run ES linter - run: npm run lint \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..2b7c58dda --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "grovedb/grovedbg"] + path = grovedb/grovedbg + url = https://github.com/dashpay/grovedbg diff --git a/Cargo.toml b/Cargo.toml index b0a389486..17e25e98a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,6 @@ [workspace] +resolver = "2" +exclude = ["grovedb/grovedbg"] members = [ "costs", "grovedb", @@ -7,4 +9,6 @@ members = [ "storage", "visualize", "path", + "grovedbg-types", + "grovedb-version" ] diff --git a/README.md b/README.md index 5094ed8b1..4d66f7f9c 100644 --- a/README.md +++ b/README.md @@ -232,6 +232,24 @@ From here we can build: ```cargo build``` +## grovedbg + +There is a work in progress implementation of a debugger layer for GroveDB. To use this library with +these capabilities enabled one needs to set a dependency with `grovedbg` feature. + +At build time this requires two environment dependencies: +1. `wasm32-unknown-unknown` Rust toolchain; +2. [trunk](https://trunkrs.dev/) utility. 
+ +Then, to launch visualizer tool to observe the database structure inside of your browser on a port, +let's say 10000, the following snippet should do: + +```rust + let db = Arc::new(GroveDb::open("db").unwrap()); + db.start_visualzier(10000); +``` + +Just remember to use Arc because the HTTP server might outlast the GroveDB instance. ## Performance diff --git a/costs/Cargo.toml b/costs/Cargo.toml index 5d9784d9d..8178f8399 100644 --- a/costs/Cargo.toml +++ b/costs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-costs" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" edition = "2021" license = "MIT" description = "Costs extension crate for GroveDB" @@ -10,6 +10,6 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] -thiserror = "1.0.30" +thiserror = "1.0.59" intmap = "2.0.0" -integer-encoding = "3.0.3" +integer-encoding = "4.0.0" diff --git a/costs/src/lib.rs b/costs/src/lib.rs index 83d29f640..a68670280 100644 --- a/costs/src/lib.rs +++ b/costs/src/lib.rs @@ -93,6 +93,11 @@ pub struct OperationCost { } impl OperationCost { + /// Is Nothing + pub fn is_nothing(&self) -> bool { + self == &Self::default() + } + /// Helper function to build default `OperationCost` with different /// `seek_count`. 
pub fn with_seek_count(seek_count: u16) -> Self { diff --git a/costs/src/storage_cost/removal.rs b/costs/src/storage_cost/removal.rs index 6d36a57be..9fa7af991 100644 --- a/costs/src/storage_cost/removal.rs +++ b/costs/src/storage_cost/removal.rs @@ -49,9 +49,10 @@ pub const UNKNOWN_EPOCH: u64 = u64::MAX; pub type StorageRemovalPerEpochByIdentifier = BTreeMap>; /// Removal bytes -#[derive(Debug, PartialEq, Clone, Eq)] +#[derive(Debug, PartialEq, Clone, Eq, Default)] pub enum StorageRemovedBytes { /// No storage removal + #[default] NoStorageRemoval, /// Basic storage removal BasicStorageRemoval(u32), @@ -59,12 +60,6 @@ pub enum StorageRemovedBytes { SectionedStorageRemoval(StorageRemovalPerEpochByIdentifier), } -impl Default for StorageRemovedBytes { - fn default() -> Self { - NoStorageRemoval - } -} - impl Add for StorageRemovedBytes { type Output = Self; diff --git a/grovedb-version/Cargo.toml b/grovedb-version/Cargo.toml new file mode 100644 index 000000000..06189c449 --- /dev/null +++ b/grovedb-version/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "grovedb-version" +authors = ["Samuel Westrich "] +description = "Versioning library for Platform" +version = "1.0.0-rc.2" +edition = "2021" +license = "MIT" + +[dependencies] +thiserror = { version = "1.0.59" } +versioned-feature-core = { git = "https://github.com/dashpay/versioned-feature-core", version = "1.0.0" } + +[features] +mock-versions = [] diff --git a/grovedb-version/src/error.rs b/grovedb-version/src/error.rs new file mode 100644 index 000000000..0d3d4c9a0 --- /dev/null +++ b/grovedb-version/src/error.rs @@ -0,0 +1,25 @@ +use thiserror::Error; +use versioned_feature_core::FeatureVersion; + +#[derive(Error, Debug)] +pub enum GroveVersionError { + /// Expected some specific versions + #[error("grove unknown version on {method}, received: {received}")] + UnknownVersionMismatch { + /// method + method: String, + /// the allowed versions for this method + known_versions: Vec, + /// requested core height + 
received: FeatureVersion, + }, + + /// Expected some specific versions + #[error("{method} not active for grove version")] + VersionNotActive { + /// method + method: String, + /// the allowed versions for this method + known_versions: Vec, + }, +} diff --git a/grovedb-version/src/lib.rs b/grovedb-version/src/lib.rs new file mode 100644 index 000000000..48b80a52e --- /dev/null +++ b/grovedb-version/src/lib.rs @@ -0,0 +1,106 @@ +use crate::version::GroveVersion; + +pub mod error; +pub mod version; + +#[macro_export] +macro_rules! check_grovedb_v0_with_cost { + ($method:expr, $version:expr) => {{ + const EXPECTED_VERSION: u16 = 0; + if $version != EXPECTED_VERSION { + return Err(GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()) + .wrap_with_cost(OperationCost::default()); + } + }}; +} + +#[macro_export] +macro_rules! check_grovedb_v0 { + ($method:expr, $version:expr) => {{ + const EXPECTED_VERSION: u16 = 0; + if $version != EXPECTED_VERSION { + return Err(GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()); + } + }}; +} + +#[macro_export] +macro_rules! check_merk_v0_with_cost { + ($method:expr, $version:expr) => {{ + const EXPECTED_VERSION: u16 = 0; + if $version != EXPECTED_VERSION { + return Err(GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()) + .wrap_with_cost(OperationCost::default()); + } + }}; +} + +#[macro_export] +macro_rules! 
check_merk_v0 { + ($method:expr, $version:expr) => {{ + const EXPECTED_VERSION: u16 = 0; + if $version != EXPECTED_VERSION { + return Err(GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()); + } + }}; +} + +pub trait TryFromVersioned: Sized { + /// The type returned in the event of a conversion error. + type Error; + + /// Performs the conversion. + fn try_from_versioned(value: T, grove_version: &GroveVersion) -> Result; +} + +pub trait TryIntoVersioned: Sized { + /// The type returned in the event of a conversion error. + type Error; + + /// Performs the conversion. + fn try_into_versioned(self, grove_version: &GroveVersion) -> Result; +} + +impl TryIntoVersioned for T +where + U: TryFromVersioned, +{ + type Error = U::Error; + + #[inline] + fn try_into_versioned(self, grove_version: &GroveVersion) -> Result { + U::try_from_versioned(self, grove_version) + } +} + +impl TryFromVersioned for T +where + T: TryFrom, +{ + type Error = T::Error; + + #[inline] + fn try_from_versioned(value: U, _grove_version: &GroveVersion) -> Result { + T::try_from(value) + } +} diff --git a/grovedb-version/src/version/grovedb_versions.rs b/grovedb-version/src/version/grovedb_versions.rs new file mode 100644 index 000000000..51bbdcc68 --- /dev/null +++ b/grovedb-version/src/version/grovedb_versions.rs @@ -0,0 +1,226 @@ +use versioned_feature_core::FeatureVersion; + +#[derive(Clone, Debug, Default)] +pub struct GroveDBVersions { + pub apply_batch: GroveDBApplyBatchVersions, + pub element: GroveDBElementMethodVersions, + pub operations: GroveDBOperationsVersions, + pub path_query_methods: GroveDBPathQueryMethodVersions, + pub replication: GroveDBReplicationVersions, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBPathQueryMethodVersions { + pub terminal_keys: FeatureVersion, + pub merge: FeatureVersion, + pub query_items_at_path: FeatureVersion, +} + +#[derive(Clone, Debug, 
Default)] +pub struct GroveDBApplyBatchVersions { + pub apply_batch_structure: FeatureVersion, + pub apply_body: FeatureVersion, + pub continue_partial_apply_body: FeatureVersion, + pub apply_operations_without_batching: FeatureVersion, + pub apply_batch: FeatureVersion, + pub apply_partial_batch: FeatureVersion, + pub open_batch_transactional_merk_at_path: FeatureVersion, + pub open_batch_merk_at_path: FeatureVersion, + pub apply_batch_with_element_flags_update: FeatureVersion, + pub apply_partial_batch_with_element_flags_update: FeatureVersion, + pub estimated_case_operations_for_batch: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsVersions { + pub get: GroveDBOperationsGetVersions, + pub insert: GroveDBOperationsInsertVersions, + pub delete: GroveDBOperationsDeleteVersions, + pub delete_up_tree: GroveDBOperationsDeleteUpTreeVersions, + pub query: GroveDBOperationsQueryVersions, + pub proof: GroveDBOperationsProofVersions, + pub average_case: GroveDBOperationsAverageCaseVersions, + pub worst_case: GroveDBOperationsWorstCaseVersions, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsGetVersions { + pub get: FeatureVersion, + pub get_caching_optional: FeatureVersion, + pub follow_reference: FeatureVersion, + pub get_raw: FeatureVersion, + pub get_raw_caching_optional: FeatureVersion, + pub get_raw_optional: FeatureVersion, + pub get_raw_optional_caching_optional: FeatureVersion, + pub has_raw: FeatureVersion, + pub check_subtree_exists_invalid_path: FeatureVersion, + pub average_case_for_has_raw: FeatureVersion, + pub average_case_for_has_raw_tree: FeatureVersion, + pub average_case_for_get_raw: FeatureVersion, + pub average_case_for_get: FeatureVersion, + pub average_case_for_get_tree: FeatureVersion, + pub worst_case_for_has_raw: FeatureVersion, + pub worst_case_for_get_raw: FeatureVersion, + pub worst_case_for_get: FeatureVersion, + pub is_empty_tree: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] 
+pub struct GroveDBOperationsProofVersions { + pub prove_query: FeatureVersion, + pub prove_query_many: FeatureVersion, + pub verify_query_with_options: FeatureVersion, + pub verify_query_raw: FeatureVersion, + pub verify_layer_proof: FeatureVersion, + pub verify_query: FeatureVersion, + pub verify_subset_query: FeatureVersion, + pub verify_query_with_absence_proof: FeatureVersion, + pub verify_subset_query_with_absence_proof: FeatureVersion, + pub verify_query_with_chained_path_queries: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsQueryVersions { + pub query_encoded_many: FeatureVersion, + pub query_many_raw: FeatureVersion, + pub get_proved_path_query: FeatureVersion, + pub query: FeatureVersion, + pub query_item_value: FeatureVersion, + pub query_item_value_or_sum: FeatureVersion, + pub query_sums: FeatureVersion, + pub query_raw: FeatureVersion, + pub query_keys_optional: FeatureVersion, + pub query_raw_keys_optional: FeatureVersion, + pub follow_element: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsAverageCaseVersions { + pub add_average_case_get_merk_at_path: FeatureVersion, + pub average_case_merk_replace_tree: FeatureVersion, + pub average_case_merk_insert_tree: FeatureVersion, + pub average_case_merk_delete_tree: FeatureVersion, + pub average_case_merk_insert_element: FeatureVersion, + pub average_case_merk_replace_element: FeatureVersion, + pub average_case_merk_patch_element: FeatureVersion, + pub average_case_merk_delete_element: FeatureVersion, + pub add_average_case_has_raw_cost: FeatureVersion, + pub add_average_case_has_raw_tree_cost: FeatureVersion, + pub add_average_case_get_raw_cost: FeatureVersion, + pub add_average_case_get_raw_tree_cost: FeatureVersion, + pub add_average_case_get_cost: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsWorstCaseVersions { + pub add_worst_case_get_merk_at_path: FeatureVersion, + pub 
worst_case_merk_replace_tree: FeatureVersion, + pub worst_case_merk_insert_tree: FeatureVersion, + pub worst_case_merk_delete_tree: FeatureVersion, + pub worst_case_merk_insert_element: FeatureVersion, + pub worst_case_merk_replace_element: FeatureVersion, + pub worst_case_merk_patch_element: FeatureVersion, + pub worst_case_merk_delete_element: FeatureVersion, + pub add_worst_case_has_raw_cost: FeatureVersion, + pub add_worst_case_get_raw_tree_cost: FeatureVersion, + pub add_worst_case_get_raw_cost: FeatureVersion, + pub add_worst_case_get_cost: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsInsertVersions { + pub insert: FeatureVersion, + pub insert_on_transaction: FeatureVersion, + pub insert_without_transaction: FeatureVersion, + pub add_element_on_transaction: FeatureVersion, + pub add_element_without_transaction: FeatureVersion, + pub insert_if_not_exists: FeatureVersion, + pub insert_if_changed_value: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsDeleteVersions { + pub delete: FeatureVersion, + pub clear_subtree: FeatureVersion, + pub delete_with_sectional_storage_function: FeatureVersion, + pub delete_if_empty_tree: FeatureVersion, + pub delete_if_empty_tree_with_sectional_storage_function: FeatureVersion, + pub delete_operation_for_delete_internal: FeatureVersion, + pub delete_internal_on_transaction: FeatureVersion, + pub delete_internal_without_transaction: FeatureVersion, + pub average_case_delete_operation_for_delete: FeatureVersion, + pub worst_case_delete_operation_for_delete: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsDeleteUpTreeVersions { + pub delete_up_tree_while_empty: FeatureVersion, + pub delete_up_tree_while_empty_with_sectional_storage: FeatureVersion, + pub delete_operations_for_delete_up_tree_while_empty: FeatureVersion, + pub add_delete_operations_for_delete_up_tree_while_empty: FeatureVersion, + pub 
average_case_delete_operations_for_delete_up_tree_while_empty: FeatureVersion, + pub worst_case_delete_operations_for_delete_up_tree_while_empty: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsApplyBatchVersions { + pub apply_batch_structure: FeatureVersion, + pub apply_body: FeatureVersion, + pub continue_partial_apply_body: FeatureVersion, + pub apply_operations_without_batching: FeatureVersion, + pub apply_batch: FeatureVersion, + pub apply_partial_batch: FeatureVersion, + pub open_batch_transactional_merk_at_path: FeatureVersion, + pub open_batch_merk_at_path: FeatureVersion, + pub apply_batch_with_element_flags_update: FeatureVersion, + pub apply_partial_batch_with_element_flags_update: FeatureVersion, + pub estimated_case_operations_for_batch: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBElementMethodVersions { + pub delete: FeatureVersion, + pub delete_with_sectioned_removal_bytes: FeatureVersion, + pub delete_into_batch_operations: FeatureVersion, + pub element_at_key_already_exists: FeatureVersion, + pub get: FeatureVersion, + pub get_optional: FeatureVersion, + pub get_from_storage: FeatureVersion, + pub get_optional_from_storage: FeatureVersion, + pub get_with_absolute_refs: FeatureVersion, + pub get_value_hash: FeatureVersion, + pub get_specialized_cost: FeatureVersion, + pub value_defined_cost: FeatureVersion, + pub value_defined_cost_for_serialized_value: FeatureVersion, + pub specialized_costs_for_key_value: FeatureVersion, + pub required_item_space: FeatureVersion, + pub insert: FeatureVersion, + pub insert_into_batch_operations: FeatureVersion, + pub insert_if_not_exists: FeatureVersion, + pub insert_if_not_exists_into_batch_operations: FeatureVersion, + pub insert_if_changed_value: FeatureVersion, + pub insert_if_changed_value_into_batch_operations: FeatureVersion, + pub insert_reference: FeatureVersion, + pub insert_reference_into_batch_operations: FeatureVersion, + pub 
insert_subtree: FeatureVersion, + pub insert_subtree_into_batch_operations: FeatureVersion, + pub get_query: FeatureVersion, + pub get_query_values: FeatureVersion, + pub get_query_apply_function: FeatureVersion, + pub get_path_query: FeatureVersion, + pub get_sized_query: FeatureVersion, + pub path_query_push: FeatureVersion, + pub query_item: FeatureVersion, + pub basic_push: FeatureVersion, + pub serialize: FeatureVersion, + pub serialized_size: FeatureVersion, + pub deserialize: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBReplicationVersions { + pub get_subtrees_metadata: FeatureVersion, + pub fetch_chunk: FeatureVersion, + pub start_snapshot_syncing: FeatureVersion, + pub apply_chunk: FeatureVersion, +} diff --git a/grovedb-version/src/version/merk_versions.rs b/grovedb-version/src/version/merk_versions.rs new file mode 100644 index 000000000..fac25f913 --- /dev/null +++ b/grovedb-version/src/version/merk_versions.rs @@ -0,0 +1,2 @@ +#[derive(Clone, Debug, Default)] +pub struct MerkVersions {} diff --git a/grovedb-version/src/version/mod.rs b/grovedb-version/src/version/mod.rs new file mode 100644 index 000000000..06ac4e120 --- /dev/null +++ b/grovedb-version/src/version/mod.rs @@ -0,0 +1,26 @@ +pub mod grovedb_versions; +pub mod merk_versions; +pub mod v1; + +pub use versioned_feature_core::*; + +use crate::version::{ + grovedb_versions::GroveDBVersions, merk_versions::MerkVersions, v1::GROVE_V1, +}; + +#[derive(Clone, Debug, Default)] +pub struct GroveVersion { + pub protocol_version: u32, + pub grovedb_versions: GroveDBVersions, + pub merk_versions: MerkVersions, +} + +impl GroveVersion { + pub fn latest<'a>() -> &'a Self { + GROVE_VERSIONS + .last() + .expect("expected to have a platform version") + } +} + +pub const GROVE_VERSIONS: &[GroveVersion] = &[GROVE_V1]; diff --git a/grovedb-version/src/version/v1.rs b/grovedb-version/src/version/v1.rs new file mode 100644 index 000000000..19bf135e1 --- /dev/null +++ 
b/grovedb-version/src/version/v1.rs @@ -0,0 +1,187 @@ +use crate::version::{ + grovedb_versions::{ + GroveDBApplyBatchVersions, GroveDBElementMethodVersions, + GroveDBOperationsAverageCaseVersions, GroveDBOperationsDeleteUpTreeVersions, + GroveDBOperationsDeleteVersions, GroveDBOperationsGetVersions, + GroveDBOperationsInsertVersions, GroveDBOperationsProofVersions, + GroveDBOperationsQueryVersions, GroveDBOperationsVersions, + GroveDBOperationsWorstCaseVersions, GroveDBPathQueryMethodVersions, + GroveDBReplicationVersions, GroveDBVersions, + }, + merk_versions::MerkVersions, + GroveVersion, +}; + +pub const GROVE_V1: GroveVersion = GroveVersion { + protocol_version: 0, + grovedb_versions: GroveDBVersions { + apply_batch: GroveDBApplyBatchVersions { + apply_batch_structure: 0, + apply_body: 0, + continue_partial_apply_body: 0, + apply_operations_without_batching: 0, + apply_batch: 0, + apply_partial_batch: 0, + open_batch_transactional_merk_at_path: 0, + open_batch_merk_at_path: 0, + apply_batch_with_element_flags_update: 0, + apply_partial_batch_with_element_flags_update: 0, + estimated_case_operations_for_batch: 0, + }, + element: GroveDBElementMethodVersions { + delete: 0, + delete_with_sectioned_removal_bytes: 0, + delete_into_batch_operations: 0, + element_at_key_already_exists: 0, + get: 0, + get_optional: 0, + get_from_storage: 0, + get_optional_from_storage: 0, + get_with_absolute_refs: 0, + get_value_hash: 0, + get_specialized_cost: 0, + value_defined_cost: 0, + value_defined_cost_for_serialized_value: 0, + specialized_costs_for_key_value: 0, + required_item_space: 0, + insert: 0, + insert_into_batch_operations: 0, + insert_if_not_exists: 0, + insert_if_not_exists_into_batch_operations: 0, + insert_if_changed_value: 0, + insert_if_changed_value_into_batch_operations: 0, + insert_reference: 0, + insert_reference_into_batch_operations: 0, + insert_subtree: 0, + insert_subtree_into_batch_operations: 0, + get_query: 0, + get_query_values: 0, + 
get_query_apply_function: 0, + get_path_query: 0, + get_sized_query: 0, + path_query_push: 0, + query_item: 0, + basic_push: 0, + serialize: 0, + serialized_size: 0, + deserialize: 0, + }, + operations: GroveDBOperationsVersions { + get: GroveDBOperationsGetVersions { + get: 0, + get_caching_optional: 0, + follow_reference: 0, + get_raw: 0, + get_raw_caching_optional: 0, + get_raw_optional: 0, + get_raw_optional_caching_optional: 0, + has_raw: 0, + check_subtree_exists_invalid_path: 0, + average_case_for_has_raw: 0, + average_case_for_has_raw_tree: 0, + average_case_for_get_raw: 0, + average_case_for_get: 0, + average_case_for_get_tree: 0, + worst_case_for_has_raw: 0, + worst_case_for_get_raw: 0, + worst_case_for_get: 0, + is_empty_tree: 0, + }, + insert: GroveDBOperationsInsertVersions { + insert: 0, + insert_on_transaction: 0, + insert_without_transaction: 0, + add_element_on_transaction: 0, + add_element_without_transaction: 0, + insert_if_not_exists: 0, + insert_if_changed_value: 0, + }, + delete: GroveDBOperationsDeleteVersions { + delete: 0, + clear_subtree: 0, + delete_with_sectional_storage_function: 0, + delete_if_empty_tree: 0, + delete_if_empty_tree_with_sectional_storage_function: 0, + delete_operation_for_delete_internal: 0, + delete_internal_on_transaction: 0, + delete_internal_without_transaction: 0, + average_case_delete_operation_for_delete: 0, + worst_case_delete_operation_for_delete: 0, + }, + delete_up_tree: GroveDBOperationsDeleteUpTreeVersions { + delete_up_tree_while_empty: 0, + delete_up_tree_while_empty_with_sectional_storage: 0, + delete_operations_for_delete_up_tree_while_empty: 0, + add_delete_operations_for_delete_up_tree_while_empty: 0, + average_case_delete_operations_for_delete_up_tree_while_empty: 0, + worst_case_delete_operations_for_delete_up_tree_while_empty: 0, + }, + query: GroveDBOperationsQueryVersions { + query_encoded_many: 0, + query_many_raw: 0, + get_proved_path_query: 0, + query: 0, + query_item_value: 0, + 
query_item_value_or_sum: 0, + query_sums: 0, + query_raw: 0, + query_keys_optional: 0, + query_raw_keys_optional: 0, + follow_element: 0, + }, + proof: GroveDBOperationsProofVersions { + prove_query: 0, + prove_query_many: 0, + verify_query_with_options: 0, + verify_query_raw: 0, + verify_layer_proof: 0, + verify_query: 0, + verify_subset_query: 0, + verify_query_with_absence_proof: 0, + verify_subset_query_with_absence_proof: 0, + verify_query_with_chained_path_queries: 0, + }, + average_case: GroveDBOperationsAverageCaseVersions { + add_average_case_get_merk_at_path: 0, + average_case_merk_replace_tree: 0, + average_case_merk_insert_tree: 0, + average_case_merk_delete_tree: 0, + average_case_merk_insert_element: 0, + average_case_merk_replace_element: 0, + average_case_merk_patch_element: 0, + average_case_merk_delete_element: 0, + add_average_case_has_raw_cost: 0, + add_average_case_has_raw_tree_cost: 0, + add_average_case_get_raw_cost: 0, + add_average_case_get_raw_tree_cost: 0, + add_average_case_get_cost: 0, + }, + worst_case: GroveDBOperationsWorstCaseVersions { + add_worst_case_get_merk_at_path: 0, + worst_case_merk_replace_tree: 0, + worst_case_merk_insert_tree: 0, + worst_case_merk_delete_tree: 0, + worst_case_merk_insert_element: 0, + worst_case_merk_replace_element: 0, + worst_case_merk_patch_element: 0, + worst_case_merk_delete_element: 0, + add_worst_case_has_raw_cost: 0, + add_worst_case_get_raw_tree_cost: 0, + add_worst_case_get_raw_cost: 0, + add_worst_case_get_cost: 0, + }, + }, + path_query_methods: GroveDBPathQueryMethodVersions { + terminal_keys: 0, + merge: 0, + query_items_at_path: 0, + }, + replication: GroveDBReplicationVersions { + get_subtrees_metadata: 0, + fetch_chunk: 0, + start_snapshot_syncing: 0, + apply_chunk: 0, + }, + }, + merk_versions: MerkVersions {}, +}; diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index a03f48e2e..57a27479a 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -1,7 +1,7 @@ [package] name = 
"grovedb" description = "Fully featured database using balanced hierarchical authenticated data structures" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" authors = ["Samuel Westrich ", "Wisdom Ogwu "] edition = "2021" license = "MIT" @@ -10,29 +10,36 @@ repository = "https://github.com/dashpay/grovedb" readme = "../README.md" documentation = "https://docs.rs/grovedb" - [dependencies] -grovedb-merk = { version = "1.0.0-rc.1", path = "../merk", optional = true, default-features = false } -thiserror = { version = "1.0.37", optional = true } -tempfile = { version = "3.3.0", optional = true } -bincode = { version = "1.3.3", optional = true } -serde = { version = "1.0.149", optional = true } -grovedb-storage = { version = "1.0.0-rc.1", path = "../storage", optional = true } -grovedb-visualize = { version = "1.0.0-rc.1", path = "../visualize", optional = true } -hex = { version = "0.4.3", optional = true } -itertools = { version = "0.10.5", optional = true } -integer-encoding = { version = "3.0.4", optional = true } -grovedb-costs = { version = "1.0.0-rc.1", path = "../costs", optional = true } +grovedb-merk = { version = "1.0.0-rc.2", path = "../merk", optional = true, default-features = false } +thiserror = { version = "1.0.59", optional = true } +tempfile = { version = "3.10.1", optional = true } +bincode = { version = "2.0.0-rc.3" } +grovedb-storage = { version = "1.0.0-rc.2", path = "../storage", optional = true } +grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize", optional = true } +hex = { version = "0.4.3"} +itertools = { version = "0.12.1", optional = true } +derive_more = { version = "0.99.18" } +integer-encoding = { version = "4.0.0", optional = true } +grovedb-costs = { version = "1.0.0-rc.2", path = "../costs", optional = true } nohash-hasher = { version = "0.2.0", optional = true } -indexmap = { version = "1.9.2", optional = true } +indexmap = { version = "2.2.6"} intmap = { version = "2.0.0", optional = true } -grovedb-path = { version = 
"1.0.0-rc.1", path = "../path" } +grovedb-path = { version = "1.0.0-rc.2", path = "../path" } +grovedbg-types = { path = "../grovedbg-types", optional = true } +tokio = { version = "1.37.0", features = ["rt-multi-thread", "net"], optional = true } +axum = { version = "0.7.5", features = ["macros"], optional = true } +tower-http = { version = "0.5.2", features = ["fs"], optional = true } +blake3 = "1.4.0" +bitvec = "1" +zip-extensions = { version ="0.6.2", optional = true } +grovedb-version = { version = "1.0.0-rc.2", path = "../grovedb-version" } [dev-dependencies] rand = "0.8.5" -criterion = "0.4.0" +criterion = "0.5.1" hex = "0.4.3" -pretty_assertions = "1.3.0" +pretty_assertions = "1.4.0" [[bench]] name = "insertion_benchmark" @@ -40,28 +47,39 @@ harness = false [features] default = ["full"] +proof_debug = ["grovedb-merk/proof_debug"] full = [ "grovedb-merk/full", "thiserror", "tempfile", - "bincode", - "serde/derive", "grovedb-storage/rocksdb_storage", - "grovedb-visualize", - "hex", + "visualize", "itertools", "integer-encoding", "grovedb-costs", "nohash-hasher", - "indexmap", "intmap" ] +visualize = [ + "grovedb-visualize", +] verify = [ "grovedb-merk/verify", "grovedb-costs", "thiserror", - "serde/derive", - "bincode", "integer-encoding", ] estimated_costs = ["full"] +grovedbg = [ + "grovedbg-types", + "tokio", + "full", + "grovedb-merk/grovedbg", + "axum", + "tower-http", + "zip-extensions", + "tempfile" +] + +[build-dependencies] +zip-extensions = "0.6.2" diff --git a/grovedb/benches/insertion_benchmark.rs b/grovedb/benches/insertion_benchmark.rs index b073508c6..051a32d11 100644 --- a/grovedb/benches/insertion_benchmark.rs +++ b/grovedb/benches/insertion_benchmark.rs @@ -50,9 +50,16 @@ pub fn insertion_benchmark_without_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); let test_leaf: &[u8] = b"leaf1"; - db.insert(EMPTY_PATH, test_leaf, Element::empty_tree(), None, None) - .unwrap() - 
.unwrap(); + db.insert( + EMPTY_PATH, + test_leaf, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .unwrap(); let keys = std::iter::repeat_with(|| rand::thread_rng().gen::<[u8; 32]>()).take(N_ITEMS); c.bench_function("scalars insertion without transaction", |b| { @@ -64,6 +71,7 @@ pub fn insertion_benchmark_without_transaction(c: &mut Criterion) { Element::new_item(k.to_vec()), None, None, + grove_version, ) .unwrap() .unwrap(); @@ -79,9 +87,16 @@ pub fn insertion_benchmark_with_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); let test_leaf: &[u8] = b"leaf1"; - db.insert(EMPTY_PATH, test_leaf, Element::empty_tree(), None, None) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + test_leaf, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .unwrap(); let keys = std::iter::repeat_with(|| rand::thread_rng().gen::<[u8; 32]>()).take(N_ITEMS); c.bench_function("scalars insertion with transaction", |b| { @@ -94,6 +109,7 @@ pub fn insertion_benchmark_with_transaction(c: &mut Criterion) { Element::new_item(k.to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -113,9 +129,16 @@ pub fn root_leaf_insertion_benchmark_without_transaction(c: &mut Criterion) { c.bench_function("root leaves insertion without transaction", |b| { b.iter(|| { for k in keys.clone() { - db.insert(EMPTY_PATH, &k, Element::empty_tree(), None, None) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + &k, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .unwrap(); } }) }); @@ -132,9 +155,16 @@ pub fn root_leaf_insertion_benchmark_with_transaction(c: &mut Criterion) { b.iter(|| { let tx = db.start_transaction(); for k in keys.clone() { - db.insert(EMPTY_PATH, &k, Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + &k, + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + 
.unwrap(); } db.commit_transaction(tx).unwrap().unwrap(); }) @@ -155,6 +185,7 @@ pub fn deeply_nested_insertion_benchmark_without_transaction(c: &mut Criterion) Element::empty_tree(), None, None, + grove_version, ) .unwrap() .unwrap(); @@ -172,6 +203,7 @@ pub fn deeply_nested_insertion_benchmark_without_transaction(c: &mut Criterion) Element::new_item(k.to_vec()), None, None, + grove_version, ) .unwrap() .unwrap(); @@ -194,6 +226,7 @@ pub fn deeply_nested_insertion_benchmark_with_transaction(c: &mut Criterion) { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .unwrap(); @@ -212,6 +245,7 @@ pub fn deeply_nested_insertion_benchmark_with_transaction(c: &mut Criterion) { Element::new_item(k.to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); diff --git a/grovedb/build.rs b/grovedb/build.rs new file mode 100644 index 000000000..cbdc11ac2 --- /dev/null +++ b/grovedb/build.rs @@ -0,0 +1,37 @@ +#[cfg(feature = "grovedbg")] +fn main() { + use std::{ + env, + path::PathBuf, + process::{Command, ExitStatus, Output}, + }; + + let out_dir = PathBuf::from(&env::var_os("OUT_DIR").unwrap()); + + let Output { + status, + stdout, + stderr, + } = Command::new("trunk") + .arg("build") + .arg("--release") + .arg("--dist") + .arg(&out_dir) + .arg("grovedbg/index.html") + .output() + .expect("cannot start trunk process"); + + if !status.success() { + let stdout_msg = String::from_utf8_lossy(&stdout); + let stderr_msg = String::from_utf8_lossy(&stderr); + let bindgen_version = env::var_os("TRUNK_TOOLS_WASM_BINDGEN").unwrap_or_default(); + panic!("Error running `trunk build --release`\nbindgen version:{bindgen_version:?}\n{stdout_msg}\n{stderr_msg}"); + } + + let zip_file = out_dir.join("grovedbg.zip"); + zip_extensions::write::zip_create_from_directory(&zip_file, &out_dir) + .expect("can't create a grovedbg zip archive"); +} + +#[cfg(not(feature = "grovedbg"))] +fn main() {} diff --git a/grovedb/grovedbg b/grovedb/grovedbg new file mode 160000 index 
000000000..954be7451 --- /dev/null +++ b/grovedb/grovedbg @@ -0,0 +1 @@ +Subproject commit 954be74510d3c3bb79a7e622e55af66aae5c6ad4 diff --git a/grovedb/src/batch/batch_structure.rs b/grovedb/src/batch/batch_structure.rs index f07aad174..eb00f1e8e 100644 --- a/grovedb/src/batch/batch_structure.rs +++ b/grovedb/src/batch/batch_structure.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Batch structure #[cfg(feature = "full")] diff --git a/grovedb/src/batch/estimated_costs/average_case_costs.rs b/grovedb/src/batch/estimated_costs/average_case_costs.rs index e6e58975c..2f50186bb 100644 --- a/grovedb/src/batch/estimated_costs/average_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/average_case_costs.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Average case costs #[cfg(feature = "full")] @@ -46,6 +18,7 @@ use grovedb_merk::{ }; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::RocksDbStorage; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use itertools::Itertools; @@ -69,6 +42,7 @@ impl Op { key: &KeyInfo, layer_element_estimates: &EstimatedLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let in_tree_using_sums = layer_element_estimates.is_sum_tree; let propagate_if_input = || { @@ -84,6 +58,7 @@ impl Op { layer_element_estimates, sum.is_some(), propagate, + grove_version, ), Op::InsertTreeWithRootHash { flags, sum, .. } => { GroveDb::average_case_merk_insert_tree( @@ -92,6 +67,7 @@ impl Op { sum.is_some(), in_tree_using_sums, propagate_if_input(), + grove_version, ) } Op::Insert { element } => GroveDb::average_case_merk_insert_element( @@ -99,6 +75,7 @@ impl Op { element, in_tree_using_sums, propagate_if_input(), + grove_version, ), Op::RefreshReference { reference_path_type, @@ -114,12 +91,14 @@ impl Op { ), in_tree_using_sums, propagate_if_input(), + grove_version, ), Op::Replace { element } => GroveDb::average_case_merk_replace_element( key, element, in_tree_using_sums, propagate_if_input(), + grove_version, ), Op::Patch { element, @@ -130,21 +109,27 @@ impl Op { *change_in_bytes, in_tree_using_sums, propagate_if_input(), + grove_version, + ), + Op::Delete => GroveDb::average_case_merk_delete_element( + key, + layer_element_estimates, + propagate, + grove_version, ), - Op::Delete => { - GroveDb::average_case_merk_delete_element(key, layer_element_estimates, propagate) - } Op::DeleteTree => GroveDb::average_case_merk_delete_tree( key, false, layer_element_estimates, propagate, + grove_version, ), Op::DeleteSumTree => GroveDb::average_case_merk_delete_tree( key, true, layer_element_estimates, propagate, + grove_version, ), } } @@ -204,6 +189,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { _batch_apply_options: 
&BatchApplyOptions, _flags_update: &mut G, _split_removal_bytes: &mut SR, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); @@ -212,8 +198,8 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { self.paths.get(path).ok_or_else(|| { let paths = self .paths - .iter() - .map(|(k, _v)| k.0.iter().map(|k| hex::encode(k.as_slice())).join("/")) + .keys() + .map(|k| k.0.iter().map(|k| hex::encode(k.as_slice())).join("/")) .join(" | "); Error::PathNotFoundInCacheForEstimatedCosts(format!( "required path {} not found in paths {}", @@ -234,8 +220,8 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { self.paths.get(path).ok_or_else(|| { let paths = self .paths - .iter() - .map(|(k, _v)| k.0.iter().map(|k| hex::encode(k.as_slice())).join("/")) + .keys() + .map(|k| k.0.iter().map(|k| hex::encode(k.as_slice())).join("/")) .join(" | "); Error::PathNotFoundInCacheForEstimatedCosts(format!( "required path for estimated merk caching {} not found in paths {}", @@ -244,11 +230,15 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { )) }) ); - GroveDb::add_average_case_get_merk_at_path::( - &mut cost, - path, - layer_should_be_empty, - layer_info.is_sum_tree, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_average_case_get_merk_at_path::( + &mut cost, + path, + layer_should_be_empty, + layer_info.is_sum_tree, + grove_version, + ) ); self.cached_merks .insert(path.clone(), layer_info.is_sum_tree); @@ -257,7 +247,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { for (key, op) in ops_at_path_by_key.into_iter() { cost_return_on_error!( &mut cost, - op.average_case_cost(&key, layer_element_estimates, false) + op.average_case_cost(&key, layer_element_estimates, false, grove_version) ); } @@ -268,20 +258,28 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { Ok(([0u8; 32], None, None)).wrap_with_cost(cost) } - fn update_base_merk_root_key(&mut self, _root_key: Option>) -> CostResult<(), Error> { + fn update_base_merk_root_key( + 
&mut self, + _root_key: Option>, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); cost.seek_count += 1; let base_path = KeyInfoPath(vec![]); if let Some(estimated_layer_info) = self.paths.get(&base_path) { // Then we have to get the tree - if self.cached_merks.get(&base_path).is_none() { - GroveDb::add_average_case_get_merk_at_path::( - &mut cost, - &base_path, - estimated_layer_info - .estimated_layer_count - .estimated_to_be_empty(), - estimated_layer_info.is_sum_tree, + if !self.cached_merks.contains_key(&base_path) { + cost_return_on_error_no_add!( + &cost, + GroveDb::add_average_case_get_merk_at_path::( + &mut cost, + &base_path, + estimated_layer_info + .estimated_layer_count + .estimated_to_be_empty(), + estimated_layer_info.is_sum_tree, + grove_version + ) ); self.cached_merks .insert(base_path, estimated_layer_info.is_sum_tree); @@ -306,6 +304,7 @@ mod tests { EstimatedLayerSizes::{AllItems, AllSubtrees}, EstimatedSumTrees::{NoSumTrees, SomeSumTrees}, }; + use grovedb_version::version::GroveVersion; use crate::{ batch::{ @@ -318,6 +317,7 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -343,11 +343,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( average_case_cost.eq(&cost), "average cost not eq {:?} \n to cost {:?}", @@ -385,6 +386,7 @@ mod tests { #[test] fn test_batch_root_one_tree_with_flags_insert_op_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -418,11 +420,12 @@ mod tests { 
|_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( average_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -450,6 +453,7 @@ mod tests { #[test] fn test_batch_root_one_item_insert_op_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -475,11 +479,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; // because we know the object we are inserting we can know the average // case cost if it doesn't already exist assert_eq!( @@ -510,12 +515,20 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_under_element_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], @@ -540,11 +553,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; // because we know the 
object we are inserting we can know the average // case cost if it doesn't already exist assert_eq!(cost.storage_cost, average_case_cost.storage_cost); @@ -576,7 +590,7 @@ mod tests { seek_count: 5, // todo: why is this 5 storage_cost: StorageCost { added_bytes: 115, - replaced_bytes: 106, + replaced_bytes: 75, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 109, @@ -587,12 +601,20 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_in_sub_tree_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], @@ -626,11 +648,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( average_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -671,6 +694,7 @@ mod tests { #[test] fn test_batch_root_one_sum_item_replace_op_average_case_costs() { + let grove_version = GroveVersion::latest(); let ops = vec![GroveDbOp::replace_op( vec![vec![7]], hex::decode("46447a3b4c8939fd4cf8b610ba7da3d3f6b52b39ab2549bf91503b9b07814055") @@ -709,6 +733,7 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); @@ -723,7 +748,7 @@ mod tests { seek_count: 41, storage_cost: StorageCost { added_bytes: 0, - replaced_bytes: 5625, + 
replaced_bytes: 5594, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 7669, @@ -734,12 +759,20 @@ mod tests { #[test] fn test_batch_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"keyb", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"keyb", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], @@ -773,10 +806,11 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to estimate costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; // at the moment we just check the added bytes are the same assert_eq!( average_case_cost.storage_cost.added_bytes, diff --git a/grovedb/src/batch/estimated_costs/mod.rs b/grovedb/src/batch/estimated_costs/mod.rs index f0f505bc1..54fc109c1 100644 --- a/grovedb/src/batch/estimated_costs/mod.rs +++ b/grovedb/src/batch/estimated_costs/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Estimated costs #[cfg(feature = "full")] diff --git a/grovedb/src/batch/estimated_costs/worst_case_costs.rs b/grovedb/src/batch/estimated_costs/worst_case_costs.rs index 5bb59dfa9..1b1d42e74 100644 --- a/grovedb/src/batch/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/worst_case_costs.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Worst case costs #[cfg(feature = "full")] @@ -45,6 +17,7 @@ use grovedb_merk::estimated_costs::worst_case_costs::{ use grovedb_merk::RootHashKeyAndSum; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::RocksDbStorage; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use itertools::Itertools; @@ -66,6 +39,7 @@ impl Op { is_in_parent_sum_tree: bool, worst_case_layer_element_estimates: &WorstCaseLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let propagate_if_input = || { if propagate { @@ -81,6 +55,7 @@ impl Op { is_in_parent_sum_tree, worst_case_layer_element_estimates, propagate, + grove_version, ), Op::InsertTreeWithRootHash { flags, sum, .. 
} => GroveDb::worst_case_merk_insert_tree( key, @@ -88,12 +63,14 @@ impl Op { sum.is_some(), is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::Insert { element } => GroveDb::worst_case_merk_insert_element( key, element, is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::RefreshReference { reference_path_type, @@ -109,12 +86,14 @@ impl Op { ), is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::Replace { element } => GroveDb::worst_case_merk_replace_element( key, element, is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::Patch { element, @@ -124,23 +103,27 @@ impl Op { element, is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::Delete => GroveDb::worst_case_merk_delete_element( key, worst_case_layer_element_estimates, propagate, + grove_version, ), Op::DeleteTree => GroveDb::worst_case_merk_delete_tree( key, false, worst_case_layer_element_estimates, propagate, + grove_version, ), Op::DeleteSumTree => GroveDb::worst_case_merk_delete_tree( key, true, worst_case_layer_element_estimates, propagate, + grove_version, ), } } @@ -200,6 +183,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { _batch_apply_options: &BatchApplyOptions, _flags_update: &mut G, _split_removal_bytes: &mut SR, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); @@ -214,15 +198,29 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { ); // Then we have to get the tree - if self.cached_merks.get(path).is_none() { - GroveDb::add_worst_case_get_merk_at_path::(&mut cost, path, false); + if !self.cached_merks.contains(path) { + cost_return_on_error_no_add!( + &cost, + GroveDb::add_worst_case_get_merk_at_path::( + &mut cost, + path, + false, + grove_version, + ) + ); self.cached_merks.insert(path.clone()); } for (key, op) in ops_at_path_by_key.into_iter() { cost_return_on_error!( &mut cost, - op.worst_case_cost(&key, false, worst_case_layer_element_estimates, false) + 
op.worst_case_cost( + &key, + false, + worst_case_layer_element_estimates, + false, + grove_version + ) ); } @@ -233,15 +231,25 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { Ok(([0u8; 32], None, None)).wrap_with_cost(cost) } - fn update_base_merk_root_key(&mut self, _root_key: Option>) -> CostResult<(), Error> { + fn update_base_merk_root_key( + &mut self, + _root_key: Option>, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); cost.seek_count += 1; let base_path = KeyInfoPath(vec![]); if let Some(_estimated_layer_info) = self.paths.get(&base_path) { // Then we have to get the tree - if self.cached_merks.get(&base_path).is_none() { - GroveDb::add_worst_case_get_merk_at_path::( - &mut cost, &base_path, false, + if !self.cached_merks.contains(&base_path) { + cost_return_on_error_no_add!( + &cost, + GroveDb::add_worst_case_get_merk_at_path::( + &mut cost, + &base_path, + false, + grove_version, + ) ); self.cached_merks.insert(base_path); } @@ -261,6 +269,7 @@ mod tests { }; #[rustfmt::skip] use grovedb_merk::estimated_costs::worst_case_costs::WorstCaseLayerInformation::MaxElementsNumber; + use grovedb_version::version::GroveVersion; use crate::{ batch::{ @@ -273,6 +282,7 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -291,11 +301,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -326,6 +337,7 @@ mod tests { #[test] fn test_batch_root_one_tree_with_flags_insert_op_worst_case_costs() { + let grove_version = 
GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -344,11 +356,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -379,6 +392,7 @@ mod tests { #[test] fn test_batch_root_one_item_insert_op_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -397,11 +411,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -432,12 +447,20 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_under_element_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], @@ -454,11 +477,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = 
db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -489,12 +513,20 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_in_sub_tree_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], @@ -515,11 +547,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -544,12 +577,20 @@ mod tests { #[test] fn test_batch_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"keyb", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"keyb", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], @@ -566,9 +607,10 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ); assert!(worst_case_cost_result.value.is_ok()); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; // at the moment 
we just check the added bytes are the same assert_eq!( worst_case_cost_result.cost.storage_cost.added_bytes, diff --git a/grovedb/src/batch/just_in_time_cost_tests.rs b/grovedb/src/batch/just_in_time_cost_tests.rs index 2321b467e..e1fddf5c7 100644 --- a/grovedb/src/batch/just_in_time_cost_tests.rs +++ b/grovedb/src/batch/just_in_time_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2023 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! This tests just in time costs //! 
Just in time costs modify the tree in the same batch @@ -33,6 +5,8 @@ mod tests { use std::option::Option::None; + use grovedb_version::version::GroveVersion; + use crate::{ batch::GroveDbOp, reference_path::ReferencePathType::UpstreamFromElementHeightReference, @@ -42,15 +16,30 @@ mod tests { #[test] fn test_partial_costs_with_no_new_operations_are_same_as_apply_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"documents", Element::empty_tree(), None, None) - .cost_as_result() - .expect("expected to insert successfully"); - db.insert(EMPTY_PATH, b"balances", Element::empty_tree(), None, None) - .cost_as_result() - .expect("expected to insert successfully"); + db.insert( + EMPTY_PATH, + b"documents", + Element::empty_tree(), + None, + None, + grove_version, + ) + .cost_as_result() + .expect("expected to insert successfully"); + db.insert( + EMPTY_PATH, + b"balances", + Element::empty_tree(), + None, + None, + grove_version, + ) + .cost_as_result() + .expect("expected to insert successfully"); let ops = vec![ GroveDbOp::insert_op( vec![b"documents".to_vec()], @@ -73,27 +62,38 @@ mod tests { ]; let full_cost = db - .apply_batch(ops.clone(), None, Some(&tx)) + .apply_batch(ops.clone(), None, Some(&tx), grove_version) .cost_as_result() .expect("expected to apply batch"); let apply_root_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("expected to get root hash"); - db.get([b"documents".as_slice()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); - db.get([b"documents".as_slice()].as_ref(), b"key3", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key3", + Some(&tx), + grove_version, + ) + .unwrap() + 
.expect("cannot get element"); db.get( [b"documents".as_slice(), b"key3".as_slice()].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); @@ -101,27 +101,44 @@ mod tests { tx.rollback().expect("expected to rollback"); let cost = db - .apply_partial_batch(ops, None, |_cost, _left_over_ops| Ok(vec![]), Some(&tx)) + .apply_partial_batch( + ops, + None, + |_cost, _left_over_ops| Ok(vec![]), + Some(&tx), + grove_version, + ) .cost_as_result() .expect("expected to apply batch"); let apply_partial_root_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("expected to get root hash"); - db.get([b"documents".as_slice()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); - db.get([b"documents".as_slice()].as_ref(), b"key3", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key3", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); db.get( [b"documents".as_slice(), b"key3".as_slice()].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); @@ -133,18 +150,27 @@ mod tests { #[test] fn test_partial_costs_with_add_balance_operations() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"documents", Element::empty_tree(), None, None) - .cost_as_result() - .expect("expected to insert successfully"); + db.insert( + EMPTY_PATH, + b"documents", + Element::empty_tree(), + None, + None, + grove_version, + ) + .cost_as_result() + .expect("expected to insert successfully"); db.insert( EMPTY_PATH, b"balances", Element::empty_sum_tree(), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -170,27 +196,38 @@ mod 
tests { ]; let full_cost = db - .apply_batch(ops.clone(), None, Some(&tx)) + .apply_batch(ops.clone(), None, Some(&tx), grove_version) .cost_as_result() .expect("expected to apply batch"); let apply_root_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("expected to get root hash"); - db.get([b"documents".as_slice()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); - db.get([b"documents".as_slice()].as_ref(), b"key3", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key3", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); db.get( [b"documents".as_slice(), b"key3".as_slice()].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); @@ -218,33 +255,50 @@ mod tests { Ok(new_ops) }, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to apply batch"); let apply_partial_root_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("expected to get root hash"); - db.get([b"documents".as_slice()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); - db.get([b"documents".as_slice()].as_ref(), b"key3", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key3", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); db.get( [b"documents".as_slice(), b"key3".as_slice()].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); let balance = db - .get([b"balances".as_slice()].as_ref(), b"person", Some(&tx)) + .get( + 
[b"balances".as_slice()].as_ref(), + b"person", + Some(&tx), + grove_version, + ) .unwrap() .expect("cannot get element"); diff --git a/grovedb/src/batch/key_info.rs b/grovedb/src/batch/key_info.rs index 19bea9e20..e7dd25b5b 100644 --- a/grovedb/src/batch/key_info.rs +++ b/grovedb/src/batch/key_info.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Key info #[cfg(feature = "full")] @@ -100,20 +72,7 @@ impl PartialEq<&[u8]> for KeyInfo { #[cfg(feature = "full")] impl PartialOrd for KeyInfo { fn partial_cmp(&self, other: &Self) -> Option { - match self.as_slice().partial_cmp(other.as_slice()) { - None => None, - Some(ord) => match ord { - Ordering::Less => Some(Ordering::Less), - Ordering::Equal => { - let other_len = other.max_length(); - match self.max_length().partial_cmp(&other_len) { - None => Some(Ordering::Equal), - Some(ord) => Some(ord), - } - } - Ordering::Greater => Some(Ordering::Greater), - }, - } + Some(self.cmp(other)) } } diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index f61202091..7f9c119ef 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Apply multiple GroveDB operations atomically. mod batch_structure; @@ -80,13 +52,16 @@ use grovedb_merk::{ value_hash, NULL_HASH, }, CryptoHash, Error as MerkError, Merk, MerkType, RootHashKeyAndSum, - TreeFeatureType::{BasicMerk, SummedMerk}, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; use grovedb_path::SubtreePath; use grovedb_storage::{ rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, Storage, StorageBatch, StorageContext, }; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use grovedb_visualize::{Drawer, Visualize}; use integer_encoding::VarInt; use itertools::Itertools; @@ -166,21 +141,31 @@ pub enum Op { DeleteSumTree, } +impl Op { + fn to_u8(&self) -> u8 { + match self { + Op::DeleteTree => 0, + Op::DeleteSumTree => 1, + Op::Delete => 2, + Op::InsertTreeWithRootHash { .. } => 3, + Op::ReplaceTreeRootKey { .. } => 4, + Op::RefreshReference { .. } => 5, + Op::Replace { .. } => 6, + Op::Patch { .. } => 7, + Op::Insert { .. } => 8, + } + } +} + impl PartialOrd for Op { fn partial_cmp(&self, other: &Self) -> Option { - match (self, other) { - (Op::Delete, Op::Insert { .. }) => Some(Ordering::Less), - (Op::Delete, Op::Replace { .. }) => Some(Ordering::Less), - (Op::Insert { .. }, Op::Delete) => Some(Ordering::Greater), - (Op::Replace { .. }, Op::Delete) => Some(Ordering::Greater), - _ => Some(Ordering::Equal), - } + Some(self.cmp(other)) } } impl Ord for Op { fn cmp(&self, other: &Self) -> Ordering { - self.partial_cmp(other).expect("all ops have order") + self.to_u8().cmp(&other.to_u8()) } } @@ -370,33 +355,25 @@ impl fmt::Debug for GroveDbOp { self.key.visualize(key_drawer).unwrap(); let op_dbg = match &self.op { - Op::Insert { element } => match element { - Element::Item(..) => "Insert Item", - Element::Reference(..) => "Insert Ref", - Element::Tree(..) => "Insert Tree", - Element::SumTree(..) => "Insert Sum Tree", - Element::SumItem(..) 
=> "Insert Sum Item", - }, - Op::Replace { element } => match element { - Element::Item(..) => "Replace Item", - Element::Reference(..) => "Replace Ref", - Element::Tree(..) => "Replace Tree", - Element::SumTree(..) => "Replace Sum Tree", - Element::SumItem(..) => "Replace Sum Item", - }, - Op::Patch { element, .. } => match element { - Element::Item(..) => "Patch Item", - Element::Reference(..) => "Patch Ref", - Element::Tree(..) => "Patch Tree", - Element::SumTree(..) => "Patch Sum Tree", - Element::SumItem(..) => "Patch Sum Item", - }, - Op::RefreshReference { .. } => "Refresh Reference", - Op::Delete => "Delete", - Op::DeleteTree => "Delete Tree", - Op::DeleteSumTree => "Delete Sum Tree", - Op::ReplaceTreeRootKey { .. } => "Replace Tree Hash and Root Key", - Op::InsertTreeWithRootHash { .. } => "Insert Tree Hash and Root Key", + Op::Insert { element } => format!("Insert {:?}", element), + Op::Replace { element } => format!("Replace {:?}", element), + Op::Patch { element, .. } => format!("Patch {:?}", element), + Op::RefreshReference { + reference_path_type, + max_reference_hop, + trust_refresh_reference, + .. + } => { + format!( + "Refresh Reference: path {:?}, max_hop {:?}, trust_reference {} ", + reference_path_type, max_reference_hop, trust_refresh_reference + ) + } + Op::Delete => "Delete".to_string(), + Op::DeleteTree => "Delete Tree".to_string(), + Op::DeleteSumTree => "Delete Sum Tree".to_string(), + Op::ReplaceTreeRootKey { .. } => "Replace Tree Hash and Root Key".to_string(), + Op::InsertTreeWithRootHash { .. 
} => "Insert Tree Hash and Root Key".to_string(), }; f.debug_struct("GroveDbOp") @@ -550,7 +527,7 @@ impl GroveDbOp { } /// Verify consistency of operations - pub fn verify_consistency_of_operations(ops: &Vec) -> GroveDbOpConsistencyResults { + pub fn verify_consistency_of_operations(ops: &[GroveDbOp]) -> GroveDbOpConsistencyResults { let ops_len = ops.len(); // operations should not have any duplicates let mut repeated_ops = vec![]; @@ -692,9 +669,14 @@ trait TreeCache { batch_apply_options: &BatchApplyOptions, flags_update: &mut G, split_removal_bytes: &mut SR, + grove_version: &GroveVersion, ) -> CostResult; - fn update_base_merk_root_key(&mut self, root_key: Option>) -> CostResult<(), Error>; + fn update_base_merk_root_key( + &mut self, + root_key: Option>, + grove_version: &GroveVersion, + ) -> CostResult<(), Error>; } impl<'db, S, F> TreeCacheMerkByPath @@ -749,6 +731,7 @@ where ops_by_qualified_paths: &'a BTreeMap>, Op>, recursions_allowed: u8, intermediate_reference_info: Option<&'a ReferencePathType>, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); let (key, reference_path) = qualified_path.split_last().unwrap(); // already checked @@ -766,8 +749,13 @@ where if recursions_allowed == 1 { let referenced_element_value_hash_opt = cost_return_on_error!( &mut cost, - merk.get_value_hash(key.as_ref(), true) - .map_err(|e| Error::CorruptedData(e.to_string())) + merk.get_value_hash( + key.as_ref(), + true, + Some(Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|e| Error::CorruptedData(e.to_string())) ); let referenced_element_value_hash = cost_return_on_error!( @@ -798,6 +786,7 @@ where path.as_slice(), ops_by_qualified_paths, recursions_allowed - 1, + grove_version, ) } else { // Here the element being referenced doesn't change in the same batch @@ -806,8 +795,13 @@ where // change in the batch. 
let referenced_element = cost_return_on_error!( &mut cost, - merk.get(key.as_ref(), true) - .map_err(|e| Error::CorruptedData(e.to_string())) + merk.get( + key.as_ref(), + true, + Some(Element::value_defined_cost_for_serialized_value), + grove_version + ) + .map_err(|e| Error::CorruptedData(e.to_string())) ); let referenced_element = cost_return_on_error_no_add!( @@ -828,14 +822,15 @@ where let element = cost_return_on_error_no_add!( &cost, - Element::deserialize(referenced_element.as_slice()).map_err(|_| { + Element::deserialize(referenced_element.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) ); match element { Element::Item(..) | Element::SumItem(..) => { - let serialized = cost_return_on_error_no_add!(&cost, element.serialize()); + let serialized = + cost_return_on_error_no_add!(&cost, element.serialize(grove_version)); let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } @@ -848,6 +843,7 @@ where path.as_slice(), ops_by_qualified_paths, recursions_allowed - 1, + grove_version, ) } Element::Tree(..) | Element::SumTree(..) => Err(Error::InvalidBatchOperation( @@ -873,6 +869,7 @@ where qualified_path: &[Vec], ops_by_qualified_paths: &'a BTreeMap>, Op>, recursions_allowed: u8, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); if recursions_allowed == 0 { @@ -890,8 +887,10 @@ where Op::Insert { element } | Op::Replace { element } | Op::Patch { element, .. } => { match element { Element::Item(..) | Element::SumItem(..) 
=> { - let serialized = - cost_return_on_error_no_add!(&cost, element.serialize()); + let serialized = cost_return_on_error_no_add!( + &cost, + element.serialize(grove_version) + ); let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } @@ -907,6 +906,7 @@ where path.as_slice(), ops_by_qualified_paths, recursions_allowed - 1, + grove_version, ) } Element::Tree(..) | Element::SumTree(..) => { @@ -933,6 +933,7 @@ where ops_by_qualified_paths, recursions_allowed, reference_info, + grove_version, ) } Op::Delete | Op::DeleteTree | Op::DeleteSumTree => { @@ -948,6 +949,7 @@ where ops_by_qualified_paths, recursions_allowed, None, + grove_version, ) } } @@ -979,7 +981,11 @@ where Ok(()).wrap_with_cost(cost) } - fn update_base_merk_root_key(&mut self, root_key: Option>) -> CostResult<(), Error> { + fn update_base_merk_root_key( + &mut self, + root_key: Option>, + _grove_version: &GroveVersion, + ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); let base_path = vec![]; let merk_wrapped = self @@ -990,7 +996,7 @@ where let mut merk = cost_return_on_error!(&mut cost, merk_wrapped); merk.set_base_root_key(root_key) .add_cost(cost) - .map_err(|_| Error::InternalError("unable to set base root key")) + .map_err(|_| Error::InternalError("unable to set base root key".to_string())) } fn execute_ops_on_path( @@ -1001,6 +1007,7 @@ where batch_apply_options: &BatchApplyOptions, flags_update: &mut G, split_removal_bytes: &mut SR, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); // todo: fix this @@ -1048,7 +1055,8 @@ where self.follow_reference_get_value_hash( path_reference.as_slice(), ops_by_qualified_paths, - element_max_reference_hop.unwrap_or(MAX_REFERENCE_HOPS as u8) + element_max_reference_hop.unwrap_or(MAX_REFERENCE_HOPS as u8), + grove_version, ) ); @@ -1058,7 +1066,8 @@ where key_info.get_key_clone(), referenced_element_value_hash, &mut batch_operations, - 
merk_feature_type + merk_feature_type, + grove_version, ) ); } @@ -1076,7 +1085,8 @@ where NULL_HASH, false, &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version, ) ); } @@ -1094,7 +1104,8 @@ where &mut merk, key_info.get_key(), &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version, ) ); if !inserted { @@ -1109,7 +1120,8 @@ where element.insert_into_batch_operations( key_info.get_key(), &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version, ) ); } @@ -1130,18 +1142,23 @@ where } else { let value = cost_return_on_error!( &mut cost, - merk.get(key_info.as_slice(), true) - .map(|result_value| result_value - .map_err(Error::MerkError) - .and_then(|maybe_value| maybe_value.ok_or( - Error::InvalidInput( - "trying to refresh a non existing reference", - ) - ))) + merk.get( + key_info.as_slice(), + true, + Some(Element::value_defined_cost_for_serialized_value), + grove_version + ) + .map( + |result_value| result_value.map_err(Error::MerkError).and_then( + |maybe_value| maybe_value.ok_or(Error::InvalidInput( + "trying to refresh a non existing reference", + )) + ) + ) ); cost_return_on_error_no_add!( &cost, - Element::deserialize(value.as_slice()).map_err(|_| { + Element::deserialize(value.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) ) @@ -1150,13 +1167,14 @@ where let Element::Reference(path_reference, max_reference_hop, _) = &element else { return Err(Error::InvalidInput( "trying to refresh a an element that is not a reference", - )).wrap_with_cost(cost) + )) + .wrap_with_cost(cost); }; let merk_feature_type = if is_sum_tree { - SummedMerk(0) + SummedMerkNode(0) } else { - BasicMerk + BasicMerkNode }; let path_reference = cost_return_on_error!( @@ -1180,7 +1198,8 @@ where self.follow_reference_get_value_hash( path_reference.as_slice(), ops_by_qualified_paths, - max_reference_hop.unwrap_or(MAX_REFERENCE_HOPS as u8) + 
max_reference_hop.unwrap_or(MAX_REFERENCE_HOPS as u8), + grove_version ) ); @@ -1190,7 +1209,8 @@ where key_info.get_key_clone(), referenced_element_value_hash, &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version ) ); } @@ -1202,7 +1222,8 @@ where false, is_sum_tree, /* we are in a sum tree, this might or might not be a * sum item */ - &mut batch_operations + &mut batch_operations, + grove_version ) ); } @@ -1213,7 +1234,8 @@ where key_info.get_key(), true, false, - &mut batch_operations + &mut batch_operations, + grove_version ) ); } @@ -1224,7 +1246,8 @@ where key_info.get_key(), true, true, - &mut batch_operations + &mut batch_operations, + grove_version ) ); } @@ -1241,7 +1264,8 @@ where root_key, hash, sum, - &mut batch_operations + &mut batch_operations, + grove_version ) ); } @@ -1267,7 +1291,8 @@ where hash, false, &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version ) ); } @@ -1275,21 +1300,22 @@ where } cost_return_on_error!( &mut cost, - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch_operations, &[], Some(batch_apply_options.as_merk_options()), &|key, value| { - Element::specialized_costs_for_key_value(key, value, is_sum_tree) + Element::specialized_costs_for_key_value(key, value, is_sum_tree, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), &mut |storage_costs, old_value, new_value| { // todo: change the flags without full deserialization - let old_element = Element::deserialize(old_value.as_slice()) + let old_element = Element::deserialize(old_value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; let maybe_old_flags = old_element.get_flags_owned(); - let mut new_element = Element::deserialize(new_value.as_slice()) + let mut new_element = Element::deserialize(new_value.as_slice(), grove_version) .map_err(|e| 
MerkError::ClientCorruptionError(e.to_string()))?; let maybe_new_flags = new_element.get_flags_mut(); match maybe_new_flags { @@ -1306,9 +1332,11 @@ where })?; if changed { let flags_len = new_flags.len() as u32; - new_value.clone_from(&new_element.serialize().map_err(|e| { - MerkError::ClientCorruptionError(e.to_string()) - })?); + new_value.clone_from( + &new_element.serialize(grove_version).map_err(|e| { + MerkError::ClientCorruptionError(e.to_string()) + })?, + ); // we need to give back the value defined cost in the case that the // new element is a tree match new_element { @@ -1341,7 +1369,7 @@ where } }, &mut |value, removed_key_bytes, removed_value_bytes| { - let mut element = Element::deserialize(value.as_slice()) + let mut element = Element::deserialize(value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; let maybe_flags = element.get_flags_mut(); match maybe_flags { @@ -1355,6 +1383,7 @@ where } } }, + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) ); @@ -1379,6 +1408,7 @@ impl GroveDb { fn apply_batch_structure, F, SR>( batch_structure: BatchStructure, batch_apply_options: Option, + grove_version: &GroveVersion, ) -> CostResult, Error> where F: FnMut(&StorageCost, Option, &mut ElementFlags) -> Result, @@ -1388,6 +1418,13 @@ impl GroveDb { u32, ) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { + check_grovedb_v0_with_cost!( + "apply_batch_structure", + grove_version + .grovedb_versions + .apply_batch + .apply_batch_structure + ); let mut cost = OperationCost::default(); let BatchStructure { mut ops_by_level_paths, @@ -1417,6 +1454,7 @@ impl GroveDb { &batch_apply_options, &mut flags_update, &mut split_removal_bytes, + grove_version, ) ); if batch_apply_options.base_root_storage_is_free { @@ -1424,7 +1462,7 @@ impl GroveDb { let mut update_root_cost = cost_return_on_error_no_add!( &cost, merk_tree_cache - .update_base_merk_root_key(calculated_root_key) + 
.update_base_merk_root_key(calculated_root_key, grove_version) .cost_as_result() ); update_root_cost.storage_cost = StorageCost::default(); @@ -1432,7 +1470,8 @@ impl GroveDb { } else { cost_return_on_error!( &mut cost, - merk_tree_cache.update_base_merk_root_key(calculated_root_key) + merk_tree_cache + .update_base_merk_root_key(calculated_root_key, grove_version) ); } } else { @@ -1445,6 +1484,7 @@ impl GroveDb { &batch_apply_options, &mut flags_update, &mut split_removal_bytes, + grove_version, ) ); @@ -1567,9 +1607,7 @@ impl GroveDb { // we need to pause the batch execution return Ok(Some(ops_by_level_paths)).wrap_with_cost(cost); } - if current_level > 0 { - current_level -= 1; - } + current_level = current_level.saturating_sub(1); } Ok(None).wrap_with_cost(cost) } @@ -1595,7 +1633,12 @@ impl GroveDb { Error, >, get_merk_fn: impl FnMut(&[Vec], bool) -> CostResult, Error>, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "apply_body", + grove_version.grovedb_versions.apply_batch.apply_body + ); let mut cost = OperationCost::default(); let batch_structure = cost_return_on_error!( &mut cost, @@ -1609,7 +1652,8 @@ impl GroveDb { } ) ); - Self::apply_batch_structure(batch_structure, batch_apply_options).add_cost(cost) + Self::apply_batch_structure(batch_structure, batch_apply_options, grove_version) + .add_cost(cost) } /// Method to propagate updated subtree root hashes up to GroveDB root @@ -1634,7 +1678,15 @@ impl GroveDb { Error, >, get_merk_fn: impl FnMut(&[Vec], bool) -> CostResult, Error>, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "continue_partial_apply_body", + grove_version + .grovedb_versions + .apply_batch + .continue_partial_apply_body + ); let mut cost = OperationCost::default(); let batch_structure = cost_return_on_error!( &mut cost, @@ -1649,7 +1701,8 @@ impl GroveDb { } ) ); - Self::apply_batch_structure(batch_structure, batch_apply_options).add_cost(cost) + 
Self::apply_batch_structure(batch_structure, batch_apply_options, grove_version) + .add_cost(cost) } /// Applies operations on GroveDB without batching @@ -1658,7 +1711,15 @@ impl GroveDb { ops: Vec, options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_operations_without_batching", + grove_version + .grovedb_versions + .apply_batch + .apply_operations_without_batching + ); let mut cost = OperationCost::default(); for op in ops.into_iter() { match op.op { @@ -1674,6 +1735,7 @@ impl GroveDb { element.to_owned(), options.clone().map(|o| o.as_insert_options()), transaction, + grove_version, ) ); } @@ -1686,7 +1748,8 @@ impl GroveDb { path_slices.as_slice(), op.key.as_slice(), options.clone().map(|o| o.as_delete_options()), - transaction + transaction, + grove_version ) ); } @@ -1702,7 +1765,12 @@ impl GroveDb { ops: Vec, batch_apply_options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_batch", + grove_version.grovedb_versions.apply_batch.apply_batch + ); self.apply_batch_with_element_flags_update( ops, batch_apply_options, @@ -1714,6 +1782,7 @@ impl GroveDb { )) }, transaction, + grove_version, ) } @@ -1727,7 +1796,15 @@ impl GroveDb { &Option, ) -> Result, Error>, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_partial_batch", + grove_version + .grovedb_versions + .apply_batch + .apply_partial_batch + ); self.apply_partial_batch_with_element_flags_update( ops, batch_apply_options, @@ -1740,6 +1817,7 @@ impl GroveDb { }, cost_based_add_on_operations, transaction, + grove_version, ) } @@ -1751,7 +1829,15 @@ impl GroveDb { path: SubtreePath, tx: &'db Transaction, new_merk: bool, + grove_version: &GroveVersion, ) -> CostResult>, Error> { + check_grovedb_v0_with_cost!( + 
"open_batch_transactional_merk_at_path", + grove_version + .grovedb_versions + .apply_batch + .open_batch_transactional_merk_at_path + ); let mut cost = OperationCost::default(); let storage = self .db @@ -1769,37 +1855,46 @@ impl GroveDb { .unwrap_add_cost(&mut cost); let element = cost_return_on_error!( &mut cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|_| { - Error::InvalidPath(format!( - "could not get key for parent of subtree for batch at path {}", - parent_path.to_vec().into_iter().map(hex::encode).join("/") - )) - }) + Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( + |_| { + Error::InvalidPath(format!( + "could not get key for parent of subtree for batch at path {}", + parent_path.to_vec().into_iter().map(hex::encode).join("/") + )) + } + ) ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData( - "cannot open a subtree with given root key".to_owned(), - ) - }) - .add_cost(cost) + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .add_cost(cost) } else { Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree", + "cannot open a subtree as parent exists but is not a tree".to_string(), )) .wrap_with_cost(OperationCost::default()) } } + } else if new_merk { + Ok(Merk::open_empty(storage, MerkType::BaseMerk, false)).wrap_with_cost(cost) } else { - if new_merk { - Ok(Merk::open_empty(storage, MerkType::BaseMerk, false)).wrap_with_cost(cost) - } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) - } + 
Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) + .add_cost(cost) } } @@ -1809,7 +1904,15 @@ impl GroveDb { storage_batch: &'a StorageBatch, path: SubtreePath, new_merk: bool, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "open_batch_merk_at_path", + grove_version + .grovedb_versions + .apply_batch + .open_batch_merk_at_path + ); let mut local_cost = OperationCost::default(); let storage = self .db @@ -1830,15 +1933,21 @@ impl GroveDb { .unwrap_add_cost(&mut local_cost); let element = cost_return_on_error!( &mut local_cost, - Element::get_from_storage(&parent_storage, last) + Element::get_from_storage(&parent_storage, last, grove_version) ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(local_cost) + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .add_cost(local_cost) } else { Err(Error::CorruptedData( "cannot open a subtree as parent exists but is not a tree".to_owned(), @@ -1846,9 +1955,14 @@ impl GroveDb { .wrap_with_cost(local_cost) } } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a subtree".to_owned())) - .add_cost(local_cost) + Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| Error::CorruptedData("cannot open a subtree".to_owned())) + .add_cost(local_cost) } } @@ -1871,7 +1985,15 @@ impl 
GroveDb { Error, >, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_batch_with_element_flags_update", + grove_version + .grovedb_versions + .apply_batch + .apply_batch_with_element_flags_update + ); let mut cost = OperationCost::default(); if ops.is_empty() { @@ -1924,8 +2046,10 @@ impl GroveDb { path.into(), tx, new_merk, + grove_version, ) - } + }, + grove_version ) ); @@ -1945,8 +2069,14 @@ impl GroveDb { update_element_flags_function, split_removal_bytes_function, |path, new_merk| { - self.open_batch_merk_at_path(&storage_batch, path.into(), new_merk) - } + self.open_batch_merk_at_path( + &storage_batch, + path.into(), + new_merk, + grove_version, + ) + }, + grove_version ) ); @@ -1987,7 +2117,15 @@ impl GroveDb { &Option, ) -> Result, Error>, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_partial_batch_with_element_flags_update", + grove_version + .grovedb_versions + .apply_batch + .apply_partial_batch_with_element_flags_update + ); let mut cost = OperationCost::default(); if ops.is_empty() { @@ -2044,8 +2182,10 @@ impl GroveDb { path.into(), tx, new_merk, + grove_version, ) - } + }, + grove_version ) ); // if we paused at the root height, the left over operations would be to replace @@ -2090,8 +2230,10 @@ impl GroveDb { path.into(), tx, new_merk, + grove_version, ) - } + }, + grove_version ) ); @@ -2121,8 +2263,14 @@ impl GroveDb { &mut update_element_flags_function, &mut split_removal_bytes_function, |path, new_merk| { - self.open_batch_merk_at_path(&storage_batch, path.into(), new_merk) - } + self.open_batch_merk_at_path( + &storage_batch, + path.into(), + new_merk, + grove_version, + ) + }, + grove_version ) ); @@ -2161,8 +2309,14 @@ impl GroveDb { update_element_flags_function, split_removal_bytes_function, |path, new_merk| { - self.open_batch_merk_at_path(&continue_storage_batch, path.into(), 
new_merk) - } + self.open_batch_merk_at_path( + &continue_storage_batch, + path.into(), + new_merk, + grove_version, + ) + }, + grove_version ) ); @@ -2207,7 +2361,15 @@ impl GroveDb { (StorageRemovedBytes, StorageRemovedBytes), Error, >, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "estimated_case_operations_for_batch", + grove_version + .grovedb_versions + .apply_batch + .estimated_case_operations_for_batch + ); let mut cost = OperationCost::default(); if ops.is_empty() { @@ -2229,7 +2391,11 @@ impl GroveDb { ); cost_return_on_error!( &mut cost, - Self::apply_batch_structure(batch_structure, batch_apply_options) + Self::apply_batch_structure( + batch_structure, + batch_apply_options, + grove_version + ) ); } @@ -2247,7 +2413,11 @@ impl GroveDb { ); cost_return_on_error!( &mut cost, - Self::apply_batch_structure(batch_structure, batch_apply_options) + Self::apply_batch_structure( + batch_structure, + batch_apply_options, + grove_version + ) ); } } @@ -2272,7 +2442,8 @@ mod tests { #[test] fn test_batch_validation_ok() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let element2 = Element::new_item(b"ayy2".to_vec()); let ops = vec![ @@ -2303,32 +2474,47 @@ mod tests { element2.clone(), ), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("cannot apply batch"); // visualize_stderr(&db); - db.get(EMPTY_PATH, b"key1", None) - .unwrap() - .expect("cannot get element"); - db.get([b"key1".as_ref()].as_ref(), b"key2", None) + db.get(EMPTY_PATH, b"key1", None, grove_version) .unwrap() .expect("cannot get element"); - db.get([b"key1".as_ref(), b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("cannot get element"); - db.get([b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", None) + db.get([b"key1".as_ref()].as_ref(), b"key2", None, 
grove_version) .unwrap() .expect("cannot get element"); + db.get( + [b"key1".as_ref(), b"key2"].as_ref(), + b"key3", + None, + grove_version, + ) + .unwrap() + .expect("cannot get element"); + db.get( + [b"key1".as_ref(), b"key2", b"key3"].as_ref(), + b"key4", + None, + grove_version, + ) + .unwrap() + .expect("cannot get element"); assert_eq!( - db.get([b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", None) - .unwrap() - .expect("cannot get element"), + db.get( + [b"key1".as_ref(), b"key2", b"key3"].as_ref(), + b"key4", + None, + grove_version + ) + .unwrap() + .expect("cannot get element"), element ); assert_eq!( - db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .expect("cannot get element"), element2 @@ -2337,7 +2523,8 @@ mod tests { #[test] fn test_batch_operation_consistency_checker() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // No two operations should be the same let ops = vec![ @@ -2345,7 +2532,7 @@ mod tests { GroveDbOp::insert_op(vec![b"a".to_vec()], b"b".to_vec(), Element::empty_tree()), ]; assert!(matches!( - db.apply_batch(ops, None, None).unwrap(), + db.apply_batch(ops, None, None, grove_version).unwrap(), Err(Error::InvalidBatchOperation( "batch operations fail consistency checks" )) @@ -2361,7 +2548,7 @@ mod tests { GroveDbOp::insert_op(vec![b"a".to_vec()], b"b".to_vec(), Element::empty_tree()), ]; assert!(matches!( - db.apply_batch(ops, None, None).unwrap(), + db.apply_batch(ops, None, None, grove_version).unwrap(), Err(Error::InvalidBatchOperation( "batch operations fail consistency checks" )) @@ -2377,7 +2564,7 @@ mod tests { GroveDbOp::delete_op(vec![], TEST_LEAF.to_vec()), ]; assert!(matches!( - db.apply_batch(ops, None, None).unwrap(), + db.apply_batch(ops, None, None, grove_version).unwrap(), Err(Error::InvalidBatchOperation( "batch operations fail consistency checks" )) 
@@ -2396,8 +2583,8 @@ mod tests { Element::empty_tree(), ), ]; - assert!(matches!( - db.apply_batch( + assert!(db + .apply_batch( ops, Some(BatchApplyOptions { validate_insertion_does_not_override: false, @@ -2408,21 +2595,29 @@ mod tests { base_root_storage_is_free: true, batch_pause_height: None, }), - None + None, + grove_version ) - .unwrap(), - Ok(_) - )); + .unwrap() + .is_ok()); } #[test] fn test_batch_validation_ok_on_transaction() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"keyb", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"keyb", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let element = Element::new_item(b"ayy".to_vec()); let element2 = Element::new_item(b"ayy2".to_vec()); @@ -2454,32 +2649,43 @@ mod tests { element2.clone(), ), ]; - db.apply_batch(ops, None, Some(&tx)) + db.apply_batch(ops, None, Some(&tx), grove_version) .unwrap() .expect("cannot apply batch"); - db.get(EMPTY_PATH, b"keyb", None) + db.get(EMPTY_PATH, b"keyb", None, grove_version) .unwrap() .expect_err("we should not get an element"); - db.get(EMPTY_PATH, b"keyb", Some(&tx)) + db.get(EMPTY_PATH, b"keyb", Some(&tx), grove_version) .unwrap() .expect("we should get an element"); - db.get(EMPTY_PATH, b"key1", None) + db.get(EMPTY_PATH, b"key1", None, grove_version) .unwrap() .expect_err("we should not get an element"); - db.get(EMPTY_PATH, b"key1", Some(&tx)) - .unwrap() - .expect("cannot get element"); - db.get([b"key1".as_ref()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); - db.get([b"key1".as_ref(), b"key2"].as_ref(), b"key3", Some(&tx)) + db.get(EMPTY_PATH, b"key1", Some(&tx), grove_version) .unwrap() .expect("cannot get element"); + db.get( + 
[b"key1".as_ref()].as_ref(), + b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); + db.get( + [b"key1".as_ref(), b"key2"].as_ref(), + b"key3", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); db.get( [b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); @@ -2488,22 +2694,29 @@ mod tests { db.get( [b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", - Some(&tx) + Some(&tx), + grove_version ) .unwrap() .expect("cannot get element"), element ); assert_eq!( - db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"), + db.get( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Some(&tx), + grove_version + ) + .unwrap() + .expect("cannot get element"), element2 ); } #[test] fn test_batch_add_other_element_in_sub_tree() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); // let's start by inserting a tree structure @@ -2559,6 +2772,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .unwrap() .expect("expected to do tree form insert"); @@ -2634,6 +2848,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .unwrap() .expect("expected to do first insert"); @@ -2709,6 +2924,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .unwrap() .expect("successful batch apply"); @@ -2853,37 +3069,40 @@ mod tests { #[ignore] #[test] fn test_batch_produces_same_result() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); let ops = grove_db_ops_for_contract_insert(); - db.apply_batch(ops, None, Some(&tx)) + db.apply_batch(ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); - 
db.root_hash(None).unwrap().expect("cannot get root hash"); + db.root_hash(None, grove_version) + .unwrap() + .expect("cannot get root hash"); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); let ops = grove_db_ops_for_contract_insert(); - db.apply_batch(ops.clone(), None, Some(&tx)) + db.apply_batch(ops.clone(), None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); let batch_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("cannot get root hash"); db.rollback_transaction(&tx).expect("expected to rollback"); - db.apply_operations_without_batching(ops, None, Some(&tx)) + db.apply_operations_without_batching(ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); let no_batch_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("cannot get root hash"); @@ -2893,44 +3112,47 @@ mod tests { #[ignore] #[test] fn test_batch_contract_with_document_produces_same_result() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); let ops = grove_db_ops_for_contract_insert(); - db.apply_batch(ops, None, Some(&tx)) + db.apply_batch(ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); - db.root_hash(None).unwrap().expect("cannot get root hash"); + db.root_hash(None, grove_version) + .unwrap() + .expect("cannot get root hash"); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); let ops = grove_db_ops_for_contract_insert(); let document_ops = grove_db_ops_for_contract_document_insert(); - db.apply_batch(ops.clone(), None, Some(&tx)) + db.apply_batch(ops.clone(), None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); - db.apply_batch(document_ops.clone(), None, Some(&tx)) + 
db.apply_batch(document_ops.clone(), None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); let batch_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("cannot get root hash"); db.rollback_transaction(&tx).expect("expected to rollback"); - db.apply_operations_without_batching(ops, None, Some(&tx)) + db.apply_operations_without_batching(ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); - db.apply_operations_without_batching(document_ops, None, Some(&tx)) + db.apply_operations_without_batching(document_ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); let no_batch_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("cannot get root hash"); @@ -2939,7 +3161,8 @@ mod tests { #[test] fn test_batch_validation_broken_chain() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let ops = vec![ GroveDbOp::insert_op(vec![], b"key1".to_vec(), Element::empty_tree()), @@ -2954,16 +3177,20 @@ mod tests { Element::empty_tree(), ), ]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); assert!(db - .get([b"key1".as_ref()].as_ref(), b"key2", None) + .apply_batch(ops, None, None, grove_version) + .unwrap() + .is_err()); + assert!(db + .get([b"key1".as_ref()].as_ref(), b"key2", None, grove_version) .unwrap() .is_err()); } #[test] fn test_batch_validation_broken_chain_aborts_whole_batch() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let ops = vec![ GroveDbOp::insert_op( @@ -2988,31 +3215,43 @@ mod tests { Element::empty_tree(), ), ]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); assert!(db - .get([b"key1".as_ref()].as_ref(), b"key2", None) + 
.apply_batch(ops, None, None, grove_version) + .unwrap() + .is_err()); + assert!(db + .get([b"key1".as_ref()].as_ref(), b"key2", None, grove_version) .unwrap() .is_err()); assert!(db - .get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + .get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .is_err(),); } #[test] fn test_batch_validation_deletion_brokes_chain() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); - db.insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert a subtree"); + db.insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert a subtree"); db.insert( [b"key1".as_ref()].as_ref(), b"key2", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert a subtree"); @@ -3030,12 +3269,16 @@ mod tests { ), GroveDbOp::delete_op(vec![b"key1".to_vec()], b"key2".to_vec()), ]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); + assert!(db + .apply_batch(ops, None, None, grove_version) + .unwrap() + .is_err()); } #[test] fn test_batch_validation_insertion_under_deleted_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let ops = vec![ GroveDbOp::insert_op(vec![], b"key1".to_vec(), Element::empty_tree()), @@ -3056,17 +3299,23 @@ mod tests { ), GroveDbOp::delete_op(vec![b"key1".to_vec()], b"key2".to_vec()), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect_err("insertion of element under a deleted tree should not be allowed"); - db.get([b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", None) - .unwrap() - .expect_err("nothing should have been inserted"); + db.get( + 
[b"key1".as_ref(), b"key2", b"key3"].as_ref(), + b"key4", + None, + grove_version, + ) + .unwrap() + .expect_err("nothing should have been inserted"); } #[test] fn test_batch_validation_insert_into_existing_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); db.insert( @@ -3075,6 +3324,7 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("cannot insert value"); @@ -3084,6 +3334,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert value"); @@ -3094,7 +3345,10 @@ mod tests { b"key1".to_vec(), element.clone(), )]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); + assert!(db + .apply_batch(ops, None, None, grove_version) + .unwrap() + .is_err()); // Insertion into a tree is correct let ops = vec![GroveDbOp::insert_op( @@ -3102,11 +3356,11 @@ mod tests { b"key1".to_vec(), element.clone(), )]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("cannot apply batch"); assert_eq!( - db.get([TEST_LEAF, b"valid"].as_ref(), b"key1", None) + db.get([TEST_LEAF, b"valid"].as_ref(), b"key1", None, grove_version) .unwrap() .expect("cannot get element"), element @@ -3115,7 +3369,8 @@ mod tests { #[test] fn test_batch_validation_nested_subtree_overwrite() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let element2 = Element::new_item(b"ayy2".to_vec()); db.insert( @@ -3124,6 +3379,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert a subtree"); @@ -3133,6 +3389,7 @@ mod tests { element, None, None, + grove_version, ) .unwrap() .expect("cannot insert an item"); @@ -3158,7 +3415,8 @@ mod tests { base_root_storage_is_free: true, 
batch_pause_height: None, }), - None + None, + grove_version ) .unwrap() .is_err()); @@ -3172,7 +3430,10 @@ mod tests { Element::empty_tree(), ), ]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); + assert!(db + .apply_batch(ops, None, None, grove_version) + .unwrap() + .is_err()); // TEST_LEAF will be deleted so you can not insert underneath it // We are testing with the batch apply option @@ -3197,7 +3458,8 @@ mod tests { base_root_storage_is_free: true, batch_pause_height: None, }), - None + None, + grove_version ) .unwrap() .is_err()); @@ -3205,7 +3467,8 @@ mod tests { #[test] fn test_batch_validation_root_leaf_removal() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let ops = vec![ GroveDbOp::insert_op( vec![], @@ -3230,7 +3493,8 @@ mod tests { base_root_storage_is_free: true, batch_pause_height: None, }), - None + None, + grove_version ) .unwrap() .is_err()); @@ -3238,7 +3502,8 @@ mod tests { #[test] fn test_merk_data_is_deleted() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); db.insert( @@ -3247,6 +3512,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert a subtree"); @@ -3256,6 +3522,7 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("cannot insert an item"); @@ -3266,40 +3533,59 @@ mod tests { )]; assert_eq!( - db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .expect("cannot get item"), element ); - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("cannot apply batch"); assert!(db - .get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + .get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .is_err()); } 
#[test] fn test_multi_tree_insertion_deletion_with_propagation_no_tx() { - let db = make_test_grovedb(); - db.insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert root leaf"); - db.insert(EMPTY_PATH, b"key2", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert root leaf"); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert root leaf"); + db.insert( + EMPTY_PATH, + b"key2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert root leaf"); db.insert( [ANOTHER_TEST_LEAF].as_ref(), b"key1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert root leaf"); - let hash = db.root_hash(None).unwrap().expect("cannot get root hash"); + let hash = db + .root_hash(None, grove_version) + .unwrap() + .expect("cannot get root hash"); let element = Element::new_item(b"ayy".to_vec()); let element2 = Element::new_item(b"ayy2".to_vec()); @@ -3322,43 +3608,66 @@ mod tests { GroveDbOp::insert_op(vec![TEST_LEAF.to_vec()], b"key".to_vec(), element2.clone()), GroveDbOp::delete_op(vec![ANOTHER_TEST_LEAF.to_vec()], b"key1".to_vec()), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("cannot apply batch"); assert!(db - .get([ANOTHER_TEST_LEAF].as_ref(), b"key1", None) + .get([ANOTHER_TEST_LEAF].as_ref(), b"key1", None, grove_version) .unwrap() .is_err()); assert_eq!( - db.get([b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", None) - .unwrap() - .expect("cannot get element"), + db.get( + [b"key1".as_ref(), b"key2", b"key3"].as_ref(), + b"key4", + None, + grove_version + ) + .unwrap() + .expect("cannot get element"), element ); assert_eq!( - db.get([TEST_LEAF].as_ref(), b"key", None) + db.get([TEST_LEAF].as_ref(), 
b"key", None, grove_version) .unwrap() .expect("cannot get element"), element2 ); assert_ne!( - db.root_hash(None).unwrap().expect("cannot get root hash"), + db.root_hash(None, grove_version) + .unwrap() + .expect("cannot get root hash"), hash ); // verify root leaves - assert!(db.get(EMPTY_PATH, TEST_LEAF, None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, ANOTHER_TEST_LEAF, None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, b"key1", None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, b"key2", None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, b"key3", None).unwrap().is_err()); + assert!(db + .get(EMPTY_PATH, TEST_LEAF, None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, ANOTHER_TEST_LEAF, None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, b"key1", None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, b"key2", None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, b"key3", None, grove_version) + .unwrap() + .is_err()); } #[test] fn test_nested_batch_insertion_corrupts_state() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let full_path = vec![ b"leaf1".to_vec(), b"sub1".to_vec(), @@ -3369,9 +3678,16 @@ mod tests { ]; let mut acc_path: Vec> = vec![]; for p in full_path.into_iter() { - db.insert(acc_path.as_slice(), &p, Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert"); + db.insert( + acc_path.as_slice(), + &p, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert"); acc_path.push(p); } @@ -3381,29 +3697,37 @@ mod tests { b"key".to_vec(), element.clone(), )]; - db.apply_batch(batch, None, None) + db.apply_batch(batch, None, None, grove_version) .unwrap() .expect("cannot apply batch"); let batch = vec![GroveDbOp::insert_op(acc_path, b"key".to_vec(), element)]; - db.apply_batch(batch, None, None) + 
db.apply_batch(batch, None, None, grove_version) .unwrap() .expect("cannot apply same batch twice"); } #[test] fn test_apply_sorted_pre_validated_batch_propagation() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let full_path = vec![b"leaf1".to_vec(), b"sub1".to_vec()]; let mut acc_path: Vec> = vec![]; for p in full_path.into_iter() { - db.insert(acc_path.as_slice(), &p, Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert"); + db.insert( + acc_path.as_slice(), + &p, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert"); acc_path.push(p); } - let root_hash = db.root_hash(None).unwrap().unwrap(); + let root_hash = db.root_hash(None, grove_version).unwrap().unwrap(); let element = Element::new_item(b"ayy".to_vec()); let batch = vec![GroveDbOp::insert_op( @@ -3411,17 +3735,21 @@ mod tests { b"key".to_vec(), element, )]; - db.apply_batch(batch, None, None) + db.apply_batch(batch, None, None, grove_version) .unwrap() .expect("cannot apply batch"); - assert_ne!(db.root_hash(None).unwrap().unwrap(), root_hash); + assert_ne!( + db.root_hash(None, grove_version).unwrap().unwrap(), + root_hash + ); } #[test] fn test_references() { + let grove_version = GroveVersion::latest(); // insert reference that points to non-existent item - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let batch = vec![GroveDbOp::insert_op( vec![TEST_LEAF.to_vec()], b"key1".to_vec(), @@ -3431,12 +3759,12 @@ mod tests { ])), )]; assert!(matches!( - db.apply_batch(batch, None, None).unwrap(), + db.apply_batch(batch, None, None, grove_version).unwrap(), Err(Error::MissingReference(String { .. 
})) )); // insert reference with item it points to in the same batch - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let elem = Element::new_item(b"ayy".to_vec()); let batch = vec![ GroveDbOp::insert_op( @@ -3453,9 +3781,12 @@ mod tests { elem.clone(), ), ]; - assert!(matches!(db.apply_batch(batch, None, None).unwrap(), Ok(_))); + assert!(db + .apply_batch(batch, None, None, grove_version) + .unwrap() + .is_ok()); assert_eq!( - db.get([TEST_LEAF].as_ref(), b"key1", None) + db.get([TEST_LEAF].as_ref(), b"key1", None, grove_version) .unwrap() .unwrap(), elem @@ -3466,15 +3797,15 @@ mod tests { reference_key_query.insert_key(b"key1".to_vec()); let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], reference_key_query); let proof = db - .prove_query(&path_query) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should generate proof"); - let verification_result = GroveDb::verify_query_raw(&proof, &path_query); - assert!(matches!(verification_result, Ok(_))); + let verification_result = GroveDb::verify_query_raw(&proof, &path_query, grove_version); + assert!(verification_result.is_ok()); // Hit reference limit when you specify max reference hop, lower than actual hop // count - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let elem = Element::new_item(b"ayy".to_vec()); let batch = vec![ GroveDbOp::insert_op( @@ -3502,7 +3833,7 @@ mod tests { GroveDbOp::insert_op(vec![TEST_LEAF.to_vec()], b"invalid_path".to_vec(), elem), ]; assert!(matches!( - db.apply_batch(batch, None, None).unwrap(), + db.apply_batch(batch, None, None, grove_version).unwrap(), Err(Error::ReferenceLimit) )); } diff --git a/grovedb/src/batch/mode.rs b/grovedb/src/batch/mode.rs index 76f15b6dd..897d15f2c 100644 --- a/grovedb/src/batch/mode.rs +++ b/grovedb/src/batch/mode.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person 
obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Batch running mode #[cfg(feature = "estimated_costs")] diff --git a/grovedb/src/batch/multi_insert_cost_tests.rs b/grovedb/src/batch/multi_insert_cost_tests.rs index 501cc50a5..ad171d6d8 100644 --- a/grovedb/src/batch/multi_insert_cost_tests.rs +++ b/grovedb/src/batch/multi_insert_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Multi insert cost tests #[cfg(feature = "full")] @@ -36,6 +8,7 @@ mod tests { storage_cost::{removal::StorageRemovedBytes::NoStorageRemoval, StorageCost}, OperationCost, }; + use grovedb_version::version::GroveVersion; use crate::{ batch::GroveDbOp, @@ -46,14 +19,29 @@ mod tests { #[test] fn test_batch_two_insert_empty_tree_same_level_added_bytes_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let non_batch_cost_1 = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; let non_batch_cost_2 = db - .insert(EMPTY_PATH, b"key2", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key2", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; let non_batch_cost = non_batch_cost_1.add(non_batch_cost_2); tx.rollback().expect("expected to rollback"); @@ -61,7 +49,7 @@ mod tests { GroveDbOp::insert_op(vec![], b"key1".to_vec(), Element::empty_tree()), GroveDbOp::insert_op(vec![], b"key2".to_vec(), Element::empty_tree()), ]; - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert_eq!( non_batch_cost.storage_cost.added_bytes, cost.storage_cost.added_bytes @@ -72,11 +60,19 @@ mod tests { #[test] fn test_batch_three_inserts_elements_same_level_added_bytes_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let non_batch_cost_1 = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; let non_batch_cost_2 = db .insert( @@ -85,6 +81,7 @@ mod tests { Element::new_item_with_flags(b"pizza".to_vec(), Some([0, 1].to_vec())), None, Some(&tx), + grove_version, ) 
.cost; let non_batch_cost_3 = db @@ -94,6 +91,7 @@ mod tests { Element::new_reference(SiblingReference(b"key2".to_vec())), None, Some(&tx), + grove_version, ) .cost; let non_batch_cost = non_batch_cost_1.add(non_batch_cost_2).add(non_batch_cost_3); @@ -111,7 +109,7 @@ mod tests { Element::new_reference(SiblingReference(b"key2".to_vec())), ), ]; - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert_eq!( non_batch_cost.storage_cost.added_bytes, cost.storage_cost.added_bytes @@ -122,11 +120,19 @@ mod tests { #[test] fn test_batch_four_inserts_elements_multi_level_added_bytes_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let non_batch_cost_1 = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; let non_batch_cost_2 = db .insert( @@ -135,6 +141,7 @@ mod tests { Element::new_item_with_flags(b"pizza".to_vec(), Some([0, 1].to_vec())), None, Some(&tx), + grove_version, ) .cost; let non_batch_cost_3 = db @@ -144,6 +151,7 @@ mod tests { Element::empty_tree(), None, Some(&tx), + grove_version, ) .cost; let non_batch_cost_4 = db @@ -156,6 +164,7 @@ mod tests { )), None, Some(&tx), + grove_version, ) .cost; let non_batch_cost = non_batch_cost_1 @@ -185,7 +194,7 @@ mod tests { ), ]; let cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to apply batch"); assert_eq!( @@ -198,6 +207,7 @@ mod tests { #[test] fn test_batch_root_two_insert_tree_cost_same_level() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -205,7 +215,7 @@ mod tests { GroveDbOp::insert_op(vec![], b"key1".to_vec(), Element::empty_tree()), GroveDbOp::insert_op(vec![], 
b"key2".to_vec(), Element::empty_tree()), ]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 214 storage_written_bytes @@ -253,6 +263,7 @@ mod tests { #[test] fn test_batch_root_two_insert_tree_cost_different_level() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -264,7 +275,7 @@ mod tests { Element::empty_tree(), ), ]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 214 storage_written_bytes diff --git a/grovedb/src/batch/options.rs b/grovedb/src/batch/options.rs index b3916eb86..1f60aeb4c 100644 --- a/grovedb/src/batch/options.rs +++ b/grovedb/src/batch/options.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Options #[cfg(feature = "full")] @@ -42,7 +14,7 @@ pub struct BatchApplyOptions { pub validate_insertion_does_not_override: bool, /// Validate insertion does not override tree pub validate_insertion_does_not_override_tree: bool, - /// Allow deleting non empty trees + /// Allow deleting non-empty trees pub allow_deleting_non_empty_trees: bool, /// Deleting non empty trees returns error pub deleting_non_empty_trees_returns_error: bool, diff --git a/grovedb/src/batch/single_deletion_cost_tests.rs b/grovedb/src/batch/single_deletion_cost_tests.rs index 593ac04f4..fac9682f0 100644 --- a/grovedb/src/batch/single_deletion_cost_tests.rs +++ b/grovedb/src/batch/single_deletion_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Tests #[cfg(feature = "full")] @@ -35,6 +7,7 @@ mod tests { Identifier, StorageRemovalPerEpochByIdentifier, StorageRemovedBytes::SectionedStorageRemoval, }; + use grovedb_version::version::GroveVersion; use intmap::IntMap; use crate::{ @@ -45,17 +18,25 @@ mod tests { #[test] fn test_batch_one_deletion_tree_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) .cost_as_result() .expect("expected to insert successfully"); let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -93,7 +74,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -101,6 +82,7 @@ mod tests { #[test] fn test_batch_one_deletion_item_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -110,6 +92,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -117,7 +100,7 @@ mod tests { let tx = 
db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -156,7 +139,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_op(vec![], b"key1".to_vec())]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -164,15 +147,23 @@ mod tests { #[test] fn test_batch_one_deletion_tree_costs_match_non_batch_without_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) .cost_as_result() .expect("expected to insert successfully"); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, None) + .delete(EMPTY_PATH, b"key1", None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -210,13 +201,20 @@ mod tests { let db = make_empty_grovedb(); let _insertion_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) .cost_as_result() .expect("expected to insert successfully"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, None) + .apply_batch(ops, None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -224,6 +222,7 @@ mod tests { #[test] fn test_batch_one_deletion_item_costs_match_non_batch_without_transaction() { + let grove_version = 
GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -233,12 +232,13 @@ mod tests { Element::new_item(b"cat".to_vec()), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, None) + .delete(EMPTY_PATH, b"key1", None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -283,13 +283,14 @@ mod tests { Element::new_item(b"cat".to_vec()), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let ops = vec![GroveDbOp::delete_op(vec![], b"key1".to_vec())]; let batch_cost = db - .apply_batch(ops, None, None) + .apply_batch(ops, None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -297,6 +298,7 @@ mod tests { #[test] fn test_batch_one_deletion_tree_with_flags_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -306,6 +308,7 @@ mod tests { Element::empty_tree_with_flags(Some(b"dog".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -313,7 +316,7 @@ mod tests { let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -356,7 +359,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -365,6 +368,7 @@ mod tests { #[test] fn 
test_batch_one_deletion_tree_with_identity_cost_flags_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -374,6 +378,7 @@ mod tests { Element::empty_tree_with_flags(Some(vec![0, 0])), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -402,6 +407,7 @@ mod tests { let value_sectioned = SectionedStorageRemoval(removed_bytes); Ok((key_sectioned, value_sectioned)) }, + grove_version, ) .cost_as_result() .expect("expected to delete successfully"); @@ -469,6 +475,7 @@ mod tests { Ok((key_sectioned, value_sectioned)) }, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to delete successfully"); @@ -481,6 +488,7 @@ mod tests { #[test] fn test_batch_one_deletion_item_with_flags_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -490,6 +498,7 @@ mod tests { Element::new_item_with_flags(b"cat".to_vec(), Some(b"apple".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -497,7 +506,7 @@ mod tests { let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -536,7 +545,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_op(vec![], b"key1".to_vec())]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -544,6 +553,7 @@ mod tests { #[test] fn test_batch_one_deletion_tree_with_flags_costs_match_non_batch_without_transaction() { + let grove_version = GroveVersion::latest(); let db = 
make_empty_grovedb(); let insertion_cost = db @@ -553,12 +563,13 @@ mod tests { Element::empty_tree_with_flags(Some(b"dog".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, None) + .delete(EMPTY_PATH, b"key1", None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -607,13 +618,14 @@ mod tests { Element::empty_tree_with_flags(Some(b"dog".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, None) + .apply_batch(ops, None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -621,6 +633,7 @@ mod tests { #[test] fn test_batch_one_deletion_item_with_flags_costs_match_non_batch_without_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -630,12 +643,13 @@ mod tests { Element::new_item_with_flags(b"cat".to_vec(), Some(b"apple".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, None) + .delete(EMPTY_PATH, b"key1", None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -680,13 +694,14 @@ mod tests { Element::new_item_with_flags(b"cat".to_vec(), Some(b"apple".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let ops = vec![GroveDbOp::delete_op(vec![], b"key1".to_vec())]; let batch_cost = db - .apply_batch(ops, None, None) + .apply_batch(ops, None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, 
batch_cost.storage_cost); diff --git a/grovedb/src/batch/single_insert_cost_tests.rs b/grovedb/src/batch/single_insert_cost_tests.rs index c7f791464..c025fb27d 100644 --- a/grovedb/src/batch/single_insert_cost_tests.rs +++ b/grovedb/src/batch/single_insert_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Tests #[cfg(feature = "full")] @@ -43,6 +15,7 @@ mod tests { }, OperationCost, }; + use grovedb_version::version::GroveVersion; use integer_encoding::VarInt; use intmap::IntMap; @@ -54,11 +27,19 @@ mod tests { #[test] fn test_batch_one_insert_costs_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let non_batch_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::insert_op( @@ -66,12 +47,13 @@ mod tests { b"key1".to_vec(), Element::empty_tree(), )]; - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert_eq!(non_batch_cost.storage_cost, cost.storage_cost); } #[test] fn test_batch_root_one_insert_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -80,7 +62,7 @@ mod tests { b"key1".to_vec(), Element::empty_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 113 storage_written_bytes @@ -136,6 +118,7 @@ mod tests { #[test] fn test_batch_root_one_insert_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -144,7 +127,7 @@ mod tests { b"key1".to_vec(), Element::new_item(b"cat".to_vec()), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 214 storage_written_bytes @@ -199,6 +182,7 @@ mod 
tests { #[test] fn test_batch_root_one_insert_tree_under_parent_item_in_same_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -209,6 +193,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("successful root tree leaf insert"); @@ -220,7 +205,7 @@ mod tests { b"key1".to_vec(), Element::empty_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 115 storage_written_bytes @@ -293,19 +278,27 @@ mod tests { #[test] fn test_batch_root_one_insert_tree_under_parent_tree_in_same_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], b"key1".to_vec(), Element::empty_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 113 storage_written_bytes @@ -335,9 +328,6 @@ mod tests { // Replaced bytes // 37 + 36 = 74 (key is not replaced) //needs update - // We instead are getting 106, because we are paying for (+ hash - key byte - // size) this means 31 extra bytes. - // In reality though we really are replacing 106 bytes. TBD what to do. 
// Hash node calls 8 // 1 to get tree hash @@ -359,7 +349,7 @@ mod tests { seek_count: 5, storage_cost: StorageCost { added_bytes: 115, - replaced_bytes: 106, // todo: this should actually be less + replaced_bytes: 75, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 71, // todo: verify and explain @@ -370,19 +360,27 @@ mod tests { #[test] fn test_batch_root_one_insert_tree_under_parent_tree_in_different_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], b"key1".to_vec(), Element::empty_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 113 storage_written_bytes @@ -451,6 +449,7 @@ mod tests { #[test] fn test_batch_root_one_insert_cost_right_below_value_required_cost_of_2() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -459,7 +458,7 @@ mod tests { b"key1".to_vec(), Element::new_item([0u8; 59].to_vec()), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 243 storage_written_bytes @@ -513,6 +512,7 @@ mod tests { #[test] fn test_batch_root_one_insert_cost_right_above_value_required_cost_of_2() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); 
@@ -521,7 +521,7 @@ mod tests { b"key1".to_vec(), Element::new_item([0u8; 60].to_vec()), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 243 storage_written_bytes @@ -575,11 +575,19 @@ mod tests { #[test] fn test_batch_root_one_update_item_bigger_cost_no_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -587,6 +595,7 @@ mod tests { Element::new_item_with_flags(b"value1".to_vec(), Some(vec![0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -607,6 +616,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; @@ -631,11 +641,19 @@ mod tests { #[test] fn test_batch_root_one_update_item_bigger_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -643,6 +661,7 @@ mod tests { Element::new_item_with_flags(b"value1".to_vec(), Some(vec![0, 0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -686,6 +705,7 @@ mod tests { )) }, Some(&tx), + grove_version, ) .cost; @@ -710,11 +730,19 @@ mod tests { #[test] fn 
test_batch_root_one_update_item_smaller_cost_no_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -722,6 +750,7 @@ mod tests { Element::new_item_with_flags(b"value1".to_vec(), Some(vec![0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -745,6 +774,7 @@ mod tests { )) }, Some(&tx), + grove_version, ) .cost; @@ -765,11 +795,19 @@ mod tests { #[test] fn test_batch_root_one_update_item_smaller_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -777,6 +815,7 @@ mod tests { Element::new_item_with_flags(b"value1".to_vec(), Some(vec![0, 0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -819,6 +858,7 @@ mod tests { Ok((NoStorageRemoval, SectionedStorageRemoval(removed_bytes))) }, Some(&tx), + grove_version, ) .cost; @@ -845,11 +885,19 @@ mod tests { #[test] fn test_batch_root_one_update_tree_bigger_flags_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + 
.expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -857,6 +905,7 @@ mod tests { Element::new_tree_with_flags(None, Some(vec![0, 0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -898,6 +947,7 @@ mod tests { Ok((NoStorageRemoval, BasicStorageRemoval(removed_value_bytes))) }, Some(&tx), + grove_version, ) .cost; diff --git a/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs b/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs index b049bf50d..bf5637d03 100644 --- a/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs +++ b/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs @@ -1,35 +1,8 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Tests #[cfg(feature = "full")] mod tests { + use grovedb_version::version::GroveVersion; use crate::{ batch::GroveDbOp, @@ -39,17 +12,25 @@ mod tests { #[test] fn test_batch_one_deletion_sum_tree_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_sum_tree(), None, None) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) .cost_as_result() .expect("expected to insert successfully"); let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -64,7 +45,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -72,6 +53,7 @@ mod tests { #[test] fn test_batch_one_deletion_sum_item_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); db.insert( @@ -80,6 +62,7 @@ mod tests { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("expected to insert sum tree"); @@ -91,6 +74,7 @@ mod tests { Element::new_sum_item(15), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -98,7 +82,13 @@ mod tests { let tx = db.start_transaction(); let non_batch_cost = db - .delete([b"sum_tree".as_slice()].as_ref(), b"key1", None, Some(&tx)) + .delete( + [b"sum_tree".as_slice()].as_ref(), + b"key1", + None, + Some(&tx), + grove_version, + ) .cost_as_result() .expect("expected to delete 
successfully"); @@ -116,7 +106,7 @@ mod tests { b"key1".to_vec(), )]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -124,6 +114,7 @@ mod tests { #[test] fn test_batch_one_deletion_sum_tree_with_flags_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -133,6 +124,7 @@ mod tests { Element::empty_sum_tree_with_flags(Some(b"dog".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -140,7 +132,7 @@ mod tests { let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -156,7 +148,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); diff --git a/grovedb/src/batch/single_sum_item_insert_cost_tests.rs b/grovedb/src/batch/single_sum_item_insert_cost_tests.rs index 09ca5ee20..0ba3da44a 100644 --- a/grovedb/src/batch/single_sum_item_insert_cost_tests.rs +++ b/grovedb/src/batch/single_sum_item_insert_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, 
modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Tests #[cfg(feature = "full")] @@ -34,6 +6,7 @@ mod tests { storage_cost::{removal::StorageRemovedBytes::NoStorageRemoval, StorageCost}, OperationCost, }; + use grovedb_version::version::GroveVersion; use crate::{ batch::GroveDbOp, @@ -43,6 +16,7 @@ mod tests { #[test] fn test_batch_one_sum_item_insert_costs_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -52,6 +26,7 @@ mod tests { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("expected to insert sum tree"); @@ -63,6 +38,7 @@ mod tests { Element::new_sum_item(150), None, Some(&tx), + grove_version, ) .cost; tx.rollback().expect("expected to rollback"); @@ -71,12 +47,13 @@ mod tests { b"key1".to_vec(), Element::new_sum_item(150), )]; - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert_eq!(non_batch_cost.storage_cost, cost.storage_cost); } #[test] fn test_batch_one_insert_sum_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = 
db.start_transaction(); @@ -85,7 +62,7 @@ mod tests { b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -142,19 +119,27 @@ mod tests { #[test] fn test_batch_one_insert_sum_tree_under_parent_tree_in_same_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -209,7 +194,7 @@ mod tests { seek_count: 5, storage_cost: StorageCost { added_bytes: 124, - replaced_bytes: 106, // todo: this should actually be less + replaced_bytes: 75, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 71, // todo: verify and explain @@ -220,19 +205,27 @@ mod tests { #[test] fn test_batch_one_insert_sum_tree_under_parent_sum_tree_in_same_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_sum_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + 
.expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -287,7 +280,7 @@ mod tests { seek_count: 5, storage_cost: StorageCost { added_bytes: 124, - replaced_bytes: 107, // todo: this should actually be less + replaced_bytes: 84, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 72, // todo: verify and explain @@ -298,19 +291,27 @@ mod tests { #[test] fn test_batch_one_insert_sum_tree_under_parent_tree_in_different_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -380,19 +381,27 @@ mod tests { #[test] fn test_batch_one_insert_sum_tree_under_parent_sum_tree_in_different_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_sum_tree(), + None, + 
Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -463,6 +472,7 @@ mod tests { #[test] fn test_batch_one_insert_sum_item_cost_right_below_value_required_cost_of_2() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -472,6 +482,7 @@ mod tests { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("expected to insert sum tree"); @@ -481,7 +492,7 @@ mod tests { b"key1".to_vec(), Element::new_sum_item_with_flags(15, Some([0; 42].to_vec())), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 243 storage_written_bytes @@ -536,6 +547,7 @@ mod tests { #[test] fn test_batch_one_insert_sum_item_cost_right_above_value_required_cost_of_2() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -545,6 +557,7 @@ mod tests { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("expected to insert sum tree"); @@ -554,7 +567,7 @@ mod tests { b"key1".to_vec(), Element::new_sum_item_with_flags(15, Some([0; 43].to_vec())), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 243 storage_written_bytes @@ -609,11 +622,19 @@ mod tests { #[test] fn 
test_batch_one_update_sum_item_bigger_no_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_sum_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -621,6 +642,7 @@ mod tests { Element::new_sum_item_with_flags(100, None), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -641,6 +663,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; @@ -665,11 +688,19 @@ mod tests { #[test] fn test_batch_one_update_sum_item_bigger_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_sum_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -677,6 +708,7 @@ mod tests { Element::new_sum_item_with_flags(100, Some(vec![0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -697,6 +729,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; @@ -721,11 +754,19 @@ mod tests { #[test] fn test_batch_one_update_sum_item_smaller_no_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_sum_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + 
.expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -733,6 +774,7 @@ mod tests { Element::new_sum_item_with_flags(1000000, None), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -753,6 +795,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; @@ -777,11 +820,19 @@ mod tests { #[test] fn test_batch_one_update_sum_item_smaller_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_sum_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -789,6 +840,7 @@ mod tests { Element::new_sum_item_with_flags(10000000, Some(vec![0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -809,6 +861,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; diff --git a/grovedb/src/debugger.rs b/grovedb/src/debugger.rs new file mode 100644 index 000000000..de76c6dfb --- /dev/null +++ b/grovedb/src/debugger.rs @@ -0,0 +1,190 @@ +//! GroveDB debugging support module. 
+ +use std::{fs, net::Ipv4Addr, sync::Weak}; + +use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::post, Json, Router}; +use grovedb_merk::debugger::NodeDbg; +use grovedb_path::SubtreePath; +use grovedb_version::version::GroveVersion; +use grovedbg_types::{NodeFetchRequest, NodeUpdate, Path}; +use tokio::{ + net::ToSocketAddrs, + sync::mpsc::{self, Sender}, +}; +use tower_http::services::ServeDir; + +use crate::{reference_path::ReferencePathType, GroveDb}; + +const GROVEDBG_ZIP: [u8; include_bytes!(concat!(env!("OUT_DIR"), "/grovedbg.zip")).len()] = + *include_bytes!(concat!(env!("OUT_DIR"), "/grovedbg.zip")); + +pub(super) fn start_visualizer(grovedb: Weak, addr: A) +where + A: ToSocketAddrs + Send + 'static, +{ + std::thread::spawn(move || { + let grovedbg_tmp = + tempfile::tempdir().expect("cannot create tempdir for grovedbg contents"); + let grovedbg_zip = grovedbg_tmp.path().join("grovedbg.zip"); + let grovedbg_www = grovedbg_tmp.path().join("grovedbg_www"); + + fs::write(&grovedbg_zip, &GROVEDBG_ZIP).expect("cannot crate grovedbg.zip"); + zip_extensions::read::zip_extract(&grovedbg_zip, &grovedbg_www) + .expect("cannot extract grovedbg contents"); + + let (shutdown_send, mut shutdown_receive) = mpsc::channel::<()>(1); + let app = Router::new() + .route("/fetch_node", post(fetch_node)) + .route("/fetch_root_node", post(fetch_root_node)) + .fallback_service(ServeDir::new(grovedbg_www)) + .with_state((shutdown_send, grovedb)); + + tokio::runtime::Runtime::new() + .unwrap() + .block_on(async move { + let listener = tokio::net::TcpListener::bind(addr) + .await + .expect("can't bind visualizer port"); + axum::serve(listener, app) + .with_graceful_shutdown(async move { + shutdown_receive.recv().await; + }) + .await + .unwrap() + }); + }); +} + +enum AppError { + Closed, + Any(String), +} + +impl IntoResponse for AppError { + fn into_response(self) -> axum::response::Response { + match self { + AppError::Closed => { + 
(StatusCode::SERVICE_UNAVAILABLE, "GroveDB is closed").into_response() + } + AppError::Any(e) => (StatusCode::INTERNAL_SERVER_ERROR, e).into_response(), + } + } +} + +impl From for AppError { + fn from(err: E) -> Self { + Self::Any(err.to_string()) + } +} + +async fn fetch_node( + State((shutdown, grovedb)): State<(Sender<()>, Weak)>, + Json(NodeFetchRequest { path, key }): Json, +) -> Result>, AppError> { + let Some(db) = grovedb.upgrade() else { + shutdown.send(()).await.ok(); + return Err(AppError::Closed); + }; + + // todo: GroveVersion::latest() to actual version + let merk = db + .open_non_transactional_merk_at_path(path.as_slice().into(), None, GroveVersion::latest()) + .unwrap()?; + let node = merk.get_node_dbg(&key)?; + + if let Some(node) = node { + let node_update: NodeUpdate = node_to_update(path, node)?; + Ok(Json(Some(node_update))) + } else { + Ok(None.into()) + } +} + +async fn fetch_root_node( + State((shutdown, grovedb)): State<(Sender<()>, Weak)>, +) -> Result>, AppError> { + let Some(db) = grovedb.upgrade() else { + shutdown.send(()).await.ok(); + return Err(AppError::Closed); + }; + + // todo: GroveVersion::latest() to actual version + let merk = db + .open_non_transactional_merk_at_path(SubtreePath::empty(), None, GroveVersion::latest()) + .unwrap()?; + + let node = merk.get_root_node_dbg()?; + + if let Some(node) = node { + let node_update: NodeUpdate = node_to_update(Vec::new(), node)?; + Ok(Json(Some(node_update))) + } else { + Ok(None.into()) + } +} + +fn node_to_update( + path: Path, + NodeDbg { + key, + value, + left_child, + right_child, + }: NodeDbg, +) -> Result { + // todo: GroveVersion::latest() to actual version + let grovedb_element = crate::Element::deserialize(&value, GroveVersion::latest())?; + + let element = match grovedb_element { + crate::Element::Item(value, ..) => grovedbg_types::Element::Item { value }, + crate::Element::Tree(root_key, ..) 
=> grovedbg_types::Element::Subtree { root_key }, + crate::Element::Reference(ReferencePathType::AbsolutePathReference(path), ..) => { + grovedbg_types::Element::AbsolutePathReference { path } + } + crate::Element::Reference( + ReferencePathType::UpstreamRootHeightReference(n_keep, path_append), + .., + ) => grovedbg_types::Element::UpstreamRootHeightReference { + n_keep: n_keep.into(), + path_append, + }, + crate::Element::Reference( + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + n_keep, + path_append, + ), + .., + ) => grovedbg_types::Element::UpstreamRootHeightWithParentPathAdditionReference { + n_keep: n_keep.into(), + path_append, + }, + crate::Element::Reference( + ReferencePathType::UpstreamFromElementHeightReference(n_remove, path_append), + .., + ) => grovedbg_types::Element::UpstreamFromElementHeightReference { + n_remove: n_remove.into(), + path_append, + }, + crate::Element::Reference(ReferencePathType::CousinReference(swap_parent), ..) => { + grovedbg_types::Element::CousinReference { swap_parent } + } + crate::Element::Reference(ReferencePathType::RemovedCousinReference(swap_parent), ..) => { + grovedbg_types::Element::RemovedCousinReference { swap_parent } + } + crate::Element::Reference(ReferencePathType::SiblingReference(sibling_key), ..) 
=> { + grovedbg_types::Element::SiblingReference { sibling_key } + } + crate::Element::SumItem(value, _) => grovedbg_types::Element::SumItem { value }, + crate::Element::SumTree(root_key, sum, _) => { + grovedbg_types::Element::Sumtree { root_key, sum } + } + }; + + Ok(NodeUpdate { + path, + key, + element, + left_child, + right_child, + }) +} diff --git a/grovedb/src/element/constructor.rs b/grovedb/src/element/constructor.rs index 09976e987..91143ec87 100644 --- a/grovedb/src/element/constructor.rs +++ b/grovedb/src/element/constructor.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Constructor //! 
Functions for setting an element's type diff --git a/grovedb/src/element/delete.rs b/grovedb/src/element/delete.rs index 9766c5464..9c0879a74 100644 --- a/grovedb/src/element/delete.rs +++ b/grovedb/src/element/delete.rs @@ -1,40 +1,20 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Delete //! 
Implements functions in Element for deleting +#[cfg(feature = "full")] +use grovedb_costs::OperationCost; #[cfg(feature = "full")] use grovedb_costs::{storage_cost::removal::StorageRemovedBytes, CostResult, CostsExt}; #[cfg(feature = "full")] use grovedb_merk::{BatchEntry, Error as MerkError, Merk, MerkOptions, Op}; #[cfg(feature = "full")] use grovedb_storage::StorageContext; +#[cfg(feature = "full")] +use grovedb_version::check_grovedb_v0_with_cost; +#[cfg(feature = "full")] +use grovedb_version::error::GroveVersionError; +#[cfg(feature = "full")] +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use crate::{Element, Error}; @@ -48,7 +28,9 @@ impl Element { merk_options: Option, is_layered: bool, is_sum: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!("delete", grove_version.grovedb_versions.element.delete); let op = match (is_sum, is_layered) { (true, true) => Op::DeleteLayeredMaybeSpecialized, (true, false) => Op::DeleteMaybeSpecialized, @@ -57,10 +39,17 @@ impl Element { }; let batch = [(key, op)]; let uses_sum_nodes = merk.is_sum_tree; - merk.apply_with_specialized_costs::<_, Vec>(&batch, &[], merk_options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) - }) + merk.apply_with_specialized_costs::<_, Vec>( + &batch, + &[], + merk_options, + &|key, value| { + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + }, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -80,7 +69,15 @@ impl Element { (StorageRemovedBytes, StorageRemovedBytes), MerkError, >, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "delete_with_sectioned_removal_bytes", + grove_version + .grovedb_versions + 
.element + .delete_with_sectioned_removal_bytes + ); let op = match (is_in_sum_tree, is_layered) { (true, true) => Op::DeleteLayeredMaybeSpecialized, (true, false) => Op::DeleteMaybeSpecialized, @@ -94,11 +91,13 @@ impl Element { &[], merk_options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), &mut |_costs, _old_value, _value| Ok((false, None)), sectioned_removal, + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -110,7 +109,15 @@ impl Element { is_layered: bool, is_sum: bool, batch_operations: &mut Vec>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "delete_into_batch_operations", + grove_version + .grovedb_versions + .element + .delete_into_batch_operations + ); let op = match (is_sum, is_layered) { (true, true) => Op::DeleteLayeredMaybeSpecialized, (true, false) => Op::DeleteMaybeSpecialized, diff --git a/grovedb/src/element/exists.rs b/grovedb/src/element/exists.rs index d0bf5ecb0..63dcfe4bd 100644 --- a/grovedb/src/element/exists.rs +++ b/grovedb/src/element/exists.rs @@ -1,54 +1,36 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Exists //! Implements in Element functions for checking if stuff exists -#[cfg(feature = "full")] -use grovedb_costs::CostResult; -#[cfg(feature = "full")] +use grovedb_costs::{CostResult, CostsExt, OperationCost}; use grovedb_merk::Merk; -#[cfg(feature = "full")] use grovedb_storage::StorageContext; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; -#[cfg(feature = "full")] use crate::{Element, Error}; impl Element { - #[cfg(feature = "full")] /// Helper function that returns whether an element at the key for the /// element already exists. 
pub fn element_at_key_already_exists<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( &self, merk: &mut Merk, key: K, + grove_version: &GroveVersion, ) -> CostResult { - merk.exists(key.as_ref()) - .map_err(|e| Error::CorruptedData(e.to_string())) + check_grovedb_v0_with_cost!( + "element_at_key_already_exists", + grove_version + .grovedb_versions + .element + .element_at_key_already_exists + ); + merk.exists( + key.as_ref(), + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|e| Error::CorruptedData(e.to_string())) } } diff --git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index eae7baa0e..1fda91dd5 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Get //! 
Implements functions in Element for getting @@ -37,9 +9,12 @@ use grovedb_merk::tree::kv::KV; #[cfg(feature = "full")] use grovedb_merk::Merk; #[cfg(feature = "full")] -use grovedb_merk::{ed::Decode, tree::TreeInner}; +use grovedb_merk::{ed::Decode, tree::TreeNodeInner}; #[cfg(feature = "full")] use grovedb_storage::StorageContext; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use integer_encoding::VarInt; use crate::element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}; @@ -54,13 +29,19 @@ impl Element { merk: &Merk, key: K, allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult { - Self::get_optional(merk, key.as_ref(), allow_cache).map(|result| { + check_grovedb_v0_with_cost!("get", grove_version.grovedb_versions.element.get); + Self::get_optional(merk, key.as_ref(), allow_cache, grove_version).map(|result| { let value = result?; value.ok_or_else(|| { Error::PathKeyNotFound(format!( - "key not found in Merk for get: {}", - hex::encode(key) + "get: key \"{}\" not found in Merk that has a root key [{}] and is of type {}", + hex::encode(key), + merk.root_key() + .map(hex::encode) + .unwrap_or("None".to_string()), + merk.merk_type )) }) }) @@ -73,19 +54,29 @@ impl Element { merk: &Merk, key: K, allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_optional", + grove_version.grovedb_versions.element.get_optional + ); let mut cost = OperationCost::default(); let value_opt = cost_return_on_error!( &mut cost, - merk.get(key.as_ref(), allow_cache) - .map_err(|e| Error::CorruptedData(e.to_string())) + merk.get( + key.as_ref(), + allow_cache, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .map_err(|e| Error::CorruptedData(e.to_string())) ); let element = cost_return_on_error_no_add!( &cost, value_opt .map(|value| { - Self::deserialize(value.as_slice()).map_err(|_| { + 
Self::deserialize(value.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) }) @@ -102,8 +93,13 @@ impl Element { pub fn get_from_storage<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( storage: &S, key: K, + grove_version: &GroveVersion, ) -> CostResult { - Self::get_optional_from_storage(storage, key.as_ref()).map(|result| { + check_grovedb_v0_with_cost!( + "get_from_storage", + grove_version.grovedb_versions.element.get_from_storage + ); + Self::get_optional_from_storage(storage, key.as_ref(), grove_version).map(|result| { let value = result?; value.ok_or_else(|| { Error::PathKeyNotFound(format!( @@ -120,7 +116,15 @@ impl Element { pub fn get_optional_from_storage<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( storage: &S, key: K, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_optional_from_storage", + grove_version + .grovedb_versions + .element + .get_optional_from_storage + ); let mut cost = OperationCost::default(); let key_ref = key.as_ref(); let node_value_opt = cost_return_on_error!( @@ -129,7 +133,7 @@ impl Element { .get(key_ref) .map_err(|e| Error::CorruptedData(e.to_string())) ); - let maybe_tree_inner: Option = cost_return_on_error_no_add!( + let maybe_tree_inner: Option = cost_return_on_error_no_add!( &cost, node_value_opt .map(|node_value| { @@ -145,7 +149,7 @@ impl Element { value .as_ref() .map(|value| { - Self::deserialize(value.as_slice()).map_err(|_| { + Self::deserialize(value.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) }) @@ -206,10 +210,21 @@ impl Element { path: &[&[u8]], key: K, allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "get_with_absolute_refs", + grove_version + .grovedb_versions + .element + .get_with_absolute_refs + ); let mut cost = OperationCost::default(); - let element = cost_return_on_error!(&mut 
cost, Self::get(merk, key.as_ref(), allow_cache)); + let element = cost_return_on_error!( + &mut cost, + Self::get(merk, key.as_ref(), allow_cache, grove_version) + ); let absolute_element = cost_return_on_error_no_add!( &cost, @@ -225,13 +240,23 @@ impl Element { merk: &Merk, key: K, allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_value_hash", + grove_version.grovedb_versions.element.get_value_hash + ); let mut cost = OperationCost::default(); let value_hash = cost_return_on_error!( &mut cost, - merk.get_value_hash(key.as_ref(), allow_cache) - .map_err(|e| Error::CorruptedData(e.to_string())) + merk.get_value_hash( + key.as_ref(), + allow_cache, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .map_err(|e| Error::CorruptedData(e.to_string())) ); Ok(value_hash).wrap_with_cost(cost) @@ -248,18 +273,26 @@ mod tests { #[test] fn test_cache_changes_cost() { + let grove_version = GroveVersion::latest(); let storage = TempStorage::new(); let batch = StorageBatch::new(); let ctx = storage .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(); - let mut merk = Merk::open_base(ctx, false).unwrap().unwrap(); + let mut merk = Merk::open_base( + ctx, + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .unwrap(); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"value".to_vec()) - .insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); @@ -271,10 +304,17 @@ mod tests { let ctx = storage .get_storage_context(SubtreePath::empty(), None) .unwrap(); - let mut merk = Merk::open_base(ctx, false).unwrap().unwrap(); + let mut merk = Merk::open_base( + ctx, + false, + 
Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .unwrap(); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value".to_vec()), @@ -282,14 +322,14 @@ mod tests { // Warm up cache because the Merk was reopened. Element::new_item(b"value".to_vec()) - .insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); - let cost_with_cache = Element::get(&merk, b"another-key", true) + let cost_with_cache = Element::get(&merk, b"another-key", true, grove_version) .cost_as_result() .expect("expected to get cost"); - let cost_without_cache = Element::get(&merk, b"another-key", false) + let cost_without_cache = Element::get(&merk, b"another-key", false, grove_version) .cost_as_result() .expect("expected to get cost"); assert_ne!(cost_with_cache, cost_without_cache); diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index c049e7baa..2d2db0764 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -1,51 +1,32 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Helpers //! Implements helper functions in Element +#[cfg(feature = "full")] +use grovedb_merk::tree::kv::{ + ValueDefinedCostType, + ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}, +}; #[cfg(feature = "full")] use grovedb_merk::{ - tree::{kv::KV, Tree}, + tree::{kv::KV, TreeNode}, TreeFeatureType, - TreeFeatureType::{BasicMerk, SummedMerk}, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; #[cfg(feature = "full")] use integer_encoding::VarInt; +#[cfg(feature = "full")] +use crate::reference_path::path_from_reference_path_type; #[cfg(any(feature = "full", feature = "verify"))] -use crate::{element::SUM_ITEM_COST_SIZE, Element, Error}; +use crate::reference_path::ReferencePathType; #[cfg(feature = "full")] use crate::{ - element::{SUM_TREE_COST_SIZE, TREE_COST_SIZE}, - reference_path::{path_from_reference_path_type, ReferencePathType}, + element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}, ElementFlags, }; +#[cfg(any(feature = "full", feature = "verify"))] +use crate::{Element, Error}; impl Element { #[cfg(any(feature = "full", feature = "verify"))] @@ -59,8 +40,7 @@ impl Element { } #[cfg(any(feature = "full", feature = "verify"))] - /// Decoded the integer value in the SumItem element type, returns 0 for - /// everything else + /// Decoded the integer value in the SumItem element type pub fn as_sum_item_value(&self) -> Result { 
match self { Element::SumItem(value, _) => Ok(*value), @@ -68,6 +48,33 @@ impl Element { } } + #[cfg(any(feature = "full", feature = "verify"))] + /// Decoded the integer value in the SumItem element type + pub fn into_sum_item_value(self) -> Result { + match self { + Element::SumItem(value, _) => Ok(value), + _ => Err(Error::WrongElementType("expected a sum item")), + } + } + + #[cfg(any(feature = "full", feature = "verify"))] + /// Decoded the integer value in the SumTree element type + pub fn as_sum_tree_value(&self) -> Result { + match self { + Element::SumTree(_, value, _) => Ok(*value), + _ => Err(Error::WrongElementType("expected a sum tree")), + } + } + + #[cfg(any(feature = "full", feature = "verify"))] + /// Decoded the integer value in the SumTree element type + pub fn into_sum_tree_value(self) -> Result { + match self { + Element::SumTree(_, value, _) => Ok(value), + _ => Err(Error::WrongElementType("expected a sum tree")), + } + } + #[cfg(any(feature = "full", feature = "verify"))] /// Gives the item value in the Item element type pub fn as_item_bytes(&self) -> Result<&[u8], Error> { @@ -86,24 +93,51 @@ impl Element { } } + #[cfg(any(feature = "full", feature = "verify"))] + /// Gives the reference path type in the Reference element type + pub fn into_reference_path_type(self) -> Result { + match self { + Element::Reference(value, ..) 
=> Ok(value), + _ => Err(Error::WrongElementType("expected a reference")), + } + } + #[cfg(any(feature = "full", feature = "verify"))] /// Check if the element is a sum tree pub fn is_sum_tree(&self) -> bool { matches!(self, Element::SumTree(..)) } + #[cfg(any(feature = "full", feature = "verify"))] + /// Check if the element is a tree but not a sum tree + pub fn is_basic_tree(&self) -> bool { + matches!(self, Element::Tree(..)) + } + #[cfg(any(feature = "full", feature = "verify"))] /// Check if the element is a tree - pub fn is_tree(&self) -> bool { + pub fn is_any_tree(&self) -> bool { matches!(self, Element::SumTree(..) | Element::Tree(..)) } + #[cfg(any(feature = "full", feature = "verify"))] + /// Check if the element is a reference + pub fn is_reference(&self) -> bool { + matches!(self, Element::Reference(..)) + } + #[cfg(any(feature = "full", feature = "verify"))] /// Check if the element is an item - pub fn is_item(&self) -> bool { + pub fn is_any_item(&self) -> bool { matches!(self, Element::Item(..) 
| Element::SumItem(..)) } + #[cfg(any(feature = "full", feature = "verify"))] + /// Check if the element is an item + pub fn is_basic_item(&self) -> bool { + matches!(self, Element::Item(..)) + } + #[cfg(any(feature = "full", feature = "verify"))] /// Check if the element is a sum item pub fn is_sum_item(&self) -> bool { @@ -114,8 +148,8 @@ impl Element { /// Get the tree feature type pub fn get_feature_type(&self, parent_is_sum_tree: bool) -> Result { match parent_is_sum_tree { - true => Ok(SummedMerk(self.sum_value_or_default())), - false => Ok(BasicMerk), + true => Ok(SummedMerkNode(self.sum_value_or_default())), + false => Ok(BasicMerkNode), } } @@ -155,55 +189,18 @@ impl Element { } } - #[cfg(feature = "full")] - /// Get the size of an element in bytes - #[deprecated] - pub fn byte_size(&self) -> u32 { - match self { - Element::Item(item, element_flag) => { - if let Some(flag) = element_flag { - flag.len() as u32 + item.len() as u32 - } else { - item.len() as u32 - } - } - Element::SumItem(item, element_flag) => { - if let Some(flag) = element_flag { - flag.len() as u32 + item.required_space() as u32 - } else { - item.required_space() as u32 - } - } - Element::Reference(path_reference, _, element_flag) => { - let path_length = path_reference.serialized_size() as u32; - - if let Some(flag) = element_flag { - flag.len() as u32 + path_length - } else { - path_length - } - } - Element::Tree(_, element_flag) => { - if let Some(flag) = element_flag { - flag.len() as u32 + 32 - } else { - 32 - } - } - Element::SumTree(_, _, element_flag) => { - if let Some(flag) = element_flag { - flag.len() as u32 + 32 + 8 - } else { - 32 + 8 - } - } - } - } - #[cfg(feature = "full")] /// Get the required item space - pub fn required_item_space(len: u32, flag_len: u32) -> u32 { - len + len.required_space() as u32 + flag_len + flag_len.required_space() as u32 + 1 + pub fn required_item_space( + len: u32, + flag_len: u32, + grove_version: &GroveVersion, + ) -> Result { + 
check_grovedb_v0!( + "required_item_space", + grove_version.grovedb_versions.element.required_item_space + ); + Ok(len + len.required_space() as u32 + flag_len + flag_len.required_space() as u32 + 1) } #[cfg(feature = "full")] @@ -213,9 +210,9 @@ impl Element { path: &[&[u8]], key: Option<&[u8]>, ) -> Result { - // Convert any non absolute reference type to an absolute one + // Convert any non-absolute reference type to an absolute one // we do this here because references are aggregated first then followed later - // to follow non absolute references, we need the path they are stored at + // to follow non-absolute references, we need the path they are stored at // this information is lost during the aggregation phase. Ok(match &self { Element::Reference(reference_path_type, ..) => match reference_path_type { @@ -243,9 +240,17 @@ impl Element { key: &Vec, value: &[u8], is_sum_node: bool, + grove_version: &GroveVersion, ) -> Result { + check_grovedb_v0!( + "specialized_costs_for_key_value", + grove_version + .grovedb_versions + .element + .specialized_costs_for_key_value + ); // todo: we actually don't need to deserialize the whole element - let element = Element::deserialize(value)?; + let element = Element::deserialize(value, grove_version)?; let cost = match element { Element::Tree(_, flags) => { let flags_len = flags.map_or(0, |flags| { @@ -297,7 +302,11 @@ impl Element { #[cfg(feature = "full")] /// Get tree cost for the element - pub fn get_specialized_cost(&self) -> Result { + pub fn get_specialized_cost(&self, grove_version: &GroveVersion) -> Result { + check_grovedb_v0!( + "get_specialized_cost", + grove_version.grovedb_versions.element.get_specialized_cost + ); match self { Element::Tree(..) => Ok(TREE_COST_SIZE), Element::SumTree(..) 
=> Ok(SUM_TREE_COST_SIZE), @@ -307,12 +316,48 @@ impl Element { )), } } + + #[cfg(feature = "full")] + /// Get the value defined cost for a serialized value + pub fn value_defined_cost(&self, grove_version: &GroveVersion) -> Option { + let Some(value_cost) = self.get_specialized_cost(grove_version).ok() else { + return None; + }; + + let cost = value_cost + + self.get_flags().as_ref().map_or(0, |flags| { + let flags_len = flags.len() as u32; + flags_len + flags_len.required_space() as u32 + }); + match self { + Element::Tree(..) => Some(LayeredValueDefinedCost(cost)), + Element::SumTree(..) => Some(LayeredValueDefinedCost(cost)), + Element::SumItem(..) => Some(SpecializedValueDefinedCost(cost)), + _ => None, + } + } + + #[cfg(feature = "full")] + /// Get the value defined cost for a serialized value + pub fn value_defined_cost_for_serialized_value( + value: &[u8], + grove_version: &GroveVersion, + ) -> Option { + let element = Element::deserialize(value, grove_version).ok()?; + element.value_defined_cost(grove_version) + } } #[cfg(feature = "full")] /// Decode from bytes -pub fn raw_decode(bytes: &[u8]) -> Result { - let tree = Tree::decode_raw(bytes, vec![]).map_err(|e| Error::CorruptedData(e.to_string()))?; - let element: Element = Element::deserialize(tree.value_as_slice())?; +pub fn raw_decode(bytes: &[u8], grove_version: &GroveVersion) -> Result { + let tree = TreeNode::decode_raw( + bytes, + vec![], + Some(Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|e| Error::CorruptedData(e.to_string()))?; + let element: Element = Element::deserialize(tree.value_as_slice(), grove_version)?; Ok(element) } diff --git a/grovedb/src/element/insert.rs b/grovedb/src/element/insert.rs index e500b41af..ce0144a2b 100644 --- a/grovedb/src/element/insert.rs +++ b/grovedb/src/element/insert.rs @@ -1,49 +1,18 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person 
obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Insert //! 
Implements functions in Element for inserting into Merk -use grovedb_costs::cost_return_on_error_default; -#[cfg(feature = "full")] use grovedb_costs::{ - cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, + cost_return_on_error, cost_return_on_error_default, cost_return_on_error_no_add, CostResult, + CostsExt, OperationCost, }; -#[cfg(feature = "full")] use grovedb_merk::{BatchEntry, Error as MerkError, Merk, MerkOptions, Op, TreeFeatureType}; -#[cfg(feature = "full")] use grovedb_storage::StorageContext; -#[cfg(feature = "full")] +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use integer_encoding::VarInt; -use crate::Element::SumItem; -#[cfg(feature = "full")] -use crate::{Element, Error, Hash}; +use crate::{Element, Element::SumItem, Error, Hash}; impl Element { #[cfg(feature = "full")] @@ -57,8 +26,11 @@ impl Element { merk: &mut Merk, key: K, options: Option, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = cost_return_on_error_default!(self.serialize()); + check_grovedb_v0_with_cost!("insert", grove_version.grovedb_versions.element.insert); + + let serialized = cost_return_on_error_default!(self.serialize(grove_version)); if !merk.is_sum_tree && self.is_sum_item() { return Err(Error::InvalidInput("cannot add sum item to non sum tree")) @@ -68,7 +40,8 @@ impl Element { let merk_feature_type = cost_return_on_error_default!(self.get_feature_type(merk.is_sum_tree)); let batch_operations = if matches!(self, SumItem(..)) { - let value_cost = cost_return_on_error_default!(self.get_specialized_cost()); + let value_cost = + cost_return_on_error_default!(self.get_specialized_cost(grove_version)); let cost = value_cost + self.get_flags().as_ref().map_or(0, |flags| { @@ -89,9 +62,11 @@ impl Element { options, &|key, value| { // it is possible that a normal item was being replaced with a - Self::specialized_costs_for_key_value(key, value, 
uses_sum_nodes) + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -104,14 +79,24 @@ impl Element { key: K, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_into_batch_operations + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; let entry = if matches!(self, SumItem(..)) { - let value_cost = cost_return_on_error_default!(self.get_specialized_cost()); + let value_cost = + cost_return_on_error_default!(self.get_specialized_cost(grove_version)); let cost = value_cost + self.get_flags().as_ref().map_or(0, |flags| { @@ -140,14 +125,22 @@ impl Element { merk: &mut Merk, key: &[u8], options: Option, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "insert_if_not_exists", + grove_version.grovedb_versions.element.insert_if_not_exists + ); + let mut cost = OperationCost::default(); - let exists = - cost_return_on_error!(&mut cost, self.element_at_key_already_exists(merk, key)); + let exists = cost_return_on_error!( + &mut cost, + self.element_at_key_already_exists(merk, key, grove_version) + ); if exists { Ok(false).wrap_with_cost(cost) } else { - cost_return_on_error!(&mut cost, self.insert(merk, key, options)); + cost_return_on_error!(&mut cost, self.insert(merk, key, options, grove_version)); Ok(true).wrap_with_cost(cost) } } @@ -165,18 +158,32 @@ impl Element { key: K, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult { + 
check_grovedb_v0_with_cost!( + "insert_if_not_exists_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_if_not_exists_into_batch_operations + ); + let mut cost = OperationCost::default(); let exists = cost_return_on_error!( &mut cost, - self.element_at_key_already_exists(merk, key.as_ref()) + self.element_at_key_already_exists(merk, key.as_ref(), grove_version) ); if exists { Ok(false).wrap_with_cost(cost) } else { cost_return_on_error!( &mut cost, - self.insert_into_batch_operations(key, batch_operations, feature_type) + self.insert_into_batch_operations( + key, + batch_operations, + feature_type, + grove_version + ) ); Ok(true).wrap_with_cost(cost) } @@ -195,11 +202,20 @@ impl Element { merk: &mut Merk, key: &[u8], options: Option, + grove_version: &GroveVersion, ) -> CostResult<(bool, Option), Error> { + check_grovedb_v0_with_cost!( + "insert_if_changed_value", + grove_version + .grovedb_versions + .element + .insert_if_changed_value + ); + let mut cost = OperationCost::default(); let previous_element = cost_return_on_error!( &mut cost, - Self::get_optional_from_storage(&merk.storage, key) + Self::get_optional_from_storage(&merk.storage, key, grove_version) ); let needs_insert = match &previous_element { None => true, @@ -208,7 +224,7 @@ impl Element { if !needs_insert { Ok((false, None)).wrap_with_cost(cost) } else { - cost_return_on_error!(&mut cost, self.insert(merk, key, options)); + cost_return_on_error!(&mut cost, self.insert(merk, key, options, grove_version)); Ok((true, previous_element)).wrap_with_cost(cost) } } @@ -228,11 +244,20 @@ impl Element { key: K, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult<(bool, Option), Error> { + check_grovedb_v0_with_cost!( + "insert_if_changed_value_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_if_changed_value_into_batch_operations + ); + let mut cost = OperationCost::default(); let 
previous_element = cost_return_on_error!( &mut cost, - Self::get_optional_from_storage(&merk.storage, key.as_ref()) + Self::get_optional_from_storage(&merk.storage, key.as_ref(), grove_version) ); let needs_insert = match &previous_element { None => true, @@ -243,7 +268,12 @@ impl Element { } else { cost_return_on_error!( &mut cost, - self.insert_into_batch_operations(key, batch_operations, feature_type) + self.insert_into_batch_operations( + key, + batch_operations, + feature_type, + grove_version + ) ); Ok((true, previous_element)).wrap_with_cost(cost) } @@ -261,8 +291,14 @@ impl Element { key: K, referenced_value: Hash, options: Option, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_reference", + grove_version.grovedb_versions.element.insert_reference + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; @@ -284,9 +320,11 @@ impl Element { &[], options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -300,8 +338,17 @@ impl Element { referenced_value: Hash, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_reference_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_reference_into_batch_operations + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; @@ -326,8 +373,14 @@ impl Element { key: K, 
subtree_root_hash: Hash, options: Option, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_subtree", + grove_version.grovedb_versions.element.insert_subtree + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; @@ -336,7 +389,8 @@ impl Element { let merk_feature_type = cost_return_on_error_no_add!(&cost, self.get_feature_type(merk.is_sum_tree)); - let tree_cost = cost_return_on_error_no_add!(&cost, self.get_specialized_cost()); + let tree_cost = + cost_return_on_error_no_add!(&cost, self.get_specialized_cost(grove_version)); let cost = tree_cost + self.get_flags().as_ref().map_or(0, |flags| { @@ -353,9 +407,11 @@ impl Element { &[], options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -369,13 +425,22 @@ impl Element { is_replace: bool, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_subtree_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_subtree_into_batch_operations + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; - let tree_cost = cost_return_on_error_default!(self.get_specialized_cost()); + let tree_cost = cost_return_on_error_default!(self.get_specialized_cost(grove_version)); let cost = tree_cost + self.get_flags().as_ref().map_or(0, |flags| { @@ -410,18 +475,19 @@ mod tests { #[test] fn 
test_success_insert() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"value".to_vec()) - .insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value".to_vec()), @@ -430,30 +496,31 @@ mod tests { #[test] fn test_insert_if_changed_value_does_not_insert_when_value_does_not_change() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"value".to_vec()) - .insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); - merk.commit(); + merk.commit(grove_version); let (inserted, previous) = Element::new_item(b"value".to_vec()) - .insert_if_changed_value(&mut merk, b"another-key", None) + .insert_if_changed_value(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); - merk.commit(); + merk.commit(grove_version); assert!(!inserted); assert_eq!(previous, None); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value".to_vec()), @@ -462,16 +529,17 @@ mod tests { #[test] fn 
test_insert_if_changed_value_inserts_when_value_changed() { + let grove_version = GroveVersion::latest(); let storage = TempStorage::new(); let batch = StorageBatch::new(); - let mut merk = empty_path_merk(&*storage, &batch); + let mut merk = empty_path_merk(&*storage, &batch, grove_version); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"value".to_vec()) - .insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); @@ -481,9 +549,9 @@ mod tests { .unwrap(); let batch = StorageBatch::new(); - let mut merk = empty_path_merk(&*storage, &batch); + let mut merk = empty_path_merk(&*storage, &batch, grove_version); let (inserted, previous) = Element::new_item(b"value2".to_vec()) - .insert_if_changed_value(&mut merk, b"another-key", None) + .insert_if_changed_value(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); @@ -494,10 +562,10 @@ mod tests { .commit_multi_context_batch(batch, None) .unwrap() .unwrap(); - let merk = empty_path_merk_read_only(&*storage); + let merk = empty_path_merk_read_only(&*storage, grove_version); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value2".to_vec()), @@ -506,13 +574,14 @@ mod tests { #[test] fn test_insert_if_changed_value_inserts_when_no_value() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); let (inserted, previous) = Element::new_item(b"value2".to_vec()) - .insert_if_changed_value(&mut 
merk, b"another-key", None) + .insert_if_changed_value(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); @@ -520,7 +589,7 @@ mod tests { assert_eq!(previous, None); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value2".to_vec()), diff --git a/grovedb/src/element/mod.rs b/grovedb/src/element/mod.rs index 009f85ae4..a6add9e64 100644 --- a/grovedb/src/element/mod.rs +++ b/grovedb/src/element/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Module for subtrees handling. //! Subtrees handling is isolated so basically this module is about adapting //! Merk API to GroveDB needs. 
@@ -45,19 +17,21 @@ mod insert; #[cfg(any(feature = "full", feature = "verify"))] mod query; #[cfg(any(feature = "full", feature = "verify"))] +pub use query::QueryOptions; +#[cfg(any(feature = "full", feature = "verify"))] mod serialize; -#[cfg(feature = "full")] -use core::fmt; +#[cfg(any(feature = "full", feature = "verify"))] +use std::fmt; +use bincode::{Decode, Encode}; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_merk::estimated_costs::SUM_VALUE_EXTRA_COST; #[cfg(feature = "full")] use grovedb_merk::estimated_costs::{LAYER_COST_SIZE, SUM_LAYER_COST_SIZE}; #[cfg(feature = "full")] use grovedb_visualize::visualize_to_vec; -#[cfg(any(feature = "full", feature = "verify"))] -use serde::{Deserialize, Serialize}; +use crate::operations::proof::util::hex_to_ascii; #[cfg(any(feature = "full", feature = "verify"))] use crate::reference_path::ReferencePathType; @@ -93,13 +67,14 @@ pub type SumValue = i64; /// /// ONLY APPEND TO THIS LIST!!! Because /// of how serialization works. -#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[derive(Clone, Encode, Decode, PartialEq, Eq, Hash)] +#[cfg_attr(not(any(feature = "full", feature = "visualize")), derive(Debug))] pub enum Element { /// An ordinary value Item(Vec, Option), /// A reference to an object by its path Reference(ReferencePathType, MaxReferenceHop, Option), - /// A subtree, contains the a prefixed key representing the root of the + /// A subtree, contains the prefixed key representing the root of the /// subtree. 
Tree(Option>, Option), /// Signed integer value that can be totaled in a sum tree @@ -109,7 +84,78 @@ pub enum Element { SumTree(Option>, SumValue, Option), } -#[cfg(feature = "full")] +impl fmt::Display for Element { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Element::Item(data, flags) => { + write!( + f, + "Item({}{})", + hex_to_ascii(data), + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + Element::Reference(path, max_hop, flags) => { + write!( + f, + "Reference({}, max_hop: {}{})", + path, + max_hop.map_or("None".to_string(), |h| h.to_string()), + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + Element::Tree(root_key, flags) => { + write!( + f, + "Tree({}{})", + root_key.as_ref().map_or("None".to_string(), hex::encode), + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + Element::SumItem(sum_value, flags) => { + write!( + f, + "SumItem({}{}", + sum_value, + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + Element::SumTree(root_key, sum_value, flags) => { + write!( + f, + "SumTree({}, {}{}", + root_key.as_ref().map_or("None".to_string(), hex::encode), + sum_value, + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + } + } +} + +impl Element { + pub fn type_str(&self) -> &str { + match self { + Element::Item(..) => "item", + Element::Reference(..) => "reference", + Element::Tree(..) => "tree", + Element::SumItem(..) => "sum item", + Element::SumTree(..) 
=> "sum tree", + } + } +} + +#[cfg(any(feature = "full", feature = "visualize"))] impl fmt::Debug for Element { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut v = Vec::new(); diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index 8c412cdc0..39c0494cc 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -1,34 +1,8 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Query //! 
Implements functions in Element for querying +use std::fmt; + #[cfg(feature = "full")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostContext, CostResult, CostsExt, @@ -36,13 +10,20 @@ use grovedb_costs::{ }; #[cfg(feature = "full")] use grovedb_merk::proofs::query::query_item::QueryItem; +#[cfg(feature = "full")] +use grovedb_merk::proofs::query::SubqueryBranch; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_merk::proofs::Query; +#[cfg(feature = "full")] use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::{rocksdb_storage::RocksDbStorage, RawIterator, StorageContext}; +use grovedb_version::{ + check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; -use crate::query_result_type::Path; +#[cfg(feature = "full")] +use crate::operations::proof::util::hex_to_ascii; #[cfg(feature = "full")] use crate::{ element::helpers::raw_decode, @@ -53,11 +34,58 @@ use crate::{ QueryPathKeyElementTrioResultType, }, }, - util::{merk_optional_tx, storage_context_optional_tx}, + util::{merk_optional_tx, merk_optional_tx_internal_error, storage_context_optional_tx}, Error, PathQuery, TransactionArg, }; #[cfg(any(feature = "full", feature = "verify"))] -use crate::{Element, SizedQuery}; +use crate::{query_result_type::Path, Element, SizedQuery}; + +#[cfg(any(feature = "full", feature = "verify"))] +#[derive(Copy, Clone, Debug)] +pub struct QueryOptions { + pub allow_get_raw: bool, + pub allow_cache: bool, + /// Should we decrease the limit of elements found when we have no + /// subelements in the subquery? This should generally be set to true, + /// as having it false could mean very expensive queries. The queries + /// would be expensive because we could go through many many trees where the + /// sub elements have no matches, hence the limit would not decrease and + /// hence we would continue on the increasingly expensive query. 
+ pub decrease_limit_on_range_with_no_sub_elements: bool, + pub error_if_intermediate_path_tree_not_present: bool, +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for QueryOptions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "QueryOptions {{")?; + writeln!(f, " allow_get_raw: {}", self.allow_get_raw)?; + writeln!(f, " allow_cache: {}", self.allow_cache)?; + writeln!( + f, + " decrease_limit_on_range_with_no_sub_elements: {}", + self.decrease_limit_on_range_with_no_sub_elements + )?; + writeln!( + f, + " error_if_intermediate_path_tree_not_present: {}", + self.error_if_intermediate_path_tree_not_present + )?; + write!(f, "}}") + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl Default for QueryOptions { + fn default() -> Self { + QueryOptions { + allow_get_raw: false, + allow_cache: true, + decrease_limit_on_range_with_no_sub_elements: true, + error_if_intermediate_path_tree_not_present: true, + } + } +} #[cfg(feature = "full")] /// Path query push arguments @@ -73,14 +101,131 @@ where pub subquery_path: Option, pub subquery: Option, pub left_to_right: bool, - pub allow_get_raw: bool, - pub allow_cache: bool, + pub query_options: QueryOptions, pub result_type: QueryResultType, pub results: &'a mut Vec, pub limit: &'a mut Option, pub offset: &'a mut Option, } +#[cfg(feature = "full")] +fn format_query(query: &Query, indent: usize) -> String { + let indent_str = " ".repeat(indent); + let mut output = format!("{}Query {{\n", indent_str); + + output += &format!("{} items: [\n", indent_str); + for item in &query.items { + output += &format!("{} {},\n", indent_str, item); + } + output += &format!("{} ],\n", indent_str); + + output += &format!( + "{} default_subquery_branch: {}\n", + indent_str, + format_subquery_branch(&query.default_subquery_branch, indent + 2) + ); + + if let Some(ref branches) = query.conditional_subquery_branches { + output += &format!("{} conditional_subquery_branches: {{\n", 
indent_str); + for (item, branch) in branches { + output += &format!( + "{} {}: {},\n", + indent_str, + item, + format_subquery_branch(branch, indent + 4) + ); + } + output += &format!("{} }},\n", indent_str); + } + + output += &format!("{} left_to_right: {}\n", indent_str, query.left_to_right); + output += &format!("{}}}", indent_str); + + output +} + +#[cfg(feature = "full")] +fn format_subquery_branch(branch: &SubqueryBranch, indent: usize) -> String { + let indent_str = " ".repeat(indent); + let mut output = "SubqueryBranch {{\n".to_string(); + + if let Some(ref path) = branch.subquery_path { + output += &format!("{} subquery_path: {:?},\n", indent_str, path); + } + + if let Some(ref subquery) = branch.subquery { + output += &format!( + "{} subquery: {},\n", + indent_str, + format_query(subquery, indent + 2) + ); + } + + output += &format!("{}}}", " ".repeat(indent)); + + output +} + +#[cfg(feature = "full")] +impl<'db, 'ctx, 'a> fmt::Display for PathQueryPushArgs<'db, 'ctx, 'a> +where + 'db: 'ctx, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "PathQueryPushArgs {{")?; + writeln!( + f, + " key: {}", + self.key.map_or("None".to_string(), hex_to_ascii) + )?; + writeln!(f, " element: {}", self.element)?; + writeln!( + f, + " path: [{}]", + self.path + .iter() + .map(|p| hex_to_ascii(p)) + .collect::>() + .join(", ") + )?; + writeln!( + f, + " subquery_path: {}", + self.subquery_path + .as_ref() + .map_or("None".to_string(), |p| format!( + "[{}]", + p.iter() + .map(|e| hex_to_ascii(e.as_slice())) + .collect::>() + .join(", ") + )) + )?; + writeln!( + f, + " subquery: {}", + self.subquery + .as_ref() + .map_or("None".to_string(), |q| format!("\n{}", format_query(q, 4))) + )?; + writeln!(f, " left_to_right: {}", self.left_to_right)?; + writeln!(f, " query_options: {}", self.query_options)?; + writeln!(f, " result_type: {}", self.result_type)?; + writeln!( + f, + " results: [{}]", + self.results + .iter() + .map(|r| format!("{}", r)) + 
.collect::>() + .join(", ") + )?; + writeln!(f, " limit: {:?}", self.limit)?; + writeln!(f, " offset: {:?}", self.offset)?; + write!(f, "}}") + } +} + impl Element { #[cfg(feature = "full")] /// Returns a vector of result elements based on given query @@ -88,17 +233,25 @@ impl Element { storage: &RocksDbStorage, merk_path: &[&[u8]], query: &Query, + query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "insert_subtree_into_batch_operations", + grove_version.grovedb_versions.element.get_query + ); + let sized_query = SizedQuery::new(query.clone(), None, None); Element::get_sized_query( storage, merk_path, &sized_query, - true, + query_options, result_type, transaction, + grove_version, ) .map_ok(|(elements, _)| elements) } @@ -109,14 +262,23 @@ impl Element { storage: &RocksDbStorage, merk_path: &[&[u8]], query: &Query, + query_options: QueryOptions, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_query_values", + grove_version.grovedb_versions.element.get_query_values + ); + Element::get_query( storage, merk_path, query, + query_options, QueryElementResultType, transaction, + grove_version, ) .flat_map_ok(|result_items| { let elements: Vec = result_items @@ -139,12 +301,20 @@ impl Element { storage: &RocksDbStorage, path: &[&[u8]], sized_query: &SizedQuery, - allow_get_raw: bool, - allow_cache: bool, + query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, - add_element_function: fn(PathQueryPushArgs) -> CostResult<(), Error>, + add_element_function: fn(PathQueryPushArgs, &GroveVersion) -> CostResult<(), Error>, + grove_version: &GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { + check_grovedb_v0_with_cost!( + "get_query_apply_function", + grove_version + .grovedb_versions + .element + .get_query_apply_function + ); + let 
mut cost = OperationCost::default(); let mut results = Vec::new(); @@ -166,10 +336,10 @@ impl Element { transaction, &mut limit, &mut offset, - allow_get_raw, - allow_cache, + query_options, result_type, add_element_function, + grove_version, ) ); if limit == Some(0) { @@ -189,10 +359,10 @@ impl Element { transaction, &mut limit, &mut offset, - allow_get_raw, - allow_cache, + query_options, result_type, add_element_function, + grove_version, ) ); if limit == Some(0) { @@ -215,37 +385,16 @@ impl Element { pub fn get_path_query( storage: &RocksDbStorage, path_query: &PathQuery, - allow_cache: bool, + query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { - let path_slices = path_query - .path - .iter() - .map(|x| x.as_slice()) - .collect::>(); - Element::get_query_apply_function( - storage, - path_slices.as_slice(), - &path_query.query, - false, - allow_cache, - result_type, - transaction, - Element::path_query_push, - ) - } + check_grovedb_v0_with_cost!( + "get_path_query", + grove_version.grovedb_versions.element.get_path_query + ); - #[cfg(feature = "full")] - /// Returns a vector of elements including trees, and the number of skipped - /// elements - pub fn get_raw_path_query( - storage: &RocksDbStorage, - path_query: &PathQuery, - allow_cache: bool, - result_type: QueryResultType, - transaction: TransactionArg, - ) -> CostResult<(QueryResultElements, u16), Error> { let path_slices = path_query .path .iter() @@ -255,11 +404,11 @@ impl Element { storage, path_slices.as_slice(), &path_query.query, - true, - allow_cache, + query_options, result_type, transaction, Element::path_query_push, + grove_version, ) } @@ -269,25 +418,41 @@ impl Element { storage: &RocksDbStorage, path: &[&[u8]], sized_query: &SizedQuery, - allow_cache: bool, + query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, + grove_version: 
&GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { + check_grovedb_v0_with_cost!( + "get_sized_query", + grove_version.grovedb_versions.element.get_sized_query + ); + Element::get_query_apply_function( storage, path, sized_query, - false, - allow_cache, + query_options, result_type, transaction, Element::path_query_push, + grove_version, ) } #[cfg(feature = "full")] /// Push arguments to path query - fn path_query_push(args: PathQueryPushArgs) -> CostResult<(), Error> { + fn path_query_push( + args: PathQueryPushArgs, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "path_query_push", + grove_version.grovedb_versions.element.path_query_push + ); + + // println!("path_query_push {} \n", args); + let mut cost = OperationCost::default(); let PathQueryPushArgs { @@ -299,14 +464,19 @@ impl Element { subquery_path, subquery, left_to_right, - allow_get_raw, - allow_cache, + query_options, result_type, results, limit, offset, } = args; - if element.is_tree() { + let QueryOptions { + allow_get_raw, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + .. 
+ } = query_options; + if element.is_any_tree() { let mut path_vec = path.to_vec(); let key = cost_return_on_error_no_add!( &cost, @@ -330,14 +500,20 @@ impl Element { Element::get_path_query( storage, &inner_path_query, - allow_cache, + query_options, result_type, - transaction + transaction, + grove_version, ) ); if let Some(limit) = limit { - *limit = limit.saturating_sub(sub_elements.len() as u16); + if sub_elements.is_empty() && decrease_limit_on_range_with_no_sub_elements { + // we should decrease by 1 in this case + *limit = limit.saturating_sub(1); + } else { + *limit = limit.saturating_sub(sub_elements.len() as u16); + } } if let Some(offset) = offset { *offset = offset.saturating_sub(skipped); @@ -361,6 +537,7 @@ impl Element { None, transaction, subtree, + grove_version, { results.push(QueryResultElement::ElementResultItem( cost_return_on_error!( @@ -370,6 +547,7 @@ impl Element { path_vec.as_slice(), subquery_path_last_key.as_slice(), allow_cache, + grove_version, ) ), )); @@ -384,6 +562,7 @@ impl Element { None, transaction, subtree, + grove_version, { results.push(QueryResultElement::KeyElementPairResultItem( ( @@ -395,6 +574,7 @@ impl Element { path_vec.as_slice(), subquery_path_last_key.as_slice(), allow_cache, + grove_version, ) ), ), @@ -410,6 +590,7 @@ impl Element { None, transaction, subtree, + grove_version, { results.push( QueryResultElement::PathKeyElementTrioResultItem(( @@ -422,6 +603,7 @@ impl Element { path_vec.as_slice(), subquery_path_last_key.as_slice(), allow_cache, + grove_version, ) ), )), @@ -446,22 +628,24 @@ impl Element { } else if allow_get_raw { cost_return_on_error_no_add!( &cost, - Element::basic_push(PathQueryPushArgs { - storage, - transaction, - key: Some(key), - element, - path, - subquery_path, - subquery, - left_to_right, - allow_get_raw, - allow_cache, - result_type, - results, - limit, - offset, - }) + Element::basic_push( + PathQueryPushArgs { + storage, + transaction, + key: Some(key), + element, + path, + 
subquery_path, + subquery, + left_to_right, + query_options, + result_type, + results, + limit, + offset, + }, + grove_version + ) ); } else { return Err(Error::InvalidPath( @@ -474,22 +658,24 @@ impl Element { } else { cost_return_on_error_no_add!( &cost, - Element::basic_push(PathQueryPushArgs { - storage, - transaction, - key, - element, - path, - subquery_path, - subquery, - left_to_right, - allow_get_raw, - allow_cache, - result_type, - results, - limit, - offset, - }) + Element::basic_push( + PathQueryPushArgs { + storage, + transaction, + key, + element, + path, + subquery_path, + subquery, + left_to_right, + query_options, + result_type, + results, + limit, + offset, + }, + grove_version + ) ); } Ok(()).wrap_with_cost(cost) @@ -498,7 +684,7 @@ impl Element { #[cfg(any(feature = "full", feature = "verify"))] /// Takes a sized query and a key and returns subquery key and subquery as /// tuple - pub fn subquery_paths_and_value_for_sized_query( + fn subquery_paths_and_value_for_sized_query( sized_query: &SizedQuery, key: &[u8], ) -> (Option, Option) { @@ -530,6 +716,12 @@ impl Element { (subquery_path, subquery) } + /// `decrease_limit_on_range_with_no_sub_elements` should generally be set + /// to true, as having it false could mean very expensive queries. + /// The queries would be expensive because we could go through many many + /// trees where the sub elements have no matches, hence the limit would + /// not decrease and hence we would continue on the increasingly + /// expensive query. 
#[cfg(feature = "full")] // TODO: refactor fn query_item( @@ -541,11 +733,16 @@ impl Element { transaction: TransactionArg, limit: &mut Option, offset: &mut Option, - allow_get_raw: bool, - allow_cache: bool, + query_options: QueryOptions, result_type: QueryResultType, - add_element_function: fn(PathQueryPushArgs) -> CostResult<(), Error>, + add_element_function: fn(PathQueryPushArgs, &GroveVersion) -> CostResult<(), Error>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "query_item", + grove_version.grovedb_versions.element.query_item + ); + let mut cost = OperationCost::default(); let subtree_path: SubtreePath<_> = path.into(); @@ -553,43 +750,71 @@ impl Element { if !item.is_range() { // this is a query on a key if let QueryItem::Key(key) = item { - let element_res = merk_optional_tx!( + let element_res = merk_optional_tx_internal_error!( &mut cost, storage, subtree_path, None, transaction, subtree, - { Element::get(&subtree, key, allow_cache).unwrap_add_cost(&mut cost) } + grove_version, + { + Element::get(&subtree, key, query_options.allow_cache, grove_version) + .unwrap_add_cost(&mut cost) + } ); match element_res { Ok(element) => { let (subquery_path, subquery) = Self::subquery_paths_and_value_for_sized_query(sized_query, key); - add_element_function(PathQueryPushArgs { - storage, - transaction, - key: Some(key.as_slice()), - element, - path, - subquery_path, - subquery, - left_to_right: sized_query.query.left_to_right, - allow_get_raw, - allow_cache, - result_type, - results, - limit, - offset, - }) + match add_element_function( + PathQueryPushArgs { + storage, + transaction, + key: Some(key.as_slice()), + element, + path, + subquery_path, + subquery, + left_to_right: sized_query.query.left_to_right, + query_options, + result_type, + results, + limit, + offset, + }, + grove_version, + ) .unwrap_add_cost(&mut cost) + { + Ok(_) => Ok(()), + Err(e) => { + if 
!query_options.error_if_intermediate_path_tree_not_present { + match e { + Error::PathParentLayerNotFound(_) => Ok(()), + _ => Err(e), + } + } else { + Err(e) + } + } + } } Err(Error::PathKeyNotFound(_)) => Ok(()), - Err(e) => Err(e), + Err(e) => { + if !query_options.error_if_intermediate_path_tree_not_present { + match e { + Error::PathParentLayerNotFound(_) => Ok(()), + _ => Err(e), + } + } else { + Err(e) + } + } } } else { Err(Error::InternalError( - "QueryItem must be a Key if not a range", + "QueryItem must be a Key if not a range".to_string(), )) } } else { @@ -610,7 +835,8 @@ impl Element { raw_decode( iter.value() .unwrap_add_cost(&mut cost) - .expect("if key exists then value should too") + .expect("if key exists then value should too"), + grove_version ) ); let key = iter @@ -619,9 +845,8 @@ impl Element { .expect("key should exist"); let (subquery_path, subquery) = Self::subquery_paths_and_value_for_sized_query(sized_query, key); - cost_return_on_error!( - &mut cost, - add_element_function(PathQueryPushArgs { + let result_with_cost = add_element_function( + PathQueryPushArgs { storage, transaction, key: Some(key), @@ -630,14 +855,29 @@ impl Element { subquery_path, subquery, left_to_right: sized_query.query.left_to_right, - allow_get_raw, - allow_cache, + query_options, result_type, results, limit, offset, - }) + }, + grove_version, ); + let result = result_with_cost.unwrap_add_cost(&mut cost); + match result { + Ok(x) => x, + Err(e) => { + if !query_options.error_if_intermediate_path_tree_not_present { + match e { + Error::PathKeyNotFound(_) + | Error::PathParentLayerNotFound(_) => (), + _ => return Err(e).wrap_with_cost(cost), + } + } else { + return Err(e).wrap_with_cost(cost); + } + } + } if sized_query.query.left_to_right { iter.next().unwrap_add_cost(&mut cost); } else { @@ -652,7 +892,13 @@ impl Element { } #[cfg(feature = "full")] - fn basic_push(args: PathQueryPushArgs) -> Result<(), Error> { + fn basic_push(args: PathQueryPushArgs, 
grove_version: &GroveVersion) -> Result<(), Error> { + check_grovedb_v0!( + "basic_push", + grove_version.grovedb_versions.element.basic_push + ); + + // println!("basic_push {}", args); let PathQueryPushArgs { path, key, @@ -672,14 +918,18 @@ impl Element { results.push(QueryResultElement::ElementResultItem(element)); } QueryResultType::QueryKeyElementPairResultType => { - let key = key.ok_or(Error::CorruptedPath("basic push must have a key"))?; + let key = key.ok_or(Error::CorruptedPath( + "basic push must have a key".to_string(), + ))?; results.push(QueryResultElement::KeyElementPairResultItem(( Vec::from(key), element, ))); } QueryResultType::QueryPathKeyElementTrioResultType => { - let key = key.ok_or(Error::CorruptedPath("basic push must have a key"))?; + let key = key.ok_or(Error::CorruptedPath( + "basic push must have a key".to_string(), + ))?; let path = path.iter().map(|a| a.to_vec()).collect(); results.push(QueryResultElement::PathKeyElementTrioResultItem(( path, @@ -711,9 +961,10 @@ impl Element { mod tests { use grovedb_merk::proofs::Query; use grovedb_storage::{Storage, StorageBatch}; + use grovedb_version::version::GroveVersion; use crate::{ - element::*, + element::{query::QueryOptions, *}, query_result_type::{ KeyElementPair, QueryResultElement, QueryResultElements, QueryResultType::{QueryKeyElementPairResultType, QueryPathKeyElementTrioResultType}, @@ -724,7 +975,8 @@ mod tests { #[test] fn test_get_query() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -732,6 +984,7 @@ mod tests { Element::new_item(b"ayyd".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -741,6 +994,7 @@ mod tests { Element::new_item(b"ayyc".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -750,6 +1004,7 @@ mod tests { Element::new_item(b"ayya".to_vec()), None, None, + grove_version, ) 
.unwrap() .expect("cannot insert element"); @@ -759,6 +1014,7 @@ mod tests { Element::new_item(b"ayyb".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -769,9 +1025,16 @@ mod tests { query.insert_key(b"a".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, None) - .unwrap() - .expect("expected successful get_query"), + Element::get_query_values( + &db.db, + &[TEST_LEAF], + &query, + QueryOptions::default(), + None, + grove_version + ) + .unwrap() + .expect("expected successful get_query"), vec![ Element::new_item(b"ayya".to_vec()), Element::new_item(b"ayyc".to_vec()) @@ -783,9 +1046,16 @@ mod tests { query.insert_range(b"b".to_vec()..b"d".to_vec()); query.insert_range(b"a".to_vec()..b"c".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, None) - .unwrap() - .expect("expected successful get_query"), + Element::get_query_values( + &db.db, + &[TEST_LEAF], + &query, + QueryOptions::default(), + None, + grove_version + ) + .unwrap() + .expect("expected successful get_query"), vec![ Element::new_item(b"ayya".to_vec()), Element::new_item(b"ayyb".to_vec()), @@ -798,9 +1068,16 @@ mod tests { query.insert_range_inclusive(b"b".to_vec()..=b"d".to_vec()); query.insert_range(b"b".to_vec()..b"c".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, None) - .unwrap() - .expect("expected successful get_query"), + Element::get_query_values( + &db.db, + &[TEST_LEAF], + &query, + QueryOptions::default(), + None, + grove_version + ) + .unwrap() + .expect("expected successful get_query"), vec![ Element::new_item(b"ayyb".to_vec()), Element::new_item(b"ayyc".to_vec()), @@ -814,9 +1091,16 @@ mod tests { query.insert_range(b"b".to_vec()..b"d".to_vec()); query.insert_range(b"a".to_vec()..b"c".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, None) - .unwrap() - .expect("expected successful get_query"), + Element::get_query_values( + 
&db.db, + &[TEST_LEAF], + &query, + QueryOptions::default(), + None, + grove_version + ) + .unwrap() + .expect("expected successful get_query"), vec![ Element::new_item(b"ayya".to_vec()), Element::new_item(b"ayyb".to_vec()), @@ -827,7 +1111,8 @@ mod tests { #[test] fn test_get_query_with_path() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -835,6 +1120,7 @@ mod tests { Element::new_item(b"ayyd".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -844,6 +1130,7 @@ mod tests { Element::new_item(b"ayyc".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -853,6 +1140,7 @@ mod tests { Element::new_item(b"ayya".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -862,6 +1150,7 @@ mod tests { Element::new_item(b"ayyb".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -875,8 +1164,10 @@ mod tests { &db.db, &[TEST_LEAF], &query, + QueryOptions::default(), QueryPathKeyElementTrioResultType, - None + None, + grove_version ) .unwrap() .expect("expected successful get_query") @@ -898,29 +1189,34 @@ mod tests { #[test] fn test_get_range_query() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let batch = StorageBatch::new(); let storage = &db.db; let mut merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("cannot open Merk"); // TODO implement costs Element::new_item(b"ayyd".to_vec()) - .insert(&mut merk, b"d", None) + .insert(&mut merk, b"d", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayyc".to_vec()) - .insert(&mut merk, b"c", 
None) + .insert(&mut merk, b"c", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayya".to_vec()) - .insert(&mut merk, b"a", None) + .insert(&mut merk, b"a", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayyb".to_vec()) - .insert(&mut merk, b"b", None) + .insert(&mut merk, b"b", None, grove_version) .unwrap() .expect("expected successful insertion"); @@ -938,9 +1234,10 @@ mod tests { storage, &[TEST_LEAF], &ascending_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -972,9 +1269,10 @@ mod tests { storage, &[TEST_LEAF], &backwards_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1002,30 +1300,35 @@ mod tests { #[test] fn test_get_range_inclusive_query() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let batch = StorageBatch::new(); let storage = &db.db; let mut merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("cannot open Merk"); Element::new_item(b"ayyd".to_vec()) - .insert(&mut merk, b"d", None) + .insert(&mut merk, b"d", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayyc".to_vec()) - .insert(&mut merk, b"c", None) + .insert(&mut merk, b"c", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayya".to_vec()) - .insert(&mut merk, b"a", None) + .insert(&mut merk, b"a", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayyb".to_vec()) - .insert(&mut merk, b"b", None) + .insert(&mut merk, b"b", None, 
grove_version) .unwrap() .expect("expected successful insertion"); @@ -1061,9 +1364,10 @@ mod tests { storage, &[TEST_LEAF], &ascending_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"), @@ -1078,9 +1382,10 @@ mod tests { storage, &[TEST_LEAF], &backwards_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"), @@ -1098,9 +1403,10 @@ mod tests { storage, &[TEST_LEAF], &backwards_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"), @@ -1110,7 +1416,8 @@ mod tests { #[test] fn test_get_limit_query() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -1118,6 +1425,7 @@ mod tests { Element::new_item(b"ayyd".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1127,6 +1435,7 @@ mod tests { Element::new_item(b"ayyc".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1136,6 +1445,7 @@ mod tests { Element::new_item(b"ayya".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1145,6 +1455,7 @@ mod tests { Element::new_item(b"ayyb".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1160,9 +1471,10 @@ mod tests { &db.db, &[TEST_LEAF], &backwards_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1186,9 +1498,10 @@ mod tests { &db.db, &[TEST_LEAF], &backwards_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1207,9 +1520,10 @@ mod 
tests { &db.db, &[TEST_LEAF], &limit_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1228,9 +1542,10 @@ mod tests { &db.db, &[TEST_LEAF], &limit_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1248,9 +1563,10 @@ mod tests { &db.db, &[TEST_LEAF], &limit_offset_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1273,9 +1589,10 @@ mod tests { &db.db, &[TEST_LEAF], &limit_offset_backwards_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1297,9 +1614,10 @@ mod tests { &db.db, &[TEST_LEAF], &limit_full_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1322,9 +1640,10 @@ mod tests { &db.db, &[TEST_LEAF], &limit_offset_backwards_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1347,9 +1666,10 @@ mod tests { &db.db, &[TEST_LEAF], &limit_backwards_query, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1375,7 +1695,10 @@ impl ElementsIterator { ElementsIterator { raw_iter } } - pub fn next_element(&mut self) -> CostResult, Error> { + pub fn next_element( + &mut self, + grove_version: &GroveVersion, + ) -> CostResult, Error> { let mut cost = OperationCost::default(); Ok(if self.raw_iter.valid().unwrap_add_cost(&mut cost) { @@ -1385,7 +1708,7 @@ impl ElementsIterator { .unwrap_add_cost(&mut cost) .zip(self.raw_iter.value().unwrap_add_cost(&mut cost)) { - let 
element = cost_return_on_error_no_add!(&cost, raw_decode(value)); + let element = cost_return_on_error_no_add!(&cost, raw_decode(value, grove_version)); let key_vec = key.to_vec(); self.raw_iter.next().unwrap_add_cost(&mut cost); Some((key_vec, element)) diff --git a/grovedb/src/element/serialize.rs b/grovedb/src/element/serialize.rs index 730881d21..395fea8dd 100644 --- a/grovedb/src/element/serialize.rs +++ b/grovedb/src/element/serialize.rs @@ -1,36 +1,8 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Serialize //! 
Implements serialization functions in Element -#[cfg(any(feature = "full", feature = "verify"))] -use bincode::Options; +use bincode::config; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; #[cfg(any(feature = "full", feature = "verify"))] use crate::{Element, Error}; @@ -38,32 +10,38 @@ use crate::{Element, Error}; impl Element { #[cfg(feature = "full")] /// Serializes self. Returns vector of u8s. - pub fn serialize(&self) -> Result, Error> { - bincode::DefaultOptions::default() - .with_varint_encoding() - .reject_trailing_bytes() - .serialize(self) - .map_err(|_| Error::CorruptedData(String::from("unable to serialize element"))) + pub fn serialize(&self, grove_version: &GroveVersion) -> Result, Error> { + check_grovedb_v0!( + "Element::serialize", + grove_version.grovedb_versions.element.serialize + ); + let config = config::standard().with_big_endian().with_no_limit(); + bincode::encode_to_vec(self, config) + .map_err(|e| Error::CorruptedData(format!("unable to serialize element {}", e))) } #[cfg(feature = "full")] /// Serializes self. Returns usize. 
- pub fn serialized_size(&self) -> usize { - bincode::DefaultOptions::default() - .with_varint_encoding() - .reject_trailing_bytes() - .serialized_size(self) - .unwrap() as usize // this should not be able to error + pub fn serialized_size(&self, grove_version: &GroveVersion) -> Result { + check_grovedb_v0!( + "Element::serialized_size", + grove_version.grovedb_versions.element.serialized_size + ); + self.serialize(grove_version) + .map(|serialized| serialized.len()) } #[cfg(any(feature = "full", feature = "verify"))] /// Deserializes given bytes and sets as self - pub fn deserialize(bytes: &[u8]) -> Result { - bincode::DefaultOptions::default() - .with_varint_encoding() - .reject_trailing_bytes() - .deserialize(bytes) - .map_err(|_| Error::CorruptedData(String::from("unable to deserialize element"))) + pub fn deserialize(bytes: &[u8], grove_version: &GroveVersion) -> Result { + check_grovedb_v0!( + "Element::deserialize", + grove_version.grovedb_versions.element.deserialize + ); + let config = config::standard().with_big_endian().with_no_limit(); + Ok(bincode::decode_from_slice(bytes, config) + .map_err(|e| Error::CorruptedData(format!("unable to deserialize element {}", e)))? 
+ .0) } } @@ -77,32 +55,53 @@ mod tests { #[test] fn test_serialization() { + let grove_version = GroveVersion::latest(); let empty_tree = Element::empty_tree(); - let serialized = empty_tree.serialize().expect("expected to serialize"); + let serialized = empty_tree + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 3); - assert_eq!(serialized.len(), empty_tree.serialized_size()); + assert_eq!( + serialized.len(), + empty_tree.serialized_size(grove_version).unwrap() + ); // The tree is fixed length 32 bytes, so it's enum 2 then 32 bytes of zeroes assert_eq!(hex::encode(serialized), "020000"); let empty_tree = Element::new_tree_with_flags(None, Some(vec![5])); - let serialized = empty_tree.serialize().expect("expected to serialize"); + let serialized = empty_tree + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 5); - assert_eq!(serialized.len(), empty_tree.serialized_size()); + assert_eq!( + serialized.len(), + empty_tree.serialized_size(grove_version).unwrap() + ); assert_eq!(hex::encode(serialized), "0200010105"); let item = Element::new_item(hex::decode("abcdef").expect("expected to decode")); - let serialized = item.serialize().expect("expected to serialize"); + let serialized = item + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 6); - assert_eq!(serialized.len(), item.serialized_size()); + assert_eq!( + serialized.len(), + item.serialized_size(grove_version).unwrap() + ); // The item is variable length 3 bytes, so it's enum 2 then 32 bytes of zeroes assert_eq!(hex::encode(serialized), "0003abcdef00"); assert_eq!(hex::encode(5.encode_var_vec()), "0a"); let item = Element::new_sum_item(5); - let serialized = item.serialize().expect("expected to serialize"); + let serialized = item + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 3); - assert_eq!(serialized.len(), item.serialized_size()); + 
assert_eq!( + serialized.len(), + item.serialized_size(grove_version).unwrap() + ); // The item is variable length 3 bytes, so it's enum 2 then 32 bytes of zeroes assert_eq!(hex::encode(serialized), "030a00"); @@ -110,9 +109,14 @@ mod tests { hex::decode("abcdef").expect("expected to decode"), Some(vec![1]), ); - let serialized = item.serialize().expect("expected to serialize"); + let serialized = item + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 8); - assert_eq!(serialized.len(), item.serialized_size()); + assert_eq!( + serialized.len(), + item.serialized_size(grove_version).unwrap() + ); assert_eq!(hex::encode(serialized), "0003abcdef010101"); let reference = Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ @@ -120,9 +124,14 @@ mod tests { hex::decode("abcd").expect("expected to decode"), vec![5], ])); - let serialized = reference.serialize().expect("expected to serialize"); + let serialized = reference + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 12); - assert_eq!(serialized.len(), reference.serialized_size()); + assert_eq!( + serialized.len(), + reference.serialized_size(grove_version).unwrap() + ); // The item is variable length 2 bytes, so it's enum 1 then 1 byte for length, // then 1 byte for 0, then 1 byte 02 for abcd, then 1 byte '1' for 05 assert_eq!(hex::encode(serialized), "010003010002abcd01050000"); @@ -135,9 +144,14 @@ mod tests { ]), Some(vec![1, 2, 3]), ); - let serialized = reference.serialize().expect("expected to serialize"); + let serialized = reference + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 16); - assert_eq!(serialized.len(), reference.serialized_size()); + assert_eq!( + serialized.len(), + reference.serialized_size(grove_version).unwrap() + ); assert_eq!(hex::encode(serialized), "010003010002abcd0105000103010203"); } } diff --git a/grovedb/src/error.rs b/grovedb/src/error.rs 
index d2db936f2..0f6cd5d15 100644 --- a/grovedb/src/error.rs +++ b/grovedb/src/error.rs @@ -1,37 +1,14 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
GroveDB Errors +use std::convert::Infallible; + /// GroveDB Errors #[cfg(any(feature = "full", feature = "verify"))] #[derive(Debug, thiserror::Error)] pub enum Error { + #[error("infallible")] + /// This error can not happen, used for generics + Infallible, // Input data errors #[error("cyclic reference path")] /// Cyclic reference @@ -44,10 +21,10 @@ pub enum Error { MissingReference(String), #[error("internal error: {0}")] /// Internal error - InternalError(&'static str), + InternalError(String), #[error("invalid proof: {0}")] /// Invalid proof - InvalidProof(&'static str), + InvalidProof(String), #[error("invalid input: {0}")] /// Invalid input InvalidInput(&'static str), @@ -90,7 +67,7 @@ pub enum Error { /// The corrupted path represents a consistency error in internal groveDB /// logic #[error("corrupted path: {0}")] - CorruptedPath(&'static str), + CorruptedPath(String), // Query errors #[error("invalid query: {0}")] @@ -113,6 +90,10 @@ pub enum Error { /// Corrupted data CorruptedData(String), + #[error("data storage error: {0}")] + /// Corrupted storage + CorruptedStorage(String), + #[error("invalid code execution error: {0}")] /// Invalid code execution InvalidCodeExecution(&'static str), @@ -132,18 +113,22 @@ pub enum Error { /// Deleting non empty tree DeletingNonEmptyTree(&'static str), + #[error("clearing tree with subtrees not allowed error: {0}")] + /// Clearing tree with subtrees not allowed + ClearingTreeWithSubtreesNotAllowed(&'static str), + // Client allowed errors #[error("just in time element flags client error: {0}")] /// Just in time element flags client error - JustInTimeElementFlagsClientError(&'static str), + JustInTimeElementFlagsClientError(String), #[error("split removal bytes client error: {0}")] /// Split removal bytes client error - SplitRemovalBytesClientError(&'static str), + SplitRemovalBytesClientError(String), #[error("client returned non client error: {0}")] /// Client returned non client error - 
ClientReturnedNonClientError(&'static str), + ClientReturnedNonClientError(String), #[error("override not allowed error: {0}")] /// Override not allowed @@ -156,10 +141,33 @@ // Support errors #[error("not supported: {0}")] /// Not supported - NotSupported(&'static str), + NotSupported(String), // Merk errors #[error("merk error: {0}")] /// Merk error MerkError(grovedb_merk::error::Error), + + // Version errors + #[error(transparent)] + /// Version error + VersionError(grovedb_version::error::GroveVersionError), +} + +impl From<Infallible> for Error { + fn from(_value: Infallible) -> Self { + Self::Infallible + } +} + +impl From<grovedb_merk::Error> for Error { + fn from(value: grovedb_merk::Error) -> Self { + Error::MerkError(value) + } +} + +impl From<grovedb_version::error::GroveVersionError> for Error { + fn from(value: grovedb_version::error::GroveVersionError) -> Self { + Error::VersionError(value) + } } diff --git a/grovedb/src/estimated_costs/average_case_costs.rs b/grovedb/src/estimated_costs/average_case_costs.rs index 919d4d5a0..32d5a315f 100644 --- a/grovedb/src/estimated_costs/average_case_costs.rs +++ b/grovedb/src/estimated_costs/average_case_costs.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Average case costs //! Implements average case cost functions in GroveDb @@ -40,10 +12,13 @@ use grovedb_merk::{ add_average_case_merk_replace_layered, EstimatedLayerInformation, }, }, - tree::Tree, + tree::TreeNode, HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; +use grovedb_version::{ + check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use integer_encoding::VarInt; use crate::{ @@ -59,7 +34,17 @@ impl GroveDb { path: &KeyInfoPath, merk_should_be_empty: bool, is_sum_tree: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_get_merk_at_path", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_get_merk_at_path + ); + cost.seek_count += 1; // If the merk is not empty we load the tree if !merk_should_be_empty { @@ -68,7 +53,7 @@ impl GroveDb { match path.last() { None => {} Some(key) => { - cost.storage_loaded_bytes += Tree::average_case_encoded_tree_size( + cost.storage_loaded_bytes += TreeNode::average_case_encoded_tree_size( key.max_length() as u32, HASH_LENGTH as u32, is_sum_tree, @@ -76,6 +61,8 @@ impl GroveDb { } } *cost += S::get_storage_context_cost(path.as_vec()); + + Ok(()) } /// Add average case for insertion into merk @@ -84,7 +71,17 @@ impl GroveDb { estimated_layer_information: &EstimatedLayerInformation, _is_sum_tree: bool, propagate: bool, + grove_version: &GroveVersion, ) -> 
CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_replace_tree", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_replace_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( @@ -124,7 +121,17 @@ impl GroveDb { is_sum_tree: bool, in_tree_using_sums: bool, propagate_if_input: Option<&EstimatedLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_insert_tree", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_insert_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_len = flags.as_ref().map_or(0, |flags| { @@ -152,7 +159,17 @@ impl GroveDb { is_sum_tree: bool, estimated_layer_information: &EstimatedLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_delete_tree", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_delete_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( @@ -188,7 +205,17 @@ impl GroveDb { value: &Element, in_tree_using_sums: bool, propagate_for_level: Option<&EstimatedLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_insert_element", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_insert_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -208,7 +235,7 @@ impl GroveDb { _ => add_cost_case_merk_insert( &mut cost, key_len, - value.serialized_size() as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, 
in_tree_using_sums, ), }; @@ -228,7 +255,17 @@ impl GroveDb { value: &Element, in_tree_using_sums: bool, propagate_for_level: Option<&EstimatedLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_replace_element", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_replace_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -259,7 +296,7 @@ impl GroveDb { let sum_item_cost_size = if value.is_sum_item() { SUM_ITEM_COST_SIZE } else { - value.serialized_size() as u32 + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32 }; let value_len = sum_item_cost_size + flags_len; add_cost_case_merk_replace_same_size( @@ -272,7 +309,7 @@ impl GroveDb { _ => add_cost_case_merk_replace_same_size( &mut cost, key_len, - value.serialized_size() as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, in_tree_using_sums, ), }; @@ -293,7 +330,17 @@ impl GroveDb { change_in_bytes: i32, in_tree_using_sums: bool, propagate_for_level: Option<&EstimatedLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_patch_element", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_patch_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -303,7 +350,9 @@ impl GroveDb { flags_len + flags_len.required_space() as u32 }); // Items need to be always the same serialized size for this to work - let item_cost_size = value.serialized_size() as u32; + let item_cost_size = + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) + as u32; let value_len = item_cost_size + flags_len; add_cost_case_merk_patch( &mut cost, @@ -331,7 +380,17 @@ impl GroveDb { key: &KeyInfo, 
estimated_layer_information: &EstimatedLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_delete_element", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_delete_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let value_size = cost_return_on_error_no_add!( @@ -358,8 +417,18 @@ impl GroveDb { key: &KeyInfo, estimated_element_size: u32, in_parent_tree_using_sums: bool, - ) { - let value_size = Tree::average_case_encoded_tree_size( + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_has_raw_cost", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_has_raw_cost + ); + + let value_size = TreeNode::average_case_encoded_tree_size( key.max_length() as u32, estimated_element_size, in_parent_tree_using_sums, @@ -367,6 +436,7 @@ impl GroveDb { cost.seek_count += 1; cost.storage_loaded_bytes += value_size; *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } /// Adds the average case of checking to see if a tree exists @@ -377,7 +447,17 @@ impl GroveDb { estimated_flags_size: u32, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_has_raw_tree_cost", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_has_raw_tree_cost + ); + let estimated_element_size = if is_sum_tree { SUM_TREE_COST_SIZE + estimated_flags_size } else { @@ -389,7 +469,8 @@ impl GroveDb { key, estimated_element_size, in_parent_tree_using_sums, - ); + grove_version, + ) } /// Add average case to get raw cost into merk @@ -399,14 +480,25 @@ impl GroveDb { key: &KeyInfo, estimated_element_size: u32, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + 
check_grovedb_v0!( + "add_average_case_get_raw_cost", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_get_raw_cost + ); + cost.seek_count += 1; add_average_case_get_merk_node( cost, key.max_length() as u32, estimated_element_size, in_parent_tree_using_sums, - ); + ) + .map_err(Error::MerkError) } /// adds the average cost of getting a tree @@ -417,7 +509,17 @@ estimated_flags_size: u32, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_get_raw_tree_cost", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_get_raw_tree_cost + ); + let estimated_element_size = if is_sum_tree { SUM_TREE_COST_SIZE + estimated_flags_size } else { @@ -429,7 +531,8 @@ key.max_length() as u32, estimated_element_size, in_parent_tree_using_sums, - ); + ) + .map_err(Error::MerkError) } /// adds the average cost of getting an element knowing there can be @@ -441,9 +544,19 @@ in_parent_tree_using_sums: bool, estimated_element_size: u32, estimated_references_sizes: Vec<u32>, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_get_cost", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_get_cost + ); + // todo: verify - let value_size: u32 = Tree::average_case_encoded_tree_size( + let value_size: u32 = TreeNode::average_case_encoded_tree_size( key.max_length() as u32, estimated_element_size, in_parent_tree_using_sums, @@ -451,6 +564,7 @@ cost.seek_count += 1 + estimated_references_sizes.len() as u16; cost.storage_loaded_bytes += value_size + estimated_references_sizes.iter().sum::<u32>(); *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } } @@ -461,11 +575,12 @@ mod test { use grovedb_costs::OperationCost; use grovedb_merk::{ 
estimated_costs::average_case_costs::add_average_case_get_merk_node, - test_utils::make_batch_seq, Merk, + test_utils::make_batch_seq, tree::kv::ValueDefinedCostType, Merk, }; use grovedb_storage::{ rocksdb_storage::RocksDbStorage, worst_case_costs::WorstKeyLength, Storage, StorageBatch, }; + use grovedb_version::version::GroveVersion; use tempfile::TempDir; use crate::{ @@ -476,6 +591,7 @@ mod test { #[test] fn test_get_merk_node_average_case() { + let grove_version = GroveVersion::latest(); // Open a merk and insert 10 elements. let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) @@ -487,11 +603,13 @@ .get_storage_context(EMPTY_PATH, Some(&batch)) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option<ValueDefinedCostType>>, + grove_version, ) .unwrap() .expect("cannot open merk"); let merk_batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); @@ -505,6 +623,8 @@ let merk = Merk::open_base( storage.get_storage_context(EMPTY_PATH, None).unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option<ValueDefinedCostType>>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -514,40 +634,75 @@ // 2. Left link exists // 3. Right link exists // Based on merk's avl rotation algorithm node is key 8 satisfies this - let node_result = merk.get(&8_u64.to_be_bytes(), true); + let node_result = merk.get( + &8_u64.to_be_bytes(), + true, + None::<&fn(&[u8], &GroveVersion) -> Option<ValueDefinedCostType>>, + grove_version, + ); // By tweaking the max element size, we can adapt the average case function to // this scenario. 
make_batch_seq creates values that are 60 bytes in size // (this will be the max_element_size) let mut cost = OperationCost::default(); let key = KnownKey(8_u64.to_be_bytes().to_vec()); - add_average_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false); + add_average_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false) + .expect("expected to add cost"); assert_eq!(cost, node_result.cost); } #[test] fn test_has_raw_average_case() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().unwrap(); let db = GroveDb::open(tmp_dir.path()).unwrap(); // insert empty tree to start - db.insert(EMPTY_PATH, TEST_LEAF, Element::empty_tree(), None, None) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + TEST_LEAF, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); // In this tree, we insert 3 items with keys [1, 2, 3] // after tree rotation, 2 will be at the top hence would have both left and // right links this will serve as our average case candidate. 
let elem = Element::new_item(b"value".to_vec()); - db.insert([TEST_LEAF].as_ref(), &[1], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); - db.insert([TEST_LEAF].as_ref(), &[2], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); - db.insert([TEST_LEAF].as_ref(), &[3], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[1], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[2], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[3], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); let path = KeyInfoPath::from_vec(vec![KnownKey(TEST_LEAF.to_vec())]); let key = KnownKey(vec![1]); @@ -556,11 +711,13 @@ mod test { &mut average_case_has_raw_cost, &path, &key, - elem.serialized_size() as u32, + elem.serialized_size(grove_version).expect("expected size") as u32, false, - ); + GroveVersion::latest(), + ) + .expect("expected to add cost"); - let actual_cost = db.has_raw([TEST_LEAF].as_ref(), &[2], None); + let actual_cost = db.has_raw([TEST_LEAF].as_ref(), &[2], None, GroveVersion::latest()); assert_eq!(average_case_has_raw_cost, actual_cost.cost); } diff --git a/grovedb/src/estimated_costs/worst_case_costs.rs b/grovedb/src/estimated_costs/worst_case_costs.rs index d84b3df22..2daf18b66 100644 --- a/grovedb/src/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/estimated_costs/worst_case_costs.rs @@ -1,35 +1,7 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, 
distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Worst case costs //! Implements worst case cost functions in GroveDb -use grovedb_costs::{CostResult, CostsExt, OperationCost}; +use grovedb_costs::{cost_return_on_error_no_add, CostResult, CostsExt, OperationCost}; use grovedb_merk::{ estimated_costs::{ add_cost_case_merk_insert, add_cost_case_merk_insert_layered, add_cost_case_merk_patch, @@ -42,10 +14,13 @@ use grovedb_merk::{ MERK_BIGGEST_VALUE_SIZE, }, }, - tree::Tree, + tree::TreeNode, HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; +use grovedb_version::{ + check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use integer_encoding::VarInt; use crate::{ @@ -62,12 +37,22 @@ impl GroveDb { cost: &mut OperationCost, path: &KeyInfoPath, is_sum_tree: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_worst_case_get_merk_at_path", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_get_merk_at_path + ); + cost.seek_count += 2; match path.last() { None => {} Some(key) => { - cost.storage_loaded_bytes += Tree::worst_case_encoded_tree_size( + 
cost.storage_loaded_bytes += TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, HASH_LENGTH as u32, is_sum_tree, @@ -75,6 +60,7 @@ impl GroveDb { } } *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } /// Add worst case for insertion into merk @@ -84,7 +70,17 @@ impl GroveDb { is_in_parent_sum_tree: bool, worst_case_layer_information: &WorstCaseLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_replace_tree", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_replace_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let tree_cost = if is_sum_tree { @@ -115,7 +111,17 @@ impl GroveDb { is_sum_tree: bool, is_in_parent_sum_tree: bool, propagate_if_input: Option<&WorstCaseLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_insert_tree", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_insert_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_len = flags.as_ref().map_or(0, |flags| { @@ -143,7 +149,17 @@ impl GroveDb { is_sum_tree: bool, worst_case_layer_information: &WorstCaseLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_delete_tree", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_delete_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let tree_cost = if is_sum_tree { @@ -170,7 +186,17 @@ impl GroveDb { value: &Element, in_parent_tree_using_sums: bool, propagate_for_level: Option<&WorstCaseLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_insert_element", + 
grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_insert_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -195,7 +221,7 @@ impl GroveDb { _ => add_cost_case_merk_insert( &mut cost, key_len, - value.serialized_size() as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, in_parent_tree_using_sums, ), }; @@ -215,7 +241,17 @@ impl GroveDb { value: &Element, in_parent_tree_using_sums: bool, propagate_for_level: Option<&WorstCaseLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_replace_element", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_replace_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -253,7 +289,7 @@ impl GroveDb { _ => add_cost_case_merk_replace( &mut cost, key_len, - value.serialized_size() as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, in_parent_tree_using_sums, ), }; @@ -274,7 +310,17 @@ impl GroveDb { change_in_bytes: i32, in_tree_using_sums: bool, propagate_for_level: Option<&WorstCaseLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_patch_element", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_patch_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -284,7 +330,9 @@ impl GroveDb { flags_len + flags_len.required_space() as u32 }); // Items need to be always the same serialized size for this to work - let sum_item_cost_size = value.serialized_size() as u32; + let sum_item_cost_size = + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) + as u32; let value_len = sum_item_cost_size + flags_len; 
add_cost_case_merk_patch( &mut cost, @@ -312,7 +360,17 @@ impl GroveDb { key: &KeyInfo, worst_case_layer_information: &WorstCaseLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_delete_element", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_delete_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; add_worst_case_merk_delete(&mut cost, key_len, MERK_BIGGEST_VALUE_SIZE); @@ -332,8 +390,18 @@ impl GroveDb { key: &KeyInfo, max_element_size: u32, in_parent_tree_using_sums: bool, - ) { - let value_size = Tree::worst_case_encoded_tree_size( + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_worst_case_has_raw_cost", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_has_raw_cost + ); + + let value_size = TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, max_element_size, in_parent_tree_using_sums, @@ -341,6 +409,7 @@ impl GroveDb { cost.seek_count += 1; cost.storage_loaded_bytes += value_size; *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } /// Add worst case cost for get raw tree into merk @@ -350,7 +419,17 @@ impl GroveDb { key: &KeyInfo, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_worst_case_get_raw_tree_cost", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_get_raw_tree_cost + ); + cost.seek_count += 1; let tree_cost_size = if is_sum_tree { SUM_TREE_COST_SIZE @@ -362,7 +441,8 @@ impl GroveDb { key.max_length() as u32, tree_cost_size, in_parent_tree_using_sums, - ); + ) + .map_err(Error::MerkError) } /// Add worst case cost for get raw into merk @@ -372,14 +452,25 @@ impl GroveDb { key: &KeyInfo, max_element_size: u32, in_parent_tree_using_sums: bool, - ) { + 
grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_worst_case_get_raw_cost", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_get_raw_cost + ); + cost.seek_count += 1; add_worst_case_get_merk_node( cost, key.max_length() as u32, max_element_size, in_parent_tree_using_sums, - ); + ) + .map_err(Error::MerkError) } /// Add worst case cost for get into merk @@ -390,9 +481,19 @@ max_element_size: u32, in_parent_tree_using_sums: bool, max_references_sizes: Vec<u32>, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_worst_case_get_cost", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_get_cost + ); + // todo: verify - let value_size: u32 = Tree::worst_case_encoded_tree_size( + let value_size: u32 = TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, max_element_size, in_parent_tree_using_sums, @@ -400,6 +501,7 @@ cost.seek_count += 1 + max_references_sizes.len() as u16; cost.storage_loaded_bytes += value_size + max_references_sizes.iter().sum::<u32>(); *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } } @@ -411,12 +513,14 @@ mod test { use grovedb_merk::{ estimated_costs::worst_case_costs::add_worst_case_get_merk_node, test_utils::{empty_path_merk, empty_path_merk_read_only, make_batch_seq}, + tree::kv::ValueDefinedCostType, }; use grovedb_storage::{ rocksdb_storage::{test_utils::TempStorage, RocksDbStorage}, worst_case_costs::WorstKeyLength, Storage, StorageBatch, }; + use grovedb_version::version::GroveVersion; use tempfile::TempDir; use crate::{ @@ -427,13 +531,14 @@ mod test { #[test] fn test_get_merk_node_worst_case() { + let grove_version = GroveVersion::latest(); // Open a merk and insert 10 elements. 
let storage = TempStorage::new(); let batch = StorageBatch::new(); - let mut merk = empty_path_merk(&*storage, &batch); + let mut merk = empty_path_merk(&*storage, &batch, grove_version); let merk_batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); @@ -444,47 +549,82 @@ mod test { .unwrap(); // Reopen merk: this time, only root node is loaded to memory - let merk = empty_path_merk_read_only(&*storage); + let merk = empty_path_merk_read_only(&*storage, grove_version); // To simulate worst case, we need to pick a node that: // 1. Is not in memory // 2. Left link exists // 3. Right link exists // Based on merk's avl rotation algorithm node is key 8 satisfies this - let node_result = merk.get(&8_u64.to_be_bytes(), true); + let node_result = merk.get( + &8_u64.to_be_bytes(), + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ); // By tweaking the max element size, we can adapt the worst case function to // this scenario. 
make_batch_seq creates values that are 60 bytes in size // (this will be the max_element_size) let mut cost = OperationCost::default(); let key = KnownKey(8_u64.to_be_bytes().to_vec()); - add_worst_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false); + add_worst_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false) + .expect("no issue with version"); assert_eq!(cost, node_result.cost); } #[test] fn test_has_raw_worst_case() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().unwrap(); let db = GroveDb::open(tmp_dir.path()).unwrap(); // insert empty tree to start - db.insert(EMPTY_PATH, TEST_LEAF, Element::empty_tree(), None, None) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + TEST_LEAF, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); // In this tree, we insert 3 items with keys [1, 2, 3] // after tree rotation, 2 will be at the top hence would have both left and // right links this will serve as our worst case candidate. 
let elem = Element::new_item(b"value".to_vec()); - db.insert([TEST_LEAF].as_ref(), &[1], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); - db.insert([TEST_LEAF].as_ref(), &[2], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); - db.insert([TEST_LEAF].as_ref(), &[3], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[1], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[2], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[3], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); let path = KeyInfoPath::from_vec(vec![KnownKey(TEST_LEAF.to_vec())]); let key = KnownKey(vec![1]); @@ -493,11 +633,13 @@ mod test { &mut worst_case_has_raw_cost, &path, &key, - elem.serialized_size() as u32, + elem.serialized_size(grove_version).expect("expected size") as u32, false, - ); + GroveVersion::latest(), + ) + .expect("expected to add cost"); - let actual_cost = db.has_raw([TEST_LEAF].as_ref(), &[2], None); + let actual_cost = db.has_raw([TEST_LEAF].as_ref(), &[2], None, GroveVersion::latest()); assert_eq!(worst_case_has_raw_cost, actual_cost.cost); } diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 8dac7975e..012785ca4 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to 
do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! GroveDB is a database that enables cryptographic proofs for complex queries. //! //! # Examples @@ -48,8 +20,11 @@ //! Insert, Update, Delete and Prove elements. //! ``` //! use grovedb::{Element, GroveDb}; +//! use grovedb_version::version::GroveVersion; //! use tempfile::TempDir; //! +//! let grove_version = GroveVersion::latest(); +//! //! // Specify the path where you want to set up the GroveDB instance //! let tmp_dir = TempDir::new().unwrap(); //! let path = tmp_dir.path(); @@ -60,9 +35,16 @@ //! let root_path: &[&[u8]] = &[]; //! //! // Insert new tree to root -//! db.insert(root_path, b"tree1", Element::empty_tree(), None, None) -//! .unwrap() -//! .expect("successful tree insert"); +//! db.insert( +//! root_path, +//! b"tree1", +//! Element::empty_tree(), +//! None, +//! None, +//! grove_version, +//! ) +//! .unwrap() +//! .expect("successful tree insert"); //! //! // Insert key-value 1 into tree1 //! // key - hello, value - world @@ -72,6 +54,7 @@ //! Element::new_item(b"world".to_vec()), //! None, //! None, +//! grove_version, //! ) //! .unwrap() //! .expect("successful key1 insert"); @@ -84,19 +67,20 @@ //! Element::new_item(b"rocks".to_vec()), //! None, //! None, +//! grove_version, //! ) //! .unwrap() //! .expect("successful key2 insert"); //! //! 
// Retrieve inserted elements //! let elem = db -//! .get(&[b"tree1"], b"hello", None) +//! .get(&[b"tree1"], b"hello", None, grove_version) //! .unwrap() //! .expect("successful get"); //! assert_eq!(elem, Element::new_item(b"world".to_vec())); //! //! let elem = db -//! .get(&[b"tree1"], b"grovedb", None) +//! .get(&[b"tree1"], b"grovedb", None, grove_version) //! .unwrap() //! .expect("successful get"); //! assert_eq!(elem, Element::new_item(b"rocks".to_vec())); @@ -109,27 +93,28 @@ //! Element::new_item(b"WORLD".to_vec()), //! None, //! None, +//! grove_version, //! ) //! .unwrap() //! .expect("successful update"); //! //! // Retrieve updated element //! let elem = db -//! .get(&[b"tree1"], b"hello", None) +//! .get(&[b"tree1"], b"hello", None, grove_version) //! .unwrap() //! .expect("successful get"); //! assert_eq!(elem, Element::new_item(b"WORLD".to_vec())); //! //! // Deletion -//! db.delete(&[b"tree1"], b"hello", None, None) +//! db.delete(&[b"tree1"], b"hello", None, None, grove_version) //! .unwrap() //! .expect("successful delete"); -//! let elem_result = db.get(&[b"tree1"], b"hello", None).unwrap(); +//! let elem_result = db.get(&[b"tree1"], b"hello", None, grove_version).unwrap(); //! assert_eq!(elem_result.is_err(), true); //! //! // State Root //! // Get the GroveDB root hash -//! let root_hash = db.root_hash(None).unwrap().unwrap(); +//! let root_hash = db.root_hash(None, grove_version).unwrap().unwrap(); //! assert_eq!( //! hex::encode(root_hash), //! "3884be3d197ac49981e54b21ea423351fc4ccdb770aaf7cf40f5e65dc3e2e1aa" @@ -140,11 +125,10 @@ //! [Architectural Decision Records](https://github.com/dashpay/grovedb/tree/master/adr) or //! 
[Tutorial](https://www.grovedb.org/tutorials.html) -#[cfg(feature = "full")] -extern crate core; - #[cfg(feature = "full")] pub mod batch; +#[cfg(feature = "grovedbg")] +pub mod debugger; #[cfg(any(feature = "full", feature = "verify"))] pub mod element; #[cfg(any(feature = "full", feature = "verify"))] @@ -160,20 +144,21 @@ pub mod query_result_type; #[cfg(any(feature = "full", feature = "verify"))] pub mod reference_path; #[cfg(feature = "full")] -mod replication; +pub mod replication; #[cfg(all(test, feature = "full"))] mod tests; #[cfg(feature = "full")] mod util; -mod versioning; #[cfg(feature = "full")] mod visualize; +#[cfg(feature = "grovedbg")] +use std::sync::Arc; #[cfg(feature = "full")] use std::{collections::HashMap, option::Option::None, path::Path}; -#[cfg(any(feature = "full", feature = "verify"))] -use element::helpers; +#[cfg(feature = "grovedbg")] +use debugger::start_visualizer; #[cfg(any(feature = "full", feature = "verify"))] pub use element::Element; #[cfg(feature = "full")] @@ -194,11 +179,14 @@ pub use grovedb_merk::proofs::query::query_item::QueryItem; #[cfg(any(feature = "full", feature = "verify"))] pub use grovedb_merk::proofs::Query; #[cfg(feature = "full")] +use grovedb_merk::tree::kv::ValueDefinedCostType; +#[cfg(feature = "full")] use grovedb_merk::{ self, tree::{combine_hash, value_hash}, BatchEntry, CryptoHash, KVIterator, Merk, }; +#[cfg(feature = "full")] use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::PrefixedRocksDbImmediateStorageContext; @@ -211,19 +199,22 @@ use grovedb_storage::{ }; #[cfg(feature = "full")] use grovedb_storage::{Storage, StorageContext}; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use grovedb_visualize::DebugByteVectors; #[cfg(any(feature = "full", feature = "verify"))] pub use query::{PathQuery, SizedQuery}; -#[cfg(feature = "full")] -pub use replication::{BufferedRestorer, Restorer, SiblingsChunkProducer, SubtreeChunkProducer}; 
+#[cfg(feature = "grovedbg")] +use tokio::net::ToSocketAddrs; +#[cfg(feature = "full")] +use crate::element::helpers::raw_decode; #[cfg(any(feature = "full", feature = "verify"))] pub use crate::error::Error; #[cfg(feature = "full")] -use crate::helpers::raw_decode; -#[cfg(feature = "full")] use crate::util::{root_merk_optional_tx, storage_context_optional_tx}; +#[cfg(feature = "full")] +use crate::Error::MerkError; #[cfg(feature = "full")] type Hash = [u8; 32]; @@ -234,6 +225,9 @@ pub struct GroveDb { db: RocksDbStorage, } +#[cfg(feature = "full")] +pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; + /// Transaction #[cfg(feature = "full")] pub type Transaction<'db> = >::Transaction; @@ -249,12 +243,29 @@ impl GroveDb { Ok(GroveDb { db }) } + #[cfg(feature = "grovedbg")] + // Start visualizer server for the GroveDB instance + pub fn start_visualizer(self: &Arc, addr: A) + where + A: ToSocketAddrs + Send + 'static, + { + let weak = Arc::downgrade(self); + start_visualizer(weak, addr); + } + + /// Uses raw iter to delete GroveDB key values pairs from rocksdb + pub fn wipe(&self) -> Result<(), Error> { + self.db.wipe()?; + Ok(()) + } + /// Opens the transactional Merk at the given path. Returns CostResult. 
fn open_transactional_merk_at_path<'db, 'b, B>( &'db self, path: SubtreePath<'b, B>, tx: &'db Transaction, batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, ) -> CostResult>, Error> where B: AsRef<[u8]> + 'b, @@ -272,42 +283,56 @@ impl GroveDb { .unwrap_add_cost(&mut cost); let element = cost_return_on_error!( &mut cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|e| { - Error::InvalidParentLayerPath(format!( - "could not get key {} for parent {:?} of subtree: {}", - hex::encode(parent_key), - DebugByteVectors(parent_path.to_vec()), - e - )) - }) + Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( + |e| { + Error::InvalidParentLayerPath(format!( + "could not get key {} for parent {:?} of subtree: {}", + hex::encode(parent_key), + DebugByteVectors(parent_path.to_vec()), + e + )) + } + ) ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(cost) + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .add_cost(cost) } else { Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree", + "cannot open a subtree as parent exists but is not a tree".to_string(), )) .wrap_with_cost(cost) } } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) + Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) + 
.add_cost(cost) } } /// Opens a Merk at given path for with direct write access. Intended for /// replication purposes. - fn open_merk_for_replication<'db, 'b, B>( + fn open_merk_for_replication<'tx, 'db: 'tx, 'b, B>( &'db self, path: SubtreePath<'b, B>, - tx: &'db Transaction, - ) -> Result>, Error> + tx: &'tx Transaction<'db>, + grove_version: &GroveVersion, + ) -> Result>, Error> where B: AsRef<[u8]> + 'b, { @@ -322,7 +347,7 @@ impl GroveDb { .db .get_immediate_storage_context(parent_path.clone(), tx) .unwrap_add_cost(&mut cost); - let element = Element::get_from_storage(&parent_storage, parent_key) + let element = Element::get_from_storage(&parent_storage, parent_key, grove_version) .map_err(|e| { Error::InvalidParentLayerPath(format!( "could not get key {} for parent {:?} of subtree: {}", @@ -334,20 +359,31 @@ impl GroveDb { .unwrap()?; let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .unwrap() + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .unwrap() } else { Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree", + "cannot open a subtree as parent exists but is not a tree".to_string(), )) } } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .unwrap() + Merk::open_base( + storage, + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) + .unwrap() } } @@ -356,6 +392,7 @@ impl GroveDb { &'db self, path: 
SubtreePath<'b, B>, batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, ) -> CostResult, Error> where B: AsRef<[u8]> + 'b, @@ -374,32 +411,45 @@ impl GroveDb { .unwrap_add_cost(&mut cost); let element = cost_return_on_error!( &mut cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|e| { - Error::InvalidParentLayerPath(format!( - "could not get key {} for parent {:?} of subtree: {}", - hex::encode(parent_key), - DebugByteVectors(parent_path.to_vec()), - e - )) - }) + Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( + |e| { + Error::InvalidParentLayerPath(format!( + "could not get key {} for parent {:?} of subtree: {}", + hex::encode(parent_key), + DebugByteVectors(parent_path.to_vec()), + e + )) + } + ) ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(cost) + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .add_cost(cost) } else { Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree", + "cannot open a subtree as parent exists but is not a tree".to_string(), )) .wrap_with_cost(cost) } } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) + Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) + .add_cost(cost) } } @@ -410,28 +460,52 @@ impl GroveDb { /// Returns root key of GroveDb. 
/// Will be `None` if GroveDb is empty. - pub fn root_key(&self, transaction: TransactionArg) -> CostResult, Error> { + pub fn root_key( + &self, + transaction: TransactionArg, + grove_version: &GroveVersion, + ) -> CostResult, Error> { let mut cost = OperationCost { ..Default::default() }; - root_merk_optional_tx!(&mut cost, self.db, None, transaction, subtree, { - let root_key = subtree.root_key().unwrap(); - Ok(root_key).wrap_with_cost(cost) - }) + root_merk_optional_tx!( + &mut cost, + self.db, + None, + transaction, + subtree, + grove_version, + { + let root_key = subtree.root_key().unwrap(); + Ok(root_key).wrap_with_cost(cost) + } + ) } /// Returns root hash of GroveDb. /// Will be `None` if GroveDb is empty. - pub fn root_hash(&self, transaction: TransactionArg) -> CostResult { + pub fn root_hash( + &self, + transaction: TransactionArg, + grove_version: &GroveVersion, + ) -> CostResult { let mut cost = OperationCost { ..Default::default() }; - root_merk_optional_tx!(&mut cost, self.db, None, transaction, subtree, { - let root_hash = subtree.root_hash().unwrap_add_cost(&mut cost); - Ok(root_hash).wrap_with_cost(cost) - }) + root_merk_optional_tx!( + &mut cost, + self.db, + None, + transaction, + subtree, + grove_version, + { + let root_hash = subtree.root_hash().unwrap_add_cost(&mut cost); + Ok(root_hash).wrap_with_cost(cost) + } + ) } /// Method to propagate updated subtree key changes one level up inside a @@ -442,6 +516,7 @@ impl GroveDb { mut merk_cache: HashMap, Merk>, path: &SubtreePath<'b, B>, transaction: &Transaction, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); @@ -461,7 +536,8 @@ impl GroveDb { storage_batch, parent_path.clone(), transaction, - false + false, + grove_version, ) ); let (root_hash, root_key, sum) = cost_return_on_error!( @@ -475,7 +551,8 @@ impl GroveDb { parent_key, root_key, root_hash, - sum + sum, + grove_version, ) ); child_tree = parent_tree; @@ -492,6 +569,7 @@ impl 
GroveDb { path: SubtreePath<'b, B>, transaction: &Transaction, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); @@ -509,7 +587,12 @@ impl GroveDb { while let Some((parent_path, parent_key)) = current_path.derive_parent() { let mut parent_tree: Merk = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(parent_path.clone(), transaction, Some(batch)) + self.open_transactional_merk_at_path( + parent_path.clone(), + transaction, + Some(batch), + grove_version + ) ); let (root_hash, root_key, sum) = cost_return_on_error!( &mut cost, @@ -522,7 +605,8 @@ impl GroveDb { parent_key, root_key, root_hash, - sum + sum, + grove_version, ) ); child_tree = parent_tree; @@ -537,6 +621,7 @@ impl GroveDb { mut merk_cache: HashMap, Merk>, path: SubtreePath<'b, B>, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); @@ -554,7 +639,11 @@ impl GroveDb { while let Some((parent_path, parent_key)) = current_path.derive_parent() { let mut parent_tree: Merk = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(parent_path.clone(), Some(batch)) + self.open_non_transactional_merk_at_path( + parent_path.clone(), + Some(batch), + grove_version + ) ); let (root_hash, root_key, sum) = cost_return_on_error!( &mut cost, @@ -567,7 +656,8 @@ impl GroveDb { parent_key, root_key, root_hash, - sum + sum, + grove_version, ) ); child_tree = parent_tree; @@ -583,20 +673,27 @@ impl GroveDb { maybe_root_key: Option>, root_tree_hash: Hash, sum: Option, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let key_ref = key.as_ref(); - Self::get_element_from_subtree(parent_tree, key_ref).flat_map_ok(|element| { + Self::get_element_from_subtree(parent_tree, key_ref, grove_version).flat_map_ok(|element| { if let Element::Tree(_, flag) = element { let tree = Element::new_tree_with_flags(maybe_root_key, flag); - 
tree.insert_subtree(parent_tree, key_ref, root_tree_hash, None) + tree.insert_subtree(parent_tree, key_ref, root_tree_hash, None, grove_version) } else if let Element::SumTree(.., flag) = element { let tree = Element::new_sum_tree_with_flags_and_sum_value( maybe_root_key, sum.unwrap_or_default(), flag, ); - tree.insert_subtree(parent_tree, key.as_ref(), root_tree_hash, None) + tree.insert_subtree( + parent_tree, + key.as_ref(), + root_tree_hash, + None, + grove_version, + ) } else { Err(Error::InvalidPath( "can only propagate on tree items".to_owned(), @@ -619,57 +716,68 @@ impl GroveDb { root_tree_hash: Hash, sum: Option, batch_operations: &mut Vec>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); - Self::get_element_from_subtree(parent_tree, key.as_ref()).flat_map_ok(|element| { - if let Element::Tree(_, flag) = element { - let tree = Element::new_tree_with_flags(maybe_root_key, flag); - let merk_feature_type = cost_return_on_error!( - &mut cost, - tree.get_feature_type(parent_tree.is_sum_tree) - .wrap_with_cost(OperationCost::default()) - ); - tree.insert_subtree_into_batch_operations( - key, - root_tree_hash, - true, - batch_operations, - merk_feature_type, - ) - } else if let Element::SumTree(.., flag) = element { - let tree = Element::new_sum_tree_with_flags_and_sum_value( - maybe_root_key, - sum.unwrap_or_default(), - flag, - ); - let merk_feature_type = cost_return_on_error!( - &mut cost, - tree.get_feature_type(parent_tree.is_sum_tree) - .wrap_with_cost(OperationCost::default()) - ); - tree.insert_subtree_into_batch_operations( - key, - root_tree_hash, - true, - batch_operations, - merk_feature_type, - ) - } else { - Err(Error::InvalidPath( - "can only propagate on tree items".to_owned(), - )) - .wrap_with_cost(Default::default()) - } - }) + Self::get_element_from_subtree(parent_tree, key.as_ref(), grove_version).flat_map_ok( + |element| { + if let Element::Tree(_, flag) = element { + let tree = 
Element::new_tree_with_flags(maybe_root_key, flag); + let merk_feature_type = cost_return_on_error!( + &mut cost, + tree.get_feature_type(parent_tree.is_sum_tree) + .wrap_with_cost(OperationCost::default()) + ); + tree.insert_subtree_into_batch_operations( + key, + root_tree_hash, + true, + batch_operations, + merk_feature_type, + grove_version, + ) + } else if let Element::SumTree(.., flag) = element { + let tree = Element::new_sum_tree_with_flags_and_sum_value( + maybe_root_key, + sum.unwrap_or_default(), + flag, + ); + let merk_feature_type = cost_return_on_error!( + &mut cost, + tree.get_feature_type(parent_tree.is_sum_tree) + .wrap_with_cost(OperationCost::default()) + ); + tree.insert_subtree_into_batch_operations( + key, + root_tree_hash, + true, + batch_operations, + merk_feature_type, + grove_version, + ) + } else { + Err(Error::InvalidPath( + "can only propagate on tree items".to_owned(), + )) + .wrap_with_cost(Default::default()) + } + }, + ) } /// Get element from subtree. Return CostResult. 
fn get_element_from_subtree<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( subtree: &Merk, key: K, + grove_version: &GroveVersion, ) -> CostResult { subtree - .get(key.as_ref(), true) + .get( + key.as_ref(), + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) .map_err(|_| { Error::InvalidPath("can't find subtree in parent during propagation".to_owned()) }) @@ -690,7 +798,7 @@ impl GroveDb { }) .flatten() .map_ok(|element_bytes| { - Element::deserialize(&element_bytes).map_err(|_| { + Element::deserialize(&element_bytes, grove_version).map_err(|_| { Error::CorruptedData( "failed to deserialized parent during propagation".to_owned(), ) @@ -713,11 +821,16 @@ impl GroveDb { /// # use std::convert::TryFrom; /// # use tempfile::TempDir; /// # use grovedb_path::SubtreePath; + /// # use grovedb_version::version::GroveVersion; /// # /// # fn main() -> Result<(), Box> { /// use std::option::Option::None; + /// /// + /// /// const TEST_LEAF: &[u8] = b"test_leaf"; /// + /// let grove_version = GroveVersion::latest(); + /// /// let tmp_dir = TempDir::new().unwrap(); /// let mut db = GroveDb::open(tmp_dir.path())?; /// db.insert( @@ -726,6 +839,7 @@ impl GroveDb { /// Element::empty_tree(), /// None, /// None, + /// grove_version, /// ) /// .unwrap()?; /// @@ -738,22 +852,27 @@ impl GroveDb { /// Element::empty_tree(), /// None, /// Some(&tx), + /// grove_version, /// ) /// .unwrap()?; /// /// // This action exists only inside the transaction for now - /// let result = db.get([TEST_LEAF].as_ref(), subtree_key, None).unwrap(); + /// let result = db + /// .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) + /// .unwrap(); /// assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); /// /// // To access values inside the transaction, transaction needs to be passed to the `db::get` /// let result_with_transaction = db - /// .get([TEST_LEAF].as_ref(), subtree_key, Some(&tx)) + /// .get([TEST_LEAF].as_ref(), subtree_key, Some(&tx), 
grove_version) /// .unwrap()?; /// assert_eq!(result_with_transaction, Element::empty_tree()); /// /// // After transaction is committed, the value from it can be accessed normally. - /// db.commit_transaction(tx); - /// let result = db.get([TEST_LEAF].as_ref(), subtree_key, None).unwrap()?; + /// let _ = db.commit_transaction(tx); + /// let result = db + /// .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) + /// .unwrap()?; /// assert_eq!(result, Element::empty_tree()); /// /// # Ok(()) @@ -777,8 +896,12 @@ impl GroveDb { } /// Method to visualize hash mismatch after verification - pub fn visualize_verify_grovedb(&self) -> HashMap { - self.verify_grovedb() + pub fn visualize_verify_grovedb( + &self, + grove_version: &GroveVersion, + ) -> Result, Error> { + Ok(self + .verify_grovedb(None, grove_version)? .iter() .map(|(path, (root_hash, expected, actual))| { ( @@ -793,27 +916,49 @@ impl GroveDb { ), ) }) - .collect() + .collect()) } /// Method to check that the value_hash of Element::Tree nodes are computed /// correctly. 
- pub fn verify_grovedb(&self) -> HashMap>, (CryptoHash, CryptoHash, CryptoHash)> { - let root_merk = self - .open_non_transactional_merk_at_path(SubtreePath::empty(), None) - .unwrap() - .expect("should exist"); - self.verify_merk_and_submerks(root_merk, &SubtreePath::empty(), None) + pub fn verify_grovedb( + &self, + transaction: TransactionArg, + grove_version: &GroveVersion, + ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { + if let Some(transaction) = transaction { + let root_merk = self + .open_transactional_merk_at_path( + SubtreePath::empty(), + transaction, + None, + grove_version, + ) + .unwrap()?; + self.verify_merk_and_submerks_in_transaction( + root_merk, + &SubtreePath::empty(), + None, + transaction, + grove_version, + ) + } else { + let root_merk = self + .open_non_transactional_merk_at_path(SubtreePath::empty(), None, grove_version) + .unwrap()?; + self.verify_merk_and_submerks(root_merk, &SubtreePath::empty(), None, grove_version) + } } /// Verifies that the root hash of the given merk and all submerks match /// those of the merk and submerks at the given path. Returns any issues. 
- fn verify_merk_and_submerks<'db, B: AsRef<[u8]>>( + fn verify_merk_and_submerks<'db, B: AsRef<[u8]>, S: StorageContext<'db>>( &'db self, - merk: Merk, + merk: Merk, path: &SubtreePath, batch: Option<&'db StorageBatch>, - ) -> HashMap>, (CryptoHash, CryptoHash, CryptoHash)> { + grove_version: &GroveVersion, + ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { let mut all_query = Query::new(); all_query.insert_all(); @@ -822,20 +967,109 @@ impl GroveDb { let mut element_iterator = KVIterator::new(merk.storage.raw_iter(), &all_query).unwrap(); while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { - let element = raw_decode(&element_value).unwrap(); - if element.is_tree() { + let element = raw_decode(&element_value, grove_version)?; + if element.is_any_tree() { let (kv_value, element_value_hash) = merk - .get_value_and_value_hash(&key, true) - .unwrap() + .get_value_and_value_hash( + &key, + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() - .unwrap(); + .map_err(MerkError)? 
+ .ok_or(Error::CorruptedData( + "expected merk to contain value at key".to_string(), + ))?; let new_path = path.derive_owned_with_child(key); let new_path_ref = SubtreePath::from(&new_path); let inner_merk = self - .open_non_transactional_merk_at_path(new_path_ref.clone(), batch) + .open_non_transactional_merk_at_path(new_path_ref.clone(), batch, grove_version) + .unwrap()?; + let root_hash = inner_merk.root_hash().unwrap(); + + let actual_value_hash = value_hash(&kv_value).unwrap(); + let combined_value_hash = combine_hash(&actual_value_hash, &root_hash).unwrap(); + + if combined_value_hash != element_value_hash { + issues.insert( + new_path.to_vec(), + (root_hash, combined_value_hash, element_value_hash), + ); + } + issues.extend(self.verify_merk_and_submerks( + inner_merk, + &new_path_ref, + batch, + grove_version, + )?); + } else if element.is_any_item() { + let (kv_value, element_value_hash) = merk + .get_value_and_value_hash( + &key, + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() - .expect("should exist"); + .map_err(MerkError)? 
+ .ok_or(Error::CorruptedData( + "expected merk to contain value at key".to_string(), + ))?; + let actual_value_hash = value_hash(&kv_value).unwrap(); + if actual_value_hash != element_value_hash { + issues.insert( + path.derive_owned_with_child(key).to_vec(), + (actual_value_hash, element_value_hash, actual_value_hash), + ); + } + } + } + Ok(issues) + } + + fn verify_merk_and_submerks_in_transaction<'db, B: AsRef<[u8]>, S: StorageContext<'db>>( + &'db self, + merk: Merk, + path: &SubtreePath, + batch: Option<&'db StorageBatch>, + transaction: &Transaction, + grove_version: &GroveVersion, + ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { + let mut all_query = Query::new(); + all_query.insert_all(); + + let _in_sum_tree = merk.is_sum_tree; + let mut issues = HashMap::new(); + let mut element_iterator = KVIterator::new(merk.storage.raw_iter(), &all_query).unwrap(); + + while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { + let element = raw_decode(&element_value, grove_version)?; + if element.is_any_tree() { + let (kv_value, element_value_hash) = merk + .get_value_and_value_hash( + &key, + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .map_err(MerkError)? 
+ .ok_or(Error::CorruptedData( + "expected merk to contain value at key".to_string(), + ))?; + let new_path = path.derive_owned_with_child(key); + let new_path_ref = SubtreePath::from(&new_path); + + let inner_merk = self + .open_transactional_merk_at_path( + new_path_ref.clone(), + transaction, + batch, + grove_version, + ) + .unwrap()?; let root_hash = inner_merk.root_hash().unwrap(); let actual_value_hash = value_hash(&kv_value).unwrap(); @@ -847,9 +1081,35 @@ impl GroveDb { (root_hash, combined_value_hash, element_value_hash), ); } - issues.extend(self.verify_merk_and_submerks(inner_merk, &new_path_ref, batch)); + issues.extend(self.verify_merk_and_submerks_in_transaction( + inner_merk, + &new_path_ref, + batch, + transaction, + grove_version, + )?); + } else if element.is_any_item() { + let (kv_value, element_value_hash) = merk + .get_value_and_value_hash( + &key, + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .map_err(MerkError)? + .ok_or(Error::CorruptedData( + "expected merk to contain value at key".to_string(), + ))?; + let actual_value_hash = value_hash(&kv_value).unwrap(); + if actual_value_hash != element_value_hash { + issues.insert( + path.derive_owned_with_child(key).to_vec(), + (actual_value_hash, element_value_hash, actual_value_hash), + ); + } } } - issues + Ok(issues) } } diff --git a/grovedb/src/operations.rs b/grovedb/src/operations.rs deleted file mode 100644 index af637f42b..000000000 --- a/grovedb/src/operations.rs +++ /dev/null @@ -1,42 +0,0 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the 
Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Operations for the manipulation of GroveDB state - -#[cfg(feature = "full")] -pub(crate) mod auxiliary; -#[cfg(feature = "full")] -pub mod delete; -#[cfg(feature = "full")] -pub(crate) mod get; -#[cfg(feature = "full")] -pub mod insert; -#[cfg(feature = "full")] -pub(crate) mod is_empty_tree; -#[cfg(any(feature = "full", feature = "verify"))] -pub mod proof; diff --git a/grovedb/src/operations/auxiliary.rs b/grovedb/src/operations/auxiliary.rs index 0a29c510e..516796ed5 100644 --- a/grovedb/src/operations/auxiliary.rs +++ b/grovedb/src/operations/auxiliary.rs @@ -30,15 +30,18 @@ #[cfg(feature = "full")] use grovedb_costs::{ - cost_return_on_error_no_add, storage_cost::key_value_cost::KeyValueStorageCost, CostResult, - CostsExt, OperationCost, + cost_return_on_error, cost_return_on_error_no_add, + storage_cost::key_value_cost::KeyValueStorageCost, CostResult, CostsExt, OperationCost, }; +use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::StorageContext; use grovedb_storage::{Storage, StorageBatch}; +use grovedb_version::version::GroveVersion; +use crate::util::storage_context_optional_tx; #[cfg(feature = "full")] -use crate::{util::meta_storage_context_optional_tx, Error, GroveDb, TransactionArg}; +use 
crate::{util::meta_storage_context_optional_tx, Element, Error, GroveDb, TransactionArg}; #[cfg(feature = "full")] impl GroveDb { @@ -118,4 +121,51 @@ impl GroveDb { Ok(value).wrap_with_cost(cost) }) } + + // TODO: dumb traversal should not be tolerated + /// Finds keys which are trees for a given subtree recursively. + /// One element means a key of a `merk`, n > 1 elements mean relative path + /// for a deeply nested subtree. + pub fn find_subtrees>( + &self, + path: &SubtreePath, + transaction: TransactionArg, + grove_version: &GroveVersion, + ) -> CostResult>>, Error> { + let mut cost = OperationCost::default(); + + // TODO: remove conversion to vec; + // However, it's not easy for a reason: + // new keys to enqueue are taken from raw iterator which returns Vec; + // changing that to slice is hard as cursor should be moved for next iteration + // which requires exclusive (&mut) reference, also there is no guarantee that + // slice which points into storage internals will remain valid if raw + // iterator got altered so why that reference should be exclusive; + // + // Update: there are pinned views into RocksDB to return slices of data, perhaps + // there is something for iterators + + let mut queue: Vec>> = vec![path.to_vec()]; + let mut result: Vec>> = queue.clone(); + + while let Some(q) = queue.pop() { + let subtree_path: SubtreePath> = q.as_slice().into(); + // Get the correct subtree with q_ref as path + storage_context_optional_tx!(self.db, subtree_path, None, transaction, storage, { + let storage = storage.unwrap_add_cost(&mut cost); + let mut raw_iter = Element::iterator(storage.raw_iter()).unwrap_add_cost(&mut cost); + while let Some((key, value)) = + cost_return_on_error!(&mut cost, raw_iter.next_element(grove_version)) + { + if value.is_any_tree() { + let mut sub_path = q.clone(); + sub_path.push(key.to_vec()); + queue.push(sub_path.clone()); + result.push(sub_path); + } + } + }) + } + Ok(result).wrap_with_cost(cost) + } } diff --git 
a/grovedb/src/operations/delete/average_case.rs b/grovedb/src/operations/delete/average_case.rs index ce3d141a1..3ed1abd18 100644 --- a/grovedb/src/operations/delete/average_case.rs +++ b/grovedb/src/operations/delete/average_case.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Average case delete cost use grovedb_costs::{ @@ -39,6 +11,9 @@ use grovedb_merk::{ HASH_LENGTH_U32, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use intmap::IntMap; use crate::{ @@ -58,7 +33,16 @@ impl GroveDb { stop_path_height: Option, validate: bool, estimated_layer_info: IntMap, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "average_case_delete_operations_for_delete_up_tree_while_empty", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .average_case_delete_operations_for_delete_up_tree_while_empty + ); let mut cost = OperationCost::default(); let stop_path_height = stop_path_height.unwrap_or_default(); @@ -73,7 +57,7 @@ impl GroveDb { let mut used_path = path.0.as_slice(); let mut ops = vec![]; let path_len = path.len() as u16; - for height in (stop_path_height..(path_len as u16)).rev() { + for height in (stop_path_height..path_len).rev() { let ( path_at_level, key_at_level, @@ -134,14 +118,15 @@ impl GroveDb { ); let op = cost_return_on_error!( &mut cost, - Self::average_case_delete_operation_for_delete_internal::( + Self::average_case_delete_operation_for_delete::( &KeyInfoPath::from_vec(path_at_level.to_vec()), key_at_level, is_sum_tree, validate, check_if_tree, except_keys_count, - (key_len, estimated_element_size) + (key_len, estimated_element_size), + grove_version, ) ); ops.push(op); @@ -150,8 +135,8 @@ impl GroveDb { } } - /// Average case delete operation for delete internal - pub fn average_case_delete_operation_for_delete_internal<'db, S: Storage<'db>>( + /// Average case delete operation for delete + pub fn average_case_delete_operation_for_delete<'db, S: Storage<'db>>( path: &KeyInfoPath, key: &KeyInfo, parent_tree_is_sum_tree: bool, @@ -159,24 +144,41 @@ impl GroveDb { check_if_tree: bool, except_keys_count: u16, estimated_key_element_size: 
EstimatedKeyAndElementSize, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "average_case_delete_operation_for_delete", + grove_version + .grovedb_versions + .operations + .delete + .average_case_delete_operation_for_delete + ); let mut cost = OperationCost::default(); if validate { - GroveDb::add_average_case_get_merk_at_path::( - &mut cost, - path, - false, - parent_tree_is_sum_tree, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_average_case_get_merk_at_path::( + &mut cost, + path, + false, + parent_tree_is_sum_tree, + grove_version, + ) ); } if check_if_tree { - GroveDb::add_average_case_get_raw_cost::( - &mut cost, - path, - key, - estimated_key_element_size.1, - parent_tree_is_sum_tree, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_average_case_get_raw_cost::( + &mut cost, + path, + key, + estimated_key_element_size.1, + parent_tree_is_sum_tree, + grove_version, + ) ); } // in the worst case this is a tree diff --git a/grovedb/src/operations/delete/delete_up_tree.rs b/grovedb/src/operations/delete/delete_up_tree.rs index 5e0439b22..dd331b697 100644 --- a/grovedb/src/operations/delete/delete_up_tree.rs +++ b/grovedb/src/operations/delete/delete_up_tree.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Delete up tree use grovedb_costs::{ @@ -34,6 +6,9 @@ use grovedb_costs::{ CostResult, CostsExt, OperationCost, }; use grovedb_path::SubtreePath; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use crate::{ batch::GroveDbOp, operations::delete::DeleteOptions, ElementFlags, Error, GroveDb, @@ -91,11 +66,20 @@ impl GroveDb { key: &[u8], options: &DeleteUpTreeOptions, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .delete_up_tree_while_empty + ); self.delete_up_tree_while_empty_with_sectional_storage( path.into(), key, @@ -107,6 +91,7 @@ impl GroveDb { (BasicStorageRemoval(removed_value_bytes)), )) }, + grove_version, ) } @@ -126,7 +111,16 @@ impl GroveDb { (StorageRemovedBytes, StorageRemovedBytes), Error, >, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .delete_up_tree_while_empty_with_sectional_storage + ); let mut cost = OperationCost::default(); let mut batch_operations: Vec = Vec::new(); @@ -139,6 +133,7 @@ impl GroveDb { None, &mut batch_operations, transaction, + grove_version, ) ); @@ -163,6 +158,7 @@ impl GroveDb { |_, _, _| Ok(false), split_removal_bytes_function, transaction, + grove_version, ) .map_ok(|_| 
ops_len as u16) } @@ -176,7 +172,16 @@ impl GroveDb { is_known_to_be_subtree_with_sum: Option<(bool, bool)>, mut current_batch_operations: Vec, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .delete_operations_for_delete_up_tree_while_empty + ); self.add_delete_operations_for_delete_up_tree_while_empty( path, key, @@ -184,6 +189,7 @@ impl GroveDb { is_known_to_be_subtree_with_sum, &mut current_batch_operations, transaction, + grove_version, ) .map_ok(|ops| ops.unwrap_or_default()) } @@ -198,7 +204,16 @@ impl GroveDb { is_known_to_be_subtree_with_sum: Option<(bool, bool)>, current_batch_operations: &mut Vec, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult>, Error> { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .add_delete_operations_for_delete_up_tree_while_empty + ); let mut cost = OperationCost::default(); if let Some(stop_path_height) = options.stop_path_height { @@ -210,7 +225,7 @@ impl GroveDb { if options.validate_tree_at_path_exists { cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction) + self.check_subtree_exists_path_not_found(path.clone(), transaction, grove_version) ); } if let Some(delete_operation_this_level) = cost_return_on_error!( @@ -222,6 +237,7 @@ impl GroveDb { is_known_to_be_subtree_with_sum, current_batch_operations, transaction, + grove_version, ) ) { let mut delete_operations = vec![delete_operation_this_level.clone()]; @@ -240,6 +256,7 @@ impl GroveDb { None, // todo: maybe we can know this? 
current_batch_operations, transaction, + grove_version, ) ) { delete_operations.append(&mut delete_operations_upper_level); diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index fb2ce5ce7..31d96b853 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Delete operations and costs #[cfg(feature = "estimated_costs")] @@ -46,6 +18,7 @@ use grovedb_costs::{ storage_cost::removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, CostResult, CostsExt, OperationCost, }; +use grovedb_merk::{proofs::Query, KVIterator}; #[cfg(feature = "full")] use grovedb_merk::{Error as MerkError, Merk, MerkOptions}; use grovedb_path::SubtreePath; @@ -54,20 +27,47 @@ use grovedb_storage::{ rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, Storage, StorageBatch, StorageContext, }; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; -use crate::util::merk_optional_tx_path_not_empty; #[cfg(feature = "full")] use crate::{ batch::{GroveDbOp, Op}, - util::{storage_context_optional_tx, storage_context_with_parent_optional_tx}, + util::storage_context_with_parent_optional_tx, Element, ElementFlags, Error, GroveDb, Transaction, TransactionArg, }; +use crate::{raw_decode, util::merk_optional_tx_path_not_empty}; + +#[cfg(feature = "full")] +#[derive(Clone)] +/// Clear options +pub struct ClearOptions { + /// Check for Subtrees + pub check_for_subtrees: bool, + /// Allow deleting non-empty trees if we check for subtrees + pub allow_deleting_subtrees: bool, + /// If we check for subtrees, and we don't allow deleting and there are + /// some, should we error? 
+ pub trying_to_clear_with_subtrees_returns_error: bool, +} + +#[cfg(feature = "full")] +impl Default for ClearOptions { + fn default() -> Self { + ClearOptions { + check_for_subtrees: true, + allow_deleting_subtrees: false, + trying_to_clear_with_subtrees_returns_error: true, + } + } +} #[cfg(feature = "full")] #[derive(Clone)] /// Delete options pub struct DeleteOptions { - /// Allow deleting non empty trees + /// Allow deleting non-empty trees pub allow_deleting_non_empty_trees: bool, /// Deleting non empty trees returns error pub deleting_non_empty_trees_returns_error: bool, @@ -107,11 +107,17 @@ impl GroveDb { key: &[u8], options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "delete", + grove_version.grovedb_versions.operations.delete.delete + ); + let options = options.unwrap_or_default(); let batch = StorageBatch::new(); @@ -128,6 +134,7 @@ impl GroveDb { )) }, &batch, + grove_version, ) .map_ok(|_| ()); @@ -138,6 +145,200 @@ impl GroveDb { }) } + /// Delete all elements in a specified subtree + /// Returns if we successfully cleared the subtree + pub fn clear_subtree<'b, B, P>( + &self, + path: P, + options: Option, + transaction: TransactionArg, + grove_version: &GroveVersion, + ) -> Result + where + B: AsRef<[u8]> + 'b, + P: Into>, + { + self.clear_subtree_with_costs(path, options, transaction, grove_version) + .unwrap() + } + + /// Delete all elements in a specified subtree and get back costs + /// Warning: The costs for this operation are not yet correct, hence we + /// should keep this private for now + /// Returns if we successfully cleared the subtree + fn clear_subtree_with_costs<'b, B, P>( + &self, + path: P, + options: Option, + transaction: TransactionArg, + grove_version: &GroveVersion, + ) -> CostResult + where + B: AsRef<[u8]> + 'b, + P: Into>, + { + check_grovedb_v0_with_cost!( + "clear_subtree", + grove_version + 
.grovedb_versions + .operations + .delete + .clear_subtree + ); + + let subtree_path: SubtreePath = path.into(); + let mut cost = OperationCost::default(); + let batch = StorageBatch::new(); + + let options = options.unwrap_or_default(); + + if let Some(transaction) = transaction { + let mut merk_to_clear = cost_return_on_error!( + &mut cost, + self.open_transactional_merk_at_path( + subtree_path.clone(), + transaction, + Some(&batch), + grove_version, + ) + ); + + if options.check_for_subtrees { + let mut all_query = Query::new(); + all_query.insert_all(); + + let mut element_iterator = + KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); + + // delete all nested subtrees + while let Some((key, element_value)) = + element_iterator.next_kv().unwrap_add_cost(&mut cost) + { + let element = raw_decode(&element_value, grove_version).unwrap(); + if element.is_any_tree() { + if options.allow_deleting_subtrees { + cost_return_on_error!( + &mut cost, + self.delete( + subtree_path.clone(), + key.as_slice(), + Some(DeleteOptions { + allow_deleting_non_empty_trees: true, + deleting_non_empty_trees_returns_error: false, + ..Default::default() + }), + Some(transaction), + grove_version, + ) + ); + } else if options.trying_to_clear_with_subtrees_returns_error { + return Err(Error::ClearingTreeWithSubtreesNotAllowed( + "options do not allow to clear this merk tree as it contains \ + subtrees", + )) + .wrap_with_cost(cost); + } else { + return Ok(false).wrap_with_cost(cost); + } + } + } + } + + // delete non subtree values + cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); + + // propagate changes + let mut merk_cache: HashMap, Merk> = + HashMap::default(); + merk_cache.insert(subtree_path.clone(), merk_to_clear); + cost_return_on_error!( + &mut cost, + self.propagate_changes_with_transaction( + merk_cache, + subtree_path.clone(), + transaction, + &batch, + grove_version, + ) + ); + } else { + let mut merk_to_clear = 
cost_return_on_error!( + &mut cost, + self.open_non_transactional_merk_at_path( + subtree_path.clone(), + Some(&batch), + grove_version + ) + ); + + if options.check_for_subtrees { + let mut all_query = Query::new(); + all_query.insert_all(); + + let mut element_iterator = + KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); + + // delete all nested subtrees + while let Some((key, element_value)) = + element_iterator.next_kv().unwrap_add_cost(&mut cost) + { + let element = raw_decode(&element_value, grove_version).unwrap(); + if options.allow_deleting_subtrees { + if element.is_any_tree() { + cost_return_on_error!( + &mut cost, + self.delete( + subtree_path.clone(), + key.as_slice(), + Some(DeleteOptions { + allow_deleting_non_empty_trees: true, + deleting_non_empty_trees_returns_error: false, + ..Default::default() + }), + None, + grove_version, + ) + ); + } + } else if options.trying_to_clear_with_subtrees_returns_error { + return Err(Error::ClearingTreeWithSubtreesNotAllowed( + "options do not allow to clear this merk tree as it contains subtrees", + )) + .wrap_with_cost(cost); + } else { + return Ok(false).wrap_with_cost(cost); + } + } + } + + // delete non subtree values + cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); + + // propagate changes + let mut merk_cache: HashMap, Merk> = + HashMap::default(); + merk_cache.insert(subtree_path.clone(), merk_to_clear); + cost_return_on_error!( + &mut cost, + self.propagate_changes_without_transaction( + merk_cache, + subtree_path.clone(), + &batch, + grove_version, + ) + ); + } + + cost_return_on_error!( + &mut cost, + self.db + .commit_multi_context_batch(batch, transaction) + .map_err(Into::into) + ); + + Ok(true).wrap_with_cost(cost) + } + /// Delete element with sectional storage function pub fn delete_with_sectional_storage_function>( &self, @@ -153,7 +354,17 @@ impl GroveDb { (StorageRemovedBytes, StorageRemovedBytes), Error, >, + grove_version: 
&GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "delete_with_sectional_storage_function", + grove_version + .grovedb_versions + .operations + .delete + .delete_with_sectional_storage_function + ); + let options = options.unwrap_or_default(); let batch = StorageBatch::new(); @@ -164,7 +375,7 @@ impl GroveDb { &options, transaction, &mut |value, removed_key_bytes, removed_value_bytes| { - let mut element = Element::deserialize(value.as_slice()) + let mut element = Element::deserialize(value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; let maybe_flags = element.get_flags_mut(); match maybe_flags { @@ -172,7 +383,7 @@ impl GroveDb { BasicStorageRemoval(removed_key_bytes), BasicStorageRemoval(removed_value_bytes), )), - Some(flags) => (split_removal_bytes_function)( + Some(flags) => split_removal_bytes_function( flags, removed_key_bytes, removed_value_bytes, @@ -181,6 +392,7 @@ impl GroveDb { } }, &batch, + grove_version, ) .map_ok(|_| ()); @@ -197,11 +409,21 @@ impl GroveDb { path: P, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "delete_if_empty_tree", + grove_version + .grovedb_versions + .operations + .delete + .delete_if_empty_tree + ); + let batch = StorageBatch::new(); let collect_costs = self.delete_if_empty_tree_with_sectional_storage_function( @@ -211,10 +433,11 @@ impl GroveDb { &mut |_, removed_key_bytes, removed_value_bytes| { Ok(( BasicStorageRemoval(removed_key_bytes), - (BasicStorageRemoval(removed_value_bytes)), + BasicStorageRemoval(removed_value_bytes), )) }, &batch, + grove_version, ); collect_costs.flat_map_ok(|r| { @@ -240,7 +463,17 @@ impl GroveDb { Error, >, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "delete_if_empty_tree_with_sectional_storage_function", + grove_version + .grovedb_versions + 
.operations + .delete + .delete_if_empty_tree_with_sectional_storage_function + ); + let options = DeleteOptions { allow_deleting_non_empty_trees: false, deleting_non_empty_trees_returns_error: false, @@ -253,7 +486,7 @@ impl GroveDb { &options, transaction, &mut |value, removed_key_bytes, removed_value_bytes| { - let mut element = Element::deserialize(value.as_slice()) + let mut element = Element::deserialize(value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; let maybe_flags = element.get_flags_mut(); match maybe_flags { @@ -261,15 +494,14 @@ impl GroveDb { BasicStorageRemoval(removed_key_bytes), BasicStorageRemoval(removed_value_bytes), )), - Some(flags) => (split_removal_bytes_function)( - flags, - removed_key_bytes, - removed_value_bytes, - ) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())), + Some(flags) => { + split_removal_bytes_function(flags, removed_key_bytes, removed_value_bytes) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + } } }, - &batch, + batch, + grove_version, ) } @@ -282,7 +514,17 @@ impl GroveDb { is_known_to_be_subtree_with_sum: Option<(bool, bool)>, current_batch_operations: &[GroveDbOp], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "delete_operation_for_delete_internal", + grove_version + .grovedb_versions + .operations + .delete + .delete_operation_for_delete_internal + ); + let mut cost = OperationCost::default(); if path.is_root() { @@ -295,14 +537,18 @@ impl GroveDb { if options.validate_tree_at_path_exists { cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction) + self.check_subtree_exists_path_not_found( + path.clone(), + transaction, + grove_version + ) ); } let (is_subtree, is_subtree_with_sum) = match is_known_to_be_subtree_with_sum { None => { let element = cost_return_on_error!( &mut cost, - self.get_raw(path.clone(), 
key.as_ref(), transaction) + self.get_raw(path.clone(), key.as_ref(), transaction, grove_version) ); match element { Element::Tree(..) => (true, false), @@ -338,6 +584,7 @@ impl GroveDb { None, transaction, subtree, + grove_version, { subtree .is_empty_tree_except(batch_deleted_keys) @@ -370,7 +617,7 @@ impl GroveDb { ))) } else { Err(Error::NotSupported( - "deletion operation for non empty tree not currently supported", + "deletion operation for non empty tree not currently supported".to_string(), )) }; result.wrap_with_cost(cost) @@ -395,6 +642,7 @@ impl GroveDb { MerkError, >, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult { if let Some(transaction) = transaction { self.delete_internal_on_transaction( @@ -404,9 +652,17 @@ impl GroveDb { transaction, sectioned_removal, batch, + grove_version, ) } else { - self.delete_internal_without_transaction(path, key, options, sectioned_removal, batch) + self.delete_internal_without_transaction( + path, + key, + options, + sectioned_removal, + batch, + grove_version, + ) } } @@ -425,19 +681,34 @@ impl GroveDb { MerkError, >, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "delete_internal_on_transaction", + grove_version + .grovedb_versions + .operations + .delete + .delete_internal_on_transaction + ); + let mut cost = OperationCost::default(); let element = cost_return_on_error!( &mut cost, - self.get_raw(path.clone(), key.as_ref(), Some(transaction)) + self.get_raw(path.clone(), key.as_ref(), Some(transaction), grove_version) ); let mut subtree_to_delete_from = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(path.clone(), transaction, Some(batch)) + self.open_transactional_merk_at_path( + path.clone(), + transaction, + Some(batch), + grove_version + ) ); let uses_sum_tree = subtree_to_delete_from.is_sum_tree; - if element.is_tree() { + if element.is_any_tree() { let subtree_merk_path = path.derive_owned_with_child(key); 
let subtree_merk_path_ref = SubtreePath::from(&subtree_merk_path); @@ -446,7 +717,8 @@ impl GroveDb { self.open_transactional_merk_at_path( subtree_merk_path_ref.clone(), transaction, - Some(batch) + Some(batch), + grove_version, ) ); let is_empty = subtree_of_tree_we_are_deleting @@ -466,7 +738,7 @@ impl GroveDb { } else if !is_empty { let subtrees_paths = cost_return_on_error!( &mut cost, - self.find_subtrees(&subtree_merk_path_ref, Some(transaction)) + self.find_subtrees(&subtree_merk_path_ref, Some(transaction), grove_version) ); for subtree_path in subtrees_paths { let p: SubtreePath<_> = subtree_path.as_slice().into(); @@ -495,7 +767,9 @@ impl GroveDb { Merk::open_layered_with_root_key( storage, subtree_to_delete_from.root_key(), - element.is_sum_tree() + element.is_sum_tree(), + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| { Error::CorruptedData("cannot open a subtree with given root key".to_owned()) @@ -510,7 +784,8 @@ impl GroveDb { Some(options.as_merk_options()), true, uses_sum_tree, - sectioned_removal + sectioned_removal, + grove_version, ) ); let mut merk_cache: HashMap< @@ -521,10 +796,11 @@ impl GroveDb { cost_return_on_error!( &mut cost, self.propagate_changes_with_batch_transaction( - &batch, + batch, merk_cache, &path, - transaction + transaction, + grove_version, ) ); } else { @@ -537,7 +813,8 @@ impl GroveDb { Some(options.as_merk_options()), true, uses_sum_tree, - sectioned_removal + sectioned_removal, + grove_version, ) ); let mut merk_cache: HashMap< @@ -547,7 +824,13 @@ impl GroveDb { merk_cache.insert(path.clone(), subtree_to_delete_from); cost_return_on_error!( &mut cost, - self.propagate_changes_with_transaction(merk_cache, path, transaction, batch) + self.propagate_changes_with_transaction( + merk_cache, + path, + transaction, + batch, + grove_version + ) ); } } else { @@ -560,6 +843,7 @@ impl GroveDb { false, uses_sum_tree, sectioned_removal, + grove_version, ) ); let mut merk_cache: HashMap, 
Merk> = @@ -567,7 +851,13 @@ impl GroveDb { merk_cache.insert(path.clone(), subtree_to_delete_from); cost_return_on_error!( &mut cost, - self.propagate_changes_with_transaction(merk_cache, path, transaction, batch) + self.propagate_changes_with_transaction( + merk_cache, + path, + transaction, + batch, + grove_version + ) ); } @@ -588,25 +878,38 @@ impl GroveDb { MerkError, >, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "delete_internal_without_transaction", + grove_version + .grovedb_versions + .operations + .delete + .delete_internal_without_transaction + ); + let mut cost = OperationCost::default(); - let element = - cost_return_on_error!(&mut cost, self.get_raw(path.clone(), key.as_ref(), None)); + let element = cost_return_on_error!( + &mut cost, + self.get_raw(path.clone(), key.as_ref(), None, grove_version) + ); let mut merk_cache: HashMap, Merk> = HashMap::default(); let mut subtree_to_delete_from = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path.clone(), Some(batch)) + self.open_non_transactional_merk_at_path(path.clone(), Some(batch), grove_version) ); let uses_sum_tree = subtree_to_delete_from.is_sum_tree; - if element.is_tree() { + if element.is_any_tree() { let subtree_merk_path = path.derive_owned_with_child(key); let subtree_of_tree_we_are_deleting = cost_return_on_error!( &mut cost, self.open_non_transactional_merk_at_path( SubtreePath::from(&subtree_merk_path), - Some(batch) + Some(batch), + grove_version, ) ); let is_empty = subtree_of_tree_we_are_deleting @@ -627,14 +930,18 @@ impl GroveDb { if !is_empty { let subtrees_paths = cost_return_on_error!( &mut cost, - self.find_subtrees(&SubtreePath::from(&subtree_merk_path), None) + self.find_subtrees( + &SubtreePath::from(&subtree_merk_path), + None, + grove_version + ) ); // TODO: dumb traversal should not be tolerated for subtree_path in subtrees_paths.into_iter().rev() { let p: SubtreePath<_> = 
subtree_path.as_slice().into(); let mut inner_subtree_to_delete_from = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(p, Some(batch)) + self.open_non_transactional_merk_at_path(p, Some(batch), grove_version) ); cost_return_on_error!( &mut cost, @@ -655,6 +962,7 @@ impl GroveDb { true, uses_sum_tree, sectioned_removal, + grove_version, ) ); } @@ -668,63 +976,18 @@ impl GroveDb { false, uses_sum_tree, sectioned_removal, + grove_version, ) ); } merk_cache.insert(path.clone(), subtree_to_delete_from); cost_return_on_error!( &mut cost, - self.propagate_changes_without_transaction(merk_cache, path, batch) + self.propagate_changes_without_transaction(merk_cache, path, batch, grove_version) ); Ok(true).wrap_with_cost(cost) } - - // TODO: dumb traversal should not be tolerated - /// Finds keys which are trees for a given subtree recursively. - /// One element means a key of a `merk`, n > 1 elements mean relative path - /// for a deeply nested subtree. - pub(crate) fn find_subtrees>( - &self, - path: &SubtreePath, - transaction: TransactionArg, - ) -> CostResult>>, Error> { - let mut cost = OperationCost::default(); - - // TODO: remove conversion to vec; - // However, it's not easy for a reason: - // new keys to enqueue are taken from raw iterator which returns Vec; - // changing that to slice is hard as cursor should be moved for next iteration - // which requires exclusive (&mut) reference, also there is no guarantee that - // slice which points into storage internals will remain valid if raw - // iterator got altered so why that reference should be exclusive; - // - // Update: there are pinned views into RocksDB to return slices of data, perhaps - // there is something for iterators - - let mut queue: Vec>> = vec![path.to_vec()]; - let mut result: Vec>> = queue.clone(); - - while let Some(q) = queue.pop() { - let subtree_path: SubtreePath> = q.as_slice().into(); - // Get the correct subtree with q_ref as path - 
storage_context_optional_tx!(self.db, subtree_path, None, transaction, storage, { - let storage = storage.unwrap_add_cost(&mut cost); - let mut raw_iter = Element::iterator(storage.raw_iter()).unwrap_add_cost(&mut cost); - while let Some((key, value)) = - cost_return_on_error!(&mut cost, raw_iter.next_element()) - { - if value.is_tree() { - let mut sub_path = q.clone(); - sub_path.push(key.to_vec()); - queue.push(sub_path.clone()); - result.push(sub_path); - } - } - }) - } - Ok(result).wrap_with_cost(cost) - } } #[cfg(feature = "full")] @@ -734,10 +997,11 @@ mod tests { storage_cost::{removal::StorageRemovedBytes::BasicStorageRemoval, StorageCost}, OperationCost, }; + use grovedb_version::version::GroveVersion; use pretty_assertions::assert_eq; use crate::{ - operations::delete::{delete_up_tree::DeleteUpTreeOptions, DeleteOptions}, + operations::delete::{delete_up_tree::DeleteUpTreeOptions, ClearOptions, DeleteOptions}, tests::{ common::EMPTY_PATH, make_empty_grovedb, make_test_grovedb, ANOTHER_TEST_LEAF, TEST_LEAF, }, @@ -746,8 +1010,9 @@ mod tests { #[test] fn test_empty_subtree_deletion_without_transaction() { + let grove_version = GroveVersion::latest(); let _element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // Insert some nested subtrees db.insert( [TEST_LEAF].as_ref(), @@ -755,6 +1020,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -764,32 +1030,51 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); - let root_hash = db.root_hash(None).unwrap().unwrap(); - db.delete([TEST_LEAF].as_ref(), b"key1", None, None) + let root_hash = db.root_hash(None, grove_version).unwrap().unwrap(); + db.delete([TEST_LEAF].as_ref(), b"key1", None, None, grove_version) .unwrap() .expect("unable to delete subtree"); assert!(matches!( - db.get([TEST_LEAF, b"key1", 
b"key2"].as_ref(), b"key3", None) - .unwrap(), + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); // assert_eq!(db.subtrees.len().unwrap(), 3); // TEST_LEAF, ANOTHER_TEST_LEAF // TEST_LEAF.key4 stay - assert!(db.get(EMPTY_PATH, TEST_LEAF, None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, ANOTHER_TEST_LEAF, None).unwrap().is_ok()); - assert!(db.get([TEST_LEAF].as_ref(), b"key4", None).unwrap().is_ok()); - assert_ne!(root_hash, db.root_hash(None).unwrap().unwrap()); + assert!(db + .get(EMPTY_PATH, TEST_LEAF, None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, ANOTHER_TEST_LEAF, None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get([TEST_LEAF].as_ref(), b"key4", None, grove_version) + .unwrap() + .is_ok()); + assert_ne!( + root_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); } #[test] fn test_empty_subtree_deletion_with_transaction() { + let grove_version = GroveVersion::latest(); let _element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); // Insert some nested subtrees @@ -799,6 +1084,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -808,37 +1094,47 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); - db.delete([TEST_LEAF].as_ref(), b"key1", None, Some(&transaction)) - .unwrap() - .expect("unable to delete subtree"); + db.delete( + [TEST_LEAF].as_ref(), + b"key1", + None, + Some(&transaction), + grove_version, + ) + .unwrap() + .expect("unable to delete subtree"); assert!(matches!( db.get( [TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", - Some(&transaction) + Some(&transaction), + grove_version ) .unwrap(), 
Err(Error::PathParentLayerNotFound(_)) )); transaction.commit().expect("cannot commit transaction"); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key1", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"key1", None, grove_version) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); - assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key4", None).unwrap(), - Ok(_) - )); + assert!(db + .get([TEST_LEAF].as_ref(), b"key4", None, grove_version) + .unwrap() + .is_ok()); } #[test] fn test_subtree_deletion_if_empty_with_transaction() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"value".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); @@ -849,6 +1145,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree insert A on level 1"); @@ -858,6 +1155,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree insert A on level 2"); @@ -867,6 +1165,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree insert B on level 2"); @@ -877,6 +1176,7 @@ mod tests { element, None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful value insert"); @@ -886,6 +1186,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree insert B on level 1"); @@ -904,7 +1205,12 @@ mod tests { let transaction = db.start_transaction(); let deleted = db - .delete_if_empty_tree([TEST_LEAF].as_ref(), b"level1-A", Some(&transaction)) + .delete_if_empty_tree( + [TEST_LEAF].as_ref(), + b"level1-A", + Some(&transaction), + grove_version, + ) .unwrap() .expect("unable to delete subtree"); assert!(!deleted); @@ -918,6 +1224,7 @@ mod tests { ..Default::default() }, Some(&transaction), + grove_version, ) .unwrap() 
.expect("unable to delete subtree"); @@ -927,7 +1234,8 @@ mod tests { db.get( [TEST_LEAF, b"level1-A", b"level2-A"].as_ref(), b"level3-A", - Some(&transaction) + Some(&transaction), + grove_version ) .unwrap(), Err(Error::PathParentLayerNotFound(_)) @@ -937,23 +1245,30 @@ mod tests { db.get( [TEST_LEAF, b"level1-A"].as_ref(), b"level2-A", - Some(&transaction) + Some(&transaction), + grove_version ) .unwrap(), Err(Error::PathKeyNotFound(_)) )); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"level1-A", Some(&transaction)) - .unwrap(), + db.get( + [TEST_LEAF].as_ref(), + b"level1-A", + Some(&transaction), + grove_version + ) + .unwrap(), Ok(Element::Tree(..)), )); } #[test] fn test_subtree_deletion_if_empty_without_transaction() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"value".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // Insert some nested subtrees db.insert( @@ -962,6 +1277,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert A on level 1"); @@ -971,6 +1287,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert A on level 2"); @@ -980,6 +1297,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert B on level 2"); @@ -990,6 +1308,7 @@ mod tests { element, None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -999,6 +1318,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert B on level 1"); @@ -1011,7 +1331,7 @@ mod tests { // Level 3: A: value let deleted = db - .delete_if_empty_tree([TEST_LEAF].as_ref(), b"level1-A", None) + .delete_if_empty_tree([TEST_LEAF].as_ref(), b"level1-A", None, grove_version) .unwrap() .expect("unable to delete subtree"); assert!(!deleted); @@ -1025,6 +1345,7 @@ mod tests { 
..Default::default() }, None, + grove_version, ) .unwrap() .expect("unable to delete subtree"); @@ -1035,28 +1356,36 @@ mod tests { [TEST_LEAF, b"level1-A", b"level2-A"].as_ref(), b"level3-A", None, + grove_version ) .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); assert!(matches!( - db.get([TEST_LEAF, b"level1-A"].as_ref(), b"level2-A", None) - .unwrap(), + db.get( + [TEST_LEAF, b"level1-A"].as_ref(), + b"level2-A", + None, + grove_version + ) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"level1-A", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"level1-A", None, grove_version) + .unwrap(), Ok(Element::Tree(..)), )); } #[test] fn test_recurring_deletion_through_subtrees_with_transaction() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); // Insert some nested subtrees @@ -1066,6 +1395,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -1075,6 +1405,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -1086,6 +1417,7 @@ mod tests { element, None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful value insert"); @@ -1095,6 +1427,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); @@ -1108,6 +1441,7 @@ mod tests { ..Default::default() }), Some(&transaction), + grove_version, ) .unwrap() .expect("unable to delete subtree"); @@ -1115,26 +1449,29 @@ mod tests { db.get( [TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", - Some(&transaction) + Some(&transaction), + grove_version ) .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); transaction.commit().expect("cannot commit 
transaction"); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key1", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"key1", None, grove_version) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); - db.get([TEST_LEAF].as_ref(), b"key4", None) + db.get([TEST_LEAF].as_ref(), b"key4", None, grove_version) .unwrap() .expect("expected to get key4"); } #[test] fn test_recurring_deletion_through_subtrees_without_transaction() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // Insert some nested subtrees db.insert( @@ -1143,6 +1480,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -1152,6 +1490,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -1163,6 +1502,7 @@ mod tests { element, None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -1172,6 +1512,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); @@ -1185,45 +1526,65 @@ mod tests { ..Default::default() }), None, + grove_version, ) .unwrap() .expect("unable to delete subtree"); assert!(matches!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap(), + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key1", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"key1", None, grove_version) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); - assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key4", None).unwrap(), - Ok(_) - )); + assert!(db + .get([TEST_LEAF].as_ref(), b"key4", None, grove_version) + .unwrap() + .is_ok()); } #[test] fn test_item_deletion() { - let db = make_test_grovedb(); + let 
grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); - db.insert([TEST_LEAF].as_ref(), b"key", element, None, None) - .unwrap() - .expect("successful insert"); - let root_hash = db.root_hash(None).unwrap().unwrap(); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + element, + None, + None, + grove_version, + ) + .unwrap() + .expect("successful insert"); + let root_hash = db.root_hash(None, grove_version).unwrap().unwrap(); assert!(db - .delete([TEST_LEAF].as_ref(), b"key", None, None) + .delete([TEST_LEAF].as_ref(), b"key", None, None, grove_version) .unwrap() .is_ok()); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"key", None, grove_version) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); - assert_ne!(root_hash, db.root_hash(None).unwrap().unwrap()); + assert_ne!( + root_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); } #[test] fn test_delete_one_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1234,12 +1595,13 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); let cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete"); @@ -1292,6 +1654,7 @@ mod tests { #[test] fn test_delete_one_sum_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1301,6 +1664,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .unwrap() .expect("expected to insert"); @@ -1312,12 +1676,19 @@ mod tests { Element::new_sum_item(15000), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); let cost = db - 
.delete([b"sum_tree".as_slice()].as_ref(), b"key1", None, Some(&tx)) + .delete( + [b"sum_tree".as_slice()].as_ref(), + b"key1", + None, + Some(&tx), + grove_version, + ) .cost_as_result() .expect("expected to delete"); @@ -1369,6 +1740,7 @@ mod tests { #[test] fn test_delete_one_item_in_sum_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1378,6 +1750,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .unwrap() .expect("expected to insert"); @@ -1389,12 +1762,19 @@ mod tests { Element::new_item(b"hello".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); let cost = db - .delete([b"sum_tree".as_slice()].as_ref(), b"key1", None, Some(&tx)) + .delete( + [b"sum_tree".as_slice()].as_ref(), + b"key1", + None, + Some(&tx), + grove_version, + ) .cost_as_result() .expect("expected to delete"); @@ -1444,4 +1824,138 @@ mod tests { } ); } + + #[test] + fn test_subtree_clear() { + let grove_version = GroveVersion::latest(); + let element = Element::new_item(b"ayy".to_vec()); + + let db = make_test_grovedb(grove_version); + + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 2 insert"); + + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element, + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key4", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 3 insert"); + + let key1_tree = db + .get([TEST_LEAF].as_ref(), b"key1", None, grove_version) + 
.unwrap() + .unwrap(); + assert!(!matches!(key1_tree, Element::Tree(None, _))); + let key1_merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + None, + grove_version, + ) + .unwrap() + .unwrap(); + assert_ne!(key1_merk.root_hash().unwrap(), [0; 32]); + + let root_hash_before_clear = db.root_hash(None, grove_version).unwrap().unwrap(); + db.clear_subtree([TEST_LEAF, b"key1"].as_ref(), None, None, grove_version) + .expect_err("unable to delete subtree"); + + let success = db + .clear_subtree( + [TEST_LEAF, b"key1"].as_ref(), + Some(ClearOptions { + check_for_subtrees: true, + allow_deleting_subtrees: false, + trying_to_clear_with_subtrees_returns_error: false, + }), + None, + grove_version, + ) + .expect("expected no error"); + assert!(!success); + + let success = db + .clear_subtree( + [TEST_LEAF, b"key1"].as_ref(), + Some(ClearOptions { + check_for_subtrees: true, + allow_deleting_subtrees: true, + trying_to_clear_with_subtrees_returns_error: false, + }), + None, + grove_version, + ) + .expect("unable to delete subtree"); + + assert!(success); + + assert!(matches!( + db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) + .unwrap(), + Err(Error::PathKeyNotFound(_)) + )); + assert!(matches!( + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap(), + Err(Error::PathParentLayerNotFound(_)) + )); + let key1_tree = db + .get([TEST_LEAF].as_ref(), b"key1", None, grove_version) + .unwrap() + .unwrap(); + assert!(matches!(key1_tree, Element::Tree(None, _))); + + let key1_merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + None, + grove_version, + ) + .unwrap() + .unwrap(); + assert_eq!(key1_merk.root_hash().unwrap(), [0; 32]); + + let root_hash_after_clear = db.root_hash(None, grove_version).unwrap().unwrap(); + assert_ne!(root_hash_before_clear, root_hash_after_clear); + } } diff --git a/grovedb/src/operations/delete/worst_case.rs 
b/grovedb/src/operations/delete/worst_case.rs index 60699f590..b2a50bb21 100644 --- a/grovedb/src/operations/delete/worst_case.rs +++ b/grovedb/src/operations/delete/worst_case.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Worst case delete costs use grovedb_costs::{ @@ -35,6 +7,9 @@ use grovedb_merk::{ estimated_costs::worst_case_costs::add_worst_case_cost_for_is_empty_tree_except, tree::kv::KV, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use intmap::IntMap; use crate::{ @@ -53,7 +28,16 @@ impl GroveDb { validate: bool, intermediate_tree_info: IntMap<(bool, u32)>, max_element_size: u32, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .worst_case_delete_operations_for_delete_up_tree_while_empty + ); let mut cost = OperationCost::default(); let stop_path_height = stop_path_height.unwrap_or_default(); @@ -68,7 +52,7 @@ impl GroveDb { let mut used_path = path.0.as_slice(); let mut ops = vec![]; let path_len = path.len() as u16; - for height in (stop_path_height..(path_len as u16)).rev() { + for height in (stop_path_height..path_len).rev() { let ( path_at_level, key_at_level, @@ -116,14 +100,15 @@ impl GroveDb { ); let op = cost_return_on_error!( &mut cost, - Self::worst_case_delete_operation_for_delete_internal::( + Self::worst_case_delete_operation_for_delete::( &KeyInfoPath::from_vec(path_at_level.to_vec()), key_at_level, is_sum_tree, validate, check_if_tree, except_keys_count, - max_element_size + max_element_size, + grove_version ) ); ops.push(op); @@ -132,8 +117,8 @@ impl GroveDb { } } - /// Worst case costs for delete operation for delete internal - pub fn worst_case_delete_operation_for_delete_internal<'db, S: Storage<'db>>( + /// Worst case costs for delete operation for delete + pub fn worst_case_delete_operation_for_delete<'db, S: Storage<'db>>( path: &KeyInfoPath, key: &KeyInfo, parent_tree_is_sum_tree: bool, @@ -141,19 +126,40 @@ impl GroveDb { check_if_tree: bool, except_keys_count: u16, max_element_size: u32, + 
grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "worst_case_delete_operation_for_delete", + grove_version + .grovedb_versions + .operations + .delete + .worst_case_delete_operation_for_delete + ); let mut cost = OperationCost::default(); if validate { - GroveDb::add_worst_case_get_merk_at_path::(&mut cost, path, parent_tree_is_sum_tree); + cost_return_on_error_no_add!( + &cost, + GroveDb::add_worst_case_get_merk_at_path::( + &mut cost, + path, + parent_tree_is_sum_tree, + grove_version, + ) + ); } if check_if_tree { - GroveDb::add_worst_case_get_raw_cost::( - &mut cost, - path, - key, - max_element_size, - parent_tree_is_sum_tree, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_worst_case_get_raw_cost::( + &mut cost, + path, + key, + max_element_size, + parent_tree_is_sum_tree, + grove_version, + ) ); } // in the worst case this is a tree diff --git a/grovedb/src/operations/get/average_case.rs b/grovedb/src/operations/get/average_case.rs index 4a6ee2eea..aca4426df 100644 --- a/grovedb/src/operations/get/average_case.rs +++ b/grovedb/src/operations/get/average_case.rs @@ -1,38 +1,12 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Average case get costs #[cfg(feature = "full")] use grovedb_costs::OperationCost; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::RocksDbStorage; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use crate::Error; #[cfg(feature = "full")] use crate::{ batch::{key_info::KeyInfo, KeyInfoPath}, @@ -48,7 +22,16 @@ impl GroveDb { key: &KeyInfo, estimated_element_size: u32, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "average_case_for_has_raw", + grove_version + .grovedb_versions + .operations + .get + .average_case_for_has_raw + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_has_raw_cost::( &mut cost, @@ -56,8 +39,9 @@ impl GroveDb { key, estimated_element_size, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Get the Operation Cost for a has query where we estimate that we @@ -68,7 +52,16 @@ impl GroveDb { estimated_flags_size: u32, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "average_case_for_has_raw_tree", + grove_version + .grovedb_versions + .operations + .get + .average_case_for_has_raw_tree + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_has_raw_tree_cost::( &mut cost, @@ -77,8 +70,9 @@ impl GroveDb { estimated_flags_size, is_sum_tree, 
in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Get the Operation Cost for a get query that doesn't follow @@ -88,7 +82,16 @@ impl GroveDb { key: &KeyInfo, estimated_element_size: u32, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "average_case_for_get_raw", + grove_version + .grovedb_versions + .operations + .get + .average_case_for_get_raw + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_get_raw_cost::( &mut cost, @@ -96,8 +99,9 @@ impl GroveDb { key, estimated_element_size, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Get the Operation Cost for a get query with the following parameters @@ -107,7 +111,16 @@ impl GroveDb { in_parent_tree_using_sums: bool, estimated_element_size: u32, estimated_references_sizes: Vec, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "average_case_for_get", + grove_version + .grovedb_versions + .operations + .get + .average_case_for_get + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_get_cost::( &mut cost, @@ -116,8 +129,9 @@ impl GroveDb { in_parent_tree_using_sums, estimated_element_size, estimated_references_sizes, - ); - cost + grove_version, + )?; + Ok(cost) } /// Get the Operation Cost for a get query with the following parameters @@ -127,7 +141,16 @@ impl GroveDb { estimated_flags_size: u32, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "average_case_for_get", + grove_version + .grovedb_versions + .operations + .get + .average_case_for_get_tree + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_get_raw_tree_cost::( &mut cost, @@ -136,7 +159,8 @@ impl GroveDb { estimated_flags_size, is_sum_tree, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } } diff 
--git a/grovedb/src/operations/get/mod.rs b/grovedb/src/operations/get/mod.rs index 69512567d..b62896996 100644 --- a/grovedb/src/operations/get/mod.rs +++ b/grovedb/src/operations/get/mod.rs @@ -1,37 +1,11 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Get operations and costs #[cfg(feature = "estimated_costs")] mod average_case; #[cfg(feature = "full")] mod query; +#[cfg(feature = "full")] +pub use query::QueryItemOrSumReturnType; #[cfg(feature = "estimated_costs")] mod worst_case; @@ -44,6 +18,9 @@ use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::StorageContext; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; #[cfg(feature = "full")] use crate::{ @@ -66,12 +43,15 @@ impl GroveDb { path: P, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { - self.get_caching_optional(path.into(), key, true, transaction) + check_grovedb_v0_with_cost!("get", grove_version.grovedb_versions.operations.get.get); + + self.get_caching_optional(path.into(), key, true, transaction, grove_version) } /// Get an element from the backing store @@ -82,12 +62,28 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "get_caching_optional", + grove_version + .grovedb_versions + .operations + .get + .get_caching_optional + ); + let mut cost = OperationCost::default(); match cost_return_on_error!( &mut cost, - self.get_raw_caching_optional(path.clone(), key, allow_cache, transaction) + self.get_raw_caching_optional( + path.clone(), + key, + allow_cache, + transaction, + grove_version + ) ) { Element::Reference(reference_path, ..) 
=> { let path_owned = cost_return_on_error!( @@ -95,8 +91,13 @@ impl GroveDb { path_from_reference_path_type(reference_path, &path.to_vec(), Some(key)) .wrap_with_cost(OperationCost::default()) ); - self.follow_reference(path_owned.as_slice().into(), allow_cache, transaction) - .add_cost(cost) + self.follow_reference( + path_owned.as_slice().into(), + allow_cache, + transaction, + grove_version, + ) + .add_cost(cost) } other => Ok(other).wrap_with_cost(cost), } @@ -110,7 +111,17 @@ impl GroveDb { path: SubtreePath, allow_cache: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "follow_reference", + grove_version + .grovedb_versions + .operations + .get + .follow_reference + ); + let mut cost = OperationCost::default(); let mut hops_left = MAX_REFERENCE_HOPS; @@ -126,22 +137,28 @@ impl GroveDb { if let Some((key, path_slice)) = current_path.split_last() { current_element = cost_return_on_error!( &mut cost, - self.get_raw_caching_optional(path_slice.into(), key, allow_cache, transaction) - .map_err(|e| match e { - Error::PathParentLayerNotFound(p) => { - Error::CorruptedReferencePathParentLayerNotFound(p) - } - Error::PathKeyNotFound(p) => { - Error::CorruptedReferencePathKeyNotFound(p) - } - Error::PathNotFound(p) => { - Error::CorruptedReferencePathNotFound(p) - } - _ => e, - }) + self.get_raw_caching_optional( + path_slice.into(), + key, + allow_cache, + transaction, + grove_version + ) + .map_err(|e| match e { + Error::PathParentLayerNotFound(p) => { + Error::CorruptedReferencePathParentLayerNotFound(p) + } + Error::PathKeyNotFound(p) => { + Error::CorruptedReferencePathKeyNotFound(p) + } + Error::PathNotFound(p) => { + Error::CorruptedReferencePathNotFound(p) + } + _ => e, + }) ) } else { - return Err(Error::CorruptedPath("empty path")).wrap_with_cost(cost); + return Err(Error::CorruptedPath("empty path".to_string())).wrap_with_cost(cost); } visited.insert(current_path.clone()); match 
current_element { @@ -166,8 +183,14 @@ impl GroveDb { path: SubtreePath, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult { - self.get_raw_caching_optional(path, key, true, transaction) + check_grovedb_v0_with_cost!( + "get_raw", + grove_version.grovedb_versions.operations.get.get_raw + ); + + self.get_raw_caching_optional(path, key, true, transaction, grove_version) } /// Get tree item without following references @@ -177,11 +200,27 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "get_raw_caching_optional", + grove_version + .grovedb_versions + .operations + .get + .get_raw_caching_optional + ); + if let Some(transaction) = transaction { - self.get_raw_on_transaction_caching_optional(path, key, allow_cache, transaction) + self.get_raw_on_transaction_caching_optional( + path, + key, + allow_cache, + transaction, + grove_version, + ) } else { - self.get_raw_without_transaction_caching_optional(path, key, allow_cache) + self.get_raw_without_transaction_caching_optional(path, key, allow_cache, grove_version) } } @@ -193,8 +232,18 @@ impl GroveDb { path: SubtreePath, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { - self.get_raw_optional_caching_optional(path, key, true, transaction) + check_grovedb_v0_with_cost!( + "get_raw_optional", + grove_version + .grovedb_versions + .operations + .get + .get_raw_optional + ); + + self.get_raw_optional_caching_optional(path, key, true, transaction, grove_version) } /// Get tree item without following references @@ -204,16 +253,32 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_raw_optional_caching_optional", + grove_version + .grovedb_versions + .operations + .get + .get_raw_optional_caching_optional + ); 
+ if let Some(transaction) = transaction { self.get_raw_optional_on_transaction_caching_optional( path, key, allow_cache, transaction, + grove_version, ) } else { - self.get_raw_optional_without_transaction_caching_optional(path, key, allow_cache) + self.get_raw_optional_without_transaction_caching_optional( + path, + key, + allow_cache, + grove_version, + ) } } @@ -224,12 +289,13 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: &Transaction, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); let merk_to_get_from = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(path, transaction, None) + self.open_transactional_merk_at_path(path, transaction, None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => { Error::PathParentLayerNotFound(s) @@ -238,7 +304,7 @@ impl GroveDb { }) ); - Element::get(&merk_to_get_from, key, allow_cache).add_cost(cost) + Element::get(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) } /// Get tree item without following references @@ -248,10 +314,11 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: &Transaction, + grove_version: &GroveVersion, ) -> CostResult, Error> { let mut cost = OperationCost::default(); let merk_result = self - .open_transactional_merk_at_path(path, transaction, None) + .open_transactional_merk_at_path(path, transaction, None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => Error::PathParentLayerNotFound(s), _ => e, @@ -268,7 +335,7 @@ impl GroveDb { ); if let Some(merk_to_get_from) = merk { - Element::get_optional(&merk_to_get_from, key, allow_cache).add_cost(cost) + Element::get_optional(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) } else { Ok(None).wrap_with_cost(cost) } @@ -280,12 +347,13 @@ impl GroveDb { path: SubtreePath, key: &[u8], allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = 
OperationCost::default(); let merk_to_get_from = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path, None) + self.open_non_transactional_merk_at_path(path, None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => { Error::PathParentLayerNotFound(s) @@ -294,7 +362,7 @@ impl GroveDb { }) ); - Element::get(&merk_to_get_from, key, allow_cache).add_cost(cost) + Element::get(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) } /// Get tree item without following references @@ -303,11 +371,12 @@ impl GroveDb { path: SubtreePath, key: &[u8], allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult, Error> { let mut cost = OperationCost::default(); let merk_result = self - .open_non_transactional_merk_at_path(path, None) + .open_non_transactional_merk_at_path(path, None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => Error::PathParentLayerNotFound(s), _ => e, @@ -324,7 +393,7 @@ impl GroveDb { ); if let Some(merk_to_get_from) = merk { - Element::get_optional(&merk_to_get_from, key, allow_cache).add_cost(cost) + Element::get_optional(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) } else { Ok(None).wrap_with_cost(cost) } @@ -337,11 +406,17 @@ impl GroveDb { path: P, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "has_raw", + grove_version.grovedb_versions.operations.get.has_raw + ); + // Merk's items should be written into data storage and checked accordingly storage_context_optional_tx!(self.db, path.into(), None, transaction, storage, { storage.flat_map(|s| s.get(key).map_err(|e| e.into()).map_ok(|x| x.is_some())) @@ -353,6 +428,7 @@ impl GroveDb { path: SubtreePath, transaction: TransactionArg, error_fn: impl FnOnce() -> Error, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); @@ 
-360,17 +436,22 @@ impl GroveDb { let element = if let Some(transaction) = transaction { let merk_to_get_from = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(parent_path, transaction, None) + self.open_transactional_merk_at_path( + parent_path, + transaction, + None, + grove_version + ) ); - Element::get(&merk_to_get_from, parent_key, true) + Element::get(&merk_to_get_from, parent_key, true, grove_version) } else { let merk_to_get_from = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(parent_path, None) + self.open_non_transactional_merk_at_path(parent_path, None, grove_version) ); - Element::get(&merk_to_get_from, parent_key, true) + Element::get(&merk_to_get_from, parent_key, true, grove_version) } .unwrap_add_cost(&mut cost); match element { @@ -388,19 +469,25 @@ impl GroveDb { &self, path: SubtreePath<'b, B>, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where B: AsRef<[u8]> + 'b, { - self.check_subtree_exists(path.clone(), transaction, || { - Error::PathNotFound(format!( - "subtree doesn't exist at path {:?}", - path.to_vec() - .into_iter() - .map(hex::encode) - .collect::>() - )) - }) + self.check_subtree_exists( + path.clone(), + transaction, + || { + Error::PathNotFound(format!( + "subtree doesn't exist at path {:?}", + path.to_vec() + .into_iter() + .map(hex::encode) + .collect::>() + )) + }, + grove_version, + ) } /// Check subtree exists with invalid path error @@ -408,9 +495,22 @@ impl GroveDb { &self, path: SubtreePath, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - self.check_subtree_exists(path, transaction, || { - Error::InvalidPath("subtree doesn't exist".to_owned()) - }) + check_grovedb_v0_with_cost!( + "check_subtree_exists_invalid_path", + grove_version + .grovedb_versions + .operations + .get + .check_subtree_exists_invalid_path + ); + + self.check_subtree_exists( + path, + transaction, + || 
Error::InvalidPath("subtree doesn't exist".to_owned()), + grove_version, + ) } } diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index 2dbd89b8c..81046dbfc 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Query operations use grovedb_costs::cost_return_on_error_default; @@ -33,10 +5,18 @@ use grovedb_costs::cost_return_on_error_default; use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; +use grovedb_version::{ + check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; #[cfg(feature = "full")] use integer_encoding::VarInt; -use crate::query_result_type::PathKeyOptionalElementTrio; +#[cfg(feature = "full")] +use crate::element::SumValue; +use crate::{ + element::QueryOptions, operations::proof::ProveOptions, + query_result_type::PathKeyOptionalElementTrio, +}; #[cfg(feature = "full")] use crate::{ query_result_type::{QueryResultElement, QueryResultElements, QueryResultType}, @@ -44,6 +24,16 @@ use crate::{ Element, Error, GroveDb, PathQuery, TransactionArg, }; +#[cfg(feature = "full")] +#[derive(Debug, Eq, PartialEq, Clone)] +/// A return type for query_item_value_or_sum +pub enum QueryItemOrSumReturnType { + /// an Item in serialized form + ItemData(Vec), + /// A sum item or a sum tree value + SumValue(SumValue), +} + #[cfg(feature = "full")] impl GroveDb { /// Encoded query for multiple path queries @@ -51,8 +41,20 @@ impl GroveDb { &self, path_queries: &[&PathQuery], allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult>, Error> { + check_grovedb_v0_with_cost!( + "query_encoded_many", + grove_version + .grovedb_versions + .operations + .query + .query_encoded_many + ); + let mut cost = OperationCost::default(); let elements = cost_return_on_error!( @@ -60,8 +62,11 @@ impl GroveDb { self.query_many_raw( path_queries, allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, - transaction + transaction, + grove_version ) ); let 
results_wrapped = elements @@ -79,6 +84,7 @@ impl GroveDb { absolute_path.as_slice().into(), allow_cache, transaction, + grove_version, ) .unwrap_add_cost(&mut cost)?; @@ -109,36 +115,65 @@ impl GroveDb { &self, path_queries: &[&PathQuery], allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where { + check_grovedb_v0_with_cost!( + "query_many_raw", + grove_version + .grovedb_versions + .operations + .query + .query_many_raw + ); let mut cost = OperationCost::default(); - let query = cost_return_on_error_no_add!(&cost, PathQuery::merge(path_queries.to_vec())); + let query = cost_return_on_error_no_add!( + &cost, + PathQuery::merge(path_queries.to_vec(), grove_version) + ); let (result, _) = cost_return_on_error!( &mut cost, - self.query_raw(&query, allow_cache, result_type, transaction) + self.query_raw( + &query, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, + result_type, + transaction, + grove_version + ) ); Ok(result).wrap_with_cost(cost) } - /// Prove a path query as either verbose or non verbose + /// Prove a path query as either verbose or non-verbose pub fn get_proved_path_query( &self, path_query: &PathQuery, - is_verbose: bool, + prove_options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_proved_path_query", + grove_version + .grovedb_versions + .operations + .query + .get_proved_path_query + ); if transaction.is_some() { Err(Error::NotSupported( - "transactions are not currently supported", + "transactions are not currently supported".to_string(), )) .wrap_with_cost(Default::default()) - } else if is_verbose { - self.prove_verbose(path_query) } else { - self.prove_query(path_query) + self.prove_query(path_query, prove_options, 
grove_version) } } @@ -148,7 +183,16 @@ where { allow_cache: bool, cost: &mut OperationCost, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result { + check_grovedb_v0!( + "follow_element", + grove_version + .grovedb_versions + .operations + .query + .follow_element + ); match element { Element::Reference(reference_path, ..) => { match reference_path { @@ -163,10 +207,11 @@ where { absolute_path.as_slice().into(), allow_cache, transaction, + grove_version, ) .unwrap_add_cost(cost)?; - if maybe_item.is_item() { + if maybe_item.is_any_item() { Ok(maybe_item) } else { Err(Error::InvalidQuery("the reference must result in an item")) @@ -177,10 +222,8 @@ where { )), } } - Element::Item(..) | Element::SumItem(..) => Ok(element), - Element::Tree(..) | Element::SumTree(..) => Err(Error::InvalidQuery( - "path_queries can only refer to items and references", - )), + Element::Item(..) | Element::SumItem(..) | Element::SumTree(..) => Ok(element), + Element::Tree(..) => Err(Error::InvalidQuery("path_queries can not refer to trees")), } } @@ -189,21 +232,36 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { + check_grovedb_v0_with_cost!( + "query", + grove_version.grovedb_versions.operations.query.query + ); let mut cost = OperationCost::default(); let (elements, skipped) = cost_return_on_error!( &mut cost, - self.query_raw(path_query, allow_cache, result_type, transaction) + self.query_raw( + path_query, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, + result_type, + transaction, + grove_version + ) ); let results_wrapped = elements .into_iterator() .map(|result_item| { result_item.map_element(|element| { - self.follow_element(element, 
allow_cache, &mut cost, transaction) + self.follow_element(element, allow_cache, &mut cost, transaction, grove_version) }) }) .collect::, Error>>(); @@ -218,8 +276,19 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(Vec>, u16), Error> { + check_grovedb_v0_with_cost!( + "query_item_value", + grove_version + .grovedb_versions + .operations + .query + .query_item_value + ); let mut cost = OperationCost::default(); let (elements, skipped) = cost_return_on_error!( @@ -227,8 +296,11 @@ where { self.query_raw( path_query, allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, - transaction + transaction, + grove_version ) ); @@ -250,6 +322,7 @@ where { absolute_path.as_slice().into(), allow_cache, transaction, + grove_version, ) .unwrap_add_cost(&mut cost)?; @@ -283,13 +356,119 @@ where { Ok((results, skipped)).wrap_with_cost(cost) } + /// Queries the backing store and returns element items by their value, + /// Sum Items are returned + pub fn query_item_value_or_sum( + &self, + path_query: &PathQuery, + allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, + transaction: TransactionArg, + grove_version: &GroveVersion, + ) -> CostResult<(Vec, u16), Error> { + check_grovedb_v0_with_cost!( + "query_item_value_or_sum", + grove_version + .grovedb_versions + .operations + .query + .query_item_value_or_sum + ); + let mut cost = OperationCost::default(); + + let (elements, skipped) = cost_return_on_error!( + &mut cost, + self.query_raw( + path_query, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, + QueryResultType::QueryElementResultType, + transaction, + 
grove_version + ) + ); + + let results_wrapped = elements + .into_iterator() + .map(|result_item| match result_item { + QueryResultElement::ElementResultItem(element) => { + match element { + Element::Reference(reference_path, ..) => { + match reference_path { + ReferencePathType::AbsolutePathReference(absolute_path) => { + // While `map` on iterator is lazy, we should accumulate costs + // even if `collect` will + // end in `Err`, so we'll use + // external costs accumulator instead of + // returning costs from `map` call. + let maybe_item = self + .follow_reference( + absolute_path.as_slice().into(), + allow_cache, + transaction, + grove_version, + ) + .unwrap_add_cost(&mut cost)?; + + match maybe_item { + Element::Item(item, _) => { + Ok(QueryItemOrSumReturnType::ItemData(item)) + } + Element::SumItem(sum_value, _) => { + Ok(QueryItemOrSumReturnType::SumValue(sum_value)) + } + Element::SumTree(_, sum_value, _) => { + Ok(QueryItemOrSumReturnType::SumValue(sum_value)) + } + _ => Err(Error::InvalidQuery( + "the reference must result in an item", + )), + } + } + _ => Err(Error::CorruptedCodeExecution( + "reference after query must have absolute paths", + )), + } + } + Element::Item(item, _) => Ok(QueryItemOrSumReturnType::ItemData(item)), + Element::SumItem(sum_value, _) => { + Ok(QueryItemOrSumReturnType::SumValue(sum_value)) + } + Element::SumTree(_, sum_value, _) => { + Ok(QueryItemOrSumReturnType::SumValue(sum_value)) + } + Element::Tree(..) 
=> Err(Error::InvalidQuery( + "path_queries can only refer to items, sum items, references and sum \ + trees", + )), + } + } + _ => Err(Error::CorruptedCodeExecution( + "query returned incorrect result type", + )), + }) + .collect::, Error>>(); + + let results = cost_return_on_error_no_add!(&cost, results_wrapped); + Ok((results, skipped)).wrap_with_cost(cost) + } + /// Retrieves only SumItem elements that match a path query pub fn query_sums( &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(Vec, u16), Error> { + check_grovedb_v0_with_cost!( + "query_sums", + grove_version.grovedb_versions.operations.query.query_sums + ); let mut cost = OperationCost::default(); let (elements, skipped) = cost_return_on_error!( @@ -297,8 +476,11 @@ where { self.query_raw( path_query, allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, - transaction + transaction, + grove_version ) ); @@ -320,6 +502,7 @@ where { absolute_path.as_slice().into(), allow_cache, transaction, + grove_version, ) .unwrap_add_cost(&mut cost)?; @@ -360,10 +543,29 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { - Element::get_raw_path_query(&self.db, path_query, allow_cache, result_type, transaction) + check_grovedb_v0_with_cost!( + "query_raw", + grove_version.grovedb_versions.operations.query.query_raw + ); + Element::get_path_query( + &self.db, + path_query, + QueryOptions { + allow_get_raw: true, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + 
error_if_intermediate_path_tree_not_present, + }, + result_type, + transaction, + grove_version, + ) } /// Splits the result set of a path query by query path. @@ -372,29 +574,45 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "query_keys_optional", + grove_version + .grovedb_versions + .operations + .query + .query_keys_optional + ); let max_results = cost_return_on_error_default!(path_query.query.limit.ok_or( - Error::NotSupported("limits must be set in query_keys_optional",) + Error::NotSupported("limits must be set in query_keys_optional".to_string()) )) as usize; if path_query.query.offset.is_some() { return Err(Error::NotSupported( - "offsets are not supported in query_raw_keys_optional", + "offsets are not supported in query_raw_keys_optional".to_string(), )) .wrap_with_cost(OperationCost::default()); } let mut cost = OperationCost::default(); - let terminal_keys = - cost_return_on_error_no_add!(&cost, path_query.terminal_keys(max_results)); + let terminal_keys = cost_return_on_error_no_add!( + &cost, + path_query.terminal_keys(max_results, grove_version) + ); let (elements, _) = cost_return_on_error!( &mut cost, self.query( path_query, allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryPathKeyElementTrioResultType, - transaction + transaction, + grove_version ) ); @@ -415,29 +633,45 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "query_raw_keys_optional", + grove_version + .grovedb_versions + .operations + 
.query + .query_raw_keys_optional + ); let max_results = cost_return_on_error_default!(path_query.query.limit.ok_or( - Error::NotSupported("limits must be set in query_raw_keys_optional",) + Error::NotSupported("limits must be set in query_raw_keys_optional".to_string()) )) as usize; if path_query.query.offset.is_some() { return Err(Error::NotSupported( - "offsets are not supported in query_raw_keys_optional", + "offsets are not supported in query_raw_keys_optional".to_string(), )) .wrap_with_cost(OperationCost::default()); } let mut cost = OperationCost::default(); - let terminal_keys = - cost_return_on_error_no_add!(&cost, path_query.terminal_keys(max_results)); + let terminal_keys = cost_return_on_error_no_add!( + &cost, + path_query.terminal_keys(max_results, grove_version) + ); let (elements, _) = cost_return_on_error!( &mut cost, self.query_raw( path_query, allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryPathKeyElementTrioResultType, - transaction + transaction, + grove_version ) ); @@ -460,6 +694,7 @@ mod tests { use std::collections::HashMap; use grovedb_merk::proofs::{query::query_item::QueryItem, Query}; + use grovedb_version::version::GroveVersion; use pretty_assertions::assert_eq; use crate::{ @@ -470,7 +705,8 @@ mod tests { #[test] fn test_query_raw_keys_options() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -478,6 +714,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -487,6 +724,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -496,6 +734,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree 
successfully"); @@ -507,7 +746,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("should get successfully"); @@ -527,7 +766,8 @@ mod tests { #[test] fn test_query_raw_keys_options_with_range() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -535,6 +775,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -544,6 +785,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -553,6 +795,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -563,7 +806,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("should get successfully"); @@ -584,7 +827,8 @@ mod tests { #[test] fn test_query_raw_keys_options_with_range_inclusive() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -592,6 +836,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -601,6 +846,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should 
insert subtree successfully"); @@ -610,6 +856,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -620,7 +867,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("should get successfully"); @@ -644,7 +891,8 @@ mod tests { #[test] fn test_query_raw_keys_options_with_range_bounds() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -652,6 +900,7 @@ mod tests { Element::new_item(b"empty".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -661,6 +910,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -670,6 +920,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -679,6 +930,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -688,7 +940,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(4), None)); - db.query_raw_keys_optional(&path_query, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err("range a should error"); @@ -697,7 +949,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 3 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(3), None)); - db.query_raw_keys_optional(&path_query, 
true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("range b should not error"); @@ -706,7 +958,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 4 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(3), None)); - db.query_raw_keys_optional(&path_query, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err("range c should error"); @@ -715,7 +967,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 3 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(2), None)); - db.query_raw_keys_optional(&path_query, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err("range d should error"); @@ -723,14 +975,15 @@ mod tests { query.insert_range(b"z".to_vec()..b"10".to_vec()); let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - db.query_raw_keys_optional(&path_query, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err("range using 2 bytes should error"); } #[test] fn test_query_raw_keys_options_with_empty_start_range() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -738,6 +991,7 @@ mod tests { Element::new_item(b"empty".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -747,6 +1001,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -756,6 +1011,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree 
successfully"); @@ -765,6 +1021,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -774,7 +1031,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("range starting with null should not error"); @@ -802,17 +1059,26 @@ mod tests { #[test] fn test_query_raw_keys_options_with_subquery_path() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) - .unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -822,6 +1088,7 @@ mod tests { Element::new_item(b"1 in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -831,6 +1098,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -840,6 +1108,7 @@ mod tests { Element::new_item(b"1 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -849,6 +1118,7 @@ mod tests { Element::new_item(b"5 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -857,7 +1127,7 @@ mod tests { query.insert_range(b"".to_vec()..b"c".to_vec()); let path = vec![TEST_LEAF.to_vec()]; 
let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - db.query_keys_optional(&path_query, true, None) + db.query_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err("range should error because we didn't subquery"); @@ -867,7 +1137,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); @@ -888,7 +1158,7 @@ mod tests { assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"1".to_vec())), Some(&None) - ); // because we are subquerying 1 + ); // because we are sub-querying 1 assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"4".to_vec())), None @@ -906,17 +1176,26 @@ mod tests { #[test] fn test_query_raw_keys_options_with_subquery() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) - .unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -926,6 +1205,7 @@ mod tests { Element::new_item(b"1 in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -935,6 +1215,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -944,6 +1225,7 @@ mod tests { 
Element::new_item(b"1 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -953,6 +1235,7 @@ mod tests { Element::new_item(b"5 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -962,6 +1245,7 @@ mod tests { Element::new_item(b"2 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -975,7 +1259,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); @@ -997,11 +1281,11 @@ mod tests { assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"1".to_vec())), Some(&None) - ); // because we are subquerying 1 + ); // because we are sub-querying 1 assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"2".to_vec())), Some(&None) - ); // because we are subquerying 1 + ); // because we are sub-querying 1 assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"4".to_vec())), None @@ -1026,18 +1310,224 @@ mod tests { } #[test] - fn test_query_raw_keys_options_with_subquery_and_subquery_path() { - let db = make_test_grovedb(); + fn test_query_raw_keys_options_with_subquery_having_intermediate_paths_missing() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"2", + 
Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"3", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"1"].as_ref(), + b"deep_1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"1", b"deep_1"].as_ref(), + b"deeper_1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"1", b"deep_1", b"deeper_1"].as_ref(), + b"2", + Element::new_item(b"found_me".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"2"].as_ref(), + b"1", + Element::new_item(b"1 in 2".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"2"].as_ref(), + b"5", + Element::new_item(b"5 in 2".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"2"].as_ref(), + b"2", + Element::new_item(b"2 in 2".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); + + let mut sub_query = Query::new(); + sub_query.insert_key(b"1".to_vec()); + sub_query.insert_key(b"2".to_vec()); + let mut query = Query::new(); + query.insert_keys(vec![b"1".to_vec(), b"2".to_vec(), b"3".to_vec()]); + query.set_subquery_path(vec![b"deep_1".to_vec(), b"deeper_1".to_vec()]); + query.set_subquery(sub_query); + let path = vec![TEST_LEAF.to_vec()]; + let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); + + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) + .unwrap() 
+ .expect_err( + "query with subquery should error if error_if_intermediate_path_tree_not_present \ + is set to true", + ); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) + let raw_result = db + .query_raw_keys_optional(&path_query, true, true, false, None, GroveVersion::latest()) .unwrap() - .expect("should insert subtree successfully"); + .expect("query with subquery should not error"); + + // because is 99 ascii, and we have empty too = 100 then x 2 + assert_eq!(raw_result.len(), 6); + + let expected_result = vec![ + ( + vec![ + b"test_leaf".to_vec(), + b"1".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"1".to_vec(), + None, + ), + ( + vec![ + b"test_leaf".to_vec(), + b"1".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"2".to_vec(), + Some(Element::new_item(b"found_me".to_vec())), + ), + ( + vec![ + b"test_leaf".to_vec(), + b"2".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"1".to_vec(), + None, + ), + ( + vec![ + b"test_leaf".to_vec(), + b"2".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"2".to_vec(), + None, + ), + ( + vec![ + b"test_leaf".to_vec(), + b"3".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"1".to_vec(), + None, + ), + ( + vec![ + b"test_leaf".to_vec(), + b"3".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"2".to_vec(), + None, + ), + ]; + + assert_eq!(raw_result, expected_result); + } + + #[test] + fn test_query_raw_keys_options_with_subquery_and_subquery_path() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1047,6 +1537,7 @@ mod 
tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1056,6 +1547,7 @@ mod tests { Element::new_item(b"2 in null/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1065,6 +1557,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1074,6 +1567,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1083,6 +1577,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1092,6 +1587,7 @@ mod tests { Element::new_item(b"2 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1101,6 +1597,7 @@ mod tests { Element::new_item(b"5 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1123,7 +1620,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); @@ -1187,17 +1684,26 @@ mod tests { #[test] fn test_query_raw_keys_options_with_subquery_and_conditional_subquery() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) - .unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), 
b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1207,6 +1713,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1216,6 +1723,7 @@ mod tests { Element::new_item(b"2 in null/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1225,6 +1733,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1234,6 +1743,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1243,6 +1753,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1252,6 +1763,7 @@ mod tests { Element::new_item(b"2 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1261,6 +1773,7 @@ mod tests { Element::new_item(b"5 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1290,7 +1803,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); @@ -1355,26 +1868,36 @@ mod tests { #[test] fn test_query_keys_options_with_subquery_and_conditional_subquery_and_reference() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [ANOTHER_TEST_LEAF].as_ref(), b"5", Element::new_item(b"ref result".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree 
successfully"); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) - .unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1384,6 +1907,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1393,6 +1917,7 @@ mod tests { Element::new_item(b"2 in null/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1402,6 +1927,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1411,6 +1937,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1420,6 +1947,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1429,6 +1957,7 @@ mod tests { Element::new_item(b"2 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1441,6 +1970,7 @@ mod tests { ), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1470,7 +2000,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let result = db - .query_keys_optional(&path_query, true, None) + .query_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); diff --git a/grovedb/src/operations/get/worst_case.rs b/grovedb/src/operations/get/worst_case.rs index 
126f2b5b5..7554a9111 100644 --- a/grovedb/src/operations/get/worst_case.rs +++ b/grovedb/src/operations/get/worst_case.rs @@ -1,38 +1,12 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Worst case get costs #[cfg(feature = "full")] use grovedb_costs::OperationCost; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::RocksDbStorage; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use crate::Error; #[cfg(feature = "full")] use crate::{ batch::{key_info::KeyInfo, KeyInfoPath}, @@ -47,7 +21,16 @@ impl GroveDb { key: &KeyInfo, max_element_size: u32, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "worst_case_for_has_raw", + grove_version + .grovedb_versions + .operations + .get + .worst_case_for_has_raw + ); let mut cost = OperationCost::default(); GroveDb::add_worst_case_has_raw_cost::( &mut cost, @@ -55,8 +38,9 @@ impl GroveDb { key, max_element_size, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Worst case cost for get raw @@ -65,7 +49,16 @@ impl GroveDb { key: &KeyInfo, max_element_size: u32, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "worst_case_for_get_raw", + grove_version + .grovedb_versions + .operations + .get + .worst_case_for_get_raw + ); let mut cost = OperationCost::default(); GroveDb::add_worst_case_get_raw_cost::( &mut cost, @@ -73,8 +66,9 @@ impl GroveDb { key, max_element_size, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Worst case cost for get @@ -84,7 +78,16 @@ impl GroveDb { max_element_size: u32, max_references_sizes: Vec, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "worst_case_for_get", + grove_version + .grovedb_versions + .operations + .get + .worst_case_for_get + ); let mut cost = OperationCost::default(); GroveDb::add_worst_case_get_cost::( &mut cost, @@ -93,7 +96,8 @@ impl GroveDb { max_element_size, in_parent_tree_using_sums, max_references_sizes, - ); - cost + 
grove_version, + )?; + Ok(cost) } } diff --git a/grovedb/src/operations/insert/mod.rs b/grovedb/src/operations/insert/mod.rs index 9b83ff87f..b7e00fc1e 100644 --- a/grovedb/src/operations/insert/mod.rs +++ b/grovedb/src/operations/insert/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Insert operations #[cfg(feature = "full")] @@ -43,6 +15,9 @@ use grovedb_storage::rocksdb_storage::{ PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext, }; use grovedb_storage::{Storage, StorageBatch}; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; #[cfg(feature = "full")] use crate::{ @@ -97,11 +72,17 @@ impl GroveDb { element: Element, options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "insert", + grove_version.grovedb_versions.operations.insert.insert + ); + let subtree_path: SubtreePath = path.into(); let batch = StorageBatch::new(); @@ -113,6 +94,7 @@ impl GroveDb { options.unwrap_or_default(), transaction, &batch, + grove_version, ) } else { self.insert_without_transaction( @@ -121,6 +103,7 @@ impl GroveDb { element, options.unwrap_or_default(), &batch, + grove_version, ) }; @@ -139,7 +122,17 @@ impl GroveDb { options: InsertOptions, transaction: &'db Transaction, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "insert_on_transaction", + grove_version + .grovedb_versions + .operations + .insert + .insert_on_transaction + ); + let mut cost = OperationCost::default(); let mut merk_cache: HashMap, Merk> = @@ -153,13 +146,20 @@ impl GroveDb { element, options, transaction, - batch + batch, + grove_version ) ); merk_cache.insert(path.clone(), merk); cost_return_on_error!( &mut cost, - self.propagate_changes_with_transaction(merk_cache, path, transaction, batch) + self.propagate_changes_with_transaction( + merk_cache, + path, + transaction, + batch, + grove_version + ) ); Ok(()).wrap_with_cost(cost) @@ -172,7 +172,17 @@ impl GroveDb { element: Element, options: InsertOptions, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + 
"insert_without_transaction", + grove_version + .grovedb_versions + .operations + .insert + .insert_without_transaction + ); + let mut cost = OperationCost::default(); let mut merk_cache: HashMap, Merk> = @@ -180,13 +190,20 @@ impl GroveDb { let merk = cost_return_on_error!( &mut cost, - self.add_element_without_transaction(&path.to_vec(), key, element, options, batch) + self.add_element_without_transaction( + &path.to_vec(), + key, + element, + options, + batch, + grove_version + ) ); merk_cache.insert(path.clone(), merk); cost_return_on_error!( &mut cost, - self.propagate_changes_without_transaction(merk_cache, path, batch) + self.propagate_changes_without_transaction(merk_cache, path, batch, grove_version) ); Ok(()).wrap_with_cost(cost) @@ -205,12 +222,27 @@ impl GroveDb { options: InsertOptions, transaction: &'db Transaction, batch: &'db StorageBatch, + grove_version: &GroveVersion, ) -> CostResult>, Error> { + check_grovedb_v0_with_cost!( + "add_element_on_transaction", + grove_version + .grovedb_versions + .operations + .insert + .add_element_on_transaction + ); + let mut cost = OperationCost::default(); let mut subtree_to_insert_into = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(path.clone(), transaction, Some(batch)) + self.open_transactional_merk_at_path( + path.clone(), + transaction, + Some(batch), + grove_version + ) ); // if we don't allow a tree override then we should check @@ -218,7 +250,12 @@ impl GroveDb { let maybe_element_bytes = cost_return_on_error!( &mut cost, subtree_to_insert_into - .get(key, true) + .get( + key, + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) .map_err(|e| Error::CorruptedData(e.to_string())) ); if let Some(element_bytes) = maybe_element_bytes { @@ -231,11 +268,13 @@ impl GroveDb { if options.validate_insertion_does_not_override_tree { let element = cost_return_on_error_no_add!( &cost, - Element::deserialize(element_bytes.as_slice()).map_err(|_| { - 
Error::CorruptedData(String::from("unable to deserialize element")) - }) + Element::deserialize(element_bytes.as_slice(), grove_version).map_err( + |_| { + Error::CorruptedData(String::from("unable to deserialize element")) + } + ) ); - if element.is_tree() { + if element.is_any_tree() { return Err(Error::OverrideNotAllowed( "insertion not allowed to override tree", )) @@ -260,13 +299,19 @@ impl GroveDb { self.open_transactional_merk_at_path( referenced_path.into(), transaction, - Some(batch) + Some(batch), + grove_version, ) ); let referenced_element_value_hash_opt = cost_return_on_error!( &mut cost, - Element::get_value_hash(&subtree_for_reference, referenced_key, true) + Element::get_value_hash( + &subtree_for_reference, + referenced_key, + true, + grove_version + ) ); let referenced_element_value_hash = cost_return_on_error!( @@ -294,6 +339,7 @@ impl GroveDb { key, referenced_element_value_hash, Some(options.as_merk_options()), + grove_version, ) ); } @@ -310,7 +356,8 @@ impl GroveDb { &mut subtree_to_insert_into, key, NULL_HASH, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -321,7 +368,8 @@ impl GroveDb { element.insert( &mut subtree_to_insert_into, key, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -342,18 +390,33 @@ impl GroveDb { element: Element, options: InsertOptions, batch: &'db StorageBatch, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "add_element_without_transaction", + grove_version + .grovedb_versions + .operations + .insert + .add_element_without_transaction + ); + let mut cost = OperationCost::default(); let mut subtree_to_insert_into = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path.into(), Some(batch)) + self.open_non_transactional_merk_at_path(path.into(), Some(batch), grove_version) ); if options.checks_for_override() { let maybe_element_bytes = cost_return_on_error!( 
&mut cost, subtree_to_insert_into - .get(key, true) + .get( + key, + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) .map_err(|e| Error::CorruptedData(e.to_string())) ); if let Some(element_bytes) = maybe_element_bytes { @@ -366,11 +429,13 @@ impl GroveDb { if options.validate_insertion_does_not_override_tree { let element = cost_return_on_error_no_add!( &cost, - Element::deserialize(element_bytes.as_slice()).map_err(|_| { - Error::CorruptedData(String::from("unable to deserialize element")) - }) + Element::deserialize(element_bytes.as_slice(), grove_version).map_err( + |_| { + Error::CorruptedData(String::from("unable to deserialize element")) + } + ) ); - if element.is_tree() { + if element.is_any_tree() { return Err(Error::OverrideNotAllowed( "insertion not allowed to override tree", )) @@ -392,13 +457,22 @@ impl GroveDb { let (referenced_key, referenced_path) = reference_path.split_last().unwrap(); let subtree_for_reference = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(referenced_path.into(), Some(batch)) + self.open_non_transactional_merk_at_path( + referenced_path.into(), + Some(batch), + grove_version + ) ); // when there is no transaction, we don't want to use caching let referenced_element_value_hash_opt = cost_return_on_error!( &mut cost, - Element::get_value_hash(&subtree_for_reference, referenced_key, false) + Element::get_value_hash( + &subtree_for_reference, + referenced_key, + false, + grove_version + ) ); let referenced_element_value_hash = cost_return_on_error!( @@ -425,7 +499,8 @@ impl GroveDb { &mut subtree_to_insert_into, key, referenced_element_value_hash, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -442,7 +517,8 @@ impl GroveDb { &mut subtree_to_insert_into, key, NULL_HASH, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -453,7 +529,8 @@ impl GroveDb { element.insert( &mut 
subtree_to_insert_into, key, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -469,21 +546,31 @@ impl GroveDb { key: &[u8], element: Element, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "insert_if_not_exists", + grove_version + .grovedb_versions + .operations + .insert + .insert_if_not_exists + ); + let mut cost = OperationCost::default(); let subtree_path: SubtreePath<_> = path.into(); if cost_return_on_error!( &mut cost, - self.has_raw(subtree_path.clone(), key, transaction) + self.has_raw(subtree_path.clone(), key, transaction, grove_version) ) { Ok(false).wrap_with_cost(cost) } else { - self.insert(subtree_path, key, element, None, transaction) + self.insert(subtree_path, key, element, None, transaction, grove_version) .map_ok(|_| true) .add_cost(cost) } @@ -498,17 +585,27 @@ impl GroveDb { key: &[u8], element: Element, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(bool, Option), Error> where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "insert_if_changed_value", + grove_version + .grovedb_versions + .operations + .insert + .insert_if_changed_value + ); + let mut cost = OperationCost::default(); let subtree_path: SubtreePath = path.into(); let previous_element = cost_return_on_error!( &mut cost, - self.get_raw_optional(subtree_path.clone(), key, transaction) + self.get_raw_optional(subtree_path.clone(), key, transaction, grove_version) ); let needs_insert = match &previous_element { None => true, @@ -517,7 +614,7 @@ impl GroveDb { if !needs_insert { Ok((false, None)).wrap_with_cost(cost) } else { - self.insert(subtree_path, key, element, None, transaction) + self.insert(subtree_path, key, element, None, transaction, grove_version) .map_ok(|_| (true, previous_element)) .add_cost(cost) } @@ -531,6 +628,7 @@ mod tests { 
storage_cost::{removal::StorageRemovedBytes::NoStorageRemoval, StorageCost}, OperationCost, }; + use grovedb_version::version::GroveVersion; use pretty_assertions::assert_eq; use crate::{ @@ -541,13 +639,21 @@ mod tests { #[test] fn test_non_root_insert_item_without_transaction() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); - db.insert([TEST_LEAF].as_ref(), b"key", element.clone(), None, None) - .unwrap() - .expect("successful insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + element.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful insert"); assert_eq!( - db.get([TEST_LEAF].as_ref(), b"key", None) + db.get([TEST_LEAF].as_ref(), b"key", None, grove_version) .unwrap() .expect("successful get"), element @@ -556,7 +662,8 @@ mod tests { #[test] fn test_non_root_insert_subtree_then_insert_item_without_transaction() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); // Insert a subtree first @@ -566,6 +673,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -576,11 +684,12 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); assert_eq!( - db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .expect("successful get"), element @@ -589,13 +698,16 @@ mod tests { #[test] fn test_non_root_insert_item_with_transaction() { + let grove_version = GroveVersion::latest(); let item_key = b"key3"; - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); // Check that there's no such key in the DB - let result = 
db.get([TEST_LEAF].as_ref(), item_key, None).unwrap(); + let result = db + .get([TEST_LEAF].as_ref(), item_key, None, grove_version) + .unwrap(); assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); let element1 = Element::new_item(b"ayy".to_vec()); @@ -606,17 +718,25 @@ mod tests { element1, None, Some(&transaction), + grove_version, ) .unwrap() .expect("cannot insert an item into GroveDB"); // The key was inserted inside the transaction, so it shouldn't be // possible to get it back without committing or using transaction - let result = db.get([TEST_LEAF].as_ref(), item_key, None).unwrap(); + let result = db + .get([TEST_LEAF].as_ref(), item_key, None, grove_version) + .unwrap(); assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); // Check that the element can be retrieved when transaction is passed let result_with_transaction = db - .get([TEST_LEAF].as_ref(), item_key, Some(&transaction)) + .get( + [TEST_LEAF].as_ref(), + item_key, + Some(&transaction), + grove_version, + ) .unwrap() .expect("Expected to work"); assert_eq!(result_with_transaction, Element::new_item(b"ayy".to_vec())); @@ -626,7 +746,7 @@ mod tests { // Check that the change was committed let result = db - .get([TEST_LEAF].as_ref(), item_key, None) + .get([TEST_LEAF].as_ref(), item_key, None, grove_version) .unwrap() .expect("Expected transaction to work"); assert_eq!(result, Element::new_item(b"ayy".to_vec())); @@ -634,13 +754,16 @@ mod tests { #[test] fn test_non_root_insert_subtree_with_transaction() { + let grove_version = GroveVersion::latest(); let subtree_key = b"subtree_key"; - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); // Check that there's no such key in the DB - let result = db.get([TEST_LEAF].as_ref(), subtree_key, None).unwrap(); + let result = db + .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) + .unwrap(); assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); db.insert( @@ 
-649,15 +772,23 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("cannot insert an item into GroveDB"); - let result = db.get([TEST_LEAF].as_ref(), subtree_key, None).unwrap(); + let result = db + .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) + .unwrap(); assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); let result_with_transaction = db - .get([TEST_LEAF].as_ref(), subtree_key, Some(&transaction)) + .get( + [TEST_LEAF].as_ref(), + subtree_key, + Some(&transaction), + grove_version, + ) .unwrap() .expect("Expected to work"); assert_eq!(result_with_transaction, Element::empty_tree()); @@ -665,7 +796,7 @@ mod tests { db.commit_transaction(transaction).unwrap().unwrap(); let result = db - .get([TEST_LEAF].as_ref(), subtree_key, None) + .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) .unwrap() .expect("Expected transaction to work"); assert_eq!(result, Element::empty_tree()); @@ -673,15 +804,28 @@ mod tests { #[test] fn test_insert_if_not_exists() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // Insert twice at the same path assert!(db - .insert_if_not_exists([TEST_LEAF].as_ref(), b"key1", Element::empty_tree(), None) + .insert_if_not_exists( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + grove_version + ) .unwrap() .expect("Provided valid path")); assert!(!db - .insert_if_not_exists([TEST_LEAF].as_ref(), b"key1", Element::empty_tree(), None) + .insert_if_not_exists( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + grove_version + ) .unwrap() .expect("Provided valid path")); @@ -692,6 +836,7 @@ mod tests { b"key1", Element::empty_tree(), None, + grove_version, ) .unwrap(); assert!(matches!(result, Err(Error::InvalidParentLayerPath(_)))); @@ -699,6 +844,7 @@ mod tests { #[test] fn test_one_insert_item_cost() { + let grove_version = GroveVersion::latest(); let db 
= make_empty_grovedb(); let tx = db.start_transaction(); @@ -709,6 +855,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("should insert"); @@ -758,12 +905,20 @@ mod tests { #[test] fn test_one_insert_sum_item_in_sum_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"s", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("expected to add upper tree"); + db.insert( + EMPTY_PATH, + b"s", + Element::empty_sum_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("expected to add upper tree"); let cost = db .insert( @@ -772,6 +927,7 @@ mod tests { Element::new_sum_item(5), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("should insert"); @@ -816,12 +972,20 @@ mod tests { #[test] fn test_one_insert_sum_item_under_sum_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"s", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("expected to add upper tree"); + db.insert( + EMPTY_PATH, + b"s", + Element::empty_sum_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("expected to add upper tree"); db.insert( [b"s".as_slice()].as_ref(), @@ -829,6 +993,7 @@ mod tests { Element::new_sum_item(5), None, Some(&tx), + grove_version, ) .unwrap() .expect("should insert"); @@ -840,6 +1005,7 @@ mod tests { Element::new_sum_item(6), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("should insert"); @@ -867,13 +1033,18 @@ mod tests { // Child Heights 2 // Total 37 + 85 + 48 = 170 + + // replaced bytes + // 133 for key1 (higher node/same merk level) + // ? 
+ assert_eq!( cost, OperationCost { seek_count: 7, storage_cost: StorageCost { added_bytes: 170, - replaced_bytes: 209, // todo: verify + replaced_bytes: 217, removed_bytes: NoStorageRemoval }, storage_loaded_bytes: 232, @@ -884,12 +1055,20 @@ mod tests { #[test] fn test_one_insert_bigger_sum_item_under_sum_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"s", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("expected to add upper tree"); + db.insert( + EMPTY_PATH, + b"s", + Element::empty_sum_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("expected to add upper tree"); db.insert( [b"s".as_slice()].as_ref(), @@ -897,6 +1076,7 @@ mod tests { Element::new_sum_item(126), None, Some(&tx), + grove_version, ) .unwrap() .expect("should insert"); @@ -909,6 +1089,7 @@ mod tests { Element::new_sum_item(32768), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("should insert"); @@ -942,7 +1123,7 @@ mod tests { seek_count: 7, storage_cost: StorageCost { added_bytes: 170, - replaced_bytes: 211, // todo: verify + replaced_bytes: 217, // todo: verify removed_bytes: NoStorageRemoval }, storage_loaded_bytes: 237, @@ -953,6 +1134,7 @@ mod tests { #[test] fn test_one_insert_item_cost_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -963,6 +1145,7 @@ mod tests { Element::new_item_with_flags(b"cat".to_vec(), Some(b"dog".to_vec())), None, Some(&tx), + grove_version, ) .cost; // Explanation for 183 storage_written_bytes @@ -1012,11 +1195,19 @@ mod tests { #[test] fn test_one_insert_empty_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + 
None, + Some(&tx), + grove_version, + ) .cost; // Explanation for 183 storage_written_bytes @@ -1064,6 +1255,7 @@ mod tests { #[test] fn test_one_insert_empty_sum_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1074,6 +1266,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .cost; // Explanation for 183 storage_written_bytes @@ -1123,6 +1316,7 @@ mod tests { #[test] fn test_one_insert_empty_tree_cost_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1133,6 +1327,7 @@ mod tests { Element::empty_tree_with_flags(Some(b"cat".to_vec())), None, Some(&tx), + grove_version, ) .cost; // Explanation for 183 storage_written_bytes @@ -1185,12 +1380,20 @@ mod tests { #[test] fn test_one_insert_item_cost_under_tree() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); let cost = db .insert( @@ -1199,6 +1402,7 @@ mod tests { Element::new_item(b"test".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .unwrap(); @@ -1256,6 +1460,7 @@ mod tests { #[test] fn test_one_insert_item_with_apple_flags_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1266,6 +1471,7 @@ mod tests { Element::new_item_with_flags(b"test".to_vec(), Some(b"apple".to_vec())), None, Some(&tx), + grove_version, ) .cost_as_result() .unwrap(); @@ -1319,12 +1525,20 @@ mod tests { #[test] fn test_one_insert_item_with_flags_cost_under_tree() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - 
db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); let cost = db .insert( @@ -1333,6 +1547,7 @@ mod tests { Element::new_item_with_flags(b"test".to_vec(), Some(b"apple".to_vec())), None, Some(&tx), + grove_version, ) .cost_as_result() .unwrap(); @@ -1404,6 +1619,7 @@ mod tests { #[test] fn test_one_insert_item_with_flags_cost_under_tree_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1413,6 +1629,7 @@ mod tests { Element::empty_tree_with_flags(Some(b"cat".to_vec())), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1424,6 +1641,7 @@ mod tests { Element::new_item_with_flags(b"test".to_vec(), Some(b"apple".to_vec())), None, Some(&tx), + grove_version, ) .cost_as_result() .unwrap(); @@ -1497,6 +1715,7 @@ mod tests { #[test] fn test_one_update_item_same_cost_at_root() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1506,6 +1725,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1517,6 +1737,7 @@ mod tests { Element::new_item(b"dog".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1563,12 +1784,20 @@ mod tests { #[test] fn test_one_update_same_cost_in_underlying_tree() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); db.insert( [b"tree".as_slice()].as_ref(), @@ -1576,6 +1805,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), 
+ grove_version, ) .unwrap() .unwrap(); @@ -1587,6 +1817,7 @@ mod tests { Element::new_item(b"dog".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1607,6 +1838,7 @@ mod tests { #[test] fn test_one_update_same_cost_in_underlying_sum_tree_bigger_sum_item() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1616,6 +1848,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1626,6 +1859,7 @@ mod tests { Element::new_sum_item(15), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1637,6 +1871,7 @@ mod tests { Element::new_sum_item(1000000), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1658,6 +1893,7 @@ mod tests { #[test] fn test_one_update_same_cost_in_underlying_sum_tree_bigger_sum_item_parent_sum_tree_already_big( ) { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1667,6 +1903,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1677,6 +1914,7 @@ mod tests { Element::new_sum_item(1000000), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1687,6 +1925,7 @@ mod tests { Element::new_sum_item(15), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1698,6 +1937,7 @@ mod tests { Element::new_sum_item(1000000), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1707,7 +1947,7 @@ mod tests { seek_count: 9, // todo: verify this storage_cost: StorageCost { added_bytes: 0, - replaced_bytes: 405, // todo: verify this + replaced_bytes: 409, // todo: verify this removed_bytes: NoStorageRemoval }, storage_loaded_bytes: 487, // todo verify this @@ -1718,6 +1958,7 @@ mod tests { #[test] fn test_one_update_same_cost_in_underlying_sum_tree_smaller_sum_item() { + let grove_version = 
GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1727,6 +1968,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1737,6 +1979,7 @@ mod tests { Element::new_sum_item(1000000), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1748,6 +1991,7 @@ mod tests { Element::new_sum_item(15), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1768,12 +2012,20 @@ mod tests { #[test] fn test_one_update_bigger_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); db.insert( [b"tree".as_slice()].as_ref(), @@ -1781,6 +2033,7 @@ mod tests { Element::new_item(b"test".to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1792,6 +2045,7 @@ mod tests { Element::new_item(b"test1".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1812,12 +2066,20 @@ mod tests { #[test] fn test_one_update_tree_bigger_cost_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); db.insert( [b"tree".as_slice()].as_ref(), @@ -1825,6 +2087,7 @@ mod tests { Element::new_tree(None), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1840,6 +2103,7 @@ mod tests { base_root_storage_is_free: true, }), Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); diff --git 
a/grovedb/src/operations/is_empty_tree.rs b/grovedb/src/operations/is_empty_tree.rs index 40abd62fe..07c349991 100644 --- a/grovedb/src/operations/is_empty_tree.rs +++ b/grovedb/src/operations/is_empty_tree.rs @@ -1,36 +1,11 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Check if empty tree operations #[cfg(feature = "full")] use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; use grovedb_path::SubtreePath; +#[cfg(feature = "full")] +use grovedb_version::error::GroveVersionError; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "full")] use crate::{util::merk_optional_tx, Element, Error, GroveDb, TransactionArg}; @@ -42,20 +17,32 @@ impl GroveDb { &self, path: P, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "is_empty_tree", + grove_version.grovedb_versions.operations.get.is_empty_tree + ); let mut cost = OperationCost::default(); let path: SubtreePath = path.into(); cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction) + self.check_subtree_exists_path_not_found(path.clone(), transaction, grove_version) ); - merk_optional_tx!(&mut cost, self.db, path, None, transaction, subtree, { - Ok(subtree.is_empty_tree().unwrap_add_cost(&mut cost)).wrap_with_cost(cost) - }) + merk_optional_tx!( + &mut cost, + self.db, + path, + None, + transaction, + subtree, + grove_version, + { Ok(subtree.is_empty_tree().unwrap_add_cost(&mut cost)).wrap_with_cost(cost) } + ) } } diff --git a/grovedb/src/operations/mod.rs b/grovedb/src/operations/mod.rs new file mode 100644 index 000000000..ba9b85999 --- /dev/null +++ b/grovedb/src/operations/mod.rs @@ -0,0 +1,18 @@ +//! 
Operations for the manipulation of GroveDB state + +#[cfg(feature = "full")] +pub(crate) mod auxiliary; +#[cfg(feature = "full")] +pub mod delete; +#[cfg(feature = "full")] +pub(crate) mod get; +#[cfg(feature = "full")] +pub mod insert; +#[cfg(feature = "full")] +pub(crate) mod is_empty_tree; + +#[cfg(any(feature = "full", feature = "verify"))] +pub mod proof; + +#[cfg(feature = "full")] +pub use get::{QueryItemOrSumReturnType, MAX_REFERENCE_HOPS}; diff --git a/grovedb/src/operations/proof.rs b/grovedb/src/operations/proof.rs deleted file mode 100644 index 1734c6c6c..000000000 --- a/grovedb/src/operations/proof.rs +++ /dev/null @@ -1,36 +0,0 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! 
Proof operations - -#[cfg(feature = "full")] -mod generate; -#[cfg(any(feature = "full", feature = "verify"))] -pub mod util; -#[cfg(any(feature = "full", feature = "verify"))] -pub mod verify; diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index 22bf49a95..a5297eaf7 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -1,93 +1,54 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Generate proof operations -// TODO: entire file is due for a refactor, need some kind of path generator -// that supports multiple implementations for verbose and non-verbose -// generation +use std::collections::BTreeMap; -use grovedb_costs::cost_return_on_error_default; -#[cfg(feature = "full")] use grovedb_costs::{ - cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, + cost_return_on_error, cost_return_on_error_default, cost_return_on_error_no_add, CostResult, + CostsExt, OperationCost, }; -#[cfg(feature = "full")] use grovedb_merk::{ - proofs::{encode_into, Node, Op}, + proofs::{encode_into, query::QueryItem, Node, Op}, tree::value_hash, - KVIterator, Merk, ProofWithoutEncodingResult, + Merk, ProofWithoutEncodingResult, }; -use grovedb_path::SubtreePath; -#[cfg(feature = "full")] use grovedb_storage::StorageContext; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; -#[cfg(feature = "full")] -use crate::element::helpers::raw_decode; -#[cfg(feature = "full")] +#[cfg(feature = "proof_debug")] +use crate::query_result_type::QueryResultType; use crate::{ - operations::proof::util::{ - reduce_limit_and_offset_by, write_to_vec, ProofTokenType, EMPTY_TREE_HASH, + operations::proof::{ + util::hex_to_ascii, GroveDBProof, GroveDBProofV0, LayerProof, ProveOptions, }, reference_path::path_from_reference_path_type, - Element, Error, GroveDb, PathQuery, Query, -}; -use crate::{ - operations::proof::util::{write_slice_of_slice_to_slice, write_slice_to_vec}, - versioning::{prepend_version_to_bytes, PROOF_VERSION}, + Element, Error, GroveDb, PathQuery, }; -#[cfg(feature = "full")] -type LimitOffset = (Option, Option); - -#[cfg(feature = "full")] impl GroveDb { /// Prove one or more path queries. - /// If we more than one path query, we merge into a single path query before - /// proving. 
- pub fn prove_query_many(&self, query: Vec<&PathQuery>) -> CostResult, Error> { - if query.len() > 1 { - let query = cost_return_on_error_default!(PathQuery::merge(query)); - self.prove_query(&query) - } else { - self.prove_query(query[0]) - } - } - - /// Prove one or more path queries verbose. - /// If we more than one path query, we merge into a single path query before - /// proving verbose. - pub fn prove_verbose_many(&self, query: Vec<&PathQuery>) -> CostResult, Error> { + /// If we have more than one path query, we merge into a single path query + /// before proving. + pub fn prove_query_many( + &self, + query: Vec<&PathQuery>, + prove_options: Option, + grove_version: &GroveVersion, + ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "prove_query_many", + grove_version + .grovedb_versions + .operations + .proof + .prove_query_many + ); if query.len() > 1 { - let query = cost_return_on_error_default!(PathQuery::merge(query)); - self.prove_verbose(&query) + let query = cost_return_on_error_default!(PathQuery::merge(query, grove_version)); + self.prove_query(&query, prove_options, grove_version) } else { - self.prove_verbose(query[0]) + self.prove_query(query[0], prove_options, grove_version) } } @@ -95,535 +56,212 @@ impl GroveDb { /// doesn't allow for subset verification /// Proofs generated with this can only be verified by the path query used /// to generate them. - pub fn prove_query(&self, query: &PathQuery) -> CostResult, Error> { - self.prove_internal(query, false) - } - - /// Generate a verbose proof for a given path query - /// Any path query that is a subset of the original proof generating path - /// query can be used to verify this (subset verification) - pub fn prove_verbose(&self, query: &PathQuery) -> CostResult, Error> { - // TODO: we need to solve the localized limit and offset problem. 
- // when using a path query that has a limit and offset value, - // to get the expected behaviour, you need to know exactly - // how the proving internals work and how your state looks. - self.prove_internal(query, true) - } - - /// Generates a verbose or non verbose proof based on a bool - fn prove_internal(&self, query: &PathQuery, is_verbose: bool) -> CostResult, Error> { - let mut cost = OperationCost::default(); - - let mut proof_result = - cost_return_on_error_default!(prepend_version_to_bytes(vec![], PROOF_VERSION)); - - let mut limit: Option = query.query.limit; - let mut offset: Option = query.query.offset; - - let path_slices = query.path.iter().map(|x| x.as_slice()).collect::>(); - - let subtree_exists = self - .check_subtree_exists_path_not_found(path_slices.as_slice().into(), None) - .unwrap_add_cost(&mut cost); - - // if the subtree at the given path doesn't exists, prove that this path - // doesn't point to a valid subtree - match subtree_exists { - Ok(_) => { - // subtree exists - // do nothing - } - Err(_) => { - cost_return_on_error!( - &mut cost, - self.generate_and_store_absent_path_proof( - &path_slices, - &mut proof_result, - is_verbose - ) - ); - // return the absence proof no need to continue proof generation - return Ok(proof_result).wrap_with_cost(cost); - } - } - - // if the subtree exists and the proof type is verbose we need to insert - // the path information to the proof - if is_verbose { - cost_return_on_error!( - &mut cost, - Self::generate_and_store_path_proof(path_slices.clone(), &mut proof_result) - ); - } - - cost_return_on_error!( - &mut cost, - self.prove_subqueries( - &mut proof_result, - path_slices.clone(), - query, - &mut limit, - &mut offset, - true, - is_verbose - ) - ); - cost_return_on_error!( - &mut cost, - self.prove_path(&mut proof_result, path_slices, is_verbose) + pub fn prove_query( + &self, + query: &PathQuery, + prove_options: Option, + grove_version: &GroveVersion, + ) -> CostResult, Error> { + 
check_grovedb_v0_with_cost!( + "prove_query", + grove_version.grovedb_versions.operations.proof.prove_query ); - - Ok(proof_result).wrap_with_cost(cost) + self.prove_internal_serialized(query, prove_options, grove_version) } - /// Perform a pre-order traversal of the tree based on the provided - /// subqueries - fn prove_subqueries( + /// Generates a proof and serializes it + fn prove_internal_serialized( &self, - proofs: &mut Vec, - path: Vec<&[u8]>, - query: &PathQuery, - current_limit: &mut Option, - current_offset: &mut Option, - is_first_call: bool, - is_verbose: bool, - ) -> CostResult<(), Error> { + path_query: &PathQuery, + prove_options: Option, + grove_version: &GroveVersion, + ) -> CostResult, Error> { let mut cost = OperationCost::default(); - let mut to_add_to_result_set: u16 = 0; - - let subtree = cost_return_on_error!( + let proof = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path.as_slice().into(), None) + self.prove_internal(path_query, prove_options, grove_version) ); - if subtree.root_hash().unwrap_add_cost(&mut cost) == EMPTY_TREE_HASH { - cost_return_on_error_no_add!( - &cost, - write_to_vec(proofs, &[ProofTokenType::EmptyTree.into()]) - ); - return Ok(()).wrap_with_cost(cost); - } - - let reached_limit = query.query.limit.is_some() && query.query.limit.unwrap() == 0; - if reached_limit { - if is_first_call { - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &path.as_slice().into(), - &subtree, - &query.query.query, - (*current_limit, *current_offset), - ProofTokenType::SizedMerk, - proofs, - is_verbose, - path.iter().last().unwrap_or(&(&[][..])) - ) - ); - } - return Ok(()).wrap_with_cost(cost); - } - - let mut is_leaf_tree = true; - - let mut kv_iterator = KVIterator::new(subtree.storage.raw_iter(), &query.query.query) - .unwrap_add_cost(&mut cost); - - while let Some((key, value_bytes)) = kv_iterator.next_kv().unwrap_add_cost(&mut cost) { - let mut encountered_absence = false; 
- - let element = cost_return_on_error_no_add!(&cost, raw_decode(&value_bytes)); - match element { - Element::Tree(root_key, _) | Element::SumTree(root_key, ..) => { - let (mut subquery_path, subquery_value) = - Element::subquery_paths_and_value_for_sized_query(&query.query, &key); - - if subquery_value.is_none() && subquery_path.is_none() { - // this element should be added to the result set - // hence we have to update the limit and offset value - reduce_limit_and_offset_by(current_limit, current_offset, 1); - continue; - } - - if root_key.is_none() { - continue; - } - - // if the element is a non empty tree then current tree is not a leaf tree - if is_leaf_tree { - is_leaf_tree = false; - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &path.as_slice().into(), - &subtree, - &query.query.query, - (None, None), - ProofTokenType::Merk, - proofs, - is_verbose, - path.iter().last().unwrap_or(&Default::default()) - ) - ); - } - - let mut new_path = path.clone(); - new_path.push(key.as_ref()); - - let mut query = subquery_value; - - if query.is_some() { - if let Some(subquery_path) = &subquery_path { - for subkey in subquery_path.iter() { - let inner_subtree = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - new_path.as_slice().into(), - None, - ) - ); - - let mut key_as_query = Query::new(); - key_as_query.insert_key(subkey.clone()); - - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &new_path.as_slice().into(), - &inner_subtree, - &key_as_query, - (None, None), - ProofTokenType::Merk, - proofs, - is_verbose, - new_path.iter().last().unwrap_or(&Default::default()) - ) - ); - - new_path.push(subkey); - - if self - .check_subtree_exists_path_not_found( - new_path.as_slice().into(), - None, - ) - .unwrap_add_cost(&mut cost) - .is_err() - { - encountered_absence = true; - break; - } - } - - if encountered_absence { - continue; - } - } - } else if let Some(subquery_path) = &mut 
subquery_path { - if subquery_path.is_empty() { - // nothing to do on this path, since subquery path is empty - // and there is no consecutive subquery value - continue; - } - - let last_key = subquery_path.remove(subquery_path.len() - 1); - - for subkey in subquery_path.iter() { - let inner_subtree = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - new_path.as_slice().into(), - None - ) - ); - - let mut key_as_query = Query::new(); - key_as_query.insert_key(subkey.clone()); - - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &new_path.as_slice().into(), - &inner_subtree, - &key_as_query, - (None, None), - ProofTokenType::Merk, - proofs, - is_verbose, - new_path.iter().last().unwrap_or(&Default::default()) - ) - ); - - new_path.push(subkey); - - // check if the new path points to a valid subtree - // if it does not, we should stop proof generation on this path - if self - .check_subtree_exists_path_not_found( - new_path.as_slice().into(), - None, - ) - .unwrap_add_cost(&mut cost) - .is_err() - { - encountered_absence = true; - break; - } - } - - if encountered_absence { - continue; - } - - let mut key_as_query = Query::new(); - key_as_query.insert_key(last_key); - query = Some(key_as_query); - } else { - return Err(Error::CorruptedCodeExecution("subquery_path must exist")) - .wrap_with_cost(cost); - } - - let new_path_owned = new_path.iter().map(|a| a.to_vec()).collect(); - - let new_path_query = PathQuery::new_unsized(new_path_owned, query.unwrap()); - - if self - .check_subtree_exists_path_not_found(new_path.as_slice().into(), None) - .unwrap_add_cost(&mut cost) - .is_err() - { - continue; - } - - cost_return_on_error!( - &mut cost, - self.prove_subqueries( - proofs, - new_path, - &new_path_query, - current_limit, - current_offset, - false, - is_verbose, - ) - ); - - if *current_limit == Some(0) { - break; - } - } - _ => { - to_add_to_result_set += 1; - } - } - } - - if is_leaf_tree { - // if no useful 
subtree, then we care about the result set of this subtree. - // apply the sized query - let limit_offset = cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &path.as_slice().into(), - &subtree, - &query.query.query, - (*current_limit, *current_offset), - ProofTokenType::SizedMerk, - proofs, - is_verbose, - path.iter().last().unwrap_or(&Default::default()) - ) - ); - - // update limit and offset values - *current_limit = limit_offset.0; - *current_offset = limit_offset.1; - } else { - reduce_limit_and_offset_by(current_limit, current_offset, to_add_to_result_set); + #[cfg(feature = "proof_debug")] + { + println!("constructed proof is {}", proof); } - - Ok(()).wrap_with_cost(cost) + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + let encoded_proof = cost_return_on_error_no_add!( + &cost, + bincode::encode_to_vec(proof, config) + .map_err(|e| Error::CorruptedData(format!("unable to encode proof {}", e))) + ); + Ok(encoded_proof).wrap_with_cost(cost) } - /// Given a path, construct and append a set of proofs that shows there is - /// a valid path from the root of the db to that point. 
- fn prove_path( + /// Generates a proof + fn prove_internal( &self, - proof_result: &mut Vec, - path_slices: Vec<&[u8]>, - is_verbose: bool, - ) -> CostResult<(), Error> { + path_query: &PathQuery, + prove_options: Option, + grove_version: &GroveVersion, + ) -> CostResult { let mut cost = OperationCost::default(); - // generate proof to show that the path leads up to the root - let mut split_path = path_slices.split_last(); - while let Some((key, path_slice)) = split_path { - let subtree = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(path_slice.into(), None) - ); - let mut query = Query::new(); - query.insert_key(key.to_vec()); + let prove_options = prove_options.unwrap_or_default(); - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &path_slice.into(), - &subtree, - &query, - (None, None), - ProofTokenType::Merk, - proof_result, - is_verbose, - path_slice.iter().last().unwrap_or(&Default::default()) - ) - ); - split_path = path_slice.split_last(); + if path_query.query.offset.is_some() && path_query.query.offset != Some(0) { + return Err(Error::InvalidQuery( + "proved path queries can not have offsets", + )) + .wrap_with_cost(cost); } - Ok(()).wrap_with_cost(cost) - } - /// Generates query proof given a subtree and appends the result to a proof - /// list - fn generate_and_store_merk_proof<'a, S, B>( - &self, - path: &SubtreePath, - subtree: &'a Merk, - query: &Query, - limit_offset: LimitOffset, - proof_token_type: ProofTokenType, - proofs: &mut Vec, - is_verbose: bool, - key: &[u8], - ) -> CostResult<(Option, Option), Error> - where - S: StorageContext<'a> + 'a, - B: AsRef<[u8]>, - { - if proof_token_type != ProofTokenType::Merk && proof_token_type != ProofTokenType::SizedMerk - { - return Err(Error::InvalidInput( - "expect proof type for merk proof generation to be sized or merk proof type", + if path_query.query.limit == Some(0) { + return Err(Error::InvalidQuery( + "proved path queries can not be 
for limit 0", )) - .wrap_with_cost(Default::default()); + .wrap_with_cost(cost); } - let mut cost = OperationCost::default(); - - let mut proof_result = subtree - .prove_without_encoding(query.clone(), limit_offset.0, limit_offset.1) - .unwrap() - .expect("should generate proof"); + #[cfg(feature = "proof_debug")] + { + // we want to query raw because we want the references to not be resolved at + // this point - cost_return_on_error!(&mut cost, self.post_process_proof(path, &mut proof_result)); + let values = cost_return_on_error!( + &mut cost, + self.query_raw( + path_query, + false, + prove_options.decrease_limit_on_empty_sub_query_result, + false, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + grove_version, + ) + ) + .0; - let mut proof_bytes = Vec::with_capacity(128); - encode_into(proof_result.proof.iter(), &mut proof_bytes); + println!("values are {}", values); - cost_return_on_error_no_add!(&cost, write_to_vec(proofs, &[proof_token_type.into()])); + let precomputed_result_map = cost_return_on_error!( + &mut cost, + self.query_raw( + path_query, + false, + prove_options.decrease_limit_on_empty_sub_query_result, + false, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + grove_version, + ) + ) + .0 + .to_btree_map_level_results(); - // if is verbose, write the key - if is_verbose { - cost_return_on_error_no_add!(&cost, write_slice_to_vec(proofs, key)); + println!("precomputed results are {}", precomputed_result_map); } - // write the merk proof - cost_return_on_error_no_add!(&cost, write_slice_to_vec(proofs, &proof_bytes)); - - Ok((proof_result.limit, proof_result.offset)).wrap_with_cost(cost) - } - - /// Serializes a path and add it to the proof vector - fn generate_and_store_path_proof( - path: Vec<&[u8]>, - proofs: &mut Vec, - ) -> CostResult<(), Error> { - let cost = OperationCost::default(); + let mut limit = path_query.query.limit; - cost_return_on_error_no_add!( - &cost, - write_to_vec(proofs, 
&[ProofTokenType::PathInfo.into()]) + let root_layer = cost_return_on_error!( + &mut cost, + self.prove_subqueries( + vec![], + path_query, + &mut limit, + &prove_options, + grove_version + ) ); - cost_return_on_error_no_add!(&cost, write_slice_of_slice_to_slice(proofs, &path)); - - Ok(()).wrap_with_cost(cost) + Ok(GroveDBProofV0 { + root_layer, + prove_options, + } + .into()) + .wrap_with_cost(cost) } - fn generate_and_store_absent_path_proof( + /// Perform a pre-order traversal of the tree based on the provided + /// subqueries + fn prove_subqueries( &self, - path_slices: &[&[u8]], - proof_result: &mut Vec, - is_verbose: bool, - ) -> CostResult<(), Error> { + path: Vec<&[u8]>, + path_query: &PathQuery, + overall_limit: &mut Option, + prove_options: &ProveOptions, + grove_version: &GroveVersion, + ) -> CostResult { let mut cost = OperationCost::default(); - cost_return_on_error_no_add!( + let query = cost_return_on_error_no_add!( &cost, - write_to_vec(proof_result, &[ProofTokenType::AbsentPath.into()]) + path_query + .query_items_at_path(path.as_slice(), grove_version) + .and_then(|query_items| { + query_items.ok_or(Error::CorruptedPath(format!( + "prove subqueries: path {} should be part of path_query {}", + path.iter() + .map(|a| hex_to_ascii(a)) + .collect::>() + .join("/"), + path_query + ))) + }) ); - let mut current_path: Vec<&[u8]> = vec![]; - let mut split_path = path_slices.split_first(); - while let Some((key, path_slice)) = split_path { - let subtree = self - .open_non_transactional_merk_at_path(current_path.as_slice().into(), None) - .unwrap_add_cost(&mut cost); + let subtree = cost_return_on_error!( + &mut cost, + self.open_non_transactional_merk_at_path(path.as_slice().into(), None, grove_version) + ); - if subtree.is_err() { - break; - } + let limit = if path.len() < path_query.path.len() { + // There is no need for a limit because we are only asking for a single item + None + } else { + *overall_limit + }; - let has_item = Element::get( - 
subtree.as_ref().expect("confirmed not error above"), - key, - true, + let mut merk_proof = cost_return_on_error!( + &mut cost, + self.generate_merk_proof( + &subtree, + &query.items, + query.left_to_right, + limit, + grove_version ) - .unwrap_add_cost(&mut cost); + ); - let mut next_key_query = Query::new(); - next_key_query.insert_key(key.to_vec()); - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - ¤t_path.as_slice().into(), - &subtree.expect("confirmed not error above"), - &next_key_query, - (None, None), - ProofTokenType::Merk, - proof_result, - is_verbose, - current_path.iter().last().unwrap_or(&(&[][..])) - ) + #[cfg(feature = "proof_debug")] + { + println!( + "generated merk proof at level path level [{}], limit is {:?}, {}", + path.iter() + .map(|a| hex_to_ascii(a)) + .collect::>() + .join("/"), + overall_limit, + if query.left_to_right { + "left to right" + } else { + "right to left" + } ); - - current_path.push(key); - - if has_item.is_err() || path_slice.is_empty() { - // reached last key - break; - } - - split_path = path_slice.split_first(); } - Ok(()).wrap_with_cost(cost) - } + let mut lower_layers = BTreeMap::new(); - /// Converts Items to Node::KV from Node::KVValueHash - /// Converts References to Node::KVRefValueHash and sets the value to the - /// referenced element - fn post_process_proof>( - &self, - path: &SubtreePath, - proof_result: &mut ProofWithoutEncodingResult, - ) -> CostResult<(), Error> { - let mut cost = OperationCost::default(); + let mut has_a_result_at_level = false; + let mut done_with_results = false; - for op in proof_result.proof.iter_mut() { + for op in merk_proof.proof.iter_mut() { + done_with_results |= overall_limit == &Some(0); match op { Op::Push(node) | Op::PushInverted(node) => match node { - Node::KV(key, value) | Node::KVValueHash(key, value, ..) => { - let elem = Element::deserialize(value); + Node::KV(key, value) | Node::KVValueHash(key, value, ..) 
+ if !done_with_results => + { + let elem = Element::deserialize(value, grove_version); match elem { Ok(Element::Reference(reference_path, ..)) => { let absolute_path = cost_return_on_error!( @@ -641,11 +279,13 @@ impl GroveDb { self.follow_reference( absolute_path.as_slice().into(), true, - None + None, + grove_version ) ); - let serialized_referenced_elem = referenced_elem.serialize(); + let serialized_referenced_elem = + referenced_elem.serialize(grove_version); if serialized_referenced_elem.is_err() { return Err(Error::CorruptedData(String::from( "unable to serialize element", @@ -657,11 +297,83 @@ impl GroveDb { key.to_owned(), serialized_referenced_elem.expect("confirmed ok above"), value_hash(value).unwrap_add_cost(&mut cost), - ) + ); + if let Some(limit) = overall_limit.as_mut() { + *limit -= 1; + } + has_a_result_at_level |= true; } - Ok(Element::Item(..)) => { - *node = Node::KV(key.to_owned(), value.to_owned()) + Ok(Element::Item(..)) if !done_with_results => { + #[cfg(feature = "proof_debug")] + { + println!("found {}", hex_to_ascii(key)); + } + *node = Node::KV(key.to_owned(), value.to_owned()); + if let Some(limit) = overall_limit.as_mut() { + *limit -= 1; + } + has_a_result_at_level |= true; } + Ok(Element::Tree(Some(_), _)) | Ok(Element::SumTree(Some(_), ..)) + if !done_with_results + && query.has_subquery_or_matching_in_path_on_key(key) => + { + #[cfg(feature = "proof_debug")] + { + println!( + "found tree {}, query is {}", + hex_to_ascii(key), + query + ); + } + // We only want to check in sub nodes for the proof if the tree has + // elements + let mut lower_path = path.clone(); + lower_path.push(key.as_slice()); + + let previous_limit = *overall_limit; + + let layer_proof = cost_return_on_error!( + &mut cost, + self.prove_subqueries( + lower_path, + path_query, + overall_limit, + prove_options, + grove_version, + ) + ); + + if previous_limit != *overall_limit { + // a lower layer updated the limit, don't subtract 1 at this + // level + 
has_a_result_at_level |= true; + } + lower_layers.insert(key.clone(), layer_proof); + } + + Ok(Element::Tree(..)) | Ok(Element::SumTree(..)) + if !done_with_results => + { + #[cfg(feature = "proof_debug")] + { + println!( + "found tree {}, no subquery query is {:?}", + hex_to_ascii(key), + query + ); + } + if let Some(limit) = overall_limit.as_mut() { + *limit -= 1; + } + has_a_result_at_level |= true; + } + // todo: transform the unused trees into a Hash or KVHash to make proof + // smaller Ok(Element::Tree(..)) if + // done_with_results => { *node = + // Node::Hash() // we are done with the + // results, we can modify the proof to alter + // } _ => continue, } } @@ -670,236 +382,62 @@ impl GroveDb { _ => continue, } } - Ok(()).wrap_with_cost(cost) - } -} - -#[cfg(test)] -mod tests { - use grovedb_merk::{execute_proof, proofs::Query}; - use grovedb_storage::StorageBatch; - - use crate::{ - operations::proof::util::{ProofReader, ProofTokenType}, - tests::{common::EMPTY_PATH, make_deep_tree, TEST_LEAF}, - GroveDb, - }; - - #[test] - fn test_path_info_encoding_and_decoding() { - let path = vec![b"a".as_slice(), b"b".as_slice(), b"c".as_slice()]; - let mut proof_vector = vec![]; - GroveDb::generate_and_store_path_proof(path.clone(), &mut proof_vector) - .unwrap() - .unwrap(); - - let mut proof_reader = ProofReader::new(proof_vector.as_slice()); - let decoded_path = proof_reader.read_path_info().unwrap(); - - assert_eq!(path, decoded_path); - } - - #[test] - fn test_reading_of_verbose_proofs() { - let db = make_deep_tree(); - let path = vec![TEST_LEAF, b"innertree"]; - let mut query = Query::new(); - query.insert_all(); + if !has_a_result_at_level + && !done_with_results + && prove_options.decrease_limit_on_empty_sub_query_result + { + #[cfg(feature = "proof_debug")] + { + println!( + "no results at level {}", + path.iter() + .map(|a| hex_to_ascii(a)) + .collect::>() + .join("/") + ); + } + if let Some(limit) = overall_limit.as_mut() { + *limit -= 1; + } + } - let 
batch = StorageBatch::new(); + let mut serialized_merk_proof = Vec::with_capacity(1024); + encode_into(merk_proof.proof.iter(), &mut serialized_merk_proof); - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"innertree"].as_ref().into(), - Some(&batch), - ) - .unwrap() - .unwrap(); - let expected_root_hash = merk.root_hash().unwrap(); - - let mut proof = vec![]; - db.generate_and_store_merk_proof( - &path.as_slice().into(), - &merk, - &query, - (None, None), - ProofTokenType::Merk, - &mut proof, - true, - b"innertree", - ) - .unwrap() - .unwrap(); - assert_ne!(proof.len(), 0); - - let mut proof_reader = ProofReader::new(&proof); - let (proof_token_type, proof, key) = proof_reader.read_verbose_proof().unwrap(); - - assert_eq!(proof_token_type, ProofTokenType::Merk); - assert_eq!(key, Some(b"innertree".to_vec())); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) - .unwrap() - .unwrap(); - assert_eq!(root_hash, expected_root_hash); - assert_eq!(result_set.result_set.len(), 3); - - // what is the key is empty?? 
- let merk = db - .open_non_transactional_merk_at_path(EMPTY_PATH, Some(&batch)) - .unwrap() - .unwrap(); - let expected_root_hash = merk.root_hash().unwrap(); - - let mut proof = vec![]; - db.generate_and_store_merk_proof( - &EMPTY_PATH, - &merk, - &query, - (None, None), - ProofTokenType::Merk, - &mut proof, - true, - &[], - ) - .unwrap() - .unwrap(); - assert_ne!(proof.len(), 0); - - let mut proof_reader = ProofReader::new(&proof); - let (proof_token_type, proof, key) = proof_reader.read_verbose_proof().unwrap(); - - assert_eq!(proof_token_type, ProofTokenType::Merk); - assert_eq!(key, Some(vec![])); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) - .unwrap() - .unwrap(); - assert_eq!(root_hash, expected_root_hash); - assert_eq!(result_set.result_set.len(), 3); + Ok(LayerProof { + merk_proof: serialized_merk_proof, + lower_layers, + }) + .wrap_with_cost(cost) } - #[test] - fn test_reading_verbose_proof_at_key() { - // going to generate an array of multiple proofs with different keys - let db = make_deep_tree(); - let mut proofs = vec![]; - - let mut query = Query::new(); - query.insert_all(); - - // insert all under inner tree - let path = vec![TEST_LEAF, b"innertree"]; - - let batch = StorageBatch::new(); - - let merk = db - .open_non_transactional_merk_at_path(path.as_slice().into(), Some(&batch)) - .unwrap() - .unwrap(); - let inner_tree_root_hash = merk.root_hash().unwrap(); - db.generate_and_store_merk_proof( - &path.as_slice().into(), - &merk, - &query, - (None, None), - ProofTokenType::Merk, - &mut proofs, - true, - path.iter().last().unwrap_or(&(&[][..])), - ) - .unwrap() - .unwrap(); - - // insert all under innertree4 - let path = vec![TEST_LEAF, b"innertree4"]; - let merk = db - .open_non_transactional_merk_at_path(path.as_slice().into(), Some(&batch)) - .unwrap() - .unwrap(); - let inner_tree_4_root_hash = merk.root_hash().unwrap(); - db.generate_and_store_merk_proof( - &path.as_slice().into(), - &merk, - &query, - 
(None, None), - ProofTokenType::Merk, - &mut proofs, - true, - path.iter().last().unwrap_or(&(&[][..])), - ) - .unwrap() - .unwrap(); - - // insert all for deeper_1 - let path: Vec<&[u8]> = vec![b"deep_leaf", b"deep_node_1", b"deeper_1"]; - let merk = db - .open_non_transactional_merk_at_path(path.as_slice().into(), Some(&batch)) - .unwrap() - .unwrap(); - let deeper_1_root_hash = merk.root_hash().unwrap(); - db.generate_and_store_merk_proof( - &path.as_slice().into(), - &merk, - &query, - (None, None), - ProofTokenType::Merk, - &mut proofs, - true, - path.iter().last().unwrap_or(&(&[][..])), - ) - .unwrap() - .unwrap(); - - // read the proof at innertree - let contextual_proof = proofs.clone(); - let mut proof_reader = ProofReader::new(&contextual_proof); - let (proof_token_type, proof) = proof_reader - .read_verbose_proof_at_key(b"innertree") - .unwrap(); - - assert_eq!(proof_token_type, ProofTokenType::Merk); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) - .unwrap() - .unwrap(); - assert_eq!(root_hash, inner_tree_root_hash); - assert_eq!(result_set.result_set.len(), 3); - - // read the proof at innertree4 - let contextual_proof = proofs.clone(); - let mut proof_reader = ProofReader::new(&contextual_proof); - let (proof_token_type, proof) = proof_reader - .read_verbose_proof_at_key(b"innertree4") - .unwrap(); - - assert_eq!(proof_token_type, ProofTokenType::Merk); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) - .unwrap() - .unwrap(); - assert_eq!(root_hash, inner_tree_4_root_hash); - assert_eq!(result_set.result_set.len(), 2); - - // read the proof at deeper_1 - let contextual_proof = proofs.clone(); - let mut proof_reader = ProofReader::new(&contextual_proof); - let (proof_token_type, proof) = - proof_reader.read_verbose_proof_at_key(b"deeper_1").unwrap(); - - assert_eq!(proof_token_type, ProofTokenType::Merk); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) 
- .unwrap() - .unwrap(); - assert_eq!(root_hash, deeper_1_root_hash); - assert_eq!(result_set.result_set.len(), 3); - - // read the proof at an invalid key - let contextual_proof = proofs.clone(); - let mut proof_reader = ProofReader::new(&contextual_proof); - let reading_result = proof_reader.read_verbose_proof_at_key(b"unknown_key"); - assert!(reading_result.is_err()) + /// Generates query proof given a subtree and appends the result to a proof + /// list + fn generate_merk_proof<'a, S>( + &self, + subtree: &'a Merk, + query_items: &[QueryItem], + left_to_right: bool, + limit: Option, + grove_version: &GroveVersion, + ) -> CostResult + where + S: StorageContext<'a> + 'a, + { + subtree + .prove_unchecked_query_items(query_items, limit, left_to_right, grove_version) + .map_ok(|(proof, limit)| ProofWithoutEncodingResult::new(proof, limit)) + .map_err(|e| { + Error::InternalError(format!( + "failed to generate proof for query_items [{}] error is : {}", + query_items + .iter() + .map(|e| e.to_string()) + .collect::>() + .join(", "), + e + )) + }) } } diff --git a/grovedb/src/operations/proof/mod.rs b/grovedb/src/operations/proof/mod.rs new file mode 100644 index 000000000..88243d59d --- /dev/null +++ b/grovedb/src/operations/proof/mod.rs @@ -0,0 +1,165 @@ +//! Proof operations + +#[cfg(feature = "full")] +mod generate; +pub mod util; +mod verify; + +use std::{collections::BTreeMap, fmt}; + +use bincode::{Decode, Encode}; +use derive_more::From; +use grovedb_merk::proofs::{query::Key, Decoder, Node, Op}; + +use crate::operations::proof::util::{element_hex_to_ascii, hex_to_ascii}; + +#[derive(Debug, Clone, Copy, Encode, Decode)] +pub struct ProveOptions { + /// This tells the proof system to decrease the available limit of the query + /// by 1 in the case of empty subtrees. Generally this should be set to + /// true. The case where this could be set to false is if there is a + /// known structure where we know that there are only a few empty + /// subtrees. 
+ /// + /// !!! Warning !!! Be very careful: + /// If this is set to `false` then you must be sure that the sub queries do + /// not match many trees, Otherwise you could crash the system as the + /// proof system goes through millions of subtrees and eventually runs + /// out of memory + pub decrease_limit_on_empty_sub_query_result: bool, +} + +impl fmt::Display for ProveOptions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "ProveOptions {{ decrease_limit_on_empty_sub_query_result: {} }}", + self.decrease_limit_on_empty_sub_query_result + ) + } +} + +impl Default for ProveOptions { + fn default() -> Self { + ProveOptions { + decrease_limit_on_empty_sub_query_result: true, + } + } +} + +#[derive(Encode, Decode)] +pub struct LayerProof { + pub merk_proof: Vec, + pub lower_layers: BTreeMap, +} + +#[derive(Encode, Decode, From)] +pub enum GroveDBProof { + V0(GroveDBProofV0), +} + +#[derive(Encode, Decode)] +pub struct GroveDBProofV0 { + pub root_layer: LayerProof, + pub prove_options: ProveOptions, +} + +impl fmt::Display for LayerProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "LayerProof {{")?; + writeln!(f, " merk_proof: {}", decode_merk_proof(&self.merk_proof))?; + if !self.lower_layers.is_empty() { + writeln!(f, " lower_layers: {{")?; + for (key, layer_proof) in &self.lower_layers { + writeln!(f, " {} => {{", hex_to_ascii(key))?; + for line in format!("{}", layer_proof).lines() { + writeln!(f, " {}", line)?; + } + writeln!(f, " }}")?; + } + writeln!(f, " }}")?; + } + write!(f, "}}") + } +} + +impl fmt::Display for GroveDBProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + GroveDBProof::V0(proof) => write!(f, "{}", proof), + } + } +} + +impl fmt::Display for GroveDBProofV0 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "GroveDBProofV0 {{")?; + for line in format!("{}", self.root_layer).lines() { + writeln!(f, " {}", line)?; + } + 
write!(f, "}}") + } +} + +fn decode_merk_proof(proof: &[u8]) -> String { + let mut result = String::new(); + let ops = Decoder::new(proof); + + for (i, op) in ops.enumerate() { + match op { + Ok(op) => { + result.push_str(&format!("\n {}: {}", i, op_to_string(&op))); + } + Err(e) => { + result.push_str(&format!("\n {}: Error decoding op: {}", i, e)); + } + } + } + + result +} + +fn op_to_string(op: &Op) -> String { + match op { + Op::Push(node) => format!("Push({})", node_to_string(node)), + Op::PushInverted(node) => format!("PushInverted({})", node_to_string(node)), + Op::Parent => "Parent".to_string(), + Op::Child => "Child".to_string(), + Op::ParentInverted => "ParentInverted".to_string(), + Op::ChildInverted => "ChildInverted".to_string(), + } +} + +fn node_to_string(node: &Node) -> String { + match node { + Node::Hash(hash) => format!("Hash(HASH[{}])", hex::encode(hash)), + Node::KVHash(kv_hash) => format!("KVHash(HASH[{}])", hex::encode(kv_hash)), + Node::KV(key, value) => { + format!("KV({}, {})", hex_to_ascii(key), element_hex_to_ascii(value)) + } + Node::KVValueHash(key, value, value_hash) => format!( + "KVValueHash({}, {}, HASH[{}])", + hex_to_ascii(key), + element_hex_to_ascii(value), + hex::encode(value_hash) + ), + Node::KVDigest(key, value_hash) => format!( + "KVDigest({}, HASH[{}])", + hex_to_ascii(key), + hex::encode(value_hash) + ), + Node::KVRefValueHash(key, value, value_hash) => format!( + "KVRefValueHash({}, {}, HASH[{}])", + hex_to_ascii(key), + element_hex_to_ascii(value), + hex::encode(value_hash) + ), + Node::KVValueHashFeatureType(key, value, value_hash, feature_type) => format!( + "KVValueHashFeatureType({}, {}, HASH[{}], {:?})", + hex_to_ascii(key), + element_hex_to_ascii(value), + hex::encode(value_hash), + feature_type + ), + } +} diff --git a/grovedb/src/operations/proof/util.rs b/grovedb/src/operations/proof/util.rs index a3802a620..d6b34eccb 100644 --- a/grovedb/src/operations/proof/util.rs +++ b/grovedb/src/operations/proof/util.rs 
@@ -1,360 +1,166 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -#[cfg(any(feature = "full", feature = "verify"))] -use std::io::Read; -#[cfg(feature = "full")] -use std::io::Write; +use std::fmt; use grovedb_merk::{ - proofs::query::{Key, Path, ProvedKeyValue}, - CryptoHash, + proofs::query::{Key, Path, ProvedKeyOptionalValue, ProvedKeyValue}, + CryptoHash, Error, }; -#[cfg(any(feature = "full", feature = "verify"))] -use integer_encoding::{VarInt, VarIntReader}; +use grovedb_version::version::GroveVersion; + +use crate::Element; -use crate::operations::proof::verify::ProvedKeyValues; #[cfg(any(feature = "full", feature = "verify"))] -use crate::Error; +pub type ProvedKeyValues = Vec; #[cfg(any(feature = "full", feature = "verify"))] -pub const EMPTY_TREE_HASH: [u8; 32] = [0; 32]; +pub type ProvedKeyOptionalValues = Vec; -pub type ProofTokenInfo = (ProofTokenType, Vec, Option>); +#[cfg(any(feature = "full", feature = "verify"))] +pub type ProvedPathKeyValues = Vec; #[cfg(any(feature = "full", feature = "verify"))] -#[derive(Debug, PartialEq, Eq)] -/// Proof type -// TODO: there might be a better name for this -pub enum ProofTokenType { - Merk, - SizedMerk, - EmptyTree, - AbsentPath, - PathInfo, - Invalid, -} +pub type ProvedPathKeyOptionalValues = Vec; +/// Proved path-key-value #[cfg(any(feature = "full", feature = "verify"))] -impl From for u8 { - fn from(proof_token_type: ProofTokenType) -> Self { - match proof_token_type { - ProofTokenType::Merk => 0x01, - ProofTokenType::SizedMerk => 0x02, - ProofTokenType::EmptyTree => 0x04, - ProofTokenType::AbsentPath => 0x05, - ProofTokenType::PathInfo => 0x06, - ProofTokenType::Invalid => 0x10, - } - } +#[derive(Debug, PartialEq, Eq)] +pub struct ProvedPathKeyOptionalValue { + /// Path + pub path: Path, + /// Key + pub key: Key, + /// Value + pub value: Option>, + /// Proof + pub proof: CryptoHash, } #[cfg(any(feature = "full", feature = "verify"))] -impl From for ProofTokenType { - fn from(val: u8) -> Self { - match val { - 0x01 => ProofTokenType::Merk, - 0x02 => 
ProofTokenType::SizedMerk, - 0x04 => ProofTokenType::EmptyTree, - 0x05 => ProofTokenType::AbsentPath, - 0x06 => ProofTokenType::PathInfo, - _ => ProofTokenType::Invalid, - } +impl fmt::Display for ProvedPathKeyOptionalValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "ProvedPathKeyValue {{")?; + writeln!( + f, + " path: [{}],", + self.path + .iter() + .map(|p| hex_to_ascii(p)) + .collect::>() + .join(", ") + )?; + writeln!(f, " key: {},", hex_to_ascii(&self.key))?; + writeln!( + f, + " value: {},", + optional_element_hex_to_ascii(self.value.as_ref()) + )?; + writeln!(f, " proof: {}", hex::encode(self.proof))?; + write!(f, "}}") } } +/// Proved path-key-value #[cfg(any(feature = "full", feature = "verify"))] -#[derive(Debug)] -// TODO: possibility for a proof writer?? -/// Proof reader -pub struct ProofReader<'a> { - proof_data: &'a [u8], - is_verbose: bool, +#[derive(Debug, PartialEq, Eq)] +pub struct ProvedPathKeyValue { + /// Path + pub path: Path, + /// Key + pub key: Key, + /// Value + pub value: Vec, + /// Proof + pub proof: CryptoHash, } #[cfg(any(feature = "full", feature = "verify"))] -impl<'a> ProofReader<'a> { - /// New proof reader - pub fn new(proof_data: &'a [u8]) -> Self { - Self { - proof_data, - is_verbose: false, - } - } - - /// New proof reader with verbose_status - pub fn new_with_verbose_status(proof_data: &'a [u8], is_verbose: bool) -> Self { - Self { - proof_data, - is_verbose, - } - } - - /// For non verbose proof read the immediate next proof, for verbose proof - /// read the first proof that matches a given key - pub fn read_next_proof(&mut self, key: &[u8]) -> Result<(ProofTokenType, Vec), Error> { - if self.is_verbose { - self.read_verbose_proof_at_key(key) - } else { - let (proof_token_type, proof, _) = self.read_proof_with_optional_type(None)?; - Ok((proof_token_type, proof)) - } - } - - /// Read the next proof, return the proof type - pub fn read_proof(&mut self) -> Result { - if self.is_verbose { - 
self.read_verbose_proof_with_optional_type(None) - } else { - self.read_proof_with_optional_type(None) - } - } - - /// Read verbose proof - pub fn read_verbose_proof(&mut self) -> Result { - self.read_verbose_proof_with_optional_type(None) - } - - /// Reads data from proof into slice of specific size - fn read_into_slice(&mut self, buf: &mut [u8]) -> Result { - self.proof_data - .read(buf) - .map_err(|_| Error::CorruptedData(String::from("failed to read proof data"))) - } - - /// Read varint encoded length information from proof data - fn read_length_data(&mut self) -> Result { - self.proof_data - .read_varint() - .map_err(|_| Error::InvalidProof("expected length data")) - } - - /// Read proof with optional type - pub fn read_proof_with_optional_type( - &mut self, - expected_data_type_option: Option, - ) -> Result { - let (proof_token_type, proof, _) = - self.read_proof_internal_with_optional_type(expected_data_type_option, false)?; - Ok((proof_token_type, proof, None)) +impl fmt::Display for ProvedPathKeyValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "ProvedPathKeyValue {{")?; + writeln!( + f, + " path: [{}],", + self.path + .iter() + .map(|p| hex_to_ascii(p)) + .collect::>() + .join(", ") + )?; + writeln!(f, " key: {},", hex_to_ascii(&self.key))?; + writeln!(f, " value: {},", element_hex_to_ascii(self.value.as_ref()))?; + writeln!(f, " proof: {}", hex::encode(self.proof))?; + write!(f, "}}") } +} - /// Read verbose proof with optional type - pub fn read_verbose_proof_with_optional_type( - &mut self, - expected_data_type_option: Option, - ) -> Result { - let (proof_token_type, proof, key) = - self.read_proof_internal_with_optional_type(expected_data_type_option, true)?; - Ok(( - proof_token_type, +impl From for ProvedPathKeyOptionalValue { + fn from(value: ProvedPathKeyValue) -> Self { + let ProvedPathKeyValue { + path, + key, + value, proof, - Some(key.ok_or(Error::InvalidProof( - "key must exist for verbose merk proofs", - 
))?), - )) - } - - /// Read verbose proof at key - /// Returns an error if it can't find a proof for that key - pub fn read_verbose_proof_at_key( - &mut self, - expected_key: &[u8], - ) -> Result<(ProofTokenType, Vec), Error> { - let (proof_token_type, proof, _) = loop { - let (proof_token_type, proof, key) = self.read_verbose_proof()?; - let key = key.expect("read_verbose_proof enforces that this exists"); - if key.as_slice() == expected_key { - break (proof_token_type, proof, key); - } - }; - - Ok((proof_token_type, proof)) - } - - /// Read proof with optional type - pub fn read_proof_internal_with_optional_type( - &mut self, - expected_data_type_option: Option, - is_verbose: bool, - ) -> Result { - let mut data_type = [0; 1]; - self.read_into_slice(&mut data_type)?; - - if let Some(expected_data_type) = expected_data_type_option { - if data_type != [expected_data_type] { - return Err(Error::InvalidProof("wrong data_type")); - } - } - - let proof_token_type: ProofTokenType = data_type[0].into(); - - if proof_token_type == ProofTokenType::EmptyTree - || proof_token_type == ProofTokenType::AbsentPath - { - return Ok((proof_token_type, vec![], None)); - } - - let (proof, key) = if proof_token_type == ProofTokenType::Merk - || proof_token_type == ProofTokenType::SizedMerk - { - // if verbose we need to read the key first - let key = if is_verbose { - let key_length = self.read_length_data()?; - - let mut key = vec![0; key_length]; - self.read_into_slice(&mut key)?; - - Some(key) - } else { - None - }; - - let proof_length = self.read_length_data()?; - - let mut proof = vec![0; proof_length]; - self.read_into_slice(&mut proof)?; - - (proof, key) - } else { - return Err(Error::InvalidProof("expected merk or sized merk proof")); - }; - - Ok((proof_token_type, proof, key)) - } - - /// Reads path information from the proof vector - pub fn read_path_info(&mut self) -> Result>, Error> { - let mut data_type = [0; 1]; - self.read_into_slice(&mut data_type)?; - - if data_type 
!= [ProofTokenType::PathInfo.into()] { - return Err(Error::InvalidProof("wrong data_type, expected path_info")); - } - - let mut path = vec![]; - let path_slice_len = self.read_length_data()?; + } = value; - for _ in 0..path_slice_len { - let path_len = self.read_length_data()?; - let mut path_value = vec![0; path_len]; - self.read_into_slice(&mut path_value)?; - path.push(path_value); + ProvedPathKeyOptionalValue { + path, + key, + value: Some(value), + proof, } - - Ok(path) } } -#[cfg(feature = "full")] -/// Write to vec -// TODO: this can error out handle the error -pub fn write_to_vec(dest: &mut W, value: &[u8]) -> Result<(), Error> { - dest.write_all(value) - .map_err(|_e| Error::InternalError("failed to write to vector")) -} - -#[cfg(feature = "full")] -/// Write a slice to the vector, first write the length of the slice -pub fn write_slice_to_vec(dest: &mut W, value: &[u8]) -> Result<(), Error> { - write_to_vec(dest, value.len().encode_var_vec().as_slice())?; - write_to_vec(dest, value)?; - Ok(()) -} +impl TryFrom for ProvedPathKeyValue { + type Error = Error; -#[cfg(feature = "full")] -/// Write a slice of a slice to a flat vector:w -pub fn write_slice_of_slice_to_slice(dest: &mut W, value: &[&[u8]]) -> Result<(), Error> { - // write the number of slices we are about to write - write_to_vec(dest, value.len().encode_var_vec().as_slice())?; - for inner_slice in value { - write_slice_to_vec(dest, inner_slice)?; + fn try_from(value: ProvedPathKeyOptionalValue) -> Result { + let ProvedPathKeyOptionalValue { + path, + key, + value, + proof, + } = value; + let value = value.ok_or(Error::InvalidProofError(format!( + "expected {}", + hex_to_ascii(&key) + )))?; + Ok(ProvedPathKeyValue { + path, + key, + value, + proof, + }) } - Ok(()) } -#[cfg(any(feature = "full", feature = "verify"))] -pub fn reduce_limit_and_offset_by( - limit: &mut Option, - offset: &mut Option, - n: u16, -) -> bool { - let mut skip_limit = false; - let mut n = n; - - if let Some(offset_value) = 
*offset { - if offset_value > 0 { - if offset_value >= n { - *offset = Some(offset_value - n); - n = 0; - } else { - *offset = Some(0); - n -= offset_value; - } - skip_limit = true; +impl ProvedPathKeyValue { + // TODO: make path a reference + /// Consumes the ProvedKeyValue and returns a ProvedPathKeyValue given a + /// Path + pub fn from_proved_key_value(path: Path, proved_key_value: ProvedKeyValue) -> Self { + Self { + path, + key: proved_key_value.key, + value: proved_key_value.value, + proof: proved_key_value.proof, } } - if let Some(limit_value) = *limit { - if !skip_limit && limit_value > 0 { - if limit_value >= n { - *limit = Some(limit_value - n); - } else { - *limit = Some(0); - } - } + /// Transforms multiple ProvedKeyValues to their equivalent + /// ProvedPathKeyValue given a Path + pub fn from_proved_key_values(path: Path, proved_key_values: ProvedKeyValues) -> Vec { + proved_key_values + .into_iter() + .map(|pkv| Self::from_proved_key_value(path.clone(), pkv)) + .collect() } - - skip_limit } -/// Proved path-key-values -pub type ProvedPathKeyValues = Vec; - -/// Proved path-key-value -#[cfg(any(feature = "full", feature = "verify"))] -#[derive(Debug, PartialEq, Eq)] -pub struct ProvedPathKeyValue { - /// Path - pub path: Path, - /// Key - pub key: Key, - /// Value - pub value: Vec, - /// Proof - pub proof: CryptoHash, -} - -impl ProvedPathKeyValue { +impl ProvedPathKeyOptionalValue { // TODO: make path a reference /// Consumes the ProvedKeyValue and returns a ProvedPathKeyValue given a /// Path - pub fn from_proved_key_value(path: Path, proved_key_value: ProvedKeyValue) -> Self { + pub fn from_proved_key_value(path: Path, proved_key_value: ProvedKeyOptionalValue) -> Self { Self { path, key: proved_key_value.key, @@ -365,7 +171,10 @@ impl ProvedPathKeyValue { /// Transforms multiple ProvedKeyValues to their equivalent /// ProvedPathKeyValue given a Path - pub fn from_proved_key_values(path: Path, proved_key_values: ProvedKeyValues) -> Vec { + pub fn 
from_proved_key_values( + path: Path, + proved_key_values: ProvedKeyOptionalValues, + ) -> Vec { proved_key_values .into_iter() .map(|pkv| Self::from_proved_key_value(path.clone(), pkv)) @@ -375,46 +184,26 @@ impl ProvedPathKeyValue { #[cfg(test)] mod tests { - use grovedb_merk::proofs::query::ProvedKeyValue; - - use crate::operations::proof::util::{ProofTokenType, ProvedPathKeyValue}; - - #[test] - fn test_proof_token_type_encoding() { - assert_eq!(0x01_u8, ProofTokenType::Merk.into()); - assert_eq!(0x02_u8, ProofTokenType::SizedMerk.into()); - assert_eq!(0x04_u8, ProofTokenType::EmptyTree.into()); - assert_eq!(0x05_u8, ProofTokenType::AbsentPath.into()); - assert_eq!(0x06_u8, ProofTokenType::PathInfo.into()); - assert_eq!(0x10_u8, ProofTokenType::Invalid.into()); - } + use grovedb_merk::proofs::query::ProvedKeyOptionalValue; - #[test] - fn test_proof_token_type_decoding() { - assert_eq!(ProofTokenType::Merk, 0x01_u8.into()); - assert_eq!(ProofTokenType::SizedMerk, 0x02_u8.into()); - assert_eq!(ProofTokenType::EmptyTree, 0x04_u8.into()); - assert_eq!(ProofTokenType::AbsentPath, 0x05_u8.into()); - assert_eq!(ProofTokenType::PathInfo, 0x06_u8.into()); - assert_eq!(ProofTokenType::Invalid, 0x10_u8.into()); - } + use crate::operations::proof::util::ProvedPathKeyOptionalValue; #[test] fn test_proved_path_from_single_proved_key_value() { let path = vec![b"1".to_vec(), b"2".to_vec()]; - let proved_key_value = ProvedKeyValue { + let proved_key_value = ProvedKeyOptionalValue { key: b"a".to_vec(), - value: vec![5, 6], + value: Some(vec![5, 6]), proof: [0; 32], }; let proved_path_key_value = - ProvedPathKeyValue::from_proved_key_value(path.clone(), proved_key_value); + ProvedPathKeyOptionalValue::from_proved_key_value(path.clone(), proved_key_value); assert_eq!( proved_path_key_value, - ProvedPathKeyValue { + ProvedPathKeyOptionalValue { path, key: b"a".to_vec(), - value: vec![5, 6], + value: Some(vec![5, 6]), proof: [0; 32] } ); @@ -423,51 +212,116 @@ mod tests { #[test] fn 
test_many_proved_path_from_many_proved_key_value() { let path = vec![b"1".to_vec(), b"2".to_vec()]; - let proved_key_value_a = ProvedKeyValue { + let proved_key_value_a = ProvedKeyOptionalValue { key: b"a".to_vec(), - value: vec![5, 6], + value: Some(vec![5, 6]), proof: [0; 32], }; - let proved_key_value_b = ProvedKeyValue { + let proved_key_value_b = ProvedKeyOptionalValue { key: b"b".to_vec(), - value: vec![5, 7], + value: Some(vec![5, 7]), proof: [1; 32], }; - let proved_key_value_c = ProvedKeyValue { + let proved_key_value_c = ProvedKeyOptionalValue { key: b"c".to_vec(), - value: vec![6, 7], + value: Some(vec![6, 7]), + proof: [2; 32], + }; + let proved_key_value_d = ProvedKeyOptionalValue { + key: b"d".to_vec(), + value: None, proof: [2; 32], }; - let proved_key_values = vec![proved_key_value_a, proved_key_value_b, proved_key_value_c]; + let proved_key_values = vec![ + proved_key_value_a, + proved_key_value_b, + proved_key_value_c, + proved_key_value_d, + ]; let proved_path_key_values = - ProvedPathKeyValue::from_proved_key_values(path.clone(), proved_key_values); - assert_eq!(proved_path_key_values.len(), 3); + ProvedPathKeyOptionalValue::from_proved_key_values(path.clone(), proved_key_values); + assert_eq!(proved_path_key_values.len(), 4); assert_eq!( proved_path_key_values[0], - ProvedPathKeyValue { + ProvedPathKeyOptionalValue { path: path.clone(), key: b"a".to_vec(), - value: vec![5, 6], + value: Some(vec![5, 6]), proof: [0; 32] } ); assert_eq!( proved_path_key_values[1], - ProvedPathKeyValue { + ProvedPathKeyOptionalValue { path: path.clone(), key: b"b".to_vec(), - value: vec![5, 7], + value: Some(vec![5, 7]), proof: [1; 32] } ); assert_eq!( proved_path_key_values[2], - ProvedPathKeyValue { - path, + ProvedPathKeyOptionalValue { + path: path.clone(), key: b"c".to_vec(), - value: vec![6, 7], + value: Some(vec![6, 7]), + proof: [2; 32] + } + ); + + assert_eq!( + proved_path_key_values[3], + ProvedPathKeyOptionalValue { + path, + key: b"d".to_vec(), + 
value: None, proof: [2; 32] } ); } } + +pub fn hex_to_ascii(hex_value: &[u8]) -> String { + // Define the set of allowed characters + const ALLOWED_CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ + abcdefghijklmnopqrstuvwxyz\ + 0123456789_-/\\[]@"; + + // Check if all characters in hex_value are allowed + if hex_value.iter().all(|&c| ALLOWED_CHARS.contains(&c)) { + // Try to convert to UTF-8 + String::from_utf8(hex_value.to_vec()) + .unwrap_or_else(|_| format!("0x{}", hex::encode(hex_value))) + } else { + // Hex encode and prepend "0x" + format!("0x{}", hex::encode(hex_value)) + } +} + +pub fn path_hex_to_ascii(path: &Path) -> String { + path.iter() + .map(|e| hex_to_ascii(e.as_slice())) + .collect::>() + .join("/") +} + +pub fn path_as_slices_hex_to_ascii(path: &[&[u8]]) -> String { + path.into_iter() + .map(|e| hex_to_ascii(e)) + .collect::>() + .join("/") +} +pub fn optional_element_hex_to_ascii(hex_value: Option<&Vec>) -> String { + match hex_value { + None => "None".to_string(), + Some(hex_value) => Element::deserialize(hex_value, GroveVersion::latest()) + .map(|e| e.to_string()) + .unwrap_or_else(|_| hex::encode(hex_value)), + } +} + +pub fn element_hex_to_ascii(hex_value: &[u8]) -> String { + Element::deserialize(hex_value, GroveVersion::latest()) + .map(|e| e.to_string()) + .unwrap_or_else(|_| hex::encode(hex_value)) +} diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index 60b954dcc..31cd9ae51 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -1,200 +1,485 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the 
Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Verify proof operations - -use std::{borrow::Cow, collections::BTreeMap}; - -use grovedb_merk::proofs::query::PathKey; -#[cfg(any(feature = "full", feature = "verify"))] -pub use grovedb_merk::proofs::query::{Path, ProvedKeyValue}; -#[cfg(any(feature = "full", feature = "verify"))] +use std::collections::{BTreeMap, BTreeSet}; + use grovedb_merk::{ - proofs::Query, - tree::{combine_hash, value_hash as value_hash_fn}, + proofs::{ + query::{PathKey, VerifyOptions}, + Query, + }, + tree::{combine_hash, value_hash}, CryptoHash, }; +use grovedb_version::{ + check_grovedb_v0, error::GroveVersionError, version::GroveVersion, TryFromVersioned, + TryIntoVersioned, +}; -use crate::{ - operations::proof::util::{ - reduce_limit_and_offset_by, ProvedPathKeyValue, ProvedPathKeyValues, - }, - query_result_type::PathKeyOptionalElementTrio, - versioning::read_and_consume_proof_version, - SizedQuery, +#[cfg(feature = "proof_debug")] +use crate::operations::proof::util::{ + hex_to_ascii, path_as_slices_hex_to_ascii, path_hex_to_ascii, }; -#[cfg(any(feature = "full", feature = "verify"))] use crate::{ - operations::proof::util::{ - ProofReader, ProofTokenType, ProofTokenType::AbsentPath, EMPTY_TREE_HASH, + operations::proof::{ + 
util::{ProvedPathKeyOptionalValue, ProvedPathKeyValues}, + GroveDBProof, GroveDBProofV0, LayerProof, ProveOptions, }, + query_result_type::PathKeyOptionalElementTrio, Element, Error, GroveDb, PathQuery, }; -#[cfg(any(feature = "full", feature = "verify"))] -pub type ProvedKeyValues = Vec; - -#[cfg(any(feature = "full", feature = "verify"))] -type EncounteredAbsence = bool; - -#[cfg(any(feature = "full", feature = "verify"))] impl GroveDb { - /// Verify proof given a path query - /// Returns the root hash + deserialized elements - pub fn verify_query( + pub fn verify_query_with_options( proof: &[u8], query: &PathQuery, - ) -> Result<([u8; 32], Vec), Error> { - let (root_hash, proved_path_key_values) = Self::verify_query_raw(proof, query)?; - let path_key_optional_elements = proved_path_key_values - .into_iter() - .map(|pkv| pkv.try_into()) - .collect::, Error>>()?; - Ok((root_hash, path_key_optional_elements)) + options: VerifyOptions, + grove_version: &GroveVersion, + ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_query_with_options", + grove_version + .grovedb_versions + .operations + .proof + .verify_query_with_options + ); + if options.absence_proofs_for_non_existing_searched_keys { + // must have a limit + query.query.limit.ok_or(Error::NotSupported( + "limits must be set in verify_query_with_absence_proof".to_string(), + ))?; + } + + // must have no offset + if query.query.offset.is_some() { + return Err(Error::NotSupported( + "offsets in path queries are not supported for proofs".to_string(), + )); + } + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + let grovedb_proof: GroveDBProof = bincode::decode_from_slice(proof, config) + .map_err(|e| Error::CorruptedData(format!("unable to decode proof: {}", e)))? 
+ .0; + + let (root_hash, result) = + Self::verify_proof_internal(&grovedb_proof, query, options, grove_version)?; + + Ok((root_hash, result)) } - /// Verify proof for a given path query returns serialized elements pub fn verify_query_raw( proof: &[u8], query: &PathQuery, - ) -> Result<([u8; 32], ProvedPathKeyValues), Error> { - let mut verifier = ProofVerifier::new(query); - let hash = verifier.execute_proof(proof, query, false)?; - - Ok((hash, verifier.result_set)) + grove_version: &GroveVersion, + ) -> Result<(CryptoHash, ProvedPathKeyValues), Error> { + check_grovedb_v0!( + "verify_query_raw", + grove_version + .grovedb_versions + .operations + .proof + .verify_query_raw + ); + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + let grovedb_proof: GroveDBProof = bincode::decode_from_slice(proof, config) + .map_err(|e| Error::CorruptedData(format!("unable to decode proof: {}", e)))? + .0; + + let (root_hash, result) = Self::verify_proof_raw_internal( + &grovedb_proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: false, + verify_proof_succinctness: false, + include_empty_trees_in_result: true, + }, + grove_version, + )?; + + Ok((root_hash, result)) } - /// Verify proof given multiple path queries. - /// If we have more than one path query we merge before performing - /// verification. 
- pub fn verify_query_many( - proof: &[u8], - query: Vec<&PathQuery>, - ) -> Result<([u8; 32], ProvedPathKeyValues), Error> { - if query.len() > 1 { - let query = PathQuery::merge(query)?; - GroveDb::verify_query_raw(proof, &query) - } else { - GroveDb::verify_query_raw(proof, query[0]) + fn verify_proof_internal( + proof: &GroveDBProof, + query: &PathQuery, + options: VerifyOptions, + grove_version: &GroveVersion, + ) -> Result<(CryptoHash, Vec), Error> { + match proof { + GroveDBProof::V0(proof_v0) => { + Self::verify_proof_v0_internal(proof_v0, query, options, grove_version) + } } } - /// Given a verbose proof, we can verify it with a subset path query. - /// Returning the root hash and the deserialized result set. - pub fn verify_subset_query( - proof: &[u8], + fn verify_proof_v0_internal( + proof: &GroveDBProofV0, query: &PathQuery, - ) -> Result<([u8; 32], Vec), Error> { - let (root_hash, proved_path_key_values) = Self::verify_subset_query_raw(proof, query)?; - let path_key_optional_elements = proved_path_key_values - .into_iter() - .map(|pkv| pkv.try_into()) - .collect::, Error>>()?; - Ok((root_hash, path_key_optional_elements)) - } + options: VerifyOptions, + grove_version: &GroveVersion, + ) -> Result<(CryptoHash, Vec), Error> { + let mut result = Vec::new(); + let mut limit = query.query.limit; + let root_hash = Self::verify_layer_proof( + &proof.root_layer, + &proof.prove_options, + query, + &mut limit, + &[], + &mut result, + &options, + grove_version, + )?; + + if options.absence_proofs_for_non_existing_searched_keys { + // must have a limit + let max_results = query.query.limit.ok_or(Error::NotSupported( + "limits must be set in verify_query_with_absence_proof".to_string(), + ))? 
as usize; + + let terminal_keys = query.terminal_keys(max_results, grove_version)?; + + // convert the result set to a btree map + let mut result_set_as_map: BTreeMap> = result + .into_iter() + .map(|(path, key, element)| ((path, key), element)) + .collect(); + #[cfg(feature = "proof_debug")] + { + println!( + "terminal keys are [{}] \n result set is [{}]", + terminal_keys + .iter() + .map(|(path, key)| format!( + "path: {} key: {}", + path_hex_to_ascii(path), + hex_to_ascii(key) + )) + .collect::>() + .join(", "), + result_set_as_map + .iter() + .map(|((path, key), e)| { + let element_string = if let Some(e) = e { + e.to_string() + } else { + "None".to_string() + }; + format!( + "path: {} key: {} element: {}", + path_hex_to_ascii(path), + hex_to_ascii(key), + element_string, + ) + }) + .collect::>() + .join(", ") + ); + } - /// Given a verbose proof, we can verify it with a subset path query. - /// Returning the root hash and the serialized result set. - pub fn verify_subset_query_raw( - proof: &[u8], - query: &PathQuery, - ) -> Result<([u8; 32], ProvedPathKeyValues), Error> { - let mut verifier = ProofVerifier::new(query); - let hash = verifier.execute_proof(proof, query, true)?; - Ok((hash, verifier.result_set)) + result = terminal_keys + .into_iter() + .map(|terminal_key| { + let element = result_set_as_map.remove(&terminal_key).flatten(); + (terminal_key.0, terminal_key.1, element) + }) + .collect(); + } + + Ok((root_hash, result)) } - /// Verify non subset query return the absence proof - /// Returns all possible keys within the Path Query with an optional Element - /// Value Element is set to None if absent - pub fn verify_query_with_absence_proof( - proof: &[u8], + fn verify_proof_raw_internal( + proof: &GroveDBProof, query: &PathQuery, - ) -> Result<([u8; 32], Vec), Error> { - Self::verify_with_absence_proof(proof, query, Self::verify_query) + options: VerifyOptions, + grove_version: &GroveVersion, + ) -> Result<(CryptoHash, ProvedPathKeyValues), Error> { 
+ match proof { + GroveDBProof::V0(proof_v0) => { + Self::verify_proof_raw_internal_v0(proof_v0, query, options, grove_version) + } + } } - /// Verify subset query return the absence proof - /// Returns all possible keys within the Path Query with an optional Element - /// Value Element is set to None if absent - pub fn verify_subset_query_with_absence_proof( - proof: &[u8], + fn verify_proof_raw_internal_v0( + proof: &GroveDBProofV0, query: &PathQuery, - ) -> Result<([u8; 32], Vec), Error> { - Self::verify_with_absence_proof(proof, query, Self::verify_subset_query) + options: VerifyOptions, + grove_version: &GroveVersion, + ) -> Result<(CryptoHash, ProvedPathKeyValues), Error> { + let mut result = Vec::new(); + let mut limit = query.query.limit; + let root_hash = Self::verify_layer_proof( + &proof.root_layer, + &proof.prove_options, + query, + &mut limit, + &[], + &mut result, + &options, + grove_version, + )?; + Ok((root_hash, result)) } - /// Verifies the proof and returns both elements in the result set and the - /// elements in query but not in state. - /// Note: This only works for certain path queries. - // TODO: We should not care about terminal keys, as theoretically they can be - // infinite we should perform the absence check solely on the proof and the - // given key, this is a temporary solution - fn verify_with_absence_proof( - proof: &[u8], + fn verify_layer_proof( + layer_proof: &LayerProof, + prove_options: &ProveOptions, query: &PathQuery, - verification_fn: T, - ) -> Result<([u8; 32], Vec), Error> + limit_left: &mut Option, + current_path: &[&[u8]], + result: &mut Vec, + options: &VerifyOptions, + grove_version: &GroveVersion, + ) -> Result where - T: Fn(&[u8], &PathQuery) -> Result<([u8; 32], Vec), Error>, + T: TryFromVersioned, + Error: From<>::Error>, { - // must have a limit - let max_results = query.query.limit.ok_or(Error::NotSupported( - "limits must be set in verify_query_with_absence_proof", - ))? 
as usize; + check_grovedb_v0!( + "verify_layer_proof", + grove_version + .grovedb_versions + .operations + .proof + .verify_layer_proof + ); + let internal_query = query + .query_items_at_path(current_path, grove_version)? + .ok_or(Error::CorruptedPath(format!( + "verify raw: path {} should be part of path_query {}", + current_path + .iter() + .map(hex::encode) + .collect::>() + .join("/"), + query + )))?; + + let level_query = Query { + items: internal_query.items.to_vec(), + left_to_right: internal_query.left_to_right, + ..Default::default() + }; - // must have no offset - if query.query.offset.is_some() { - return Err(Error::NotSupported( - "offsets are not supported for verify_query_with_absence_proof", - )); + let (root_hash, merk_result) = level_query + .execute_proof( + &layer_proof.merk_proof, + *limit_left, + internal_query.left_to_right, + ) + .unwrap() + .map_err(|e| { + eprintln!("{e}"); + Error::InvalidProof(format!("invalid proof verification parameters: {}", e)) + })?; + #[cfg(feature = "proof_debug")] + { + println!( + "current path {} \n merk result is {}", + path_as_slices_hex_to_ascii(current_path), + merk_result + ); } - let terminal_keys = query.terminal_keys(max_results)?; + let mut verified_keys = BTreeSet::new(); - // need to actually verify the query - let (root_hash, result_set) = verification_fn(proof, query)?; + if merk_result.result_set.is_empty() { + if prove_options.decrease_limit_on_empty_sub_query_result { + limit_left.as_mut().map(|limit| *limit -= 1); + } + } else { + for proved_key_value in merk_result.result_set { + let mut path = current_path.to_vec(); + let key = &proved_key_value.key; + let hash = &proved_key_value.proof; + if let Some(value_bytes) = &proved_key_value.value { + let element = Element::deserialize(value_bytes, grove_version)?; + + verified_keys.insert(key.clone()); + + if let Some(lower_layer) = layer_proof.lower_layers.get(key) { + #[cfg(feature = "proof_debug")] + { + println!("lower layer had key {}", 
hex_to_ascii(key)); + } + match element { + Element::Tree(Some(_), _) | Element::SumTree(Some(_), ..) => { + path.push(key); + let lower_hash = Self::verify_layer_proof( + lower_layer, + prove_options, + query, + limit_left, + &path, + result, + options, + grove_version, + )?; + let combined_root_hash = + combine_hash(value_hash(value_bytes).value(), &lower_hash) + .value() + .to_owned(); + if hash != &combined_root_hash { + return Err(Error::InvalidProof(format!( + "Mismatch in lower layer hash, expected {}, got {}", + hex::encode(hash), + hex::encode(combined_root_hash) + ))); + } + if limit_left == &Some(0) { + break; + } + } + Element::Tree(None, _) + | Element::SumTree(None, ..) + | Element::SumItem(..) + | Element::Item(..) + | Element::Reference(..) => { + return Err(Error::InvalidProof( + "Proof has lower layer for a non Tree".into(), + )); + } + } + } else if element.is_any_item() + || !internal_query.has_subquery_or_matching_in_path_on_key(key) + && (options.include_empty_trees_in_result + || !matches!(element, Element::Tree(None, _))) + { + let path_key_optional_value = + ProvedPathKeyOptionalValue::from_proved_key_value( + path.iter().map(|p| p.to_vec()).collect(), + proved_key_value, + ); + #[cfg(feature = "proof_debug")] + { + println!( + "pushing {} limit left after is {:?}", + &path_key_optional_value, limit_left + ); + } + result.push(path_key_optional_value.try_into_versioned(grove_version)?); + + limit_left.as_mut().map(|limit| *limit -= 1); + if limit_left == &Some(0) { + break; + } + } else { + #[cfg(feature = "proof_debug")] + { + println!( + "we have subquery on key {} with value {}: {}", + hex_to_ascii(key), + element, + level_query + ) + } + } + } + } + } - // convert the result set to a btree map - let mut result_set_as_map: BTreeMap> = result_set - .into_iter() - .map(|(path, key, element)| ((path, key), element)) - .collect(); + Ok(root_hash) + } + + pub fn verify_query( + proof: &[u8], + query: &PathQuery, + grove_version: 
&GroveVersion, + ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_query", + grove_version.grovedb_versions.operations.proof.verify_query + ); + Self::verify_query_with_options( + proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: false, + verify_proof_succinctness: true, + include_empty_trees_in_result: false, + }, + grove_version, + ) + } + + pub fn verify_subset_query( + proof: &[u8], + query: &PathQuery, + grove_version: &GroveVersion, + ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_subset_query", + grove_version + .grovedb_versions + .operations + .proof + .verify_subset_query + ); + Self::verify_query_with_options( + proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: false, + verify_proof_succinctness: false, + include_empty_trees_in_result: false, + }, + grove_version, + ) + } - let result_set_with_absence: Vec = terminal_keys - .into_iter() - .map(|terminal_key| { - let element = result_set_as_map.remove(&terminal_key).flatten(); - (terminal_key.0, terminal_key.1, element) - }) - .collect(); + pub fn verify_query_with_absence_proof( + proof: &[u8], + query: &PathQuery, + grove_version: &GroveVersion, + ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_query_with_absence_proof", + grove_version + .grovedb_versions + .operations + .proof + .verify_query_with_absence_proof + ); + Self::verify_query_with_options( + proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: true, + verify_proof_succinctness: true, + include_empty_trees_in_result: false, + }, + grove_version, + ) + } - Ok((root_hash, result_set_with_absence)) + pub fn verify_subset_query_with_absence_proof( + proof: &[u8], + query: &PathQuery, + grove_version: &GroveVersion, + ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_subset_query_with_absence_proof", + grove_version + .grovedb_versions + .operations + .proof + 
.verify_subset_query_with_absence_proof + ); + Self::verify_query_with_options( + proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: true, + verify_proof_succinctness: false, + include_empty_trees_in_result: false, + }, + grove_version, + ) } /// Verify subset proof with a chain of path query functions. @@ -207,13 +492,23 @@ impl GroveDb { proof: &[u8], first_query: &PathQuery, chained_path_queries: Vec, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, Vec>), Error> where C: Fn(Vec) -> Option, { + check_grovedb_v0!( + "verify_query_with_chained_path_queries", + grove_version + .grovedb_versions + .operations + .proof + .verify_query_with_chained_path_queries + ); let mut results = vec![]; - let (last_root_hash, elements) = Self::verify_subset_query(proof, first_query)?; + let (last_root_hash, elements) = + Self::verify_subset_query(proof, first_query, grove_version)?; results.push(elements); // we should iterate over each chained path queries @@ -221,11 +516,14 @@ impl GroveDb { let new_path_query = path_query_generator(results[results.len() - 1].clone()).ok_or( Error::InvalidInput("one of the path query generators returns no path query"), )?; - let (new_root_hash, new_elements) = Self::verify_subset_query(proof, &new_path_query)?; + let (new_root_hash, new_elements) = + Self::verify_subset_query(proof, &new_path_query, grove_version)?; if new_root_hash != last_root_hash { - return Err(Error::InvalidProof( - "root hash for different path queries do no match", - )); + return Err(Error::InvalidProof(format!( + "root hash for different path queries do no match, first is {}, this one is {}", + hex::encode(last_root_hash), + hex::encode(new_root_hash) + ))); } results.push(new_elements); } @@ -233,644 +531,3 @@ impl GroveDb { Ok((last_root_hash, results)) } } - -#[cfg(any(feature = "full", feature = "verify"))] -/// Proof verifier -struct ProofVerifier { - limit: Option, - offset: Option, - result_set: ProvedPathKeyValues, -} - 
-#[cfg(any(feature = "full", feature = "verify"))] -impl ProofVerifier { - /// New query - pub fn new(query: &PathQuery) -> Self { - ProofVerifier { - limit: query.query.limit, - offset: query.query.offset, - result_set: vec![], - } - } - - /// Execute proof - pub fn execute_proof( - &mut self, - proof: &[u8], - query: &PathQuery, - is_verbose: bool, - ) -> Result<[u8; 32], Error> { - let (proof_version, proof) = read_and_consume_proof_version(proof)?; - let mut proof_reader = ProofReader::new_with_verbose_status(proof, is_verbose); - - let path_slices = query.path.iter().map(|x| x.as_slice()).collect::>(); - let mut query = Cow::Borrowed(query); - - // TODO: refactor and add better comments - // if verbose, the first thing we want to do is read the path info - if is_verbose { - let original_path = proof_reader.read_path_info()?; - - if original_path == path_slices { - // do nothing - } else if original_path.len() > path_slices.len() { - // TODO: can we relax this constraint - return Err(Error::InvalidProof( - "original path query path must not be greater than the subset path len", - )); - } else { - let original_path_in_new_path = original_path - .iter() - .all(|key| path_slices.contains(&key.as_slice())); - - if !original_path_in_new_path { - return Err(Error::InvalidProof( - "the original path should be a subset of the subset path", - )); - } else { - // We construct a new path query - let path_not_common = path_slices[original_path.len()..].to_vec(); - let mut path_iter = path_not_common.iter(); - - let mut new_query = Query::new(); - if path_iter.len() >= 1 { - new_query - .insert_key(path_iter.next().expect("confirmed has value").to_vec()); - } - - // need to add the first key to the query - new_query.set_subquery_path(path_iter.map(|a| a.to_vec()).collect()); - new_query.set_subquery(query.query.query.clone()); - - query = Cow::Owned(PathQuery::new( - original_path, - SizedQuery::new(new_query, query.query.limit, query.query.offset), - )); - } - } - } - - 
let (proof_token_type, proof, _) = proof_reader.read_proof()?; - - let root_hash = if proof_token_type == AbsentPath { - self.verify_absent_path(&mut proof_reader, path_slices)? - } else { - let path_owned = query.path.iter().map(|a| a.to_vec()).collect(); - let mut last_subtree_root_hash = self.execute_subquery_proof( - proof_token_type, - proof, - &mut proof_reader, - query.as_ref(), - path_owned, - )?; - - // validate the path elements are connected - self.verify_path_to_root( - query.as_ref(), - query.path.iter().map(|a| a.as_ref()).collect(), - &mut proof_reader, - &mut last_subtree_root_hash, - )? - }; - - Ok(root_hash) - } - - fn execute_subquery_proof( - &mut self, - proof_token_type: ProofTokenType, - proof: Vec, - proof_reader: &mut ProofReader, - query: &PathQuery, - path: Path, - ) -> Result<[u8; 32], Error> { - let last_root_hash: [u8; 32]; - - match proof_token_type { - ProofTokenType::SizedMerk => { - // verify proof with limit and offset values - let verification_result = self.execute_merk_proof( - ProofTokenType::SizedMerk, - &proof, - &query.query.query, - query.query.query.left_to_right, - path, - )?; - - last_root_hash = verification_result.0; - } - ProofTokenType::Merk => { - // for non leaf subtrees, we want to prove that all the queried keys - // have an accompanying proof as long as the limit is non zero - // and their child subtree is not empty - let (proof_root_hash, children) = self.execute_merk_proof( - ProofTokenType::Merk, - &proof, - &query.query.query, - query.query.query.left_to_right, - path, - )?; - - last_root_hash = proof_root_hash; - let children = children.ok_or(Error::InvalidProof( - "MERK_PROOF always returns a result set", - ))?; - - for proved_path_key_value in children { - let ProvedPathKeyValue { - path, - key, - value: value_bytes, - proof: value_hash, - } = proved_path_key_value; - let child_element = Element::deserialize(value_bytes.as_slice())?; - match child_element { - Element::Tree(expected_root_key, _) - | 
Element::SumTree(expected_root_key, ..) => { - let mut expected_combined_child_hash = value_hash; - let mut current_value_bytes = value_bytes; - - if self.limit == Some(0) { - // we are done verifying the subqueries - break; - } - - let (subquery_path, subquery_value) = - Element::subquery_paths_and_value_for_sized_query( - &query.query, - key.as_slice(), - ); - - if subquery_value.is_none() && subquery_path.is_none() { - // add this element to the result set - let skip_limit = reduce_limit_and_offset_by( - &mut self.limit, - &mut self.offset, - 1, - ); - - if !skip_limit { - // only insert to the result set if the offset value is not - // greater than 0 - self.result_set.push( - ProvedPathKeyValue::from_proved_key_value( - path, - ProvedKeyValue { - key, - value: current_value_bytes, - proof: value_hash, - }, - ), - ); - } - - continue; - } - - // What is the equivalent for an empty tree - if expected_root_key.is_none() { - // child node is empty, move on to next - continue; - } - - // update the path, we are about to perform a subquery call - let mut new_path = path.to_owned(); - new_path.push(key); - - if subquery_path.is_some() - && !subquery_path.as_ref().unwrap().is_empty() - { - if subquery_value.is_none() { - self.verify_subquery_path( - proof_reader, - ProofTokenType::SizedMerk, - &mut subquery_path.expect("confirmed it has a value above"), - &mut expected_combined_child_hash, - &mut current_value_bytes, - &mut new_path, - )?; - continue; - } else { - let (_, result_set_opt, encountered_absence) = self - .verify_subquery_path( - proof_reader, - ProofTokenType::Merk, - &mut subquery_path - .expect("confirmed it has a value above"), - &mut expected_combined_child_hash, - &mut current_value_bytes, - &mut new_path, - )?; - - if encountered_absence { - // we hit an absence proof while verifying the subquery path - continue; - } - - let subquery_path_result_set = result_set_opt; - if subquery_path_result_set.is_none() { - // this means a sized proof was 
generated for the subquery - // key - // which is invalid as there exists a subquery value - return Err(Error::InvalidProof( - "expected unsized proof for subquery path as subquery \ - value exists", - )); - } - let subquery_path_result_set = - subquery_path_result_set.expect("confirmed exists above"); - - if subquery_path_result_set.is_empty() { - // we have a valid proof that shows the absence of the - // subquery path in the tree, hence the subquery value - // cannot be applied, move on to the next. - continue; - } - - Self::update_root_key_from_subquery_path_element( - &mut expected_combined_child_hash, - &mut current_value_bytes, - &subquery_path_result_set, - )?; - } - } - - let new_path_query = - PathQuery::new_unsized(vec![], subquery_value.unwrap()); - - let (child_proof_token_type, child_proof) = proof_reader - .read_next_proof(new_path.last().unwrap_or(&Default::default()))?; - - let child_hash = self.execute_subquery_proof( - child_proof_token_type, - child_proof, - proof_reader, - &new_path_query, - new_path, - )?; - - let combined_child_hash = combine_hash( - value_hash_fn(¤t_value_bytes).value(), - &child_hash, - ) - .value() - .to_owned(); - - if combined_child_hash != expected_combined_child_hash { - return Err(Error::InvalidProof( - "child hash doesn't match the expected hash", - )); - } - } - _ => { - // encountered a non tree element, we can't apply a subquery to it - // add it to the result set. 
- if self.limit == Some(0) { - break; - } - - let skip_limit = - reduce_limit_and_offset_by(&mut self.limit, &mut self.offset, 1); - - if !skip_limit { - // only insert to the result set if the offset value is not greater - // than 0 - self.result_set - .push(ProvedPathKeyValue::from_proved_key_value( - path, - ProvedKeyValue { - key, - value: value_bytes, - proof: value_hash, - }, - )); - } - } - } - } - } - ProofTokenType::EmptyTree => { - last_root_hash = EMPTY_TREE_HASH; - } - _ => { - // execute_subquery_proof only expects proofs for merk trees - // root proof is handled separately - return Err(Error::InvalidProof("wrong proof type")); - } - } - Ok(last_root_hash) - } - - /// Deserialize subkey_element and update expected root hash and element - /// value - fn update_root_key_from_subquery_path_element( - expected_child_hash: &mut CryptoHash, - current_value_bytes: &mut Vec, - subquery_path_result_set: &[ProvedPathKeyValue], - ) -> Result<(), Error> { - let elem_value = &subquery_path_result_set[0].value; - let subquery_path_element = Element::deserialize(elem_value) - .map_err(|_| Error::CorruptedData("failed to deserialize element".to_string()))?; - match subquery_path_element { - Element::Tree(..) | Element::SumTree(..) 
=> { - *expected_child_hash = subquery_path_result_set[0].proof; - *current_value_bytes = subquery_path_result_set[0].value.to_owned(); - } - _ => { - // the means that the subquery path pointed to a non tree - // element, this is not valid as you cannot apply the - // the subquery value to non tree items - return Err(Error::InvalidProof( - "subquery path cannot point to non tree element", - )); - } - } - Ok(()) - } - - /// Checks that a valid proof showing the existence or absence of the - /// subquery path is present - fn verify_subquery_path( - &mut self, - proof_reader: &mut ProofReader, - expected_proof_token_type: ProofTokenType, - subquery_path: &mut Path, - expected_root_hash: &mut CryptoHash, - current_value_bytes: &mut Vec, - current_path: &mut Path, - ) -> Result<(CryptoHash, Option, EncounteredAbsence), Error> { - // the subquery path contains at least one item. - let last_key = subquery_path.remove(subquery_path.len() - 1); - - for subquery_key in subquery_path.iter() { - let (proof_token_type, subkey_proof) = - proof_reader.read_next_proof(current_path.last().unwrap_or(&Default::default()))?; - // intermediate proofs are all going to be unsized merk proofs - if proof_token_type != ProofTokenType::Merk { - return Err(Error::InvalidProof( - "expected MERK proof type for intermediate subquery path keys", - )); - } - match proof_token_type { - ProofTokenType::Merk => { - let mut key_as_query = Query::new(); - key_as_query.insert_key(subquery_key.to_owned()); - current_path.push(subquery_key.to_owned()); - - let (proof_root_hash, result_set) = self.execute_merk_proof( - proof_token_type, - &subkey_proof, - &key_as_query, - key_as_query.left_to_right, - current_path.to_owned(), - )?; - - // should always be some as we force the proof type to be MERK - debug_assert!(result_set.is_some(), "{}", true); - - // result_set being empty means we could not find the given key in the subtree - // which essentially means an absence proof - if result_set - .as_ref() - 
.expect("result set should always be some for merk proof type") - .is_empty() - { - return Ok((proof_root_hash, None, true)); - } - - // verify that the elements in the subquery path are linked by root hashes. - let combined_child_hash = - combine_hash(value_hash_fn(current_value_bytes).value(), &proof_root_hash) - .value() - .to_owned(); - - if combined_child_hash != *expected_root_hash { - return Err(Error::InvalidProof( - "child hash doesn't match the expected hash", - )); - } - - // after confirming they are linked use the latest hash values for subsequent - // checks - Self::update_root_key_from_subquery_path_element( - expected_root_hash, - current_value_bytes, - &result_set.expect("confirmed is some"), - )?; - } - _ => { - return Err(Error::InvalidProof( - "expected merk of sized merk proof type for subquery path", - )); - } - } - } - - let (proof_token_type, subkey_proof) = - proof_reader.read_next_proof(current_path.last().unwrap_or(&Default::default()))?; - if proof_token_type != expected_proof_token_type { - return Err(Error::InvalidProof( - "unexpected proof type for subquery path", - )); - } - - match proof_token_type { - ProofTokenType::Merk | ProofTokenType::SizedMerk => { - let mut key_as_query = Query::new(); - key_as_query.insert_key(last_key.to_owned()); - current_path.push(last_key); - - let verification_result = self.execute_merk_proof( - proof_token_type, - &subkey_proof, - &key_as_query, - key_as_query.left_to_right, - current_path.to_owned(), - )?; - - Ok((verification_result.0, verification_result.1, false)) - } - _ => Err(Error::InvalidProof( - "expected merk or sized merk proof type for subquery path", - )), - } - } - - fn verify_absent_path( - &mut self, - proof_reader: &mut ProofReader, - path_slices: Vec<&[u8]>, - ) -> Result<[u8; 32], Error> { - let mut root_key_hash = None; - let mut expected_child_hash = None; - let mut last_result_set: ProvedPathKeyValues = vec![]; - - for key in path_slices { - let (proof_token_type, merk_proof, 
_) = proof_reader.read_proof()?; - if proof_token_type != ProofTokenType::Merk { - return Err(Error::InvalidProof("expected a merk proof for absent path")); - } - - let mut child_query = Query::new(); - child_query.insert_key(key.to_vec()); - - // TODO: don't pass empty vec - let proof_result = self.execute_merk_proof( - ProofTokenType::Merk, - &merk_proof, - &child_query, - true, - // cannot return a result set - Vec::new(), - )?; - - if expected_child_hash.is_none() { - root_key_hash = Some(proof_result.0); - } else { - let combined_hash = combine_hash( - value_hash_fn(last_result_set[0].value.as_slice()).value(), - &proof_result.0, - ) - .value() - .to_owned(); - if Some(combined_hash) != expected_child_hash { - return Err(Error::InvalidProof("proof invalid: invalid parent")); - } - } - - last_result_set = proof_result - .1 - .expect("MERK_PROOF always returns a result set"); - if last_result_set.is_empty() { - // if result set is empty then we have reached the absence point, break - break; - } - - let elem = Element::deserialize(last_result_set[0].value.as_slice())?; - let child_hash = match elem { - Element::Tree(..) | Element::SumTree(..) 
=> Ok(Some(last_result_set[0].proof)), - _ => Err(Error::InvalidProof( - "intermediate proofs should be for trees", - )), - }?; - expected_child_hash = child_hash; - } - - if last_result_set.is_empty() { - if let Some(hash) = root_key_hash { - Ok(hash) - } else { - Err(Error::InvalidProof("proof invalid: no non root tree found")) - } - } else { - Err(Error::InvalidProof("proof invalid: path not absent")) - } - } - - /// Verifies that the correct proof was provided to confirm the path in - /// query - fn verify_path_to_root( - &mut self, - query: &PathQuery, - path_slices: Vec<&[u8]>, - proof_reader: &mut ProofReader, - expected_root_hash: &mut [u8; 32], - ) -> Result<[u8; 32], Error> { - let mut split_path = path_slices.split_last(); - while let Some((key, path_slice)) = split_path { - // for every subtree, there should be a corresponding proof for the parent - // which should prove that this subtree is a child of the parent tree - let (proof_token_type, parent_merk_proof) = - proof_reader.read_next_proof(path_slice.last().unwrap_or(&Default::default()))?; - if proof_token_type != ProofTokenType::Merk { - return Err(Error::InvalidProof("wrong data_type expected merk proof")); - } - - let mut parent_query = Query::new(); - parent_query.insert_key(key.to_vec()); - - let proof_result = self.execute_merk_proof( - ProofTokenType::Merk, - &parent_merk_proof, - &parent_query, - query.query.query.left_to_right, - // TODO: don't pass empty vec - Vec::new(), - )?; - - let result_set = proof_result - .1 - .expect("MERK_PROOF always returns a result set"); - if result_set.is_empty() || &result_set[0].key != key { - return Err(Error::InvalidProof("proof invalid: invalid parent")); - } - - let elem = Element::deserialize(result_set[0].value.as_slice())?; - let child_hash = match elem { - Element::Tree(..) | Element::SumTree(..) 
=> Ok(result_set[0].proof), - _ => Err(Error::InvalidProof( - "intermediate proofs should be for trees", - )), - }?; - - let combined_root_hash = combine_hash( - value_hash_fn(&result_set[0].value).value(), - expected_root_hash, - ) - .value() - .to_owned(); - if child_hash != combined_root_hash { - return Err(Error::InvalidProof( - "Bad path: tree hash does not have expected hash", - )); - } - - *expected_root_hash = proof_result.0; - - split_path = path_slice.split_last(); - } - - Ok(*expected_root_hash) - } - - /// Execute a merk proof, update the state when a sized proof is - /// encountered i.e. update the limit, offset and result set values - fn execute_merk_proof( - &mut self, - proof_token_type: ProofTokenType, - proof: &[u8], - query: &Query, - left_to_right: bool, - path: Path, - ) -> Result<(CryptoHash, Option), Error> { - let is_sized_proof = proof_token_type == ProofTokenType::SizedMerk; - let mut limit = None; - let mut offset = None; - - if is_sized_proof { - limit = self.limit; - offset = self.offset; - } - - let (hash, result) = - grovedb_merk::execute_proof(proof, query, limit, offset, left_to_right) - .unwrap() - .map_err(|e| { - eprintln!("{e}"); - Error::InvalidProof("invalid proof verification parameters") - })?; - - // convert the result set to proved_path_key_values - let proved_path_key_values = - ProvedPathKeyValue::from_proved_key_values(path, result.result_set); - - if is_sized_proof { - self.limit = result.limit; - self.offset = result.offset; - self.result_set.extend(proved_path_key_values); - Ok((hash, None)) - } else { - Ok((hash, Some(proved_path_key_values))) - } - } -} diff --git a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index 7563dc73d..f140bb051 100644 --- a/grovedb/src/query/mod.rs +++ b/grovedb/src/query/mod.rs @@ -1,41 +1,20 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// 
documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Queries -use std::cmp::Ordering; +use std::{ + borrow::{Cow, Cow::Borrowed}, + cmp::Ordering, + fmt, +}; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_merk::proofs::query::query_item::QueryItem; -use grovedb_merk::proofs::query::SubqueryBranch; +use grovedb_merk::proofs::query::{Key, SubqueryBranch}; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_merk::proofs::Query; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use indexmap::IndexMap; +use crate::operations::proof::util::hex_to_ascii; #[cfg(any(feature = "full", feature = "verify"))] use crate::query_result_type::PathKey; #[cfg(any(feature = "full", feature = "verify"))] @@ -55,6 +34,20 @@ pub struct PathQuery { pub query: SizedQuery, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for PathQuery { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "PathQuery {{ path: [")?; + for (i, path_element) in 
self.path.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{}", hex_to_ascii(path_element))?; + } + write!(f, "], query: {} }}", self.query) + } +} + #[cfg(any(feature = "full", feature = "verify"))] #[derive(Debug, Clone)] /// Holds a query to apply to a tree and an optional limit/offset value. @@ -68,6 +61,20 @@ pub struct SizedQuery { pub offset: Option, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for SizedQuery { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SizedQuery {{ query: {}", self.query)?; + if let Some(limit) = self.limit { + write!(f, ", limit: {}", limit)?; + } + if let Some(offset) = self.offset { + write!(f, ", offset: {}", offset)?; + } + write!(f, " }}") + } +} + #[cfg(any(feature = "full", feature = "verify"))] impl SizedQuery { /// New sized query @@ -128,7 +135,18 @@ impl PathQuery { } /// Gets the path of all terminal keys - pub fn terminal_keys(&self, max_results: usize) -> Result, Error> { + pub fn terminal_keys( + &self, + max_results: usize, + grove_version: &GroveVersion, + ) -> Result, Error> { + check_grovedb_v0!( + "merge", + grove_version + .grovedb_versions + .path_query_methods + .terminal_keys + ); let mut result: Vec<(Vec>, Vec)> = vec![]; self.query .query @@ -138,7 +156,14 @@ impl PathQuery { } /// Combines multiple path queries into one equivalent path query - pub fn merge(mut path_queries: Vec<&PathQuery>) -> Result { + pub fn merge( + mut path_queries: Vec<&PathQuery>, + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "merge", + grove_version.grovedb_versions.path_query_methods.merge + ); if path_queries.is_empty() { return Err(Error::InvalidInput( "merge function requires at least 1 path query", @@ -158,13 +183,14 @@ impl PathQuery { path_queries.into_iter().try_for_each(|path_query| { if path_query.query.offset.is_some() { return Err(Error::NotSupported( - "can not merge pathqueries with offsets", + "can not merge pathqueries 
with offsets".to_string(), )); } if path_query.query.limit.is_some() { return Err(Error::NotSupported( "can not merge pathqueries with limits, consider setting the limit after the \ - merge", + merge" + .to_string(), )); } path_query @@ -268,24 +294,307 @@ impl PathQuery { } } } + + pub fn query_items_at_path( + &self, + path: &[&[u8]], + grove_version: &GroveVersion, + ) -> Result, Error> { + check_grovedb_v0!( + "query_items_at_path", + grove_version + .grovedb_versions + .path_query_methods + .query_items_at_path + ); + fn recursive_query_items<'b>( + query: &'b Query, + path: &[&[u8]], + ) -> Option> { + if path.is_empty() { + return Some(SinglePathSubquery::from_query(query)); + } + + let key = path[0]; + let path_after_top_removed = &path[1..]; + + if let Some(conditional_branches) = &query.conditional_subquery_branches { + for (query_item, subquery_branch) in conditional_branches { + if query_item.contains(key) { + if let Some(subquery_path) = &subquery_branch.subquery_path { + if path_after_top_removed.len() <= subquery_path.len() { + if path_after_top_removed + .iter() + .zip(subquery_path) + .all(|(a, b)| *a == b.as_slice()) + { + return if path_after_top_removed.len() == subquery_path.len() { + subquery_branch.subquery.as_ref().map(|subquery| { + SinglePathSubquery::from_query(subquery) + }) + } else { + let last_path_item = path.len() == subquery_path.len(); + let has_subquery = subquery_branch.subquery.is_some(); + Some(SinglePathSubquery::from_key_when_in_path( + &subquery_path[path_after_top_removed.len()], + last_path_item, + has_subquery, + )) + }; + } + } else if path_after_top_removed + .iter() + .take(subquery_path.len()) + .zip(subquery_path) + .all(|(a, b)| *a == b.as_slice()) + { + if let Some(subquery) = &subquery_branch.subquery { + return recursive_query_items( + subquery, + &path_after_top_removed[subquery_path.len()..], + ); + } + } + } else if let Some(subquery) = &subquery_branch.subquery { + return recursive_query_items(subquery, 
path_after_top_removed); + } + + return None; + } + } + } + + if let Some(subquery_path) = &query.default_subquery_branch.subquery_path { + if path_after_top_removed.len() <= subquery_path.len() { + if path_after_top_removed + .iter() + .zip(subquery_path) + .all(|(a, b)| *a == b.as_slice()) + { + // The paths are equal for example if we had a sub path of + // path : 1 / 2 + // subquery : All items + + // If we are asking what is the subquery when we are at 1 / 2 + // we should get + return if path_after_top_removed.len() == subquery_path.len() { + query + .default_subquery_branch + .subquery + .as_ref() + .map(|subquery| SinglePathSubquery::from_query(subquery)) + } else { + let last_path_item = path.len() == subquery_path.len(); + let has_subquery = query.default_subquery_branch.subquery.is_some(); + Some(SinglePathSubquery::from_key_when_in_path( + &subquery_path[path_after_top_removed.len()], + last_path_item, + has_subquery, + )) + }; + } + } else if path_after_top_removed + .iter() + .take(subquery_path.len()) + .zip(subquery_path) + .all(|(a, b)| *a == b.as_slice()) + { + if let Some(subquery) = &query.default_subquery_branch.subquery { + return recursive_query_items( + subquery, + &path_after_top_removed[subquery_path.len()..], + ); + } + } + } else if let Some(subquery) = &query.default_subquery_branch.subquery { + return recursive_query_items(subquery, path_after_top_removed); + } + + None + } + + let self_path_len = self.path.len(); + let given_path_len = path.len(); + + Ok(match given_path_len.cmp(&self_path_len) { + Ordering::Less => { + if path.iter().zip(&self.path).all(|(a, b)| *a == b.as_slice()) { + Some(SinglePathSubquery::from_key_when_in_path( + &self.path[given_path_len], + false, + true, + )) + } else { + None + } + } + Ordering::Equal => { + if path.iter().zip(&self.path).all(|(a, b)| *a == b.as_slice()) { + Some(SinglePathSubquery::from_path_query(self)) + } else { + None + } + } + Ordering::Greater => { + if 
!self.path.iter().zip(path).all(|(a, b)| a.as_slice() == *b) { + return Ok(None); + } + recursive_query_items(&self.query.query, &path[self_path_len..]) + } + }) + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +#[derive(Debug, Clone, PartialEq)] +pub enum HasSubquery<'a> { + NoSubquery, + Always, + Conditionally(Cow<'a, IndexMap>), +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl<'a> fmt::Display for HasSubquery<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + HasSubquery::NoSubquery => write!(f, "NoSubquery"), + HasSubquery::Always => write!(f, "Always"), + HasSubquery::Conditionally(map) => { + writeln!(f, "Conditionally {{")?; + for (query_item, subquery_branch) in map.iter() { + writeln!(f, " {query_item}: {subquery_branch},")?; + } + write!(f, "}}") + } + } + } +} + +impl<'a> HasSubquery<'a> { + /// Checks to see if we have a subquery on a specific key + pub fn has_subquery_on_key(&self, key: &[u8]) -> bool { + match self { + HasSubquery::NoSubquery => false, + HasSubquery::Conditionally(conditionally) => conditionally + .keys() + .any(|query_item| query_item.contains(key)), + HasSubquery::Always => true, + } + } +} + +/// This represents a query where the items might be borrowed, it is used to get +/// subquery information +#[cfg(any(feature = "full", feature = "verify"))] +#[derive(Debug, Clone, PartialEq)] +pub struct SinglePathSubquery<'a> { + /// Items + pub items: Cow<'a, Vec>, + /// Default subquery branch + pub has_subquery: HasSubquery<'a>, + /// Left to right? 
+ pub left_to_right: bool, + /// In the path of the path_query, or in a subquery path + pub in_path: Option>, +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl<'a> fmt::Display for SinglePathSubquery<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "InternalCowItemsQuery {{")?; + writeln!(f, " items: [")?; + for item in self.items.iter() { + writeln!(f, " {item},")?; + } + writeln!(f, " ]")?; + writeln!(f, " has_subquery: {}", self.has_subquery)?; + writeln!(f, " left_to_right: {}", self.left_to_right)?; + match &self.in_path { + Some(path) => writeln!(f, " in_path: Some({})", hex_to_ascii(path)), + None => writeln!(f, " in_path: None"), + }?; + write!(f, "}}") + } +} + +impl<'a> SinglePathSubquery<'a> { + /// Checks to see if we have a subquery on a specific key + pub fn has_subquery_or_matching_in_path_on_key(&self, key: &[u8]) -> bool { + if self.has_subquery.has_subquery_on_key(key) { + true + } else if let Some(path) = self.in_path.as_ref() { + path.as_slice() == key + } else { + false + } + } + + pub fn from_key_when_in_path( + key: &'a Vec, + subquery_is_last_path_item: bool, + subquery_has_inner_subquery: bool, + ) -> SinglePathSubquery<'a> { + // in this case there should be no in_path, because we are trying to get this + // level of items and nothing underneath + let in_path = if subquery_is_last_path_item && !subquery_has_inner_subquery { + None + } else { + Some(Borrowed(key)) + }; + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(key.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path, + } + } + + pub fn from_path_query(path_query: &PathQuery) -> SinglePathSubquery { + Self::from_query(&path_query.query.query) + } + + pub fn from_query(query: &Query) -> SinglePathSubquery { + let has_subquery = if query.default_subquery_branch.subquery.is_some() + || query.default_subquery_branch.subquery_path.is_some() + { + HasSubquery::Always + } else if let Some(conditional) 
= query.conditional_subquery_branches.as_ref() { + HasSubquery::Conditionally(Cow::Borrowed(conditional)) + } else { + HasSubquery::NoSubquery + }; + SinglePathSubquery { + items: Cow::Borrowed(&query.items), + has_subquery, + left_to_right: query.left_to_right, + in_path: None, + } + } } #[cfg(feature = "full")] #[cfg(test)] mod tests { - use std::ops::RangeFull; + use std::{borrow::Cow, ops::RangeFull}; - use grovedb_merk::proofs::{query::query_item::QueryItem, Query}; + use grovedb_merk::proofs::{ + query::{query_item::QueryItem, SubqueryBranch}, + Query, + }; + use grovedb_version::version::GroveVersion; + use indexmap::IndexMap; use crate::{ + query::{HasSubquery, SinglePathSubquery}, query_result_type::QueryResultType, tests::{common::compare_result_tuples, make_deep_tree, TEST_LEAF}, - Element, GroveDb, PathQuery, + Element, GroveDb, PathQuery, SizedQuery, }; #[test] fn test_same_path_different_query_merge() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); // starting with no subquery, just a single path and a key query let mut query_one = Query::new(); @@ -293,9 +602,13 @@ mod tests { let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); - let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_one) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set_one.len(), 1); let mut query_two = Query::new(); @@ -303,35 +616,49 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_two); - let proof = 
temp_db.prove_query(&path_query_two).unwrap().unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 1); - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("should merge path queries"); - - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); - let (_, result_set_tree) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) - .expect("should execute proof"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("should merge path queries"); + + let proof = temp_db + .prove_query(&merged_path_query, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_tree) = + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) + .expect("should execute proof"); assert_eq!(result_set_tree.len(), 2); } #[test] fn test_different_same_length_path_with_different_query_merge() { + let grove_version = GroveVersion::latest(); // Tests for // [a, c, Q] // [a, m, Q] - let temp_db = make_deep_tree(); + let temp_db = make_deep_tree(grove_version); let mut query_one = Query::new(); query_one.insert_key(b"key1".to_vec()); let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); - let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_one) = + 
GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set_one.len(), 1); let mut query_two = Query::new(); @@ -339,25 +666,33 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 1); - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("expect to merge path queries"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("expect to merge path queries"); assert_eq!(merged_path_query.path, vec![TEST_LEAF.to_vec()]); assert_eq!(merged_path_query.query.query.items.len(), 2); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db + .prove_query(&merged_path_query, None, grove_version) + .unwrap() + .unwrap(); let (_, result_set_merged) = - GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) .expect("should execute proof"); assert_eq!(result_set_merged.len(), 2); let keys = [b"key1".to_vec(), b"key4".to_vec()]; let values = [b"value1".to_vec(), b"value4".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); 
compare_result_tuples(result_set_merged, expected_result_set); @@ -373,9 +708,13 @@ mod tests { query_one.clone(), ); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); - let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_one) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set_one.len(), 3); let mut query_two = Query::new(); @@ -390,9 +729,13 @@ mod tests { query_two.clone(), ); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 2); let mut query_three = Query::new(); @@ -407,9 +750,13 @@ mod tests { query_three.clone(), ); - let proof = temp_db.prove_query(&path_query_three).unwrap().unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_three) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_three, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_three, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 2); #[rustfmt::skip] @@ -439,9 +786,11 @@ mod tests { } - let merged_path_query = - PathQuery::merge(vec![&path_query_one, &path_query_two, &path_query_three]) - .expect("expect to merge path queries"); + let merged_path_query = PathQuery::merge( + vec![&path_query_one, 
&path_query_two, &path_query_three], + grove_version, + ) + .expect("expect to merge path queries"); assert_eq!(merged_path_query.path, vec![b"deep_leaf".to_vec()]); assert_eq!(merged_path_query.query.query.items.len(), 2); let conditional_subquery_branches = merged_path_query @@ -534,16 +883,22 @@ mod tests { .query_raw( &merged_path_query, true, + true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, + grove_version, ) .value .expect("expected to get results"); assert_eq!(result_set_merged.len(), 7); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db + .prove_query(&merged_path_query, None, grove_version) + .unwrap() + .unwrap(); let (_, proved_result_set_merged) = - GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) .expect("should execute proof"); assert_eq!(proved_result_set_merged.len(), 7); @@ -565,14 +920,15 @@ mod tests { b"value10".to_vec(), b"value11".to_vec(), ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(proved_result_set_merged, expected_result_set); } #[test] fn test_different_length_paths_merge() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); let mut query_one = Query::new(); query_one.insert_all(); @@ -586,9 +942,13 @@ mod tests { query_one, ); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); - let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_one) = + 
GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set_one.len(), 6); let mut query_two = Query::new(); @@ -603,18 +963,26 @@ mod tests { query_two, ); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 2); - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("expect to merge path queries"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("expect to merge path queries"); assert_eq!(merged_path_query.path, vec![b"deep_leaf".to_vec()]); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db + .prove_query(&merged_path_query, None, grove_version) + .unwrap() + .unwrap(); let (_, result_set_merged) = - GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) .expect("should execute proof"); assert_eq!(result_set_merged.len(), 8); @@ -638,23 +1006,28 @@ mod tests { b"value10".to_vec(), b"value11".to_vec(), ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set_merged, expected_result_set); } #[test] fn test_same_path_and_different_path_query_merge() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + 
let temp_db = make_deep_tree(grove_version); let mut query_one = Query::new(); query_one.insert_key(b"key1".to_vec()); let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 1); let mut query_two = Query::new(); @@ -662,9 +1035,13 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 1); let mut query_three = Query::new(); @@ -674,37 +1051,52 @@ mod tests { query_three, ); - let proof = temp_db.prove_query(&path_query_three).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_three) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_three, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_three, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 2); - let merged_path_query = - PathQuery::merge(vec![&path_query_one, &path_query_two, &path_query_three]) - .expect("should merge 
three queries"); - - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) - .expect("should execute proof"); + let merged_path_query = PathQuery::merge( + vec![&path_query_one, &path_query_two, &path_query_three], + grove_version, + ) + .expect("should merge three queries"); + + let proof = temp_db + .prove_query(&merged_path_query, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 4); } #[test] fn test_equal_path_merge() { + let grove_version = GroveVersion::latest(); // [a, b, Q] // [a, b, Q2] // We should be able to merge this if Q and Q2 have no subqueries. - let temp_db = make_deep_tree(); + let temp_db = make_deep_tree(grove_version); let mut query_one = Query::new(); query_one.insert_key(b"key1".to_vec()); let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 1); let mut query_two = Query::new(); @@ -712,17 +1104,26 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + 
.unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 1); - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("should merge three queries"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("should merge three queries"); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&merged_path_query, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 2); // [a, b, Q] @@ -735,9 +1136,13 @@ mod tests { query_one, ); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 2); let mut query_one = Query::new(); @@ -752,9 +1157,13 @@ mod tests { query_one, ); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 3); 
#[rustfmt::skip] @@ -784,8 +1193,9 @@ mod tests { } - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("expected to be able to merge path_query"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("expected to be able to merge path_query"); // we expect the common path to be the path of both before merge assert_eq!( @@ -826,16 +1236,478 @@ mod tests { .query_raw( &merged_path_query, true, + true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, + grove_version, ) .value .expect("expected to get results"); assert_eq!(result_set_merged.len(), 4); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&merged_path_query, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 4); } + + #[test] + fn test_path_query_items_with_subquery_and_inner_subquery_path() { + let grove_version = GroveVersion::latest(); + // Constructing the keys and paths + let root_path_key_1 = b"root_path_key_1".to_vec(); + let root_path_key_2 = b"root_path_key_2".to_vec(); + let root_item_key = b"root_item_key".to_vec(); + let subquery_path_key_1 = b"subquery_path_key_1".to_vec(); + let subquery_path_key_2 = b"subquery_path_key_2".to_vec(); + let subquery_item_key = b"subquery_item_key".to_vec(); + let inner_subquery_path_key = b"inner_subquery_path_key".to_vec(); + + // Constructing the subquery + let subquery = Query { + items: vec![QueryItem::Key(subquery_item_key.clone())], + default_subquery_branch: SubqueryBranch { + subquery_path: Some(vec![inner_subquery_path_key.clone()]), + subquery: None, + }, + left_to_right: true, + 
conditional_subquery_branches: None, + }; + + // Constructing the PathQuery + let path_query = PathQuery { + path: vec![root_path_key_1.clone(), root_path_key_2.clone()], + query: SizedQuery { + query: Query { + items: vec![QueryItem::Key(root_item_key.clone())], + default_subquery_branch: SubqueryBranch { + subquery_path: Some(vec![ + subquery_path_key_1.clone(), + subquery_path_key_2.clone(), + ]), + subquery: Some(Box::new(subquery)), + }, + left_to_right: true, + conditional_subquery_branches: None, + }, + limit: Some(2), + offset: None, + }, + }; + + { + let path = vec![root_path_key_1.as_slice()]; + let first = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + first, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(root_path_key_2.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&root_path_key_2)), + } + ); + } + + { + let path = vec![root_path_key_1.as_slice(), root_path_key_2.as_slice()]; + + let second = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + second, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(root_item_key.clone())]), + has_subquery: HasSubquery::Always, /* This is correct because there's a + * subquery for one item */ + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![ + root_path_key_1.as_slice(), + root_path_key_2.as_slice(), + root_item_key.as_slice(), + ]; + + let third = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + third, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(subquery_path_key_1.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&subquery_path_key_1)) + } + ); + 
} + + { + let path = vec![ + root_path_key_1.as_slice(), + root_path_key_2.as_slice(), + root_item_key.as_slice(), + subquery_path_key_1.as_slice(), + ]; + + let fourth = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + fourth, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(subquery_path_key_2.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&subquery_path_key_2)) + } + ); + } + + { + let path = vec![ + root_path_key_1.as_slice(), + root_path_key_2.as_slice(), + root_item_key.as_slice(), + subquery_path_key_1.as_slice(), + subquery_path_key_2.as_slice(), + ]; + + let fifth = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + fifth, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(subquery_item_key.clone())]), + has_subquery: HasSubquery::Always, /* This means that we should be able to + * add items underneath */ + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![ + root_path_key_1.as_slice(), + root_path_key_2.as_slice(), + root_item_key.as_slice(), + subquery_path_key_1.as_slice(), + subquery_path_key_2.as_slice(), + subquery_item_key.as_slice(), + ]; + + let sixth = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + sixth, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(inner_subquery_path_key.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: None, + } + ); + } + } + + #[test] + fn test_path_query_items_with_subquery_path() { + let grove_version = GroveVersion::latest(); + // Constructing the keys and paths + let root_path_key = b"higher".to_vec(); + let dash_key = b"dash".to_vec(); + let quantum_key = b"quantum".to_vec(); 
+ + // Constructing the PathQuery + let path_query = PathQuery { + path: vec![root_path_key.clone()], + query: SizedQuery { + query: Query { + items: vec![QueryItem::RangeFull(RangeFull)], + default_subquery_branch: SubqueryBranch { + subquery_path: Some(vec![quantum_key.clone()]), + subquery: None, + }, + left_to_right: true, + conditional_subquery_branches: None, + }, + limit: Some(100), + offset: None, + }, + }; + + // Validating the PathQuery structure + { + let path = vec![root_path_key.as_slice()]; + let first = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + first, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::RangeFull(RangeFull)]), + has_subquery: HasSubquery::Always, + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![root_path_key.as_slice(), dash_key.as_slice()]; + + let second = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + second, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(quantum_key.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: None, // There should be no path because we are at the end of the path + } + ); + } + } + + #[test] + fn test_conditional_subquery_refusing_elements() { + let grove_version = GroveVersion::latest(); + let empty_vec: Vec = vec![]; + let zero_vec: Vec = vec![0]; + + let mut conditional_subquery_branches = IndexMap::new(); + conditional_subquery_branches.insert( + QueryItem::Key(b"".to_vec()), + SubqueryBranch { + subquery_path: Some(vec![zero_vec.clone()]), + subquery: Some(Query::new().into()), + }, + ); + + let path_query = PathQuery { + path: vec![TEST_LEAF.to_vec()], + query: SizedQuery { + query: Query { + items: vec![QueryItem::RangeFull(RangeFull)], + default_subquery_branch: SubqueryBranch { + subquery_path: 
Some(vec![zero_vec.clone()]), + subquery: None, + }, + left_to_right: true, + conditional_subquery_branches: Some(conditional_subquery_branches), + }, + limit: Some(100), + offset: None, + }, + }; + + { + let path = vec![TEST_LEAF, empty_vec.as_slice()]; + + let second = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + second, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(zero_vec.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&zero_vec)), + } + ); + } + } + + #[test] + fn test_complex_path_query_with_conditional_subqueries() { + let grove_version = GroveVersion::latest(); + let identity_id = + hex::decode("8b8948a6801501bbe0431e3d994dcf71cf5a2a0939fe51b0e600076199aba4fb") + .unwrap(); + + let key_20 = vec![20u8]; + + let key_80 = vec![80u8]; + + let inner_conditional_subquery_branches = IndexMap::from([( + QueryItem::Key(vec![80]), + SubqueryBranch { + subquery_path: None, + subquery: Some(Box::new(Query { + items: vec![QueryItem::RangeFull(RangeFull)], + default_subquery_branch: SubqueryBranch { + subquery_path: None, + subquery: None, + }, + left_to_right: true, + conditional_subquery_branches: None, + })), + }, + )]); + + let conditional_subquery_branches = IndexMap::from([ + ( + QueryItem::Key(vec![]), + SubqueryBranch { + subquery_path: None, + subquery: Some(Box::new(Query { + items: vec![QueryItem::Key(identity_id.to_vec())], + default_subquery_branch: SubqueryBranch { + subquery_path: None, + subquery: None, + }, + left_to_right: true, + conditional_subquery_branches: None, + })), + }, + ), + ( + QueryItem::Key(vec![20]), + SubqueryBranch { + subquery_path: Some(vec![identity_id.to_vec()]), + subquery: Some(Box::new(Query { + items: vec![QueryItem::Key(vec![80]), QueryItem::Key(vec![0xc0])], + default_subquery_branch: SubqueryBranch { + subquery_path: None, + subquery: None, + }, + 
conditional_subquery_branches: Some( + inner_conditional_subquery_branches.clone(), + ), + left_to_right: true, + })), + }, + ), + ]); + + let path_query = PathQuery { + path: vec![], + query: SizedQuery { + query: Query { + items: vec![QueryItem::Key(vec![20]), QueryItem::Key(vec![96])], + default_subquery_branch: SubqueryBranch { + subquery_path: None, + subquery: None, + }, + conditional_subquery_branches: Some(conditional_subquery_branches.clone()), + left_to_right: true, + }, + limit: Some(100), + offset: None, + }, + }; + + { + let path = vec![]; + let first = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + first, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(vec![20]), QueryItem::Key(vec![96]),]), + has_subquery: HasSubquery::Conditionally(Cow::Borrowed( + &conditional_subquery_branches + )), + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![key_20.as_slice()]; + let query = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + query, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(identity_id.clone()),]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&identity_id)), + } + ); + } + + { + let path = vec![key_20.as_slice(), identity_id.as_slice()]; + let query = path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + query, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(vec![80]), QueryItem::Key(vec![0xc0]),]), + has_subquery: HasSubquery::Conditionally(Cow::Borrowed( + &inner_conditional_subquery_branches + )), + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![key_20.as_slice(), identity_id.as_slice(), key_80.as_slice()]; + let query = 
path_query + .query_items_at_path(&path, grove_version) + .expect("expected valid version") + .expect("expected query items"); + + assert_eq!( + query, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::RangeFull(RangeFull)]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: None, + } + ); + } + } } diff --git a/grovedb/src/query_result_type.rs b/grovedb/src/query_result_type.rs index 37de6c0d1..020352382 100644 --- a/grovedb/src/query_result_type.rs +++ b/grovedb/src/query_result_type.rs @@ -1,41 +1,20 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Determines the query result form use std::{ collections::{BTreeMap, HashMap}, + fmt, vec::IntoIter, }; pub use grovedb_merk::proofs::query::{Key, Path, PathKey}; +use grovedb_version::{version::GroveVersion, TryFromVersioned}; -use crate::{operations::proof::util::ProvedPathKeyValue, Element, Error}; +use crate::{ + operations::proof::util::{ + hex_to_ascii, path_hex_to_ascii, ProvedPathKeyOptionalValue, ProvedPathKeyValue, + }, + Element, Error, +}; #[derive(Copy, Clone)] /// Query result type @@ -48,12 +27,116 @@ pub enum QueryResultType { QueryPathKeyElementTrioResultType, } +impl fmt::Display for QueryResultType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + QueryResultType::QueryElementResultType => write!(f, "QueryElementResultType"), + QueryResultType::QueryKeyElementPairResultType => { + write!(f, "QueryKeyElementPairResultType") + } + QueryResultType::QueryPathKeyElementTrioResultType => { + write!(f, "QueryPathKeyElementTrioResultType") + } + } + } +} + /// Query result elements +#[derive(Debug, Clone, Eq, PartialEq)] pub struct QueryResultElements { /// Elements pub elements: Vec, } +impl fmt::Display for QueryResultElements { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "QueryResultElements {{")?; + for (index, element) in self.elements.iter().enumerate() { + writeln!(f, " {}: {}", index, element)?; + } + write!(f, "}}") + } +} + +#[derive(Debug, Clone)] +pub enum BTreeMapLevelResultOrItem { + BTreeMapLevelResult(BTreeMapLevelResult), + ResultItem(Element), +} + +/// BTreeMap level result +#[derive(Debug, Clone)] +pub struct BTreeMapLevelResult { + pub key_values: BTreeMap, +} + +impl fmt::Display for BTreeMapLevelResultOrItem { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BTreeMapLevelResultOrItem::BTreeMapLevelResult(result) => { + write!(f, "{}", result) + } + BTreeMapLevelResultOrItem::ResultItem(element) => { + write!(f, "{}", element) + } + } + } +} + 
+impl fmt::Display for BTreeMapLevelResult { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "BTreeMapLevelResult {{")?; + self.fmt_inner(f, 1)?; + write!(f, "}}") + } +} + +impl BTreeMapLevelResult { + fn fmt_inner(&self, f: &mut fmt::Formatter<'_>, indent: usize) -> fmt::Result { + for (key, value) in &self.key_values { + write!(f, "{:indent$}", "", indent = indent * 2)?; + write!(f, "{}: ", hex_to_ascii(key))?; + match value { + BTreeMapLevelResultOrItem::BTreeMapLevelResult(result) => { + writeln!(f, "BTreeMapLevelResult {{")?; + result.fmt_inner(f, indent + 1)?; + write!(f, "{:indent$}}}", "", indent = indent * 2)?; + } + BTreeMapLevelResultOrItem::ResultItem(element) => { + write!(f, "{}", element)?; + } + } + writeln!(f)?; + } + Ok(()) + } +} + +impl BTreeMapLevelResult { + pub fn len_of_values_at_path(&self, path: &[&[u8]]) -> u16 { + let mut current = self; + + // Traverse the path + for segment in path { + match current.key_values.get(*segment) { + Some(BTreeMapLevelResultOrItem::BTreeMapLevelResult(next_level)) => { + current = next_level; + } + Some(BTreeMapLevelResultOrItem::ResultItem(_)) => { + // We've reached a ResultItem before the end of the path + return 0; + } + None => { + // Path not found + return 0; + } + } + } + + current.key_values.len() as u16 + } +} + impl QueryResultElements { /// New pub fn new() -> Self { @@ -61,7 +144,7 @@ impl QueryResultElements { } /// From elements - pub(crate) fn from_elements(elements: Vec) -> Self { + pub fn from_elements(elements: Vec) -> Self { QueryResultElements { elements } } @@ -187,6 +270,152 @@ impl QueryResultElements { }) .collect() } + + /// To last path to keys btree map + /// This is useful if for example the element is a sum item and isn't + /// important Used in Platform Drive for getting voters for multiple + /// contenders + pub fn to_last_path_to_keys_btree_map(self) -> BTreeMap> { + let mut map: BTreeMap, Vec> = BTreeMap::new(); + + for result_item in 
self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((mut path, key, _)) = + result_item + { + if let Some(last) = path.pop() { + map.entry(last).or_insert_with(Vec::new).push(key); + } + } + } + + map + } + + /// To path to key, elements btree map + pub fn to_path_to_key_elements_btree_map(self) -> BTreeMap> { + let mut map: BTreeMap> = BTreeMap::new(); + + for result_item in self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((path, key, element)) = + result_item + { + map.entry(path).or_default().insert(key, element); + } + } + + map + } + + /// To last path to key, elements btree map + pub fn to_last_path_to_key_elements_btree_map(self) -> BTreeMap> { + let mut map: BTreeMap, BTreeMap> = BTreeMap::new(); + + for result_item in self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((mut path, key, element)) = + result_item + { + if let Some(last) = path.pop() { + map.entry(last).or_default().insert(key, element); + } + } + } + + map + } + + /// To last path to elements btree map + /// This is useful if the key is not important + pub fn to_last_path_to_elements_btree_map(self) -> BTreeMap> { + let mut map: BTreeMap, Vec> = BTreeMap::new(); + + for result_item in self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((mut path, _, element)) = + result_item + { + if let Some(last) = path.pop() { + map.entry(last).or_insert_with(Vec::new).push(element); + } + } + } + + map + } + + /// To btree map level results + /// This is useful to structure the results as a nested tree of path levels + pub fn to_btree_map_level_results(self) -> BTreeMapLevelResult { + fn insert_recursive( + current_level: &mut BTreeMapLevelResult, + mut path: std::vec::IntoIter>, + key: Vec, + element: Element, + ) { + if let Some(segment) = path.next() { + let next_level = current_level.key_values.entry(segment).or_insert_with(|| { + 
BTreeMapLevelResultOrItem::BTreeMapLevelResult(BTreeMapLevelResult { + key_values: BTreeMap::new(), + }) + }); + + match next_level { + BTreeMapLevelResultOrItem::BTreeMapLevelResult(inner) => { + insert_recursive(inner, path, key, element); + } + BTreeMapLevelResultOrItem::ResultItem(_) => { + // This shouldn't happen in a well-formed structure, but we'll handle it + // anyway + *next_level = + BTreeMapLevelResultOrItem::BTreeMapLevelResult(BTreeMapLevelResult { + key_values: BTreeMap::new(), + }); + if let BTreeMapLevelResultOrItem::BTreeMapLevelResult(inner) = next_level { + insert_recursive(inner, path, key, element); + } + } + } + } else { + current_level + .key_values + .insert(key, BTreeMapLevelResultOrItem::ResultItem(element)); + } + } + + let mut root = BTreeMapLevelResult { + key_values: BTreeMap::new(), + }; + + for result_item in self.elements { + if let QueryResultElement::PathKeyElementTrioResultItem((path, key, element)) = + result_item + { + insert_recursive(&mut root, path.into_iter(), key, element); + } + } + + root + } + + /// To previous of last path to keys btree map + /// This is useful if for example the element is a sum item and isn't + /// important. Used in Platform Drive for getting voters for multiple + /// contenders + pub fn to_previous_of_last_path_to_keys_btree_map(self) -> BTreeMap> { + let mut map: BTreeMap, Vec> = BTreeMap::new(); + + for result_item in self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((mut path, key, _)) = + result_item + { + if path.pop().is_some() { + if let Some(last) = path.pop() { + map.entry(last).or_default().push(key); + } + } + } + } + + map + } } impl Default for QueryResultElements { @@ -196,6 +425,7 @@ } /// Query result element +#[derive(Debug, Clone, Eq, PartialEq)] pub enum QueryResultElement { /// Element result item ElementResultItem(Element), @@ -205,6 +435,33 @@ 
PathKeyElementTrioResultItem(PathKeyElementTrio), } +impl fmt::Display for QueryResultElement { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + QueryResultElement::ElementResultItem(element) => { + write!(f, "ElementResultItem({})", element) + } + QueryResultElement::KeyElementPairResultItem((key, element)) => { + write!( + f, + "KeyElementPairResultItem(key: {}, element: {})", + hex_to_ascii(key), + element + ) + } + QueryResultElement::PathKeyElementTrioResultItem((path, key, element)) => { + write!( + f, + "PathKeyElementTrioResultItem(path: {}, key: {}, element: {})", + path_hex_to_ascii(path), + hex_to_ascii(key), + element + ) + } + } + } +} + #[cfg(feature = "full")] impl QueryResultElement { /// Map element @@ -247,11 +504,14 @@ pub type PathKeyElementTrio = (Path, Key, Element); pub type PathKeyOptionalElementTrio = (Path, Key, Option); #[cfg(any(feature = "full", feature = "verify"))] -impl TryFrom for PathKeyOptionalElementTrio { +impl TryFromVersioned for PathKeyOptionalElementTrio { type Error = Error; - fn try_from(proved_path_key_value: ProvedPathKeyValue) -> Result { - let element = Element::deserialize(proved_path_key_value.value.as_slice())?; + fn try_from_versioned( + proved_path_key_value: ProvedPathKeyValue, + grove_version: &GroveVersion, + ) -> Result { + let element = Element::deserialize(proved_path_key_value.value.as_slice(), grove_version)?; Ok(( proved_path_key_value.path, proved_path_key_value.key, @@ -260,9 +520,31 @@ impl TryFrom for PathKeyOptionalElementTrio { } } +#[cfg(any(feature = "full", feature = "verify"))] +impl TryFromVersioned for PathKeyOptionalElementTrio { + type Error = Error; + + fn try_from_versioned( + proved_path_key_value: ProvedPathKeyOptionalValue, + grove_version: &GroveVersion, + ) -> Result { + let element = proved_path_key_value + .value + .map(|e| Element::deserialize(e.as_slice(), grove_version)) + .transpose()?; + Ok(( + proved_path_key_value.path, + 
proved_path_key_value.key, + element, + )) + } +} + #[cfg(feature = "full")] #[cfg(test)] mod tests { + use grovedb_version::{version::GroveVersion, TryIntoVersioned}; + use crate::{ operations::proof::util::ProvedPathKeyValue, query_result_type::PathKeyOptionalElementTrio, Element, @@ -270,6 +552,7 @@ mod tests { #[test] fn test_single_proved_path_key_value_to_path_key_optional_element() { + let grove_version = GroveVersion::latest(); let path = vec![b"1".to_vec(), b"2".to_vec()]; let proved_path_key_value = ProvedPathKeyValue { path: path.clone(), @@ -278,7 +561,7 @@ mod tests { proof: [0; 32], }; let path_key_element_trio: PathKeyOptionalElementTrio = proved_path_key_value - .try_into() + .try_into_versioned(grove_version) .expect("should convert to path key optional element trio"); assert_eq!( path_key_element_trio, diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index 41dd9b6b5..aa01d4007 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -28,22 +28,22 @@ //! 
Space efficient methods for referencing other elements in GroveDB -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] use std::fmt; +use bincode::{Decode, Encode}; #[cfg(feature = "full")] use grovedb_visualize::visualize_to_vec; #[cfg(feature = "full")] use integer_encoding::VarInt; -#[cfg(any(feature = "full", feature = "verify"))] -use serde::{Deserialize, Serialize}; -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] use crate::Error; #[cfg(any(feature = "full", feature = "verify"))] +#[cfg_attr(not(any(feature = "full", feature = "visualize")), derive(Debug))] /// Reference path variants -#[derive(Hash, Eq, PartialEq, Serialize, Deserialize, Clone)] +#[derive(Hash, Eq, PartialEq, Encode, Decode, Clone)] pub enum ReferencePathType { /// Holds the absolute path to the element the reference points to AbsolutePathReference(Vec>), @@ -54,6 +54,16 @@ pub enum ReferencePathType { /// path [p, q] result = [a, b, p, q] UpstreamRootHeightReference(u8, Vec>), + /// This is very similar to the UpstreamRootHeightReference, however + /// it appends to the absolute path when resolving the parent of the + /// reference. If the reference is stored at 15/9/80/7 then 80 will be + /// appended to what we are referring to. For example if we have the + /// reference at [a, b, c, d, e, f] (e is the parent path here) and we + /// have in the UpstreamRootHeightWithParentPathAdditionReference the + /// height set to 2 and the addon path set to [x, y], we would get as a + /// result [a, b, x, y, e] + UpstreamRootHeightWithParentPathAdditionReference(u8, Vec>), + /// This discards the last n elements from the current path and appends a /// new path to the subpath. 
If current path is [a, b, c, d] and we /// discard the last element, subpath = [a, b, c] we can then append @@ -76,7 +86,82 @@ pub enum ReferencePathType { SiblingReference(Vec), } -#[cfg(feature = "full")] +// Helper function to display paths +fn display_path(path: &[Vec]) -> String { + path.iter() + .map(hex::encode) + .collect::>() + .join("/") +} + +impl fmt::Display for ReferencePathType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ReferencePathType::AbsolutePathReference(path) => { + write!(f, "AbsolutePathReference({})", display_path(path)) + } + ReferencePathType::UpstreamRootHeightReference(height, path) => { + write!( + f, + "UpstreamRootHeightReference({}, {})", + height, + display_path(path) + ) + } + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference(height, path) => { + write!( + f, + "UpstreamRootHeightWithParentPathAdditionReference({}, {})", + height, + display_path(path) + ) + } + ReferencePathType::UpstreamFromElementHeightReference(height, path) => { + write!( + f, + "UpstreamFromElementHeightReference({}, {})", + height, + display_path(path) + ) + } + ReferencePathType::CousinReference(key) => { + write!(f, "CousinReference({})", hex::encode(key)) + } + ReferencePathType::RemovedCousinReference(path) => { + write!(f, "RemovedCousinReference({})", display_path(path)) + } + ReferencePathType::SiblingReference(key) => { + write!(f, "SiblingReference({})", hex::encode(key)) + } + } + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl ReferencePathType { + /// Given the reference path type and the current qualified path (path+key), + /// this computes the absolute path of the item the reference is pointing + /// to. 
+ pub fn absolute_path_using_current_qualified_path>( + self, + current_qualified_path: &[B], + ) -> Result>, Error> { + path_from_reference_qualified_path_type(self, current_qualified_path) + } + + /// Given the reference path type, the current path and the terminal key, + /// this computes the absolute path of the item the reference is + /// pointing to. + pub fn absolute_path>( + self, + current_path: &[B], + current_key: Option<&[u8]>, + ) -> Result>, Error> { + path_from_reference_path_type(self, current_path, current_key) + } +} + +#[cfg(any(feature = "full", feature = "visualize"))] impl fmt::Debug for ReferencePathType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut v = Vec::new(); @@ -86,7 +171,7 @@ impl fmt::Debug for ReferencePathType { } } -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] /// Given the reference path type and the current qualified path (path+key), /// this computes the absolute path of the item the reference is pointing to. pub fn path_from_reference_qualified_path_type>( @@ -95,7 +180,7 @@ pub fn path_from_reference_qualified_path_type>( ) -> Result>, Error> { match current_qualified_path.split_last() { None => Err(Error::CorruptedPath( - "qualified path should always have an element", + "qualified path should always have an element".to_string(), )), Some((key, path)) => { path_from_reference_path_type(reference_path_type, path, Some(key.as_ref())) @@ -103,7 +188,7 @@ pub fn path_from_reference_qualified_path_type>( } } -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] /// Given the reference path type, the current path and the terminal key, this /// computes the absolute path of the item the reference is pointing to. 
pub fn path_from_reference_path_type>( @@ -130,6 +215,25 @@ pub fn path_from_reference_path_type>( subpath_as_vec.append(&mut path); Ok(subpath_as_vec) } + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + no_of_elements_to_keep, + mut path, + ) => { + if usize::from(no_of_elements_to_keep) > current_path.len() || current_path.is_empty() { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + } + let last = current_path.last().unwrap().as_ref().to_vec(); + let current_path_iter = current_path.iter(); + let mut subpath_as_vec = current_path_iter + .take(no_of_elements_to_keep as usize) + .map(|x| x.as_ref().to_vec()) + .collect::>(); + subpath_as_vec.append(&mut path); + subpath_as_vec.push(last); + Ok(subpath_as_vec) + } // Discard the last n elements from current path, append new path to subpath ReferencePathType::UpstreamFromElementHeightReference( @@ -224,6 +328,7 @@ impl ReferencePathType { .sum::() } ReferencePathType::UpstreamRootHeightReference(_, path) + | ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference(_, path) | ReferencePathType::UpstreamFromElementHeightReference(_, path) => { 1 + 1 + path @@ -246,6 +351,7 @@ impl ReferencePathType { #[cfg(test)] mod tests { use grovedb_merk::proofs::Query; + use grovedb_version::version::GroveVersion; use crate::{ reference_path::{path_from_reference_path_type, ReferencePathType}, @@ -266,6 +372,27 @@ mod tests { ); } + #[test] + fn test_upstream_root_height_with_parent_addition_reference() { + let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; + // selects the first 2 elements from the stored path and appends the new path. 
+ let ref1 = ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + 2, + vec![b"c".to_vec(), b"d".to_vec()], + ); + let final_path = path_from_reference_path_type(ref1, &stored_path, None).unwrap(); + assert_eq!( + final_path, + vec![ + b"a".to_vec(), + b"b".to_vec(), + b"c".to_vec(), + b"d".to_vec(), + b"m".to_vec() + ] + ); + } + #[test] fn test_upstream_from_element_height_reference() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; @@ -339,7 +466,8 @@ mod tests { #[test] fn test_query_many_with_different_reference_types() { - let db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); db.insert( [TEST_LEAF, b"innertree4"].as_ref(), @@ -351,6 +479,7 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -364,6 +493,7 @@ mod tests { )), None, None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -377,6 +507,7 @@ mod tests { )), None, None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -387,7 +518,7 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], query); let result = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("should query items"); assert_eq!(result.0.len(), 5); @@ -403,12 +534,12 @@ mod tests { ); let proof = db - .prove_query(&path_query) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should generate proof"); - let (hash, result) = - GroveDb::verify_query_raw(&proof, &path_query).expect("should verify proof"); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let (hash, result) = GroveDb::verify_query_raw(&proof, &path_query, grove_version) + .expect("should verify proof"); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result.len(), 5); } } diff --git 
a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 86c1c3f04..876fe62c1 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -1,989 +1,659 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! 
Replication - use std::{ - collections::VecDeque, - iter::{empty, once}, + collections::{BTreeMap, BTreeSet}, + fmt, }; use grovedb_merk::{ - proofs::{Node, Op}, - Merk, TreeFeatureType, + ed::Encode, + merk::restore::Restorer, + proofs::{Decoder, Op}, + tree::{hash::CryptoHash, kv::ValueDefinedCostType, value_hash}, + ChunkProducer, }; use grovedb_path::SubtreePath; -use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbImmediateStorageContext, PrefixedRocksDbStorageContext}, - Storage, StorageContext, -}; - -use crate::{Element, Error, GroveDb, Hash, Transaction}; - -const OPS_PER_CHUNK: usize = 128; +use grovedb_storage::rocksdb_storage::RocksDbStorage; +#[rustfmt::skip] +use grovedb_storage::rocksdb_storage::storage_context::context_immediate::PrefixedRocksDbImmediateStorageContext; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; + +use crate::{replication, Error, GroveDb, Transaction, TransactionArg}; + +pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; + +pub const CURRENT_STATE_SYNC_VERSION: u16 = 1; + +#[derive(Default)] +struct SubtreeStateSyncInfo<'db> { + // Current Chunk restorer + restorer: Option>>, + // Set of global chunk ids requested to be fetched and pending for processing. For the + // description of global chunk id check fetch_chunk(). + pending_chunks: BTreeSet>, + // Number of processed chunks in current prefix (Path digest) + num_processed_chunks: usize, +} -impl GroveDb { - /// Creates a chunk producer to replicate GroveDb. 
- pub fn chunks(&self) -> SubtreeChunkProducer {
- SubtreeChunkProducer::new(self)
- }
+pub struct MultiStateSyncInfo<'db> {
+ // Map of current processing subtrees
+ // SubtreePrefix (Path digest) -> SubtreeStateSyncInfo
+ current_prefixes: BTreeMap>,
+ // Set of processed prefixes (Path digests)
+ processed_prefixes: BTreeSet,
+ // Root app_hash
+ app_hash: [u8; 32],
+ // Version of state sync protocol
+ version: u16,
}

-/// Subtree chunks producer.
-pub struct SubtreeChunkProducer<'db> {
- grove_db: &'db GroveDb,
- cache: Option>,
+impl<'db> Default for MultiStateSyncInfo<'db> {
+ fn default() -> Self {
+ Self {
+ current_prefixes: BTreeMap::new(),
+ processed_prefixes: BTreeSet::new(),
+ app_hash: [0; 32],
+ version: CURRENT_STATE_SYNC_VERSION,
+ }
+ }
}

-struct SubtreeChunkProducerCache<'db> {
- current_merk_path: Vec>,
- current_merk: Merk>,
- // This needed to be an `Option` because it requires a reference on Merk but it's within the
- // same struct and during struct init a referenced Merk would be moved inside a struct,
- // using `Option` this init happens in two steps.
- current_chunk_producer:
- Option>>,
+// Struct containing information about current subtrees found in GroveDB
+pub struct SubtreesMetadata {
+ // Map of Prefix (Path digest) -> (Actual path, Parent Subtree actual_value_hash, Parent
+ // Subtree elem_value_hash) Note: Parent Subtree actual_value_hash, Parent Subtree
+ // elem_value_hash are needed when verifying the newly constructed subtree afterwards.
+ pub data: BTreeMap>, CryptoHash, CryptoHash)>, } -impl<'db> SubtreeChunkProducer<'db> { - fn new(storage: &'db GroveDb) -> Self { - SubtreeChunkProducer { - grove_db: storage, - cache: None, +impl SubtreesMetadata { + pub fn new() -> SubtreesMetadata { + SubtreesMetadata { + data: BTreeMap::new(), } } +} - /// Chunks in current producer - pub fn chunks_in_current_producer(&self) -> usize { - self.cache - .as_ref() - .and_then(|c| c.current_chunk_producer.as_ref().map(|p| p.len())) - .unwrap_or(0) +impl Default for SubtreesMetadata { + fn default() -> Self { + Self::new() } +} - /// Get chunk - pub fn get_chunk<'p, P>(&mut self, path: P, index: usize) -> Result, Error> - where - P: IntoIterator, -

::IntoIter: Clone + DoubleEndedIterator, - { - let path_iter = path.into_iter(); - - if let Some(SubtreeChunkProducerCache { - current_merk_path, .. - }) = &self.cache - { - if !itertools::equal(current_merk_path, path_iter.clone()) { - self.cache = None; - } - } - - if self.cache.is_none() { - let current_merk = self - .grove_db - .open_non_transactional_merk_at_path( - path_iter.clone().collect::>().as_slice().into(), - None, - ) - .unwrap()?; - - if current_merk.root_key().is_none() { - return Ok(Vec::new()); - } - - self.cache = Some(SubtreeChunkProducerCache { - current_merk_path: path_iter.map(|p| p.to_vec()).collect(), - current_merk, - current_chunk_producer: None, - }); - let cache = self.cache.as_mut().expect("exists at this point"); - cache.current_chunk_producer = Some( - grovedb_merk::ChunkProducer::new(&cache.current_merk) - .map_err(|e| Error::CorruptedData(e.to_string()))?, - ); +impl fmt::Debug for SubtreesMetadata { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for (prefix, metadata) in self.data.iter() { + let metadata_path = &metadata.0; + let metadata_path_str = util_path_to_string(metadata_path); + writeln!( + f, + " prefix:{:?} -> path:{:?}", + hex::encode(prefix), + metadata_path_str + )?; } - - self.cache - .as_mut() - .expect("must exist at this point") - .current_chunk_producer - .as_mut() - .expect("must exist at this point") - .chunk(index) - .map_err(|e| Error::CorruptedData(e.to_string())) + Ok(()) } } -// TODO: make generic over storage_cost context -type MerkRestorer<'db> = grovedb_merk::Restorer>; - -type Path = Vec>; - -/// Structure to drive GroveDb restore process. -pub struct Restorer<'db> { - current_merk_restorer: Option>, - current_merk_chunk_index: usize, - current_merk_path: Path, - queue: VecDeque<(Path, Vec, Hash, TreeFeatureType)>, - grove_db: &'db GroveDb, - tx: &'db Transaction<'db>, -} - -/// Indicates what next piece of information `Restorer` expects or wraps a -/// successful result. 
-#[derive(Debug)] -pub enum RestorerResponse { - AwaitNextChunk { path: Vec>, index: usize }, - Ready, +// Converts a path into a human-readable string (for debugging) +pub fn util_path_to_string(path: &[Vec]) -> Vec { + let mut subtree_path_str: Vec = vec![]; + for subtree in path { + let string = std::str::from_utf8(subtree).expect("should be able to convert path"); + subtree_path_str.push( + string + .parse() + .expect("should be able to parse path to string"), + ); + } + subtree_path_str } -#[derive(Debug)] -pub struct RestorerError(String); - -impl<'db> Restorer<'db> { - /// Create a GroveDb restorer using a backing storage_cost and root hash. - pub fn new( - grove_db: &'db GroveDb, - root_hash: Hash, - tx: &'db Transaction<'db>, - ) -> Result { - Ok(Restorer { - tx, - current_merk_restorer: Some(MerkRestorer::new( - Merk::open_base( - grove_db - .db - .get_immediate_storage_context(SubtreePath::empty(), tx) - .unwrap(), - false, - ) - .unwrap() - .map_err(|e| RestorerError(e.to_string()))?, - None, - root_hash, - )), - current_merk_chunk_index: 0, - current_merk_path: vec![], - queue: VecDeque::new(), - grove_db, - }) +// Splits the given global chunk id into [SUBTREE_PREFIX:CHUNK_ID] +pub fn util_split_global_chunk_id( + global_chunk_id: &[u8], + app_hash: &[u8], +) -> Result<(crate::SubtreePrefix, Vec), Error> { + let chunk_prefix_length: usize = 32; + if global_chunk_id.len() < chunk_prefix_length { + return Err(Error::CorruptedData( + "expected global chunk id of at least 32 length".to_string(), + )); } - /// Process next chunk and receive instruction on what to do next. - pub fn process_chunk( - &mut self, - chunk_ops: impl IntoIterator, - ) -> Result { - if self.current_merk_restorer.is_none() { - // Last restorer was consumed and no more Merks to process. - return Ok(RestorerResponse::Ready); - } - // First we decode a chunk to take out info about nested trees to add them into - // todo list. 
- let mut ops = Vec::new(); - for op in chunk_ops { - ops.push(op); - match ops.last().expect("just inserted") { - Op::Push(Node::KVValueHashFeatureType( - key, - value_bytes, - value_hash, - feature_type, - )) - | Op::PushInverted(Node::KVValueHashFeatureType( - key, - value_bytes, - value_hash, - feature_type, - )) => { - if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = - Element::deserialize(value_bytes) - .map_err(|e| RestorerError(e.to_string()))? - { - if root_key.is_none() || self.current_merk_path.last() == Some(key) { - // We add only subtrees of the current subtree to queue, skipping - // itself; Also skipping empty Merks. - continue; - } - let mut path = self.current_merk_path.clone(); - path.push(key.clone()); - // The value hash is the root tree hash - self.queue.push_back(( - path, - value_bytes.to_owned(), - *value_hash, - *feature_type, - )); - } - } - _ => {} - } - } - - // Process chunk using Merk's possibilities. - let remaining = self - .current_merk_restorer - .as_mut() - .expect("restorer exists at this point") - .process_chunk(ops) - .map_err(|e| RestorerError(e.to_string()))?; - - self.current_merk_chunk_index += 1; - - if remaining == 0 { - // If no more chunks for this Merk required decide if we're done or take a next - // Merk to process. - self.current_merk_restorer - .take() - .expect("restorer exists at this point") - .finalize() - .map_err(|e| RestorerError(e.to_string()))?; - if let Some((next_path, combining_value, expected_hash, _)) = self.queue.pop_front() { - // Process next subtree. 
- let merk = self - .grove_db - .open_merk_for_replication(next_path.as_slice().into(), self.tx) - .map_err(|e| RestorerError(e.to_string()))?; - self.current_merk_restorer = Some(MerkRestorer::new( - merk, - Some(combining_value), - expected_hash, - )); - self.current_merk_chunk_index = 0; - self.current_merk_path = next_path; - - Ok(RestorerResponse::AwaitNextChunk { - path: self.current_merk_path.clone(), - index: self.current_merk_chunk_index, - }) - } else { - Ok(RestorerResponse::Ready) - } - } else { - // Request a chunk at the same path but with incremented index. - Ok(RestorerResponse::AwaitNextChunk { - path: self.current_merk_path.clone(), - index: self.current_merk_chunk_index, - }) - } + if global_chunk_id == app_hash { + let array_of_zeros: [u8; 32] = [0; 32]; + let root_chunk_prefix_key: crate::SubtreePrefix = array_of_zeros; + return Ok((root_chunk_prefix_key, vec![])); } -} - -/// Chunk producer wrapper which uses bigger messages that may include chunks of -/// requested subtree with its right siblings. -/// -/// Because `Restorer` builds GroveDb replica breadth-first way from top to -/// bottom it makes sense to send a subtree's siblings next instead of its own -/// subtrees. 
-pub struct SiblingsChunkProducer<'db> { - chunk_producer: SubtreeChunkProducer<'db>, -} -#[derive(Debug)] -pub struct GroveChunk { - subtree_chunks: Vec<(usize, Vec)>, + let (chunk_prefix, chunk_id) = global_chunk_id.split_at(chunk_prefix_length); + let mut array = [0u8; 32]; + array.copy_from_slice(chunk_prefix); + let chunk_prefix_key: crate::SubtreePrefix = array; + Ok((chunk_prefix_key, chunk_id.to_vec())) } -impl<'db> SiblingsChunkProducer<'db> { - /// New - pub fn new(chunk_producer: SubtreeChunkProducer<'db>) -> Self { - SiblingsChunkProducer { chunk_producer } +pub fn util_encode_vec_ops(chunk: Vec) -> Result, Error> { + let mut res = vec![]; + for op in chunk { + op.encode_into(&mut res) + .map_err(|e| Error::CorruptedData(format!("unable to encode chunk: {}", e)))?; } + Ok(res) +} - /// Get a collection of chunks possibly from different Merks with the first - /// one as requested. - pub fn get_chunk<'p, P>(&mut self, path: P, index: usize) -> Result, Error> - where - P: IntoIterator, -

::IntoIter: Clone + DoubleEndedIterator + ExactSizeIterator, - { - let path_iter = path.into_iter(); - let mut result = Vec::new(); - let mut ops_count = 0; - - if path_iter.len() == 0 { - // We're at the root of GroveDb, no siblings here. - self.process_subtree_chunks(&mut result, &mut ops_count, empty(), index)?; - return Ok(result); - }; - - // Get siblings on the right to send chunks of multiple Merks if it meets the - // limit. - - let mut siblings_keys: VecDeque> = VecDeque::new(); - - let mut parent_path = path_iter; - let requested_key = parent_path.next_back(); - - let parent_ctx = self - .chunk_producer - .grove_db - .db - .get_storage_context( - parent_path.clone().collect::>().as_slice().into(), - None, - ) - .unwrap(); - let mut siblings_iter = Element::iterator(parent_ctx.raw_iter()).unwrap(); - - if let Some(key) = requested_key { - siblings_iter.fast_forward(key)?; - } - - while let Some(element) = siblings_iter.next_element().unwrap()? { - if let (key, Element::Tree(..)) | (key, Element::SumTree(..)) = element { - siblings_keys.push_back(key); +pub fn util_decode_vec_ops(chunk: Vec) -> Result, Error> { + let decoder = Decoder::new(&chunk); + let mut res = vec![]; + for op in decoder { + match op { + Ok(op) => res.push(op), + Err(e) => { + return Err(Error::CorruptedData(format!( + "unable to decode chunk: {}", + e + ))); } } + } + Ok(res) +} - let mut current_index = index; - // Process each subtree - while let Some(subtree_key) = siblings_keys.pop_front() { - #[allow(clippy::map_identity)] - let subtree_path = parent_path - .clone() - .map(|x| x) - .chain(once(subtree_key.as_slice())); - - self.process_subtree_chunks(&mut result, &mut ops_count, subtree_path, current_index)?; - // Going to a next sibling, should start from 0. - - if ops_count >= OPS_PER_CHUNK { - break; +#[cfg(feature = "full")] +impl GroveDb { + // Returns the discovered subtrees found recursively along with their associated + // metadata Params: + // tx: Transaction. 
Function returns the data by opening merks at given tx. + // TODO: Add a SubTreePath as param and start searching from that path instead + // of root (as it is now) + pub fn get_subtrees_metadata( + &self, + tx: TransactionArg, + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "is_empty_tree", + grove_version + .grovedb_versions + .replication + .get_subtrees_metadata + ); + let mut subtrees_metadata = SubtreesMetadata::new(); + + let subtrees_root = self + .find_subtrees(&SubtreePath::empty(), tx, grove_version) + .value?; + for subtree in subtrees_root.into_iter() { + let subtree_path: Vec<&[u8]> = subtree.iter().map(|vec| vec.as_slice()).collect(); + let path: &[&[u8]] = &subtree_path; + let prefix = RocksDbStorage::build_prefix(path.as_ref().into()).unwrap(); + + let current_path = SubtreePath::from(path); + + match (current_path.derive_parent(), subtree.last()) { + (Some((parent_path, _)), Some(parent_key)) => match tx { + None => { + let parent_merk = self + .open_non_transactional_merk_at_path(parent_path, None, grove_version) + .value?; + if let Ok(Some((elem_value, elem_value_hash))) = parent_merk + .get_value_and_value_hash( + parent_key, + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .value + { + let actual_value_hash = value_hash(&elem_value).unwrap(); + subtrees_metadata.data.insert( + prefix, + (current_path.to_vec(), actual_value_hash, elem_value_hash), + ); + } + } + Some(t) => { + let parent_merk = self + .open_transactional_merk_at_path(parent_path, t, None, grove_version) + .value?; + if let Ok(Some((elem_value, elem_value_hash))) = parent_merk + .get_value_and_value_hash( + parent_key, + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .value + { + let actual_value_hash = value_hash(&elem_value).unwrap(); + subtrees_metadata.data.insert( + prefix, + (current_path.to_vec(), actual_value_hash, elem_value_hash), + ); + } + } + }, + _ => { + subtrees_metadata.data.insert( 
+ prefix, + ( + current_path.to_vec(), + CryptoHash::default(), + CryptoHash::default(), + ), + ); + } } - current_index = 0; } - - Ok(result) + Ok(subtrees_metadata) } - /// Process one subtree's chunks - fn process_subtree_chunks<'p, P>( - &mut self, - result: &mut Vec, - ops_count: &mut usize, - subtree_path: P, - from_index: usize, - ) -> Result<(), Error> - where - P: IntoIterator, -

::IntoIter: Clone + DoubleEndedIterator, - { - let path_iter = subtree_path.into_iter(); - - let mut current_index = from_index; - let mut subtree_chunks = Vec::new(); - - loop { - let ops = self - .chunk_producer - .get_chunk(path_iter.clone(), current_index)?; - - *ops_count += ops.len(); - subtree_chunks.push((current_index, ops)); - current_index += 1; - if current_index >= self.chunk_producer.chunks_in_current_producer() - || *ops_count >= OPS_PER_CHUNK - { - break; - } + // Fetch a chunk by global chunk id (should be called by ABCI when + // LoadSnapshotChunk method is called) Params: + // global_chunk_id: Global chunk id in the following format: + // [SUBTREE_PREFIX:CHUNK_ID] SUBTREE_PREFIX: 32 bytes (mandatory) (All zeros + // = Root subtree) CHUNK_ID: 0.. bytes (optional) Traversal instructions to + // the root of the given chunk. Traversal instructions are "1" for left, and + // "0" for right. TODO: Compact CHUNK_ID into bitset for size optimization + // as a subtree can be big hence traversal instructions for the deepest chunks + // tx: Transaction. Function returns the data by opening merks at given tx. 
+ // Returns the Chunk proof operators for the requested chunk encoded in bytes + pub fn fetch_chunk( + &self, + global_chunk_id: &[u8], + tx: TransactionArg, + version: u16, + grove_version: &GroveVersion, + ) -> Result, Error> { + check_grovedb_v0!( + "fetch_chunk", + grove_version.grovedb_versions.replication.fetch_chunk + ); + // For now, only CURRENT_STATE_SYNC_VERSION is supported + if version != CURRENT_STATE_SYNC_VERSION { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); } - result.push(GroveChunk { subtree_chunks }); + let root_app_hash = self.root_hash(tx, grove_version).value?; + let (chunk_prefix, chunk_id) = + replication::util_split_global_chunk_id(global_chunk_id, &root_app_hash)?; - Ok(()) - } -} - -/// `Restorer` wrapper that applies multiple chunks at once and eventually -/// returns less requests. It is named by analogy with IO types that do less -/// syscalls. -pub struct BufferedRestorer<'db> { - restorer: Restorer<'db>, -} + let subtrees_metadata = self.get_subtrees_metadata(tx, grove_version)?; -impl<'db> BufferedRestorer<'db> { - /// New - pub fn new(restorer: Restorer<'db>) -> Self { - BufferedRestorer { restorer } - } + match subtrees_metadata.data.get(&chunk_prefix) { + Some(path_data) => { + let subtree = &path_data.0; + let subtree_path: Vec<&[u8]> = subtree.iter().map(|vec| vec.as_slice()).collect(); + let path: &[&[u8]] = &subtree_path; - /// Process next chunk and receive instruction on what to do next. 
- pub fn process_grove_chunks(&mut self, chunks: I) -> Result - where - I: IntoIterator + ExactSizeIterator, - { - let mut response = RestorerResponse::Ready; - - for c in chunks.into_iter() { - for ops in c.subtree_chunks.into_iter().map(|x| x.1) { - if !ops.is_empty() { - response = self.restorer.process_chunk(ops)?; - } - } - } + match tx { + None => { + let merk = self + .open_non_transactional_merk_at_path(path.into(), None, grove_version) + .value?; - Ok(response) - } -} - -#[cfg(test)] -mod test { - use rand::RngCore; - use tempfile::TempDir; + if merk.is_empty_tree().unwrap() { + return Ok(vec![]); + } - use super::*; - use crate::{ - batch::GroveDbOp, - reference_path::ReferencePathType, - tests::{common::EMPTY_PATH, make_test_grovedb, TempGroveDb, ANOTHER_TEST_LEAF, TEST_LEAF}, - }; + let chunk_producer_res = ChunkProducer::new(&merk); + match chunk_producer_res { + Ok(mut chunk_producer) => { + let chunk_res = chunk_producer.chunk(&chunk_id, grove_version); + match chunk_res { + Ok((chunk, _)) => match util_encode_vec_ops(chunk) { + Ok(op_bytes) => Ok(op_bytes), + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), + }, + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), + } + } + Err(_) => Err(Error::CorruptedData( + "Unable to create Chunk producer".to_string(), + )), + } + } + Some(t) => { + let merk = self + .open_transactional_merk_at_path(path.into(), t, None, grove_version) + .value?; - fn replicate(original_db: &GroveDb) -> TempDir { - let replica_tempdir = TempDir::new().unwrap(); + if merk.is_empty_tree().unwrap() { + return Ok(vec![]); + } - { - let replica_db = GroveDb::open(replica_tempdir.path()).unwrap(); - let mut chunk_producer = original_db.chunks(); - let tx = replica_db.start_transaction(); - - let mut restorer = Restorer::new( - &replica_db, - original_db.root_hash(None).unwrap().unwrap(), - &tx, - ) - .expect("cannot create restorer"); - - // That means 
root tree chunk with index 0 - let mut next_chunk: (Vec>, usize) = (vec![], 0); - - loop { - let chunk = chunk_producer - .get_chunk(next_chunk.0.iter().map(|x| x.as_slice()), next_chunk.1) - .expect("cannot get next chunk"); - match restorer.process_chunk(chunk).expect("cannot process chunk") { - RestorerResponse::Ready => break, - RestorerResponse::AwaitNextChunk { path, index } => { - next_chunk = (path, index); + let chunk_producer_res = ChunkProducer::new(&merk); + match chunk_producer_res { + Ok(mut chunk_producer) => { + let chunk_res = chunk_producer.chunk(&chunk_id, grove_version); + match chunk_res { + Ok((chunk, _)) => match util_encode_vec_ops(chunk) { + Ok(op_bytes) => Ok(op_bytes), + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), + }, + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), + } + } + Err(_) => Err(Error::CorruptedData( + "Unable to create Chunk producer".to_string(), + )), + } } } } - - replica_db.commit_transaction(tx).unwrap().unwrap(); + None => Err(Error::CorruptedData("Prefix not found".to_string())), } - replica_tempdir } - fn replicate_bigger_messages(original_db: &GroveDb) -> TempDir { - let replica_tempdir = TempDir::new().unwrap(); + // Starts a state sync process (should be called by ABCI when OfferSnapshot + // method is called) Params: + // state_sync_info: Consumed StateSyncInfo + // app_hash: Snapshot's AppHash + // tx: Transaction for the state sync + // Returns the StateSyncInfo transferring ownership back to the caller) + pub fn start_snapshot_syncing<'db>( + &'db self, + mut state_sync_info: MultiStateSyncInfo<'db>, + app_hash: CryptoHash, + tx: &'db Transaction, + version: u16, + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "start_snapshot_syncing", + grove_version + .grovedb_versions + .replication + .start_snapshot_syncing + ); + // For now, only CURRENT_STATE_SYNC_VERSION is supported + if version != 
CURRENT_STATE_SYNC_VERSION { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + if version != state_sync_info.version { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + if !state_sync_info.current_prefixes.is_empty() + || !state_sync_info.processed_prefixes.is_empty() { - let replica_grove_db = GroveDb::open(replica_tempdir.path()).unwrap(); - let mut chunk_producer = SiblingsChunkProducer::new(original_db.chunks()); - let tx = replica_grove_db.start_transaction(); - - let mut restorer = BufferedRestorer::new( - Restorer::new( - &replica_grove_db, - original_db.root_hash(None).unwrap().unwrap(), - &tx, - ) - .expect("cannot create restorer"), - ); - - // That means root tree chunk with index 0 - let mut next_chunk: (Vec>, usize) = (vec![], 0); - - loop { - let chunks = chunk_producer - .get_chunk(next_chunk.0.iter().map(|x| x.as_slice()), next_chunk.1) - .expect("cannot get next chunk"); - match restorer - .process_grove_chunks(chunks.into_iter()) - .expect("cannot process chunk") - { - RestorerResponse::Ready => break, - RestorerResponse::AwaitNextChunk { path, index } => { - next_chunk = (path, index); - } - } - } - - replica_grove_db.commit_transaction(tx).unwrap().unwrap(); + return Err(Error::InternalError( + "GroveDB has already started a snapshot syncing".to_string(), + )); } - replica_tempdir - } - - fn test_replication_internal<'a, I, R, F>( - original_db: &TempGroveDb, - to_compare: I, - replicate_fn: F, - ) where - R: AsRef<[u8]> + 'a, - I: Iterator, - F: Fn(&GroveDb) -> TempDir, - { - let expected_root_hash = original_db.root_hash(None).unwrap().unwrap(); - - let replica_tempdir = replicate_fn(original_db); - - let replica = GroveDb::open(replica_tempdir.path()).unwrap(); - assert_eq!( - replica.root_hash(None).unwrap().unwrap(), - expected_root_hash + println!( + " starting:{:?}...", + replication::util_path_to_string(&[]) ); - for full_path in 
to_compare { - let (key, path) = full_path.split_last().unwrap(); - assert_eq!( - original_db.get(path, key.as_ref(), None).unwrap().unwrap(), - replica.get(path, key.as_ref(), None).unwrap().unwrap() - ); + let mut root_prefix_state_sync_info = SubtreeStateSyncInfo::default(); + let root_prefix = [0u8; 32]; + if let Ok(merk) = self.open_merk_for_replication(SubtreePath::empty(), tx, grove_version) { + let restorer = Restorer::new(merk, app_hash, None); + root_prefix_state_sync_info.restorer = Some(restorer); + root_prefix_state_sync_info.pending_chunks.insert(vec![]); + state_sync_info + .current_prefixes + .insert(root_prefix, root_prefix_state_sync_info); + state_sync_info.app_hash = app_hash; + } else { + return Err(Error::InternalError( + "Unable to open merk for replication".to_string(), + )); } - } - - fn test_replication<'a, I, R>(original_db: &TempGroveDb, to_compare: I) - where - R: AsRef<[u8]> + 'a, - I: Iterator + Clone, - { - test_replication_internal(original_db, to_compare.clone(), replicate); - test_replication_internal(original_db, to_compare, replicate_bigger_messages); - } - #[test] - fn replicate_wrong_root_hash() { - let db = make_test_grovedb(); - let mut bad_hash = db.root_hash(None).unwrap().unwrap(); - bad_hash[0] = bad_hash[0].wrapping_add(1); - - let tmp_dir = TempDir::new().unwrap(); - let restored_db = GroveDb::open(tmp_dir.path()).unwrap(); - let tx = restored_db.start_transaction(); - let mut restorer = Restorer::new(&restored_db, bad_hash, &tx).unwrap(); - let mut chunks = db.chunks(); - assert!(restorer - .process_chunk(chunks.get_chunk([], 0).unwrap()) - .is_err()); + Ok(state_sync_info) } - #[test] - fn replicate_provide_wrong_tree() { - let db = make_test_grovedb(); - db.insert( - &[TEST_LEAF], - b"key1", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key1", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() 
- .expect("cannot insert an element"); - - let expected_hash = db.root_hash(None).unwrap().unwrap(); - - let tmp_dir = TempDir::new().unwrap(); - let restored_db = GroveDb::open(tmp_dir.path()).unwrap(); - let tx = restored_db.start_transaction(); - let mut restorer = Restorer::new(&restored_db, expected_hash, &tx).unwrap(); - let mut chunks = db.chunks(); - - let next_op = restorer - .process_chunk(chunks.get_chunk([], 0).unwrap()) - .unwrap(); - match next_op { - RestorerResponse::AwaitNextChunk { path, index } => { - // Feed restorer a wrong Merk! - let chunk = if path == [TEST_LEAF] { - chunks.get_chunk([ANOTHER_TEST_LEAF], index).unwrap() - } else { - chunks.get_chunk([TEST_LEAF], index).unwrap() - }; - assert!(restorer.process_chunk(chunk).is_err()); - } - _ => {} + // Apply a chunk (should be called by ABCI when ApplySnapshotChunk method is + // called) Params: + // state_sync_info: Consumed MultiStateSyncInfo + // global_chunk_id: Global chunk id + // chunk: Chunk proof operators encoded in bytes + // tx: Transaction for the state sync + // Returns the next set of global chunk ids that can be fetched from sources (+ + // the MultiStateSyncInfo transferring ownership back to the caller) + pub fn apply_chunk<'db>( + &'db self, + mut state_sync_info: MultiStateSyncInfo<'db>, + global_chunk_id: &[u8], + chunk: Vec, + tx: &'db Transaction, + version: u16, + grove_version: &GroveVersion, + ) -> Result<(Vec>, MultiStateSyncInfo), Error> { + check_grovedb_v0!( + "apply_chunk", + grove_version.grovedb_versions.replication.apply_chunk + ); + // For now, only CURRENT_STATE_SYNC_VERSION is supported + if version != CURRENT_STATE_SYNC_VERSION { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + if version != state_sync_info.version { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); } - } - - #[test] - fn replicate_nested_grovedb() { - let db = make_test_grovedb(); - 
db.insert( - &[TEST_LEAF], - b"key1", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[TEST_LEAF], - b"key2", - Element::new_reference(ReferencePathType::SiblingReference(b"key1".to_vec())), - None, - None, - ) - .unwrap() - .expect("should insert reference"); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2"], - b"key3", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2", b"key3"], - b"key4", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - - let to_compare = [ - [TEST_LEAF].as_ref(), - [TEST_LEAF, b"key1"].as_ref(), - [TEST_LEAF, b"key2"].as_ref(), - [ANOTHER_TEST_LEAF].as_ref(), - [ANOTHER_TEST_LEAF, b"key2"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"key3"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"key3", b"key4"].as_ref(), - ]; - test_replication(&db, to_compare.into_iter()); - } - #[test] - fn replicate_nested_grovedb_with_sum_trees() { - let db = make_test_grovedb(); - db.insert( - &[TEST_LEAF], - b"key1", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[TEST_LEAF], - b"key2", - Element::new_reference(ReferencePathType::SiblingReference(b"key1".to_vec())), - None, - None, - ) - .unwrap() - .expect("should insert reference"); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key2", - Element::empty_sum_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2"], - b"sumitem", - Element::new_sum_item(15), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2"], - b"key3", - Element::empty_tree(), - None, - 
None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2", b"key3"], - b"key4", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - - let to_compare = [ - [TEST_LEAF].as_ref(), - [TEST_LEAF, b"key1"].as_ref(), - [TEST_LEAF, b"key2"].as_ref(), - [ANOTHER_TEST_LEAF].as_ref(), - [ANOTHER_TEST_LEAF, b"key2"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"sumitem"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"key3"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"key3", b"key4"].as_ref(), - ]; - test_replication(&db, to_compare.into_iter()); - } + let mut next_chunk_ids = vec![]; - // TODO: Highlights a bug in replication - #[test] - fn replicate_grovedb_with_sum_tree() { - let db = make_test_grovedb(); - db.insert(&[TEST_LEAF], b"key1", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[TEST_LEAF, b"key1"], - b"key2", - Element::new_item(vec![4]), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[TEST_LEAF, b"key1"], - b"key3", - Element::new_item(vec![10]), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - - let to_compare = [ - [TEST_LEAF].as_ref(), - [ANOTHER_TEST_LEAF].as_ref(), - [TEST_LEAF, b"key1"].as_ref(), - [TEST_LEAF, b"key1", b"key2"].as_ref(), - [TEST_LEAF, b"key1", b"key3"].as_ref(), - ]; - test_replication(&db, to_compare.into_iter()); - } + let (chunk_prefix, chunk_id) = + replication::util_split_global_chunk_id(global_chunk_id, &state_sync_info.app_hash)?; - #[test] - fn replicate_a_big_one() { - const HEIGHT: usize = 3; - const SUBTREES_FOR_EACH: usize = 3; - const SCALARS_FOR_EACH: usize = 600; - - let db = make_test_grovedb(); - let mut to_compare = Vec::new(); - - let mut rng = rand::thread_rng(); - let mut subtrees: VecDeque> = VecDeque::new(); - - // Generate root tree leafs - for _ in 0..SUBTREES_FOR_EACH { - let mut bytes = [0; 8]; - 
rng.fill_bytes(&mut bytes); - db.insert(EMPTY_PATH, &bytes, Element::empty_tree(), None, None) - .unwrap() - .unwrap(); - subtrees.push_front(vec![bytes]); - to_compare.push(vec![bytes]); + if state_sync_info.current_prefixes.is_empty() { + return Err(Error::InternalError( + "GroveDB is not in syncing mode".to_string(), + )); } + if let Some(subtree_state_sync) = state_sync_info.current_prefixes.remove(&chunk_prefix) { + if let Ok((res, mut new_subtree_state_sync)) = + self.apply_inner_chunk(subtree_state_sync, &chunk_id, chunk, grove_version) + { + if !res.is_empty() { + for local_chunk_id in res.iter() { + let mut next_global_chunk_id = chunk_prefix.to_vec(); + next_global_chunk_id.extend(local_chunk_id.to_vec()); + next_chunk_ids.push(next_global_chunk_id); + } - while let Some(path) = subtrees.pop_front() { - let mut batch = Vec::new(); + // re-insert subtree_state_sync in state_sync_info + state_sync_info + .current_prefixes + .insert(chunk_prefix, new_subtree_state_sync); + Ok((next_chunk_ids, state_sync_info)) + } else { + if !new_subtree_state_sync.pending_chunks.is_empty() { + // re-insert subtree_state_sync in state_sync_info + state_sync_info + .current_prefixes + .insert(chunk_prefix, new_subtree_state_sync); + return Ok((vec![], state_sync_info)); + } - if path.len() < HEIGHT { - for _ in 0..SUBTREES_FOR_EACH { - let mut bytes = [0; 8]; - rng.fill_bytes(&mut bytes); + // Subtree is finished. We can save it. + match new_subtree_state_sync.restorer.take() { + None => Err(Error::InternalError( + "Unable to finalize subtree".to_string(), + )), + Some(restorer) => { + if (new_subtree_state_sync.num_processed_chunks > 0) + && (restorer.finalize(grove_version).is_err()) + { + return Err(Error::InternalError( + "Unable to finalize Merk".to_string(), + )); + } + state_sync_info.processed_prefixes.insert(chunk_prefix); + + // Subtree was successfully save. 
Time to discover new subtrees that + // need to be processed + let subtrees_metadata = + self.get_subtrees_metadata(Some(tx), grove_version)?; + if let Some(value) = subtrees_metadata.data.get(&chunk_prefix) { + println!( + " path:{:?} done (num_processed_chunks:{:?})", + replication::util_path_to_string(&value.0), + new_subtree_state_sync.num_processed_chunks + ); + } + + if let Ok((res, new_state_sync_info)) = self.discover_subtrees( + state_sync_info, + subtrees_metadata, + tx, + grove_version, + ) { + next_chunk_ids.extend(res); + Ok((next_chunk_ids, new_state_sync_info)) + } else { + Err(Error::InternalError( + "Unable to discover Subtrees".to_string(), + )) + } + } + } + } + } else { + Err(Error::InternalError( + "Unable to process incoming chunk".to_string(), + )) + } + } else { + Err(Error::InternalError("Invalid incoming prefix".to_string())) + } + } - batch.push(GroveDbOp::insert_op( - path.iter().map(|x| x.to_vec()).collect(), - bytes.to_vec(), - Element::empty_tree(), + // Apply a chunk using the given SubtreeStateSyncInfo + // state_sync_info: Consumed SubtreeStateSyncInfo + // chunk_id: Local chunk id + // chunk_data: Chunk proof operators encoded in bytes + // Returns the next set of global chunk ids that can be fetched from sources (+ + // the SubtreeStateSyncInfo transferring ownership back to the caller) + fn apply_inner_chunk<'db>( + &'db self, + mut state_sync_info: SubtreeStateSyncInfo<'db>, + chunk_id: &[u8], + chunk_data: Vec, + grove_version: &GroveVersion, + ) -> Result<(Vec>, SubtreeStateSyncInfo), Error> { + let mut res = vec![]; + + match &mut state_sync_info.restorer { + Some(restorer) => { + if !state_sync_info.pending_chunks.contains(chunk_id) { + return Err(Error::InternalError( + "Incoming global_chunk_id not expected".to_string(), )); - - let mut new_path = path.clone(); - new_path.push(bytes); - subtrees.push_front(new_path.clone()); - to_compare.push(new_path.clone()); + } + state_sync_info.pending_chunks.remove(chunk_id); + if 
!chunk_data.is_empty() { + match util_decode_vec_ops(chunk_data) { + Ok(ops) => { + match restorer.process_chunk(chunk_id, ops, grove_version) { + Ok(next_chunk_ids) => { + state_sync_info.num_processed_chunks += 1; + for next_chunk_id in next_chunk_ids { + state_sync_info + .pending_chunks + .insert(next_chunk_id.clone()); + res.push(next_chunk_id); + } + } + _ => { + return Err(Error::InternalError( + "Unable to process incoming chunk".to_string(), + )); + } + }; + } + Err(_) => { + return Err(Error::CorruptedData( + "Unable to decode incoming chunk".to_string(), + )); + } + } } } - - for _ in 0..SCALARS_FOR_EACH { - let mut bytes = [0; 8]; - let mut bytes_val = vec![]; - rng.fill_bytes(&mut bytes); - rng.fill_bytes(&mut bytes_val); - - batch.push(GroveDbOp::insert_op( - path.iter().map(|x| x.to_vec()).collect(), - bytes.to_vec(), - Element::new_item(bytes_val), + _ => { + return Err(Error::InternalError( + "Invalid internal state (restorer".to_string(), )); - - let mut new_path = path.clone(); - new_path.push(bytes); - to_compare.push(new_path.clone()); } - - db.apply_batch(batch, None, None).unwrap().unwrap(); } - test_replication(&db, to_compare.iter().map(|x| x.as_slice())); + Ok((res, state_sync_info)) } - #[test] - fn replicate_from_checkpoint() { - // Create a simple GroveDb first - let db = make_test_grovedb(); - db.insert( - &[TEST_LEAF], - b"key1", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key2", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - - // Save its state with checkpoint - let checkpoint_dir_parent = TempDir::new().unwrap(); - let checkpoint_dir = checkpoint_dir_parent.path().join("cp"); - db.create_checkpoint(&checkpoint_dir).unwrap(); - - // Alter the db to make difference between current state and checkpoint - db.delete(&[TEST_LEAF], b"key1", None, None) - .unwrap() - .unwrap(); - db.insert( - &[TEST_LEAF], - b"key3", - 
Element::new_item(b"ayyd".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key2", - Element::new_item(b"ayyc".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - - let checkpoint_db = GroveDb::open(&checkpoint_dir).unwrap(); - - // Ensure checkpoint differs from current state - assert_ne!( - checkpoint_db - .get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - db.get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - ); - - // Build a replica from checkpoint - let replica_dir = replicate(&checkpoint_db); - let replica_db = GroveDb::open(&replica_dir).unwrap(); - - assert_eq!( - checkpoint_db.root_hash(None).unwrap().unwrap(), - replica_db.root_hash(None).unwrap().unwrap() - ); - - assert_eq!( - checkpoint_db - .get(&[TEST_LEAF], b"key1", None) - .unwrap() - .unwrap(), - replica_db - .get(&[TEST_LEAF], b"key1", None) - .unwrap() - .unwrap(), - ); - assert_eq!( - checkpoint_db - .get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - replica_db - .get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - ); - assert!(matches!( - replica_db.get(&[TEST_LEAF], b"key3", None).unwrap(), - Err(Error::PathKeyNotFound(_)) - )); + // Prepares SubtreeStateSyncInfos for the freshly discovered subtrees in + // subtrees_metadata and returns the root global chunk ids for all of those + // new subtrees. 
state_sync_info: Consumed MultiStateSyncInfo + // subtrees_metadata: Metadata about discovered subtrees + // chunk_data: Chunk proof operators + // Returns the next set of global chunk ids that can be fetched from sources (+ + // the MultiStateSyncInfo transferring ownership back to the caller) + fn discover_subtrees<'db>( + &'db self, + mut state_sync_info: MultiStateSyncInfo<'db>, + subtrees_metadata: SubtreesMetadata, + tx: &'db Transaction, + grove_version: &GroveVersion, + ) -> Result<(Vec>, MultiStateSyncInfo), Error> { + let mut res = vec![]; + + for (prefix, prefix_metadata) in &subtrees_metadata.data { + if !state_sync_info.processed_prefixes.contains(prefix) + && !state_sync_info.current_prefixes.contains_key(prefix) + { + let (current_path, s_actual_value_hash, s_elem_value_hash) = &prefix_metadata; + + let subtree_path: Vec<&[u8]> = + current_path.iter().map(|vec| vec.as_slice()).collect(); + let path: &[&[u8]] = &subtree_path; + println!( + " path:{:?} starting...", + replication::util_path_to_string(&prefix_metadata.0) + ); + + let mut subtree_state_sync_info = SubtreeStateSyncInfo::default(); + if let Ok(merk) = self.open_merk_for_replication(path.into(), tx, grove_version) { + let restorer = + Restorer::new(merk, *s_elem_value_hash, Some(*s_actual_value_hash)); + subtree_state_sync_info.restorer = Some(restorer); + subtree_state_sync_info.pending_chunks.insert(vec![]); + + state_sync_info + .current_prefixes + .insert(*prefix, subtree_state_sync_info); + + let root_chunk_prefix = prefix.to_vec(); + res.push(root_chunk_prefix.to_vec()); + } else { + return Err(Error::InternalError( + "Unable to open Merk for replication".to_string(), + )); + } + } + } - // Drop original db and checkpoint dir too to ensure there is no dependency - drop(db); - drop(checkpoint_db); - drop(checkpoint_dir); - - assert_eq!( - replica_db - .get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - Element::new_item(b"ayyb".to_vec()) - ); + Ok((res, 
state_sync_info)) } } diff --git a/grovedb/src/tests/common.rs b/grovedb/src/tests/common.rs index 10f05b804..a02ef9c67 100644 --- a/grovedb/src/tests/common.rs +++ b/grovedb/src/tests/common.rs @@ -1,34 +1,7 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Common tests use grovedb_path::SubtreePath; +use grovedb_version::version::GroveVersion; use crate::{operations::proof::util::ProvedPathKeyValues, Element, Error}; @@ -45,10 +18,10 @@ pub fn compare_result_tuples( } fn deserialize_and_extract_item_bytes(raw_bytes: &[u8]) -> Result, Error> { - let elem = Element::deserialize(raw_bytes)?; + let elem = Element::deserialize(raw_bytes, GroveVersion::latest())?; match elem { Element::Item(item, _) => Ok(item), - _ => Err(Error::CorruptedPath("expected only item type")), + _ => Err(Error::CorruptedPath("expected only item type".to_string())), } } diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index c67b119ea..226a047bb 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Tests pub mod common; @@ -41,14 +13,16 @@ use std::{ option::Option::None, }; +use grovedb_version::version::GroveVersion; use grovedb_visualize::{Drawer, Visualize}; use tempfile::TempDir; use self::common::EMPTY_PATH; use super::*; use crate::{ - query_result_type::QueryResultType::QueryKeyElementPairResultType, - reference_path::ReferencePathType, tests::common::compare_result_tuples, + query_result_type::{QueryResultType, QueryResultType::QueryKeyElementPairResultType}, + reference_path::ReferencePathType, + tests::common::compare_result_tuples, }; pub const TEST_LEAF: &[u8] = b"test_leaf"; @@ -94,36 +68,44 @@ pub fn make_empty_grovedb() -> TempGroveDb { } /// A helper method to create GroveDB with one leaf for a root tree -pub fn make_test_grovedb() -> TempGroveDb { +pub fn make_test_grovedb(grove_version: &GroveVersion) -> TempGroveDb { // Tree Structure // root // test_leaf // another_test_leaf let tmp_dir = TempDir::new().unwrap(); let mut db = GroveDb::open(tmp_dir.path()).unwrap(); - add_test_leaves(&mut db); + add_test_leaves(&mut db, grove_version); TempGroveDb { _tmp_dir: tmp_dir, grove_db: db, } } -fn add_test_leaves(db: &mut GroveDb) { - db.insert(EMPTY_PATH, TEST_LEAF, Element::empty_tree(), None, None) - .unwrap() - .expect("successful root tree leaf insert"); +fn add_test_leaves(db: &mut GroveDb, grove_version: &GroveVersion) { + db.insert( + EMPTY_PATH, + TEST_LEAF, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); db.insert( EMPTY_PATH, ANOTHER_TEST_LEAF, Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf 2 insert"); } -pub fn make_deep_tree() -> TempGroveDb { +pub fn make_deep_tree(grove_version: &GroveVersion) -> TempGroveDb { // Tree Structure // root // test_leaf @@ -157,13 +139,24 @@ pub fn make_deep_tree() -> TempGroveDb { // deeper_4 // k10,v10 // k11,v11 + // deeper_5 + // k12,v12 + // k13,v13 + // k14,v14 // Insert 
elements into grovedb instance - let temp_db = make_test_grovedb(); + let temp_db = make_test_grovedb(grove_version); // add an extra root leaf temp_db - .insert(EMPTY_PATH, DEEP_LEAF, Element::empty_tree(), None, None) + .insert( + EMPTY_PATH, + DEEP_LEAF, + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() .expect("successful root tree leaf insert"); @@ -175,6 +168,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -185,6 +179,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -195,6 +190,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -205,6 +201,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -215,6 +212,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -225,6 +223,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -236,6 +235,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -246,6 +246,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -256,6 +257,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -266,6 +268,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value4".to_vec()), None, 
None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -276,6 +279,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value5".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -286,6 +290,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -296,6 +301,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value4".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -306,6 +312,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -316,6 +323,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -326,6 +334,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -336,6 +345,18 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_2"].as_ref(), + b"deeper_5", + Element::empty_tree(), + None, + None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -347,6 +368,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -357,6 +379,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -367,6 +390,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -377,6 
+401,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value4".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -387,6 +412,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value5".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -397,6 +423,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value6".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -408,6 +435,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value7".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -418,6 +446,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value8".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -428,6 +457,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value9".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -438,6 +468,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value10".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -448,2462 +479,3439 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value11".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); temp_db -} - -#[test] -fn test_init() { - let tmp_dir = TempDir::new().unwrap(); - GroveDb::open(tmp_dir).expect("empty tree is ok"); -} - -#[test] -fn test_element_with_flags() { - let db = make_test_grovedb(); - - db.insert( - [TEST_LEAF.as_ref()].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"elem1", - Element::new_item(b"flagless".to_vec()), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - 
db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"elem2", - Element::new_item_with_flags(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"elem3", - Element::new_tree_with_flags(None, Some([1].to_vec())), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - db.insert( - [TEST_LEAF, b"key1", b"elem3"].as_ref(), - b"elem4", - Element::new_reference_with_flags( - ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"key1".to_vec(), - b"elem2".to_vec(), - ]), - Some([9].to_vec()), - ), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - - let element_without_flag = db - .get([TEST_LEAF, b"key1"].as_ref(), b"elem1", None) - .unwrap() - .expect("should get successfully"); - let element_with_flag = db - .get([TEST_LEAF, b"key1"].as_ref(), b"elem2", None) - .unwrap() - .expect("should get successfully"); - let tree_element_with_flag = db - .get([TEST_LEAF, b"key1"].as_ref(), b"elem3", None) - .unwrap() - .expect("should get successfully"); - let flagged_ref_follow = db - .get([TEST_LEAF, b"key1", b"elem3"].as_ref(), b"elem4", None) - .unwrap() - .expect("should get successfully"); - - let mut query = Query::new(); - query.insert_key(b"elem4".to_vec()); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"elem3".to_vec()], - SizedQuery::new(query, None, None), - ); - let (flagged_ref_no_follow, _) = db - .query_raw(&path_query, true, QueryKeyElementPairResultType, None) - .unwrap() - .expect("should get successfully"); - - assert_eq!( - element_without_flag, - Element::Item(b"flagless".to_vec(), None) - ); - assert_eq!( - element_with_flag, - Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) - ); - assert_eq!(tree_element_with_flag.get_flags(), &Some([1].to_vec())); - assert_eq!( - flagged_ref_follow, - 
Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) - ); - assert_eq!( - flagged_ref_no_follow.to_key_elements()[0], - ( - b"elem4".to_vec(), - Element::Reference( - ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"key1".to_vec(), - b"elem2".to_vec() - ]), - None, - Some([9].to_vec()) - ) + .insert( + [DEEP_LEAF, b"deep_node_2", b"deeper_5"].as_ref(), + b"key12", + Element::new_item(b"value12".to_vec()), + None, + None, + grove_version, ) - ); - - // Test proofs with flags - let mut query = Query::new(); - query.insert_all(); - - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"key1".to_vec()], - SizedQuery::new(query, None, None), - ); - let proof = db - .prove_query(&path_query) - .unwrap() - .expect("should successfully create proof"); - let (root_hash, result_set) = - GroveDb::verify_query_raw(&proof, &path_query).expect("should verify proof"); - assert_eq!(root_hash, db.grove_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - assert_eq!( - Element::deserialize(&result_set[0].value).expect("should deserialize element"), - Element::Item(b"flagless".to_vec(), None) - ); - assert_eq!( - Element::deserialize(&result_set[1].value).expect("should deserialize element"), - Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) - ); - assert_eq!( - Element::deserialize(&result_set[2].value) - .expect("should deserialize element") - .get_flags(), - &Some([1].to_vec()) - ); -} - -#[test] -fn test_cannot_update_populated_tree_item() { - // This test shows that you cannot update a tree item - // in a way that disconnects it's root hash from that of - // the merk it points to. 
- let db = make_deep_tree(); - - let old_element = db - .get([TEST_LEAF].as_ref(), b"innertree", None) - .unwrap() - .expect("should fetch item"); - - let new_element = Element::empty_tree(); - db.insert( - [TEST_LEAF].as_ref(), - b"innertree", - new_element.clone(), - None, - None, - ) - .unwrap() - .expect_err("should not override tree"); - - let current_element = db - .get([TEST_LEAF].as_ref(), b"innertree", None) .unwrap() - .expect("should fetch item"); - - assert_eq!(current_element, old_element); - assert_ne!(current_element, new_element); -} - -#[test] -fn test_changes_propagated() { - let db = make_test_grovedb(); - let old_hash = db.root_hash(None).unwrap().unwrap(); - let element = Element::new_item(b"ayy".to_vec()); - - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); - - db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - - assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("successful get"), - element - ); - assert_ne!(old_hash, db.root_hash(None).unwrap().unwrap()); -} - -// TODO: Add solid test cases to this - -#[test] -fn test_references() { - let db = make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"merk_1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"merk_1"].as_ref(), - b"key1", - Element::new_item(b"value1".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"merk_1"].as_ref(), - b"key2", - Element::new_item(b"value2".to_vec()), - None, - None, - ) - .unwrap() - 
.expect("successful subtree insert"); - - db.insert( - [TEST_LEAF].as_ref(), - b"merk_2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // db.insert([TEST_LEAF, b"merk_2"].as_ref(), b"key2", - // Element::new_item(b"value2".to_vec()), None).expect("successful subtree - // insert"); - db.insert( - [TEST_LEAF, b"merk_2"].as_ref(), - b"key1", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"merk_1".to_vec(), - b"key1".to_vec(), - ])), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"merk_2"].as_ref(), - b"key2", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"merk_1".to_vec(), - b"key2".to_vec(), - ])), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - assert!(db - .get([TEST_LEAF].as_ref(), b"merk_1", None) + .expect("successful subtree insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_2", b"deeper_5"].as_ref(), + b"key13", + Element::new_item(b"value13".to_vec()), + None, + None, + grove_version, + ) .unwrap() - .is_ok()); - assert!(db - .get([TEST_LEAF].as_ref(), b"merk_2", None) + .expect("successful subtree insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_2", b"deeper_5"].as_ref(), + b"key14", + Element::new_item(b"value14".to_vec()), + None, + None, + grove_version, + ) .unwrap() - .is_ok()); -} - -#[test] -fn test_follow_references() { - let db = make_test_grovedb(); - let element = Element::new_item(b"ayy".to_vec()); - - // Insert an item to refer to - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"key3", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - - // Insert a reference - db.insert( - [TEST_LEAF].as_ref(), - b"reference_key", - 
Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - ])), - None, - None, - ) - .unwrap() - .expect("successful reference insert"); - - assert_eq!( - db.get([TEST_LEAF].as_ref(), b"reference_key", None) - .unwrap() - .expect("successful get"), - element - ); + .expect("successful subtree insert"); + temp_db } -#[test] -fn test_reference_must_point_to_item() { - let db = make_test_grovedb(); - - let result = db +pub fn make_deep_tree_with_sum_trees(grove_version: &GroveVersion) -> TempGroveDb { + // Tree Structure + // root + // deep_leaf + // deep_node_1 + // "" -> "empty" + // a -> "storage" + // c + // 1 (sum tree) + // [0;32], 1 + // [1;32], 1 + // d + // 0,v1 + // 1 (sum tree) + // [0;32], 4 + // [1;32], 1 + // e + // 0,v4 + // 1 (sum tree) + // [0;32], 1 + // [1;32], 4 + // f + // 0,v1 + // 1 (sum tree) + // [0;32], 1 + // [1;32], 4 + // g + // 0,v4 + // 1 (sum tree) + // [3;32], 4 + // [5;32], 4 + // h -> "h" + // .. -> .. 
+ // z -> "z" + + let temp_db = make_test_grovedb(grove_version); + + // Add deep_leaf to root + temp_db .insert( - [TEST_LEAF].as_ref(), - b"reference_key_1", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"reference_key_2".to_vec(), - ])), + EMPTY_PATH, + DEEP_LEAF, + Element::empty_tree(), None, None, + grove_version, ) - .unwrap(); - - assert!(matches!(result, Err(Error::MissingReference(_)))); -} - -#[test] -fn test_too_many_indirections() { - use crate::operations::get::MAX_REFERENCE_HOPS; - let db = make_test_grovedb(); - - let keygen = |idx| format!("key{}", idx).bytes().collect::>(); - - db.insert( - [TEST_LEAF].as_ref(), - b"key0", - Element::new_item(b"oops".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful item insert"); + .unwrap() + .expect("successful root tree leaf insert"); - for i in 1..=(MAX_REFERENCE_HOPS) { - db.insert( - [TEST_LEAF].as_ref(), - &keygen(i), - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - keygen(i - 1), - ])), + // Add deep_node_1 to deep_leaf + temp_db + .insert( + [DEEP_LEAF].as_ref(), + b"deep_node_1", + Element::empty_tree(), None, None, + grove_version, ) .unwrap() - .expect("successful reference insert"); - } - - // Add one more reference - db.insert( - [TEST_LEAF].as_ref(), - &keygen(MAX_REFERENCE_HOPS + 1), - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - keygen(MAX_REFERENCE_HOPS), - ])), - None, - None, - ) - .unwrap() - .expect("expected insert"); - - let result = db - .get([TEST_LEAF].as_ref(), &keygen(MAX_REFERENCE_HOPS + 1), None) - .unwrap(); + .expect("successful subtree insert"); - assert!(matches!(result, Err(Error::ReferenceLimit))); -} + // Add a -> "storage" to deep_node_1 + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1"].as_ref(), + b"", + Element::new_item("empty".as_bytes().to_vec()), + None, + None, + grove_version, + ) + .unwrap() + 
.expect("successful item insert"); -#[test] -fn test_reference_value_affects_state() { - let db_one = make_test_grovedb(); - db_one + // Add a -> "storage" to deep_node_1 + temp_db .insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::new_item(vec![0]), + [DEEP_LEAF, b"deep_node_1"].as_ref(), + b"a", + Element::new_item("storage".as_bytes().to_vec()), None, None, + grove_version, ) .unwrap() - .expect("should insert item"); - db_one + .expect("successful item insert"); + + // Add c, d, e, f, g to deep_node_1 + for key in [b"c", b"d", b"e", b"f", b"g"].iter() { + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1"].as_ref(), + key.as_slice(), + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + } + + // Add sum tree to c + temp_db .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"ref", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"key1".to_vec(), - ])), + [DEEP_LEAF, b"deep_node_1", b"c"].as_ref(), + b"1", + Element::new_sum_tree(None), None, None, + grove_version, ) .unwrap() - .expect("should insert item"); + .expect("successful sum tree insert"); - let db_two = make_test_grovedb(); - db_two + // Add items to sum tree in c + temp_db .insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::new_item(vec![0]), + [DEEP_LEAF, b"deep_node_1", b"c", b"1"].as_ref(), + &[0; 32], + Element::SumItem(1, None), None, None, + grove_version, ) .unwrap() - .expect("should insert item"); - db_two + .expect("successful sum item insert"); + temp_db .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"ref", - Element::new_reference(ReferencePathType::UpstreamRootHeightReference( - 0, - vec![TEST_LEAF.to_vec(), b"key1".to_vec()], - )), + [DEEP_LEAF, b"deep_node_1", b"c", b"1"].as_ref(), + &[1; 32], + Element::SumItem(1, None), None, None, + grove_version, ) .unwrap() - .expect("should insert item"); + .expect("successful sum item insert"); - assert_ne!( - db_one - .root_hash(None) + // Add 
items to 4, 5, 6, 7 + for (key, value) in [(b"d", b"v1"), (b"e", b"v4"), (b"f", b"v1"), (b"g", b"v4")].iter() { + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", key.as_slice()].as_ref(), + b"0", + Element::new_item(value.to_vec()), + None, + None, + grove_version, + ) .unwrap() - .expect("should return root hash"), - db_two - .root_hash(None) + .expect("successful item insert"); + + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", key.as_slice()].as_ref(), + b"1", + Element::new_sum_tree(None), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful sum tree insert"); + } + + // Add items to sum trees in d, e, f + for key in [b"d", b"e", b"f"].iter() { + let (value1, value2) = if *key == b"d" { (4, 1) } else { (1, 4) }; + + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", key.as_slice(), b"1"].as_ref(), + &[0; 32], + Element::SumItem(value1, None), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful sum item insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", key.as_slice(), b"1"].as_ref(), + &[1; 32], + Element::SumItem(value2, None), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful sum item insert"); + } + + // Add items to sum tree in 7 + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", b"g", b"1"].as_ref(), + &[3; 32], + Element::SumItem(4, None), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful sum item insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", b"g", b"1"].as_ref(), + &[5; 32], + Element::SumItem(4, None), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful sum item insert"); + + // Add entries for all letters from "h" to "z" + for letter in b'h'..=b'z' { + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1"].as_ref(), + &[letter], + Element::new_item(vec![letter]), + None, + None, + grove_version, + ) .unwrap() - .expect("should return toor hash") - ); + .expect(&format!("successful item insert for {}", letter as char)); + } + + temp_db } 
-#[test] -fn test_tree_structure_is_persistent() { - let tmp_dir = TempDir::new().unwrap(); - let element = Element::new_item(b"ayy".to_vec()); - // Create a scoped GroveDB - let prev_root_hash = { - let mut db = GroveDb::open(tmp_dir.path()).unwrap(); - add_test_leaves(&mut db); +mod tests { + use super::*; + + #[test] + fn test_init() { + let tmp_dir = TempDir::new().unwrap(); + GroveDb::open(tmp_dir).expect("empty tree is ok"); + } + + #[test] + fn test_element_with_flags() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - // Insert some nested subtrees db.insert( [TEST_LEAF].as_ref(), b"key1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() - .expect("successful subtree 1 insert"); + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), + b"elem1", + Element::new_item(b"flagless".to_vec()), None, None, + grove_version, ) .unwrap() - .expect("successful subtree 2 insert"); - // Insert an element into subtree + .expect("should insert subtree successfully"); db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element.clone(), + [TEST_LEAF, b"key1"].as_ref(), + b"elem2", + Element::new_item_with_flags(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())), None, None, + grove_version, ) .unwrap() - .expect("successful value insert"); - assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("successful get 1"), - element - ); - db.root_hash(None).unwrap().unwrap() - }; - // Open a persisted GroveDB - let db = GroveDb::open(tmp_dir).unwrap(); - assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("successful get 2"), - element - ); - assert!(db - .get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key4", None) - .unwrap() - .is_err()); - assert_eq!(prev_root_hash, db.root_hash(None).unwrap().unwrap()); -} - -#[test] -fn 
test_root_tree_leaves_are_noted() { - let db = make_test_grovedb(); - db.check_subtree_exists_path_not_found([TEST_LEAF].as_ref().into(), None) + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"elem3", + Element::new_tree_with_flags(None, Some([1].to_vec())), + None, + None, + grove_version, + ) .unwrap() - .expect("should exist"); - db.check_subtree_exists_path_not_found([ANOTHER_TEST_LEAF].as_ref().into(), None) + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"key1", b"elem3"].as_ref(), + b"elem4", + Element::new_reference_with_flags( + ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"elem2".to_vec(), + ]), + Some([9].to_vec()), + ), + None, + None, + grove_version, + ) .unwrap() - .expect("should exist"); -} - -#[test] -fn test_proof_for_invalid_path_root_key() { - let db = make_test_grovedb(); - - let query = Query::new(); - let path_query = PathQuery::new_unsized(vec![b"invalid_path_key".to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} + .expect("should insert subtree successfully"); -#[test] -fn test_proof_for_invalid_path() { - let db = make_deep_tree(); - - let query = Query::new(); - let path_query = - PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"invalid_key".to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + let element_without_flag = db + .get([TEST_LEAF, b"key1"].as_ref(), b"elem1", None, grove_version) + .unwrap() + .expect("should get successfully"); + let element_with_flag = db + .get([TEST_LEAF, b"key1"].as_ref(), b"elem2", None, 
grove_version) + .unwrap() + .expect("should get successfully"); + let tree_element_with_flag = db + .get([TEST_LEAF, b"key1"].as_ref(), b"elem3", None, grove_version) + .unwrap() + .expect("should get successfully"); + let flagged_ref_follow = db + .get( + [TEST_LEAF, b"key1", b"elem3"].as_ref(), + b"elem4", + None, + grove_version, + ) + .unwrap() + .expect("should get successfully"); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); + let mut query = Query::new(); + query.insert_key(b"elem4".to_vec()); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"elem3".to_vec()], + SizedQuery::new(query, None, None), + ); + let (flagged_ref_no_follow, _) = db + .query_raw( + &path_query, + true, + true, + true, + QueryKeyElementPairResultType, + None, + grove_version, + ) + .unwrap() + .expect("should get successfully"); - let query = Query::new(); - let path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"invalid_key".to_vec(), - ], - query, - ); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); - - let query = Query::new(); - let path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec(), - b"invalid_key".to_vec(), - ], - query, - ); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); - - let query = Query::new(); - let path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"early_invalid_key".to_vec(), - b"deeper_1".to_vec(), - 
b"invalid_key".to_vec(), - ], - query, - ); + assert_eq!( + element_without_flag, + Element::Item(b"flagless".to_vec(), None) + ); + assert_eq!( + element_with_flag, + Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) + ); + assert_eq!(tree_element_with_flag.get_flags(), &Some([1].to_vec())); + assert_eq!( + flagged_ref_follow, + Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) + ); + assert_eq!( + flagged_ref_no_follow.to_key_elements()[0], + ( + b"elem4".to_vec(), + Element::Reference( + ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"elem2".to_vec() + ]), + None, + Some([9].to_vec()) + ) + ) + ); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + // Test proofs with flags + let mut query = Query::new(); + query.insert_all(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + SizedQuery::new(query, None, None), + ); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .expect("should successfully create proof"); + let (root_hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query, grove_version) + .expect("should verify proof"); + assert_eq!( + root_hash, + db.grove_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 3); + assert_eq!( + Element::deserialize(&result_set[0].value, grove_version) + .expect("should deserialize element"), + Element::Item(b"flagless".to_vec(), None) + ); + assert_eq!( + Element::deserialize(&result_set[1].value, grove_version) + .expect("should deserialize element"), + Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) + ); + assert_eq!( + Element::deserialize(&result_set[2].value, grove_version) + .expect("should 
deserialize element") + .get_flags(), + &Some([1].to_vec()) + ); + } -#[test] -fn test_proof_for_non_existent_data() { - let temp_db = make_test_grovedb(); + #[test] + fn test_cannot_update_populated_tree_item() { + let grove_version = GroveVersion::latest(); + // This test shows that you cannot update a tree item + // in a way that disconnects its root hash from that of + // the merk it points to. + let db = make_deep_tree(grove_version); - let mut query = Query::new(); - query.insert_key(b"key1".to_vec()); + let old_element = db + .get([TEST_LEAF].as_ref(), b"innertree", None, grove_version) + .unwrap() + .expect("should fetch item"); - // path to empty subtree - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + let new_element = Element::empty_tree(); + db.insert( + [TEST_LEAF].as_ref(), + b"innertree", + new_element.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect_err("should not override tree"); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + let current_element = db + .get([TEST_LEAF].as_ref(), b"innertree", None, grove_version) + .unwrap() + .expect("should fetch item"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} + assert_eq!(current_element, old_element); + assert_ne!(current_element, new_element); + } -#[test] -fn test_path_query_proofs_without_subquery_with_reference() { - // Tree Structure - // root - // test_leaf - // innertree - // k1,v1 - // k2,v2 - // k3,v3 - // another_test_leaf - // innertree2 - // k3,v3 - // k4, reference to k1 in innertree - // k5, reference to k4 in innertree3 - // innertree3 - // k4,v4 + #[test] + fn test_changes_propagated() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let old_hash = db.root_hash(None, grove_version).unwrap().unwrap(); + let 
element = Element::new_item(b"ayy".to_vec()); - // Insert elements into grovedb instance - let temp_db = make_test_grovedb(); - // Insert level 1 nodes - temp_db - .insert( + // Insert some nested subtrees + db.insert( [TEST_LEAF].as_ref(), - b"innertree", + b"key1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"innertree2", + .expect("successful subtree 1 insert"); + + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", Element::empty_tree(), None, None, + grove_version, ) .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"innertree3", + .expect("successful subtree 2 insert"); + + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + + assert_eq!( + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("successful get"), + element + ); + assert_ne!( + old_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); + } + + // TODO: Add solid test cases to this + + #[test] + fn test_references() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"merk_1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - // Insert level 2 nodes - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), + db.insert( + [TEST_LEAF, b"merk_1"].as_ref(), b"key1", Element::new_item(b"value1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), + db.insert( + [TEST_LEAF, b"merk_1"].as_ref(), b"key2", Element::new_item(b"value2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree 
insert"); - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), - b"key3", - Element::new_item(b"value3".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), - b"key3", - Element::new_item(b"value3".to_vec()), + + db.insert( + [TEST_LEAF].as_ref(), + b"merk_2", + Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), - b"key4", + // db.insert([TEST_LEAF, b"merk_2"].as_ref(), b"key2", + // Element::new_item(b"value2".to_vec()), None).expect("successful subtree + // insert"); + db.insert( + [TEST_LEAF, b"merk_2"].as_ref(), + b"key1", Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ TEST_LEAF.to_vec(), - b"innertree".to_vec(), + b"merk_1".to_vec(), b"key1".to_vec(), ])), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree3"].as_ref(), - b"key4", - Element::new_item(b"value4".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), - b"key5", + db.insert( + [TEST_LEAF, b"merk_2"].as_ref(), + b"key2", Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - ANOTHER_TEST_LEAF.to_vec(), - b"innertree3".to_vec(), - b"key4".to_vec(), + TEST_LEAF.to_vec(), + b"merk_1".to_vec(), + b"key2".to_vec(), ])), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); + assert!(db + .get([TEST_LEAF].as_ref(), b"merk_1", None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get([TEST_LEAF].as_ref(), b"merk_2", None, grove_version) + .unwrap() + .is_ok()); + } - // Single key query - let mut query = Query::new(); - query.insert_range_from(b"key4".to_vec()..); - - let path_query = PathQuery::new_unsized( - 
vec![ANOTHER_TEST_LEAF.to_vec(), b"innertree2".to_vec()], - query, - ); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - assert_eq!( - hex::encode(&proof), - "010285010198ebd6dc7e1c82951c41fcfa6487711cac6a399ebb01bb979cb\ - e4a51e0b2f08d06046b6579340009000676616c75653100bf2f052b01c2b\ - b83ff3a40504d42b5b9141c582a3e0c98679189b33a24478a6f1006046b6\ - 579350009000676616c75653400f084ffdbc429a89c9b6620e7224d73c2e\ - e505eb7e6fb5eb574e1a8dc8b0d0884110158040a696e6e6572747265653\ - 200080201046b657934008ba21f835b2ff60f16b7fccfbda107bec3da0c4\ - 709357d40de223d769547ec21013a090155ea7d14038c7062d94930798f8\ - 85a19d6ebff8a87489a1debf665604711015e02cfb7d035b8f4a3631be46\ - c597510a16770c15c74331b3dc8dcb577a206e49675040a746573745f6c6\ - 5616632000e02010a696e6e657274726565320049870f2813c0c3c5c105a\ - 988c0ef1372178245152fa9a43b209a6b6d95589bdc11" - ); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value1".to_vec()).serialize().unwrap(); - let r2 = Element::new_item(b"value4".to_vec()).serialize().unwrap(); - - compare_result_tuples( - result_set, - vec![(b"key4".to_vec(), r1), (b"key5".to_vec(), r2)], - ); -} + #[test] + fn test_follow_references() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let element = Element::new_item(b"ayy".to_vec()); -#[test] -fn test_path_query_proofs_without_subquery() { - // Tree Structure - // root - // test_leaf - // innertree - // k1,v1 - // k2,v2 - // k3,v3 - // another_test_leaf - // innertree2 - // k3,v3 - // innertree3 - // k4,v4 - - // Insert elements into grovedb instance - let temp_db = make_test_grovedb(); - // Insert level 1 nodes - temp_db - .insert( + // Insert an item to refer to + db.insert( [TEST_LEAF].as_ref(), - b"innertree", - Element::empty_tree(), - None, - None, - ) - .unwrap() - 
.expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"innertree2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"innertree3", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert level 2 nodes - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), - b"key1", - Element::new_item(b"value1".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), b"key2", - Element::new_item(b"value2".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), - b"key3", - Element::new_item(b"value3".to_vec()), + Element::empty_tree(), None, None, + grove_version, ) .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), b"key3", - Element::new_item(b"value3".to_vec()), + element.clone(), None, None, + grove_version, ) .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree3"].as_ref(), - b"key4", - Element::new_item(b"value4".to_vec()), + .expect("successful value insert"); + + // Insert a reference + db.insert( + [TEST_LEAF].as_ref(), + b"reference_key", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), + ])), None, None, + grove_version, ) .unwrap() - .expect("successful subtree insert"); - - // Single key query - let mut query = Query::new(); - query.insert_key(b"key1".to_vec()); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - - let proof = 
temp_db.prove_query(&path_query).unwrap().unwrap(); - assert_eq!( - hex::encode(proof.as_slice()), - "01025503046b6579310009000676616c7565310002018655e18e4555b0b65\ - bbcec64c749db6b9ad84231969fb4fbe769a3093d10f2100198ebd6dc7e1\ - c82951c41fcfa6487711cac6a399ebb01bb979cbe4a51e0b2f08d1101350\ - 409696e6e65727472656500080201046b657932004910536da659a3dbdbc\ - f68c4a6630e72de4ba20cfc60b08b3dd45b4225a599b6015c04097465737\ - 45f6c656166000d020109696e6e65727472656500fafa16d06e8d8696dae\ - 443731ae2a4eae521e4a9a79c331c8a7e22e34c0f1a6e01b55f830550604\ - 719833d54ce2bf139aff4bb699fa4111b9741633554318792c511" - ); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value1".to_vec()).serialize().unwrap(); - compare_result_tuples(result_set, vec![(b"key1".to_vec(), r1)]); - - // Range query + limit - let mut query = Query::new(); - query.insert_range_after(b"key1".to_vec()..); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - SizedQuery::new(query, Some(1), None), - ); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value2".to_vec()).serialize().unwrap(); - compare_result_tuples(result_set, vec![(b"key2".to_vec(), r1)]); - - // Range query + offset + limit - let mut query = Query::new(); - query.insert_range_after(b"key1".to_vec()..); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - SizedQuery::new(query, Some(1), Some(1)), - ); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - 
- assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value3".to_vec()).serialize().unwrap(); - compare_result_tuples(result_set, vec![(b"key3".to_vec(), r1)]); - - // Range query + direction + limit - let mut query = Query::new_with_direction(false); - query.insert_all(); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - SizedQuery::new(query, Some(2), None), - ); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value3".to_vec()).serialize().unwrap(); - let r2 = Element::new_item(b"value2".to_vec()).serialize().unwrap(); - compare_result_tuples( - result_set, - vec![(b"key3".to_vec(), r1), (b"key2".to_vec(), r2)], - ); -} - -#[test] -fn test_path_query_proofs_with_default_subquery() { - let temp_db = make_deep_tree(); - - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - - let keys = [ - b"key1".to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - b"key4".to_vec(), - b"key5".to_vec(), - ]; - let values = [ - b"value1".to_vec(), - b"value2".to_vec(), - b"value3".to_vec(), - b"value4".to_vec(), - b"value5".to_vec(), - ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, 
expected_result_set); - - let mut query = Query::new(); - query.insert_range_after(b"innertree".to_vec()..); - - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 2); - - let keys = [b"key4".to_vec(), b"key5".to_vec()]; - let values = [b"value4".to_vec(), b"value5".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // range subquery - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_range_after_to_inclusive(b"key1".to_vec()..=b"key4".to_vec()); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect( - "should - execute proof", - ); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key2".to_vec(), b"key3".to_vec(), b"key4".to_vec()]; - let values = [b"value2".to_vec(), b"value3".to_vec(), b"value4".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // deep tree test - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - let mut sub_subquery = 
Query::new(); - sub_subquery.insert_all(); - - subq.set_subquery(sub_subquery); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 11); - - let keys = [ - b"key1".to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - b"key4".to_vec(), - b"key5".to_vec(), - b"key6".to_vec(), - b"key7".to_vec(), - b"key8".to_vec(), - b"key9".to_vec(), - b"key10".to_vec(), - b"key11".to_vec(), - ]; - let values = [ - b"value1".to_vec(), - b"value2".to_vec(), - b"value3".to_vec(), - b"value4".to_vec(), - b"value5".to_vec(), - b"value6".to_vec(), - b"value7".to_vec(), - b"value8".to_vec(), - b"value9".to_vec(), - b"value10".to_vec(), - b"value11".to_vec(), - ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} - -#[test] -fn test_path_query_proofs_with_subquery_path() { - let temp_db = make_deep_tree(); - - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - query.set_subquery_key(b"deeper_1".to_vec()); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; - let values = [b"value1".to_vec(), b"value2".to_vec(), 
b"value3".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // test subquery path with valid n > 1 valid translation - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - query.set_subquery_path(vec![b"deep_node_1".to_vec(), b"deeper_1".to_vec()]); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; - let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // test subquery path with empty subquery path - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - query.set_subquery_path(vec![]); - query.set_subquery(subq); - - let path_query = - PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"deep_node_1".to_vec()], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 6); - - let keys = [ - b"key1".to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - b"key4".to_vec(), - b"key5".to_vec(), - b"key6".to_vec(), - ]; - let 
values = [ - b"value1".to_vec(), - b"value2".to_vec(), - b"value3".to_vec(), - b"value4".to_vec(), - b"value5".to_vec(), - b"value6".to_vec(), - ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // test subquery path with an invalid translation - // should generate a valid absence proof with an empty result set - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - query.set_subquery_path(vec![ - b"deep_node_1".to_vec(), - b"deeper_10".to_vec(), - b"another_invalid_key".to_vec(), - ]); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} - -#[test] -fn test_path_query_proofs_with_key_and_subquery() { - let temp_db = make_deep_tree(); - - let mut query = Query::new(); - query.insert_key(b"deep_node_1".to_vec()); + .expect("successful reference insert"); - let mut subq = Query::new(); - subq.insert_all(); + assert_eq!( + db.get([TEST_LEAF].as_ref(), b"reference_key", None, grove_version) + .unwrap() + .expect("successful get"), + element + ); + } - query.set_subquery_key(b"deeper_1".to_vec()); - query.set_subquery(subq); + #[test] + fn test_reference_must_point_to_item() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + let result = db + .insert( + [TEST_LEAF].as_ref(), + b"reference_key_1", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + 
b"reference_key_2".to_vec(), + ])), + None, + None, + grove_version, + ) + .unwrap(); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + assert!(matches!(result, Err(Error::MissingReference(_)))); + } - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); + #[test] + fn test_too_many_indirections() { + let grove_version = GroveVersion::latest(); + use crate::operations::get::MAX_REFERENCE_HOPS; + let db = make_test_grovedb(grove_version); - let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; - let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} + let keygen = |idx| format!("key{}", idx).bytes().collect::>(); -#[test] -fn test_path_query_proofs_with_conditional_subquery() { - let temp_db = make_deep_tree(); + db.insert( + [TEST_LEAF].as_ref(), + b"key0", + Element::new_item(b"oops".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful item insert"); - let mut query = Query::new(); - query.insert_all(); + for i in 1..=(MAX_REFERENCE_HOPS) { + db.insert( + [TEST_LEAF].as_ref(), + &keygen(i), + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + keygen(i - 1), + ])), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful reference insert"); + } - let mut subquery = Query::new(); - subquery.insert_all(); + // Add one more reference + db.insert( + [TEST_LEAF].as_ref(), + &keygen(MAX_REFERENCE_HOPS + 1), + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + keygen(MAX_REFERENCE_HOPS), + ])), + None, + None, + 
grove_version, + ) + .unwrap() + .expect("expected insert"); - let mut final_subquery = Query::new(); - final_subquery.insert_all(); + let result = db + .get( + [TEST_LEAF].as_ref(), + &keygen(MAX_REFERENCE_HOPS + 1), + None, + grove_version, + ) + .unwrap(); - subquery.add_conditional_subquery( - QueryItem::Key(b"deeper_4".to_vec()), - None, - Some(final_subquery), - ); - - query.set_subquery(subquery); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - - let keys = [ - b"deeper_1".to_vec(), - b"deeper_2".to_vec(), - b"deeper_3".to_vec(), - b"key10".to_vec(), - b"key11".to_vec(), - ]; - assert_eq!(result_set.len(), keys.len()); - - // TODO: Is this defined behaviour - for (index, key) in keys.iter().enumerate() { - assert_eq!(&result_set[index].key, key); + assert!(matches!(result, Err(Error::ReferenceLimit))); } - // Default + Conditional subquery - let mut query = Query::new(); - query.insert_all(); + #[test] + fn test_reference_value_affects_state() { + let grove_version = GroveVersion::latest(); + let db_one = make_test_grovedb(grove_version); + db_one + .insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::new_item(vec![0]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db_one + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"ref", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + ])), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); - let mut subquery = Query::new(); - subquery.insert_all(); + let db_two = make_test_grovedb(grove_version); + db_two + .insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::new_item(vec![0]), + None, + None, + 
grove_version, + ) + .unwrap() + .expect("should insert item"); + db_two + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"ref", + Element::new_reference(ReferencePathType::UpstreamRootHeightReference( + 0, + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + )), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); - let mut final_conditional_subquery = Query::new(); - final_conditional_subquery.insert_all(); + assert_ne!( + db_one + .root_hash(None, grove_version) + .unwrap() + .expect("should return root hash"), + db_two + .root_hash(None, grove_version) + .unwrap() + .expect("should return toor hash") + ); + } - let mut final_default_subquery = Query::new(); - final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + #[test] + fn test_tree_structure_is_persistent() { + let grove_version = GroveVersion::latest(); + let tmp_dir = TempDir::new().unwrap(); + let element = Element::new_item(b"ayy".to_vec()); + // Create a scoped GroveDB + let prev_root_hash = { + let mut db = GroveDb::open(tmp_dir.path()).unwrap(); + add_test_leaves(&mut db, grove_version); + + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + assert_eq!( + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("successful get 1"), + element + ); + db.root_hash(None, grove_version).unwrap().unwrap() + }; + // Open a persisted GroveDB + let db = GroveDb::open(tmp_dir).unwrap(); + 
assert_eq!( + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("successful get 2"), + element + ); + assert!(db + .get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key4", + None, + grove_version + ) + .unwrap() + .is_err()); + assert_eq!( + prev_root_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); + } - subquery.add_conditional_subquery( - QueryItem::Key(b"deeper_4".to_vec()), - None, - Some(final_conditional_subquery), - ); - subquery.set_subquery(final_default_subquery); - - query.set_subquery(subquery); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 6); - - let keys = [ - b"key3".to_vec(), - b"key4".to_vec(), - b"key5".to_vec(), - b"key6".to_vec(), - b"key10".to_vec(), - b"key11".to_vec(), - ]; - let values = [ - b"value3".to_vec(), - b"value4".to_vec(), - b"value5".to_vec(), - b"value6".to_vec(), - b"value10".to_vec(), - b"value11".to_vec(), - ]; - let elements = values - .map(|x| Element::new_item(x).serialize().unwrap()) - .to_vec(); - // compare_result_sets(&elements, &result_set); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} + #[test] + fn test_root_tree_leaves_are_noted() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.check_subtree_exists_path_not_found([TEST_LEAF].as_ref().into(), None, grove_version) + .unwrap() + .expect("should exist"); + db.check_subtree_exists_path_not_found( + [ANOTHER_TEST_LEAF].as_ref().into(), + None, + grove_version, + ) + .unwrap() + .expect("should exist"); + } -#[test] -fn 
test_path_query_proofs_with_sized_query() { - let temp_db = make_deep_tree(); + #[test] + fn test_proof_for_invalid_path_root_key() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - let mut query = Query::new(); - query.insert_all(); + let query = Query::new(); + let path_query = PathQuery::new_unsized(vec![b"invalid_path_key".to_vec()], query); - let mut subquery = Query::new(); - subquery.insert_all(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - let mut final_conditional_subquery = Query::new(); - final_conditional_subquery.insert_all(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); + } - let mut final_default_subquery = Query::new(); - final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + #[test] + fn test_proof_for_invalid_path() { + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); - subquery.add_conditional_subquery( - QueryItem::Key(b"deeper_4".to_vec()), - None, - Some(final_conditional_subquery), - ); - subquery.set_subquery(final_default_subquery); - - query.set_subquery(subquery); - - let path_query = PathQuery::new( - vec![DEEP_LEAF.to_vec()], - SizedQuery::new(query, Some(3), Some(1)), - ); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key4".to_vec(), b"key5".to_vec(), b"key6".to_vec()]; - let values = [b"value4".to_vec(), b"value5".to_vec(), b"value6".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let 
expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} + let query = Query::new(); + let path_query = + PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"invalid_key".to_vec()], query); -#[test] -fn test_path_query_proofs_with_direction() { - let temp_db = make_deep_tree(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); + + let query = Query::new(); + let path_query = PathQuery::new_unsized( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"invalid_key".to_vec(), + ], + query, + ); - let mut query = Query::new_with_direction(false); - query.insert_all(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); + + let query = Query::new(); + let path_query = PathQuery::new_unsized( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_1".to_vec(), + b"invalid_key".to_vec(), + ], + query, + ); - let mut subquery = Query::new_with_direction(false); - subquery.insert_all(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); + + let query = Query::new(); + let path_query = PathQuery::new_unsized( + vec![ + 
b"deep_leaf".to_vec(), + b"early_invalid_key".to_vec(), + b"deeper_1".to_vec(), + b"invalid_key".to_vec(), + ], + query, + ); - let mut final_conditional_subquery = Query::new_with_direction(false); - final_conditional_subquery.insert_all(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - let mut final_default_subquery = Query::new_with_direction(false); - final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); + } - subquery.add_conditional_subquery( - QueryItem::Key(b"deeper_4".to_vec()), - None, - Some(final_conditional_subquery), - ); - subquery.set_subquery(final_default_subquery); - - query.set_subquery(subquery); - - let path_query = PathQuery::new( - vec![DEEP_LEAF.to_vec()], - SizedQuery::new(query, Some(3), Some(1)), - ); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key10".to_vec(), b"key6".to_vec(), b"key5".to_vec()]; - let values = [b"value10".to_vec(), b"value6".to_vec(), b"value5".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // combined directions - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new_with_direction(false); - subq.insert_all(); - - let mut sub_subquery = Query::new(); - sub_subquery.insert_all(); - - subq.set_subquery(sub_subquery); - 
query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 11); - - let keys = [ - b"key4".to_vec(), - b"key5".to_vec(), - b"key6".to_vec(), - b"key1".to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - b"key10".to_vec(), - b"key11".to_vec(), - b"key7".to_vec(), - b"key8".to_vec(), - b"key9".to_vec(), - ]; - let values = [ - b"value4".to_vec(), - b"value5".to_vec(), - b"value6".to_vec(), - b"value1".to_vec(), - b"value2".to_vec(), - b"value3".to_vec(), - b"value10".to_vec(), - b"value11".to_vec(), - b"value7".to_vec(), - b"value8".to_vec(), - b"value9".to_vec(), - ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} + #[test] + fn test_proof_for_non_existent_data() { + let grove_version = GroveVersion::latest(); + let temp_db = make_test_grovedb(grove_version); -#[test] -fn test_checkpoint() { - let db = make_test_grovedb(); - let element1 = Element::new_item(b"ayy".to_vec()); + let mut query = Query::new(); + query.insert_key(b"key1".to_vec()); - db.insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert a subtree 1 into GroveDB"); - db.insert( - [b"key1".as_ref()].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert a subtree 2 into GroveDB"); - db.insert( - [b"key1".as_ref(), b"key2".as_ref()].as_ref(), - b"key3", - element1.clone(), - None, - None, - ) - .unwrap() - .expect("cannot insert an item into GroveDB"); + // path to empty subtree + let path_query = 
PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - assert_eq!( - db.get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) + let proof = temp_db + .prove_query(&path_query, None, grove_version) .unwrap() - .expect("cannot get from grovedb"), - element1 - ); - - let tempdir_parent = TempDir::new().expect("cannot open tempdir"); - let checkpoint_tempdir = tempdir_parent.path().join("checkpoint"); - db.create_checkpoint(&checkpoint_tempdir) - .expect("cannot create checkpoint"); + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - let checkpoint_db = - GroveDb::open(checkpoint_tempdir).expect("cannot open grovedb from checkpoint"); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 0); + } - assert_eq!( - db.get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) + #[test] + fn test_path_query_proofs_without_subquery_with_reference() { + let grove_version = GroveVersion::latest(); + // Tree Structure + // root + // test_leaf + // innertree + // k1,v1 + // k2,v2 + // k3,v3 + // another_test_leaf + // innertree2 + // k3,v3 + // k4, reference to k1 in innertree + // k5, reference to k4 in innertree3 + // innertree3 + // k4,v4 + + // Insert elements into grovedb instance + let temp_db = make_test_grovedb(grove_version); + // Insert level 1 nodes + temp_db + .insert( + [TEST_LEAF].as_ref(), + b"innertree", + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() - .expect("cannot get from grovedb"), - element1 - ); - assert_eq!( - checkpoint_db - .get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"innertree2", + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() - .expect("cannot get from checkpoint"), - element1 - ); - - let element2 = 
Element::new_item(b"ayy2".to_vec()); - let element3 = Element::new_item(b"ayy3".to_vec()); - - checkpoint_db - .insert( - [b"key1".as_ref()].as_ref(), - b"key4", - element2.clone(), - None, - None, - ) - .unwrap() - .expect("cannot insert into checkpoint"); - - db.insert( - [b"key1".as_ref()].as_ref(), - b"key4", - element3.clone(), - None, - None, - ) - .unwrap() - .expect("cannot insert into GroveDB"); + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"innertree3", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + // Insert level 2 nodes + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key1", + Element::new_item(b"value1".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key2", + Element::new_item(b"value2".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key3", + Element::new_item(b"value3".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), + b"key3", + Element::new_item(b"value3".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), + b"key4", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"innertree".to_vec(), + b"key1".to_vec(), + ])), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree3"].as_ref(), + b"key4", + Element::new_item(b"value4".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db 
+ .insert( + [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), + b"key5", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + ANOTHER_TEST_LEAF.to_vec(), + b"innertree3".to_vec(), + b"key4".to_vec(), + ])), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); - assert_eq!( - checkpoint_db - .get([b"key1".as_ref()].as_ref(), b"key4", None) + // Single key query + let mut query = Query::new(); + query.insert_range_from(b"key4".to_vec()..); + + let path_query = PathQuery::new_unsized( + vec![ANOTHER_TEST_LEAF.to_vec(), b"innertree2".to_vec()], + query, + ); + + let proof = temp_db + .prove_query(&path_query, None, grove_version) .unwrap() - .expect("cannot get from checkpoint"), - element2, - ); + .unwrap(); + assert_eq!( + hex::encode(&proof), + "005e02cfb7d035b8f4a3631be46c597510a16770c15c74331b3dc8dcb577a206e49675040a746\ + 573745f6c65616632000e02010a696e6e657274726565320049870f2813c0c3c5c105a988c0ef1\ + 372178245152fa9a43b209a6b6d95589bdc11010a746573745f6c6561663258040a696e6e65727\ + 47265653200080201046b657934008ba21f835b2ff60f16b7fccfbda107bec3da0c4709357d40d\ + e223d769547ec21013a090155ea7d14038c7062d94930798f885a19d6ebff8a87489a1debf6656\ + 04711010a696e6e65727472656532850198ebd6dc7e1c82951c41fcfa6487711cac6a399ebb01b\ + b979cbe4a51e0b2f08d06046b6579340009000676616c75653100bf2f052b01c2bb83ff3a40504\ + d42b5b9141c582a3e0c98679189b33a24478a6f1006046b6579350009000676616c75653400f08\ + 4ffdbc429a89c9b6620e7224d73c2ee505eb7e6fb5eb574e1a8dc8b0d0884110001" + ); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + let r1 = Element::new_item(b"value1".to_vec()) + .serialize(grove_version) + .unwrap(); + let r2 = Element::new_item(b"value4".to_vec()) + .serialize(grove_version) + .unwrap(); + + compare_result_tuples( + result_set, + 
vec![(b"key4".to_vec(), r1), (b"key5".to_vec(), r2)], + ); + } - assert_eq!( - db.get([b"key1".as_ref()].as_ref(), b"key4", None) + #[test] + fn test_path_query_proofs_without_subquery() { + let grove_version = GroveVersion::latest(); + // Tree Structure + // root + // test_leaf + // innertree + // k1,v1 + // k2,v2 + // k3,v3 + // another_test_leaf + // innertree2 + // k3,v3 + // innertree3 + // k4,v4 + + // Insert elements into grovedb instance + let temp_db = make_test_grovedb(grove_version); + // Insert level 1 nodes + temp_db + .insert( + [TEST_LEAF].as_ref(), + b"innertree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"innertree2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"innertree3", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + // Insert level 2 nodes + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key1", + Element::new_item(b"value1".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key2", + Element::new_item(b"value2".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key3", + Element::new_item(b"value3".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), + b"key3", + Element::new_item(b"value3".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree3"].as_ref(), + b"key4", + 
Element::new_item(b"value4".to_vec()), + None, + None, + grove_version, + ) .unwrap() - .expect("cannot get from GroveDB"), - element3 - ); + .expect("successful subtree insert"); - checkpoint_db - .insert( - [b"key1".as_ref()].as_ref(), - b"key5", - element3.clone(), - None, - None, - ) - .unwrap() - .expect("cannot insert into checkpoint"); + // Single key query + let mut query = Query::new(); + query.insert_key(b"key1".to_vec()); - db.insert([b"key1".as_ref()].as_ref(), b"key6", element3, None, None) - .unwrap() - .expect("cannot insert into GroveDB"); + let path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - assert!(matches!( - checkpoint_db - .get([b"key1".as_ref()].as_ref(), b"key6", None) - .unwrap(), - Err(Error::PathKeyNotFound(_)) - )); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + assert_eq!( + hex::encode(proof.as_slice()), + "005c0409746573745f6c656166000d020109696e6e65727472656500fafa16d06e8d8696dae443731\ + ae2a4eae521e4a9a79c331c8a7e22e34c0f1a6e01b55f830550604719833d54ce2bf139aff4bb699fa\ + 4111b9741633554318792c5110109746573745f6c656166350409696e6e65727472656500080201046\ + b657932004910536da659a3dbdbcf68c4a6630e72de4ba20cfc60b08b3dd45b4225a599b60109696e6\ + e6572747265655503046b6579310009000676616c7565310002018655e18e4555b0b65bbcec64c749d\ + b6b9ad84231969fb4fbe769a3093d10f2100198ebd6dc7e1c82951c41fcfa6487711cac6a399ebb01b\ + b979cbe4a51e0b2f08d110001" + ); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert!(matches!( - db.get([b"key1".as_ref()].as_ref(), b"key5", None).unwrap(), - Err(Error::PathKeyNotFound(_)) - )); -} + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + let r1 = Element::new_item(b"value1".to_vec()) + .serialize(grove_version) + .unwrap(); + compare_result_tuples(result_set, vec![(b"key1".to_vec(), r1)]); 
+ + // Range query + limit + let mut query = Query::new(); + query.insert_range_after(b"key1".to_vec()..); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + SizedQuery::new(query, Some(1), None), + ); -#[test] -fn test_is_empty_tree() { - let db = make_test_grovedb(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - // Create an empty tree with no elements - db.insert( - [TEST_LEAF].as_ref(), - b"innertree", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .unwrap(); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + let r1 = Element::new_item(b"value2".to_vec()) + .serialize(grove_version) + .unwrap(); + compare_result_tuples(result_set, vec![(b"key2".to_vec(), r1)]); + + // Range query + direction + limit + let mut query = Query::new_with_direction(false); + query.insert_all(); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + SizedQuery::new(query, Some(2), None), + ); - assert!(db - .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None) - .unwrap() - .expect("path is valid tree")); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - // add an element to the tree to make it non empty - db.insert( - [TEST_LEAF, b"innertree"].as_ref(), - b"key1", - Element::new_item(b"hello".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - assert!(!db - .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None) - .unwrap() - .expect("path is valid tree")); -} + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + let r1 = Element::new_item(b"value3".to_vec()) 
+ .serialize(grove_version) + .unwrap(); + let r2 = Element::new_item(b"value2".to_vec()) + .serialize(grove_version) + .unwrap(); + compare_result_tuples( + result_set, + vec![(b"key3".to_vec(), r1), (b"key2".to_vec(), r2)], + ); + } -#[test] -fn transaction_should_be_aborted_when_rollback_is_called() { - let item_key = b"key3"; + #[test] + fn test_path_query_proofs_with_default_subquery() { + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); - let db = make_test_grovedb(); - let transaction = db.start_transaction(); + let mut query = Query::new(); + query.insert_all(); - let element1 = Element::new_item(b"ayy".to_vec()); + let mut subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); - let result = db - .insert( - [TEST_LEAF].as_ref(), - item_key, - element1, - None, - Some(&transaction), - ) - .unwrap(); + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - assert!(matches!(result, Ok(()))); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - db.rollback_transaction(&transaction).unwrap(); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 5); - let result = db - .get([TEST_LEAF].as_ref(), item_key, Some(&transaction)) - .unwrap(); - assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); -} + let keys = [ + b"key1".to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), + b"key4".to_vec(), + b"key5".to_vec(), + ]; + let values = [ + b"value1".to_vec(), + b"value2".to_vec(), + b"value3".to_vec(), + b"value4".to_vec(), + b"value5".to_vec(), + ]; + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + 
compare_result_tuples(result_set, expected_result_set); + + let mut query = Query::new(); + query.insert_range_after(b"innertree".to_vec()..); + + let mut subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); -#[test] -fn transaction_should_be_aborted() { - let db = make_test_grovedb(); - let transaction = db.start_transaction(); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 2); - let item_key = b"key3"; - let element = Element::new_item(b"ayy".to_vec()); + let keys = [b"key4".to_vec(), b"key5".to_vec()]; + let values = [b"value4".to_vec(), b"value5".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); - db.insert( - [TEST_LEAF].as_ref(), - item_key, - element, - None, - Some(&transaction), - ) - .unwrap() - .unwrap(); + // range subquery + let mut query = Query::new(); + query.insert_all(); - drop(transaction); + let mut subq = Query::new(); + subq.insert_range_after_to_inclusive(b"key1".to_vec()..=b"key4".to_vec()); + query.set_subquery(subq); - // Transactional data shouldn't be committed to the main database - let result = db.get([TEST_LEAF].as_ref(), item_key, None).unwrap(); - assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); -} + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); -#[test] -fn test_subtree_pairs_iterator() { - let db = make_test_grovedb(); - let element = Element::new_item(b"ayy".to_vec()); - let element2 = 
Element::new_item(b"lmao".to_vec()); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version).expect( + "should + execute proof", + ); - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"subtree1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - db.insert( - [TEST_LEAF, b"subtree1"].as_ref(), - b"subtree11", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); - // Insert an element into subtree - db.insert( - [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), - b"key1", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - assert_eq!( - db.get( - [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), - b"key1", - None - ) - .unwrap() - .expect("successful get 1"), - element - ); - db.insert( - [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), - b"key0", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"subtree1"].as_ref(), - b"subtree12", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 3 insert"); - db.insert( - [TEST_LEAF, b"subtree1"].as_ref(), - b"key1", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"subtree1"].as_ref(), - b"key2", - element2.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - - // Iterate over subtree1 to see if keys of other subtrees messed up - // let mut iter = db - // .elements_iterator([TEST_LEAF, b"subtree1"].as_ref(), None) - // .expect("cannot create iterator"); - let storage_context = db - .grove_db - .db - .get_storage_context([TEST_LEAF, b"subtree1"].as_ref().into(), None) - .unwrap(); - let mut iter = 
Element::iterator(storage_context.raw_iter()).unwrap(); - assert_eq!( - iter.next_element().unwrap().unwrap(), - Some((b"key1".to_vec(), element)) - ); - assert_eq!( - iter.next_element().unwrap().unwrap(), - Some((b"key2".to_vec(), element2)) - ); - let subtree_element = iter.next_element().unwrap().unwrap().unwrap(); - assert_eq!(subtree_element.0, b"subtree11".to_vec()); - assert!(matches!(subtree_element.1, Element::Tree(..))); - let subtree_element = iter.next_element().unwrap().unwrap().unwrap(); - assert_eq!(subtree_element.0, b"subtree12".to_vec()); - assert!(matches!(subtree_element.1, Element::Tree(..))); - assert!(matches!(iter.next_element().unwrap(), Ok(None))); -} + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 3); -#[test] -fn test_find_subtrees() { - let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); - // Insert an element into subtree - db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element, - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key4", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 3 insert"); - let subtrees = db - .find_subtrees(&[TEST_LEAF].as_ref().into(), None) - .unwrap() - .expect("cannot get subtrees"); - assert_eq!( - vec![ - vec![TEST_LEAF], - vec![TEST_LEAF, b"key1"], - vec![TEST_LEAF, b"key4"], - vec![TEST_LEAF, b"key1", b"key2"], - ], - subtrees - ); -} + let keys = [b"key2".to_vec(), b"key3".to_vec(), b"key4".to_vec()]; + let values = [b"value2".to_vec(), b"value3".to_vec(), 
b"value4".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); -#[test] -fn test_root_subtree_has_root_key() { - let db = make_test_grovedb(); - let storage = db.db.get_storage_context(EMPTY_PATH, None).unwrap(); - let root_merk = Merk::open_base(storage, false) - .unwrap() - .expect("expected to get root merk"); - let (_, root_key, _) = root_merk - .root_hash_key_and_sum() - .unwrap() - .expect("expected to get root hash, key and sum"); - assert!(root_key.is_some()) -} + // deep tree test + let mut query = Query::new(); + query.insert_all(); -#[test] -fn test_get_subtree() { - let db = make_test_grovedb(); - let element = Element::new_item(b"ayy".to_vec()); + let mut subq = Query::new(); + subq.insert_all(); - // Returns error is subtree is not valid - { - let subtree = db.get([TEST_LEAF].as_ref(), b"invalid_tree", None).unwrap(); - assert!(subtree.is_err()); + let mut sub_subquery = Query::new(); + sub_subquery.insert_all(); - // Doesn't return an error for subtree that exists but empty - let subtree = db.get(EMPTY_PATH, TEST_LEAF, None).unwrap(); - assert!(subtree.is_ok()); - } + subq.set_subquery(sub_subquery); + query.set_subquery(subq); - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - - let key1_tree = db - .get(EMPTY_PATH, TEST_LEAF, None) - .unwrap() - .expect("expected to get a root tree"); - - assert!( - matches!(key1_tree, Element::Tree(Some(_), _)), - "{}", - format!( - "expected tree with root key, got {:?}", - if let Element::Tree(tree, ..) 
= key1_tree { - format!("{:?}", tree) - } else { - "not a tree".to_string() - } - ) - ); + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - // Insert an element into subtree - db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key4", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 3 insert"); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 14); + + let keys = [ + b"key1".to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), + b"key4".to_vec(), + b"key5".to_vec(), + b"key6".to_vec(), + b"key7".to_vec(), + b"key8".to_vec(), + b"key9".to_vec(), + b"key10".to_vec(), + b"key11".to_vec(), + b"key12".to_vec(), + b"key13".to_vec(), + b"key14".to_vec(), + ]; + let values = [ + b"value1".to_vec(), + b"value2".to_vec(), + b"value3".to_vec(), + b"value4".to_vec(), + b"value5".to_vec(), + b"value6".to_vec(), + b"value7".to_vec(), + b"value8".to_vec(), + b"value9".to_vec(), + b"value10".to_vec(), + b"value11".to_vec(), + b"value12".to_vec(), + b"value13".to_vec(), + b"value14".to_vec(), + ]; + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + } + + #[test] + fn test_path_query_proofs_with_subquery_path() { + let grove_version = 
GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); + + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new(); + subq.insert_all(); + + query.set_subquery_key(b"deeper_1".to_vec()); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 3); + + let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; + let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + + // test subquery path with valid n > 1 valid translation + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new(); + subq.insert_all(); + + query.set_subquery_path(vec![b"deep_node_1".to_vec(), b"deeper_1".to_vec()]); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![], query); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 3); + + let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; + let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; + let elements = values.map(|x| 
Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + + // test subquery path with empty subquery path + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new(); + subq.insert_all(); + + query.set_subquery_path(vec![]); + query.set_subquery(subq); + + let path_query = + PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"deep_node_1".to_vec()], query); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 6); + + let keys = [ + b"key1".to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), + b"key4".to_vec(), + b"key5".to_vec(), + b"key6".to_vec(), + ]; + let values = [ + b"value1".to_vec(), + b"value2".to_vec(), + b"value3".to_vec(), + b"value4".to_vec(), + b"value5".to_vec(), + b"value6".to_vec(), + ]; + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + + // test subquery path with an invalid translation + // should generate a valid absence proof with an empty result set + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new(); + subq.insert_all(); + + query.set_subquery_path(vec![ + b"deep_node_1".to_vec(), + b"deeper_10".to_vec(), + b"another_invalid_key".to_vec(), + ]); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![], query); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + 
GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 0); + } + + #[test] + fn test_path_query_proofs_with_key_and_subquery() { + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); + + let mut query = Query::new(); + query.insert_key(b"deep_node_1".to_vec()); + + let mut subq = Query::new(); + subq.insert_all(); + + query.set_subquery_key(b"deeper_1".to_vec()); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 3); + + let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; + let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + } + + #[test] + fn test_path_query_proofs_with_conditional_subquery() { + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); + + let mut query = Query::new(); + query.insert_all(); + + let mut subquery = Query::new(); + subquery.insert_all(); + + let mut final_subquery = Query::new(); + final_subquery.insert_all(); + + subquery.add_conditional_subquery( + QueryItem::Key(b"deeper_4".to_vec()), + None, + Some(final_subquery), + ); + + query.set_subquery(subquery); + + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], 
query); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + + let keys = [ + b"deeper_1".to_vec(), + b"deeper_2".to_vec(), + b"deeper_3".to_vec(), + b"key10".to_vec(), + b"key11".to_vec(), + b"deeper_5".to_vec(), + ]; + assert_eq!(result_set.len(), keys.len()); + + // TODO: Is this defined behaviour + for (index, key) in keys.iter().enumerate() { + assert_eq!(&result_set[index].key, key); + } + + // Default + Conditional subquery + let mut query = Query::new(); + query.insert_all(); + + let mut subquery = Query::new(); + subquery.insert_all(); + + let mut final_conditional_subquery = Query::new(); + final_conditional_subquery.insert_all(); + + let mut final_default_subquery = Query::new(); + final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + + subquery.add_conditional_subquery( + QueryItem::Key(b"deeper_4".to_vec()), + None, + Some(final_conditional_subquery), + ); + subquery.set_subquery(final_default_subquery); + + query.set_subquery(subquery); + + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 6); + + let keys = [ + b"key3".to_vec(), + b"key4".to_vec(), + b"key5".to_vec(), + b"key6".to_vec(), + b"key10".to_vec(), + b"key11".to_vec(), + ]; + let values = [ + b"value3".to_vec(), + b"value4".to_vec(), + b"value5".to_vec(), + b"value6".to_vec(), + b"value10".to_vec(), + b"value11".to_vec(), + ]; 
+ let elements = values + .map(|x| Element::new_item(x).serialize(grove_version).unwrap()) + .to_vec(); + // compare_result_sets(&elements, &result_set); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + } + + #[test] + fn test_path_query_proofs_with_sized_query() { + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); + + let mut query = Query::new(); + query.insert_all(); + + let mut subquery = Query::new(); + subquery.insert_all(); + + let mut final_conditional_subquery = Query::new(); + final_conditional_subquery.insert_all(); + + let mut final_default_subquery = Query::new(); + final_default_subquery.insert_range_inclusive(b"key4".to_vec()..=b"key6".to_vec()); + + subquery.add_conditional_subquery( + QueryItem::Key(b"deeper_4".to_vec()), + None, + Some(final_conditional_subquery), + ); + subquery.set_subquery(final_default_subquery); + + query.set_subquery(subquery); + + let path_query = PathQuery::new( + vec![DEEP_LEAF.to_vec()], + SizedQuery::new(query, Some(5), None), /* we need to add a bigger limit because of + * empty proved subtrees */ + ); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 3); + + let keys = [b"key4".to_vec(), b"key5".to_vec(), b"key6".to_vec()]; + let values = [b"value4".to_vec(), b"value5".to_vec(), b"value6".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + } + + #[test] + fn 
test_path_query_proof_with_range_subquery_and_limit() { + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); + + // Create a path query with a range query, subquery, and limit + let mut main_query = Query::new(); + main_query.insert_range_after(b"deeper_3".to_vec()..); + + let mut subquery = Query::new(); + subquery.insert_all(); + + main_query.set_subquery(subquery); + + let path_query = PathQuery::new( + vec![DEEP_LEAF.to_vec(), b"deep_node_2".to_vec()], + SizedQuery::new(main_query.clone(), Some(3), None), + ); + + // Generate proof + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + + // Verify proof + let verification_result = GroveDb::verify_query_raw(&proof, &path_query, grove_version); + + match verification_result { + Ok((hash, result_set)) => { + // Check if the hash matches the root hash + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + // Check if we got the correct number of results + assert_eq!(result_set.len(), 3, "Expected 3 results due to limit"); + } + Err(e) => { + panic!("Proof verification failed: {:?}", e); + } + } + + // Now test without a limit to compare + let path_query_no_limit = PathQuery::new( + vec![DEEP_LEAF.to_vec(), b"deep_node_2".to_vec()], + SizedQuery::new(main_query.clone(), None, None), + ); + + let proof_no_limit = db + .prove_query(&path_query_no_limit, None, grove_version) + .unwrap() + .unwrap(); + let verification_result_no_limit = + GroveDb::verify_query_raw(&proof_no_limit, &path_query_no_limit, grove_version); + + match verification_result_no_limit { + Ok((hash, result_set)) => { + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 5, "Expected 5 results without limit"); + } + Err(e) => { + panic!("Proof verification failed (no limit): {:?}", e); + } + } + } + + #[test] + fn test_path_query_proof_with_range_subquery_and_limit_with_sum_trees() { + let grove_version = 
GroveVersion::latest(); + let db = make_deep_tree_with_sum_trees(grove_version); + + // Create a path query with a range query, subquery, and limit + let mut main_query = Query::new(); + main_query.insert_key(b"a".to_vec()); + main_query.insert_range_after(b"b".to_vec()..); + + let mut subquery = Query::new(); + subquery.insert_all(); + + main_query.set_subquery(subquery); + + main_query.add_conditional_subquery(QueryItem::Key(b"a".to_vec()), None, None); + + let path_query = PathQuery::new( + vec![DEEP_LEAF.to_vec(), b"deep_node_1".to_vec()], + SizedQuery::new(main_query.clone(), Some(3), None), + ); + + let non_proved_result_elements = db + .query( + &path_query, + false, + false, + false, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + grove_version, + ) + .unwrap() + .expect("expected query to execute") + .0; + + assert_eq!( + non_proved_result_elements.len(), + 3, + "Expected 3 results due to limit" + ); + + let key_elements = non_proved_result_elements.to_key_elements(); + + assert_eq!( + key_elements, + vec![ + (vec![97], Element::new_item("storage".as_bytes().to_vec())), + (vec![49], Element::SumTree(Some(vec![0; 32]), 2, None)), + (vec![48], Element::new_item("v1".as_bytes().to_vec())) + ] + ); + + // Generate proof + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + + // Verify proof + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query, grove_version) + .expect("proof verification failed"); + + // Check if the hash matches the root hash + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + // Check if we got the correct number of results + assert_eq!(result_set.len(), 3, "Expected 3 results due to limit"); + + // Now test without a limit to compare + let path_query_no_limit = PathQuery::new( + vec![DEEP_LEAF.to_vec(), b"deep_node_1".to_vec()], + SizedQuery::new(main_query.clone(), None, None), + ); + + let proof_no_limit = db + 
.prove_query(&path_query_no_limit, None, grove_version) + .unwrap() + .unwrap(); + let verification_result_no_limit = + GroveDb::verify_query_raw(&proof_no_limit, &path_query_no_limit, grove_version); + + match verification_result_no_limit { + Ok((hash, result_set)) => { + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 29, "Expected 29 results without limit"); + } + Err(e) => { + panic!("Proof verification failed (no limit): {:?}", e); + } + } + } + + #[test] + fn test_path_query_proofs_with_direction() { + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); + + // root + // deep_leaf + // deep_node_1 + // deeper_1 + // k1,v1 + // k2,v2 + // k3,v3 + // deeper_2 + // k4,v4 + // k5,v5 + // k6,v6 + // deep_node_2 + // deeper_3 + // k7,v7 + // k8,v8 + // k9,v9 + // deeper_4 + // k10,v10 + // k11,v11 + // deeper_5 + // k12,v12 + // k13,v13 + // k14,v14 + + let mut query = Query::new_with_direction(false); + query.insert_all(); + + let mut subquery = Query::new_with_direction(false); + subquery.insert_all(); + + let mut final_conditional_subquery = Query::new_with_direction(false); + final_conditional_subquery.insert_all(); + + let mut final_default_subquery = Query::new_with_direction(false); + final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + + subquery.add_conditional_subquery( + QueryItem::Key(b"deeper_4".to_vec()), + None, + Some(final_conditional_subquery), + ); + subquery.set_subquery(final_default_subquery); + + query.set_subquery(subquery); + + let path_query = PathQuery::new( + vec![DEEP_LEAF.to_vec()], + SizedQuery::new(query, Some(6), None), /* we need 6 because of intermediate empty + * trees in proofs */ + ); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute 
proof"); + + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 4); + + let keys = [ + b"key11".to_vec(), + b"key10".to_vec(), + b"key6".to_vec(), + b"key5".to_vec(), + ]; + let values = [ + b"value11".to_vec(), + b"value10".to_vec(), + b"value6".to_vec(), + b"value5".to_vec(), + ]; + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + + // combined directions + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new_with_direction(false); + subq.insert_all(); + + let mut sub_subquery = Query::new(); + sub_subquery.insert_all(); + + subq.set_subquery(sub_subquery); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 14); + + let keys = [ + b"key4".to_vec(), + b"key5".to_vec(), + b"key6".to_vec(), + b"key1".to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), + b"key12".to_vec(), + b"key13".to_vec(), + b"key14".to_vec(), + b"key10".to_vec(), + b"key11".to_vec(), + b"key7".to_vec(), + b"key8".to_vec(), + b"key9".to_vec(), + ]; + let values = [ + b"value4".to_vec(), + b"value5".to_vec(), + b"value6".to_vec(), + b"value1".to_vec(), + b"value2".to_vec(), + b"value3".to_vec(), + b"value12".to_vec(), + b"value13".to_vec(), + b"value14".to_vec(), + b"value10".to_vec(), + b"value11".to_vec(), + b"value7".to_vec(), + b"value8".to_vec(), + b"value9".to_vec(), + ]; + let elements = values.map(|x| 
Element::new_item(x).serialize(grove_version).unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + } + + #[test] + fn test_checkpoint() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let element1 = Element::new_item(b"ayy".to_vec()); + + db.insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert a subtree 1 into GroveDB"); + db.insert( + [b"key1".as_ref()].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert a subtree 2 into GroveDB"); + db.insert( + [b"key1".as_ref(), b"key2".as_ref()].as_ref(), + b"key3", + element1.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert an item into GroveDB"); + + assert_eq!( + db.get( + [b"key1".as_ref(), b"key2".as_ref()].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("cannot get from grovedb"), + element1 + ); + + let tempdir_parent = TempDir::new().expect("cannot open tempdir"); + let checkpoint_tempdir = tempdir_parent.path().join("checkpoint"); + db.create_checkpoint(&checkpoint_tempdir) + .expect("cannot create checkpoint"); + + let checkpoint_db = + GroveDb::open(checkpoint_tempdir).expect("cannot open grovedb from checkpoint"); + + assert_eq!( + db.get( + [b"key1".as_ref(), b"key2".as_ref()].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("cannot get from grovedb"), + element1 + ); + assert_eq!( + checkpoint_db + .get( + [b"key1".as_ref(), b"key2".as_ref()].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("cannot get from checkpoint"), + element1 + ); + + let element2 = Element::new_item(b"ayy2".to_vec()); + let element3 = Element::new_item(b"ayy3".to_vec()); + + checkpoint_db + .insert( + [b"key1".as_ref()].as_ref(), + b"key4", + 
element2.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert into checkpoint"); + + db.insert( + [b"key1".as_ref()].as_ref(), + b"key4", + element3.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert into GroveDB"); + + assert_eq!( + checkpoint_db + .get([b"key1".as_ref()].as_ref(), b"key4", None, grove_version) + .unwrap() + .expect("cannot get from checkpoint"), + element2, + ); + + assert_eq!( + db.get([b"key1".as_ref()].as_ref(), b"key4", None, grove_version) + .unwrap() + .expect("cannot get from GroveDB"), + element3 + ); + + checkpoint_db + .insert( + [b"key1".as_ref()].as_ref(), + b"key5", + element3.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert into checkpoint"); + + db.insert( + [b"key1".as_ref()].as_ref(), + b"key6", + element3, + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert into GroveDB"); + + assert!(matches!( + checkpoint_db + .get([b"key1".as_ref()].as_ref(), b"key6", None, grove_version) + .unwrap(), + Err(Error::PathKeyNotFound(_)) + )); + + assert!(matches!( + db.get([b"key1".as_ref()].as_ref(), b"key5", None, grove_version) + .unwrap(), + Err(Error::PathKeyNotFound(_)) + )); + } + + #[test] + fn test_is_empty_tree() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + // Create an empty tree with no elements + db.insert( + [TEST_LEAF].as_ref(), + b"innertree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .unwrap(); + + assert!(db + .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None, grove_version) + .unwrap() + .expect("path is valid tree")); + + // add an element to the tree to make it non-empty + db.insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key1", + Element::new_item(b"hello".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .unwrap(); + assert!(!db + .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None, grove_version) 
+ .unwrap() + .expect("path is valid tree")); + } + + #[test] + fn transaction_should_be_aborted_when_rollback_is_called() { + let grove_version = GroveVersion::latest(); + let item_key = b"key3"; + + let db = make_test_grovedb(grove_version); + let transaction = db.start_transaction(); + + let element1 = Element::new_item(b"ayy".to_vec()); + + let result = db + .insert( + [TEST_LEAF].as_ref(), + item_key, + element1, + None, + Some(&transaction), + grove_version, + ) + .unwrap(); + + assert!(matches!(result, Ok(()))); + + db.rollback_transaction(&transaction).unwrap(); + + let result = db + .get( + [TEST_LEAF].as_ref(), + item_key, + Some(&transaction), + grove_version, + ) + .unwrap(); + assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); + } + + #[test] + fn transaction_should_be_aborted() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let transaction = db.start_transaction(); + + let item_key = b"key3"; + let element = Element::new_item(b"ayy".to_vec()); + + db.insert( + [TEST_LEAF].as_ref(), + item_key, + element, + None, + Some(&transaction), + grove_version, + ) + .unwrap() + .unwrap(); + + drop(transaction); + + // Transactional data shouldn't be committed to the main database + let result = db + .get([TEST_LEAF].as_ref(), item_key, None, grove_version) + .unwrap(); + assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); + } + + #[test] + fn test_subtree_pairs_iterator() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let element = Element::new_item(b"ayy".to_vec()); + let element2 = Element::new_item(b"lmao".to_vec()); + + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"subtree1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"subtree1"].as_ref(), + b"subtree11", + Element::empty_tree(), + None, + None, + grove_version, + ) + 
.unwrap() + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), + b"key1", + element.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + assert_eq!( + db.get( + [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), + b"key1", + None, + grove_version + ) + .unwrap() + .expect("successful get 1"), + element + ); + db.insert( + [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), + b"key0", + element.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"subtree1"].as_ref(), + b"subtree12", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 3 insert"); + db.insert( + [TEST_LEAF, b"subtree1"].as_ref(), + b"key1", + element.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"subtree1"].as_ref(), + b"key2", + element2.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + + // Iterate over subtree1 to see if keys of other subtrees messed up + // let mut iter = db + // .elements_iterator([TEST_LEAF, b"subtree1"].as_ref(), None) + // .expect("cannot create iterator"); + let storage_context = db + .grove_db + .db + .get_storage_context([TEST_LEAF, b"subtree1"].as_ref().into(), None) + .unwrap(); + let mut iter = Element::iterator(storage_context.raw_iter()).unwrap(); + assert_eq!( + iter.next_element(grove_version).unwrap().unwrap(), + Some((b"key1".to_vec(), element)) + ); + assert_eq!( + iter.next_element(grove_version).unwrap().unwrap(), + Some((b"key2".to_vec(), element2)) + ); + let subtree_element = iter.next_element(grove_version).unwrap().unwrap().unwrap(); + assert_eq!(subtree_element.0, b"subtree11".to_vec()); + assert!(matches!(subtree_element.1, Element::Tree(..))); + let subtree_element = 
iter.next_element(grove_version).unwrap().unwrap().unwrap(); + assert_eq!(subtree_element.0, b"subtree12".to_vec()); + assert!(matches!(subtree_element.1, Element::Tree(..))); + assert!(matches!( + iter.next_element(grove_version).unwrap(), + Ok(None) + )); + } + + #[test] + fn test_find_subtrees() { + let grove_version = GroveVersion::latest(); + let element = Element::new_item(b"ayy".to_vec()); + let db = make_test_grovedb(grove_version); + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element, + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key4", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 3 insert"); + let subtrees = db + .find_subtrees(&[TEST_LEAF].as_ref().into(), None, grove_version) + .unwrap() + .expect("cannot get subtrees"); + assert_eq!( + vec![ + vec![TEST_LEAF], + vec![TEST_LEAF, b"key1"], + vec![TEST_LEAF, b"key4"], + vec![TEST_LEAF, b"key1", b"key2"], + ], + subtrees + ); + } + + #[test] + fn test_root_subtree_has_root_key() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let storage = db.db.get_storage_context(EMPTY_PATH, None).unwrap(); + let root_merk = Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("expected to get root merk"); + let (_, root_key, _) = root_merk + .root_hash_key_and_sum() + .unwrap() + .expect("expected to get root hash, key and sum"); 
+ assert!(root_key.is_some()) + } + + #[test] + fn test_get_subtree() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let element = Element::new_item(b"ayy".to_vec()); + + // Returns error is subtree is not valid + { + let subtree = db + .get([TEST_LEAF].as_ref(), b"invalid_tree", None, grove_version) + .unwrap(); + assert!(subtree.is_err()); + + // Doesn't return an error for subtree that exists but empty + let subtree = db.get(EMPTY_PATH, TEST_LEAF, None, grove_version).unwrap(); + assert!(subtree.is_ok()); + } + + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 1 insert"); + + let key1_tree = db + .get(EMPTY_PATH, TEST_LEAF, None, grove_version) + .unwrap() + .expect("expected to get a root tree"); + + assert!( + matches!(key1_tree, Element::Tree(Some(_), _)), + "{}", + format!( + "expected tree with root key, got {:?}", + if let Element::Tree(tree, ..) 
= key1_tree { + format!("{:?}", tree) + } else { + "not a tree".to_string() + } + ) + ); + + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 2 insert"); + + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key4", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 3 insert"); + + // Retrieve subtree instance + // Check if it returns the same instance that was inserted + { + let subtree_storage = db + .grove_db + .db + .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) + .unwrap(); + let subtree = Merk::open_layered_with_root_key( + subtree_storage, + Some(b"key3".to_vec()), + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("cannot open merk"); + let result_element = Element::get(&subtree, b"key3", true, grove_version) + .unwrap() + .unwrap(); + assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + } + // Insert a new tree with transaction + let transaction = db.start_transaction(); + + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"innertree", + Element::empty_tree(), + None, + Some(&transaction), + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"key1", b"innertree"].as_ref(), + b"key4", + element, + None, + Some(&transaction), + grove_version, + ) + .unwrap() + .expect("successful value insert"); + + // Retrieve subtree instance with transaction + let subtree_storage = db + .grove_db + .db + .get_transactional_storage_context( + [TEST_LEAF, b"key1", b"innertree"].as_ref().into(), + None, + &transaction, + ) + .unwrap(); + let subtree = 
Merk::open_layered_with_root_key( + subtree_storage, + Some(b"key4".to_vec()), + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("cannot open merk"); + let result_element = Element::get(&subtree, b"key4", true, grove_version) + .unwrap() + .unwrap(); + assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + + // Should be able to retrieve instances created before transaction + let subtree_storage = db + .grove_db + .db + .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) + .unwrap(); + let subtree = Merk::open_layered_with_root_key( + subtree_storage, + Some(b"key3".to_vec()), + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("cannot open merk"); + let result_element = Element::get(&subtree, b"key3", true, grove_version) + .unwrap() + .unwrap(); + assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + } + + #[test] + fn test_get_full_query() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + // Insert a couple of subtrees first + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + // Insert some elements into subtree + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key3", + Element::new_item(b"ayya".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key4", + Element::new_item(b"ayyb".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key5", + Element::new_item(b"ayyc".to_vec()), + None, + None, + 
grove_version, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"key6", + Element::new_item(b"ayyd".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + + // Test_Leaf + // ___________________________ + // / \ + // key1 key2 + // ___________________________ + // | | + // key4 key6 + // / \ + // key3 key5 + // + + let path1 = vec![TEST_LEAF.to_vec(), b"key1".to_vec()]; + let path2 = vec![TEST_LEAF.to_vec(), b"key2".to_vec()]; + let mut query1 = Query::new(); + let mut query2 = Query::new(); + query1.insert_range_inclusive(b"key3".to_vec()..=b"key4".to_vec()); + query2.insert_key(b"key6".to_vec()); + + let path_query1 = PathQuery::new_unsized(path1, query1); + // should get back key3, key4 + let path_query2 = PathQuery::new_unsized(path2, query2); + // should get back key6 + + assert_eq!( + db.query_many_raw( + &[&path_query1, &path_query2], + true, + true, + true, + QueryKeyElementPairResultType, + None, + grove_version + ) + .unwrap() + .expect("expected successful get_query") + .to_key_elements(), + vec![ + (b"key3".to_vec(), Element::new_item(b"ayya".to_vec())), + (b"key4".to_vec(), Element::new_item(b"ayyb".to_vec())), + (b"key6".to_vec(), Element::new_item(b"ayyd".to_vec())), + ] + ); + } + + #[test] + fn test_aux_uses_separate_cf() { + let grove_version = GroveVersion::latest(); + let element = Element::new_item(b"ayy".to_vec()); + let db = make_test_grovedb(grove_version); + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element.clone(), + 
None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); - // Retrieve subtree instance - // Check if it returns the same instance that was inserted - { - let subtree_storage = db - .grove_db - .db - .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) - .unwrap(); - let subtree = - Merk::open_layered_with_root_key(subtree_storage, Some(b"key3".to_vec()), false) + db.put_aux(b"key1", b"a", None, None) + .unwrap() + .expect("cannot put aux"); + db.put_aux(b"key2", b"b", None, None) + .unwrap() + .expect("cannot put aux"); + db.put_aux(b"key3", b"c", None, None) + .unwrap() + .expect("cannot put aux"); + db.delete_aux(b"key3", None, None) + .unwrap() + .expect("cannot delete from aux"); + + assert_eq!( + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("cannot get element"), + element + ); + assert_eq!( + db.get_aux(b"key1", None) .unwrap() - .expect("cannot open merk"); - let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); - assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + .expect("cannot get from aux"), + Some(b"a".to_vec()) + ); + assert_eq!( + db.get_aux(b"key2", None) + .unwrap() + .expect("cannot get from aux"), + Some(b"b".to_vec()) + ); + assert_eq!( + db.get_aux(b"key3", None) + .unwrap() + .expect("cannot get from aux"), + None + ); + assert_eq!( + db.get_aux(b"key4", None) + .unwrap() + .expect("cannot get from aux"), + None + ); } - // Insert a new tree with transaction - let transaction = db.start_transaction(); - - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"innertree", - Element::empty_tree(), - None, - Some(&transaction), - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"key1", b"innertree"].as_ref(), - b"key4", - element, - None, - Some(&transaction), - ) - .unwrap() - .expect("successful value insert"); + #[test] + fn test_aux_with_transaction() { + let 
grove_version = GroveVersion::latest(); + let element = Element::new_item(b"ayy".to_vec()); + let aux_value = b"ayylmao".to_vec(); + let key = b"key".to_vec(); + let db = make_test_grovedb(grove_version); + let transaction = db.start_transaction(); - // Retrieve subtree instance with transaction - let subtree_storage = db - .grove_db - .db - .get_transactional_storage_context( - [TEST_LEAF, b"key1", b"innertree"].as_ref().into(), + // Insert a regular data with aux data in the same transaction + db.insert( + [TEST_LEAF].as_ref(), + &key, + element, None, - &transaction, + Some(&transaction), + grove_version, ) - .unwrap(); - let subtree = Merk::open_layered_with_root_key(subtree_storage, Some(b"key4".to_vec()), false) .unwrap() - .expect("cannot open merk"); - let result_element = Element::get(&subtree, b"key4", true).unwrap().unwrap(); - assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); - - // Should be able to retrieve instances created before transaction - let subtree_storage = db - .grove_db - .db - .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) - .unwrap(); - let subtree = Merk::open_layered_with_root_key(subtree_storage, Some(b"key3".to_vec()), false) + .expect("unable to insert"); + db.put_aux(&key, &aux_value, None, Some(&transaction)) + .unwrap() + .expect("unable to insert aux value"); + assert_eq!( + db.get_aux(&key, Some(&transaction)) + .unwrap() + .expect("unable to get aux value"), + Some(aux_value.clone()) + ); + // Cannot reach the data outside of transaction + assert_eq!( + db.get_aux(&key, None) + .unwrap() + .expect("unable to get aux value"), + None + ); + // And should be able to get data when committed + db.commit_transaction(transaction) + .unwrap() + .expect("unable to commit transaction"); + assert_eq!( + db.get_aux(&key, None) + .unwrap() + .expect("unable to get committed aux value"), + Some(aux_value) + ); + } + + #[test] + fn test_root_hash() { + let grove_version = GroveVersion::latest(); + let 
db = make_test_grovedb(grove_version); + // Check hashes are different if tree is edited + let old_root_hash = db.root_hash(None, grove_version).unwrap(); + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::new_item(b"ayy".to_vec()), + None, + None, + grove_version, + ) .unwrap() - .expect("cannot open merk"); - let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); - assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); -} + .expect("unable to insert an item"); + assert_ne!( + old_root_hash.unwrap(), + db.root_hash(None, grove_version).unwrap().unwrap() + ); -#[test] -fn test_get_full_query() { - let db = make_test_grovedb(); + // Check isolation + let transaction = db.start_transaction(); - // Insert a couple of subtrees first - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert some elements into subtree - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key3", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key4", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key5", - Element::new_item(b"ayyc".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"key6", - Element::new_item(b"ayyd".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - - // Test_Leaf - // ___________________________ - // / \ - // key1 key2 - // ___________________________ - // | | - // key4 key6 - // / \ - // key3 key5 - // - - let path1 = vec![TEST_LEAF.to_vec(), b"key1".to_vec()]; - let 
path2 = vec![TEST_LEAF.to_vec(), b"key2".to_vec()]; - let mut query1 = Query::new(); - let mut query2 = Query::new(); - query1.insert_range_inclusive(b"key3".to_vec()..=b"key4".to_vec()); - query2.insert_key(b"key6".to_vec()); - - let path_query1 = PathQuery::new_unsized(path1, query1); - // should get back key3, key4 - let path_query2 = PathQuery::new_unsized(path2, query2); - // should get back key6 - - assert_eq!( - db.query_many_raw( - &[&path_query1, &path_query2], - true, - QueryKeyElementPairResultType, - None + db.insert( + [TEST_LEAF].as_ref(), + b"key2", + Element::new_item(b"ayy".to_vec()), + None, + Some(&transaction), + grove_version, ) .unwrap() - .expect("expected successful get_query") - .to_key_elements(), - vec![ - (b"key3".to_vec(), Element::new_item(b"ayya".to_vec())), - (b"key4".to_vec(), Element::new_item(b"ayyb".to_vec())), - (b"key6".to_vec(), Element::new_item(b"ayyd".to_vec())), - ] - ); -} + .expect("unable to insert an item"); + let root_hash_outside = db.root_hash(None, grove_version).unwrap().unwrap(); + assert_ne!( + db.root_hash(Some(&transaction), grove_version) + .unwrap() + .unwrap(), + root_hash_outside + ); -#[test] -fn test_aux_uses_separate_cf() { - let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); - // Insert an element into subtree - db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); + assert_eq!( + db.root_hash(None, grove_version).unwrap().unwrap(), + root_hash_outside + ); + db.commit_transaction(transaction).unwrap().unwrap(); + assert_ne!( + 
db.root_hash(None, grove_version).unwrap().unwrap(), + root_hash_outside + ); + } - db.put_aux(b"key1", b"a", None, None) - .unwrap() - .expect("cannot put aux"); - db.put_aux(b"key2", b"b", None, None) - .unwrap() - .expect("cannot put aux"); - db.put_aux(b"key3", b"c", None, None) + #[test] + fn test_get_non_existing_root_leaf() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + assert!(db + .get(EMPTY_PATH, b"ayy", None, grove_version) + .unwrap() + .is_err()); + } + + #[test] + fn test_check_subtree_exists_function() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key_scalar", + Element::new_item(b"ayy".to_vec()), + None, + None, + grove_version, + ) .unwrap() - .expect("cannot put aux"); - db.delete_aux(b"key3", None, None) + .expect("cannot insert item"); + db.insert( + [TEST_LEAF].as_ref(), + b"key_subtree", + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() - .expect("cannot delete from aux"); + .expect("cannot insert item"); - assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) + // Empty tree path means root always exist + assert!(db + .check_subtree_exists_invalid_path(EMPTY_PATH, None, grove_version) .unwrap() - .expect("cannot get element"), - element - ); - assert_eq!( - db.get_aux(b"key1", None) - .unwrap() - .expect("cannot get from aux"), - Some(b"a".to_vec()) - ); - assert_eq!( - db.get_aux(b"key2", None) - .unwrap() - .expect("cannot get from aux"), - Some(b"b".to_vec()) - ); - assert_eq!( - db.get_aux(b"key3", None) - .unwrap() - .expect("cannot get from aux"), - None - ); - assert_eq!( - db.get_aux(b"key4", None) - .unwrap() - .expect("cannot get from aux"), - None - ); -} - -#[test] -fn test_aux_with_transaction() { - let element = Element::new_item(b"ayy".to_vec()); - let aux_value = b"ayylmao".to_vec(); - let key = b"key".to_vec(); - let db = make_test_grovedb(); - let 
transaction = db.start_transaction(); - - // Insert a regular data with aux data in the same transaction - db.insert( - [TEST_LEAF].as_ref(), - &key, - element, - None, - Some(&transaction), - ) - .unwrap() - .expect("unable to insert"); - db.put_aux(&key, &aux_value, None, Some(&transaction)) - .unwrap() - .expect("unable to insert aux value"); - assert_eq!( - db.get_aux(&key, Some(&transaction)) - .unwrap() - .expect("unable to get aux value"), - Some(aux_value.clone()) - ); - // Cannot reach the data outside of transaction - assert_eq!( - db.get_aux(&key, None) - .unwrap() - .expect("unable to get aux value"), - None - ); - // And should be able to get data when committed - db.commit_transaction(transaction) - .unwrap() - .expect("unable to commit transaction"); - assert_eq!( - db.get_aux(&key, None) - .unwrap() - .expect("unable to get committed aux value"), - Some(aux_value) - ); -} - -#[test] -fn test_root_hash() { - let db = make_test_grovedb(); - // Check hashes are different if tree is edited - let old_root_hash = db.root_hash(None).unwrap(); - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::new_item(b"ayy".to_vec()), - None, - None, - ) - .unwrap() - .expect("unable to insert an item"); - assert_ne!(old_root_hash.unwrap(), db.root_hash(None).unwrap().unwrap()); + .is_ok()); - // Check isolation - let transaction = db.start_transaction(); - - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::new_item(b"ayy".to_vec()), - None, - Some(&transaction), - ) - .unwrap() - .expect("unable to insert an item"); - let root_hash_outside = db.root_hash(None).unwrap().unwrap(); - assert_ne!( - db.root_hash(Some(&transaction)).unwrap().unwrap(), - root_hash_outside - ); - - assert_eq!(db.root_hash(None).unwrap().unwrap(), root_hash_outside); - db.commit_transaction(transaction).unwrap().unwrap(); - assert_ne!(db.root_hash(None).unwrap().unwrap(), root_hash_outside); -} - -#[test] -fn test_get_non_existing_root_leaf() { - let db = make_test_grovedb(); - 
assert!(matches!(db.get(EMPTY_PATH, b"ayy", None).unwrap(), Err(_))); -} + // TEST_LEAF should be a tree + assert!(db + .check_subtree_exists_invalid_path([TEST_LEAF].as_ref().into(), None, grove_version) + .unwrap() + .is_ok()); -#[test] -fn test_check_subtree_exists_function() { - let db = make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"key_scalar", - Element::new_item(b"ayy".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert item"); - db.insert( - [TEST_LEAF].as_ref(), - b"key_subtree", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert item"); + // TEST_LEAF.key_subtree should be a tree + assert!(db + .check_subtree_exists_invalid_path( + [TEST_LEAF, b"key_subtree"].as_ref().into(), + None, + grove_version + ) + .unwrap() + .is_ok()); - // Empty tree path means root always exist - assert!(db - .check_subtree_exists_invalid_path(EMPTY_PATH.into(), None) - .unwrap() - .is_ok()); + // TEST_LEAF.key_scalar should NOT be a tree + assert!(matches!( + db.check_subtree_exists_invalid_path( + [TEST_LEAF, b"key_scalar"].as_ref().into(), + None, + grove_version + ) + .unwrap(), + Err(Error::InvalidPath(_)) + )); + } - // TEST_LEAF should be a tree - assert!(db - .check_subtree_exists_invalid_path([TEST_LEAF].as_ref().into(), None) + #[test] + fn test_tree_value_exists_method_no_tx() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + // Test keys in non-root tree + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::new_item(b"ayy".to_vec()), + None, + None, + grove_version, + ) .unwrap() - .is_ok()); + .expect("cannot insert item"); + assert!(db + .has_raw([TEST_LEAF].as_ref(), b"key", None, grove_version) + .unwrap() + .unwrap()); + assert!(!db + .has_raw([TEST_LEAF].as_ref(), b"badkey", None, grove_version) + .unwrap() + .unwrap()); - // TEST_LEAF.key_subtree should be a tree - assert!(db - .check_subtree_exists_invalid_path([TEST_LEAF, 
b"key_subtree"].as_ref().into(), None) + // Test keys for a root tree + db.insert( + EMPTY_PATH, + b"leaf", + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() - .is_ok()); + .expect("cannot insert item"); - // TEST_LEAF.key_scalar should NOT be a tree - assert!(matches!( - db.check_subtree_exists_invalid_path([TEST_LEAF, b"key_scalar"].as_ref().into(), None) - .unwrap(), - Err(Error::InvalidPath(_)) - )); -} + assert!(db + .has_raw(EMPTY_PATH, b"leaf", None, grove_version) + .unwrap() + .unwrap()); + assert!(db + .has_raw(EMPTY_PATH, TEST_LEAF, None, grove_version) + .unwrap() + .unwrap()); + assert!(!db + .has_raw(EMPTY_PATH, b"badleaf", None, grove_version) + .unwrap() + .unwrap()); + } -#[test] -fn test_tree_value_exists_method_no_tx() { - let db = make_test_grovedb(); - // Test keys in non-root tree - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::new_item(b"ayy".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert item"); - assert!(db - .has_raw([TEST_LEAF].as_ref(), b"key", None) - .unwrap() - .unwrap()); - assert!(!db - .has_raw([TEST_LEAF].as_ref(), b"badkey", None) + #[test] + fn test_tree_value_exists_method_tx() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let tx = db.start_transaction(); + // Test keys in non-root tree + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::new_item(b"ayy".to_vec()), + None, + Some(&tx), + grove_version, + ) .unwrap() - .unwrap()); + .expect("cannot insert item"); + assert!(db + .has_raw([TEST_LEAF].as_ref(), b"key", Some(&tx), grove_version) + .unwrap() + .unwrap()); + assert!(!db + .has_raw([TEST_LEAF].as_ref(), b"key", None, grove_version) + .unwrap() + .unwrap()); - // Test keys for a root tree - db.insert(EMPTY_PATH, b"leaf", Element::empty_tree(), None, None) + // Test keys for a root tree + db.insert( + EMPTY_PATH, + b"leaf", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .unwrap() .expect("cannot 
insert item"); + assert!(db + .has_raw(EMPTY_PATH, b"leaf", Some(&tx), grove_version) + .unwrap() + .unwrap()); + assert!(!db + .has_raw(EMPTY_PATH, b"leaf", None, grove_version) + .unwrap() + .unwrap()); - assert!(db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); - assert!(db.has_raw(EMPTY_PATH, TEST_LEAF, None).unwrap().unwrap()); - assert!(!db.has_raw(EMPTY_PATH, b"badleaf", None).unwrap().unwrap()); -} + db.commit_transaction(tx) + .unwrap() + .expect("cannot commit transaction"); + assert!(db + .has_raw([TEST_LEAF].as_ref(), b"key", None, grove_version) + .unwrap() + .unwrap()); + assert!(db + .has_raw(EMPTY_PATH, b"leaf", None, grove_version) + .unwrap() + .unwrap()); + } -#[test] -fn test_tree_value_exists_method_tx() { - let db = make_test_grovedb(); - let tx = db.start_transaction(); - // Test keys in non-root tree - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::new_item(b"ayy".to_vec()), - None, - Some(&tx), - ) - .unwrap() - .expect("cannot insert item"); - assert!(db - .has_raw([TEST_LEAF].as_ref(), b"key", Some(&tx)) - .unwrap() - .unwrap()); - assert!(!db - .has_raw([TEST_LEAF].as_ref(), b"key", None) - .unwrap() - .unwrap()); + #[test] + fn test_storage_wipe() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let _path = db._tmp_dir.path(); - // Test keys for a root tree - db.insert(EMPTY_PATH, b"leaf", Element::empty_tree(), None, Some(&tx)) + // Test keys in non-root tree + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::new_item(b"ayy".to_vec()), + None, + None, + grove_version, + ) .unwrap() .expect("cannot insert item"); - assert!(db.has_raw(EMPTY_PATH, b"leaf", Some(&tx)).unwrap().unwrap()); - assert!(!db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); - db.commit_transaction(tx) - .unwrap() - .expect("cannot commit transaction"); - assert!(db - .has_raw([TEST_LEAF].as_ref(), b"key", None) - .unwrap() - .unwrap()); - assert!(db.has_raw(EMPTY_PATH, b"leaf", 
None).unwrap().unwrap()); + // retrieve key before wipe + let elem = db + .get(&[TEST_LEAF], b"key", None, grove_version) + .unwrap() + .unwrap(); + assert_eq!(elem, Element::new_item(b"ayy".to_vec())); + + // wipe the database + db.grove_db.wipe().unwrap(); + + // retrieve key after wipe + let elem_result = db.get(&[TEST_LEAF], b"key", None, grove_version).unwrap(); + assert!(elem_result.is_err()); + assert!(matches!( + elem_result, + Err(Error::PathParentLayerNotFound(..)) + )); + } } diff --git a/grovedb/src/tests/query_tests.rs b/grovedb/src/tests/query_tests.rs index 0bb6a1f04..48c358c6b 100644 --- a/grovedb/src/tests/query_tests.rs +++ b/grovedb/src/tests/query_tests.rs @@ -1,2660 +1,3119 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! 
Query tests - -use grovedb_merk::proofs::{query::QueryItem, Query}; -use rand::Rng; -use tempfile::TempDir; - -use crate::{ - batch::GroveDbOp, - query_result_type::{PathKeyOptionalElementTrio, QueryResultType}, - reference_path::ReferencePathType, - tests::{ - common::compare_result_sets, make_deep_tree, make_test_grovedb, TempGroveDb, - ANOTHER_TEST_LEAF, TEST_LEAF, - }, - Element, GroveDb, PathQuery, SizedQuery, -}; - -fn populate_tree_for_non_unique_range_subquery(db: &TempGroveDb) { - // Insert a couple of subtrees first - for i in 1985u32..2000 { - let i_vec = (i as u32).to_be_bytes().to_vec(); - db.insert( - [TEST_LEAF].as_ref(), - &i_vec, - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert element 0 - // Insert some elements into subtree - db.insert( - [TEST_LEAF, i_vec.as_slice()].as_ref(), - b"\0", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); +mod tests { + //! Query tests + + use grovedb_merk::proofs::{query::QueryItem, Query}; + use grovedb_version::version::GroveVersion; + use rand::random; + use tempfile::TempDir; + + use crate::{ + batch::GroveDbOp, + query_result_type::{ + PathKeyOptionalElementTrio, QueryResultElement::PathKeyElementTrioResultItem, + QueryResultElements, QueryResultType, + }, + reference_path::ReferencePathType, + tests::{ + common::compare_result_sets, make_deep_tree, make_test_grovedb, TempGroveDb, + ANOTHER_TEST_LEAF, TEST_LEAF, + }, + Element, GroveDb, PathQuery, SizedQuery, + }; - for j in 100u32..150 { - let mut j_vec = i_vec.clone(); - j_vec.append(&mut (j as u32).to_be_bytes().to_vec()); + fn populate_tree_for_non_unique_range_subquery(db: &TempGroveDb, grove_version: &GroveVersion) { + // Insert a couple of subtrees first + for i in 1985u32..2000 { + let i_vec = i.to_be_bytes().to_vec(); db.insert( - [TEST_LEAF, i_vec.as_slice(), b"\0"].as_ref(), - &j_vec.clone(), - Element::new_item(j_vec), + [TEST_LEAF].as_ref(), 
+ &i_vec, + Element::empty_tree(), None, None, + grove_version, ) .unwrap() - .expect("successful value insert"); + .expect("successful subtree insert"); + // Insert element 0 + // Insert some elements into subtree + db.insert( + [TEST_LEAF, i_vec.as_slice()].as_ref(), + b"\0", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + + for j in 100u32..150 { + let mut j_vec = i_vec.clone(); + j_vec.append(&mut j.to_be_bytes().to_vec()); + db.insert( + [TEST_LEAF, i_vec.as_slice(), b"\0"].as_ref(), + &j_vec.clone(), + Element::new_item(j_vec), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + } } } -} -fn populate_tree_for_non_unique_double_range_subquery(db: &TempGroveDb) { - // Insert a couple of subtrees first - for i in 0u32..10 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + fn populate_tree_for_non_unique_double_range_subquery( + db: &TempGroveDb, + grove_version: &GroveVersion, + ) { + // Insert a couple of subtrees first + for i in 0u32..10 { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF].as_ref(), + &i_vec, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + // Insert element 0 + // Insert some elements into subtree + db.insert( + [TEST_LEAF, i_vec.as_slice()].as_ref(), + b"a", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + + for j in 25u32..50 { + let j_vec = j.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF, i_vec.as_slice(), b"a"].as_ref(), + &j_vec, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + + // Insert element 0 + // Insert some elements into subtree + db.insert( + [TEST_LEAF, i_vec.as_slice(), b"a", j_vec.as_slice()].as_ref(), + b"\0", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + 
.expect("successful subtree insert"); + + for k in 100u32..110 { + let k_vec = k.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF, i_vec.as_slice(), b"a", &j_vec, b"\0"].as_ref(), + &k_vec.clone(), + Element::new_item(k_vec), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + } + } + } + } + + fn populate_tree_by_reference_for_non_unique_range_subquery( + db: &TempGroveDb, + grove_version: &GroveVersion, + ) { + // This subtree will be holding values db.insert( [TEST_LEAF].as_ref(), - &i_vec, + b"\0", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - // Insert element 0 - // Insert some elements into subtree + + // This subtree will be holding references db.insert( - [TEST_LEAF, i_vec.as_slice()].as_ref(), - b"a", + [TEST_LEAF].as_ref(), + b"1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - - for j in 25u32..50 { - let j_vec = (j as u32).to_be_bytes().to_vec(); + // Insert a couple of subtrees first + for i in 1985u32..2000 { + let i_vec = i.to_be_bytes().to_vec(); db.insert( - [TEST_LEAF, i_vec.as_slice(), b"a"].as_ref(), - &j_vec, + [TEST_LEAF, b"1"].as_ref(), + &i_vec, Element::empty_tree(), None, None, + grove_version, ) .unwrap() - .expect("successful value insert"); - + .expect("successful subtree insert"); // Insert element 0 // Insert some elements into subtree db.insert( - [TEST_LEAF, i_vec.as_slice(), b"a", j_vec.as_slice()].as_ref(), + [TEST_LEAF, b"1", i_vec.as_slice()].as_ref(), b"\0", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - for k in 100u32..110 { - let k_vec = (k as u32).to_be_bytes().to_vec(); + for j in 100u32..150 { + let random_key = random::<[u8; 32]>(); + let mut j_vec = i_vec.clone(); + j_vec.append(&mut j.to_be_bytes().to_vec()); + + // We should insert every item to the tree holding items + db.insert( + [TEST_LEAF, b"\0"].as_ref(), 
+ &random_key, + Element::new_item(j_vec.clone()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + db.insert( - [TEST_LEAF, i_vec.as_slice(), b"a", &j_vec, b"\0"].as_ref(), - &k_vec.clone(), - Element::new_item(k_vec), + [TEST_LEAF, b"1", i_vec.clone().as_slice(), b"\0"].as_ref(), + &random_key, + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"\0".to_vec(), + random_key.to_vec(), + ])), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); } } } -} -fn populate_tree_by_reference_for_non_unique_range_subquery(db: &TempGroveDb) { - // This subtree will be holding values - db.insert( - [TEST_LEAF].as_ref(), - b"\0", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - // This subtree will be holding references - db.insert( - [TEST_LEAF].as_ref(), - b"1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert a couple of subtrees first - for i in 1985u32..2000 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + fn populate_tree_for_unique_range_subquery(db: &TempGroveDb, grove_version: &GroveVersion) { + // Insert a couple of subtrees first + for i in 1985u32..2000 { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF].as_ref(), + &i_vec, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, &i_vec.clone()].as_ref(), + b"\0", + Element::new_item(i_vec), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + } + } + + fn populate_tree_by_reference_for_unique_range_subquery( + db: &TempGroveDb, + grove_version: &GroveVersion, + ) { + // This subtree will be holding values db.insert( - [TEST_LEAF, b"1"].as_ref(), - &i_vec, + [TEST_LEAF].as_ref(), + b"\0", Element::empty_tree(), None, None, + grove_version, ) .unwrap() 
.expect("successful subtree insert"); - // Insert element 0 - // Insert some elements into subtree + + // This subtree will be holding references db.insert( - [TEST_LEAF, b"1", i_vec.as_slice()].as_ref(), - b"\0", + [TEST_LEAF].as_ref(), + b"1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - for j in 100u32..150 { - let random_key = rand::thread_rng().gen::<[u8; 32]>(); - let mut j_vec = i_vec.clone(); - j_vec.append(&mut (j as u32).to_be_bytes().to_vec()); + for i in 1985u32..2000 { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF, b"1"].as_ref(), + &i_vec, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); // We should insert every item to the tree holding items db.insert( [TEST_LEAF, b"\0"].as_ref(), - &random_key, - Element::new_item(j_vec.clone()), + &i_vec, + Element::new_item(i_vec.clone()), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); + // We should insert a reference to the item db.insert( - [TEST_LEAF, b"1", i_vec.clone().as_slice(), b"\0"].as_ref(), - &random_key, + [TEST_LEAF, b"1", i_vec.clone().as_slice()].as_ref(), + b"\0", Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ TEST_LEAF.to_vec(), b"\0".to_vec(), - random_key.to_vec(), + i_vec.clone(), ])), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); } } -} -fn populate_tree_for_unique_range_subquery(db: &TempGroveDb) { - // Insert a couple of subtrees first - for i in 1985u32..2000 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + fn populate_tree_for_unique_range_subquery_with_non_unique_null_values( + db: &mut TempGroveDb, + grove_version: &GroveVersion, + ) { + populate_tree_for_unique_range_subquery(db, grove_version); db.insert( [TEST_LEAF].as_ref(), - &i_vec, + &[], Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - 
db.insert( - [TEST_LEAF, &i_vec.clone()].as_ref(), + [TEST_LEAF, &[]].as_ref(), b"\0", - Element::new_item(i_vec), + Element::empty_tree(), None, None, + grove_version, ) .unwrap() - .expect("successful value insert"); + .expect("successful subtree insert"); + // Insert a couple of subtrees first + for i in 100u32..200 { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF, &[], b"\0"].as_ref(), + &i_vec, + Element::new_item(i_vec.clone()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful value insert"); + } } -} -fn populate_tree_by_reference_for_unique_range_subquery(db: &TempGroveDb) { - // This subtree will be holding values - db.insert( - [TEST_LEAF].as_ref(), - b"\0", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - // This subtree will be holding references - db.insert( - [TEST_LEAF].as_ref(), - b"1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - for i in 1985u32..2000 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + fn populate_tree_for_uneven_keys(db: &TempGroveDb, grove_version: &GroveVersion) { db.insert( - [TEST_LEAF, b"1"].as_ref(), - &i_vec, - Element::empty_tree(), + [TEST_LEAF].as_ref(), + "b".as_ref(), + Element::new_item(1u8.to_be_bytes().to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); - // We should insert every item to the tree holding items db.insert( - [TEST_LEAF, b"\0"].as_ref(), - &i_vec, - Element::new_item(i_vec.clone()), + [TEST_LEAF].as_ref(), + "ab".as_ref(), + Element::new_item(2u8.to_be_bytes().to_vec()), None, None, + grove_version, ) .unwrap() - .expect("successful value insert"); + .expect("successful subtree insert"); - // We should insert a reference to the item db.insert( - [TEST_LEAF, b"1", i_vec.clone().as_slice()].as_ref(), - b"\0", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - 
b"\0".to_vec(), - i_vec.clone(), - ])), + [TEST_LEAF].as_ref(), + "x".as_ref(), + Element::new_item(3u8.to_be_bytes().to_vec()), None, None, + grove_version, ) .unwrap() - .expect("successful value insert"); - } -} + .expect("successful subtree insert"); -fn populate_tree_for_unique_range_subquery_with_non_unique_null_values(db: &mut TempGroveDb) { - populate_tree_for_unique_range_subquery(db); - db.insert([TEST_LEAF].as_ref(), &[], Element::empty_tree(), None, None) + db.insert( + [TEST_LEAF].as_ref(), + &[3; 32], + Element::new_item(4u8.to_be_bytes().to_vec()), + None, + None, + grove_version, + ) .unwrap() .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, &[]].as_ref(), - b"\0", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert a couple of subtrees first - for i in 100u32..200 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + db.insert( - [TEST_LEAF, &[], b"\0"].as_ref(), - &i_vec, - Element::new_item(i_vec.clone()), + [TEST_LEAF].as_ref(), + "k".as_ref(), + Element::new_item(5u8.to_be_bytes().to_vec()), None, None, + grove_version, ) .unwrap() - .expect("successful value insert"); + .expect("successful subtree insert"); } -} -#[test] -fn test_get_range_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_correct_order() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_uneven_keys(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); + let path = vec![TEST_LEAF.to_vec()]; + let query = Query::new_range_full(); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let path_query = PathQuery::new_unsized(path, query.clone()); - query.set_subquery_key(subquery_key); 
- query.set_subquery(subquery); - - let path_query = PathQuery::new_unsized(path, query.clone()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + assert_eq!(elements, vec![vec![4], vec![2], vec![1], vec![5], vec![3]]); + } - assert_eq!(elements.len(), 200); + #[test] + fn test_get_range_query_with_non_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - let mut first_value = 1988_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); - let mut last_value = 1991_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 200); - compare_result_sets(&elements, &result_set); -} + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); -#[test] -fn test_get_range_query_with_unique_subquery() { - let mut db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&mut db); + let path_query = PathQuery::new_unsized(path, query.clone()); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - 
query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let subquery_key: Vec = b"\0".to_vec(); + assert_eq!(elements.len(), 200); - query.set_subquery_key(subquery_key); + let mut first_value = 1988_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let mut last_value = 1991_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 200); + compare_result_sets(&elements, &result_set); + } - assert_eq!(elements.len(), 4); + #[test] + fn test_get_range_query_with_unique_subquery() { + let grove_version = GroveVersion::latest(); + let mut db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&mut db, grove_version); - let first_value = 1988_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); - let last_value = 1991_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + let subquery_key: Vec = b"\0".to_vec(); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = 
GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - compare_result_sets(&elements, &result_set); -} + query.set_subquery_key(subquery_key); -#[test] -fn test_get_range_query_with_unique_subquery_on_references() { - let db = make_test_grovedb(); - populate_tree_by_reference_for_unique_range_subquery(&db); + let path_query = PathQuery::new_unsized(path, query.clone()); - let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; - let mut query = Query::new(); - query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let subquery_key: Vec = b"\0".to_vec(); + assert_eq!(elements.len(), 4); - query.set_subquery_key(subquery_key); + let first_value = 1988_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let last_value = 1991_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + compare_result_sets(&elements, &result_set); + } - assert_eq!(elements.len(), 4); + #[test] + fn test_get_range_query_with_unique_subquery_on_references() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_by_reference_for_unique_range_subquery(&db, grove_version); - let first_value = 1988_u32.to_be_bytes().to_vec(); - 
assert_eq!(elements[0], first_value); + let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; + let mut query = Query::new(); + query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); - let last_value = 1991_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + let subquery_key: Vec = b"\0".to_vec(); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - compare_result_sets(&elements, &result_set); -} + query.set_subquery_key(subquery_key); -#[test] -fn test_get_range_query_with_unique_subquery_with_non_unique_null_values() { - let mut db = make_test_grovedb(); - populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db); + let path_query = PathQuery::new_unsized(path, query.clone()); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_all(); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let subquery_key: Vec = b"\0".to_vec(); + assert_eq!(elements.len(), 4); - query.set_subquery_key(subquery_key); + let first_value = 1988_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - let mut subquery = Query::new(); - subquery.insert_all(); + let last_value = 1991_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - query.add_conditional_subquery( - QueryItem::Key(b"".to_vec()), - Some(vec![b"\0".to_vec()]), - Some(subquery), - ); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 
4); + compare_result_sets(&elements, &result_set); + } - let path_query = PathQuery::new_unsized(path, query.clone()); + #[test] + fn test_get_range_query_with_unique_subquery_with_non_unique_null_values() { + let grove_version = GroveVersion::latest(); + let mut db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db, grove_version); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_all(); - assert_eq!(elements.len(), 115); + let subquery_key: Vec = b"\0".to_vec(); - let first_value = 100_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + query.set_subquery_key(subquery_key); - let last_value = 1999_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + let mut subquery = Query::new(); + subquery.insert_all(); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 115); - compare_result_sets(&elements, &result_set); -} + query.add_conditional_subquery( + QueryItem::Key(b"".to_vec()), + Some(vec![b"\0".to_vec()]), + Some(subquery), + ); -#[test] -fn test_get_range_query_with_unique_subquery_ignore_non_unique_null_values() { - let mut db = make_test_grovedb(); - populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db); + let path_query = PathQuery::new_unsized(path, query.clone()); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_all(); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let subquery_key: Vec = b"\0".to_vec(); + assert_eq!(elements.len(), 
115); - query.set_subquery_key(subquery_key); + let first_value = 100_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - let subquery = Query::new(); + let last_value = 1999_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - query.add_conditional_subquery( - QueryItem::Key(b"".to_vec()), - Some(vec![b"\0".to_vec()]), - Some(subquery), - ); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 115); + compare_result_sets(&elements, &result_set); + } - let path_query = PathQuery::new_unsized(path, query.clone()); + #[test] + fn test_get_range_query_with_unique_subquery_ignore_non_unique_null_values() { + let grove_version = GroveVersion::latest(); + let mut db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db, grove_version); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_all(); - assert_eq!(elements.len(), 15); + let subquery_key: Vec = b"\0".to_vec(); - let first_value = 1985_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + query.set_subquery_key(subquery_key); - let last_value = 1999_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + let subquery = Query::new(); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 15); - compare_result_sets(&elements, &result_set); -} + // This conditional subquery expresses 
that we do not want to get values in "" + // tree + query.add_conditional_subquery( + QueryItem::Key(b"".to_vec()), + Some(vec![b"\0".to_vec()]), // We want to go into 0, but we don't want to get anything + Some(subquery), + ); -#[test] -fn test_get_range_inclusive_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let path_query = PathQuery::new_unsized(path, query.clone()); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_inclusive(1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + assert_eq!(elements.len(), 15); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let first_value = 1985_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let last_value = 1999_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 15); + compare_result_sets(&elements, &result_set); + } - assert_eq!(elements.len(), 400); + #[test] + fn test_get_range_inclusive_query_with_non_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, 
grove_version); - let mut first_value = 1988_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_inclusive( + 1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec(), + ); - let mut last_value = 1995_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 400); - compare_result_sets(&elements, &result_set); -} + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); -#[test] -fn test_get_range_inclusive_query_with_non_unique_subquery_on_references() { - let db = make_test_grovedb(); - populate_tree_by_reference_for_non_unique_range_subquery(&db); + let path_query = PathQuery::new_unsized(path, query.clone()); - let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; - let mut query = Query::new(); - query.insert_range_inclusive(1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + assert_eq!(elements.len(), 400); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let mut first_value = 1988_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let path_query = PathQuery::new_unsized(path, 
query.clone()); + let mut last_value = 1995_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); - - assert_eq!(elements.len(), 400); - - let mut first_value = 1988_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - // using contains as the elements get stored at random key locations - // hence impossible to predict the final location - // but must exist - assert!(elements.contains(&first_value)); - - let mut last_value = 1995_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert!(elements.contains(&last_value)); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 400); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 400); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_inclusive_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + #[test] + fn test_get_range_inclusive_query_with_non_unique_subquery_on_references() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_by_reference_for_non_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - 
query.insert_range_inclusive(1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec()); + let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; + let mut query = Query::new(); + query.insert_range_inclusive( + 1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec(), + ); - let subquery_key: Vec = b"\0".to_vec(); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - query.set_subquery_key(subquery_key); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 8); + assert_eq!(elements.len(), 400); - let first_value = 1988_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let mut first_value = 1988_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + // using contains as the elements get stored at random key locations + // hence impossible to predict the final location + // but must exist + assert!(elements.contains(&first_value)); - let last_value = 1995_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + let mut last_value = 1995_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert!(elements.contains(&last_value)); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 8); - compare_result_sets(&elements, &result_set); -} + let proof = db + 
.prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 400); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_from_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_range_inclusive_query_with_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_from(1995_u32.to_be_bytes().to_vec()..); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_inclusive( + 1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec(), + ); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let subquery_key: Vec = b"\0".to_vec(); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + query.set_subquery_key(subquery_key); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 250); + assert_eq!(elements.len(), 8); - let mut first_value = 1995_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let first_value = 1988_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], 
first_value); - let mut last_value = 1999_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let last_value = 1995_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 250); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 8); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_from_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + #[test] + fn test_get_range_from_query_with_non_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_from(1995_u32.to_be_bytes().to_vec()..); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_from(1995_u32.to_be_bytes().to_vec()..); - let subquery_key: Vec = b"\0".to_vec(); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - query.set_subquery_key(subquery_key); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - 
.query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 5); + assert_eq!(elements.len(), 250); - let first_value = 1995_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let mut first_value = 1995_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let last_value = 1999_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + let mut last_value = 1999_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 250); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_to_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_range_from_query_with_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_to(..1995_u32.to_be_bytes().to_vec()); 
+ let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_from(1995_u32.to_be_bytes().to_vec()..); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let subquery_key: Vec = b"\0".to_vec(); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + query.set_subquery_key(subquery_key); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 500); + assert_eq!(elements.len(), 5); - let mut first_value = 1985_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let first_value = 1995_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - let mut last_value = 1994_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let last_value = 1999_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 500); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + 
compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_to_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + #[test] + fn test_get_range_to_query_with_non_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_to(..1995_u32.to_be_bytes().to_vec()); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_to(..1995_u32.to_be_bytes().to_vec()); - let subquery_key: Vec = b"\0".to_vec(); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - query.set_subquery_key(subquery_key); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 10); + assert_eq!(elements.len(), 500); - let first_value = 1985_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let mut first_value = 1985_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let last_value = 1994_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + let mut last_value = 1994_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = 
db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 10); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 500); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_to_inclusive_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_range_to_query_with_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_to_inclusive(..=1995_u32.to_be_bytes().to_vec()); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_to(..1995_u32.to_be_bytes().to_vec()); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let subquery_key: Vec = b"\0".to_vec(); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + query.set_subquery_key(subquery_key); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 550); + 
assert_eq!(elements.len(), 10); - let mut first_value = 1985_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let first_value = 1985_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - let mut last_value = 1995_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let last_value = 1994_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 550); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 10); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_range_to_inclusive_query_with_non_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new_with_direction(false); - query.insert_range_to_inclusive(..=5000_u32.to_be_bytes().to_vec()); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_to_inclusive(..=1995_u32.to_be_bytes().to_vec()); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = 
Query::new_with_direction(false); - subquery.insert_all(); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 750); + assert_eq!(elements.len(), 550); - let mut first_value = 1999_u32.to_be_bytes().to_vec(); - first_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let mut first_value = 1985_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let mut last_value = 1985_u32.to_be_bytes().to_vec(); - last_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let mut last_value = 1995_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 750); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); 
+ assert_eq!(result_set.len(), 550); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_to_inclusive_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + #[test] + fn test_get_range_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_to_inclusive(..=1995_u32.to_be_bytes().to_vec()); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new_with_direction(false); + query.insert_range_to_inclusive(..=5000_u32.to_be_bytes().to_vec()); - let subquery_key: Vec = b"\0".to_vec(); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new_with_direction(false); + subquery.insert_all(); - query.set_subquery_key(subquery_key); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 11); + assert_eq!(elements.len(), 750); - let first_value = 1985_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let mut first_value = 1999_u32.to_be_bytes().to_vec(); + first_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let last_value = 1995_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + let mut last_value = 1985_u32.to_be_bytes().to_vec(); + 
last_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 11); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 750); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_after_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_range_to_inclusive_query_with_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_after(1995_u32.to_be_bytes().to_vec()..); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_to_inclusive(..=1995_u32.to_be_bytes().to_vec()); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let subquery_key: Vec = b"\0".to_vec(); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + query.set_subquery_key(subquery_key); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, 
true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 200); + assert_eq!(elements.len(), 11); - let mut first_value = 1996_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let first_value = 1985_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - let mut last_value = 1999_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let last_value = 1995_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 200); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 11); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_after_to_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_range_after_query_with_non_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_after_to(1995_u32.to_be_bytes().to_vec()..1997_u32.to_be_bytes().to_vec()); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + 
query.insert_range_after(1995_u32.to_be_bytes().to_vec()..); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 50); + assert_eq!(elements.len(), 200); - let mut first_value = 1996_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let mut first_value = 1996_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let mut last_value = 1996_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let mut last_value = 1999_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 50); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, 
&path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 200); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_after_to_inclusive_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_range_after_to_query_with_non_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_after_to_inclusive( - 1995_u32.to_be_bytes().to_vec()..=1997_u32.to_be_bytes().to_vec(), - ); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_after_to( + 1995_u32.to_be_bytes().to_vec()..1997_u32.to_be_bytes().to_vec(), + ); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 100); + assert_eq!(elements.len(), 50); - let mut first_value = 1996_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let mut first_value = 
1996_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let mut last_value = 1997_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let mut last_value = 1996_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 100); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 50); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_after_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_range_after_to_inclusive_query_with_non_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new_with_direction(false); - query.insert_range_after_to_inclusive( - 1995_u32.to_be_bytes().to_vec()..=5000_u32.to_be_bytes().to_vec(), - ); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_after_to_inclusive( + 1995_u32.to_be_bytes().to_vec()..=1997_u32.to_be_bytes().to_vec(), + ); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = 
Query::new_with_direction(false); - subquery.insert_all(); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 200); + assert_eq!(elements.len(), 100); - let mut first_value = 1999_u32.to_be_bytes().to_vec(); - first_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let mut first_value = 1996_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let mut last_value = 1996_u32.to_be_bytes().to_vec(); - last_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let mut last_value = 1997_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 200); - compare_result_sets(&elements, &result_set); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); 
+ assert_eq!(result_set.len(), 100); + compare_result_sets(&elements, &result_set); + } -#[test] -fn test_get_range_inclusive_query_with_double_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_double_range_subquery(&db); + #[test] + fn test_get_range_after_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_inclusive((3u32).to_be_bytes().to_vec()..=(4u32).to_be_bytes().to_vec()); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new_with_direction(false); + query.insert_range_after_to_inclusive( + 1995_u32.to_be_bytes().to_vec()..=5000_u32.to_be_bytes().to_vec(), + ); - query.set_subquery_key(b"a".to_vec()); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new_with_direction(false); + subquery.insert_all(); - let mut subquery = Query::new(); - subquery - .insert_range_inclusive((29u32).to_be_bytes().to_vec()..=(31u32).to_be_bytes().to_vec()); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - subquery.set_subquery_key(b"\0".to_vec()); + let path_query = PathQuery::new_unsized(path, query.clone()); - let mut subsubquery = Query::new(); - subsubquery.insert_all(); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - subquery.set_subquery(subsubquery); + assert_eq!(elements.len(), 200); - query.set_subquery(subquery); + let mut first_value = 1999_u32.to_be_bytes().to_vec(); + first_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let mut last_value = 1996_u32.to_be_bytes().to_vec(); + 
last_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 200); + compare_result_sets(&elements, &result_set); + } - assert_eq!(elements.len(), 60); + #[test] + fn test_get_range_inclusive_query_with_double_non_unique_subquery() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_double_range_subquery(&db, grove_version); - let first_value = 100_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_inclusive(3u32.to_be_bytes().to_vec()..=4u32.to_be_bytes().to_vec()); - let last_value = 109_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(b"a".to_vec()); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 60); - compare_result_sets(&elements, &result_set); -} + let mut subquery = Query::new(); + subquery + .insert_range_inclusive(29u32.to_be_bytes().to_vec()..=31u32.to_be_bytes().to_vec()); -#[test] -fn test_get_range_query_with_limit_and_offset() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + subquery.set_subquery_key(b"\0".to_vec()); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new_with_direction(true); - 
query.insert_range(1990_u32.to_be_bytes().to_vec()..1995_u32.to_be_bytes().to_vec()); + let mut subsubquery = Query::new(); + subsubquery.insert_all(); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + subquery.set_subquery(subsubquery); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + query.set_subquery(subquery); - // Baseline query: no offset or limit + left to right - let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); + let path_query = PathQuery::new_unsized(path, query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 250); + assert_eq!(elements.len(), 60); - let mut first_value = 1990_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let first_value = 100_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - let mut last_value = 1994_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let last_value = 109_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 250); - compare_result_sets(&elements, &result_set); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + 
assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 60); + compare_result_sets(&elements, &result_set); + } - subquery.left_to_right = false; + #[test] + fn test_get_range_query_with_limit_and_offset() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new_with_direction(true); + query.insert_range(1990_u32.to_be_bytes().to_vec()..1995_u32.to_be_bytes().to_vec()); - query.left_to_right = false; + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - // Baseline query: no offset or limit + right to left - let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + // Baseline query: no offset or limit + left to right + let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); - assert_eq!(elements.len(), 250); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let mut first_value = 1994_u32.to_be_bytes().to_vec(); - first_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + assert_eq!(elements.len(), 250); - let mut last_value = 1990_u32.to_be_bytes().to_vec(); - last_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let mut first_value = 1990_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + 
assert_eq!(elements[0], first_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 250); - compare_result_sets(&elements, &result_set); + let mut last_value = 1994_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - subquery.left_to_right = true; + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 250); + compare_result_sets(&elements, &result_set); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + subquery.left_to_right = false; - query.left_to_right = true; + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - // Limit the result to just 55 elements - let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(55), None)); + query.left_to_right = false; - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + // Baseline query: no offset or limit + right to left + let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); - assert_eq!(elements.len(), 55); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let mut first_value = 1990_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + assert_eq!(elements.len(), 250); - // Second tree 5 element [100, 101, 102, 103, 
104] - let mut last_value = 1991_u32.to_be_bytes().to_vec(); - last_value.append(&mut 104_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let mut first_value = 1994_u32.to_be_bytes().to_vec(); + first_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 55); - compare_result_sets(&elements, &result_set); + let mut last_value = 1990_u32.to_be_bytes().to_vec(); + last_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 250); + compare_result_sets(&elements, &result_set); - // Limit the result set to 60 elements but skip the first 14 elements - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(60), Some(14)), - ); + subquery.left_to_right = true; - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); - - assert_eq!(elements.len(), 60); - - // Skips the first 14 elements, starts from the 15th - // i.e skips [100 - 113] starts from 114 - let mut first_value = 1990_u32.to_be_bytes().to_vec(); - first_value.append(&mut 114_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); - - // Continues for 60 iterations - // Takes 36 elements from the first tree (50 - 14) - // takes the remaining 24 from the second three 
(60 - 36) - let mut last_value = 1991_u32.to_be_bytes().to_vec(); - last_value.append(&mut 123_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 60); - compare_result_sets(&elements, &result_set); - - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); - - query.left_to_right = false; - - // Limit the result set to 60 element but skip first 10 elements (this time - // right to left) - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(60), Some(10)), - ); - - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - assert_eq!(elements.len(), 60); + query.left_to_right = true; - // Skips the first 10 elements from the back - // last tree and starts from the 11th before the end - // Underlying subquery is ascending - let mut first_value = 1994_u32.to_be_bytes().to_vec(); - first_value.append(&mut 110_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + // Limit the result to just 55 elements + let path_query = + PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(55), None)); - let mut last_value = 1993_u32.to_be_bytes().to_vec(); - last_value.append(&mut 119_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - 
assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 60); - compare_result_sets(&elements, &result_set); + assert_eq!(elements.len(), 55); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + let mut first_value = 1990_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.left_to_right = true; + // Second tree 5 element [100, 101, 102, 103, 104] + let mut last_value = 1991_u32.to_be_bytes().to_vec(); + last_value.append(&mut 104_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - // Offset bigger than elements in range - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), None, Some(5000)), - ); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 55); + compare_result_sets(&elements, &result_set); + + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); + + // Limit the result set to 60 elements but skip the first 14 elements + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(60), Some(14)), + ); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - assert_eq!(elements.len(), 0); + assert_eq!(elements.len(), 60); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, 
db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); + // Skips the first 14 elements, starts from the 15th + // i.e. skips [100 - 113] starts from 114 + let mut first_value = 1990_u32.to_be_bytes().to_vec(); + first_value.append(&mut 114_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery); + // Continues for 60 iterations + // Takes 36 elements from the first tree (50 - 14) + // takes the remaining 24 from the second three (60 - 36) + let mut last_value = 1991_u32.to_be_bytes().to_vec(); + last_value.append(&mut 123_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - // Limit bigger than elements in range - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(5000), None), - ); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + query.left_to_right = false; - assert_eq!(elements.len(), 250); + // Limit the result set to 60 element but skip first 10 elements (this time + // right to left) + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(60), Some(10)), + ); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 250); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - // Test on unique subtree build - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + assert_eq!(elements.len(), 60); - let mut query = Query::new_with_direction(true); - 
query.insert_range(1990_u32.to_be_bytes().to_vec()..2000_u32.to_be_bytes().to_vec()); + // Skips the first 10 elements from the back + // last tree and starts from the 11th before the end + // Underlying subquery is ascending + let mut first_value = 1994_u32.to_be_bytes().to_vec(); + first_value.append(&mut 110_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); + let mut last_value = 1993_u32.to_be_bytes().to_vec(); + last_value.append(&mut 119_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(5), Some(2))); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("expected successful get_path_query"); + query.left_to_right = true; - assert_eq!(elements.len(), 5); + // Offset bigger than elements in range + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), None, Some(5000)), + ); - let first_value = 1992_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let last_value = 1996_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + assert_eq!(elements.len(), 0); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - compare_result_sets(&elements, &result_set); -} + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery); -#[test] -fn test_correct_child_root_hash_propagation_for_parent_in_same_batch() { - let tmp_dir = TempDir::new().unwrap(); 
- let db = GroveDb::open(tmp_dir.path()).unwrap(); - let tree_name_slice: &[u8] = &[ - 2, 17, 40, 46, 227, 17, 179, 211, 98, 50, 130, 107, 246, 26, 147, 45, 234, 189, 245, 77, - 252, 86, 99, 107, 197, 226, 188, 54, 239, 64, 17, 37, - ]; - - let batch = vec![GroveDbOp::insert_op(vec![], vec![1], Element::empty_tree())]; - db.apply_batch(batch, None, None) - .unwrap() - .expect("should apply batch"); + // Limit bigger than elements in range + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(5000), None), + ); - let batch = vec![ - GroveDbOp::insert_op( - vec![vec![1]], - tree_name_slice.to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![vec![1], tree_name_slice.to_vec()], - b"\0".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![vec![1], tree_name_slice.to_vec()], - vec![1], - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![vec![1], tree_name_slice.to_vec(), vec![1]], - b"person".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![ - vec![1], - tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - ], - b"\0".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![ - vec![1], - tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - ], - b"firstName".to_vec(), - Element::empty_tree(), - ), - ]; - db.apply_batch(batch, None, None) - .unwrap() - .expect("should apply batch"); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); - let batch = vec![ - GroveDbOp::insert_op( - vec![ - vec![1], + assert_eq!(elements.len(), 250); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 250); + + // Test 
on unique subtree build + let db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); + + let mut query = Query::new_with_direction(true); + query.insert_range(1990_u32.to_be_bytes().to_vec()..2000_u32.to_be_bytes().to_vec()); + + query.set_subquery_key(subquery_key); + + let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(5), Some(2))); + + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!(elements.len(), 5); + + let first_value = 1992_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); + + let last_value = 1996_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); + } + + #[test] + fn test_correct_child_root_hash_propagation_for_parent_in_same_batch() { + let grove_version = GroveVersion::latest(); + let tmp_dir = TempDir::new().unwrap(); + let db = GroveDb::open(tmp_dir.path()).unwrap(); + let tree_name_slice: &[u8] = &[ + 2, 17, 40, 46, 227, 17, 179, 211, 98, 50, 130, 107, 246, 26, 147, 45, 234, 189, 245, + 77, 252, 86, 99, 107, 197, 226, 188, 54, 239, 64, 17, 37, + ]; + + let batch = vec![GroveDbOp::insert_op(vec![], vec![1], Element::empty_tree())]; + db.apply_batch(batch, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = vec![ + GroveDbOp::insert_op( + vec![vec![1]], tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![vec![1], tree_name_slice.to_vec()], b"\0".to_vec(), - ], - b"person_id_1".to_vec(), - Element::new_item(vec![50]), - ), - GroveDbOp::insert_op( - vec![ - vec![1], - tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - b"firstName".to_vec(), - ], - b"cammi".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![ - vec![1], - tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - 
b"firstName".to_vec(), - b"cammi".to_vec(), - ], - b"\0".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![ - vec![1], - tree_name_slice.to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![vec![1], tree_name_slice.to_vec()], vec![1], + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![vec![1], tree_name_slice.to_vec(), vec![1]], b"person".to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + ], + b"\0".to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + ], b"firstName".to_vec(), + Element::empty_tree(), + ), + ]; + db.apply_batch(batch, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = vec![ + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"\0".to_vec(), + ], + b"person_id_1".to_vec(), + Element::new_item(vec![50]), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"firstName".to_vec(), + ], b"cammi".to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"firstName".to_vec(), + b"cammi".to_vec(), + ], b"\0".to_vec(), - ], - b"person_ref_id".to_vec(), - Element::new_reference(ReferencePathType::UpstreamRootHeightReference( - 4, - vec![b"\0".to_vec(), b"person_id_1".to_vec()], - )), - ), - ]; - db.apply_batch(batch, None, None) - .unwrap() - .expect("should apply batch"); - - let path = vec![ - vec![1], - tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - b"firstName".to_vec(), - ]; - let mut query = Query::new(); - query.insert_all(); - query.set_subquery_key(b"\0".to_vec()); - let mut subquery = Query::new(); - subquery.insert_all(); - query.set_subquery(subquery); - let 
path_query = PathQuery::new( - path, - SizedQuery { - query: query.clone(), - limit: Some(100), - offset: Some(0), - }, - ); + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"firstName".to_vec(), + b"cammi".to_vec(), + b"\0".to_vec(), + ], + b"person_ref_id".to_vec(), + Element::new_reference(ReferencePathType::UpstreamRootHeightReference( + 4, + vec![b"\0".to_vec(), b"person_id_1".to_vec()], + )), + ), + ]; + db.apply_batch(batch, None, None, grove_version) + .unwrap() + .expect("should apply batch"); - let proof = db - .prove_query(&path_query) - .unwrap() - .expect("expected successful proving"); - let (hash, _result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); -} + let path = vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"firstName".to_vec(), + ]; + let mut query = Query::new(); + query.insert_all(); + query.set_subquery_key(b"\0".to_vec()); + let mut subquery = Query::new(); + subquery.insert_all(); + query.set_subquery(subquery); + let path_query = PathQuery::new( + path, + SizedQuery { + query: query.clone(), + limit: Some(100), + offset: Some(0), + }, + ); -#[test] -fn test_mixed_level_proofs() { - let db = make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::new_item(vec![1]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key3", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key4", - Element::new_reference(ReferencePathType::SiblingReference(b"key2".to_vec())), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - 
db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k1", - Element::new_item(vec![2]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k2", - Element::new_item(vec![3]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k3", - Element::new_item(vec![4]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - - let mut query = Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - query.set_subquery(subquery); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path.clone(), query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) - .unwrap() - .expect("successful get_path_query"); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .expect("expected successful proving"); + let (hash, _result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + } - assert_eq!(elements.len(), 5); - assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); + #[test] + fn test_mixed_level_proofs() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - compare_result_sets(&elements, &result_set); + // TEST_LEAF + // / | | \ + // key1 key2 : [1] key3 key4 : (Ref -> Key2) + // / | \ + // k1 k2 k3 + // / / / + // 2 3 4 - // Test mixed element proofs with limit and offset - let path_query = PathQuery::new_unsized(path.clone(), query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, None) + 
db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() - .expect("successful get_path_query"); - - assert_eq!(elements.len(), 5); - assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - compare_result_sets(&elements, &result_set); - - // TODO: Fix noticed bug when limit and offset are both set to Some(0) - - let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(1), None)); - let (elements, _) = db - .query_item_value(&path_query, true, None) + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key2", + Element::new_item(vec![1]), + None, + None, + grove_version, + ) .unwrap() - .expect("successful get_path_query"); - - assert_eq!(elements.len(), 1); - assert_eq!(elements, vec![vec![2]]); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - compare_result_sets(&elements, &result_set); - - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(3), Some(0)), - ); - let (elements, _) = db - .query_item_value(&path_query, true, None) + .expect("successful item insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key3", + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() - .expect("successful get_path_query"); - - assert_eq!(elements.len(), 3); - assert_eq!(elements, vec![vec![2], vec![3], vec![4]]); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, 
db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - compare_result_sets(&elements, &result_set); - - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(4), Some(0)), - ); - let (elements, _) = db - .query_item_value(&path_query, true, None) + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key4", + Element::new_reference(ReferencePathType::SiblingReference(b"key2".to_vec())), + None, + None, + grove_version, + ) .unwrap() - .expect("successful get_path_query"); - - assert_eq!(elements.len(), 4); - assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1]]); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - compare_result_sets(&elements, &result_set); + .expect("successful subtree insert"); - let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(10), Some(4))); - let (elements, _) = db - .query_item_value(&path_query, true, None) + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k1", + Element::new_item(vec![2]), + None, + None, + grove_version, + ) .unwrap() - .expect("successful get_path_query"); - - assert_eq!(elements.len(), 1); - assert_eq!(elements, vec![vec![1]]); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - compare_result_sets(&elements, &result_set); -} - -#[test] -fn test_mixed_level_proofs_with_tree() { - let db = make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::empty_tree(), - None, - 
None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key3", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k1", - Element::new_item(vec![2]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k2", - Element::new_item(vec![3]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k3", - Element::new_item(vec![4]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"k1", - Element::new_item(vec![5]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - - let mut query = Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - query.add_conditional_subquery(QueryItem::Key(b"key1".to_vec()), None, Some(subquery)); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path.clone(), query.clone()); - - let (elements, _) = db - .query_raw( - &path_query, - true, - QueryResultType::QueryPathKeyElementTrioResultType, + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k2", + Element::new_item(vec![3]), + None, None, + grove_version, ) .unwrap() - .expect("expected successful get_path_query"); + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k3", + Element::new_item(vec![4]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful item insert"); + + let mut query = Query::new(); + query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + query.set_subquery(subquery); - assert_eq!(elements.len(), 5); + let path = vec![TEST_LEAF.to_vec()]; - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = 
GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); + let path_query = PathQuery::new_unsized(path.clone(), query.clone()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("successful get_path_query"); - // TODO: verify that the result set is exactly the same - // compare_result_sets(&elements, &result_set); + assert_eq!(elements.len(), 5); + assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); - let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(1), None)); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + // println!( + // "{}", + // result_set + // .iter() + // .map(|a| a.to_string()) + // .collect::>() + // .join(" | ") + // ); + assert_eq!(result_set.len(), 5); + compare_result_sets(&elements, &result_set); + + // Test mixed element proofs with limit and offset + let path_query = PathQuery::new_unsized(path.clone(), query.clone()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("successful get_path_query"); - let (elements, _) = db - .query_raw( - &path_query, - true, - QueryResultType::QueryPathKeyElementTrioResultType, - None, - ) - .unwrap() - .expect("expected successful get_path_query"); + assert_eq!(elements.len(), 5); + assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); - assert_eq!(elements.len(), 1); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, 
grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + compare_result_sets(&elements, &result_set); + + // TODO: Fix noticed bug when limit and offset are both set to Some(0) + + let path_query = + PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(1), None)); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("successful get_path_query"); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - // TODO: verify that the result set is exactly the same - // compare_result_sets(&elements, &result_set); -} + assert_eq!(elements.len(), 1); + assert_eq!(elements, vec![vec![2]]); -#[test] -fn test_mixed_level_proofs_with_subquery_paths() { - let db = make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"a", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"b", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"c", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"a"].as_ref(), - b"d", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"a"].as_ref(), - b"e", - Element::new_item(vec![2]), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"a"].as_ref(), - b"f", - Element::new_item(vec![3]), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"a", b"d"].as_ref(), - b"d", - Element::new_item(vec![6]), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - 
- db.insert( - [TEST_LEAF, b"b"].as_ref(), - b"g", - Element::new_item(vec![4]), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"b"].as_ref(), - b"d", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"b", b"d"].as_ref(), - b"i", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"b", b"d"].as_ref(), - b"j", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"b", b"d"].as_ref(), - b"k", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - // if you don't have an item at the subquery path translation, you shouldn't be - // added to the result set. - let mut query = Query::new(); - query.insert_all(); - query.set_subquery_path(vec![b"d".to_vec()]); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path, query.clone()); - - // TODO: proofs seems to be more expressive than query_raw now - // let (elements, _) = db - // .query_raw( - // &path_query, - // true, - // QueryResultType::QueryPathKeyElementTrioResultType, - // None, - // ) - // .unwrap() - // .expect("expected successful get_path_query"); - // - // assert_eq!(elements.len(), 2); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 2); - - // apply path translation then query - let mut query = Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - query.set_subquery_path(vec![b"d".to_vec()]); - query.set_subquery(subquery); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path, query.clone()); - - 
let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - - // apply empty path translation - let mut query = Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - query.set_subquery_path(vec![]); - query.set_subquery(subquery); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path, query.clone()); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - - // use conditionals to return from more than 2 depth - let mut query = Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - let mut deeper_subquery = Query::new(); - deeper_subquery.insert_all(); - subquery.add_conditional_subquery(QueryItem::Key(b"d".to_vec()), None, Some(deeper_subquery)); - query.add_conditional_subquery(QueryItem::Key(b"a".to_vec()), None, Some(subquery.clone())); - query.add_conditional_subquery(QueryItem::Key(b"b".to_vec()), None, Some(subquery.clone())); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path, query.clone()); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 8); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 1); + 
compare_result_sets(&elements, &result_set); + + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(3), Some(0)), + ); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("successful get_path_query"); -#[test] -fn test_proof_with_limit_zero() { - let db = make_deep_tree(); - let mut query = Query::new(); - query.insert_all(); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec()], - SizedQuery::new(query, Some(0), Some(0)), - ); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} + assert_eq!(elements.len(), 3); + assert_eq!(elements, vec![vec![2], vec![3], vec![4]]); -#[test] -fn test_result_set_path_after_verification() { - let db = make_deep_tree(); - let mut query = Query::new(); - query.insert_all(); - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - // assert the result set path - assert_eq!( - result_set[0].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[1].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[2].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - - assert_eq!(result_set[0].key, b"key1".to_vec()); - assert_eq!(result_set[1].key, b"key2".to_vec()); - assert_eq!(result_set[2].key, b"key3".to_vec()); - - // Test path tracking with subquery - let mut query = Query::new(); - query.insert_all(); - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - let 
path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - - assert_eq!( - result_set[0].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[1].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[2].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[3].path, - vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()] - ); - assert_eq!( - result_set[4].path, - vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()] - ); - - // Test path tracking with subquery path - // perform a query, do a translation, perform another query - let mut query = Query::new(); - query.insert_key(b"deep_leaf".to_vec()); - query.set_subquery_path(vec![b"deep_node_1".to_vec(), b"deeper_1".to_vec()]); - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - let path_query = PathQuery::new_unsized(vec![], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - assert_eq!( - result_set[0].path, - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec() - ] - ); - assert_eq!( - result_set[1].path, - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec() - ] - ); - assert_eq!( - result_set[2].path, - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec() - ] - ); - - assert_eq!(result_set[0].key, b"key1".to_vec()); - assert_eq!(result_set[1].key, b"key2".to_vec()); - assert_eq!(result_set[2].key, b"key3".to_vec()); - - // Test path tracking for mixed level result 
set - let mut query = Query::new(); - query.insert_all(); - let mut subq = Query::new(); - subq.insert_all(); - query.add_conditional_subquery(QueryItem::Key(b"innertree".to_vec()), None, Some(subq)); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - - assert_eq!( - result_set[0].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[1].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[2].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!(result_set[3].path, vec![TEST_LEAF.to_vec()]); - - assert_eq!(result_set[0].key, b"key1".to_vec()); - assert_eq!(result_set[1].key, b"key2".to_vec()); - assert_eq!(result_set[2].key, b"key3".to_vec()); - assert_eq!(result_set[3].key, b"innertree4".to_vec()); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + compare_result_sets(&elements, &result_set); + + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(4), Some(0)), + ); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("successful get_path_query"); -#[test] -fn test_verification_with_path_key_optional_element_trio() { - let db = make_deep_tree(); - let mut query = Query::new(); - query.insert_all(); - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let 
(hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - assert_eq!( - result_set[0], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key1".to_vec(), - Some(Element::new_item(b"value1".to_vec())) - ) - ); - assert_eq!( - result_set[1], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key2".to_vec(), - Some(Element::new_item(b"value2".to_vec())) - ) - ); - assert_eq!( - result_set[2], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key3".to_vec(), - Some(Element::new_item(b"value3".to_vec())) - ) - ); -} + assert_eq!(elements.len(), 4); + assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1]]); -#[test] -fn test_absence_proof() { - let db = make_deep_tree(); - - // simple case, request for items k2..=k5 under inner tree - // we pass them as keys as terminal keys does not handle ranges with start or - // end len greater than 1 k2, k3 should be Some, k4, k5 should be None, k1, - // k6.. 
should not be in map - let mut query = Query::new(); - query.insert_key(b"key2".to_vec()); - query.insert_key(b"key3".to_vec()); - query.insert_key(b"key4".to_vec()); - query.insert_key(b"key5".to_vec()); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - SizedQuery::new(query, Some(4), None), - ); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_with_absence_proof(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - - assert_eq!( - result_set[0].0, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[1].0, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[2].0, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[3].0, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - - assert_eq!(result_set[0].1, b"key2".to_vec()); - assert_eq!(result_set[1].1, b"key3".to_vec()); - assert_eq!(result_set[2].1, b"key4".to_vec()); - assert_eq!(result_set[3].1, b"key5".to_vec()); - - assert_eq!(result_set[0].2, Some(Element::new_item(b"value2".to_vec()))); - assert_eq!(result_set[1].2, Some(Element::new_item(b"value3".to_vec()))); - assert_eq!(result_set[2].2, None); - assert_eq!(result_set[3].2, None); -} + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + compare_result_sets(&elements, &result_set); + + let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(10), Some(4))); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None, grove_version) + .unwrap() + .expect("successful get_path_query"); -#[test] -fn 
test_subset_proof_verification() { - let db = make_deep_tree(); - - // original path query - let mut query = Query::new(); - query.insert_all(); - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - // first we prove non-verbose - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - assert_eq!( - result_set[0], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key1".to_vec(), - Some(Element::new_item(b"value1".to_vec())) - ) - ); - assert_eq!( - result_set[1], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key2".to_vec(), - Some(Element::new_item(b"value2".to_vec())) + assert_eq!(elements.len(), 1); + assert_eq!(elements, vec![vec![1]]); + } + + #[test] + fn test_mixed_level_proofs_with_tree() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + grove_version, ) - ); - assert_eq!( - result_set[2], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key3".to_vec(), - Some(Element::new_item(b"value3".to_vec())) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + grove_version, ) - ); - assert_eq!( - result_set[3], - ( - vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], - b"key4".to_vec(), - Some(Element::new_item(b"value4".to_vec())) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key3", + Element::empty_tree(), + None, + None, + grove_version, ) - ); - assert_eq!( - result_set[4], - ( - vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], - b"key5".to_vec(), - 
Some(Element::new_item(b"value5".to_vec())) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k1", + Element::new_item(vec![2]), + None, + None, + grove_version, ) - ); - - // prove verbose - let verbose_proof = db.prove_verbose(&path_query).unwrap().unwrap(); - assert!(verbose_proof.len() > proof.len()); - - // subset path query - let mut query = Query::new(); - query.insert_key(b"innertree".to_vec()); - let mut subq = Query::new(); - subq.insert_key(b"key1".to_vec()); - query.set_subquery(subq); - let subset_path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let (hash, result_set) = - GroveDb::verify_subset_query(&verbose_proof, &subset_path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - assert_eq!( - result_set[0], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key1".to_vec(), - Some(Element::new_item(b"value1".to_vec())) + .unwrap() + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k2", + Element::new_item(vec![3]), + None, + None, + grove_version, ) - ); -} + .unwrap() + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k3", + Element::new_item(vec![4]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"k1", + Element::new_item(vec![5]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful item insert"); -#[test] -fn test_chained_path_query_verification() { - let db = make_deep_tree(); - - let mut query = Query::new(); - query.insert_all(); - let mut subq = Query::new(); - subq.insert_all(); - let mut subsubq = Query::new(); - subsubq.insert_all(); - - subq.set_subquery(subsubq); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![b"deep_leaf".to_vec()], query); - - // first prove non verbose - let proof = 
db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 11); - - // prove verbose - let verbose_proof = db.prove_verbose(&path_query).unwrap().unwrap(); - assert!(verbose_proof.len() > proof.len()); - - // init deeper_1 path query - let mut query = Query::new(); - query.insert_all(); - - let deeper_1_path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec(), - ], - query, - ); - - // define the path query generators - let mut chained_path_queries = vec![]; - chained_path_queries.push(|_elements: Vec| { let mut query = Query::new(); query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + query.add_conditional_subquery(QueryItem::Key(b"key1".to_vec()), None, Some(subquery)); - let deeper_2_path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_2".to_vec(), - ], - query, + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path.clone(), query.clone()); + + let (elements, _) = db + .query_raw( + &path_query, + true, + true, + true, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + grove_version, + ) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!(elements.len(), 5); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + + // println!( + // "{}", + // result_set + // .iter() + // .map(|a| a.to_string()) + // .collect::>() + // .join(", ") + // ); + assert_eq!(result_set.len(), 5); + + // TODO: verify that the result set is exactly the same + // compare_result_sets(&elements, &result_set); + + 
let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(1), None)); + + let (elements, _) = db + .query_raw( + &path_query, + true, + true, + true, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + grove_version, + ) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!(elements.len(), 1); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 1); + // TODO: verify that the result set is exactly the same + // compare_result_sets(&elements, &result_set); + } + + #[test] + fn test_mixed_level_proofs_with_subquery_paths() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + // TEST_LEAF + // / | \ + // a b c + // / | \ / \ + // d e:2 f:3 g:4 d + // / / | \ + // d:6 i j k + // + + db.insert( + [TEST_LEAF].as_ref(), + b"a", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"b", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"c", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"a"].as_ref(), + b"d", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF, b"a"].as_ref(), + b"e", + Element::new_item(vec![2]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF, b"a"].as_ref(), + b"f", + Element::new_item(vec![3]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree 
insert"); + + db.insert( + [TEST_LEAF, b"a", b"d"].as_ref(), + b"d", + Element::new_item(vec![6]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"b"].as_ref(), + b"g", + Element::new_item(vec![4]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF, b"b"].as_ref(), + b"d", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"b", b"d"].as_ref(), + b"i", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF, b"b", b"d"].as_ref(), + b"j", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF, b"b", b"d"].as_ref(), + b"k", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); + // // if you don't have an item at the subquery path translation, you shouldn't + // be // added to the result set. 
+ let mut query = Query::new(); + query.insert_all(); + query.set_subquery_path(vec![b"d".to_vec()]); + + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path, query.clone()); + + let (elements, _) = db + .query_raw( + &path_query, + false, + true, + false, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + grove_version, + ) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!( + elements, + QueryResultElements::from_elements(vec![ + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"a".to_vec()], + b"d".to_vec(), + Element::Tree(Some(b"d".to_vec()), None) + )), + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"b".to_vec()], + b"d".to_vec(), + Element::Tree(Some(b"j".to_vec()), None) + )) + ]) ); - Some(deeper_2_path_query) - }); - - // verify the path query chain - let (root_hash, results) = GroveDb::verify_query_with_chained_path_queries( - &verbose_proof, - &deeper_1_path_query, - chained_path_queries, - ) - .unwrap(); - assert_eq!(root_hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(results.len(), 2); - assert_eq!(results[0].len(), 3); - assert_eq!( - results[0][0], - ( + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + // println!( + // "{}", + // result_set + // .iter() + // .map(|a| a.to_string()) + // .collect::>() + // .join("| ") + // ); + assert_eq!(result_set.len(), 2); + + // apply path translation then query + let mut query = Query::new(); + query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + query.set_subquery_path(vec![b"d".to_vec()]); + query.set_subquery(subquery); + + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path, query.clone()); + + let (elements, _) = db + 
.query_raw( + &path_query, + false, + true, + false, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + grove_version, + ) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!( + elements, + QueryResultElements::from_elements(vec![ + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"a".to_vec(), b"d".to_vec()], + b"d".to_vec(), + Element::Item(vec![6], None) + )), + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"b".to_vec(), b"d".to_vec()], + b"i".to_vec(), + Element::Tree(None, None) + )), + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"b".to_vec(), b"d".to_vec()], + b"j".to_vec(), + Element::Tree(None, None) + )), + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"b".to_vec(), b"d".to_vec()], + b"k".to_vec(), + Element::Tree(None, None) + )) + ]) + ); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + + // apply empty path translation + let mut query = Query::new(); + query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + query.set_subquery_path(vec![]); + query.set_subquery(subquery); + + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path, query.clone()); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + + // use conditionals to return from more than 2 depth + let mut query = Query::new(); + query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + let mut deeper_subquery = 
Query::new(); + deeper_subquery.insert_all(); + subquery.add_conditional_subquery( + QueryItem::Key(b"d".to_vec()), + None, + Some(deeper_subquery), + ); + query.add_conditional_subquery(QueryItem::Key(b"a".to_vec()), None, Some(subquery.clone())); + query.add_conditional_subquery(QueryItem::Key(b"b".to_vec()), None, Some(subquery.clone())); + + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path, query.clone()); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 8); + } + + #[test] + fn test_proof_with_limit_zero() { + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); + let mut query = Query::new(); + query.insert_all(); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec()], + SizedQuery::new(query, Some(0), Some(0)), + ); + + db.prove_query(&path_query, None, grove_version) + .unwrap() + .expect_err("expected error when trying to prove with limit 0"); + } + + #[test] + fn test_result_set_path_after_verification() { + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); + let mut query = Query::new(); + query.insert_all(); + let path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + + // assert the result set path + assert_eq!( + result_set[0].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[1].path, + vec![TEST_LEAF.to_vec(), 
b"innertree".to_vec()] + ); + assert_eq!( + result_set[2].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + + assert_eq!(result_set[0].key, b"key1".to_vec()); + assert_eq!(result_set[1].key, b"key2".to_vec()); + assert_eq!(result_set[2].key, b"key3".to_vec()); + + // Test path tracking with subquery + let mut query = Query::new(); + query.insert_all(); + let mut subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + + assert_eq!( + result_set[0].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[1].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[2].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[3].path, + vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()] + ); + assert_eq!( + result_set[4].path, + vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()] + ); + + // Test path tracking with subquery path + // perform a query, do a translation, perform another query + let mut query = Query::new(); + query.insert_key(b"deep_leaf".to_vec()); + query.set_subquery_path(vec![b"deep_node_1".to_vec(), b"deeper_1".to_vec()]); + let mut subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); + let path_query = PathQuery::new_unsized(vec![], query); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + + 
assert_eq!( + result_set[0].path, vec![ b"deep_leaf".to_vec(), b"deep_node_1".to_vec(), b"deeper_1".to_vec() - ], - b"key1".to_vec(), - Some(Element::new_item(b"value1".to_vec())) - ) - ); - assert_eq!( - results[0][1], - ( + ] + ); + assert_eq!( + result_set[1].path, vec![ b"deep_leaf".to_vec(), b"deep_node_1".to_vec(), b"deeper_1".to_vec() - ], - b"key2".to_vec(), - Some(Element::new_item(b"value2".to_vec())) - ) - ); - assert_eq!( - results[0][2], - ( + ] + ); + assert_eq!( + result_set[2].path, vec![ b"deep_leaf".to_vec(), b"deep_node_1".to_vec(), b"deeper_1".to_vec() - ], - b"key3".to_vec(), - Some(Element::new_item(b"value3".to_vec())) - ) - ); + ] + ); + + assert_eq!(result_set[0].key, b"key1".to_vec()); + assert_eq!(result_set[1].key, b"key2".to_vec()); + assert_eq!(result_set[2].key, b"key3".to_vec()); - assert_eq!(results[1].len(), 3); - assert_eq!( - results[1][0], - ( + // Test path tracking for mixed level result set + let mut query = Query::new(); + query.insert_all(); + let mut subq = Query::new(); + subq.insert_all(); + query.add_conditional_subquery(QueryItem::Key(b"innertree".to_vec()), None, Some(subq)); + + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + + assert_eq!( + result_set[0].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[1].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[2].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!(result_set[3].path, vec![TEST_LEAF.to_vec()]); + + assert_eq!(result_set[0].key, b"key1".to_vec()); + assert_eq!(result_set[1].key, b"key2".to_vec()); + assert_eq!(result_set[2].key, 
b"key3".to_vec()); + assert_eq!(result_set[3].key, b"innertree4".to_vec()); + } + + #[test] + fn test_verification_with_path_key_optional_element_trio() { + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); + let mut query = Query::new(); + query.insert_all(); + let path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + + assert_eq!( + result_set[0], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key1".to_vec(), + Some(Element::new_item(b"value1".to_vec())) + ) + ); + assert_eq!( + result_set[1], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key2".to_vec(), + Some(Element::new_item(b"value2".to_vec())) + ) + ); + assert_eq!( + result_set[2], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key3".to_vec(), + Some(Element::new_item(b"value3".to_vec())) + ) + ); + } + + #[test] + fn test_absence_proof() { + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); + + // simple case, request for items k2..=k5 under inner tree + // we pass them as keys as terminal keys does not handle ranges with start or + // end len greater than 1 k2, k3 should be Some, k4, k5 should be None, k1, + // k6.. 
should not be in map + let mut query = Query::new(); + query.insert_key(b"key2".to_vec()); + query.insert_key(b"key3".to_vec()); + query.insert_key(b"key4".to_vec()); + query.insert_key(b"key5".to_vec()); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + SizedQuery::new(query, Some(4), None), + ); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_with_absence_proof(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + + assert_eq!( + result_set[0].0, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[1].0, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[2].0, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[3].0, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + + assert_eq!(result_set[0].1, b"key2".to_vec()); + assert_eq!(result_set[1].1, b"key3".to_vec()); + assert_eq!(result_set[2].1, b"key4".to_vec()); + assert_eq!(result_set[3].1, b"key5".to_vec()); + + assert_eq!(result_set[0].2, Some(Element::new_item(b"value2".to_vec()))); + assert_eq!(result_set[1].2, Some(Element::new_item(b"value3".to_vec()))); + assert_eq!(result_set[2].2, None); + assert_eq!(result_set[3].2, None); + } + + #[test] + fn test_subset_proof_verification() { + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); + + // original path query + let mut query = Query::new(); + query.insert_all(); + let mut subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query, 
grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + assert_eq!( + result_set[0], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key1".to_vec(), + Some(Element::new_item(b"value1".to_vec())) + ) + ); + assert_eq!( + result_set[1], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key2".to_vec(), + Some(Element::new_item(b"value2".to_vec())) + ) + ); + assert_eq!( + result_set[2], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key3".to_vec(), + Some(Element::new_item(b"value3".to_vec())) + ) + ); + assert_eq!( + result_set[3], + ( + vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], + b"key4".to_vec(), + Some(Element::new_item(b"value4".to_vec())) + ) + ); + assert_eq!( + result_set[4], + ( + vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], + b"key5".to_vec(), + Some(Element::new_item(b"value5".to_vec())) + ) + ); + + // subset path query + let mut query = Query::new(); + query.insert_key(b"innertree".to_vec()); + let mut subq = Query::new(); + subq.insert_key(b"key1".to_vec()); + query.set_subquery(subq); + let subset_path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let (hash, result_set) = + GroveDb::verify_subset_query(&proof, &subset_path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 1); + assert_eq!( + result_set[0], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key1".to_vec(), + Some(Element::new_item(b"value1".to_vec())) + ) + ); + } + #[test] + fn test_chained_path_query_verification() { + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); + + let mut query = Query::new(); + query.insert_all(); + let mut subq = Query::new(); + subq.insert_all(); + let mut subsubq = Query::new(); + subsubq.insert_all(); + + subq.set_subquery(subsubq); + query.set_subquery(subq); + + let path_query = 
PathQuery::new_unsized(vec![b"deep_leaf".to_vec()], query); + + // first prove non verbose + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 14); + + // init deeper_1 path query + let mut query = Query::new(); + query.insert_all(); + + let deeper_1_path_query = PathQuery::new_unsized( vec![ b"deep_leaf".to_vec(), b"deep_node_1".to_vec(), - b"deeper_2".to_vec() + b"deeper_1".to_vec(), ], - b"key4".to_vec(), - Some(Element::new_item(b"value4".to_vec())) + query, + ); + + // define the path query generators + let mut chained_path_queries = vec![]; + chained_path_queries.push(|_elements: Vec| { + let mut query = Query::new(); + query.insert_all(); + + let deeper_2_path_query = PathQuery::new_unsized( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_2".to_vec(), + ], + query, + ); + Some(deeper_2_path_query) + }); + + // verify the path query chain + let (root_hash, results) = GroveDb::verify_query_with_chained_path_queries( + &proof, + &deeper_1_path_query, + chained_path_queries, + grove_version, ) - ); - assert_eq!( - results[1][1], - ( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_2".to_vec() - ], - b"key5".to_vec(), - Some(Element::new_item(b"value5".to_vec())) + .unwrap(); + assert_eq!( + root_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(results.len(), 2); + assert_eq!(results[0].len(), 3); + assert_eq!( + results[0][0], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_1".to_vec() + ], + b"key1".to_vec(), + Some(Element::new_item(b"value1".to_vec())) + ) + ); + assert_eq!( + results[0][1], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_1".to_vec() + ], + b"key2".to_vec(), + 
Some(Element::new_item(b"value2".to_vec())) + ) + ); + assert_eq!( + results[0][2], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_1".to_vec() + ], + b"key3".to_vec(), + Some(Element::new_item(b"value3".to_vec())) + ) + ); + + assert_eq!(results[1].len(), 3); + assert_eq!( + results[1][0], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_2".to_vec() + ], + b"key4".to_vec(), + Some(Element::new_item(b"value4".to_vec())) + ) + ); + assert_eq!( + results[1][1], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_2".to_vec() + ], + b"key5".to_vec(), + Some(Element::new_item(b"value5".to_vec())) + ) + ); + assert_eq!( + results[1][2], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_2".to_vec() + ], + b"key6".to_vec(), + Some(Element::new_item(b"value6".to_vec())) + ) + ); + } + + #[test] + fn test_query_b_depends_on_query_a() { + let grove_version = GroveVersion::latest(); + // we have two trees + // one with a mapping of id to name + // another with a mapping of name to age + // we want to get the age of every one after a certain id ordered by name + let db = make_test_grovedb(grove_version); + + // TEST_LEAF contains the id to name mapping + db.insert( + [TEST_LEAF].as_ref(), + &[1], + Element::new_item(b"d".to_vec()), + None, + None, + grove_version, ) - ); - assert_eq!( - results[1][2], - ( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_2".to_vec() - ], - b"key6".to_vec(), - Some(Element::new_item(b"value6".to_vec())) + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[2], + Element::new_item(b"b".to_vec()), + None, + None, + grove_version, ) - ); -} + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[3], + Element::new_item(b"c".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); + 
db.insert( + [TEST_LEAF].as_ref(), + &[4], + Element::new_item(b"a".to_vec()), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); + + // ANOTHER_TEST_LEAF contains the name to age mapping + db.insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"a", + Element::new_item(vec![10]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"b", + Element::new_item(vec![30]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"c", + Element::new_item(vec![12]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"d", + Element::new_item(vec![46]), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); + + // Query: return the age of everyone greater than id 2 ordered by name + // id 2 - b + // we want to return the age for c and d = 12, 46 respectively + // the proof generator knows that id 2 = b, but the verifier doesn't + // hence we need to generate two proofs + // prove that 2 - b then prove age after b + // the verifier has to use the result of the first proof 2 - b + // to generate the path query for the verification of the second proof + + // query name associated with id 2 + let mut query = Query::new(); + query.insert_key(vec![2]); + let mut path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); -#[test] -fn test_query_b_depends_on_query_a() { - // we have two trees - // one with a mapping of id to name - // another with a mapping of name to age - // we want to get the age of every one after a certain id ordered by name - let db = make_test_grovedb(); - - // TEST_LEAF contains the id to name mapping - db.insert( - [TEST_LEAF].as_ref(), - &[1], - Element::new_item(b"d".to_vec()), - None, - None, - ) - 
.unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [TEST_LEAF].as_ref(), - &[2], - Element::new_item(b"b".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [TEST_LEAF].as_ref(), - &[3], - Element::new_item(b"c".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [TEST_LEAF].as_ref(), - &[4], - Element::new_item(b"a".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - - // ANOTHER_TEST_LEAF contains the name to age mapping - db.insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"a", - Element::new_item(vec![10]), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"b", - Element::new_item(vec![30]), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"c", - Element::new_item(vec![12]), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"d", - Element::new_item(vec![46]), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - - // Query: return the age of everyone greater than id 2 ordered by name - // id 2 - b - // so we want to return the age for c and d = 12, 46 respectively - // the proof generator knows that id 2 = b, but the verifier doesn't - // hence we need to generate two proofs - // prove that 2 - b then prove age after b - // the verifier has to use the result of the first proof 2 - b - // to generate the path query for the verification of the second proof - - // query name associated with id 2 - let mut query = Query::new(); - query.insert_key(vec![2]); - let mut path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - // first we show that this returns the correct output - let proof = 
db.prove_query(&path_query_one).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query_one).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - assert_eq!(result_set[0].2, Some(Element::new_item(b"b".to_vec()))); - - // next query should return the age for elements above b - let mut query = Query::new(); - query.insert_range_after(b"b".to_vec()..); - let path_query_two = PathQuery::new_unsized(vec![ANOTHER_TEST_LEAF.to_vec()], query); - - // show that we get the correct output - let proof = db.prove_query(&path_query_two).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query_two).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 2); - assert_eq!(result_set[0].2, Some(Element::new_item(vec![12]))); - assert_eq!(result_set[1].2, Some(Element::new_item(vec![46]))); - - // now we merge the path queries - let mut merged_path_queries = PathQuery::merge(vec![&path_query_one, &path_query_two]).unwrap(); - merged_path_queries.query.limit = Some(3); - let proof = db.prove_verbose(&merged_path_queries).unwrap().unwrap(); - - // verifier only has access to the statement age > 2 - // need to first get the name associated with 2 from the proof - // then use that to construct the next path query - let mut chained_path_queries = vec![]; - chained_path_queries.push(|prev_elements: Vec| { + // first we show that this returns the correct output + let proof = db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query(&proof, &path_query_one, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 1); + assert_eq!(result_set[0].2, Some(Element::new_item(b"b".to_vec()))); + + // next query should return the age for elements above b let mut query = Query::new(); - let 
name_element = prev_elements[0].2.as_ref().unwrap(); - if let Element::Item(name, ..) = name_element { - query.insert_range_after(name.to_owned()..); - Some(PathQuery::new( - vec![ANOTHER_TEST_LEAF.to_vec()], - SizedQuery::new(query, Some(2), None), - )) - } else { - None - } - }); - - // add limit to path query one - path_query_one.query.limit = Some(1); - - let (_, result_set) = GroveDb::verify_query_with_chained_path_queries( - proof.as_slice(), - &path_query_one, - chained_path_queries, - ) - .unwrap(); - assert_eq!(result_set.len(), 2); - assert_eq!(result_set[0].len(), 1); - assert_eq!(result_set[1].len(), 2); - - let age_result = result_set[1].clone(); - assert_eq!(age_result[0].2, Some(Element::new_item(vec![12]))); - assert_eq!(age_result[1].2, Some(Element::new_item(vec![46]))); + query.insert_range_after(b"b".to_vec()..); + let path_query_two = PathQuery::new_unsized(vec![ANOTHER_TEST_LEAF.to_vec()], query); + + // show that we get the correct output + let proof = db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query(&proof, &path_query_two, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); + assert_eq!(result_set.len(), 2); + assert_eq!(result_set[0].2, Some(Element::new_item(vec![12]))); + assert_eq!(result_set[1].2, Some(Element::new_item(vec![46]))); + + // now we merge the path queries + let mut merged_path_queries = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version).unwrap(); + merged_path_queries.query.limit = Some(3); + let proof = db + .prove_query(&merged_path_queries, None, grove_version) + .unwrap() + .unwrap(); + + // verifier only has access to the statement age > 2 + // need to first get the name associated with 2 from the proof + // then use that to construct the next path query + let mut chained_path_queries = vec![]; + chained_path_queries.push(|prev_elements: Vec| { + let mut query = 
Query::new(); + let name_element = prev_elements[0].2.as_ref().unwrap(); + if let Element::Item(name, ..) = name_element { + query.insert_range_after(name.to_owned()..); + Some(PathQuery::new( + vec![ANOTHER_TEST_LEAF.to_vec()], + SizedQuery::new(query, Some(2), None), + )) + } else { + None + } + }); + + // add limit to path query one + path_query_one.query.limit = Some(1); + + let (_, result_set) = GroveDb::verify_query_with_chained_path_queries( + proof.as_slice(), + &path_query_one, + chained_path_queries, + grove_version, + ) + .unwrap(); + assert_eq!(result_set.len(), 2); + assert_eq!(result_set[0].len(), 1); + assert_eq!(result_set[1].len(), 2); + + let age_result = result_set[1].clone(); + assert_eq!(age_result[0].2, Some(Element::new_item(vec![12]))); + assert_eq!(age_result[1].2, Some(Element::new_item(vec![46]))); + } + + #[test] + fn test_prove_absent_path_with_intermediate_emtpy_tree() { + let grove_version = GroveVersion::latest(); + // root + // test_leaf (empty) + let grovedb = make_test_grovedb(grove_version); + + // prove the absence of key "book" in ["test_leaf", "invalid"] + let mut query = Query::new(); + query.insert_key(b"book".to_vec()); + let path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"invalid".to_vec()], query); + + let proof = grovedb + .prove_query(&path_query, None, grove_version) + .unwrap() + .expect("should generate proofs"); + + let (root_hash, result_set) = + GroveDb::verify_query(proof.as_slice(), &path_query, grove_version) + .expect("should verify proof"); + assert_eq!(result_set.len(), 0); + assert_eq!( + root_hash, + grovedb.root_hash(None, grove_version).unwrap().unwrap() + ); + } } diff --git a/grovedb/src/tests/sum_tree_tests.rs b/grovedb/src/tests/sum_tree_tests.rs index 3bc6896e8..92df7d734 100644 --- a/grovedb/src/tests/sum_tree_tests.rs +++ b/grovedb/src/tests/sum_tree_tests.rs @@ -1,38 +1,12 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of 
charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Sum tree tests use grovedb_merk::{ proofs::Query, - TreeFeatureType::{BasicMerk, SummedMerk}, + tree::kv::ValueDefinedCostType, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; use grovedb_storage::StorageBatch; +use grovedb_version::version::GroveVersion; use crate::{ batch::GroveDbOp, @@ -43,20 +17,22 @@ use crate::{ #[test] fn test_sum_tree_behaves_like_regular_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key", Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); // Can fetch sum tree let sum_tree = db - .get([TEST_LEAF].as_ref(), b"key", None) + .get([TEST_LEAF].as_ref(), b"key", None, grove_version) .unwrap() .expect("should get tree"); assert!(matches!(sum_tree, Element::SumTree(..))); @@ -67,6 +43,7 @@ fn test_sum_tree_behaves_like_regular_tree() { Element::new_item(vec![1]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -76,6 +53,7 @@ fn test_sum_tree_behaves_like_regular_tree() { Element::new_item(vec![3]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -85,13 +63,19 @@ fn test_sum_tree_behaves_like_regular_tree() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert item"); // Test proper item retrieval let item = db - .get([TEST_LEAF, b"key"].as_ref(), b"innerkey", None) + .get( + [TEST_LEAF, b"key"].as_ref(), + b"innerkey", + None, + grove_version, + ) .unwrap() .expect("should get item"); assert_eq!(item, Element::new_item(vec![1])); @@ -102,28 +86,34 @@ fn test_sum_tree_behaves_like_regular_tree() { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"key".to_vec()], query); let proof = db - .prove_query(&path_query) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should generate proof"); let (root_hash, result_set) = - GroveDb::verify_query_raw(&proof, 
&path_query).expect("should verify proof"); - assert_eq!(root_hash, db.grove_db.root_hash(None).unwrap().unwrap()); + GroveDb::verify_query_raw(&proof, &path_query, grove_version).expect("should verify proof"); + assert_eq!( + root_hash, + db.grove_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 1); assert_eq!( - Element::deserialize(&result_set[0].value).expect("should deserialize element"), + Element::deserialize(&result_set[0].value, grove_version) + .expect("should deserialize element"), Element::new_item(vec![3]) ); } #[test] fn test_sum_item_behaves_like_regular_item() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"sumkey", Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -133,6 +123,7 @@ fn test_sum_item_behaves_like_regular_item() { Element::new_item(vec![1]), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -142,6 +133,7 @@ fn test_sum_item_behaves_like_regular_item() { Element::new_sum_item(5), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -151,13 +143,14 @@ fn test_sum_item_behaves_like_regular_item() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); // Test proper item retrieval let item = db - .get([TEST_LEAF, b"sumkey"].as_ref(), b"k2", None) + .get([TEST_LEAF, b"sumkey"].as_ref(), b"k2", None, grove_version) .unwrap() .expect("should get item"); assert_eq!(item, Element::new_sum_item(5)); @@ -168,28 +161,33 @@ fn test_sum_item_behaves_like_regular_item() { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"sumkey".to_vec()], query); let proof = db - .prove_query(&path_query) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should generate proof"); let (root_hash, result_set) = - GroveDb::verify_query_raw(&proof, 
&path_query).expect("should verify proof"); - assert_eq!(root_hash, db.grove_db.root_hash(None).unwrap().unwrap()); + GroveDb::verify_query_raw(&proof, &path_query, grove_version).expect("should verify proof"); + assert_eq!( + root_hash, + db.grove_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 1); - let element_from_proof = - Element::deserialize(&result_set[0].value).expect("should deserialize element"); + let element_from_proof = Element::deserialize(&result_set[0].value, grove_version) + .expect("should deserialize element"); assert_eq!(element_from_proof, Element::new_sum_item(5)); assert_eq!(element_from_proof.sum_value_or_default(), 5); } #[test] fn test_cannot_insert_sum_item_in_regular_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"sumkey", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -200,6 +198,7 @@ fn test_cannot_insert_sum_item_in_regular_tree() { Element::new_sum_item(5), None, None, + grove_version ) .unwrap(), Err(Error::InvalidInput("cannot add sum item to non sum tree")) @@ -208,14 +207,16 @@ fn test_cannot_insert_sum_item_in_regular_tree() { #[test] fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { + let grove_version = GroveVersion::latest(); // All elements in a sum tree must have a summed feature type - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key", Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -226,6 +227,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_sum_item(30), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -235,6 +237,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_sum_item(10), None, None, + 
grove_version, ) .unwrap() .expect("should insert item"); @@ -245,6 +248,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_item(vec![10]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -254,6 +258,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_item(vec![15]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -262,43 +267,68 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { // Open merk and check all elements in it let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( - merk.get_feature_type(b"item1", true) - .unwrap() - .expect("node should exist"), - Some(SummedMerk(30)) + merk.get_feature_type( + b"item1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(30)) )); assert!(matches!( - merk.get_feature_type(b"item2", true) - .unwrap() - .expect("node should exist"), - Some(SummedMerk(10)) + merk.get_feature_type( + b"item2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(10)) )); assert!(matches!( - merk.get_feature_type(b"item3", true) - .unwrap() - .expect("node should exist"), - Some(SummedMerk(0)) + merk.get_feature_type( + b"item3", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(0)) )); assert!(matches!( - merk.get_feature_type(b"item4", true) - .unwrap() - .expect("node should exist"), - Some(SummedMerk(0)) + merk.get_feature_type( + b"item4", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + 
.expect("node should exist"), + Some(SummedMerkNode(0)) )); assert_eq!(merk.sum().expect("expected to get sum"), Some(40)); // Perform the same test on regular trees - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -308,6 +338,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_item(vec![30]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -317,38 +348,55 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_item(vec![10]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( - merk.get_feature_type(b"item1", true) - .unwrap() - .expect("node should exist"), - Some(BasicMerk) + merk.get_feature_type( + b"item1", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(BasicMerkNode) )); assert!(matches!( - merk.get_feature_type(b"item2", true) - .unwrap() - .expect("node should exist"), - Some(BasicMerk) + merk.get_feature_type( + b"item2", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(BasicMerkNode) )); assert_eq!(merk.sum().expect("expected to get sum"), None); } #[test] fn test_sum_tree_feature() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ 
-358,7 +406,11 @@ fn test_sum_tree_feature() { // Sum should be non for non sum tree // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), None); @@ -370,11 +422,12 @@ fn test_sum_tree_feature() { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert sum tree"); let sum_tree = db - .get([TEST_LEAF].as_ref(), b"key2", None) + .get([TEST_LEAF].as_ref(), b"key2", None, grove_version) .unwrap() .expect("should retrieve tree"); assert_eq!(sum_tree.sum_value_or_default(), 0); @@ -386,12 +439,17 @@ fn test_sum_tree_feature() { Element::new_sum_item(30), None, None, + grove_version, ) .unwrap() .expect("should insert item"); // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), Some(30)); @@ -403,6 +461,7 @@ fn test_sum_tree_feature() { Element::new_sum_item(-10), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -412,11 +471,16 @@ fn test_sum_tree_feature() { Element::new_sum_item(50), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), Some(70)); // 30 - 10 + 50 = 70 
@@ -428,11 +492,16 @@ fn test_sum_tree_feature() { Element::new_item(vec![29]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), Some(70)); @@ -444,6 +513,7 @@ fn test_sum_tree_feature() { Element::new_sum_item(10), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -453,19 +523,30 @@ fn test_sum_tree_feature() { Element::new_sum_item(-100), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), Some(-60)); // 30 + 10 - 100 = -60 // We can not replace a normal item with a sum item, so let's delete it first - db.delete([TEST_LEAF, b"key2"].as_ref(), b"item4", None, None) - .unwrap() - .expect("expected to delete"); + db.delete( + [TEST_LEAF, b"key2"].as_ref(), + b"item4", + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to delete"); // Use a large value db.insert( [TEST_LEAF, b"key2"].as_ref(), @@ -473,11 +554,16 @@ fn test_sum_tree_feature() { Element::new_sum_item(10000000), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), 
Some(9999940)); // 30 + @@ -490,7 +576,8 @@ fn test_sum_tree_feature() { #[test] fn test_sum_tree_propagation() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // Tree // SumTree // SumTree @@ -504,6 +591,7 @@ fn test_sum_tree_propagation() { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -513,6 +601,7 @@ fn test_sum_tree_propagation() { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -522,6 +611,7 @@ fn test_sum_tree_propagation() { Element::new_sum_item(20), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -531,6 +621,7 @@ fn test_sum_tree_propagation() { Element::new_item(vec![2]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -540,6 +631,7 @@ fn test_sum_tree_propagation() { Element::new_sum_item(5), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -549,6 +641,7 @@ fn test_sum_tree_propagation() { Element::new_sum_item(10), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -563,12 +656,13 @@ fn test_sum_tree_propagation() { ])), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let sum_tree = db - .get([TEST_LEAF].as_ref(), b"key", None) + .get([TEST_LEAF].as_ref(), b"key", None, grove_version) .unwrap() .expect("should fetch tree"); assert_eq!(sum_tree.sum_value_or_default(), 35); @@ -577,73 +671,113 @@ fn test_sum_tree_propagation() { // Assert node feature types let test_leaf_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( test_leaf_merk - .get_feature_type(b"key", true) + .get_feature_type( + b"key", + true, + 
Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) .unwrap() .expect("node should exist"), - Some(BasicMerk) + Some(BasicMerkNode) )); let parent_sum_tree = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( parent_sum_tree - .get_feature_type(b"tree2", true) + .get_feature_type( + b"tree2", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(15)) /* 15 because the child sum tree has one sum item of - * value 5 and - * another of value 10 */ + Some(SummedMerkNode(15)) /* 15 because the child sum tree has one sum item of + * value 5 and + * another of value 10 */ )); let child_sum_tree = db .open_non_transactional_merk_at_path( [TEST_LEAF, b"key", b"tree2"].as_ref().into(), Some(&batch), + grove_version, ) .unwrap() .expect("should open tree"); assert!(matches!( child_sum_tree - .get_feature_type(b"item1", true) + .get_feature_type( + b"item1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(0)) + Some(SummedMerkNode(0)) )); assert!(matches!( child_sum_tree - .get_feature_type(b"sumitem1", true) + .get_feature_type( + b"sumitem1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(5)) + Some(SummedMerkNode(5)) )); assert!(matches!( child_sum_tree - .get_feature_type(b"sumitem2", true) + .get_feature_type( + b"sumitem2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(10)) + Some(SummedMerkNode(10)) )); // TODO: should references take the sum of the referenced element?? 
assert!(matches!( child_sum_tree - .get_feature_type(b"item2", true) + .get_feature_type( + b"item2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(0)) + Some(SummedMerkNode(0)) )); } #[test] fn test_sum_tree_with_batches() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let ops = vec![ GroveDbOp::insert_op( vec![TEST_LEAF.to_vec()], @@ -661,29 +795,43 @@ fn test_sum_tree_with_batches() { Element::new_sum_item(10), ), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( sum_tree - .get_feature_type(b"a", true) + .get_feature_type( + b"a", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(0)) + Some(SummedMerkNode(0)) )); assert!(matches!( sum_tree - .get_feature_type(b"b", true) + .get_feature_type( + b"b", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(10)) + Some(SummedMerkNode(10)) )); // Create new batch to use existing tree @@ -692,26 +840,35 @@ fn test_sum_tree_with_batches() { b"c".to_vec(), Element::new_sum_item(10), )]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + 
[TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( sum_tree - .get_feature_type(b"c", true) + .get_feature_type( + b"c", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(10)) + Some(SummedMerkNode(10)) )); assert_eq!(sum_tree.sum().expect("expected to get sum"), Some(20)); // Test propagation - // Add a new sum tree with it's own sum items, should affect sum of original + // Add a new sum tree with its own sum items, should affect sum of original // tree let ops = vec![ GroveDbOp::insert_op( @@ -770,13 +927,17 @@ fn test_sum_tree_with_batches() { Element::new_item(vec![5]), ), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(sum_tree.sum().expect("expected to get sum"), Some(41)); diff --git a/grovedb/src/tests/tree_hashes_tests.rs b/grovedb/src/tests/tree_hashes_tests.rs index b67d1cf95..e86b8fd0a 100644 --- a/grovedb/src/tests/tree_hashes_tests.rs +++ b/grovedb/src/tests/tree_hashes_tests.rs @@ -28,8 +28,11 @@ //! 
Tree hashes tests -use grovedb_merk::tree::{combine_hash, kv_digest_to_kv_hash, node_hash, value_hash, NULL_HASH}; +use grovedb_merk::tree::{ + combine_hash, kv::ValueDefinedCostType, kv_digest_to_kv_hash, node_hash, value_hash, NULL_HASH, +}; use grovedb_storage::StorageBatch; +use grovedb_version::version::GroveVersion; use crate::{ tests::{make_test_grovedb, TEST_LEAF}, @@ -38,7 +41,8 @@ use crate::{ #[test] fn test_node_hashes_when_inserting_item() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -46,6 +50,7 @@ fn test_node_hashes_when_inserting_item() { Element::new_item(b"baguette".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -53,24 +58,43 @@ fn test_node_hashes_when_inserting_item() { let batch = StorageBatch::new(); let test_leaf_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); let (elem_value, elem_value_hash) = test_leaf_merk - .get_value_and_value_hash(b"key1", true) + .get_value_and_value_hash( + b"key1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let elem_kv_hash = test_leaf_merk - .get_kv_hash(b"key1", true) + .get_kv_hash( + b"key1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let elem_node_hash = test_leaf_merk - .get_hash(b"key1", true) + .get_hash( + b"key1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); @@ -90,7 +114,8 @@ fn test_node_hashes_when_inserting_item() { 
#[test] fn test_tree_hashes_when_inserting_empty_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -98,6 +123,7 @@ fn test_tree_hashes_when_inserting_empty_tree() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -105,30 +131,53 @@ fn test_tree_hashes_when_inserting_empty_tree() { let batch = StorageBatch::new(); let test_leaf_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); let (elem_value, elem_value_hash) = test_leaf_merk - .get_value_and_value_hash(b"key1", true) + .get_value_and_value_hash( + b"key1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let elem_kv_hash = test_leaf_merk - .get_kv_hash(b"key1", true) + .get_kv_hash( + b"key1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let elem_node_hash = test_leaf_merk - .get_hash(b"key1", true) + .get_hash( + b"key1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let underlying_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); @@ -150,7 +199,8 @@ fn test_tree_hashes_when_inserting_empty_tree() { #[test] fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { - let db = make_test_grovedb(); + let 
grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -158,6 +208,7 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -168,6 +219,7 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -175,19 +227,32 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { let batch = StorageBatch::new(); let under_top_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); let middle_merk_key1 = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); // Let's first verify that the lowest nodes hashes are as we expect let (elem_value, elem_value_hash) = middle_merk_key1 - .get_value_and_value_hash(b"key2", true) + .get_value_and_value_hash( + b"key2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); @@ -196,6 +261,7 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .open_non_transactional_merk_at_path( [TEST_LEAF, b"key1", b"key2"].as_ref().into(), Some(&batch), + grove_version, ) .unwrap() .expect("should open merk"); @@ -210,7 +276,12 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { assert_eq!(elem_value_hash, combined_value_hash_key2); let elem_kv_hash = middle_merk_key1 - .get_kv_hash(b"key2", true) + 
.get_kv_hash( + b"key2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get kv hash") .expect("value hash should be some"); @@ -220,7 +291,12 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { assert_eq!(elem_kv_hash, kv_hash_key2); let elem_node_hash = middle_merk_key1 - .get_hash(b"key2", true) + .get_hash( + b"key2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get kv hash") .expect("value hash should be some"); @@ -238,7 +314,12 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { assert_eq!(root_hash, node_hash_key2); let (middle_elem_value_key1, middle_elem_value_hash_key1) = under_top_merk - .get_value_and_value_hash(b"key1", true) + .get_value_and_value_hash( + b"key1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); @@ -248,7 +329,7 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { "0201046b65793200" ); - let element = Element::deserialize(middle_elem_value_key1.as_slice()) + let element = Element::deserialize(middle_elem_value_key1.as_slice(), grove_version) .expect("expected to deserialize element"); assert_eq!(element, Element::new_tree(Some(b"key2".to_vec()))); @@ -276,7 +357,12 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { ); let middle_elem_kv_hash_key1 = under_top_merk - .get_kv_hash(b"key1", true) + .get_kv_hash( + b"key1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); @@ -290,7 +376,12 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { ); let middle_elem_node_hash_key1 = under_top_merk - .get_hash(b"key1", true) + .get_hash( + b"key1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, 
+ grove_version, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); diff --git a/grovedb/src/util.rs b/grovedb/src/util.rs index da478128f..b9b624a44 100644 --- a/grovedb/src/util.rs +++ b/grovedb/src/util.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - /// Macro to execute same piece of code on different storage contexts /// (transactional or not) using path argument. macro_rules! storage_context_optional_tx { @@ -57,6 +29,7 @@ macro_rules! storage_context_with_parent_optional_tx { $storage:ident, $root_key:ident, $is_sum_tree:ident, + $grove_version:ident, { $($body:tt)* } ) => { { @@ -71,12 +44,13 @@ macro_rules! 
storage_context_with_parent_optional_tx { .unwrap_add_cost(&mut $cost); let element = cost_return_on_error!( &mut $cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|e| { + Element::get_from_storage(&parent_storage, parent_key, $grove_version) + .map_err(|e| { Error::PathParentLayerNotFound( format!( - "could not get key for parent of subtree optional on tx: {}", - e - ) + "could not get key for parent of subtree optional on tx: {}", + e + ) ) }) ); @@ -112,7 +86,7 @@ macro_rules! storage_context_with_parent_optional_tx { ).unwrap_add_cost(&mut $cost); let element = cost_return_on_error!( &mut $cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|e| { + Element::get_from_storage(&parent_storage, parent_key, $grove_version).map_err(|e| { Error::PathParentLayerNotFound( format!( "could not get key for parent of subtree optional no tx: {}", @@ -149,6 +123,124 @@ macro_rules! storage_context_with_parent_optional_tx { }; } +/// Macro to execute same piece of code on different storage contexts +/// (transactional or not) using path argument. +macro_rules! 
storage_context_with_parent_optional_tx_internal_error { + ( + &mut $cost:ident, + $db:expr, + $path:expr, + $batch:expr, + $transaction:ident, + $storage:ident, + $root_key:ident, + $is_sum_tree:ident, + $grove_version:ident, + { $($body:tt)* } + ) => { + { + use ::grovedb_storage::Storage; + if let Some(tx) = $transaction { + let $storage = $db + .get_transactional_storage_context($path.clone(), $batch, tx) + .unwrap_add_cost(&mut $cost); + if let Some((parent_path, parent_key)) = $path.derive_parent() { + let parent_storage = $db + .get_transactional_storage_context(parent_path, $batch, tx) + .unwrap_add_cost(&mut $cost); + let result = Element::get_from_storage( + &parent_storage, + parent_key, + $grove_version + ).map_err(|e| { + Error::PathParentLayerNotFound( + format!( + "could not get key for parent of subtree optional on tx: {}", + e + ) + ) + }).unwrap_add_cost(&mut $cost); + match result { + Ok(element) => { + match element { + Element::Tree(root_key, _) => { + let $root_key = root_key; + let $is_sum_tree = false; + $($body)* + } + Element::SumTree(root_key, ..) 
=> { + let $root_key = root_key; + let $is_sum_tree = true; + $($body)* + } + _ => { + return Err(Error::CorruptedData( + "parent is not a tree" + .to_owned(), + )).wrap_with_cost($cost); + } + } + }, + Err(e) => Err(e), + } + } else { + return Err(Error::CorruptedData( + "path is empty".to_owned(), + )).wrap_with_cost($cost); + } + } else { + let $storage = $db + .get_storage_context($path.clone(), $batch).unwrap_add_cost(&mut $cost); + if let Some((parent_path, parent_key)) = $path.derive_parent() { + let parent_storage = $db.get_storage_context( + parent_path, + $batch + ).unwrap_add_cost(&mut $cost); + let result = Element::get_from_storage( + &parent_storage, + parent_key, + $grove_version + ).map_err(|e| { + Error::PathParentLayerNotFound( + format!( + "could not get key for parent of subtree optional no tx: {}", + e + ) + ) + }).unwrap_add_cost(&mut $cost); + match result { + Ok(element) => { + match element { + Element::Tree(root_key, _) => { + let $root_key = root_key; + let $is_sum_tree = false; + $($body)* + } + Element::SumTree(root_key, ..) => { + let $root_key = root_key; + let $is_sum_tree = true; + $($body)* + } + _ => { + return Err(Error::CorruptedData( + "parent is not a tree" + .to_owned(), + )).wrap_with_cost($cost); + } + } + }, + Err(e) => Err(e), + } + } else { + return Err(Error::CorruptedData( + "path is empty".to_owned(), + )).wrap_with_cost($cost); + } + } + } + }; +} + /// Macro to execute same piece of code on different storage contexts with /// empty prefix. macro_rules! meta_storage_context_optional_tx { @@ -185,6 +277,7 @@ macro_rules! merk_optional_tx { $batch:expr, $transaction:ident, $subtree:ident, + $grove_version:ident, { $($body:tt)* } ) => { if $path.is_root() { @@ -198,8 +291,12 @@ macro_rules! 
merk_optional_tx { { let $subtree = cost_return_on_error!( &mut $cost, - ::grovedb_merk::Merk::open_base(storage.unwrap_add_cost(&mut $cost), false) - .map(|merk_res| + ::grovedb_merk::Merk::open_base( + storage.unwrap_add_cost(&mut $cost), + false, + Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, + ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( "cannot open a subtree".to_owned() @@ -219,6 +316,81 @@ macro_rules! merk_optional_tx { storage, root_key, is_sum_tree, + $grove_version, + { + #[allow(unused_mut)] + let mut $subtree = cost_return_on_error!( + &mut $cost, + ::grovedb_merk::Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, + ).map(|merk_res| + merk_res + .map_err(|_| crate::Error::CorruptedData( + "cannot open a subtree".to_owned() + )) + ) + ); + $($body)* + } + ) + } + }; +} + +/// Macro to execute same piece of code on Merk with varying storage +/// contexts. +macro_rules! 
merk_optional_tx_internal_error { + ( + &mut $cost:ident, + $db:expr, + $path:expr, + $batch:expr, + $transaction:ident, + $subtree:ident, + $grove_version:ident, + { $($body:tt)* } + ) => { + if $path.is_root() { + use crate::util::storage_context_optional_tx; + storage_context_optional_tx!( + $db, + ::grovedb_path::SubtreePath::empty(), + $batch, + $transaction, + storage, + { + let $subtree = cost_return_on_error!( + &mut $cost, + ::grovedb_merk::Merk::open_base( + storage.unwrap_add_cost(&mut $cost), + false, + Some(&Element::value_defined_cost_for_serialized_value), + $grove_version + ).map(|merk_res| + merk_res + .map_err(|_| crate::Error::CorruptedData( + "cannot open a subtree".to_owned() + )) + ) + ); + $($body)* + }) + } else { + use crate::util::storage_context_with_parent_optional_tx_internal_error; + storage_context_with_parent_optional_tx_internal_error!( + &mut $cost, + $db, + $path, + $batch, + $transaction, + storage, + root_key, + is_sum_tree, + $grove_version, { #[allow(unused_mut)] let mut $subtree = cost_return_on_error!( @@ -226,7 +398,9 @@ macro_rules! merk_optional_tx { ::grovedb_merk::Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( @@ -251,6 +425,7 @@ macro_rules! merk_optional_tx_path_not_empty { $batch:expr, $transaction:ident, $subtree:ident, + $grove_version:ident, { $($body:tt)* } ) => { { @@ -264,6 +439,7 @@ macro_rules! merk_optional_tx_path_not_empty { storage, root_key, is_sum_tree, + $grove_version, { #[allow(unused_mut)] let mut $subtree = cost_return_on_error!( @@ -271,7 +447,9 @@ macro_rules! 
merk_optional_tx_path_not_empty { ::grovedb_merk::Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( @@ -295,6 +473,7 @@ macro_rules! root_merk_optional_tx { $batch:expr, $transaction:ident, $subtree:ident, + $grove_version:ident, { $($body:tt)* } ) => { { @@ -308,8 +487,12 @@ macro_rules! root_merk_optional_tx { { let $subtree = cost_return_on_error!( &mut $cost, - ::grovedb_merk::Merk::open_base(storage.unwrap_add_cost(&mut $cost), false) - .map(|merk_res| + ::grovedb_merk::Merk::open_base( + storage.unwrap_add_cost(&mut $cost), + false, + Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, + ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( "cannot open a subtree".to_owned() @@ -323,8 +506,10 @@ macro_rules! root_merk_optional_tx { } pub(crate) use merk_optional_tx; +pub(crate) use merk_optional_tx_internal_error; pub(crate) use merk_optional_tx_path_not_empty; pub(crate) use meta_storage_context_optional_tx; pub(crate) use root_merk_optional_tx; pub(crate) use storage_context_optional_tx; pub(crate) use storage_context_with_parent_optional_tx; +pub(crate) use storage_context_with_parent_optional_tx_internal_error; diff --git a/grovedb/src/versioning.rs b/grovedb/src/versioning.rs deleted file mode 100644 index a241b1e46..000000000 --- a/grovedb/src/versioning.rs +++ /dev/null @@ -1,64 +0,0 @@ -use std::io::Cursor; - -use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; - -use crate::{Error, Error::InternalError}; - -pub(crate) const PROOF_VERSION: u32 = 1; - -/// Reads a version number from the given byte slice using variable-length -/// encoding. Returns a Result containing the parsed u32 version number, or an -/// Error if the data is corrupted and could not be read. 
-pub fn read_proof_version(mut bytes: &[u8]) -> Result { - bytes - .read_varint() - .map_err(|_| Error::CorruptedData("could not read version info".to_string())) -} - -/// Reads a version number from the given byte slice using variable-length -/// encoding, and returns the version number as well as a slice of the remaining -/// bytes. -pub fn read_and_consume_proof_version(bytes: &[u8]) -> Result<(u32, &[u8]), Error> { - let mut cursor = Cursor::new(bytes); - let version_number = cursor - .read_varint() - .map_err(|_| Error::CorruptedData("sdfs".to_string()))?; - let version_length: usize = cursor.position() as usize; - Ok((version_number, &bytes[version_length..])) -} - -/// Encodes the given version number as variable-length bytes and adds it to the -/// beginning of the given Vec, returning the modified vector. -pub(crate) fn prepend_version_to_bytes(mut bytes: Vec, version: u32) -> Result, Error> { - let version_bytes = version.encode_var_vec(); - bytes.splice(..0, version_bytes); - Ok(bytes) -} - -#[cfg(test)] -mod tests { - use integer_encoding::VarIntWriter; - - use crate::versioning::{ - prepend_version_to_bytes, read_and_consume_proof_version, read_proof_version, - }; - - #[test] - fn read_correct_version() { - let mut data = vec![1, 2, 3]; - let version = 500_u32; - - // prepend the version information to the data vector - let mut new_data = prepend_version_to_bytes(data, version).unwrap(); - assert_eq!(new_data, [244, 3, 1, 2, 3]); - - // show that read_version doesn't consume - assert_eq!(read_proof_version(&mut new_data.as_slice()).unwrap(), 500); - assert_eq!(new_data, [244, 3, 1, 2, 3]); - - // show that we consume the version number and return the remaining vector - let (version_number, data_vec) = read_and_consume_proof_version(&new_data).unwrap(); - assert_eq!(version_number, 500_u32); - assert_eq!(data_vec, [1, 2, 3]); - } -} diff --git a/grovedb/src/visualize.rs b/grovedb/src/visualize.rs index 3abbbfd01..39cf3432b 100644 --- 
a/grovedb/src/visualize.rs +++ b/grovedb/src/visualize.rs @@ -30,10 +30,14 @@ use std::io::{Result, Write}; -use bincode::Options; +use bincode::{ + config, + config::{BigEndian, Configuration}, +}; use grovedb_merk::{Merk, VisualizeableMerk}; use grovedb_path::SubtreePathBuilder; use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; use grovedb_visualize::{visualize_stdout, Drawer, Visualize}; use crate::{ @@ -44,12 +48,24 @@ use crate::{ impl Visualize for Element { fn visualize(&self, mut drawer: Drawer) -> Result> { match self { - Element::Item(value, _) => { + Element::Item(value, flags) => { drawer.write(b"item: ")?; drawer = value.visualize(drawer)?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } } - Element::SumItem(value, _) => { + Element::SumItem(value, flags) => { drawer.write(format!("sum_item: {value}").as_bytes())?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } } Element::Reference(_ref, ..) => { drawer.write(b"ref")?; @@ -64,13 +80,26 @@ impl Visualize for Element { // } // drawer.write(b"]")?; } - Element::Tree(root_key, _) => { + Element::Tree(root_key, flags) => { drawer.write(b"tree: ")?; drawer = root_key.as_deref().visualize(drawer)?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } } - Element::SumTree(root_key, ..) 
=> { + Element::SumTree(root_key, value, flags) => { drawer.write(b"sum_tree: ")?; drawer = root_key.as_deref().visualize(drawer)?; + drawer.write(format!(" {value}").as_bytes())?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } } } Ok(drawer) @@ -102,6 +131,21 @@ impl Visualize for ReferencePathType { .as_bytes(), )?; } + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + height, + end_path, + ) => { + drawer.write(b"upstream root height with parent path addition reference: ")?; + drawer.write(format!("[height: {height}").as_bytes())?; + drawer.write( + end_path + .iter() + .map(hex::encode) + .collect::>() + .join("/") + .as_bytes(), + )?; + } ReferencePathType::UpstreamFromElementHeightReference(up, end_path) => { drawer.write(b"upstream from element reference: ")?; drawer.write(format!("[up: {up}").as_bytes())?; @@ -144,13 +188,14 @@ impl GroveDb { mut drawer: Drawer, path: SubtreePathBuilder<'_, B>, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result> { drawer.down(); storage_context_optional_tx!(self.db, (&path).into(), None, transaction, storage, { let mut iter = Element::iterator(storage.unwrap().raw_iter()).unwrap(); while let Some((key, element)) = iter - .next_element() + .next_element(grove_version) .unwrap() .expect("cannot get next element") { @@ -166,6 +211,7 @@ impl GroveDb { drawer, path.derive_owned_with_child(key), transaction, + grove_version, )?; drawer.up(); } @@ -184,10 +230,16 @@ impl GroveDb { &self, mut drawer: Drawer, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result> { drawer.down(); - drawer = self.draw_subtree(drawer, SubtreePathBuilder::new(), transaction)?; + drawer = self.draw_subtree( + drawer, + SubtreePathBuilder::new(), + transaction, + grove_version, + )?; drawer.up(); Ok(drawer) @@ -197,9 +249,10 @@ impl GroveDb { &self, mut drawer: Drawer, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result> { 
drawer.write(b"root")?; - drawer = self.draw_root_tree(drawer, transaction)?; + drawer = self.draw_root_tree(drawer, transaction, grove_version)?; drawer.flush()?; Ok(drawer) } @@ -207,18 +260,17 @@ impl GroveDb { impl Visualize for GroveDb { fn visualize(&self, drawer: Drawer) -> Result> { - self.visualize_start(drawer, None) + self.visualize_start(drawer, None, GroveVersion::latest()) } } #[allow(dead_code)] pub fn visualize_merk_stdout<'db, S: StorageContext<'db>>(merk: &Merk) { visualize_stdout(&VisualizeableMerk::new(merk, |bytes: &[u8]| { - bincode::DefaultOptions::default() - .with_varint_encoding() - .reject_trailing_bytes() - .deserialize::(bytes) + let config = config::standard().with_big_endian().with_no_limit(); + bincode::decode_from_slice::>(bytes, config) .expect("unable to deserialize Element") + .0 })); } diff --git a/grovedbg-types/Cargo.toml b/grovedbg-types/Cargo.toml new file mode 100644 index 000000000..300eb5c7d --- /dev/null +++ b/grovedbg-types/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "grovedbg-types" +version = "0.1.0" +edition = "2021" + +[dependencies] +serde = { version = "1.0.201", features = ["derive"] } diff --git a/grovedbg-types/src/lib.rs b/grovedbg-types/src/lib.rs new file mode 100644 index 000000000..dacc4255e --- /dev/null +++ b/grovedbg-types/src/lib.rs @@ -0,0 +1,64 @@ +use serde::{Deserialize, Serialize}; + +pub type Key = Vec; +pub type Path = Vec; +pub type PathSegment = Vec; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct NodeFetchRequest { + pub path: Path, + pub key: Key, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct RootFetchRequest; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct NodeUpdate { + pub left_child: Option, + pub right_child: Option, + pub path: Path, + pub key: Key, + pub element: Element, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum Element { + Subtree { + root_key: 
Option, + }, + Sumtree { + root_key: Option, + sum: i64, + }, + Item { + value: Vec, + }, + SumItem { + value: i64, + }, + AbsolutePathReference { + path: Path, + }, + UpstreamRootHeightReference { + n_keep: u32, + path_append: Vec, + }, + UpstreamRootHeightWithParentPathAdditionReference { + n_keep: u32, + path_append: Vec, + }, + UpstreamFromElementHeightReference { + n_remove: u32, + path_append: Vec, + }, + CousinReference { + swap_parent: PathSegment, + }, + RemovedCousinReference { + swap_parent: Vec, + }, + SiblingReference { + sibling_key: Key, + }, +} diff --git a/merk/Cargo.toml b/merk/Cargo.toml index 4fec012ee..4364a564f 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "grovedb-merk" description = "Merkle key/value store adapted for GroveDB" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" authors = ["Samuel Westrich ", "Wisdom Ogwu ", "Matt Bell "] edition = "2021" license = "MIT" @@ -11,33 +11,31 @@ readme = "README.md" documentation = "https://docs.rs/grovedb-merk" [dependencies] -thiserror = "1.0.37" -grovedb-storage = { version = "1.0.0-rc.1", path = "../storage", optional = true } +thiserror = "1.0.58" +grovedb-storage = { version = "1.0.0-rc.2", path = "../storage", optional = true } failure = "0.1.8" -integer-encoding = "3.0.4" -indexmap = "1.9.2" -grovedb-costs = { version = "1.0.0-rc.1", path = "../costs" } -grovedb-visualize = { version = "1.0.0-rc.1", path = "../visualize" } -grovedb-path = { version = "1.0.0-rc.1", path = "../path" } +integer-encoding = "4.0.0" +indexmap = "2.2.6" +grovedb-costs = { version = "1.0.0-rc.2", path = "../costs" } +grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize" } +grovedb-path = { version = "1.0.0-rc.2", path = "../path" } +hex = { version = "0.4.3" } +grovedb-version = { version = "1.0.0-rc.2", path = "../grovedb-version" } [dependencies.time] -version = "0.3.17" -optional = true - -[dependencies.hex] -version = "0.4.3" +version = "0.3.34" optional = true 
[dependencies.colored] -version = "1.9.3" +version = "2.1.0" optional = true [dependencies.num_cpus] -version = "1.14.0" +version = "1.16.0" optional = true [dependencies.byteorder] -version = "1.4.3" +version = "1.5.0" optional = true [dependencies.ed] @@ -45,7 +43,7 @@ version = "0.2.2" optional = true [dependencies.blake3] -version = "1.3.3" +version = "1.5.1" optional = true [dependencies.rand] @@ -53,22 +51,16 @@ version = "0.8.5" features = ["small_rng"] optional = true -[dependencies.jemallocator] -version = "0.5.0" -features = ["disable_initial_exec_tls"] -optional = true - [features] default = ["full"] +proof_debug = [] full = ["rand", "time", - "hex", "colored", "num_cpus", "byteorder", "ed", "blake3", - "jemallocator", "grovedb-storage", "grovedb-storage/rocksdb_storage" ] @@ -76,10 +68,11 @@ verify = [ "ed", "blake3" ] +grovedbg = ["full"] [dev-dependencies] -tempfile = "3.3.0" -criterion = "0.4.0" +tempfile = "3.10.1" +criterion = "0.5.1" [[bench]] name = "merk" diff --git a/merk/benches/merk.rs b/merk/benches/merk.rs index 7846a78ce..e2d552193 100644 --- a/merk/benches/merk.rs +++ b/merk/benches/merk.rs @@ -30,9 +30,14 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use grovedb_costs::storage_cost::removal::StorageRemovedBytes::BasicStorageRemoval; +use grovedb_merk::{ + proofs, + test_utils::{make_batch_rand, make_batch_seq, make_del_batch_rand, TempMerk}, + tree::kv::ValueDefinedCostType, + Merk, +}; use grovedb_path::SubtreePath; use grovedb_storage::{rocksdb_storage::test_utils::TempStorage, Storage}; -use merk::{proofs::encode_into as encode_proof_into, test_utils::*, Merk}; use rand::prelude::*; /// 1 million gets in 2k batches @@ -41,16 +46,17 @@ pub fn get(c: &mut Criterion) { let batch_size = 2_000; let num_batches = initial_size / batch_size; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let mut batches = vec![]; for i in 0..num_batches { let batch = make_batch_rand(batch_size, 
i); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -58,6 +64,7 @@ pub fn get(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -72,7 +79,14 @@ pub fn get(c: &mut Criterion) { let key_index = (i / num_batches) as usize; let key = &batches[batch_index][key_index].0; - merk.get(key, true).unwrap().expect("get failed"); + merk.get( + key, + true, + None:: Option>, + grove_version, + ) + .unwrap() + .expect("get failed"); i = (i + 1) % initial_size; }) @@ -93,16 +107,17 @@ pub fn insert_1m_2k_seq(c: &mut Criterion) { } c.bench_function("insert_1m_2k_seq", |b| { - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let mut i = 0; b.iter_with_large_drop(|| { let batch = &batches[i % n_batches]; - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -110,6 +125,7 @@ pub fn insert_1m_2k_seq(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -132,16 +148,17 @@ pub fn insert_1m_2k_rand(c: &mut Criterion) { } c.bench_function("insert_1m_2k_rand", |b| { - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let mut i = 0; b.iter_with_large_drop(|| { let batch = &batches[i % n_batches]; - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), 
&mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -149,6 +166,7 @@ pub fn insert_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -165,15 +183,16 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; let mut batches = Vec::with_capacity(n_batches); - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_seq(((i * batch_size) as u64)..((i + 1) * batch_size) as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -181,6 +200,7 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -193,11 +213,12 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { b.iter_with_large_drop(|| { let batch = &batches[i % n_batches]; - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -205,6 +226,7 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -221,15 +243,16 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; let mut batches = Vec::with_capacity(n_batches); - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, 
_>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -237,6 +260,7 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -249,11 +273,12 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { b.iter_with_large_drop(|| { let batch = &batches[i % n_batches]; - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -261,6 +286,7 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -278,16 +304,17 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { let mut batches = Vec::with_capacity(n_batches); let mut delete_batches = Vec::with_capacity(n_batches); - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); let delete_batch = make_del_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -295,6 +322,7 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -311,11 +339,12 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { // Merk tree is kept with 1m elements before each bench iteration 
for more or // less same inputs. - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( insert_batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -323,16 +352,18 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); b.iter_with_large_drop(|| { - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( delete_batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -340,6 +371,7 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -357,15 +389,16 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { let mut batches = Vec::with_capacity(n_batches); let mut prove_keys_per_batch = Vec::with_capacity(n_batches); - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -373,12 +406,13 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); let mut prove_keys = Vec::with_capacity(batch_size); for (key, _) in batch.iter() { - prove_keys.push(merk::proofs::query::query_item::QueryItem::Key(key.clone())); + 
prove_keys.push(proofs::query::query_item::QueryItem::Key(key.clone())); } prove_keys_per_batch.push(prove_keys); batches.push(batch); @@ -390,7 +424,7 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { b.iter_with_large_drop(|| { let keys = prove_keys_per_batch[i % n_batches].clone(); - merk.prove_unchecked(keys, None, None, true) + merk.prove_unchecked(keys, None, true, grove_version) .unwrap() .expect("prove failed"); i += 1; @@ -405,15 +439,16 @@ pub fn build_trunk_chunk_1m_2k_rand(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -421,6 +456,7 @@ pub fn build_trunk_chunk_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") @@ -434,7 +470,7 @@ pub fn build_trunk_chunk_1m_2k_rand(c: &mut Criterion) { let (ops, _) = merk.walk(|walker| walker.unwrap().create_trunk_proof().unwrap().unwrap()); - encode_proof_into(ops.iter(), &mut bytes); + proofs::encode_into(ops.iter(), &mut bytes); }); }); } @@ -446,15 +482,16 @@ pub fn chunkproducer_rand_1m_1_rand(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, 
key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -462,6 +499,7 @@ pub fn chunkproducer_rand_1m_1_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") @@ -473,7 +511,7 @@ pub fn chunkproducer_rand_1m_1_rand(c: &mut Criterion) { c.bench_function("chunkproducer_rand_1m_1_rand", |b| { b.iter_with_large_drop(|| { let i = rng.gen::() % chunks.len(); - let _chunk = chunks.chunk(i).unwrap(); + let _chunk = chunks.chunk(i, grove_version).unwrap(); }); }); } @@ -485,15 +523,16 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -501,6 +540,7 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") @@ -508,11 +548,11 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { let mut chunks = merk.chunks().unwrap().into_iter(); - let mut next = || match chunks.next() { + let mut next = || match chunks.next(grove_version) { Some(chunk) => chunk, None => { chunks = merk.chunks().unwrap().into_iter(); - chunks.next().unwrap() + chunks.next(grove_version).unwrap() } }; @@ -527,14 +567,15 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { pub fn restore_500_1(c: &mut Criterion) { let merk_size = 500; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_rand(merk_size as u64, 0_u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, 
&|_k, _v| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -542,6 +583,7 @@ pub fn restore_500_1(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -560,7 +602,14 @@ pub fn restore_500_1(c: &mut Criterion) { .0 .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(); - let m = Merk::open_standalone(ctx, false).unwrap().unwrap(); + let m = Merk::open_standalone( + ctx, + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); let mut restorer = Merk::restore(m, root_hash); for chunk in data.1 { diff --git a/merk/benches/ops.rs b/merk/benches/ops.rs index a1fa8bf26..d194e6fff 100644 --- a/merk/benches/ops.rs +++ b/merk/benches/ops.rs @@ -29,7 +29,12 @@ //! Merk benches ops use criterion::{criterion_group, criterion_main, Criterion}; -use merk::{owner::Owner, test_utils::*}; +use grovedb_merk::{ + owner::Owner, + test_utils::{ + apply_memonly_unchecked, make_batch_rand, make_batch_seq, make_tree_rand, make_tree_seq, + }, +}; /// 1m sequential inserts in 10k batches, memonly fn insert_1m_10k_seq_memonly(c: &mut Criterion) { @@ -37,7 +42,7 @@ fn insert_1m_10k_seq_memonly(c: &mut Criterion) { let batch_size = 10_000; let n_batches = initial_size / batch_size; - let mut tree = Owner::new(make_tree_seq(initial_size)); + let mut tree = Owner::new(make_tree_seq(initial_size, grove_version)); let mut batches = Vec::new(); for i in 0..n_batches { @@ -49,7 +54,7 @@ fn insert_1m_10k_seq_memonly(c: &mut Criterion) { b.iter(|| { let batch = &batches[i % n_batches as usize]; - tree.own(|tree| apply_memonly_unchecked(tree, batch)); + tree.own(|tree| apply_memonly_unchecked(tree, batch, grove_version)); i += 1; }); }); @@ -61,7 +66,13 @@ fn insert_1m_10k_rand_memonly(c: &mut Criterion) { let batch_size = 10_000; let n_batches = 
initial_size / batch_size; - let mut tree = Owner::new(make_tree_rand(initial_size, batch_size, 0, false)); + let mut tree = Owner::new(make_tree_rand( + initial_size, + batch_size, + 0, + false, + grove_version, + )); let mut batches = Vec::new(); for i in 0..n_batches { @@ -73,7 +84,7 @@ fn insert_1m_10k_rand_memonly(c: &mut Criterion) { b.iter(|| { let batch = &batches[i % n_batches as usize]; - tree.own(|tree| apply_memonly_unchecked(tree, batch)); + tree.own(|tree| apply_memonly_unchecked(tree, batch, grove_version)); i += 1; }); }); @@ -85,12 +96,12 @@ fn update_1m_10k_seq_memonly(c: &mut Criterion) { let batch_size = 10_000; let n_batches = initial_size / batch_size; - let mut tree = Owner::new(make_tree_seq(initial_size)); + let mut tree = Owner::new(make_tree_seq(initial_size, grove_version)); let mut batches = Vec::new(); for i in 0..n_batches { let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size)); - tree.own(|tree| apply_memonly_unchecked(tree, &batch)); + tree.own(|tree| apply_memonly_unchecked(tree, &batch, grove_version)); batches.push(batch); } @@ -99,7 +110,7 @@ fn update_1m_10k_seq_memonly(c: &mut Criterion) { b.iter(|| { let batch = &batches[i % n_batches as usize]; - tree.own(|tree| apply_memonly_unchecked(tree, batch)); + tree.own(|tree| apply_memonly_unchecked(tree, batch, grove_version)); i += 1; }); }); @@ -111,12 +122,18 @@ fn update_1m_10k_rand_memonly(c: &mut Criterion) { let batch_size = 10_000; let n_batches = initial_size / batch_size; - let mut tree = Owner::new(make_tree_rand(initial_size, batch_size, 0, false)); + let mut tree = Owner::new(make_tree_rand( + initial_size, + batch_size, + 0, + false, + grove_version, + )); let mut batches = Vec::new(); for i in 0..n_batches { let batch = make_batch_rand(batch_size, i); - tree.own(|tree| apply_memonly_unchecked(tree, &batch)); + tree.own(|tree| apply_memonly_unchecked(tree, &batch, grove_version)); batches.push(batch); } @@ -125,7 +142,7 @@ fn 
update_1m_10k_rand_memonly(c: &mut Criterion) { b.iter(|| { let batch = &batches[i % n_batches as usize]; - tree.own(|tree| apply_memonly_unchecked(tree, batch)); + tree.own(|tree| apply_memonly_unchecked(tree, batch, grove_version)); i += 1; }); }); diff --git a/merk/src/debugger.rs b/merk/src/debugger.rs new file mode 100644 index 000000000..c5d322c00 --- /dev/null +++ b/merk/src/debugger.rs @@ -0,0 +1,46 @@ +//! Merk API enhancements for GroveDbg support + +use grovedb_costs::CostsExt; +use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; + +use crate::{tree::kv::ValueDefinedCostType, Error, Merk}; + +impl<'a, S: StorageContext<'a>> Merk { + pub fn get_node_dbg(&self, key: &[u8]) -> Result, Error> { + self.get_node_direct_fn( + key, + |tree| { + NodeDbg { + key: tree.inner.key_as_slice().to_owned(), + value: tree.inner.value_as_slice().to_owned(), + left_child: tree.link(true).map(|link| link.key().to_owned()), + right_child: tree.link(false).map(|link| link.key().to_owned()), + } + .wrap_with_cost(Default::default()) + }, + None:: Option>, + GroveVersion::latest(), + ) + .unwrap() + } + + pub fn get_root_node_dbg(&self) -> Result, Error> { + Ok(self.use_tree(|tree_opt| { + tree_opt.map(|tree| NodeDbg { + key: tree.inner.key_as_slice().to_owned(), + value: tree.inner.value_as_slice().to_owned(), + left_child: tree.link(true).map(|link| link.key().to_owned()), + right_child: tree.link(false).map(|link| link.key().to_owned()), + }) + })) + } +} + +#[derive(Debug)] +pub struct NodeDbg { + pub key: Vec, + pub value: Vec, + pub left_child: Option>, + pub right_child: Option>, +} diff --git a/merk/src/error.rs b/merk/src/error.rs index 4455ef968..c365b898e 100644 --- a/merk/src/error.rs +++ b/merk/src/error.rs @@ -1,32 +1,6 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the 
"Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Errors +#[cfg(feature = "full")] +use crate::proofs::chunk::error::ChunkError; #[cfg(any(feature = "full", feature = "verify"))] #[derive(Debug, thiserror::Error)] @@ -57,13 +31,29 @@ pub enum Error { #[error("corrupted code execution error {0}")] CorruptedCodeExecution(&'static str), + /// Corrupted state + #[error("corrupted state: {0}")] + CorruptedState(&'static str), + /// Chunking error + #[cfg(feature = "full")] #[error("chunking error {0}")] - ChunkingError(&'static str), + ChunkingError(ChunkError), + + // TODO: remove + /// Old chunking error + #[error("chunking error {0}")] + OldChunkingError(&'static str), + + /// Chunk restoring error + #[cfg(feature = "full")] + #[error("chunk restoring error {0}")] + ChunkRestoringError(ChunkError), + // TODO: remove /// Chunk restoring error #[error("chunk restoring error {0}")] - ChunkRestoringError(String), + OldChunkRestoringError(String), /// Key not found error #[error("key not found error {0}")] @@ -87,7 +77,7 @@ pub enum Error { /// Not supported error 
#[error("not supported error {0}")] - NotSupported(&'static str), + NotSupported(String), /// Request amount exceeded error #[error("request amount exceeded error {0}")] @@ -97,6 +87,10 @@ pub enum Error { #[error("invalid operation error {0}")] InvalidOperation(&'static str), + /// Internal error + #[error("internal error {0}")] + InternalError(&'static str), + /// Specialized costs error #[error("specialized costs error {0}")] SpecializedCostsError(&'static str), @@ -119,4 +113,14 @@ pub enum Error { /// Costs errors #[error("costs error: {0}")] CostsError(grovedb_costs::error::Error), + // Version errors + #[error(transparent)] + /// Version error + VersionError(grovedb_version::error::GroveVersionError), +} + +impl From for Error { + fn from(value: grovedb_version::error::GroveVersionError) -> Self { + Error::VersionError(value) + } } diff --git a/merk/src/estimated_costs/average_case_costs.rs b/merk/src/estimated_costs/average_case_costs.rs index 23501844b..12f8c2c92 100644 --- a/merk/src/estimated_costs/average_case_costs.rs +++ b/merk/src/estimated_costs/average_case_costs.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Average case costs for Merk #[cfg(feature = "full")] @@ -37,7 +9,7 @@ use integer_encoding::VarInt; use crate::{ error::Error, estimated_costs::LAYER_COST_SIZE, - tree::{kv::KV, Link, Tree}, + tree::{kv::KV, Link, TreeNode}, HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, HASH_LENGTH, HASH_LENGTH_U32, }; @@ -55,10 +27,12 @@ pub type AverageFlagsSize = u32; pub type Weight = u8; #[cfg(feature = "full")] -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Estimated number of sum trees +#[derive(Default)] pub enum EstimatedSumTrees { /// No sum trees + #[default] NoSumTrees, /// Some sum trees SomeSumTrees { @@ -72,12 +46,6 @@ pub enum EstimatedSumTrees { } #[cfg(feature = "full")] -impl Default for EstimatedSumTrees { - fn default() -> Self { - EstimatedSumTrees::NoSumTrees - } -} - #[cfg(feature = "full")] impl EstimatedSumTrees { fn estimated_size(&self) -> Result { @@ -95,7 +63,7 @@ impl EstimatedSumTrees { } #[cfg(feature = "full")] -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Estimated layer sizes pub enum EstimatedLayerSizes { /// All subtrees @@ -263,7 +231,7 @@ pub type EstimatedLevelNumber = u32; pub type EstimatedToBeEmpty = bool; #[cfg(feature = "full")] -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Information on an estimated layer pub struct EstimatedLayerInformation { /// Is sum tree? 
@@ -278,7 +246,7 @@ pub struct EstimatedLayerInformation { impl EstimatedLayerInformation {} #[cfg(feature = "full")] -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Estimated elements and level number of a layer pub enum EstimatedLayerCount { /// Potentially at max elements @@ -318,7 +286,7 @@ impl EstimatedLayerCount { } #[cfg(feature = "full")] -impl Tree { +impl TreeNode { /// Return estimate of average encoded tree size pub fn average_case_encoded_tree_size( not_prefixed_key_len: u32, @@ -340,18 +308,19 @@ pub fn add_average_case_get_merk_node( not_prefixed_key_len: u32, approximate_element_size: u32, is_sum_tree: bool, -) { +) -> Result<(), Error> { // Worst case scenario, the element is not already in memory. // One direct seek has to be performed to read the node from storage. cost.seek_count += 1; // To write a node to disk, the left link, right link and kv nodes are encoded. // worst case, the node has both the left and right link present. 
- cost.storage_loaded_bytes += Tree::average_case_encoded_tree_size( + cost.storage_loaded_bytes += TreeNode::average_case_encoded_tree_size( not_prefixed_key_len, approximate_element_size, is_sum_tree, ); + Ok(()) } #[cfg(feature = "full")] @@ -461,16 +430,16 @@ pub fn add_average_case_merk_propagate( estimated_sum_trees, average_flags_size, ) => { - let flags_len = average_flags_size.unwrap_or(0); - // it is normal to have LAYER_COST_SIZE here, as we add estimated sum tree // additions right after - let value_len = LAYER_COST_SIZE + flags_len; + let value_len = LAYER_COST_SIZE + + average_flags_size + .map_or(0, |flags_len| flags_len + flags_len.required_space() as u32); // in order to simplify calculations we get the estimated size and remove the // cost for the basic merk let sum_tree_addition = estimated_sum_trees.estimated_size()?; nodes_updated - * (KV::value_byte_cost_size_for_key_and_raw_value_lengths( + * (KV::layered_value_byte_cost_size_for_key_and_value_lengths( *average_key_size as u32, value_len, *is_sum_tree, @@ -520,7 +489,7 @@ pub fn add_average_case_merk_propagate( let flags_len = average_flags_size.unwrap_or(0); let value_len = LAYER_COST_SIZE + flags_len; let sum_tree_addition = estimated_sum_trees.estimated_size()?; - let cost = KV::value_byte_cost_size_for_key_and_raw_value_lengths( + let cost = KV::layered_value_byte_cost_size_for_key_and_value_lengths( *average_key_size as u32, value_len, in_sum_tree, diff --git a/merk/src/estimated_costs/mod.rs b/merk/src/estimated_costs/mod.rs index faabce818..bd669db12 100644 --- a/merk/src/estimated_costs/mod.rs +++ b/merk/src/estimated_costs/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, 
-// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Estimated costs for Merk #[cfg(feature = "full")] diff --git a/merk/src/estimated_costs/worst_case_costs.rs b/merk/src/estimated_costs/worst_case_costs.rs index 42911b37a..f4623c8dd 100644 --- a/merk/src/estimated_costs/worst_case_costs.rs +++ b/merk/src/estimated_costs/worst_case_costs.rs @@ -37,7 +37,7 @@ use grovedb_costs::{CostResult, CostsExt, OperationCost}; use crate::{ error::Error, merk::defaults::MAX_PREFIXED_KEY_SIZE, - tree::{kv::KV, Link, Tree}, + tree::{kv::KV, Link, TreeNode}, HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, HASH_LENGTH, }; @@ -52,7 +52,7 @@ pub enum WorstCaseLayerInformation { } #[cfg(feature = "full")] -impl Tree { +impl TreeNode { /// Return worst case size of encoded tree pub fn worst_case_encoded_tree_size( not_prefixed_key_len: u32, @@ -74,7 +74,7 @@ pub fn add_worst_case_get_merk_node( not_prefixed_key_len: u32, max_element_size: u32, is_sum_node: bool, -) { +) -> Result<(), Error> { // Worst case scenario, the element is not already in memory. // One direct seek has to be performed to read the node from storage. 
cost.seek_count += 1; @@ -82,7 +82,8 @@ pub fn add_worst_case_get_merk_node( // To write a node to disk, the left link, right link and kv nodes are encoded. // worst case, the node has both the left and right link present. cost.storage_loaded_bytes += - Tree::worst_case_encoded_tree_size(not_prefixed_key_len, max_element_size, is_sum_node); + TreeNode::worst_case_encoded_tree_size(not_prefixed_key_len, max_element_size, is_sum_node); + Ok(()) } #[cfg(feature = "full")] diff --git a/merk/src/lib.rs b/merk/src/lib.rs index b780b6f4e..d746a885a 100644 --- a/merk/src/lib.rs +++ b/merk/src/lib.rs @@ -30,12 +30,12 @@ // #![deny(missing_docs)] -#[cfg(feature = "full")] -extern crate core; - /// The top-level store API. #[cfg(feature = "full")] -mod merk; +pub mod merk; + +#[cfg(feature = "grovedbg")] +pub mod debugger; #[cfg(feature = "full")] pub use crate::merk::{chunks::ChunkProducer, options::MerkOptions, restore::Restorer}; @@ -70,12 +70,8 @@ mod visualize; #[cfg(feature = "full")] pub use ed; -#[cfg(feature = "full")] -pub use error::Error; -#[cfg(any(feature = "full", feature = "verify"))] -pub use proofs::query::execute_proof; #[cfg(any(feature = "full", feature = "verify"))] -pub use proofs::query::verify_query; +pub use error::Error; #[cfg(feature = "full")] pub use tree::{ BatchEntry, Link, MerkBatch, Op, PanicSource, HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, @@ -86,8 +82,9 @@ pub use tree::{CryptoHash, TreeFeatureType}; #[cfg(feature = "full")] pub use crate::merk::{ - defaults::ROOT_KEY_KEY, IsSumTree, KVIterator, Merk, MerkType, ProofConstructionResult, - ProofWithoutEncodingResult, RootHashKeyAndSum, + defaults::ROOT_KEY_KEY, + prove::{ProofConstructionResult, ProofWithoutEncodingResult}, + IsSumTree, KVIterator, Merk, MerkType, RootHashKeyAndSum, }; #[cfg(feature = "full")] pub use crate::visualize::VisualizeableMerk; diff --git a/merk/src/merk/apply.rs b/merk/src/merk/apply.rs new file mode 100644 index 000000000..84b4cb9af --- /dev/null +++ 
b/merk/src/merk/apply.rs @@ -0,0 +1,356 @@ +use std::cmp::Ordering; + +use grovedb_costs::{ + storage_cost::{ + removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, + StorageCost, + }, + CostResult, CostsExt, +}; +use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; + +use crate::{ + tree::{ + kv::{ValueDefinedCostType, KV}, + AuxMerkBatch, Walker, + }, + Error, Merk, MerkBatch, MerkOptions, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Applies a batch of operations (puts and deletes) to the tree. + /// + /// This will fail if the keys in `batch` are not sorted and unique. This + /// check creates some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `apply_unchecked` for a small performance + /// gain. + /// + /// # Example + /// ``` + /// # let grove_version = GroveVersion::latest(); + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(grove_version); + /// # store.apply::<_, Vec<_>>( + /// &[(vec![4,5,6], + /// Op::Put(vec![0], BasicMerkNode))], + /// &[], + /// None, + /// grove_version + /// ).unwrap().expect(""); + /// + /// use grovedb_merk::Op; + /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// use grovedb_version::version::GroveVersion; + /// + /// let batch = &[ + /// // puts value [4,5,6] to key[1,2,3] + /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode)), + /// // deletes key [4,5,6] + /// (vec![4, 5, 6], Op::Delete), + /// ]; + /// store.apply::<_, Vec<_>>(batch, &[], None,grove_version).unwrap().expect(""); + /// ``` + pub fn apply( + &mut self, + batch: &MerkBatch, + aux: &AuxMerkBatch, + options: Option, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> + where + KB: AsRef<[u8]>, + KA: AsRef<[u8]>, + { + let use_sum_nodes = self.is_sum_tree; + self.apply_with_costs_just_in_time_value_update( + batch, + aux, + options, + &|key, value| { + Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( 
+ key.len() as u32, + value.len() as u32, + use_sum_nodes, + )) + }, + None::<&fn(&[u8], &GroveVersion) -> Option>, + &mut |_costs, _old_value, _value| Ok((false, None)), + &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { + Ok(( + BasicStorageRemoval(key_bytes_to_remove), + BasicStorageRemoval(value_bytes_to_remove), + )) + }, + grove_version, + ) + } + + /// Applies a batch of operations (puts and deletes) to the tree. + /// + /// This will fail if the keys in `batch` are not sorted and unique. This + /// check creates some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `apply_unchecked` for a small performance + /// gain. + /// + /// # Example + /// ``` + /// # let grove_version = GroveVersion::latest(); + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(grove_version); + /// # store.apply::<_, Vec<_>>( + /// &[(vec![4,5,6], + /// Op::Put(vec![0], BasicMerkNode))], + /// &[], + /// None, + /// grove_version + /// ).unwrap().expect(""); + /// + /// use grovedb_merk::Op; + /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// use grovedb_version::version::GroveVersion; + /// + /// let batch = &[ + /// // puts value [4,5,6] to key[1,2,3] + /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode)), + /// // deletes key [4,5,6] + /// (vec![4, 5, 6], Op::Delete), + /// ]; + /// store.apply::<_, Vec<_>>(batch, &[], None,grove_version).unwrap().expect(""); + /// ``` + pub fn apply_with_specialized_costs( + &mut self, + batch: &MerkBatch, + aux: &AuxMerkBatch, + options: Option, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> + where + KB: AsRef<[u8]>, + KA: AsRef<[u8]>, + { + self.apply_with_costs_just_in_time_value_update( + batch, + aux, + options, + old_specialized_cost, + value_defined_cost_fn, + &mut |_costs, _old_value, _value| Ok((false, None)), + 
&mut |_a, key_bytes_to_remove, value_bytes_to_remove| { + Ok(( + BasicStorageRemoval(key_bytes_to_remove), + BasicStorageRemoval(value_bytes_to_remove), + )) + }, + grove_version, + ) + } + + /// Applies a batch of operations (puts and deletes) to the tree with the + /// ability to update values based on costs. + /// + /// This will fail if the keys in `batch` are not sorted and unique. This + /// check creates some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `apply_unchecked` for a small performance + /// gain. + /// + /// # Example + /// ``` + /// # let grove_version = GroveVersion::latest(); + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(grove_version); + /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( /// /// /// /// + /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], + /// &[], + /// None, + /// &|k, v| Ok(0), + /// None::<&fn(&[u8], &GroveVersion) -> Option>, + /// &mut |s, v, o| Ok((false, None)), + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)), + /// grove_version, + /// ).unwrap().expect(""); + /// + /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; + /// use grovedb_merk::Op; + /// use grovedb_merk::tree::kv::ValueDefinedCostType; + /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// use grovedb_version::version::GroveVersion; + /// + /// let batch = &[ + /// // puts value [4,5,6] to key[1,2,3] + /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode)), + /// // deletes key [4,5,6] + /// (vec![4, 5, 6], Op::Delete), + /// ]; + /// + /// store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( + /// batch, + /// &[], + /// None, + /// &|k, v| Ok(0), + /// None::<&fn(&[u8], &GroveVersion) -> Option>, + /// &mut |s, v, o| Ok((false, None)), + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)), + /// grove_version, + /// ).unwrap().expect(""); + /// ``` + pub fn 
apply_with_costs_just_in_time_value_update( + &mut self, + batch: &MerkBatch, + aux: &AuxMerkBatch, + options: Option, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> + where + KB: AsRef<[u8]>, + KA: AsRef<[u8]>, + { + // ensure keys in batch are sorted and unique + let mut maybe_prev_key: Option<&KB> = None; + for (key, ..) in batch.iter() { + if let Some(prev_key) = maybe_prev_key { + match prev_key.as_ref().cmp(key.as_ref()) { + Ordering::Greater => { + return Err(Error::InvalidInputError("Keys in batch must be sorted")) + .wrap_with_cost(Default::default()) + } + Ordering::Equal => { + return Err(Error::InvalidInputError("Keys in batch must be unique")) + .wrap_with_cost(Default::default()) + } + _ => (), + } + } + maybe_prev_key = Some(key); + } + + self.apply_unchecked( + batch, + aux, + options, + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version, + ) + } + + /// Applies a batch of operations (puts and deletes) to the tree. + /// + /// # Safety + /// This is unsafe because the keys in `batch` must be sorted and unique - + /// if they are not, there will be undefined behavior. For a safe version of + /// this method which checks to ensure the batch is sorted and unique, see + /// `apply`. 
+ /// + /// # Example + /// ``` + /// # let grove_version = GroveVersion::latest(); + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(grove_version); + /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( /// /// /// /// + /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], + /// &[], + /// None, + /// &|k, v| Ok(0), + /// None::<&fn(&[u8], &GroveVersion) -> Option>, + /// &mut |s, o, v| Ok((false, None)), + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)), + /// grove_version, + /// ).unwrap().expect(""); + /// + /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; + /// use grovedb_merk::Op; + /// use grovedb_merk::tree::kv::ValueDefinedCostType; + /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// use grovedb_version::version::GroveVersion; + /// + /// let batch = &[ + /// // puts value [4,5,6] to key [1,2,3] + /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode)), + /// // deletes key [4,5,6] + /// (vec![4, 5, 6], Op::Delete), + /// ]; + /// unsafe { store.apply_unchecked::<_, Vec<_>, _, _, _, _>( /// /// /// /// /// ////// + /// batch, + /// &[], + /// None, + /// &|k, v| Ok(0), + /// None::<&fn(&[u8], &GroveVersion) -> Option>, + /// &mut |s, o, v| Ok((false, None)), + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)), + /// grove_version, + /// ).unwrap().expect(""); + /// } + /// ``` + pub fn apply_unchecked( + &mut self, + batch: &MerkBatch, + aux: &AuxMerkBatch, + options: Option, + old_specialized_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, + section_removal_bytes: &mut R, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> + where + KB: AsRef<[u8]>, + KA: AsRef<[u8]>, + C: Fn(&Vec, &Vec) -> Result, + V: Fn(&[u8], &GroveVersion) -> Option, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, + R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, 
StorageRemovedBytes), Error>, + { + let maybe_walker = self + .tree + .take() + .take() + .map(|tree| Walker::new(tree, self.source())); + + Walker::apply_to( + maybe_walker, + batch, + self.source(), + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version, + ) + .flat_map_ok(|(maybe_tree, key_updates)| { + // we set the new root node of the merk tree + self.tree.set(maybe_tree); + // commit changes to db + self.commit(key_updates, aux, options, old_specialized_cost) + }) + } +} diff --git a/merk/src/merk/chunks.rs b/merk/src/merk/chunks.rs index 7e8c588e8..20c6cc391 100644 --- a/merk/src/merk/chunks.rs +++ b/merk/src/merk/chunks.rs @@ -1,500 +1,1106 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Provides `ChunkProducer`, which creates chunk proofs for full replication of -//! 
a Merk. - -#[cfg(feature = "full")] -use grovedb_costs::CostsExt; -#[cfg(feature = "full")] -use grovedb_storage::{RawIterator, StorageContext}; - -#[cfg(feature = "full")] -use super::Merk; -#[cfg(feature = "full")] +use std::collections::VecDeque; + +use ed::Encode; +use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; + use crate::{ error::Error, - proofs::{chunk::get_next_chunk, Node, Op}, + proofs::{ + chunk::{ + chunk_op::ChunkOp, + error::ChunkError, + util::{ + chunk_height, chunk_index_from_traversal_instruction, + chunk_index_from_traversal_instruction_with_recovery, + generate_traversal_instruction, generate_traversal_instruction_as_vec_bytes, + number_of_chunks, vec_bytes_as_traversal_instruction, + }, + }, + Node, Op, + }, + Error::ChunkingError, + Merk, }; -#[cfg(feature = "full")] +/// ChunkProof for replication of a single subtree +#[derive(Debug)] +pub struct SubtreeChunk { + chunk: Vec, + next_index: Option, + remaining_limit: Option, +} + +impl SubtreeChunk { + pub fn new(chunk: Vec, next_index: Option, remaining_limit: Option) -> Self { + Self { + chunk, + next_index, + remaining_limit, + } + } +} + +/// ChunkProof for the replication of multiple subtrees. +#[derive(Debug)] +pub struct MultiChunk { + pub chunk: Vec, + pub next_index: Option>, + pub remaining_limit: Option, +} + +impl MultiChunk { + pub fn new( + chunk: Vec, + next_index: Option>, + remaining_limit: Option, + ) -> Self { + Self { + chunk, + next_index, + remaining_limit, + } + } +} + /// A `ChunkProducer` allows the creation of chunk proofs, used for trustlessly /// replicating entire Merk trees. Chunks can be generated on the fly in a /// random order, or iterated in order for slightly better performance. 
-pub struct ChunkProducer<'db, S: StorageContext<'db>> { - trunk: Vec, - chunk_boundaries: Vec>, - raw_iter: S::RawIterator, +pub struct ChunkProducer<'db, S> { + /// Represents the max height of the Merk tree + height: usize, + /// Represents the index of the next chunk index: usize, + merk: &'db Merk, } -#[cfg(feature = "full")] impl<'db, S> ChunkProducer<'db, S> where S: StorageContext<'db>, { - /// Creates a new `ChunkProducer` for the given `Merk` instance. In the - /// constructor, the first chunk (the "trunk") will be created. - pub fn new(merk: &Merk) -> Result { - let (trunk, has_more) = merk - .walk(|maybe_walker| match maybe_walker { - Some(mut walker) => walker.create_trunk_proof(), - None => Ok((vec![], false)).wrap_with_cost(Default::default()), - }) - .unwrap()?; - - let chunk_boundaries = if has_more { - trunk - .iter() - .filter_map(|op| match op { - Op::Push(Node::KVValueHashFeatureType(key, ..)) => Some(key.clone()), - _ => None, - }) - .collect() - } else { - vec![] - }; - - let mut raw_iter = merk.storage.raw_iter(); - raw_iter.seek_to_first().unwrap(); - - Ok(ChunkProducer { - trunk, - chunk_boundaries, - raw_iter, - index: 0, + /// Creates a new `ChunkProducer` for the given `Merk` instance + pub fn new(merk: &'db Merk) -> Result { + let tree_height = merk + .height() + .ok_or(Error::ChunkingError(ChunkError::EmptyTree( + "cannot create chunk producer for empty Merk", + )))?; + Ok(Self { + height: tree_height as usize, + index: 1, + merk, }) } /// Gets the chunk with the given index. Errors if the index is out of /// bounds or the tree is empty - the number of chunks can be checked by /// calling `producer.len()`. 
- pub fn chunk(&mut self, index: usize) -> Result, Error> { - if index >= self.len() { - return Err(Error::ChunkingError("Chunk index out-of-bounds")); + pub fn chunk_with_index( + &mut self, + chunk_index: usize, + grove_version: &GroveVersion, + ) -> Result<(Vec, Option), Error> { + let traversal_instructions = generate_traversal_instruction(self.height, chunk_index)?; + self.chunk_internal(chunk_index, traversal_instructions, grove_version) + } + + /// Returns the chunk at a given chunk id. + pub fn chunk( + &mut self, + chunk_id: &[u8], + grove_version: &GroveVersion, + ) -> Result<(Vec, Option>), Error> { + let traversal_instructions = vec_bytes_as_traversal_instruction(chunk_id)?; + let chunk_index = chunk_index_from_traversal_instruction_with_recovery( + traversal_instructions.as_slice(), + self.height, + )?; + let (chunk, next_index) = + self.chunk_internal(chunk_index, traversal_instructions, grove_version)?; + let next_chunk_id = next_index + .map(|index| generate_traversal_instruction_as_vec_bytes(self.height, index)) + .transpose()?; + Ok((chunk, next_chunk_id)) + } + + /// Returns the chunk at the given index + /// Assumes index and traversal_instructions represents the same information + fn chunk_internal( + &mut self, + index: usize, + traversal_instructions: Vec, + grove_version: &GroveVersion, + ) -> Result<(Vec, Option), Error> { + // ensure that the chunk index is within bounds + let max_chunk_index = self.len(); + if index < 1 || index > max_chunk_index { + return Err(ChunkingError(ChunkError::OutOfBounds( + "chunk index out of bounds", + ))); } - self.index = index; + self.index = index + 1; + + let chunk_height = chunk_height(self.height, index).unwrap(); - if index == 0 || index == 1 { - self.raw_iter.seek_to_first().unwrap(); + let chunk = self.merk.walk(|maybe_walker| match maybe_walker { + Some(mut walker) => walker.traverse_and_build_chunk( + &traversal_instructions, + chunk_height, + grove_version, + ), + None => 
Err(Error::ChunkingError(ChunkError::EmptyTree( + "cannot create chunk producer for empty Merk", + ))), + })?; + + // now we need to return the next index + // how do we know if we should return some or none + if self.index > max_chunk_index { + Ok((chunk, None)) } else { - let preceding_key = self.chunk_boundaries.get(index - 2).unwrap(); - self.raw_iter.seek(preceding_key).unwrap(); - self.raw_iter.next().unwrap(); + Ok((chunk, Some(self.index))) } + } - self.next_chunk() + /// Generate multichunk with chunk id + /// Multichunks accumulate as many chunks as they can until they have all + /// chunks or hit some optional limit + pub fn multi_chunk_with_limit( + &mut self, + chunk_id: &[u8], + limit: Option, + grove_version: &GroveVersion, + ) -> Result { + // we want to convert the chunk id to the index + let chunk_index = vec_bytes_as_traversal_instruction(chunk_id).and_then(|instruction| { + chunk_index_from_traversal_instruction(instruction.as_slice(), self.height) + })?; + self.multi_chunk_with_limit_and_index(chunk_index, limit, grove_version) } - /// Returns the total number of chunks for the underlying Merk tree. - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - let boundaries_len = self.chunk_boundaries.len(); - if boundaries_len == 0 { - 1 - } else { - boundaries_len + 2 + /// Generate multichunk with chunk index + /// Multichunks accumulate as many chunks as they can until they have all + /// chunks or hit some optional limit + pub fn multi_chunk_with_limit_and_index( + &mut self, + index: usize, + limit: Option, + grove_version: &GroveVersion, + ) -> Result { + // TODO: what happens if the vec is filled? + // we need to have some kind of hardhoc limit value if none is supplied. + // maybe we can just do something with the length to fix this? 
+ let mut chunk = vec![]; + + let mut current_index = Some(index); + let mut current_limit = limit; + + // generate as many subtree chunks as we can + // until we have exhausted all or hit a limit restriction + while current_index.is_some() { + let current_index_traversal_instruction = generate_traversal_instruction( + self.height, + current_index.expect("confirmed is Some"), + )?; + let chunk_id_op = ChunkOp::ChunkId(current_index_traversal_instruction); + + // factor in the ChunkId encoding length in limit calculations + let temp_limit = if let Some(limit) = current_limit { + let chunk_id_op_encoding_len = chunk_id_op.encoding_length().map_err(|_e| { + Error::ChunkingError(ChunkError::InternalError("cannot get encoding length")) + })?; + if limit >= chunk_id_op_encoding_len { + Some(limit - chunk_id_op_encoding_len) + } else { + Some(0) + } + } else { + None + }; + + let subtree_multi_chunk_result = self.subtree_multi_chunk_with_limit( + current_index.expect("confirmed is not None"), + temp_limit, + grove_version, + ); + + let limit_too_small_error = matches!( + subtree_multi_chunk_result, + Err(ChunkingError(ChunkError::LimitTooSmall(..))) + ); + + if limit_too_small_error { + if chunk.is_empty() { + // no progress, return limit too small error + return Err(Error::ChunkingError(ChunkError::LimitTooSmall( + "limit too small for initial chunk", + ))); + } else { + // made progress, send accumulated chunk + break; + } + } + + let subtree_multi_chunk = subtree_multi_chunk_result?; + + chunk.push(chunk_id_op); + chunk.push(ChunkOp::Chunk(subtree_multi_chunk.chunk)); + + // update loop parameters + current_index = subtree_multi_chunk.next_index; + current_limit = subtree_multi_chunk.remaining_limit; } + + let index_bytes = current_index + .map(|index| generate_traversal_instruction_as_vec_bytes(self.height, index)) + .transpose()?; + + Ok(MultiChunk::new(chunk, index_bytes, current_limit)) } - /// Gets the next chunk based on the `ChunkProducer`'s internal index 
state. - /// This is mostly useful for letting `ChunkIter` yield the chunks in order, - /// optimizing throughput compared to random access. - fn next_chunk(&mut self) -> Result, Error> { - if self.index == 0 { - if self.trunk.is_empty() { - return Err(Error::ChunkingError( - "Attempted to fetch chunk on empty tree", - )); + /// Packs as many chunks as it can from a starting chunk index, into a + /// vector. Stops when we have exhausted all chunks or we have reached + /// some limit. + fn subtree_multi_chunk_with_limit( + &mut self, + index: usize, + limit: Option, + grove_version: &GroveVersion, + ) -> Result { + let max_chunk_index = number_of_chunks(self.height); + let mut chunk_index = index; + + // we first get the chunk at the given index + // TODO: use the returned chunk index rather than tracking + let (chunk_ops, _) = self.chunk_with_index(chunk_index, grove_version)?; + let mut chunk_byte_length = chunk_ops.encoding_length().map_err(|_e| { + Error::ChunkingError(ChunkError::InternalError("can't get encoding length")) + })?; + chunk_index += 1; + + let mut chunk = VecDeque::from(chunk_ops); + + // ensure the limit is not less than first chunk byte length + // if it is we can't proceed and didn't make progress so we return an error + if let Some(limit) = limit { + if chunk_byte_length > limit { + return Err(Error::ChunkingError(ChunkError::LimitTooSmall( + "limit too small for initial chunk", + ))); } - self.index += 1; - return Ok(self.trunk.clone()); } - if self.index >= self.len() { - panic!("Called next_chunk after end"); + let mut iteration_index = 0; + while iteration_index < chunk.len() { + // we only perform replacements on Hash nodes + if matches!(chunk[iteration_index], Op::Push(Node::Hash(..))) { + // TODO: use the returned chunk index rather than tracking + let (replacement_chunk, _) = self.chunk_with_index(chunk_index, grove_version)?; + + // calculate the new total + let new_total = replacement_chunk.encoding_length().map_err(|_e| { + 
Error::ChunkingError(ChunkError::InternalError("can't get encoding length")) + })? + chunk_byte_length + - chunk[iteration_index].encoding_length().map_err(|_e| { + Error::ChunkingError(ChunkError::InternalError("can't get encoding length")) + })?; + + // verify that this chunk doesn't make use exceed the limit + if let Some(limit) = limit { + if new_total > limit { + let next_index = match chunk_index > max_chunk_index { + true => None, + _ => Some(chunk_index), + }; + + return Ok(SubtreeChunk::new( + chunk.into(), + next_index, + Some(limit - chunk_byte_length), + )); + } + } + + chunk_byte_length = new_total; + chunk_index += 1; + + chunk.remove(iteration_index); + for op in replacement_chunk.into_iter().rev() { + chunk.insert(iteration_index, op); + } + } else { + iteration_index += 1; + } } - let end_key = self.chunk_boundaries.get(self.index - 1); - let end_key_slice = end_key.as_ref().map(|k| k.as_slice()); + let remaining_limit = limit.map(|l| l - chunk_byte_length); + let next_index = match chunk_index > max_chunk_index { + true => None, + _ => Some(chunk_index), + }; - self.index += 1; + Ok(SubtreeChunk::new(chunk.into(), next_index, remaining_limit)) + } - get_next_chunk(&mut self.raw_iter, end_key_slice).unwrap() + /// Returns the total number of chunks for the underlying Merk tree. + pub fn len(&self) -> usize { + number_of_chunks(self.height) } -} -#[cfg(feature = "full")] -impl<'db, S> IntoIterator for ChunkProducer<'db, S> -where - S: StorageContext<'db>, -{ - type IntoIter = ChunkIter<'db, S>; - type Item = as Iterator>::Item; + pub fn is_empty(&self) -> bool { + number_of_chunks(self.height) == 0 + } + + /// Gets the next chunk based on the `ChunkProducer`'s internal index state. + /// This is mostly useful for letting `ChunkIter` yield the chunks in order, + /// optimizing throughput compared to random access. 
+ // TODO: this is not better than random access, as we are not keeping state + // that will make this more efficient, decide if this should be fixed or not + fn next_chunk( + &mut self, + grove_version: &GroveVersion, + ) -> Option, Option>), Error>> { + let max_index = number_of_chunks(self.height); + if self.index > max_index { + return None; + } - fn into_iter(self) -> Self::IntoIter { - ChunkIter(self) + // get the chunk at the given index + // return the next index as a string + Some( + self.chunk_with_index(self.index, grove_version) + .and_then(|(chunk, chunk_index)| { + chunk_index + .map(|index| { + generate_traversal_instruction_as_vec_bytes(self.height, index) + }) + .transpose() + .map(|v| (chunk, v)) + }), + ) } } -#[cfg(feature = "full")] -/// A `ChunkIter` iterates through all the chunks for the underlying `Merk` -/// instance in order (the first chunk is the "trunk" chunk). Yields `None` -/// after all chunks have been yielded. -pub struct ChunkIter<'db, S>(ChunkProducer<'db, S>) -where - S: StorageContext<'db>; - -#[cfg(feature = "full")] -impl<'db, S> Iterator for ChunkIter<'db, S> +/// Iterate over each chunk, returning `None` after last chunk +impl<'db, S> ChunkProducer<'db, S> where S: StorageContext<'db>, { - type Item = Result, Error>; - - fn size_hint(&self) -> (usize, Option) { - (self.0.len(), Some(self.0.len())) - } - - fn next(&mut self) -> Option { - if self.0.index >= self.0.len() { - None - } else { - Some(self.0.next_chunk()) - } + pub fn next( + &mut self, + grove_version: &GroveVersion, + ) -> Option, Option>), Error>> { + self.next_chunk(grove_version) } } -#[cfg(feature = "full")] impl<'db, S> Merk where S: StorageContext<'db>, { /// Creates a `ChunkProducer` which can return chunk proofs for replicating /// the entire Merk tree. 
- pub fn chunks(&self) -> Result, Error> { + pub fn chunks(&'db self) -> Result, Error> { ChunkProducer::new(self) } } -#[cfg(feature = "full")] #[cfg(test)] -mod tests { - use grovedb_path::SubtreePath; - use grovedb_storage::{rocksdb_storage::RocksDbStorage, Storage, StorageBatch}; - use tempfile::TempDir; - +mod test { use super::*; use crate::{ - proofs::chunk::{verify_leaf, verify_trunk}, - test_utils::*, + proofs::{ + chunk::{ + chunk::{ + tests::{traverse_get_kv_feature_type, traverse_get_node_hash}, + LEFT, RIGHT, + }, + util::traversal_instruction_as_vec_bytes, + }, + tree::execute, + Tree, + }, + test_utils::{make_batch_seq, TempMerk}, + tree::RefWalker, + PanicSource, }; - #[test] - fn len_small() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..256); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); - merk.commit(); - - let chunks = merk.chunks().unwrap(); - assert_eq!(chunks.len(), 1); - assert_eq!(chunks.into_iter().size_hint().0, 1); + #[derive(Default)] + struct NodeCounts { + hash: usize, + kv_hash: usize, + kv: usize, + kv_value_hash: usize, + kv_digest: usize, + kv_ref_value_hash: usize, + kv_value_hash_feature_type: usize, + } + + impl NodeCounts { + fn sum(&self) -> usize { + self.hash + + self.kv_hash + + self.kv + + self.kv_value_hash + + self.kv_digest + + self.kv_ref_value_hash + + self.kv_value_hash_feature_type + } + } + + fn count_node_types(tree: Tree) -> NodeCounts { + let mut counts = NodeCounts::default(); + + tree.visit_nodes(&mut |node| { + match node { + Node::Hash(_) => counts.hash += 1, + Node::KVHash(_) => counts.kv_hash += 1, + Node::KV(..) => counts.kv += 1, + Node::KVValueHash(..) => counts.kv_value_hash += 1, + Node::KVDigest(..) => counts.kv_digest += 1, + Node::KVRefValueHash(..) => counts.kv_ref_value_hash += 1, + Node::KVValueHashFeatureType(..) 
=> counts.kv_value_hash_feature_type += 1, + }; + }); + + counts } #[test] - fn len_big() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..10_000); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); - merk.commit(); - - let chunks = merk.chunks().unwrap(); - assert_eq!(chunks.len(), 129); - assert_eq!(chunks.into_iter().size_hint().0, 129); + fn test_merk_chunk_len() { + let grove_version = GroveVersion::latest(); + // Tree of height 5 - max of 31 elements, min of 16 elements + // 5 will be broken into 2 layers = [3, 2] + // exit nodes from first layer = 2^3 = 8 + // total_chunk = 1 + 8 = 9 chunks + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..20); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(5)); + let chunk_producer = ChunkProducer::new(&merk).unwrap(); + assert_eq!(chunk_producer.len(), 9); + + // Tree of height 10 - max of 1023 elements, min of 512 elements + // 4 layers -> [3,3,2,2] + // chunk_count_per_layer -> [1, 8, 64, 256] + // total = 341 chunks + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..1000); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(10)); + let chunk_producer = ChunkProducer::new(&merk).unwrap(); + assert_eq!(chunk_producer.len(), 329); } #[test] - fn generate_and_verify_chunks() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..10_000); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); - merk.commit(); + fn test_chunk_producer_iter() { + let grove_version = GroveVersion::latest(); + // tree with height 4 + // full tree + // 7 + // / \ + // 3 11 + // / \ / \ + // 1 5 9 13 + // / \ / \ / \ / \ + // 0 2 4 6 8 10 12 14 + // going to be broken into [2, 2] + // that's a total of 5 chunks - let mut chunks = merk.chunks().unwrap().into_iter().map(|x| 
x.unwrap()); + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); - let chunk = chunks.next().unwrap(); - let (trunk, height) = verify_trunk(chunk.into_iter().map(Ok)).unwrap().unwrap(); - assert_eq!(height, 14); - assert_eq!(trunk.hash().unwrap(), merk.root_hash().unwrap()); + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); - assert_eq!(trunk.layer(7).count(), 128); + // build iterator from first chunk producer + let mut chunks = merk.chunks().expect("should return producer"); - for (ops, node) in chunks.zip(trunk.layer(height / 2)) { - verify_leaf(ops.into_iter().map(Ok), node.hash().unwrap()) - .unwrap() - .unwrap(); + // ensure that the chunks gotten from the iterator is the same + // as that from the chunk producer + for i in 1..=5 { + assert_eq!( + chunks.next(grove_version).unwrap().unwrap().0, + chunk_producer.chunk_with_index(i, grove_version).unwrap().0 + ); } + + // returns None after max + assert!(chunks.next(grove_version).is_none()); } #[test] - fn chunks_from_reopen() { - let tmp_dir = TempDir::new().expect("cannot create tempdir"); - let original_chunks = { - let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) - .expect("cannot open rocksdb storage"); - let batch = StorageBatch::new(); - let mut merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) - .unwrap(), - false, - ) + fn test_random_chunk_access() { + let grove_version = GroveVersion::latest(); + // tree with height 4 + // full tree + // 7 + // / \ + // 3 11 + // / \ / \ + // 1 5 9 13 + // / \ / \ / \ / \ + // 0 2 4 6 8 10 12 14 + // going to be broken into [2, 2] + // that's a total of 5 chunks + + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, 
grove_version) .unwrap() - .unwrap(); - let merk_batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(&merk_batch, &[], None) - .unwrap() - .unwrap(); + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); - storage - .commit_multi_context_batch(batch, None) - .unwrap() - .expect("cannot commit batch"); + let mut inner_tree = merk.tree.take().expect("has inner tree"); + merk.tree.set(Some(inner_tree.clone())); - let merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::empty(), None) - .unwrap(), - false, - ) - .unwrap() - .unwrap(); + // TODO: should I be using panic source? + let mut tree_walker = RefWalker::new(&mut inner_tree, PanicSource {}); - merk.chunks() - .unwrap() - .into_iter() - .map(|x| x.unwrap()) - .collect::>() - .into_iter() - }; - let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) - .expect("cannot open rocksdb storage"); - let merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::empty(), None) - .unwrap(), - false, - ) - .unwrap() - .unwrap(); - let reopen_chunks = merk.chunks().unwrap().into_iter().map(|x| x.unwrap()); + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + assert_eq!(chunk_producer.len(), 5); - for (original, checkpoint) in original_chunks.zip(reopen_chunks) { - assert_eq!(original.len(), checkpoint.len()); - } - } + // assert bounds + assert!(chunk_producer.chunk_with_index(0, grove_version).is_err()); + assert!(chunk_producer.chunk_with_index(6, grove_version).is_err()); - // #[test] - // fn chunks_from_checkpoint() { - // let mut merk = TempMerk::new(); - // let batch = make_batch_seq(1..10); - // merk.apply(batch.as_slice(), &[]).unwrap(); + // first chunk + // expected: + // 7 + // / \ + // 3 11 + // / \ / \ + // H(1) H(5) H(9) H(13) + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(1, grove_version) + .expect("should generate chunk"); + assert_eq!(chunk.len(), 13); + assert_eq!(next_chunk, Some(2)); + 
assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, RIGHT], + grove_version + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, RIGHT], + grove_version + )), + Op::Child, + Op::Child + ] + ); - // let path: std::path::PathBuf = - // "generate_and_verify_chunks_from_checkpoint.db".into(); if path. - // exists() { std::fs::remove_dir_all(&path).unwrap(); - // } - // let checkpoint = merk.checkpoint(&path).unwrap(); + // second chunk + // expected: + // 1 + // / \ + // 0 2 + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(2, grove_version) + .expect("should generate chunk"); + assert_eq!(chunk.len(), 5); + assert_eq!(next_chunk, Some(3)); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT, RIGHT], + grove_version + )), + Op::Child + ] + ); - // let original_chunks = - // merk.chunks().unwrap().into_iter().map(Result::unwrap); - // let checkpoint_chunks = - // checkpoint.chunks().unwrap().into_iter().map(Result::unwrap); + // third chunk + // expected: + // 5 + // / \ + // 4 6 + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(3, grove_version) + .expect("should generate chunk"); + 
assert_eq!(chunk.len(), 5); + assert_eq!(next_chunk, Some(4)); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT, RIGHT], + grove_version + )), + Op::Child + ] + ); - // for (original, checkpoint) in original_chunks.zip(checkpoint_chunks) { - // assert_eq!(original.len(), checkpoint.len()); - // } + // third chunk + // expected: + // 9 + // / \ + // 8 10 + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(4, grove_version) + .expect("should generate chunk"); + assert_eq!(chunk.len(), 5); + assert_eq!(next_chunk, Some(5)); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT], + grove_version + )), + Op::Child + ] + ); - // std::fs::remove_dir_all(&path).unwrap(); - // } + // third chunk + // expected: + // 13 + // / \ + // 12 14 + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(5, grove_version) + .expect("should generate chunk"); + assert_eq!(chunk.len(), 5); + assert_eq!(next_chunk, None); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT, RIGHT], + grove_version + )), + Op::Child + ] + ); + } #[test] - fn random_access_chunks() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..111); - 
merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); + fn test_subtree_chunk_no_limit() { + let grove_version = GroveVersion::latest(); + // tree of height 4 + // 5 chunks + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + // generate multi chunk with no limit + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, None, grove_version) + .expect("should generate chunk with limit"); + + assert_eq!(chunk_result.remaining_limit, None); + assert_eq!(chunk_result.next_index, None); - let chunks = merk - .chunks() + let tree = execute(chunk_result.chunk.into_iter().map(Ok), false, |_| Ok(())) .unwrap() - .into_iter() - .map(|x| x.unwrap()) - .collect::>(); - - let mut producer = merk.chunks().unwrap(); - for i in 0..chunks.len() * 2 { - let index = i % chunks.len(); - assert_eq!(producer.chunk(index).unwrap(), chunks[index]); - } + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + // assert that all nodes are of type kv_value_hash_feature_type + let node_counts = count_node_types(tree); + assert_eq!(node_counts.hash, 0); + assert_eq!(node_counts.kv_hash, 0); + assert_eq!(node_counts.kv, 0); + assert_eq!(node_counts.kv_value_hash, 0); + assert_eq!(node_counts.kv_digest, 0); + assert_eq!(node_counts.kv_ref_value_hash, 0); + assert_eq!(node_counts.kv_value_hash_feature_type, 15); } #[test] - #[should_panic(expected = "Attempted to fetch chunk on empty tree")] - fn test_chunk_empty() { - let merk = TempMerk::new(); + fn test_subtree_chunk_with_limit() { + let grove_version = GroveVersion::latest(); + // tree of height 4 + // 5 chunks + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, 
Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + + // initial chunk is of size 453, so limit of 10 is too small + // should return an error + let chunk = chunk_producer.subtree_multi_chunk_with_limit(1, Some(10), grove_version); + assert!(chunk.is_err()); - let _chunks = merk - .chunks() + // get just the fist chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(453), grove_version) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, Some(2)); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 453); + assert_eq!(chunk.len(), 13); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 3); + assert_eq!(node_counts.hash, 4); + assert_eq!(node_counts.sum(), 4 + 3); + + // get up to second chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(737), grove_version) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, Some(3)); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 737); + assert_eq!(chunk.len(), 17); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) .unwrap() - .into_iter() - .map(|x| x.unwrap()) - .collect::>(); + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 6); + assert_eq!(node_counts.hash, 
3); + assert_eq!(node_counts.sum(), 6 + 3); + + // get up to third chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(1021), grove_version) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, Some(4)); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 1021); + assert_eq!(chunk.len(), 21); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 9); + assert_eq!(node_counts.hash, 2); + assert_eq!(node_counts.sum(), 9 + 2); + + // get up to fourth chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(1305), grove_version) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, Some(5)); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 1305); + assert_eq!(chunk.len(), 25); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 12); + assert_eq!(node_counts.hash, 1); + assert_eq!(node_counts.sum(), 12 + 1); + + // get up to fifth chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(1589), grove_version) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, None); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 1589); + assert_eq!(chunk.len(), 29); // op count + let 
tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 15); + assert_eq!(node_counts.hash, 0); + assert_eq!(node_counts.sum(), 15); + + // limit larger than total chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(usize::MAX), grove_version) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(18446744073709550026)); + assert_eq!(chunk_result.next_index, None); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 1589); + assert_eq!(chunk.len(), 29); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 15); + assert_eq!(node_counts.hash, 0); + assert_eq!(node_counts.sum(), 15); } #[test] - #[should_panic(expected = "Chunk index out-of-bounds")] - fn test_chunk_index_oob() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..42); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); - - let mut producer = merk.chunks().unwrap(); - let _chunk = producer.chunk(50000).unwrap(); + fn test_multi_chunk_with_no_limit_trunk() { + let grove_version = GroveVersion::latest(); + // tree of height 4 + // 5 chunks + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + + // we generate the chunk starting from index 1, this has no hash nodes + // so 
no multi chunk will be generated + let chunk_result = chunk_producer + .multi_chunk_with_limit_and_index(1, None, grove_version) + .expect("should generate chunk with limit"); + + assert_eq!(chunk_result.remaining_limit, None); + assert_eq!(chunk_result.next_index, None); + + // should only contain 2 items, the starting chunk id and the entire tree + assert_eq!(chunk_result.chunk.len(), 2); + + // assert items + assert_eq!(chunk_result.chunk[0], ChunkOp::ChunkId(vec![])); + if let ChunkOp::Chunk(chunk) = &chunk_result.chunk[1] { + let tree = execute(chunk.clone().into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + } else { + panic!("expected ChunkOp::Chunk"); + } } - // #[test] - // fn test_chunk_index_gt_1_access() { - // let mut merk = TempMerk::new(); - // let batch = make_batch_seq(1..513); - // merk.apply::<_, Vec<_>>(&batch, &[]).unwrap().unwrap(); - - // let mut producer = merk.chunks().unwrap(); - // println!("length: {}", producer.len()); - // let chunk = producer.chunk(2).unwrap(); - // assert_eq!( - // chunk, - // vec![ - // 3, 8, 0, 0, 0, 0, 0, 0, 0, 18, 0, 60, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 3, 8, 0, 0, 0, 0, 0, 0, 0, 19, 0, 60, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, - // 0, 0, 0, 0, 0, 20, 0, 60, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 
123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 17, 3, 8, 0, 0, 0, 0, 0, 0, 0, 21, 0, 60, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, 0, 0, 0, 0, 0, 22, - // 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 3, 8, 0, 0, - // 0, 0, 0, 0, 0, 23, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 16, 3, 8, 0, 0, 0, 0, 0, 0, 0, 24, 0, 60, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 17, 17, 3, 8, 0, 0, 0, 0, 0, 0, 0, 25, 0, - // 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 
123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, - // 0, 0, 0, 0, 0, 26, 0, 60, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 3, 8, 0, 0, 0, 0, 0, 0, 0, 27, 0, 60, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, 0, 0, - // 0, 0, 0, 28, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 17, 3, 8, 0, 0, 0, 0, 0, 0, 0, 29, 0, 60, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, 0, 0, 0, 0, 0, - // 30, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 3, 8, 0, 0, - // 0, 0, 0, 0, 0, 31, 0, 60, 123, 123, 123, 123, 
123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 16, 3, 8, 0, 0, 0, 0, 0, 0, 0, 32, 0, 60, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 17, 17, 17 - // ] - // ); - // } + #[test] + fn test_multi_chunk_with_no_limit_not_trunk() { + let grove_version = GroveVersion::latest(); + // tree of height 4 + // 5 chunks + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + + // we generate the chunk starting from index 2, this has no hash nodes + // so no multi chunk will be generated + let chunk_result = chunk_producer + .multi_chunk_with_limit_and_index(2, None, grove_version) + .expect("should generate chunk with limit"); + + assert_eq!(chunk_result.remaining_limit, None); + assert_eq!(chunk_result.next_index, None); + + // chunk 2 - 5 will be considered separate subtrees + // each will have an accompanying chunk id, so 8 elements total + assert_eq!(chunk_result.chunk.len(), 8); + + // assert the chunk id's + assert_eq!(chunk_result.chunk[0], ChunkOp::ChunkId(vec![LEFT, LEFT])); + assert_eq!(chunk_result.chunk[2], ChunkOp::ChunkId(vec![LEFT, RIGHT])); + assert_eq!(chunk_result.chunk[4], ChunkOp::ChunkId(vec![RIGHT, LEFT])); + assert_eq!(chunk_result.chunk[6], ChunkOp::ChunkId(vec![RIGHT, 
RIGHT])); + + // assert the chunks + assert_eq!( + chunk_result.chunk[1], + ChunkOp::Chunk( + chunk_producer + .chunk_with_index(2, grove_version) + .expect("should generate chunk") + .0 + ) + ); + assert_eq!( + chunk_result.chunk[3], + ChunkOp::Chunk( + chunk_producer + .chunk_with_index(3, grove_version) + .expect("should generate chunk") + .0 + ) + ); + assert_eq!( + chunk_result.chunk[5], + ChunkOp::Chunk( + chunk_producer + .chunk_with_index(4, grove_version) + .expect("should generate chunk") + .0 + ) + ); + assert_eq!( + chunk_result.chunk[7], + ChunkOp::Chunk( + chunk_producer + .chunk_with_index(5, grove_version) + .expect("should generate chunk") + .0 + ) + ); + } #[test] - #[should_panic(expected = "Called next_chunk after end")] - fn test_next_chunk_index_oob() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..42); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); - - let mut producer = merk.chunks().unwrap(); - let _chunk1 = producer.next_chunk(); - let _chunk2 = producer.next_chunk(); + fn test_multi_chunk_with_limit() { + let grove_version = GroveVersion::latest(); + // tree of height 4 + // 5 chunks + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + + // ensure that the remaining limit, next index and values given are correct + // if limit is smaller than first chunk, we should get an error + let chunk_result = + chunk_producer.multi_chunk_with_limit(vec![].as_slice(), Some(5), grove_version); + assert!(matches!( + chunk_result, + Err(Error::ChunkingError(ChunkError::LimitTooSmall(..))) + )); + + // get chunk 2 + // data size of chunk 2 is exactly 317 + // chunk op encoding for chunk 2 = 321 + // hence limit of 317 will be insufficient + let chunk_result = 
+ chunk_producer.multi_chunk_with_limit_and_index(2, Some(317), grove_version); + assert!(matches!( + chunk_result, + Err(Error::ChunkingError(ChunkError::LimitTooSmall(..))) + )); + + // get chunk 2 and 3 + // chunk 2 chunk op = 331 + // chunk 3 chunk op = 321 + // padding = 5 + let chunk_result = chunk_producer + .multi_chunk_with_limit_and_index(2, Some(321 + 321 + 5), grove_version) + .expect("should generate chunk"); + assert_eq!( + chunk_result.next_index, + Some(traversal_instruction_as_vec_bytes( + &generate_traversal_instruction(4, 4).unwrap() + )) + ); + assert_eq!(chunk_result.remaining_limit, Some(5)); + assert_eq!(chunk_result.chunk.len(), 4); + assert_eq!(chunk_result.chunk[0], ChunkOp::ChunkId(vec![LEFT, LEFT])); + assert_eq!(chunk_result.chunk[2], ChunkOp::ChunkId(vec![LEFT, RIGHT])); } } diff --git a/merk/src/merk/clear.rs b/merk/src/merk/clear.rs new file mode 100644 index 000000000..0de28f6a7 --- /dev/null +++ b/merk/src/merk/clear.rs @@ -0,0 +1,32 @@ +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; +use grovedb_storage::{Batch, RawIterator, StorageContext}; + +use crate::{Error, Error::StorageError, Merk}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Deletes tree data + pub fn clear(&mut self) -> CostResult<(), Error> { + let mut cost = OperationCost::default(); + + let mut iter = self.storage.raw_iter(); + iter.seek_to_first().unwrap_add_cost(&mut cost); + + let mut to_delete = self.storage.new_batch(); + while iter.valid().unwrap_add_cost(&mut cost) { + if let Some(key) = iter.key().unwrap_add_cost(&mut cost) { + // todo: deal with cost reimbursement + to_delete.delete(key, None); + } + iter.next().unwrap_add_cost(&mut cost); + } + cost_return_on_error!( + &mut cost, + self.storage.commit_batch(to_delete).map_err(StorageError) + ); + self.tree.set(None); + Ok(()).wrap_with_cost(cost) + } +} diff --git a/merk/src/merk/committer.rs b/merk/src/merk/committer.rs new file mode 100644 index 
000000000..9fb029875 --- /dev/null +++ b/merk/src/merk/committer.rs @@ -0,0 +1,59 @@ +use crate::{ + merk::BatchValue, + tree::{Commit, TreeNode}, + Error, +}; + +pub struct MerkCommitter { + /// The batch has a key, maybe a value, with the value bytes, maybe the left + /// child size and maybe the right child size, then the + /// key_value_storage_cost + pub(in crate::merk) batch: Vec, + pub(in crate::merk) height: u8, + pub(in crate::merk) levels: u8, +} + +impl MerkCommitter { + pub(in crate::merk) fn new(height: u8, levels: u8) -> Self { + Self { + batch: Vec::with_capacity(10000), + height, + levels, + } + } +} + +impl Commit for MerkCommitter { + fn write( + &mut self, + tree: &mut TreeNode, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + ) -> Result<(), Error> { + let tree_size = tree.encoding_length(); + let storage_costs = if let Some(storage_costs) = tree.known_storage_cost.take() { + storage_costs + } else { + tree.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)? 
+ .1 + }; + + let mut buf = Vec::with_capacity(tree_size); + tree.encode_into(&mut buf); + + let left_child_sizes = tree.child_ref_and_sum_size(true); + let right_child_sizes = tree.child_ref_and_sum_size(false); + self.batch.push(( + tree.key().to_vec(), + tree.feature_type().sum_length(), + Some((buf, left_child_sizes, right_child_sizes)), + storage_costs, + )); + Ok(()) + } + + fn prune(&self, tree: &TreeNode) -> (bool, bool) { + // keep N top levels of tree + let prune = (self.height - tree.height()) >= self.levels; + (prune, prune) + } +} diff --git a/merk/src/merk/get.rs b/merk/src/merk/get.rs new file mode 100644 index 000000000..f38b6fc7b --- /dev/null +++ b/merk/src/merk/get.rs @@ -0,0 +1,402 @@ +use grovedb_costs::{CostContext, CostResult, CostsExt, OperationCost}; +use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; + +use crate::{ + tree::{kv::ValueDefinedCostType, TreeNode}, + CryptoHash, Error, + Error::StorageError, + Merk, TreeFeatureType, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Gets an auxiliary value. + pub fn get_aux(&self, key: &[u8]) -> CostResult>, Error> { + self.storage.get_aux(key).map_err(StorageError) + } + + /// Returns if the value at the given key exists + /// + /// Note that this is essentially the same as a normal RocksDB `get`, so + /// should be a fast operation and has almost no tree overhead. + pub fn exists( + &self, + key: &[u8], + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult { + self.has_node_direct(key, value_defined_cost_fn, grove_version) + } + + /// Returns if the value at the given key exists + /// + /// Note that this is essentially the same as a normal RocksDB `get`, so + /// should be a fast operation and has almost no tree overhead. 
+ /// Contrary to a simple exists, this traverses the tree and can be faster + /// if the tree is cached, but slower if it is not + pub fn exists_by_traversing_tree( + &self, + key: &[u8], + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult { + self.has_node(key, value_defined_cost_fn, grove_version) + } + + /// Gets a value for the given key. If the key is not found, `None` is + /// returned. + /// + /// Note that this is essentially the same as a normal RocksDB `get`, so + /// should be a fast operation and has almost no tree overhead. + pub fn get( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult>, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| { + node.value_as_slice() + .to_vec() + .wrap_with_cost(Default::default()) + }, + value_defined_cost_fn, + grove_version, + ) + } else { + self.get_node_direct_fn( + key, + |node| { + node.value_as_slice() + .to_vec() + .wrap_with_cost(Default::default()) + }, + value_defined_cost_fn, + grove_version, + ) + } + } + + /// Returns the feature type for the node at the given key. + pub fn get_feature_type( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| node.feature_type().wrap_with_cost(Default::default()), + value_defined_cost_fn, + grove_version, + ) + } else { + self.get_node_direct_fn( + key, + |node| node.feature_type().wrap_with_cost(Default::default()), + value_defined_cost_fn, + grove_version, + ) + } + } + + /// Gets a hash of a node by a given key, `None` is returned in case + /// when node not found by the key. 
+ pub fn get_hash( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| node.hash(), + value_defined_cost_fn, + grove_version, + ) + } else { + self.get_node_direct_fn( + key, + |node| node.hash(), + value_defined_cost_fn, + grove_version, + ) + } + } + + /// Gets the value hash of a node by a given key, `None` is returned in case + /// when node not found by the key. + pub fn get_value_hash( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| (*node.value_hash()).wrap_with_cost(OperationCost::default()), + value_defined_cost_fn, + grove_version, + ) + } else { + self.get_node_direct_fn( + key, + |node| (*node.value_hash()).wrap_with_cost(OperationCost::default()), + value_defined_cost_fn, + grove_version, + ) + } + } + + /// Gets a hash of a node by a given key, `None` is returned in case + /// when node not found by the key. + pub fn get_kv_hash( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()), + value_defined_cost_fn, + grove_version, + ) + } else { + self.get_node_direct_fn( + key, + |node| (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()), + value_defined_cost_fn, + grove_version, + ) + } + } + + /// Gets the value and value hash of a node by a given key, `None` is + /// returned in case when node not found by the key. 
+ pub fn get_value_and_value_hash( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult, CryptoHash)>, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| { + (node.value_as_slice().to_vec(), *node.value_hash()) + .wrap_with_cost(OperationCost::default()) + }, + value_defined_cost_fn, + grove_version, + ) + } else { + self.get_node_direct_fn( + key, + |node| { + (node.value_as_slice().to_vec(), *node.value_hash()) + .wrap_with_cost(OperationCost::default()) + }, + value_defined_cost_fn, + grove_version, + ) + } + } + + /// See if a node's field exists + fn has_node_direct( + &self, + key: &[u8], + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult { + TreeNode::get(&self.storage, key, value_defined_cost_fn, grove_version) + .map_ok(|x| x.is_some()) + } + + /// See if a node's field exists + fn has_node( + &self, + key: &[u8], + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult { + self.use_tree(move |maybe_tree| { + let mut cursor = match maybe_tree { + None => return Ok(false).wrap_with_cost(Default::default()), // empty tree + Some(tree) => tree, + }; + + loop { + if key == cursor.key() { + return Ok(true).wrap_with_cost(OperationCost::default()); + } + + let left = key < cursor.key(); + let link = match cursor.link(left) { + None => return Ok(false).wrap_with_cost(Default::default()), // not found + Some(link) => link, + }; + + let maybe_child = link.tree(); + match maybe_child { + None => { + // fetch from RocksDB + break self.has_node_direct(key, value_defined_cost_fn, grove_version); + } + Some(child) => cursor = child, // traverse to child + } + } + }) + } + + /// Generic way to get a node's field + pub(crate) fn get_node_direct_fn( + &self, + key: &[u8], + f: F, + 
value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult, Error> + where + F: FnOnce(&TreeNode) -> CostContext, + { + TreeNode::get(&self.storage, key, value_defined_cost_fn, grove_version).flat_map_ok( + |maybe_node| { + let mut cost = OperationCost::default(); + Ok(maybe_node.map(|node| f(&node).unwrap_add_cost(&mut cost))).wrap_with_cost(cost) + }, + ) + } + + /// Generic way to get a node's field + fn get_node_fn( + &self, + key: &[u8], + f: F, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult, Error> + where + F: FnOnce(&TreeNode) -> CostContext, + { + self.use_tree(move |maybe_tree| { + let mut cursor = match maybe_tree { + None => return Ok(None).wrap_with_cost(Default::default()), // empty tree + Some(tree) => tree, + }; + + loop { + if key == cursor.key() { + return f(cursor).map(|x| Ok(Some(x))); + } + + let left = key < cursor.key(); + let link = match cursor.link(left) { + None => return Ok(None).wrap_with_cost(Default::default()), // not found + Some(link) => link, + }; + + let maybe_child = link.tree(); + match maybe_child { + None => { + // fetch from RocksDB + break self.get_node_direct_fn( + key, + f, + value_defined_cost_fn, + grove_version, + ); + } + Some(child) => cursor = child, // traverse to child + } + } + }) + } +} + +#[cfg(test)] +mod test { + use grovedb_version::version::GroveVersion; + + use crate::{ + test_utils::TempMerk, tree::kv::ValueDefinedCostType, Op, TreeFeatureType::BasicMerkNode, + }; + + #[test] + fn test_has_node_with_empty_tree() { + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); + + let key = b"something"; + + let result = merk + .has_node( + key, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + + assert!(!result); + + let batch_entry = (key, Op::Put(vec![123; 60], BasicMerkNode)); 
+ + let batch = vec![batch_entry]; + + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("should ..."); + + let result = merk + .has_node( + key, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + + assert!(result); + } +} diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index 37276b65a..ee0deccc3 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -29,97 +29,59 @@ //! Merk pub mod chunks; - pub(crate) mod defaults; pub mod options; +pub mod apply; +pub mod clear; +pub mod committer; +pub mod get; +pub mod open; +pub mod prove; pub mod restore; +pub mod source; use std::{ cell::Cell, - cmp::Ordering, - collections::{BTreeSet, LinkedList}, + collections::{BTreeMap, BTreeSet, LinkedList}, fmt, }; +use committer::MerkCommitter; use grovedb_costs::{ cost_return_on_error, cost_return_on_error_default, cost_return_on_error_no_add, - storage_cost::{ - key_value_cost::KeyValueStorageCost, - removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, - StorageCost, - }, - ChildrenSizesWithValue, CostContext, CostResult, CostsExt, FeatureSumLength, OperationCost, + storage_cost::key_value_cost::KeyValueStorageCost, ChildrenSizesWithValue, CostContext, + CostResult, CostsExt, FeatureSumLength, OperationCost, }; use grovedb_storage::{self, Batch, RawIterator, StorageContext}; +use grovedb_version::version::GroveVersion; +use source::MerkSource; use crate::{ error::Error, - merk::{ - defaults::{MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES, ROOT_KEY_KEY}, - options::MerkOptions, + merk::{defaults::ROOT_KEY_KEY, options::MerkOptions}, + proofs::{ + chunk::{ + chunk::{LEFT, RIGHT}, + util::traversal_instruction_as_vec_bytes, + }, + query::query_item::QueryItem, + Query, }, - proofs::{encode_into, query::query_item::QueryItem, Op as ProofOp, Query}, tree::{ - kv::{ValueDefinedCostType, KV}, - AuxMerkBatch, Commit, CryptoHash, Fetch, Link, MerkBatch, Op, RefWalker, Tree, Walker, - 
NULL_HASH, + kv::ValueDefinedCostType, AuxMerkBatch, CryptoHash, Op, RefWalker, TreeNode, NULL_HASH, }, Error::{CostsError, EdError, StorageError}, + Link, MerkType::{BaseMerk, LayeredMerk, StandaloneMerk}, - TreeFeatureType, }; -type Proof = (LinkedList, Option, Option); - -/// Proof construction result -pub struct ProofConstructionResult { - /// Proof - pub proof: Vec, - /// Limit - pub limit: Option, - /// Offset - pub offset: Option, -} - -impl ProofConstructionResult { - /// New ProofConstructionResult - pub fn new(proof: Vec, limit: Option, offset: Option) -> Self { - Self { - proof, - limit, - offset, - } - } -} - -/// Proof without encoding result -pub struct ProofWithoutEncodingResult { - /// Proof - pub proof: LinkedList, - /// Limit - pub limit: Option, - /// Offset - pub offset: Option, -} - -impl ProofWithoutEncodingResult { - /// New ProofWithoutEncodingResult - pub fn new(proof: LinkedList, limit: Option, offset: Option) -> Self { - Self { - proof, - limit, - offset, - } - } -} - /// Key update types pub struct KeyUpdates { pub new_keys: BTreeSet>, pub updated_keys: BTreeSet>, - pub deleted_keys: LinkedList<(Vec, Option)>, + pub deleted_keys: LinkedList<(Vec, KeyValueStorageCost)>, pub updated_root_key_from: Option>, } @@ -128,7 +90,7 @@ impl KeyUpdates { pub fn new( new_keys: BTreeSet>, updated_keys: BTreeSet>, - deleted_keys: LinkedList<(Vec, Option)>, + deleted_keys: LinkedList<(Vec, KeyValueStorageCost)>, updated_root_key_from: Option>, ) -> Self { Self { @@ -145,7 +107,7 @@ pub type BatchValue = ( Vec, Option, ChildrenSizesWithValue, - Option, + KeyValueStorageCost, ); /// A bool type @@ -259,6 +221,17 @@ pub enum MerkType { LayeredMerk, } +impl fmt::Display for MerkType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let description = match self { + MerkType::StandaloneMerk => "StandaloneMerk", + MerkType::BaseMerk => "BaseMerk", + MerkType::LayeredMerk => "LayeredMerk", + }; + write!(f, "{}", description) + } +} + impl 
MerkType { /// Returns bool pub(crate) fn requires_root_storage_update(&self) -> bool { @@ -272,7 +245,7 @@ impl MerkType { /// A handle to a Merkle key/value store backed by RocksDB. pub struct Merk { - pub(crate) tree: Cell>, + pub(crate) tree: Cell>, pub(crate) root_tree_key: Cell>>, /// Storage pub storage: S, @@ -294,7 +267,7 @@ pub type UseTreeMutResult = CostResult< Vec, Option, ChildrenSizesWithValue, - Option, + KeyValueStorageCost, )>, Error, >; @@ -303,291 +276,6 @@ impl<'db, S> Merk where S: StorageContext<'db>, { - /// Open empty tree - pub fn open_empty(storage: S, merk_type: MerkType, is_sum_tree: bool) -> Self { - Self { - tree: Cell::new(None), - root_tree_key: Cell::new(None), - storage, - merk_type, - is_sum_tree, - } - } - - /// Open standalone tree - pub fn open_standalone(storage: S, is_sum_tree: bool) -> CostResult { - let mut merk = Self { - tree: Cell::new(None), - root_tree_key: Cell::new(None), - storage, - merk_type: StandaloneMerk, - is_sum_tree, - }; - - merk.load_base_root().map_ok(|_| merk) - } - - /// Open base tree - pub fn open_base(storage: S, is_sum_tree: bool) -> CostResult { - let mut merk = Self { - tree: Cell::new(None), - root_tree_key: Cell::new(None), - storage, - merk_type: BaseMerk, - is_sum_tree, - }; - - merk.load_base_root().map_ok(|_| merk) - } - - /// Open layered tree with root key - pub fn open_layered_with_root_key( - storage: S, - root_key: Option>, - is_sum_tree: bool, - ) -> CostResult { - let mut merk = Self { - tree: Cell::new(None), - root_tree_key: Cell::new(root_key), - storage, - merk_type: LayeredMerk, - is_sum_tree, - }; - - merk.load_root().map_ok(|_| merk) - } - - /// Deletes tree data - pub fn clear(&mut self) -> CostResult<(), Error> { - let mut cost = OperationCost::default(); - - let mut iter = self.storage.raw_iter(); - iter.seek_to_first().unwrap_add_cost(&mut cost); - - let mut to_delete = self.storage.new_batch(); - while iter.valid().unwrap_add_cost(&mut cost) { - if let Some(key) = 
iter.key().unwrap_add_cost(&mut cost) { - // todo: deal with cost reimbursement - to_delete.delete(key, None); - } - iter.next().unwrap_add_cost(&mut cost); - } - cost_return_on_error!( - &mut cost, - self.storage.commit_batch(to_delete).map_err(StorageError) - ); - self.tree.set(None); - Ok(()).wrap_with_cost(cost) - } - - /// Gets an auxiliary value. - pub fn get_aux(&self, key: &[u8]) -> CostResult>, Error> { - self.storage.get_aux(key).map_err(StorageError) - } - - /// Returns if the value at the given key exists - /// - /// Note that this is essentially the same as a normal RocksDB `get`, so - /// should be a fast operation and has almost no tree overhead. - pub fn exists(&self, key: &[u8]) -> CostResult { - self.has_node_direct(key) - } - - /// Returns if the value at the given key exists - /// - /// Note that this is essentially the same as a normal RocksDB `get`, so - /// should be a fast operation and has almost no tree overhead. - /// Contrary to a simple exists, this traverses the tree and can be faster - /// if the tree is cached, but slower if it is not - pub fn exists_by_traversing_tree(&self, key: &[u8]) -> CostResult { - self.has_node(key) - } - - /// Gets a value for the given key. If the key is not found, `None` is - /// returned. - /// - /// Note that this is essentially the same as a normal RocksDB `get`, so - /// should be a fast operation and has almost no tree overhead. - pub fn get(&self, key: &[u8], allow_cache: bool) -> CostResult>, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - node.value_as_slice() - .to_vec() - .wrap_with_cost(Default::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - node.value_as_slice() - .to_vec() - .wrap_with_cost(Default::default()) - }) - } - } - - /// Returns the feature type for the node at the given key. 
- pub fn get_feature_type( - &self, - key: &[u8], - allow_cache: bool, - ) -> CostResult, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - node.feature_type().wrap_with_cost(Default::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - node.feature_type().wrap_with_cost(Default::default()) - }) - } - } - - /// Gets a hash of a node by a given key, `None` is returned in case - /// when node not found by the key. - pub fn get_hash(&self, key: &[u8], allow_cache: bool) -> CostResult, Error> { - if allow_cache { - self.get_node_fn(key, |node| node.hash()) - } else { - self.get_node_direct_fn(key, |node| node.hash()) - } - } - - /// Gets the value hash of a node by a given key, `None` is returned in case - /// when node not found by the key. - pub fn get_value_hash( - &self, - key: &[u8], - allow_cache: bool, - ) -> CostResult, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - (*node.value_hash()).wrap_with_cost(OperationCost::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - (*node.value_hash()).wrap_with_cost(OperationCost::default()) - }) - } - } - - /// Gets a hash of a node by a given key, `None` is returned in case - /// when node not found by the key. - pub fn get_kv_hash( - &self, - key: &[u8], - allow_cache: bool, - ) -> CostResult, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()) - }) - } - } - - /// Gets the value and value hash of a node by a given key, `None` is - /// returned in case when node not found by the key. 
- pub fn get_value_and_value_hash( - &self, - key: &[u8], - allow_cache: bool, - ) -> CostResult, CryptoHash)>, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - (node.value_as_slice().to_vec(), *node.value_hash()) - .wrap_with_cost(OperationCost::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - (node.value_as_slice().to_vec(), *node.value_hash()) - .wrap_with_cost(OperationCost::default()) - }) - } - } - - /// See if a node's field exists - fn has_node_direct(&self, key: &[u8]) -> CostResult { - Tree::get(&self.storage, key).map_ok(|x| x.is_some()) - } - - /// See if a node's field exists - fn has_node(&self, key: &[u8]) -> CostResult { - self.use_tree(move |maybe_tree| { - let mut cursor = match maybe_tree { - None => return Ok(false).wrap_with_cost(Default::default()), // empty tree - Some(tree) => tree, - }; - - loop { - if key == cursor.key() { - return Ok(true).wrap_with_cost(OperationCost::default()); - } - - let left = key < cursor.key(); - let link = match cursor.link(left) { - None => return Ok(false).wrap_with_cost(Default::default()), // not found - Some(link) => link, - }; - - let maybe_child = link.tree(); - match maybe_child { - None => { - // fetch from RocksDB - break self.has_node_direct(key); - } - Some(child) => cursor = child, // traverse to child - } - } - }) - } - - /// Generic way to get a node's field - fn get_node_direct_fn(&self, key: &[u8], f: F) -> CostResult, Error> - where - F: FnOnce(&Tree) -> CostContext, - { - Tree::get(&self.storage, key).flat_map_ok(|maybe_node| { - let mut cost = OperationCost::default(); - Ok(maybe_node.map(|node| f(&node).unwrap_add_cost(&mut cost))).wrap_with_cost(cost) - }) - } - - /// Generic way to get a node's field - fn get_node_fn(&self, key: &[u8], f: F) -> CostResult, Error> - where - F: FnOnce(&Tree) -> CostContext, - { - self.use_tree(move |maybe_tree| { - let mut cursor = match maybe_tree { - None => return Ok(None).wrap_with_cost(Default::default()), // empty tree 
- Some(tree) => tree, - }; - - loop { - if key == cursor.key() { - return f(cursor).map(|x| Ok(Some(x))); - } - - let left = key < cursor.key(); - let link = match cursor.link(left) { - None => return Ok(None).wrap_with_cost(Default::default()), // not found - Some(link) => link, - }; - - let maybe_child = link.tree(); - match maybe_child { - None => { - // fetch from RocksDB - break self.get_node_direct_fn(key, f); - } - Some(child) => cursor = child, // traverse to child - } - } - }) - } - /// Returns the root hash of the tree (a digest for the entire store which /// proofs can be checked against). If the tree is empty, returns the null /// hash (zero-filled). @@ -599,6 +287,14 @@ where }) } + /// Returns if the merk has a root tree set + pub fn has_root_key(&self) -> bool { + let tree = self.tree.take(); + let res = tree.is_some(); + self.tree.set(tree); + res + } + /// Returns the total sum value in the Merk tree pub fn sum(&self) -> Result, Error> { self.use_tree(|tree| match tree { @@ -607,6 +303,11 @@ where }) } + /// Returns the height of the Merk tree + pub fn height(&self) -> Option { + self.use_tree(|tree| tree.map(|tree| tree.height())) + } + /// Returns the root non-prefixed key of the tree. If the tree is empty, /// None. pub fn root_key(&self) -> Option> { @@ -625,384 +326,6 @@ where }) } - /// Applies a batch of operations (puts and deletes) to the tree. - /// - /// This will fail if the keys in `batch` are not sorted and unique. This - /// check creates some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `apply_unchecked` for a small performance - /// gain. 
- /// - /// # Example - /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply::<_, Vec<_>>(&[(vec![4,5,6], Op::Put(vec![0], BasicMerk))], &[], None) - /// .unwrap().expect(""); - /// - /// use grovedb_merk::Op; - /// use grovedb_merk::TreeFeatureType::BasicMerk; - /// - /// let batch = &[ - /// // puts value [4,5,6] to key[1,2,3] - /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk)), - /// // deletes key [4,5,6] - /// (vec![4, 5, 6], Op::Delete), - /// ]; - /// store.apply::<_, Vec<_>>(batch, &[], None).unwrap().expect(""); - /// ``` - pub fn apply( - &mut self, - batch: &MerkBatch, - aux: &AuxMerkBatch, - options: Option, - ) -> CostResult<(), Error> - where - KB: AsRef<[u8]>, - KA: AsRef<[u8]>, - { - let use_sum_nodes = self.is_sum_tree; - self.apply_with_costs_just_in_time_value_update( - batch, - aux, - options, - &|key, value| { - Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( - key.len() as u32, - value.len() as u32, - use_sum_nodes, - )) - }, - &mut |_costs, _old_value, _value| Ok((false, None)), - &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { - Ok(( - BasicStorageRemoval(key_bytes_to_remove), - BasicStorageRemoval(value_bytes_to_remove), - )) - }, - ) - } - - /// Applies a batch of operations (puts and deletes) to the tree. - /// - /// This will fail if the keys in `batch` are not sorted and unique. This - /// check creates some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `apply_unchecked` for a small performance - /// gain. 
- /// - /// # Example - /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply::<_, Vec<_>>(&[(vec![4,5,6], Op::Put(vec![0], BasicMerk))], &[], None) - /// .unwrap().expect(""); - /// - /// use grovedb_merk::Op; - /// use grovedb_merk::TreeFeatureType::BasicMerk; - /// - /// let batch = &[ - /// // puts value [4,5,6] to key[1,2,3] - /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk)), - /// // deletes key [4,5,6] - /// (vec![4, 5, 6], Op::Delete), - /// ]; - /// store.apply::<_, Vec<_>>(batch, &[], None).unwrap().expect(""); - /// ``` - pub fn apply_with_specialized_costs( - &mut self, - batch: &MerkBatch, - aux: &AuxMerkBatch, - options: Option, - old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - ) -> CostResult<(), Error> - where - KB: AsRef<[u8]>, - KA: AsRef<[u8]>, - { - self.apply_with_costs_just_in_time_value_update( - batch, - aux, - options, - old_specialized_cost, - &mut |_costs, _old_value, _value| Ok((false, None)), - &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { - Ok(( - BasicStorageRemoval(key_bytes_to_remove), - BasicStorageRemoval(value_bytes_to_remove), - )) - }, - ) - } - - /// Applies a batch of operations (puts and deletes) to the tree with the - /// ability to update values based on costs. - /// - /// This will fail if the keys in `batch` are not sorted and unique. This - /// check creates some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `apply_unchecked` for a small performance - /// gain. 
- /// - /// # Example - /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( - /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerk))], - /// &[], - /// None, - /// &|k, v| Ok(0), - /// &mut |s, v, o| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) - /// ).unwrap().expect(""); - /// - /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; - /// use grovedb_merk::Op; - /// use grovedb_merk::TreeFeatureType::BasicMerk; - /// - /// let batch = &[ - /// // puts value [4,5,6] to key[1,2,3] - /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk)), - /// // deletes key [4,5,6] - /// (vec![4, 5, 6], Op::Delete), - /// ]; - /// - /// store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( - /// batch, - /// &[], - /// None, - /// &|k, v| Ok(0), - /// &mut |s, v, o| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) - /// ).unwrap().expect(""); - /// ``` - pub fn apply_with_costs_just_in_time_value_update( - &mut self, - batch: &MerkBatch, - aux: &AuxMerkBatch, - options: Option, - old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, - ) -> CostResult<(), Error> - where - KB: AsRef<[u8]>, - KA: AsRef<[u8]>, - { - // ensure keys in batch are sorted and unique - let mut maybe_prev_key: Option<&KB> = None; - for (key, ..) 
in batch.iter() { - if let Some(prev_key) = maybe_prev_key { - match prev_key.as_ref().cmp(key.as_ref()) { - Ordering::Greater => { - return Err(Error::InvalidInputError("Keys in batch must be sorted")) - .wrap_with_cost(Default::default()) - } - Ordering::Equal => { - return Err(Error::InvalidInputError("Keys in batch must be unique")) - .wrap_with_cost(Default::default()) - } - _ => (), - } - } - maybe_prev_key = Some(key); - } - - self.apply_unchecked( - batch, - aux, - options, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes, - ) - } - - /// Applies a batch of operations (puts and deletes) to the tree. - /// - /// # Safety - /// This is unsafe because the keys in `batch` must be sorted and unique - - /// if they are not, there will be undefined behavior. For a safe version of - /// this method which checks to ensure the batch is sorted and unique, see - /// `apply`. - /// - /// # Example - /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( - /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerk))], - /// &[], - /// None, - /// &|k, v| Ok(0), - /// &mut |s, o, v| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) - /// ).unwrap().expect(""); - /// - /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; - /// use grovedb_merk::Op; - /// use grovedb_merk::TreeFeatureType::BasicMerk; - /// - /// let batch = &[ - /// // puts value [4,5,6] to key [1,2,3] - /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk)), - /// // deletes key [4,5,6] - /// (vec![4, 5, 6], Op::Delete), - /// ]; - /// unsafe { store.apply_unchecked::<_, Vec<_>, _, _, _>( /// /// /// - /// batch, - /// &[], - /// None, - /// &|k, v| Ok(0), - /// &mut |s, o, v| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) - /// ).unwrap().expect(""); - /// } - /// ``` - pub fn apply_unchecked( - 
&mut self, - batch: &MerkBatch, - aux: &AuxMerkBatch, - options: Option, - old_specialized_cost: &C, - update_tree_value_based_on_costs: &mut U, - section_removal_bytes: &mut R, - ) -> CostResult<(), Error> - where - KB: AsRef<[u8]>, - KA: AsRef<[u8]>, - C: Fn(&Vec, &Vec) -> Result, - U: FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result<(bool, Option), Error>, - R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, - { - let maybe_walker = self - .tree - .take() - .take() - .map(|tree| Walker::new(tree, self.source())); - - Walker::apply_to( - maybe_walker, - batch, - self.source(), - old_specialized_cost, - section_removal_bytes, - ) - .flat_map_ok(|(maybe_tree, key_updates)| { - // we set the new root node of the merk tree - self.tree.set(maybe_tree); - // commit changes to db - self.commit( - key_updates, - aux, - options, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes, - ) - }) - } - - /// Creates a Merkle proof for the list of queried keys. For each key in the - /// query, if the key is found in the store then the value will be proven to - /// be in the tree. For each key in the query that does not exist in the - /// tree, its absence will be proven by including boundary keys. - /// - /// The proof returned is in an encoded format which can be verified with - /// `merk::verify`. - /// - /// This will fail if the keys in `query` are not sorted and unique. This - /// check adds some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `prove_unchecked` for a small performance - /// gain. 
- pub fn prove( - &self, - query: Query, - limit: Option, - offset: Option, - ) -> CostResult { - let left_to_right = query.left_to_right; - self.prove_unchecked(query, limit, offset, left_to_right) - .map_ok(|(proof, limit, offset)| { - let mut bytes = Vec::with_capacity(128); - encode_into(proof.iter(), &mut bytes); - ProofConstructionResult::new(bytes, limit, offset) - }) - } - - /// Creates a Merkle proof for the list of queried keys. For each key in the - /// query, if the key is found in the store then the value will be proven to - /// be in the tree. For each key in the query that does not exist in the - /// tree, its absence will be proven by including boundary keys. - /// - /// The proof returned is in an intermediate format to be later encoded - /// - /// This will fail if the keys in `query` are not sorted and unique. This - /// check adds some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `prove_unchecked` for a small performance - /// gain. - pub fn prove_without_encoding( - &self, - query: Query, - limit: Option, - offset: Option, - ) -> CostResult { - let left_to_right = query.left_to_right; - self.prove_unchecked(query, limit, offset, left_to_right) - .map_ok(|(proof, limit, offset)| ProofWithoutEncodingResult::new(proof, limit, offset)) - } - - /// Creates a Merkle proof for the list of queried keys. For each key in - /// the query, if the key is found in the store then the value will be - /// proven to be in the tree. For each key in the query that does not - /// exist in the tree, its absence will be proven by including - /// boundary keys. - /// The proof returned is in an encoded format which can be verified with - /// `merk::verify`. - /// - /// This is unsafe because the keys in `query` must be sorted and unique - - /// if they are not, there will be undefined behavior. For a safe version - /// of this method which checks to ensure the batch is sorted and - /// unique, see `prove`. 
- pub fn prove_unchecked( - &self, - query: I, - limit: Option, - offset: Option, - left_to_right: bool, - ) -> CostResult - where - Q: Into, - I: IntoIterator, - { - let query_vec: Vec = query.into_iter().map(Into::into).collect(); - - self.use_tree_mut(|maybe_tree| { - maybe_tree - .ok_or(Error::CorruptedCodeExecution( - "Cannot create proof for empty tree", - )) - .wrap_with_cost(Default::default()) - .flat_map_ok(|tree| { - let mut ref_walker = RefWalker::new(tree, self.source()); - ref_walker.create_proof(query_vec.as_slice(), limit, offset, left_to_right) - }) - .map_ok(|(proof, _, limit, offset, ..)| (proof, limit, offset)) - }) - } - /// Commit tree changes pub fn commit( &mut self, @@ -1010,22 +333,6 @@ where aux: &AuxMerkBatch, options: Option, old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, ) -> CostResult<(), Error> where K: AsRef<[u8]>, @@ -1042,12 +349,7 @@ where let mut committer = MerkCommitter::new(tree.height(), 100); cost_return_on_error!( &mut inner_cost, - tree.commit( - &mut committer, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes - ) + tree.commit(&mut committer, old_specialized_cost) ); let tree_key = tree.key(); @@ -1109,7 +411,7 @@ where to_batch.push((key, None, None, maybe_cost)); } to_batch.sort_by(|a, b| a.0.cmp(&b.0)); - for (key, maybe_sum_tree_cost, maybe_value, maybe_cost) in to_batch { + for (key, maybe_sum_tree_cost, maybe_value, storage_cost) in to_batch { if let Some((value, left_size, right_size)) = maybe_value { cost_return_on_error_no_add!( &cost, @@ -1118,12 +420,12 @@ where &key, &value, Some((maybe_sum_tree_cost, left_size, right_size)), - maybe_cost + Some(storage_cost) ) .map_err(CostsError) ); } else { - 
batch.delete(&key, maybe_cost); + batch.delete(&key, Some(storage_cost)); } } @@ -1186,22 +488,15 @@ where true.wrap_with_cost(cost) } - fn source(&self) -> MerkSource { - MerkSource { - storage: &self.storage, - is_sum_tree: self.is_sum_tree, - } - } - /// Use tree - pub(crate) fn use_tree(&self, f: impl FnOnce(Option<&Tree>) -> T) -> T { + pub(crate) fn use_tree(&self, f: impl FnOnce(Option<&TreeNode>) -> T) -> T { let tree = self.tree.take(); let res = f(tree.as_ref()); self.tree.set(tree); res } - fn use_tree_mut(&self, mut f: impl FnMut(Option<&mut Tree>) -> T) -> T { + fn use_tree_mut(&self, mut f: impl FnMut(Option<&mut TreeNode>) -> T) -> T { let mut tree = self.tree.take(); let res = f(tree.as_mut()); self.tree.set(tree); @@ -1228,7 +523,13 @@ where /// Loads the Merk from the base root key /// The base root key should only be used if the Merk tree is independent /// Meaning that it doesn't have a parent Merk - pub(crate) fn load_base_root(&mut self) -> CostResult<(), Error> { + pub(crate) fn load_base_root( + &mut self, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { self.storage .get_root(ROOT_KEY_KEY) .map(|root_result| root_result.map_err(Error::StorageError)) @@ -1237,7 +538,13 @@ where if let Some(tree_root_key) = tree_root_key_opt { // Trying to build a tree out of it, costs will be accumulated because // `Tree::get` returns `CostContext` and this call happens inside `flat_map_ok`. 
- Tree::get(&self.storage, tree_root_key).map_ok(|tree| { + TreeNode::get( + &self.storage, + tree_root_key, + value_defined_cost_fn, + grove_version, + ) + .map_ok(|tree| { if let Some(t) = tree.as_ref() { self.root_tree_key = Cell::new(Some(t.key().to_vec())); } @@ -1252,12 +559,24 @@ where /// Loads the Merk from it's parent root key /// The base root key should only be used if the Merk tree is independent /// Meaning that it doesn't have a parent Merk - pub(crate) fn load_root(&mut self) -> CostResult<(), Error> { + pub(crate) fn load_root( + &mut self, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { // In case of successful seek for root key check if it exists if let Some(tree_root_key) = self.root_tree_key.get_mut() { // Trying to build a tree out of it, costs will be accumulated because // `Tree::get` returns `CostContext` and this call happens inside `flat_map_ok`. - Tree::get(&self.storage, tree_root_key).map_ok(|tree| { + TreeNode::get( + &self.storage, + tree_root_key, + value_defined_cost_fn, + grove_version, + ) + .map_ok(|tree| { self.tree = Cell::new(tree); }) } else { @@ -1265,184 +584,187 @@ where Ok(()).wrap_with_cost(Default::default()) } } -} -fn fetch_node<'db>(db: &impl StorageContext<'db>, key: &[u8]) -> Result, Error> { - let bytes = db.get(key).unwrap().map_err(StorageError)?; // TODO: get_pinned ? - if let Some(bytes) = bytes { - Ok(Some(Tree::decode(key.to_vec(), &bytes).map_err(EdError)?)) - } else { - Ok(None) - } -} + /// Verifies the correctness of a merk tree + /// hash values are computed correctly, heights are accurate and links + /// consistent with backing store. 
+ // TODO: define the return types + pub fn verify( + &self, + skip_sum_checks: bool, + grove_version: &GroveVersion, + ) -> (BTreeMap, CryptoHash>, BTreeMap, Vec>) { + let tree = self.tree.take(); -// impl Clone for Merk { -// fn clone(&self) -> Self { -// let tree_clone = match self.tree.take() { -// None => None, -// Some(tree) => { -// let clone = tree.clone(); -// self.tree.set(Some(tree)); -// Some(clone) -// } -// }; -// Self { -// tree: Cell::new(tree_clone), -// storage_cost: self.storage_cost.clone(), -// } -// } -// } + let mut bad_link_map: BTreeMap, CryptoHash> = BTreeMap::new(); + let mut parent_keys: BTreeMap, Vec> = BTreeMap::new(); + let mut root_traversal_instruction = vec![]; + + // TODO: remove clone + self.verify_tree( + // TODO: handle unwrap + &tree.clone().unwrap(), + &mut root_traversal_instruction, + &mut bad_link_map, + &mut parent_keys, + skip_sum_checks, + grove_version, + ); + self.tree.set(tree); -// // TODO: get rid of Fetch/source and use GroveDB storage_cost abstraction + (bad_link_map, parent_keys) + } -#[derive(Debug)] -pub struct MerkSource<'s, S> { - storage: &'s S, - is_sum_tree: bool, -} + fn verify_tree( + &self, + tree: &TreeNode, + traversal_instruction: &mut Vec, + bad_link_map: &mut BTreeMap, CryptoHash>, + parent_keys: &mut BTreeMap, Vec>, + skip_sum_checks: bool, + grove_version: &GroveVersion, + ) { + if let Some(link) = tree.link(LEFT) { + traversal_instruction.push(LEFT); + self.verify_link( + link, + tree.key(), + traversal_instruction, + bad_link_map, + parent_keys, + skip_sum_checks, + grove_version, + ); + traversal_instruction.pop(); + } -impl<'s, S> Clone for MerkSource<'s, S> { - fn clone(&self) -> Self { - MerkSource { - storage: self.storage, - is_sum_tree: self.is_sum_tree, + if let Some(link) = tree.link(RIGHT) { + traversal_instruction.push(RIGHT); + self.verify_link( + link, + tree.key(), + traversal_instruction, + bad_link_map, + parent_keys, + skip_sum_checks, + grove_version, + ); + 
traversal_instruction.pop(); } } -} -impl<'s, 'db, S> Fetch for MerkSource<'s, S> -where - S: StorageContext<'db>, -{ - fn fetch(&self, link: &Link) -> CostResult { - Tree::get(self.storage, link.key()) - .map_ok(|x| x.ok_or(Error::KeyNotFoundError("Key not found for fetch"))) - .flatten() - } -} + fn verify_link( + &self, + link: &Link, + parent_key: &[u8], + traversal_instruction: &mut Vec, + bad_link_map: &mut BTreeMap, CryptoHash>, + parent_keys: &mut BTreeMap, Vec>, + skip_sum_checks: bool, + grove_version: &GroveVersion, + ) { + let (hash, key, sum) = match link { + Link::Reference { hash, key, sum, .. } => { + (hash.to_owned(), key.to_owned(), sum.to_owned()) + } + Link::Modified { tree, .. } => ( + tree.hash().unwrap(), + tree.key().to_vec(), + tree.sum().unwrap(), + ), + Link::Loaded { + hash, + child_heights: _, + sum, + tree, + } => (hash.to_owned(), tree.key().to_vec(), sum.to_owned()), + _ => todo!(), + }; -struct MerkCommitter { - /// The batch has a key, maybe a value, with the value bytes, maybe the left - /// child size and maybe the right child size, then the - /// key_value_storage_cost - batch: Vec, - height: u8, - levels: u8, -} + let instruction_id = traversal_instruction_as_vec_bytes(traversal_instruction); + let node = TreeNode::get( + &self.storage, + key, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap(); -impl MerkCommitter { - fn new(height: u8, levels: u8) -> Self { - Self { - batch: Vec::with_capacity(10000), - height, - levels, + if node.is_err() { + bad_link_map.insert(instruction_id.to_vec(), hash); + parent_keys.insert(instruction_id.to_vec(), parent_key.to_vec()); + return; } - } -} -impl Commit for MerkCommitter { - fn write( - &mut self, - tree: &mut Tree, - old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - 
u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, - ) -> Result<(), Error> { - let tree_size = tree.encoding_length(); - let (mut current_tree_plus_hook_size, mut storage_costs) = - tree.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)?; - let mut i = 0; - - if let Some(old_value) = tree.old_value.clone() { - // At this point the tree value can be updated based on client requirements - // For example to store the costs - loop { - let (flags_changed, value_defined_cost) = update_tree_value_based_on_costs( - &storage_costs.value_storage_cost, - &old_value, - tree.value_mut_ref(), - )?; - if !flags_changed { - break; - } else { - tree.inner.kv.value_defined_cost = value_defined_cost; - let after_update_tree_plus_hook_size = - tree.value_encoding_length_with_parent_to_child_reference(); - if after_update_tree_plus_hook_size == current_tree_plus_hook_size { - break; - } - let new_size_and_storage_costs = - tree.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)?; - current_tree_plus_hook_size = new_size_and_storage_costs.0; - storage_costs = new_size_and_storage_costs.1; - } - if i > MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES { - return Err(Error::CyclicError( - "updated value based on costs too many times", - )); - } - i += 1; - } + let node = node.unwrap(); + if node.is_none() { + bad_link_map.insert(instruction_id.to_vec(), hash); + parent_keys.insert(instruction_id.to_vec(), parent_key.to_vec()); + return; + } - if let BasicStorageRemoval(removed_bytes) = - storage_costs.value_storage_cost.removed_bytes - { - let (_, value_removed_bytes) = section_removal_bytes(&old_value, 0, removed_bytes)?; - storage_costs.value_storage_cost.removed_bytes = value_removed_bytes; - } + let node = node.unwrap(); + if node.hash().unwrap() != hash { + bad_link_map.insert(instruction_id.to_vec(), hash); + parent_keys.insert(instruction_id.to_vec(), parent_key.to_vec()); + return; } - // Update old tree size after generating 
value storage_cost cost - tree.old_size_with_parent_to_child_hook = current_tree_plus_hook_size; - tree.old_value = Some(tree.value_ref().clone()); - - let mut buf = Vec::with_capacity(tree_size); - tree.encode_into(&mut buf); - - let left_child_sizes = tree.child_ref_and_sum_size(true); - let right_child_sizes = tree.child_ref_and_sum_size(false); - self.batch.push(( - tree.key().to_vec(), - tree.feature_type().sum_length(), - Some((buf, left_child_sizes, right_child_sizes)), - Some(storage_costs), - )); - Ok(()) + // Need to skip this when restoring a sum tree + if !skip_sum_checks && node.sum().unwrap() != sum { + bad_link_map.insert(instruction_id.to_vec(), hash); + parent_keys.insert(instruction_id.to_vec(), parent_key.to_vec()); + return; + } + + // TODO: check child heights + // all checks passed, recurse + self.verify_tree( + &node, + traversal_instruction, + bad_link_map, + parent_keys, + skip_sum_checks, + grove_version, + ); } +} - fn prune(&self, tree: &Tree) -> (bool, bool) { - // keep N top levels of tree - let prune = (self.height - tree.height()) >= self.levels; - (prune, prune) +fn fetch_node<'db>( + db: &impl StorageContext<'db>, + key: &[u8], + value_defined_cost_fn: Option Option>, + grove_version: &GroveVersion, +) -> Result, Error> { + let bytes = db.get(key).unwrap().map_err(StorageError)?; // TODO: get_pinned ? 
+ if let Some(bytes) = bytes { + Ok(Some( + TreeNode::decode(key.to_vec(), &bytes, value_defined_cost_fn, grove_version) + .map_err(EdError)?, + )) + } else { + Ok(None) } } +// // TODO: get rid of Fetch/source and use GroveDB storage_cost abstraction + #[cfg(test)] mod test { - use grovedb_costs::OperationCost; + use grovedb_path::SubtreePath; use grovedb_storage::{ - rocksdb_storage::{test_utils::TempStorage, PrefixedRocksDbStorageContext, RocksDbStorage}, + rocksdb_storage::{PrefixedRocksDbStorageContext, RocksDbStorage}, RawIterator, Storage, StorageBatch, StorageContext, }; + use grovedb_version::version::GroveVersion; use tempfile::TempDir; - use super::{Merk, MerkSource, RefWalker}; - use crate::{test_utils::*, Op, TreeFeatureType::BasicMerk}; + use super::{Merk, RefWalker}; + use crate::{ + merk::source::MerkSource, test_utils::*, tree::kv::ValueDefinedCostType, Op, + TreeFeatureType::BasicMerkNode, + }; // TODO: Close and then reopen test @@ -1453,103 +775,13 @@ mod test { }) } - #[test] - fn test_reopen_root_hash() { - let tmp_dir = TempDir::new().expect("cannot open tempdir"); - let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) - .expect("cannot open rocksdb storage"); - let test_prefix = [b"ayy"]; - - let batch = StorageBatch::new(); - let mut merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::from(test_prefix.as_ref()), Some(&batch)) - .unwrap(), - false, - ) - .unwrap() - .unwrap(); - - merk.apply::<_, Vec<_>>( - &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk))], - &[], - None, - ) - .unwrap() - .expect("apply failed"); - - let root_hash = merk.root_hash(); - - storage - .commit_multi_context_batch(batch, None) - .unwrap() - .expect("cannot commit batch"); - - let merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::from(test_prefix.as_ref()), None) - .unwrap(), - false, - ) - .unwrap() - .unwrap(); - assert_eq!(merk.root_hash(), root_hash); - } - - #[test] - fn test_open_fee() { - let 
storage = TempStorage::new(); - let batch = StorageBatch::new(); - - let merk_fee_context = Merk::open_base( - storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) - .unwrap(), - false, - ); - - // Opening not existing merk should cost only root key seek (except context - // creation) - assert!(matches!( - merk_fee_context.cost(), - OperationCost { seek_count: 1, .. } - )); - - let mut merk = merk_fee_context.unwrap().unwrap(); - merk.apply::<_, Vec<_>>( - &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk))], - &[], - None, - ) - .unwrap() - .expect("apply failed"); - - storage - .commit_multi_context_batch(batch, None) - .unwrap() - .expect("cannot commit batch"); - - let merk_fee_context = Merk::open_base( - storage - .get_storage_context(SubtreePath::empty(), None) - .unwrap(), - false, - ); - - // Opening existing merk should cost two seeks. (except context creation) - assert!(matches!( - merk_fee_context.cost(), - OperationCost { seek_count: 2, .. } - )); - assert!(merk_fee_context.cost().storage_loaded_bytes > 0); - } - #[test] fn simple_insert_apply() { + let grove_version = GroveVersion::latest(); let batch_size = 20; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..batch_size); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); @@ -1564,55 +796,70 @@ mod test { } #[test] - fn insert_uncached() { - let batch_size = 20; - let mut merk = TempMerk::new(); + fn tree_height() { + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..1); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(1)); - let batch = make_batch_seq(0..batch_size); - merk.apply::<_, Vec<_>>(&batch, &[], None) + // height 2 + let mut merk = TempMerk::new(grove_version); + let batch 
= make_batch_seq(0..2); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); - assert_invariants(&merk); + assert_eq!(merk.height(), Some(2)); - let batch = make_batch_seq(batch_size..(batch_size * 2)); - merk.apply::<_, Vec<_>>(&batch, &[], None) + // height 5 + // 2^5 - 1 = 31 (max number of elements in tree of height 5) + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..31); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); - assert_invariants(&merk); + assert_eq!(merk.height(), Some(5)); + + // should still be height 5 for 29 elements + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..29); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(5)); } #[test] - fn test_has_node_with_empty_tree() { - let mut merk = TempMerk::new(); - - let key = b"something"; - - let result = merk.has_node(key).unwrap().unwrap(); - - assert!(!result); - - let batch_entry = (key, Op::Put(vec![123; 60], BasicMerk)); - - let batch = vec![batch_entry]; + fn insert_uncached() { + let grove_version = GroveVersion::latest(); + let batch_size = 20; + let mut merk = TempMerk::new(grove_version); - merk.apply::<_, Vec<_>>(&batch, &[], None) + let batch = make_batch_seq(0..batch_size); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() - .expect("should ..."); - - let result = merk.has_node(key).unwrap().unwrap(); + .expect("apply failed"); + assert_invariants(&merk); - assert!(result); + let batch = make_batch_seq(batch_size..(batch_size * 2)); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_invariants(&merk); } #[test] fn insert_two() { + let grove_version = GroveVersion::latest(); let tree_size = 2; let batch_size = 1; - let mut merk = TempMerk::new(); + let mut merk = 
TempMerk::new(grove_version); for i in 0..(tree_size / batch_size) { let batch = make_batch_rand(batch_size, i); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); } @@ -1620,14 +867,15 @@ mod test { #[test] fn insert_rand() { + let grove_version = GroveVersion::latest(); let tree_size = 40; let batch_size = 4; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..(tree_size / batch_size) { println!("i:{i}"); let batch = make_batch_rand(batch_size, i); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); } @@ -1635,15 +883,16 @@ mod test { #[test] fn actual_deletes() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_rand(10, 1); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); let key = batch.first().unwrap().0.clone(); - merk.apply::<_, Vec<_>>(&[(key.clone(), Op::Delete)], &[], None) + merk.apply::<_, Vec<_>>(&[(key.clone(), Op::Delete)], &[], None, grove_version) .unwrap() .unwrap(); @@ -1653,15 +902,17 @@ mod test { #[test] fn aux_data() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); merk.apply::, _>( &[], - &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk), None)], + &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode), None)], None, + grove_version, ) .unwrap() .expect("apply failed"); - merk.commit(); + merk.commit(grove_version); let val = merk.get_aux(&[1, 2, 3]).unwrap().unwrap(); assert_eq!(val, Some(vec![4, 5, 6])); @@ -1669,35 +920,70 @@ mod test { #[test] fn get_not_found() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut 
merk = TempMerk::new(grove_version); // no root - assert!(merk.get(&[1, 2, 3], true).unwrap().unwrap().is_none()); + assert!(merk + .get( + &[1, 2, 3], + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .unwrap() + .is_none()); // cached - merk.apply::<_, Vec<_>>(&[(vec![5, 5, 5], Op::Put(vec![], BasicMerk))], &[], None) + merk.apply::<_, Vec<_>>( + &[(vec![5, 5, 5], Op::Put(vec![], BasicMerkNode))], + &[], + None, + grove_version, + ) + .unwrap() + .unwrap(); + assert!(merk + .get( + &[1, 2, 3], + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) .unwrap() - .unwrap(); - assert!(merk.get(&[1, 2, 3], true).unwrap().unwrap().is_none()); + .unwrap() + .is_none()); // uncached merk.apply::<_, Vec<_>>( &[ - (vec![0, 0, 0], Op::Put(vec![], BasicMerk)), - (vec![1, 1, 1], Op::Put(vec![], BasicMerk)), - (vec![2, 2, 2], Op::Put(vec![], BasicMerk)), + (vec![0, 0, 0], Op::Put(vec![], BasicMerkNode)), + (vec![1, 1, 1], Op::Put(vec![], BasicMerkNode)), + (vec![2, 2, 2], Op::Put(vec![], BasicMerkNode)), ], &[], None, + grove_version, ) .unwrap() .unwrap(); - assert!(merk.get(&[3, 3, 3], true).unwrap().unwrap().is_none()); + assert!(merk + .get( + &[3, 3, 3], + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .unwrap() + .is_none()); } // TODO: what this test should do? 
#[test] fn reopen_check_root_hash() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); @@ -1706,21 +992,24 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); let batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); let batch = make_batch_seq(11..12); - merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); } #[test] fn test_get_node_cost() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); @@ -1729,11 +1018,13 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); let batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); drop(merk); @@ -1741,15 +1032,33 @@ mod test { #[test] fn reopen() { + let grove_version = GroveVersion::latest(); fn collect( mut node: RefWalker>, nodes: &mut Vec>, ) { + let grove_version = GroveVersion::latest(); nodes.push(node.tree().encode()); - if let Some(c) = node.walk(true).unwrap().unwrap() { + if let Some(c) = node + .walk( + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap() + { collect(c, nodes); } - if let Some(c) = node.walk(false).unwrap().unwrap() { + if let 
Some(c) = node + .walk( + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap() + { collect(c, nodes); } } @@ -1765,11 +1074,13 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); let merk_batch = make_batch_seq(1..10_000); - merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); @@ -1782,6 +1093,8 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1801,6 +1114,8 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1818,10 +1133,8 @@ mod test { #[test] fn reopen_iter() { - fn collect<'db, 'ctx>( - iter: PrefixedStorageIter<'db, 'ctx>, - nodes: &mut Vec<(Vec, Vec)>, - ) { + let grove_version = GroveVersion::latest(); + fn collect(iter: PrefixedStorageIter<'_, '_>, nodes: &mut Vec<(Vec, Vec)>) { while iter.valid().unwrap() { nodes.push(( iter.key().unwrap().unwrap().to_vec(), @@ -1841,11 +1154,13 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); let merk_batch = make_batch_seq(1..10_000); - merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); @@ -1860,6 +1175,8 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1873,6 +1190,8 @@ mod test { 
.get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1885,6 +1204,7 @@ mod test { #[test] fn update_node() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); @@ -1894,41 +1214,56 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); merk.apply::<_, Vec<_>>( - &[(b"9".to_vec(), Op::Put(b"a".to_vec(), BasicMerk))], + &[(b"9".to_vec(), Op::Put(b"a".to_vec(), BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("should insert successfully"); merk.apply::<_, Vec<_>>( - &[(b"10".to_vec(), Op::Put(b"a".to_vec(), BasicMerk))], + &[(b"10".to_vec(), Op::Put(b"a".to_vec(), BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("should insert successfully"); let result = merk - .get(b"10".as_slice(), true) + .get( + b"10".as_slice(), + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get successfully"); assert_eq!(result, Some(b"a".to_vec())); // Update the node merk.apply::<_, Vec<_>>( - &[(b"10".to_vec(), Op::Put(b"b".to_vec(), BasicMerk))], + &[(b"10".to_vec(), Op::Put(b"b".to_vec(), BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("should insert successfully"); let result = merk - .get(b"10".as_slice(), true) + .get( + b"10".as_slice(), + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get successfully"); assert_eq!(result, Some(b"b".to_vec())); @@ -1943,20 +1278,28 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, 
) .unwrap() .expect("cannot open merk"); // Update the node after dropping merk merk.apply::<_, Vec<_>>( - &[(b"10".to_vec(), Op::Put(b"c".to_vec(), BasicMerk))], + &[(b"10".to_vec(), Op::Put(b"c".to_vec(), BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("should insert successfully"); let result = merk - .get(b"10".as_slice(), true) + .get( + b"10".as_slice(), + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("should get successfully"); assert_eq!(result, Some(b"c".to_vec())); diff --git a/merk/src/merk/open.rs b/merk/src/merk/open.rs new file mode 100644 index 000000000..c8646afaf --- /dev/null +++ b/merk/src/merk/open.rs @@ -0,0 +1,207 @@ +use std::cell::Cell; + +use grovedb_costs::CostResult; +use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; + +use crate::{ + tree::kv::ValueDefinedCostType, + Error, Merk, MerkType, + MerkType::{BaseMerk, LayeredMerk, StandaloneMerk}, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Open empty tree + pub fn open_empty(storage: S, merk_type: MerkType, is_sum_tree: bool) -> Self { + Self { + tree: Cell::new(None), + root_tree_key: Cell::new(None), + storage, + merk_type, + is_sum_tree, + } + } + + /// Open standalone tree + pub fn open_standalone( + storage: S, + is_sum_tree: bool, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult { + let mut merk = Self { + tree: Cell::new(None), + root_tree_key: Cell::new(None), + storage, + merk_type: StandaloneMerk, + is_sum_tree, + }; + + merk.load_base_root(value_defined_cost_fn, grove_version) + .map_ok(|_| merk) + } + + /// Open base tree + pub fn open_base( + storage: S, + is_sum_tree: bool, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult { + let mut merk = Self { + tree: Cell::new(None), + root_tree_key: 
Cell::new(None), + storage, + merk_type: BaseMerk, + is_sum_tree, + }; + + merk.load_base_root(value_defined_cost_fn, grove_version) + .map_ok(|_| merk) + } + + /// Open layered tree with root key + pub fn open_layered_with_root_key( + storage: S, + root_key: Option>, + is_sum_tree: bool, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult { + let mut merk = Self { + tree: Cell::new(None), + root_tree_key: Cell::new(root_key), + storage, + merk_type: LayeredMerk, + is_sum_tree, + }; + + merk.load_root(value_defined_cost_fn, grove_version) + .map_ok(|_| merk) + } +} + +#[cfg(test)] +mod test { + use grovedb_costs::OperationCost; + use grovedb_path::SubtreePath; + use grovedb_storage::{ + rocksdb_storage::{test_utils::TempStorage, RocksDbStorage}, + Storage, StorageBatch, + }; + use grovedb_version::version::GroveVersion; + use tempfile::TempDir; + + use crate::{tree::kv::ValueDefinedCostType, Merk, Op, TreeFeatureType::BasicMerkNode}; + + #[test] + fn test_reopen_root_hash() { + let grove_version = GroveVersion::latest(); + let tmp_dir = TempDir::new().expect("cannot open tempdir"); + let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) + .expect("cannot open rocksdb storage"); + let test_prefix = [b"ayy"]; + + let batch = StorageBatch::new(); + let mut merk = Merk::open_base( + storage + .get_storage_context(SubtreePath::from(test_prefix.as_ref()), Some(&batch)) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + + merk.apply::<_, Vec<_>>( + &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode))], + &[], + None, + grove_version, + ) + .unwrap() + .expect("apply failed"); + + let root_hash = merk.root_hash(); + + storage + .commit_multi_context_batch(batch, None) + .unwrap() + .expect("cannot commit batch"); + + let merk = Merk::open_base( + storage + 
.get_storage_context(SubtreePath::from(test_prefix.as_ref()), None) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + assert_eq!(merk.root_hash(), root_hash); + } + + #[test] + fn test_open_fee() { + let grove_version = GroveVersion::latest(); + let storage = TempStorage::new(); + let batch = StorageBatch::new(); + + let merk_fee_context = Merk::open_base( + storage + .get_storage_context(SubtreePath::empty(), Some(&batch)) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ); + // Opening not existing merk should cost only root key seek (except context + // creation) + assert!(matches!( + merk_fee_context.cost(), + OperationCost { seek_count: 1, .. } + )); + + let mut merk = merk_fee_context.unwrap().unwrap(); + merk.apply::<_, Vec<_>>( + &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode))], + &[], + None, + grove_version, + ) + .unwrap() + .expect("apply failed"); + + storage + .commit_multi_context_batch(batch, None) + .unwrap() + .expect("cannot commit batch"); + + let merk_fee_context = Merk::open_base( + storage + .get_storage_context(SubtreePath::empty(), None) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ); + + // Opening existing merk should cost two seeks. (except context creation) + assert!(matches!( + merk_fee_context.cost(), + OperationCost { seek_count: 2, .. 
} + )); + assert!(merk_fee_context.cost().storage_loaded_bytes > 0); + } +} diff --git a/merk/src/merk/prove.rs b/merk/src/merk/prove.rs new file mode 100644 index 000000000..a92f28fb0 --- /dev/null +++ b/merk/src/merk/prove.rs @@ -0,0 +1,174 @@ +use std::collections::LinkedList; + +use grovedb_costs::{CostResult, CostsExt}; +use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; + +use crate::{ + proofs::{encode_into, query::QueryItem, Op as ProofOp, Query}, + tree::RefWalker, + Error, Merk, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Creates a Merkle proof for the list of queried keys. For each key in the + /// query, if the key is found in the store then the value will be proven to + /// be in the tree. For each key in the query that does not exist in the + /// tree, its absence will be proven by including boundary keys. + /// + /// The proof returned is in an encoded format which can be verified with + /// `merk::verify`. + /// + /// This will fail if the keys in `query` are not sorted and unique. This + /// check adds some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `prove_unchecked` for a small performance + /// gain. + pub fn prove( + &self, + query: Query, + limit: Option, + grove_version: &GroveVersion, + ) -> CostResult { + let left_to_right = query.left_to_right; + self.prove_unchecked(query, limit, left_to_right, grove_version) + .map_ok(|(proof, limit)| { + let mut bytes = Vec::with_capacity(128); + encode_into(proof.iter(), &mut bytes); + ProofConstructionResult::new(bytes, limit) + }) + } + + /// Creates a Merkle proof for the list of queried keys. For each key in the + /// query, if the key is found in the store then the value will be proven to + /// be in the tree. For each key in the query that does not exist in the + /// tree, its absence will be proven by including boundary keys. 
+ /// + /// The proof returned is in an intermediate format to be later encoded + /// + /// This will fail if the keys in `query` are not sorted and unique. This + /// check adds some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `prove_unchecked` for a small performance + /// gain. + pub fn prove_without_encoding( + &self, + query: Query, + limit: Option, + grove_version: &GroveVersion, + ) -> CostResult { + let left_to_right = query.left_to_right; + self.prove_unchecked(query, limit, left_to_right, grove_version) + .map_ok(|(proof, limit)| ProofWithoutEncodingResult::new(proof, limit)) + } + + /// Creates a Merkle proof for the list of queried keys. For each key in + /// the query, if the key is found in the store then the value will be + /// proven to be in the tree. For each key in the query that does not + /// exist in the tree, its absence will be proven by including + /// boundary keys. + /// The proof returned is in an encoded format which can be verified with + /// `merk::verify`. + /// + /// This is unsafe because the keys in `query` must be sorted and unique - + /// if they are not, there will be undefined behavior. For a safe version + /// of this method which checks to ensure the batch is sorted and + /// unique, see `prove`. 
+ pub fn prove_unchecked( + &self, + query: I, + limit: Option, + left_to_right: bool, + grove_version: &GroveVersion, + ) -> CostResult + where + Q: Into, + I: IntoIterator, + { + let query_vec: Vec = query.into_iter().map(Into::into).collect(); + + self.use_tree_mut(|maybe_tree| { + maybe_tree + .ok_or(Error::CorruptedCodeExecution( + "Cannot create proof for empty tree", + )) + .wrap_with_cost(Default::default()) + .flat_map_ok(|tree| { + let mut ref_walker = RefWalker::new(tree, self.source()); + ref_walker.create_proof( + query_vec.as_slice(), + limit, + left_to_right, + grove_version, + ) + }) + .map_ok(|(proof, _, limit, ..)| (proof, limit)) + }) + } + + /// Creates a Merkle proof for the list of queried keys. For each key in + /// the query, if the key is found in the store then the value will be + /// proven to be in the tree. For each key in the query that does not + /// exist in the tree, its absence will be proven by including + /// boundary keys. + /// The proof returned is in an encoded format which can be verified with + /// `merk::verify`. + /// + /// This is unsafe because the keys in `query` must be sorted and unique - + /// if they are not, there will be undefined behavior. For a safe version + /// of this method which checks to ensure the batch is sorted and + /// unique, see `prove`. 
+ pub fn prove_unchecked_query_items( + &self, + query_items: &[QueryItem], + limit: Option, + left_to_right: bool, + grove_version: &GroveVersion, + ) -> CostResult { + self.use_tree_mut(|maybe_tree| { + maybe_tree + .ok_or(Error::CorruptedCodeExecution( + "Cannot create proof for empty tree", + )) + .wrap_with_cost(Default::default()) + .flat_map_ok(|tree| { + let mut ref_walker = RefWalker::new(tree, self.source()); + ref_walker.create_proof(query_items, limit, left_to_right, grove_version) + }) + .map_ok(|(proof, _, limit, ..)| (proof, limit)) + }) + } +} + +type Proof = (LinkedList, Option); + +/// Proof construction result +pub struct ProofConstructionResult { + /// Proof + pub proof: Vec, + /// Limit + pub limit: Option, +} + +impl ProofConstructionResult { + /// New ProofConstructionResult + pub fn new(proof: Vec, limit: Option) -> Self { + Self { proof, limit } + } +} + +/// Proof without encoding result +pub struct ProofWithoutEncodingResult { + /// Proof + pub proof: LinkedList, + /// Limit + pub limit: Option, +} + +impl ProofWithoutEncodingResult { + /// New ProofWithoutEncodingResult + pub fn new(proof: LinkedList, limit: Option) -> Self { + Self { proof, limit } + } +} diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index 23cef7038..1082e80b8 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -29,250 +29,298 @@ //! Provides `Restorer`, which can create a replica of a Merk instance by //! receiving chunk proofs. 
-#[cfg(feature = "full")] -use std::{iter::Peekable, u8}; +use std::collections::BTreeMap; -#[cfg(feature = "full")] use grovedb_storage::{Batch, StorageContext}; +use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] -use super::Merk; -#[cfg(feature = "full")] use crate::{ - error::Error, + merk, merk::MerkSource, proofs::{ - chunk::{verify_leaf, verify_trunk, MIN_TRUNK_HEIGHT}, - tree::{Child, Tree as ProofTree}, + chunk::{ + chunk::{LEFT, RIGHT}, + chunk_op::ChunkOp, + error::{ChunkError, ChunkError::InternalError}, + util::{traversal_instruction_as_vec_bytes, vec_bytes_as_traversal_instruction}, + }, + tree::{execute, Child, Tree as ProofTree}, Node, Op, }, - tree::{combine_hash, value_hash, Link, RefWalker, Tree}, - CryptoHash, - Error::{CostsError, EdError, StorageError}, - TreeFeatureType::BasicMerk, + tree::{combine_hash, kv::ValueDefinedCostType, RefWalker, TreeNode}, + CryptoHash, Error, + Error::{CostsError, StorageError}, + Link, Merk, }; -#[cfg(feature = "full")] -/// A `Restorer` handles decoding, verifying, and storing chunk proofs to -/// replicate an entire Merk tree. It expects the chunks to be processed in -/// order, retrying the last chunk if verification fails. +/// Restorer handles verification of chunks and replication of Merk trees. +/// Chunks can be processed randomly as long as their parent has been processed +/// already. pub struct Restorer { - leaf_hashes: Option>>, - parent_keys: Option>>>, - trunk_height: Option, merk: Merk, - expected_root_hash: CryptoHash, - combining_value: Option>, + chunk_id_to_root_hash: BTreeMap, CryptoHash>, + parent_key_value_hash: Option, + // this is used to keep track of parents whose links need to be rewritten + parent_keys: BTreeMap, Vec>, } -#[cfg(feature = "full")] impl<'db, S: StorageContext<'db>> Restorer { - /// Creates a new `Restorer`, which will initialize a new Merk at the given - /// file path. 
The first chunk (the "trunk") will be compared against - /// `expected_root_hash`, then each subsequent chunk will be compared - /// against the hashes stored in the trunk, so that the restore process will - /// never allow malicious peers to send more than a single invalid chunk. + /// Initializes a new chunk restorer with the expected root hash for the + /// first chunk pub fn new( merk: Merk, - combining_value: Option>, expected_root_hash: CryptoHash, + parent_key_value_hash: Option, ) -> Self { + let mut chunk_id_to_root_hash = BTreeMap::new(); + chunk_id_to_root_hash.insert(traversal_instruction_as_vec_bytes(&[]), expected_root_hash); Self { - expected_root_hash, - combining_value, - trunk_height: None, merk, - leaf_hashes: None, - parent_keys: None, + chunk_id_to_root_hash, + parent_key_value_hash, + parent_keys: BTreeMap::new(), } } - /// Verifies a chunk and writes it to the working RocksDB instance. Expects - /// to be called for each chunk in order. Returns the number of remaining - /// chunks. - /// - /// Once there are no remaining chunks to be processed, `finalize` should - /// be called. - pub fn process_chunk(&mut self, ops: impl IntoIterator) -> Result { - match self.leaf_hashes { - None => self.process_trunk(ops), - Some(_) => self.process_leaf(ops), + /// Processes a chunk at some chunk id, returns the chunks id's of chunks + /// that can be requested + pub fn process_chunk( + &mut self, + chunk_id: &[u8], + chunk: Vec, + grove_version: &GroveVersion, + ) -> Result>, Error> { + let expected_root_hash = self + .chunk_id_to_root_hash + .get(chunk_id) + .ok_or(Error::ChunkRestoringError(ChunkError::UnexpectedChunk))?; + + let mut parent_key_value_hash: Option = None; + if chunk_id.is_empty() { + parent_key_value_hash = self.parent_key_value_hash; } - } + let chunk_tree = Self::verify_chunk(chunk, expected_root_hash, &parent_key_value_hash)?; - /// Consumes the `Restorer` and returns the newly-created, fully-populated - /// Merk instance. 
This method will return an error if called before - /// processing all chunks (e.g. `restorer.remaining_chunks()` is not equal - /// to 0). - pub fn finalize(mut self) -> Result, Error> { - if self.remaining_chunks().unwrap_or(0) != 0 { - return Err(Error::ChunkRestoringError( - "Called finalize before all chunks were processed".to_string(), - )); - } + let mut root_traversal_instruction = vec_bytes_as_traversal_instruction(chunk_id)?; - if self.trunk_height.unwrap() >= MIN_TRUNK_HEIGHT { - self.rewrite_trunk_child_heights()?; + if root_traversal_instruction.is_empty() { + let _ = self.merk.set_base_root_key(Some(chunk_tree.key().to_vec())); + } else { + // every non root chunk has some associated parent with an placeholder link + // here we update the placeholder link to represent the true data + self.rewrite_parent_link( + chunk_id, + &root_traversal_instruction, + &chunk_tree, + grove_version, + )?; } - self.merk.load_base_root().unwrap()?; + // next up, we need to write the chunk and build the map again + let chunk_write_result = self.write_chunk(chunk_tree, &mut root_traversal_instruction); + if chunk_write_result.is_ok() { + // if we were able to successfully write the chunk, we can remove + // the chunk expected root hash from our chunk id map + self.chunk_id_to_root_hash.remove(chunk_id); + } - Ok(self.merk) + chunk_write_result } - /// Returns the number of remaining chunks to be processed. If called before - /// the first chunk is processed, this method will return `None` since we do - /// not yet have enough information to know about the number of chunks. 
- pub fn remaining_chunks(&self) -> Option { - self.leaf_hashes.as_ref().map(|lh| lh.len()) + /// Process multi chunks (space optimized chunk proofs that can contain + /// multiple singular chunks) + pub fn process_multi_chunk( + &mut self, + multi_chunk: Vec, + grove_version: &GroveVersion, + ) -> Result>, Error> { + let mut expect_chunk_id = true; + let mut chunk_ids = vec![]; + let mut current_chunk_id = vec![]; + + for chunk_op in multi_chunk { + if (matches!(chunk_op, ChunkOp::ChunkId(..)) && !expect_chunk_id) + || (matches!(chunk_op, ChunkOp::Chunk(..)) && expect_chunk_id) + { + return Err(Error::ChunkRestoringError(ChunkError::InvalidMultiChunk( + "invalid multi chunk ordering", + ))); + } + match chunk_op { + ChunkOp::ChunkId(instructions) => { + current_chunk_id = traversal_instruction_as_vec_bytes(&instructions); + } + ChunkOp::Chunk(chunk) => { + // TODO: remove clone + let next_chunk_ids = + self.process_chunk(¤t_chunk_id, chunk, grove_version)?; + chunk_ids.extend(next_chunk_ids); + } + } + expect_chunk_id = !expect_chunk_id; + } + Ok(chunk_ids) } - /// Writes the data contained in `tree` (extracted from a verified chunk - /// proof) to the RocksDB. 
- fn write_chunk(&mut self, tree: ProofTree) -> Result<(), Error> { - let mut batch = self.merk.storage.new_batch(); - - tree.visit_refs(&mut |proof_node| { - if let Some((mut node, key)) = match &proof_node.node { - Node::KV(key, value) => Some(( - Tree::new(key.clone(), value.clone(), None, BasicMerk).unwrap(), - key, - )), - Node::KVValueHash(key, value, value_hash) => Some(( - Tree::new_with_value_hash(key.clone(), value.clone(), *value_hash, BasicMerk) - .unwrap(), - key, - )), - Node::KVValueHashFeatureType(key, value, value_hash, feature_type) => Some(( - Tree::new_with_value_hash( - key.clone(), - value.clone(), - *value_hash, - *feature_type, - ) - .unwrap(), - key, - )), - _ => None, - } { - // TODO: encode tree node without cloning key/value - *node.slot_mut(true) = proof_node.left.as_ref().map(Child::as_link); - *node.slot_mut(false) = proof_node.right.as_ref().map(Child::as_link); - - let bytes = node.encode(); - batch.put(key, &bytes, None, None).map_err(CostsError) - } else { + /// Verifies the structure of a chunk and ensures the chunk matches the + /// expected root hash + fn verify_chunk( + chunk: Vec, + expected_root_hash: &CryptoHash, + parent_key_value_hash_opt: &Option, + ) -> Result { + let chunk_len = chunk.len(); + let mut kv_count = 0; + let mut hash_count = 0; + + // build tree from ops + // ensure only made of KvValueFeatureType and Hash nodes and count them + let tree = execute(chunk.clone().into_iter().map(Ok), false, |node| { + if matches!(node, Node::KVValueHashFeatureType(..)) { + kv_count += 1; Ok(()) + } else if matches!(node, Node::Hash(..)) { + hash_count += 1; + Ok(()) + } else { + Err(Error::ChunkRestoringError(ChunkError::InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + } + }) + .unwrap()?; + + // chunk len must be exactly equal to the kv_count + hash_count + + // parent_branch_count + child_branch_count + debug_assert_eq!(chunk_len, ((kv_count + hash_count) * 2) - 1); + + 
// chunk structure verified, next verify root hash + match parent_key_value_hash_opt { + Some(val_hash) => { + let combined_hash = combine_hash(val_hash, &tree.hash().unwrap()).unwrap(); + if &combined_hash != expected_root_hash { + return Err(Error::ChunkRestoringError(ChunkError::InvalidChunkProof( + "chunk doesn't match expected root hash", + ))); + } } - })?; + None => { + if &tree.hash().unwrap() != expected_root_hash { + return Err(Error::ChunkRestoringError(ChunkError::InvalidChunkProof( + "chunk doesn't match expected root hash", + ))); + } + } + }; + Ok(tree) + } + + /// Write the verified chunk to storage + fn write_chunk( + &mut self, + chunk_tree: ProofTree, + traversal_instruction: &mut Vec, + ) -> Result>, Error> { + // this contains all the elements we want to write to storage + let mut batch = self.merk.storage.new_batch(); + let mut new_chunk_ids = Vec::new(); + + chunk_tree.visit_refs_track_traversal_and_parent( + traversal_instruction, + None, + &mut |proof_node, node_traversal_instruction, parent_key| { + match &proof_node.node { + Node::KVValueHashFeatureType(key, value, value_hash, feature_type) => { + // build tree from node value + let mut tree = TreeNode::new_with_value_hash( + key.clone(), + value.clone(), + *value_hash, + *feature_type, + ) + .unwrap(); + + // update tree links + *tree.slot_mut(LEFT) = proof_node.left.as_ref().map(Child::as_link); + *tree.slot_mut(RIGHT) = proof_node.right.as_ref().map(Child::as_link); + + // encode the node and add it to the batch + let bytes = tree.encode(); + + batch.put(key, &bytes, None, None).map_err(CostsError) + } + Node::Hash(hash) => { + // the node hash points to the root of another chunk + // we get the chunk id and add the hash to restorer state + let chunk_id = + traversal_instruction_as_vec_bytes(node_traversal_instruction); + new_chunk_ids.push(chunk_id.to_vec()); + self.chunk_id_to_root_hash.insert(chunk_id.to_vec(), *hash); + // TODO: handle unwrap + self.parent_keys + .insert(chunk_id, 
parent_key.unwrap().to_owned()); + Ok(()) + } + _ => { + // we do nothing for other node types + // technically verify chunk will be called before this + // as such this should be be reached + Ok(()) + } + } + }, + )?; + + // write the batch self.merk .storage .commit_batch(batch) .unwrap() - .map_err(StorageError) - } - - /// Verifies the trunk then writes its data to the RocksDB. - fn process_trunk(&mut self, ops: impl IntoIterator) -> Result { - let (trunk, height) = verify_trunk(ops.into_iter().map(Ok)).unwrap()?; - - let root_hash = if self.combining_value.is_none() { - trunk.hash().unwrap() - } else { - combine_hash( - value_hash(self.combining_value.as_ref().expect("confirmed exists")).value(), - &trunk.hash().unwrap(), - ) - .value - }; - - if root_hash != self.expected_root_hash { - return Err(Error::ChunkRestoringError(format!( - "Proof did not match expected hash\n\tExpected: {:?}\n\tActual: {:?}", - self.expected_root_hash, - trunk.hash() - ))); - } - - let root_key = trunk.key().to_vec(); - - let trunk_height = height / 2; - self.trunk_height = Some(trunk_height); - - let chunks_remaining = if trunk_height >= MIN_TRUNK_HEIGHT { - let leaf_hashes = trunk - .layer(trunk_height) - .map(|node| node.hash().unwrap()) - .collect::>() - .into_iter() - .peekable(); - self.leaf_hashes = Some(leaf_hashes); - - let parent_keys = trunk - .layer(trunk_height - 1) - .map(|node| node.key().to_vec()) - .collect::>>() - .into_iter() - .peekable(); - self.parent_keys = Some(parent_keys); - assert_eq!( - self.parent_keys.as_ref().unwrap().len(), - self.leaf_hashes.as_ref().unwrap().len() / 2 - ); - - let chunks_remaining = (2_usize).pow(trunk_height as u32); - assert_eq!(self.remaining_chunks_unchecked(), chunks_remaining); - chunks_remaining - } else { - self.leaf_hashes = Some(vec![].into_iter().peekable()); - self.parent_keys = Some(vec![].into_iter().peekable()); - 0 - }; - - // note that these writes don't happen atomically, which is fine here - // because if 
anything fails during the restore process we will just - // scrap the whole restore and start over - self.write_chunk(trunk)?; - self.merk.set_base_root_key(Some(root_key)).unwrap()?; - - Ok(chunks_remaining) - } - - /// Verifies a leaf chunk then writes it to the RocksDB. This needs to be - /// called in order, retrying the last chunk for any failed verifications. - fn process_leaf(&mut self, ops: impl IntoIterator) -> Result { - let leaf_hashes = self.leaf_hashes.as_mut().unwrap(); - let leaf_hash = leaf_hashes - .peek() - .expect("Received more chunks than expected"); - - let leaf = verify_leaf(ops.into_iter().map(Ok), *leaf_hash).unwrap()?; - self.rewrite_parent_link(&leaf)?; - self.write_chunk(leaf)?; - - let leaf_hashes = self.leaf_hashes.as_mut().unwrap(); - leaf_hashes.next(); + .map_err(StorageError)?; - Ok(self.remaining_chunks_unchecked()) + Ok(new_chunk_ids) } - /// The parent of the root node of the leaf does not know the key of its - /// children when it is first written. Now that we have verified this leaf, - /// we can write the key into the parent node's entry. Note that this does - /// not need to recalcuate hashes since it already had the child hash. - fn rewrite_parent_link(&mut self, leaf: &ProofTree) -> Result<(), Error> { - let parent_keys = self.parent_keys.as_mut().unwrap(); - let parent_key = parent_keys.peek().unwrap().clone(); - let mut parent = crate::merk::fetch_node(&self.merk.storage, parent_key.as_slice())? - .expect("Could not find parent of leaf chunk"); - - let is_left_child = self.remaining_chunks_unchecked() % 2 == 0; - if let Some(Link::Reference { ref mut key, .. }) = parent.link_mut(is_left_child) { - *key = leaf.key().to_vec(); - } else { - panic!("Expected parent links to be type Link::Reference"); - }; + /// When we process truncated chunks, the parents of Node::Hash have invalid + /// placeholder for links. 
+ /// When we get the actual chunk associated with the Node::Hash, + /// we need to update the parent link to reflect the correct data. + fn rewrite_parent_link( + &mut self, + chunk_id: &[u8], + traversal_instruction: &[bool], + chunk_tree: &ProofTree, + grove_version: &GroveVersion, + ) -> Result<(), Error> { + let parent_key = self + .parent_keys + .get(chunk_id) + .ok_or(Error::ChunkRestoringError(InternalError( + "after successful chunk verification parent key should exist", + )))?; + + let mut parent = merk::fetch_node( + &self.merk.storage, + parent_key.as_slice(), + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + )? + .ok_or(Error::ChunkRestoringError(InternalError( + "cannot find expected parent in memory, most likely state corruption issue", + )))?; + + let is_left = traversal_instruction + .last() + .expect("rewrite is only called when traversal_instruction is not empty"); + + let updated_key = chunk_tree.key(); + let updated_sum = chunk_tree.sum(); + + if let Some(Link::Reference { key, sum, .. 
}) = parent.link_mut(*is_left) { + *key = updated_key.to_vec(); + *sum = updated_sum; + } let parent_bytes = parent.encode(); self.merk @@ -281,56 +329,77 @@ impl<'db, S: StorageContext<'db>> Restorer { .unwrap() .map_err(StorageError)?; - if !is_left_child { - let parent_keys = self.parent_keys.as_mut().unwrap(); - parent_keys.next(); - } + self.parent_keys + .remove(chunk_id) + .expect("confirmed parent key exists above"); Ok(()) } - fn rewrite_trunk_child_heights(&mut self) -> Result<(), Error> { - fn recurse<'s, 'db, S: StorageContext<'db>>( - mut node: RefWalker>, - remaining_depth: usize, + /// Each nodes height is not added to state as such the producer could lie + /// about the height values after replication we need to verify the + /// heights and if invalid recompute the correct values + fn rewrite_heights(&mut self, grove_version: &GroveVersion) -> Result<(), Error> { + fn rewrite_child_heights<'s, 'db, S: StorageContext<'db>>( + mut walker: RefWalker>, batch: &mut >::Batch, + grove_version: &GroveVersion, ) -> Result<(u8, u8), Error> { - if remaining_depth == 0 { - return Ok(node.tree().child_heights()); - } - - let mut cloned_node = - Tree::decode(node.tree().key().to_vec(), node.tree().encode().as_slice()) - .map_err(EdError)?; + // TODO: remove unwrap + let mut cloned_node = TreeNode::decode( + walker.tree().key().to_vec(), + walker.tree().encode().as_slice(), + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap(); - let left_child = node.walk(true).unwrap()?.unwrap(); - let left_child_heights = recurse(left_child, remaining_depth - 1, batch)?; - let left_height = left_child_heights.0.max(left_child_heights.1) + 1; - *cloned_node.link_mut(true).unwrap().child_heights_mut() = left_child_heights; + let mut left_height = 0; + let mut right_height = 0; + + if let Some(left_walker) = walker + .walk( + LEFT, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap()? 
+ { + let left_child_heights = rewrite_child_heights(left_walker, batch, grove_version)?; + left_height = left_child_heights.0.max(left_child_heights.1) + 1; + *cloned_node.link_mut(LEFT).unwrap().child_heights_mut() = left_child_heights; + } - let right_child = node.walk(false).unwrap()?.unwrap(); - let right_child_heights = recurse(right_child, remaining_depth - 1, batch)?; - let right_height = right_child_heights.0.max(right_child_heights.1) + 1; - *cloned_node.link_mut(false).unwrap().child_heights_mut() = right_child_heights; + if let Some(right_walker) = walker + .walk( + RIGHT, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap()? + { + let right_child_heights = + rewrite_child_heights(right_walker, batch, grove_version)?; + right_height = right_child_heights.0.max(right_child_heights.1) + 1; + *cloned_node.link_mut(RIGHT).unwrap().child_heights_mut() = right_child_heights; + } let bytes = cloned_node.encode(); batch - .put(node.tree().key(), &bytes, None, None) + .put(walker.tree().key(), &bytes, None, None) .map_err(CostsError)?; Ok((left_height, right_height)) } - self.merk.load_base_root().unwrap()?; - let mut batch = self.merk.storage.new_batch(); + // TODO: deal with unwrap + let mut tree = self.merk.tree.take().unwrap(); + let walker = RefWalker::new(&mut tree, self.merk.source()); + + rewrite_child_heights(walker, &mut batch, grove_version)?; - let depth = self.trunk_height.unwrap(); - self.merk.use_tree_mut(|maybe_tree| { - let tree = maybe_tree.unwrap(); - let walker = RefWalker::new(tree, self.merk.source()); - recurse(walker, depth, &mut batch) - })?; + self.merk.tree.set(Some(tree)); self.merk .storage @@ -339,148 +408,482 @@ impl<'db, S: StorageContext<'db>> Restorer { .map_err(StorageError) } - /// Returns the number of remaining chunks to be processed. This method will - /// panic if called before processing the first chunk (since that chunk - /// gives us the information to know how many chunks to expect). 
- pub fn remaining_chunks_unchecked(&self) -> usize { - self.leaf_hashes.as_ref().unwrap().len() - } -} + /// Rebuild restoration state from partial storage state + fn attempt_state_recovery(&mut self, grove_version: &GroveVersion) -> Result<(), Error> { + // TODO: think about the return type some more + let (bad_link_map, parent_keys) = self.merk.verify(false, grove_version); + if !bad_link_map.is_empty() { + self.chunk_id_to_root_hash = bad_link_map; + self.parent_keys = parent_keys; + } -#[cfg(feature = "full")] -impl<'db, S: StorageContext<'db>> Merk { - /// Creates a new `Restorer`, which can be used to verify chunk proofs to - /// replicate an entire Merk tree. A new Merk instance will be initialized - /// by creating a RocksDB at `path`. - pub fn restore(merk: Merk, expected_root_hash: CryptoHash) -> Restorer { - Restorer::new(merk, None, expected_root_hash) + Ok(()) } -} -#[cfg(feature = "full")] -impl ProofTree { - fn child_heights(&self) -> (u8, u8) { - ( - self.left.as_ref().map_or(0, |c| c.tree.height as u8), - self.right.as_ref().map_or(0, |c| c.tree.height as u8), - ) + /// Consumes the `Restorer` and returns a newly created, fully populated + /// Merk instance. This method will return an error if called before + /// processing all chunks. 
+ pub fn finalize(mut self, grove_version: &GroveVersion) -> Result, Error> { + // ensure all chunks have been processed + if !self.chunk_id_to_root_hash.is_empty() || !self.parent_keys.is_empty() { + return Err(Error::ChunkRestoringError( + ChunkError::RestorationNotComplete, + )); + } + + // get the latest version of the root node + let _ = self.merk.load_base_root( + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ); + + // if height values are wrong, rewrite height + if self.verify_height(grove_version).is_err() { + let _ = self.rewrite_heights(grove_version); + // update the root node after height rewrite + let _ = self.merk.load_base_root( + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ); + } + + if !self + .merk + .verify(self.merk.is_sum_tree, grove_version) + .0 + .is_empty() + { + return Err(Error::ChunkRestoringError(ChunkError::InternalError( + "restored tree invalid", + ))); + } + + Ok(self.merk) } -} -#[cfg(feature = "full")] -impl Child { - fn as_link(&self) -> Link { - let key = match &self.tree.node { - Node::KV(key, _) - | Node::KVValueHash(key, ..) - | Node::KVValueHashFeatureType(key, ..) => key.as_slice(), - // for the connection between the trunk and leaf chunks, we don't - // have the child key so we must first write in an empty one. 
once - // the leaf gets verified, we can write in this key to its parent - _ => &[], + /// Verify that the child heights of the merk tree links correctly represent + /// the tree + fn verify_height(&self, grove_version: &GroveVersion) -> Result<(), Error> { + let tree = self.merk.tree.take(); + let height_verification_result = if let Some(tree) = &tree { + self.verify_tree_height(tree, tree.height(), grove_version) + } else { + Ok(()) }; + self.merk.tree.set(tree); + height_verification_result + } + + fn verify_tree_height( + &self, + tree: &TreeNode, + parent_height: u8, + grove_version: &GroveVersion, + ) -> Result<(), Error> { + let (left_height, right_height) = tree.child_heights(); + + if (left_height.abs_diff(right_height)) > 1 { + return Err(Error::CorruptedState( + "invalid child heights, difference greater than 1 for AVL tree", + )); + } + + let max_child_height = left_height.max(right_height); + if parent_height <= max_child_height || parent_height - max_child_height != 1 { + return Err(Error::CorruptedState( + "invalid child heights, parent height is not 1 less than max child height", + )); + } + + let left_link = tree.link(LEFT); + let right_link = tree.link(RIGHT); + + if (left_height == 0 && left_link.is_some()) || (right_height == 0 && right_link.is_some()) + { + return Err(Error::CorruptedState( + "invalid child heights node has child height 0, but hash child", + )); + } - Link::Reference { - hash: self.hash, - sum: None, - child_heights: self.tree.child_heights(), - key: key.to_vec(), + if let Some(link) = left_link { + let left_tree = link.tree(); + if left_tree.is_none() { + let left_tree = TreeNode::get( + &self.merk.storage, + link.key(), + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap()? 
+ .ok_or(Error::CorruptedState("link points to non-existent node"))?; + self.verify_tree_height(&left_tree, left_height, grove_version)?; + } else { + self.verify_tree_height(left_tree.unwrap(), left_height, grove_version)?; + } + } + + if let Some(link) = right_link { + let right_tree = link.tree(); + if right_tree.is_none() { + let right_tree = TreeNode::get( + &self.merk.storage, + link.key(), + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap()? + .ok_or(Error::CorruptedState("link points to non-existent node"))?; + self.verify_tree_height(&right_tree, right_height, grove_version)?; + } else { + self.verify_tree_height(right_tree.unwrap(), right_height, grove_version)?; + } } + + Ok(()) } } -#[cfg(feature = "full")] #[cfg(test)] mod tests { use grovedb_path::SubtreePath; use grovedb_storage::{ - rocksdb_storage::{test_utils::TempStorage, PrefixedRocksDbImmediateStorageContext}, + rocksdb_storage::{ + test_utils::TempStorage, PrefixedRocksDbImmediateStorageContext, + PrefixedRocksDbStorageContext, + }, RawIterator, Storage, }; use super::*; - use crate::{test_utils::*, tree::Op, MerkBatch}; + use crate::{ + merk::chunks::ChunkProducer, + proofs::chunk::{ + chunk::tests::traverse_get_node_hash, error::ChunkError::InvalidChunkProof, + }, + test_utils::{make_batch_seq, TempMerk}, + Error::ChunkRestoringError, + Merk, PanicSource, + }; + + #[test] + fn test_chunk_verification_non_avl_tree() { + let non_avl_tree_proof = vec![ + Op::Push(Node::KV(vec![1], vec![1])), + Op::Push(Node::KV(vec![2], vec![2])), + Op::Parent, + Op::Push(Node::KV(vec![3], vec![3])), + Op::Parent, + ]; + assert!(Restorer::::verify_chunk( + non_avl_tree_proof, + &[0; 32], + &None + ) + .is_err()); + } + + #[test] + fn test_chunk_verification_only_kv_feature_and_hash() { + // should not accept kv + let invalid_chunk_proof = vec![Op::Push(Node::KV(vec![1], vec![1]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); 
+ assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + + // should not accept kvhash + let invalid_chunk_proof = vec![Op::Push(Node::KVHash([0; 32]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); + assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + + // should not accept kvdigest + let invalid_chunk_proof = vec![Op::Push(Node::KVDigest(vec![0], [0; 32]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); + assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + + // should not accept kvvaluehash + let invalid_chunk_proof = vec![Op::Push(Node::KVValueHash(vec![0], vec![0], [0; 32]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); + assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + + // should not accept kvrefvaluehash + let invalid_chunk_proof = vec![Op::Push(Node::KVRefValueHash(vec![0], vec![0], [0; 32]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); + assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + } + + fn get_node_hash(node: Node) -> Result { + match node { + Node::Hash(hash) => Ok(hash), + _ => Err("expected node hash".to_string()), + } + } + + #[test] + fn test_process_chunk_correct_chunk_id_map() { + let grove_version = 
GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let mut merk_tree = merk.tree.take().expect("should have inner tree"); + merk.tree.set(Some(merk_tree.clone())); + let mut tree_walker = RefWalker::new(&mut merk_tree, PanicSource {}); - fn restore_test(batches: &[&MerkBatch>], expected_nodes: usize) { let storage = TempStorage::new(); let tx = storage.start_transaction(); - let mut original = Merk::open_base( + let restoration_merk = Merk::open_base( storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); - for batch in batches { - original - .apply::, Vec<_>>(batch, &[], None) - .unwrap() - .unwrap(); - } - let chunks = original.chunks().unwrap(); + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); - let storage = TempStorage::default(); - let _tx2 = storage.start_transaction(); - let ctx = storage - .get_immediate_storage_context(SubtreePath::empty(), &tx) - .unwrap(); - let merk = Merk::open_base(ctx, false).unwrap().unwrap(); - let mut restorer = Merk::restore(merk, original.root_hash().unwrap()); - - assert_eq!(restorer.remaining_chunks(), None); + // at the start both merks should have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); - let mut expected_remaining = chunks.len(); - for chunk in chunks { - let remaining = restorer.process_chunk(chunk.unwrap()).unwrap(); + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); - expected_remaining -= 1; - assert_eq!(remaining, expected_remaining); - 
assert_eq!(restorer.remaining_chunks().unwrap(), expected_remaining); - } - assert_eq!(expected_remaining, 0); + // initial restorer state should contain just the root hash of the source merk + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![].as_slice()), + Some(merk.root_hash().unwrap()).as_ref() + ); - let restored = restorer.finalize().unwrap(); - assert_eq!(restored.root_hash(), original.root_hash()); - assert_raw_db_entries_eq(&restored, &original, expected_nodes); - } + // generate first chunk + let (chunk, _) = chunk_producer.chunk_with_index(1, grove_version).unwrap(); + // apply first chunk + let new_chunk_ids = restorer + .process_chunk( + &traversal_instruction_as_vec_bytes(vec![].as_slice()), + chunk, + grove_version, + ) + .expect("should process chunk successfully"); + assert_eq!(new_chunk_ids.len(), 4); + + // after first chunk application + // the chunk_map should contain 4 items + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + // assert all the chunk hash values + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![1, 1].as_slice()), + Some( + get_node_hash(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, LEFT], + grove_version + )) + .unwrap() + ) + .as_ref() + ); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![1, 0].as_slice()), + Some( + get_node_hash(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, RIGHT], + grove_version + )) + .unwrap() + ) + .as_ref() + ); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![0, 1].as_slice()), + Some( + get_node_hash(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT], + grove_version + )) + .unwrap() + ) + .as_ref() + ); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![0, 0].as_slice()), + Some( + get_node_hash(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, RIGHT], + grove_version + )) + .unwrap() + ) + .as_ref() + ); - #[test] - fn restore_10000() { - 
restore_test(&[&make_batch_seq(0..10_000)], 10_000); - } + // generate second chunk + let (chunk, _) = chunk_producer.chunk_with_index(2, grove_version).unwrap(); + // apply second chunk + let new_chunk_ids = restorer + .process_chunk( + &traversal_instruction_as_vec_bytes(&[LEFT, LEFT]), + chunk, + grove_version, + ) + .unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + // chunk_map should have 1 less element + assert_eq!(restorer.chunk_id_to_root_hash.len(), 3); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![1, 1].as_slice()), + None + ); - #[test] - fn restore_3() { - restore_test(&[&make_batch_seq(0..3)], 3); - } + // let's try to apply the second chunk again, should not work + let (chunk, _) = chunk_producer.chunk_with_index(2, grove_version).unwrap(); + // apply second chunk + let chunk_process_result = restorer.process_chunk( + &traversal_instruction_as_vec_bytes(&[LEFT, LEFT]), + chunk, + grove_version, + ); + assert!(chunk_process_result.is_err()); + assert!(matches!( + chunk_process_result, + Err(Error::ChunkRestoringError(ChunkError::UnexpectedChunk)) + )); + + // next let's get a random but expected chunk and work with that e.g. chunk 4 + // but let's apply it to the wrong place + let (chunk, _) = chunk_producer.chunk_with_index(4, grove_version).unwrap(); + let chunk_process_result = restorer.process_chunk( + &traversal_instruction_as_vec_bytes(&[LEFT, RIGHT]), + chunk, + grove_version, + ); + assert!(chunk_process_result.is_err()); + assert!(matches!( + chunk_process_result, + Err(Error::ChunkRestoringError(ChunkError::InvalidChunkProof( + .. 
+ ))) + )); + + // correctly apply chunk 5 + let (chunk, _) = chunk_producer.chunk_with_index(5, grove_version).unwrap(); + // apply second chunk + let new_chunk_ids = restorer + .process_chunk( + &traversal_instruction_as_vec_bytes(&[RIGHT, RIGHT]), + chunk, + grove_version, + ) + .unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + // chunk_map should have 1 less element + assert_eq!(restorer.chunk_id_to_root_hash.len(), 2); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![0, 0].as_slice()), + None + ); - #[test] - fn restore_2_left_heavy() { - restore_test( - &[ - &[(vec![0], Op::Put(vec![], BasicMerk))], - &[(vec![1], Op::Put(vec![], BasicMerk))], - ], - 2, + // correctly apply chunk 3 + let (chunk, _) = chunk_producer.chunk_with_index(3, grove_version).unwrap(); + // apply second chunk + let new_chunk_ids = restorer + .process_chunk( + &traversal_instruction_as_vec_bytes(&[LEFT, RIGHT]), + chunk, + grove_version, + ) + .unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + // chunk_map should have 1 less element + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![1, 0].as_slice()), + None ); - } - #[test] - fn restore_2_right_heavy() { - restore_test( - &[ - &[(vec![1], Op::Put(vec![], BasicMerk))], - &[(vec![0], Op::Put(vec![], BasicMerk))], - ], - 2, + // correctly apply chunk 4 + let (chunk, _) = chunk_producer.chunk_with_index(4, grove_version).unwrap(); + // apply second chunk + let new_chunk_ids = restorer + .process_chunk( + &traversal_instruction_as_vec_bytes(&[RIGHT, LEFT]), + chunk, + grove_version, + ) + .unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + // chunk_map should have 1 less element + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![0, 1].as_slice()), + None ); - } - #[test] - fn restore_1() { - restore_test(&[&make_batch_seq(0..1)], 1); + // finalize merk + let restored_merk = restorer + .finalize(grove_version) + 
.expect("should finalized successfully"); + + assert_eq!( + restored_merk.root_hash().unwrap(), + merk.root_hash().unwrap() + ); } fn assert_raw_db_entries_eq( @@ -497,7 +900,10 @@ mod tests { let mut i = 0; loop { - assert_eq!(restored_entries.valid(), original_entries.valid()); + assert_eq!( + restored_entries.valid().unwrap(), + original_entries.valid().unwrap() + ); if !restored_entries.valid().unwrap() { break; } @@ -513,4 +919,516 @@ mod tests { assert_eq!(i, length); } + + // Builds a source merk with batch_size number of elements + // attempts restoration on some empty merk + // verifies that restoration was performed correctly. + fn test_restoration_single_chunk_strategy(batch_size: u64) { + let grove_version = GroveVersion::latest(); + // build the source merk + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let mut source_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + let batch = make_batch_seq(0..batch_size); + source_merk + .apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + + // build the restoration merk + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + + // at the start + // restoration merk should have empty root hash + // and source merk should have a different root hash + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + assert_ne!( + source_merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + // instantiate chunk producer and restorer + let mut chunk_producer = + ChunkProducer::new(&source_merk).expect("should create chunk 
producer"); + let mut restorer = Restorer::new(restoration_merk, source_merk.root_hash().unwrap(), None); + + // perform chunk production and processing + let mut chunk_id_opt = Some(vec![]); + while let Some(chunk_id) = chunk_id_opt { + let (chunk, next_chunk_id) = chunk_producer + .chunk(&chunk_id, grove_version) + .expect("should get chunk"); + restorer + .process_chunk(&chunk_id, chunk, grove_version) + .expect("should process chunk successfully"); + chunk_id_opt = next_chunk_id; + } + + // after chunk processing we should be able to finalize + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.parent_keys.len(), 0); + let restored_merk = restorer.finalize(grove_version).expect("should finalize"); + + // compare root hash values + assert_eq!( + source_merk.root_hash().unwrap(), + restored_merk.root_hash().unwrap() + ); + + assert_raw_db_entries_eq(&restored_merk, &source_merk, batch_size as usize); + } + + #[test] + fn restore_single_chunk_20() { + test_restoration_single_chunk_strategy(20); + } + + #[test] + fn restore_single_chunk_1000() { + test_restoration_single_chunk_strategy(1000); + } + + #[test] + fn test_process_multi_chunk_no_limit() { + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + + // at the start both merks should have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + 
restoration_merk.root_hash().unwrap() + ); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![].as_slice()), + Some(merk.root_hash().unwrap()).as_ref() + ); + + // generate multi chunk from root with no limit + let chunk = chunk_producer + .multi_chunk_with_limit(vec![].as_slice(), None, grove_version) + .expect("should generate multichunk"); + + assert_eq!(chunk.chunk.len(), 2); + assert_eq!(chunk.next_index, None); + assert_eq!(chunk.remaining_limit, None); + + let next_ids = restorer + .process_multi_chunk(chunk.chunk, grove_version) + .expect("should process chunk"); + // should have replicated all chunks + assert_eq!(next_ids.len(), 0); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.parent_keys.len(), 0); + + let restored_merk = restorer + .finalize(grove_version) + .expect("should be able to finalize"); + + // compare root hash values + assert_eq!( + restored_merk.root_hash().unwrap(), + merk.root_hash().unwrap() + ); + } + + #[test] + fn test_process_multi_chunk_no_limit_but_non_root() { + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + + // at the start both merks should 
have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![].as_slice()), + Some(merk.root_hash().unwrap()).as_ref() + ); + + // first restore the first chunk + let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1, grove_version).unwrap(); + let new_chunk_ids = restorer + .process_chunk( + &traversal_instruction_as_vec_bytes(&[]), + chunk, + grove_version, + ) + .expect("should process chunk"); + assert_eq!(new_chunk_ids.len(), 4); + assert_eq!(next_chunk_index, Some(2)); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + assert_eq!(restorer.parent_keys.len(), 4); + + // generate multi chunk from the 2nd chunk with no limit + let multi_chunk = chunk_producer + .multi_chunk_with_limit_and_index(next_chunk_index.unwrap(), None, grove_version) + .unwrap(); + // tree of height 4 has 5 chunks + // we have restored the first leaving 4 chunks + // each chunk has an extra chunk id, since they are disjoint + // hence the size of the multi chunk should be 8 + assert_eq!(multi_chunk.chunk.len(), 8); + let new_chunk_ids = restorer + .process_multi_chunk(multi_chunk.chunk, grove_version) + .unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.parent_keys.len(), 0); + + let restored_merk = restorer + .finalize(grove_version) + .expect("should be able to finalize"); + + // compare root hash values + assert_eq!( + restored_merk.root_hash().unwrap(), + merk.root_hash().unwrap() + ); + } + + #[test] + fn test_process_multi_chunk_with_limit() { + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); + let 
batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + + // at the start both merks should have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + // build multi chunk with with limit of 325 + let multi_chunk = chunk_producer + .multi_chunk_with_limit(vec![].as_slice(), Some(600), grove_version) + .unwrap(); + // should only contain the first chunk + assert_eq!(multi_chunk.chunk.len(), 2); + // should point to chunk 2 + assert_eq!(multi_chunk.next_index, Some(vec![1, 1])); + let next_ids = restorer + .process_multi_chunk(multi_chunk.chunk, grove_version) + .unwrap(); + assert_eq!(next_ids.len(), 4); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + assert_eq!(restorer.parent_keys.len(), 4); + + // subsequent chunks are of size 321 + // with limit just above 642 should get 2 chunks (2 and 3) + // disjoint, so multi chunk len should be 4 + let multi_chunk = chunk_producer + .multi_chunk_with_limit( + multi_chunk.next_index.unwrap().as_slice(), + Some(645), + grove_version, + ) + .unwrap(); + assert_eq!(multi_chunk.chunk.len(), 4); + assert_eq!(multi_chunk.next_index, Some(vec![0u8, 1u8])); + let next_ids = restorer + .process_multi_chunk(multi_chunk.chunk, grove_version) + .unwrap(); + // chunks 2 and 
3 are leaf chunks + assert_eq!(next_ids.len(), 0); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 2); + assert_eq!(restorer.parent_keys.len(), 2); + + // get the last 2 chunks + let multi_chunk = chunk_producer + .multi_chunk_with_limit( + multi_chunk.next_index.unwrap().as_slice(), + Some(645), + grove_version, + ) + .unwrap(); + assert_eq!(multi_chunk.chunk.len(), 4); + assert_eq!(multi_chunk.next_index, None); + let next_ids = restorer + .process_multi_chunk(multi_chunk.chunk, grove_version) + .unwrap(); + // chunks 2 and 3 are leaf chunks + assert_eq!(next_ids.len(), 0); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.parent_keys.len(), 0); + + // finalize merk + let restored_merk = restorer.finalize(grove_version).unwrap(); + + // compare root hash values + assert_eq!( + restored_merk.root_hash().unwrap(), + merk.root_hash().unwrap() + ); + } + + // Builds a source merk with batch_size number of elements + // attempts restoration on some empty merk, with multi chunks + // verifies that restoration was performed correctly. 
+ fn test_restoration_multi_chunk_strategy(batch_size: u64, limit: Option) { + let grove_version = GroveVersion::latest(); + // build the source merk + let mut source_merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..batch_size); + source_merk + .apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + + // build the restoration merk + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + + // at the start + // restoration merk should have empty root hash + // and source merk should have a different root hash + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + assert_ne!( + source_merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + // instantiate chunk producer and restorer + let mut chunk_producer = + ChunkProducer::new(&source_merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, source_merk.root_hash().unwrap(), None); + + // perform chunk production and processing + let mut chunk_id_opt = Some(vec![]); + while let Some(chunk_id) = chunk_id_opt { + let multi_chunk = chunk_producer + .multi_chunk_with_limit(&chunk_id, limit, grove_version) + .expect("should get chunk"); + restorer + .process_multi_chunk(multi_chunk.chunk, grove_version) + .expect("should process chunk successfully"); + chunk_id_opt = multi_chunk.next_index; + } + + // after chunk processing we should be able to finalize + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.parent_keys.len(), 0); + let restored_merk = restorer.finalize(grove_version).expect("should finalize"); + + // compare root hash values + assert_eq!( + source_merk.root_hash().unwrap(), + restored_merk.root_hash().unwrap() + 
); + } + + #[test] + fn restore_multi_chunk_20_no_limit() { + test_restoration_multi_chunk_strategy(20, None); + } + + #[test] + #[should_panic] + fn restore_multi_chunk_20_tiny_limit() { + test_restoration_multi_chunk_strategy(20, Some(1)); + } + + #[test] + fn restore_multi_chunk_20_limit() { + test_restoration_multi_chunk_strategy(20, Some(1200)); + } + + #[test] + fn restore_multi_chunk_10000_limit() { + test_restoration_multi_chunk_strategy(10000, Some(1200)); + } + + #[test] + fn test_restoration_interruption() { + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + + // at the start both merks should have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![].as_slice()), + Some(merk.root_hash().unwrap()).as_ref() + ); + + // first restore the first chunk + let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1, grove_version).unwrap(); + let new_chunk_ids = restorer + .process_chunk( + &traversal_instruction_as_vec_bytes(&[]), + chunk, + grove_version, + ) + .expect("should process 
chunk"); + assert_eq!(new_chunk_ids.len(), 4); + assert_eq!(next_chunk_index, Some(2)); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + assert_eq!(restorer.parent_keys.len(), 4); + + // store old state for later reference + let old_chunk_id_to_root_hash = restorer.chunk_id_to_root_hash.clone(); + let old_parent_keys = restorer.parent_keys.clone(); + + // drop the restorer and the restoration merk + drop(restorer); + // open the restoration merk again and build a restorer from it + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap(); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + // assert the state of the restorer + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!(restorer.parent_keys.len(), 0); + + // recover state + let recovery_attempt = restorer.attempt_state_recovery(grove_version); + assert!(recovery_attempt.is_ok()); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + assert_eq!(restorer.parent_keys.len(), 4); + + // assert equality to old state + assert_eq!(old_chunk_id_to_root_hash, restorer.chunk_id_to_root_hash); + assert_eq!(old_parent_keys, restorer.parent_keys); + } } diff --git a/merk/src/merk/source.rs b/merk/src/merk/source.rs new file mode 100644 index 000000000..dd71e74ed --- /dev/null +++ b/merk/src/merk/source.rs @@ -0,0 +1,58 @@ +use grovedb_costs::CostResult; +use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; + +use crate::{ + tree::{kv::ValueDefinedCostType, Fetch, TreeNode}, + Error, Link, Merk, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + pub(in crate::merk) fn source(&self) -> MerkSource { + MerkSource { + storage: &self.storage, + is_sum_tree: self.is_sum_tree, + } + } +} + +#[derive(Debug)] +pub struct MerkSource<'s, S> { + storage: &'s S, + 
is_sum_tree: bool, +} + +impl<'s, S> Clone for MerkSource<'s, S> { + fn clone(&self) -> Self { + MerkSource { + storage: self.storage, + is_sum_tree: self.is_sum_tree, + } + } +} + +impl<'s, 'db, S> Fetch for MerkSource<'s, S> +where + S: StorageContext<'db>, +{ + fn fetch( + &self, + link: &Link, + value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult { + TreeNode::get( + self.storage, + link.key(), + value_defined_cost_fn, + grove_version, + ) + .map_ok(|x| x.ok_or(Error::KeyNotFoundError("Key not found for fetch"))) + .flatten() + } +} diff --git a/merk/src/owner.rs b/merk/src/owner.rs index d84917b75..1543a089a 100644 --- a/merk/src/owner.rs +++ b/merk/src/owner.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Owner use std::ops::{Deref, DerefMut}; @@ -91,6 +63,27 @@ impl Owner { return_value } + /// Takes temporary ownership of the contained value by passing it to `f`. + /// The function must return a result of the same type (the same value, or a + /// new value to take its place). + /// + /// Like `own`, but uses a tuple return type which allows specifying a value + /// to return from the call to `own_result` for convenience. + pub fn own_result(&mut self, f: F) -> Result<(), E> + where + F: FnOnce(T) -> Result, + { + let old_value = unwrap(self.inner.take()); + let new_value_result = f(old_value); + match new_value_result { + Ok(new_value) => { + self.inner = Some(new_value); + Ok(()) + } + Err(e) => Err(e), + } + } + /// Sheds the `Owner` container and returns the value it contained. pub fn into_inner(mut self) -> T { unwrap(self.inner.take()) diff --git a/merk/src/proofs/chunk.rs b/merk/src/proofs/chunk.rs index 48afe8f30..063a35754 100644 --- a/merk/src/proofs/chunk.rs +++ b/merk/src/proofs/chunk.rs @@ -1,645 +1,9 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Chunk proofs +mod binary_range; #[cfg(feature = "full")] -use grovedb_costs::{ - cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, -}; -#[cfg(feature = "full")] -use grovedb_storage::RawIterator; -#[cfg(feature = "full")] -use { - super::tree::{execute, Tree as ProofTree}, - crate::tree::CryptoHash, - crate::tree::Tree, -}; - -#[cfg(feature = "full")] -use super::{Node, Op}; -#[cfg(feature = "full")] -use crate::{ - error::Error, - tree::{Fetch, RefWalker}, - Error::EdError, - TreeFeatureType::BasicMerk, -}; - -/// The minimum number of layers the trunk will be guaranteed to have before -/// splitting into multiple chunks. If the tree's height is less than double -/// this value, the trunk should be verified as a leaf chunk. -#[cfg(feature = "full")] -pub const MIN_TRUNK_HEIGHT: usize = 5; - -#[cfg(feature = "full")] -impl<'a, S> RefWalker<'a, S> -where - S: Fetch + Sized + Clone, -{ - /// Generates a trunk proof by traversing the tree. - /// - /// Returns a tuple containing the produced proof, and a boolean indicating - /// whether or not there will be more chunks to follow. If the chunk - /// contains the entire tree, the boolean will be `false`, if the chunk - /// is abridged and will be connected to leaf chunks, it will be `true`. 
- pub fn create_trunk_proof(&mut self) -> CostResult<(Vec, bool), Error> { - let approx_size = 2usize.pow((self.tree().height() / 2) as u32) * 3; - let mut proof = Vec::with_capacity(approx_size); - - self.traverse_for_height_proof(&mut proof, 1) - .flat_map_ok(|trunk_height| { - if trunk_height < MIN_TRUNK_HEIGHT { - proof.clear(); - self.traverse_for_trunk(&mut proof, usize::MAX, true) - .map_ok(|_| Ok((proof, false))) - } else { - self.traverse_for_trunk(&mut proof, trunk_height, true) - .map_ok(|_| Ok((proof, true))) - } - }) - .flatten() - } - - /// Traverses down the left edge of the tree and pushes ops to the proof, to - /// act as a proof of the height of the tree. This is the first step in - /// generating a trunk proof. - fn traverse_for_height_proof( - &mut self, - proof: &mut Vec, - depth: usize, - ) -> CostResult { - let mut cost = OperationCost::default(); - let maybe_left = match self.walk(true).unwrap_add_cost(&mut cost) { - Ok(maybe_left) => maybe_left, - Err(e) => { - return Err(e).wrap_with_cost(cost); - } - }; - let has_left_child = maybe_left.is_some(); - - let trunk_height = if let Some(mut left) = maybe_left { - match left - .traverse_for_height_proof(proof, depth + 1) - .unwrap_add_cost(&mut cost) - { - Ok(x) => x, - Err(e) => return Err(e).wrap_with_cost(cost), - } - } else { - depth / 2 - }; - - if depth > trunk_height { - proof.push(Op::Push(self.to_kvhash_node())); - - if has_left_child { - proof.push(Op::Parent); - } - - if let Some(right) = self.tree().link(false) { - proof.push(Op::Push(Node::Hash(*right.hash()))); - proof.push(Op::Child); - } - } - - Ok(trunk_height).wrap_with_cost(cost) - } - - /// Traverses down the tree and adds KV push ops for all nodes up to a - /// certain depth. This expects the proof to contain a height proof as - /// generated by `traverse_for_height_proof`. 
- fn traverse_for_trunk( - &mut self, - proof: &mut Vec, - remaining_depth: usize, - is_leftmost: bool, - ) -> CostResult<(), Error> { - let mut cost = OperationCost::default(); - - if remaining_depth == 0 { - // return early if we have reached bottom of trunk - - // for leftmost node, we already have height proof - if is_leftmost { - return Ok(()).wrap_with_cost(cost); - } - - // add this node's hash - proof.push(Op::Push(self.to_hash_node().unwrap_add_cost(&mut cost))); - - return Ok(()).wrap_with_cost(cost); - } - - // traverse left - let has_left_child = self.tree().link(true).is_some(); - if has_left_child { - let mut left = cost_return_on_error!(&mut cost, self.walk(true)).unwrap(); - cost_return_on_error!( - &mut cost, - left.traverse_for_trunk(proof, remaining_depth - 1, is_leftmost) - ); - } - - // add this node's data - proof.push(Op::Push(self.to_kv_value_hash_feature_type_node())); - - if has_left_child { - proof.push(Op::Parent); - } - - // traverse right - if let Some(mut right) = cost_return_on_error!(&mut cost, self.walk(false)) { - cost_return_on_error!( - &mut cost, - right.traverse_for_trunk(proof, remaining_depth - 1, false) - ); - proof.push(Op::Child); - } - - Ok(()).wrap_with_cost(cost) - } -} - -/// Builds a chunk proof by iterating over values in a RocksDB, ending the chunk -/// when a node with key `end_key` is encountered. -/// -/// Advances the iterator for all nodes in the chunk and the `end_key` (if any). 
-#[cfg(feature = "full")] -pub(crate) fn get_next_chunk( - iter: &mut impl RawIterator, - end_key: Option<&[u8]>, -) -> CostResult, Error> { - let mut cost = OperationCost::default(); - - let mut chunk = Vec::with_capacity(512); - let mut stack = Vec::with_capacity(32); - let mut node = Tree::new(vec![], vec![], None, BasicMerk).unwrap_add_cost(&mut cost); - - while iter.valid().unwrap_add_cost(&mut cost) { - let key = iter.key().unwrap_add_cost(&mut cost).unwrap(); - - if let Some(end_key) = end_key { - if key == end_key { - break; - } - } - - let encoded_node = iter.value().unwrap_add_cost(&mut cost).unwrap(); - cost_return_on_error_no_add!( - &cost, - Tree::decode_into(&mut node, vec![], encoded_node).map_err(EdError) - ); - - // TODO: Only use the KVValueHash if needed, saves 32 bytes - // only needed when dealing with references and trees - let kv = Node::KVValueHashFeatureType( - key.to_vec(), - node.value_ref().to_vec(), - *node.value_hash(), - node.feature_type(), - ); - - chunk.push(Op::Push(kv)); - - if node.link(true).is_some() { - chunk.push(Op::Parent); - } - - if let Some(child) = node.link(false) { - stack.push(child.key().to_vec()); - } else { - while let Some(top_key) = stack.last() { - if key < top_key.as_slice() { - break; - } - stack.pop(); - chunk.push(Op::Child); - } - } - - iter.next().unwrap_add_cost(&mut cost); - } - - if iter.valid().unwrap_add_cost(&mut cost) { - iter.next().unwrap_add_cost(&mut cost); - } - - Ok(chunk).wrap_with_cost(cost) -} - -/// Verifies a leaf chunk proof by executing its operators. Checks that there -/// were no abridged nodes (Hash or KVHash) and the proof hashes to -/// `expected_hash`. -#[cfg(feature = "full")] -#[allow(dead_code)] // TODO: remove when proofs will be enabled -pub(crate) fn verify_leaf>>( - ops: I, - expected_hash: CryptoHash, -) -> CostResult { - execute(ops, false, |node| match node { - Node::KVValueHash(..) | Node::KV(..) | Node::KVValueHashFeatureType(..) 
=> Ok(()), - _ => Err(Error::ChunkRestoringError( - "Leaf chunks must contain full subtree".to_string(), - )), - }) - .flat_map_ok(|tree| { - tree.hash().map(|hash| { - if hash != expected_hash { - Error::ChunkRestoringError(format!( - "Leaf chunk proof did not match expected hash\n\tExpected: {:?}\n\tActual: \ - {:?}", - expected_hash, - tree.hash() - )); - } - Ok(tree) - }) - }) -} - -/// Verifies a trunk chunk proof by executing its operators. Ensures the -/// resulting tree contains a valid height proof, the trunk is the correct -/// height, and all of its inner nodes are not abridged. Returns the tree and -/// the height given by the height proof. -#[cfg(feature = "full")] -pub(crate) fn verify_trunk>>( - ops: I, -) -> CostResult<(ProofTree, usize), Error> { - let mut cost = OperationCost::default(); - - fn verify_height_proof(tree: &ProofTree) -> Result { - Ok(match tree.child(true) { - Some(child) => { - if let Node::Hash(_) = child.tree.node { - return Err(Error::ChunkRestoringError( - "Expected height proof to only contain KV and KVHash nodes".to_string(), - )); - } - verify_height_proof(&child.tree)? + 1 - } - None => 1, - }) - } - - fn verify_completeness( - tree: &ProofTree, - remaining_depth: usize, - leftmost: bool, - ) -> Result<(), Error> { - let recurse = |left, leftmost| { - if let Some(child) = tree.child(left) { - verify_completeness(&child.tree, remaining_depth - 1, left && leftmost)?; - } - Ok(()) - }; - - if remaining_depth > 0 { - match tree.node { - Node::KVValueHash(..) | Node::KV(..) | Node::KVValueHashFeatureType(..) 
=> {} - _ => { - return Err(Error::ChunkRestoringError( - "Expected trunk inner nodes to contain keys and values".to_string(), - )) - } - } - recurse(true, leftmost)?; - recurse(false, false) - } else if !leftmost { - match tree.node { - Node::Hash(_) => Ok(()), - _ => Err(Error::ChunkRestoringError( - "Expected trunk leaves to contain Hash nodes".to_string(), - )), - } - } else { - match &tree.node { - Node::KVHash(_) => Ok(()), - _ => Err(Error::ChunkRestoringError( - "Expected leftmost trunk leaf to contain KVHash node".to_string(), - )), - } - } - } - - let mut kv_only = true; - let tree = cost_return_on_error!( - &mut cost, - execute(ops, false, |node| { - kv_only &= matches!(node, Node::KVValueHash(..)) - || matches!(node, Node::KV(..)) - || matches!(node, Node::KVValueHashFeatureType(..)); - Ok(()) - }) - ); - - let height = cost_return_on_error_no_add!(&cost, verify_height_proof(&tree)); - let trunk_height = height / 2; - - if trunk_height < MIN_TRUNK_HEIGHT { - if !kv_only { - return Err(Error::ChunkRestoringError( - "Leaf chunks must contain full subtree".to_string(), - )) - .wrap_with_cost(cost); - } - } else { - cost_return_on_error_no_add!(&cost, verify_completeness(&tree, trunk_height, true)); - } - - Ok((tree, height)).wrap_with_cost(cost) -} - +pub mod chunk; +pub mod chunk_op; +pub mod error; #[cfg(feature = "full")] -#[cfg(test)] -mod tests { - use std::usize; - - use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; - use grovedb_storage::StorageContext; - - use super::{super::tree::Tree, *}; - use crate::{ - test_utils::*, - tree::{NoopCommit, PanicSource, Tree as BaseTree}, - }; - - #[derive(Default)] - struct NodeCounts { - hash: usize, - kv_hash: usize, - kv: usize, - kv_value_hash: usize, - kv_digest: usize, - kv_ref_value_hash: usize, - kv_value_hash_feature_type: usize, - } - - fn count_node_types(tree: Tree) -> NodeCounts { - let mut counts = NodeCounts::default(); - - tree.visit_nodes(&mut |node| { - match node 
{ - Node::Hash(_) => counts.hash += 1, - Node::KVHash(_) => counts.kv_hash += 1, - Node::KV(..) => counts.kv += 1, - Node::KVValueHash(..) => counts.kv_value_hash += 1, - Node::KVDigest(..) => counts.kv_digest += 1, - Node::KVRefValueHash(..) => counts.kv_ref_value_hash += 1, - Node::KVValueHashFeatureType(..) => counts.kv_value_hash_feature_type += 1, - }; - }); - - counts - } - - #[test] - fn small_trunk_roundtrip() { - let mut tree = make_tree_seq(31); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - // println!("{:?}", &proof); - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 32); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn big_trunk_roundtrip() { - let mut tree = make_tree_seq(2u64.pow(MIN_TRUNK_HEIGHT as u32 * 2 + 1) - 1); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(has_more); - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - - let counts = count_node_types(trunk); - // are these formulas correct for all values of `MIN_TRUNK_HEIGHT`? 
🤔 - assert_eq!( - counts.hash, - 2usize.pow(MIN_TRUNK_HEIGHT as u32) + MIN_TRUNK_HEIGHT - 1 - ); - assert_eq!( - counts.kv_value_hash_feature_type, - 2usize.pow(MIN_TRUNK_HEIGHT as u32) - 1 - ); - assert_eq!(counts.kv_hash, MIN_TRUNK_HEIGHT + 1); - } - - #[test] - fn one_node_tree_trunk_roundtrip() { - let mut tree = BaseTree::new(vec![0], vec![], None, BasicMerk).unwrap(); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .unwrap(); - - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 1); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn two_node_right_heavy_tree_trunk_roundtrip() { - // 0 - // \ - // 1 - let mut tree = BaseTree::new(vec![0], vec![], None, BasicMerk) - .unwrap() - .attach( - false, - Some(BaseTree::new(vec![1], vec![], None, BasicMerk).unwrap()), - ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .unwrap(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 2); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn two_node_left_heavy_tree_trunk_roundtrip() { - // 1 - // / - // 0 - let mut tree = BaseTree::new(vec![1], vec![], None, BasicMerk) - .unwrap() - .attach( - true, - Some(BaseTree::new(vec![0], vec![], 
None, BasicMerk).unwrap()), - ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .unwrap(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 2); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn three_node_tree_trunk_roundtrip() { - // 1 - // / \ - // 0 2 - let mut tree = BaseTree::new(vec![1], vec![], None, BasicMerk) - .unwrap() - .attach( - true, - Some(BaseTree::new(vec![0], vec![], None, BasicMerk).unwrap()), - ) - .attach( - false, - Some(BaseTree::new(vec![2], vec![], None, BasicMerk).unwrap()), - ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .unwrap(); - - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 3); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn leaf_chunk_roundtrip() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(0..31); - merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None) - .unwrap() - .unwrap(); - - merk.commit(); - - let root_node = merk.tree.take(); - let root_key = root_node.as_ref().unwrap().key().to_vec(); - merk.tree.set(root_node); - - // whole tree as 1 leaf - let mut iter = merk.storage.raw_iter(); - iter.seek_to_first().unwrap(); - let chunk = get_next_chunk(&mut iter, 
None).unwrap().unwrap(); - let ops = chunk.into_iter().map(Ok); - let chunk = verify_leaf(ops, merk.root_hash().unwrap()) - .unwrap() - .unwrap(); - let counts = count_node_types(chunk); - assert_eq!(counts.kv_value_hash_feature_type, 31); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_hash, 0); - drop(iter); - - let mut iter = merk.storage.raw_iter(); - iter.seek_to_first().unwrap(); - - // left leaf - let chunk = get_next_chunk(&mut iter, Some(root_key.as_slice())) - .unwrap() - .unwrap(); - let ops = chunk.into_iter().map(Ok); - let chunk = verify_leaf( - ops, - [ - 78, 230, 25, 188, 163, 2, 169, 185, 254, 174, 196, 206, 162, 187, 245, 188, 74, 70, - 220, 160, 35, 78, 120, 122, 61, 90, 241, 105, 35, 180, 133, 98, - ], - ) - .unwrap() - .unwrap(); - let counts = count_node_types(chunk); - assert_eq!(counts.kv_value_hash_feature_type, 15); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_hash, 0); - - // right leaf - let chunk = get_next_chunk(&mut iter, None).unwrap().unwrap(); - let ops = chunk.into_iter().map(Ok); - let chunk = verify_leaf( - ops, - [ - 21, 147, 223, 29, 106, 19, 23, 38, 233, 134, 245, 44, 246, 179, 48, 19, 111, 50, - 19, 191, 134, 37, 165, 5, 35, 111, 233, 213, 212, 5, 92, 45, - ], - ) - .unwrap() - .unwrap(); - let counts = count_node_types(chunk); - assert_eq!(counts.kv_value_hash_feature_type, 15); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_hash, 0); - } -} +pub mod util; diff --git a/merk/src/proofs/chunk/binary_range.rs b/merk/src/proofs/chunk/binary_range.rs new file mode 100644 index 000000000..2acaa728f --- /dev/null +++ b/merk/src/proofs/chunk/binary_range.rs @@ -0,0 +1,239 @@ +// MIT LICENSE +// +// Copyright (c) 2021 Dash Core Group +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, 
+// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +const LEFT: bool = true; +const RIGHT: bool = false; + +/// Utility type for range bisection and advancement +#[derive(Debug)] +pub(crate) struct BinaryRange { + start: usize, + end: usize, +} + +impl BinaryRange { + /// Returns a new BinaryRange and ensures that start < end + /// and min start value is 1 + pub fn new(start: usize, end: usize) -> Result { + // start should be less than or equal to end + if start > end { + return Err(String::from("start value cannot be greater than end value")); + } + + // the minimum value for start should be 1 + // that way the length of the maximum length + // of the range is usize::MAX and not + // usize::MAX + 1 + if start < 1 { + return Err(String::from( + "minimum start value should be 1 to avoid len overflow", + )); + } + + Ok(Self { start, end }) + } + + /// Returns the len of the current range + pub fn len(&self) -> usize { + self.end - self.start + 1 + } + + /// Returns true when the len of the range is odd + pub fn odd(&self) -> bool { + (self.len() % 2) != 0 + } + + /// Determines if a value belongs to the left half or right half of a range + /// returns true for left and false for right + /// returns 
None if value is outside the range or range len is odd + pub fn which_half(&self, value: usize) -> Option { + // return None if value is not in the range + if value < self.start || value > self.end { + return None; + } + + // can't divide the range into equal halves + // when odd, so return None + if self.odd() { + return None; + } + + let half_size = self.len() / 2; + let second_half_start = self.start + half_size; + + if value >= second_half_start { + return Some(RIGHT); + } + + Some(LEFT) + } + + /// Returns a new range that only contains elements on the specified half + /// returns an error if range is not odd + pub fn get_half(&self, left: bool) -> Result { + if self.odd() { + return Err(String::from("cannot break odd range in half")); + } + + let half_size = self.len() / 2; + let second_half_start = self.start + half_size; + + Ok(if left { + Self { + start: self.start, + end: second_half_start - 1, + } + } else { + Self { + start: second_half_start, + end: self.end, + } + }) + } + + /// Returns a new range that increments the start value + /// also return the previous start value + /// returns an error if the operation will cause start to be larger than end + pub fn advance_range_start(&self) -> Result<(Self, usize), String> { + // check if operation will cause start > end + if self.start == self.end { + return Err(String::from( + "can't advance start when start is equal to end", + )); + } + + Ok(( + Self { + start: self.start + 1, + end: self.end, + }, + self.start, + )) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn cannot_create_invalid_range() { + let invalid_range = BinaryRange::new(5, 3); + assert!(invalid_range.is_err()); + } + + #[test] + fn can_get_range_len() { + let range = BinaryRange::new(2, 5).expect("should create range"); + assert_eq!(range.len(), 4); + assert!(!range.odd()); + + let range = BinaryRange::new(2, 2).expect("should create range"); + assert_eq!(range.len(), 1); + assert!(range.odd()); + } + + #[test] + fn 
can_determine_correct_half() { + let range = BinaryRange::new(3, 7).expect("should create range"); + assert_eq!(range.len(), 5); + assert!(range.odd()); + + // cannot determine half for value outside a range + assert!(range.which_half(1).is_none()); + assert!(range.which_half(7).is_none()); + + // cannot determine half when range is odd + assert!(range.which_half(3).is_none()); + + let range = BinaryRange::new(3, 6).expect("should create range"); + assert_eq!(range.len(), 4); + assert!(!range.odd()); + + assert_eq!(range.which_half(3), Some(LEFT)); + assert_eq!(range.which_half(4), Some(LEFT)); + assert_eq!(range.which_half(5), Some(RIGHT)); + assert_eq!(range.which_half(6), Some(RIGHT)); + } + + #[test] + fn can_advance_start_range() { + let range = BinaryRange::new(2, 5).expect("should create range"); + assert_eq!(range.len(), 4); + assert_eq!(range.start, 2); + + // advance the range + let (range, prev_start) = range.advance_range_start().expect("should advance range"); + assert_eq!(prev_start, 2); + assert_eq!(range.len(), 3); + assert_eq!(range.start, 3); + + // advance range + let (range, prev_start) = range.advance_range_start().expect("should advance range"); + assert_eq!(prev_start, 3); + assert_eq!(range.len(), 2); + assert_eq!(range.start, 4); + + // advance range + let (range, prev_start) = range.advance_range_start().expect("should advance range"); + assert_eq!(prev_start, 4); + assert_eq!(range.len(), 1); + assert_eq!(range.start, 5); + + // should not be allowed to advance the range anymore + let advance_result = range.advance_range_start(); + assert!(advance_result.is_err()); + } + + #[test] + fn can_break_range_into_halves() { + let range = BinaryRange::new(2, 10).expect("should create range"); + assert_eq!(range.len(), 9); + assert!(range.odd()); + assert!(range.get_half(LEFT).is_err()); + + let range = BinaryRange::new(2, 11).expect("should create range"); + assert_eq!(range.len(), 10); + assert!(!range.odd()); + + let left_range = 
range.get_half(LEFT).expect("should get sub range"); + assert_eq!(left_range.start, 2); + assert_eq!(left_range.end, 6); + + let right_range = range.get_half(RIGHT).expect("should get sub range"); + assert_eq!(right_range.start, 7); + assert_eq!(right_range.end, 11); + + // right_range is false, advance to make even + let (right_range, _prev) = right_range.advance_range_start().expect("should advance"); + let right_left_range = right_range.get_half(LEFT).expect("should get sub range"); + assert_eq!(right_left_range.len(), 2); + assert_eq!(right_left_range.start, 8); + assert_eq!(right_left_range.end, 9); + } +} diff --git a/merk/src/proofs/chunk/chunk.rs b/merk/src/proofs/chunk/chunk.rs new file mode 100644 index 000000000..4960c53f9 --- /dev/null +++ b/merk/src/proofs/chunk/chunk.rs @@ -0,0 +1,806 @@ +// MIT LICENSE +// +// Copyright (c) 2021 Dash Core Group +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; +use grovedb_version::version::GroveVersion; + +// TODO: add copyright comment +use crate::proofs::{Node, Op, Tree}; +use crate::{ + proofs::{chunk::error::ChunkError, tree::execute}, + tree::{kv::ValueDefinedCostType, Fetch, RefWalker}, + CryptoHash, Error, +}; + +pub const LEFT: bool = true; +pub const RIGHT: bool = false; + +impl<'a, S> RefWalker<'a, S> +where + S: Fetch + Sized + Clone, +{ + /// Returns a chunk of a given depth from a RefWalker + pub fn create_chunk( + &mut self, + depth: usize, + grove_version: &GroveVersion, + ) -> Result, Error> { + // build the proof vector + let mut proof = vec![]; + + self.create_chunk_internal(&mut proof, depth, grove_version)?; + + Ok(proof) + } + + fn create_chunk_internal( + &mut self, + proof: &mut Vec, + remaining_depth: usize, + grove_version: &GroveVersion, + ) -> Result<(), Error> { + // at some point we will reach the depth + // here we need to put the node hash + if remaining_depth == 0 { + proof.push(Op::Push(self.to_hash_node().unwrap())); + return Ok(()); + } + + // traverse left + let has_left_child = self.tree().link(true).is_some(); + if has_left_child { + let mut left = self + .walk( + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap()? + .expect("confirmed is some"); + left.create_chunk_internal(proof, remaining_depth - 1, grove_version)?; + } + + // add current node's data + proof.push(Op::Push(self.to_kv_value_hash_feature_type_node())); + + if has_left_child { + proof.push(Op::Parent); + } + + // traverse right + if let Some(mut right) = self + .walk( + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap()? 
+ { + right.create_chunk_internal(proof, remaining_depth - 1, grove_version)?; + + proof.push(Op::Child); + } + + Ok(()) + } + + /// Returns a chunk of a given depth after applying some traversal + /// instruction to the RefWalker + pub fn traverse_and_build_chunk( + &mut self, + instructions: &[bool], + depth: usize, + grove_version: &GroveVersion, + ) -> Result, Error> { + // base case + if instructions.is_empty() { + // we are at the desired node + return self.create_chunk(depth, grove_version); + } + + // link must exist + let has_link = self.tree().link(instructions[0]).is_some(); + if !has_link { + return Err(Error::ChunkingError(ChunkError::BadTraversalInstruction( + "no node found at given traversal instruction", + ))); + } + + // grab child + let mut child = self + .walk( + instructions[0], + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap()? + .expect("confirmed link exists so cannot be none"); + + // recurse on child + child.traverse_and_build_chunk(&instructions[1..], depth, grove_version) + } + + /// Returns the smallest amount of tree ops, that can convince + /// a verifier of the tree height + /// the generated subtree is of this form + /// kv_hash + /// / \ + /// kv_hash node_hash + /// / \ + /// kv_hash node_hash + /// . + /// . + /// . 
+ pub fn generate_height_proof( + &mut self, + proof: &mut Vec, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { + // TODO: look into making height proofs more efficient + // they will always be used in the context of some + // existing chunk, we don't want to repeat nodes unnecessarily + let mut cost = OperationCost::default(); + + let maybe_left = cost_return_on_error!( + &mut cost, + self.walk( + LEFT, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + ); + let has_left_child = maybe_left.is_some(); + + // recurse to leftmost element + if let Some(mut left) = maybe_left { + cost_return_on_error!(&mut cost, left.generate_height_proof(proof, grove_version)) + } + + proof.push(Op::Push(self.to_kvhash_node())); + + if has_left_child { + proof.push(Op::Parent); + } + + if let Some(right) = self.tree().link(RIGHT) { + proof.push(Op::Push(Node::Hash(*right.hash()))); + proof.push(Op::Child); + } + + Ok(()).wrap_with_cost(cost) + } +} + +// TODO: add documentation +pub fn verify_height_proof(proof: Vec, expected_root_hash: CryptoHash) -> Result { + // todo: remove unwrap + let height_proof_tree = execute(proof.into_iter().map(Ok), false, |_| Ok(())).unwrap()?; + + // todo: deal with cost + // todo: deal with old chunk restoring error + if height_proof_tree.hash().unwrap() != expected_root_hash { + return Err(Error::OldChunkRestoringError( + "invalid height proof: root hash mismatch".to_string(), + )); + } + + verify_height_tree(&height_proof_tree) +} + +// TODO: add documentation +pub fn verify_height_tree(height_proof_tree: &Tree) -> Result { + return Ok(match height_proof_tree.child(LEFT) { + Some(child) => { + if !matches!(child.tree.node, Node::KVHash(..)) { + // todo deal with old chunk restoring error + return Err(Error::OldChunkRestoringError( + "Expected left nodes in height proofs to be kvhash nodes".to_string(), + )); + } + verify_height_tree(&child.tree)? 
+ 1 + } + None => 1, + }); +} + +#[cfg(test)] +pub mod tests { + use ed::Encode; + use grovedb_version::version::GroveVersion; + + use crate::{ + proofs::{ + chunk::chunk::{verify_height_proof, LEFT, RIGHT}, + tree::execute, + Node, Op, + }, + test_utils::make_tree_seq_with_start_key, + tree::{kv::ValueDefinedCostType, RefWalker, TreeNode}, + PanicSource, TreeFeatureType, + }; + + fn build_tree_10_nodes() -> TreeNode { + let grove_version = GroveVersion::latest(); + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // / \ \ + // 4 6 9 + make_tree_seq_with_start_key(10, [0; 8].to_vec(), grove_version) + } + + /// Traverses a tree to a certain node and returns the node hash of that + /// node + pub fn traverse_get_node_hash( + walker: &mut RefWalker, + traverse_instructions: &[bool], + grove_version: &GroveVersion, + ) -> Node { + traverse_and_apply( + walker, + traverse_instructions, + |walker| walker.to_hash_node().unwrap(), + grove_version, + ) + } + + /// Traverses a tree to a certain node and returns the kv_feature_type of + /// that node + pub fn traverse_get_kv_feature_type( + walker: &mut RefWalker, + traverse_instructions: &[bool], + grove_version: &GroveVersion, + ) -> Node { + traverse_and_apply( + walker, + traverse_instructions, + |walker| walker.to_kv_value_hash_feature_type_node(), + grove_version, + ) + } + /// Traverses a tree to a certain node and returns the kv_hash of + /// that node + pub fn traverse_get_kv_hash( + walker: &mut RefWalker, + traverse_instructions: &[bool], + grove_version: &GroveVersion, + ) -> Node { + traverse_and_apply( + walker, + traverse_instructions, + |walker| walker.to_kvhash_node(), + grove_version, + ) + } + + /// Traverses a tree to a certain node and returns the result of applying + /// some arbitrary function + pub fn traverse_and_apply( + walker: &mut RefWalker, + traverse_instructions: &[bool], + apply_fn: T, + grove_version: &GroveVersion, + ) -> Node + where + T: Fn(&mut RefWalker) -> Node, + { + if 
traverse_instructions.is_empty() { + return apply_fn(walker); + } + + let mut child = walker + .walk( + traverse_instructions[0], + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .unwrap() + .unwrap(); + traverse_and_apply( + &mut child, + &traverse_instructions[1..], + apply_fn, + grove_version, + ) + } + + #[test] + fn build_chunk_from_root_depth_0() { + let grove_version = GroveVersion::latest(); + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // should return the node hash of the root node + let chunk = tree_walker + .create_chunk(0, grove_version) + .expect("should build chunk"); + assert_eq!(chunk.len(), 1); + assert_eq!( + chunk[0], + Op::Push(traverse_get_node_hash(&mut tree_walker, &[], grove_version)) + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(computed_tree.hash().unwrap(), tree.hash().unwrap()); + } + + #[test] + fn build_chunk_from_root_depth_1() { + let grove_version = GroveVersion::latest(); + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // build chunk for depth 1 + // expected: + // 3 + // / \ + // Hash(1) Hash(7) + let chunk = tree_walker + .create_chunk(1, grove_version) + .expect("should build chunk"); + assert_eq!(chunk.len(), 5); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT], + grove_version + )), + Op::Child + ] + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(computed_tree.hash().unwrap(), tree.hash().unwrap()); + } + + #[test] + fn 
build_chunk_from_root_depth_3() { + let grove_version = GroveVersion::latest(); + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // build chunk for depth 3 + // expected: + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // / \ \ + // H(4) H(6) H(9) + let chunk = tree_walker + .create_chunk(3, grove_version) + .expect("should build chunk"); + assert_eq!(chunk.len(), 19); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT], + grove_version + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT], + grove_version + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT], + grove_version + )), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, RIGHT, RIGHT], + grove_version + )), + Op::Child, + Op::Child, + Op::Child + ] + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(computed_tree.hash().unwrap(), tree.hash().unwrap()); + } + + #[test] + fn build_chunk_from_root_depth_max_depth() { + let grove_version = GroveVersion::latest(); + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + 
+ // build chunk for entire tree (depth 4) + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // / \ \ + // 4 6 9 + let chunk = tree_walker + .create_chunk(4, grove_version) + .expect("should build chunk"); + assert_eq!(chunk.len(), 19); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT], + grove_version + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT], + grove_version + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT, RIGHT], + grove_version + )), + Op::Child, + Op::Child, + Op::Child + ] + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(computed_tree.hash().unwrap(), tree.hash().unwrap()); + } + + #[test] + fn chunk_greater_than_max_should_equal_max_depth() { + let grove_version = GroveVersion::latest(); + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // build chunk with depth greater than tree + // we should get the same result as building with the exact depth + let large_depth_chunk = tree_walker + .create_chunk(100, 
grove_version) + .expect("should build chunk"); + let exact_depth_chunk = tree_walker + .create_chunk(4, grove_version) + .expect("should build chunk"); + assert_eq!(large_depth_chunk, exact_depth_chunk); + + let tree_a = execute(large_depth_chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + let tree_b = execute(exact_depth_chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree_a.hash().unwrap(), tree_b.hash().unwrap()); + } + + #[test] + fn build_chunk_after_traversal_depth_2() { + let grove_version = GroveVersion::latest(); + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // traverse to the right first then build chunk + // expected + // 7 + // / \ + // 5 8 + // / \ \ + // H(4) H(6) H(9) + + // right traversal + let chunk = tree_walker + .traverse_and_build_chunk(&[RIGHT], 2, grove_version) + .expect("should build chunk"); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT], + grove_version + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT], + grove_version + )), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, RIGHT, RIGHT], + grove_version + )), + Op::Child, + Op::Child, + ] + ); + + // the hash of the tree computed from the chunk + // should be the same as the node_hash of the element + // on the right + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!( + 
Node::Hash(computed_tree.hash().unwrap()), + traverse_get_node_hash(&mut tree_walker, &[RIGHT], grove_version) + ); + } + + #[test] + fn build_chunk_after_traversal_depth_1() { + let grove_version = GroveVersion::latest(); + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // traverse with [right, left] and then build chunk of depth 1 + // expected + // 5 + // / \ + // H(4) H(6) + + // instruction traversal + let chunk = tree_walker + .traverse_and_build_chunk(&[RIGHT, LEFT], 1, grove_version) + .expect("should build chunk"); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT], + grove_version + )), + Op::Child, + ] + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!( + Node::Hash(computed_tree.hash().unwrap()), + traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT], grove_version) + ); + } + + #[test] + fn test_chunk_encoding() { + let chunk = vec![ + Op::Push(Node::Hash([0; 32])), + Op::Push(Node::KVValueHashFeatureType( + vec![1], + vec![2], + [0; 32], + TreeFeatureType::BasicMerkNode, + )), + ]; + let encoded_chunk = chunk.encode().expect("should encode"); + assert_eq!(encoded_chunk.len(), 33 + 39); + assert_eq!( + encoded_chunk.len(), + chunk.encoding_length().expect("should get encoding length") + ); + } + + #[test] + fn test_height_proof_generation() { + let grove_version = GroveVersion::latest(); + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + let mut height_proof = vec![]; + tree_walker + .generate_height_proof(&mut height_proof, grove_version) + .unwrap() + 
.expect("should generate height proof"); + + assert_eq!(height_proof.len(), 9); + assert_eq!( + height_proof, + vec![ + Op::Push(traverse_get_kv_hash( + &mut tree_walker, + &[LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_hash( + &mut tree_walker, + &[LEFT], + grove_version + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, RIGHT], + grove_version + )), + Op::Child, + Op::Push(traverse_get_kv_hash(&mut tree_walker, &[], grove_version)), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT], + grove_version + )), + Op::Child, + ] + ); + } + + #[test] + fn test_height_proof_verification() { + let grove_version = GroveVersion::latest(); + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + let mut height_proof = vec![]; + tree_walker + .generate_height_proof(&mut height_proof, grove_version) + .unwrap() + .expect("should generate height proof"); + + let verified_height = verify_height_proof(height_proof, tree.hash().unwrap()) + .expect("should verify height proof"); + + // doesn't represent the max height of the tree + assert_eq!(verified_height, 3); + } +} diff --git a/merk/src/proofs/chunk/chunk_op.rs b/merk/src/proofs/chunk/chunk_op.rs new file mode 100644 index 000000000..6d0d08cdb --- /dev/null +++ b/merk/src/proofs/chunk/chunk_op.rs @@ -0,0 +1,169 @@ +// MIT LICENSE +// +// Copyright (c) 2021 Dash Core Group +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice 
+// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::io::{Read, Write}; + +use ed::{Decode, Encode}; +use integer_encoding::{VarInt, VarIntReader}; + +use crate::proofs::Op; + +/// Represents the chunk generated from a given starting chunk id +#[derive(PartialEq, Debug)] +pub enum ChunkOp { + ChunkId(Vec), + Chunk(Vec), +} + +impl Encode for ChunkOp { + fn encode_into(&self, dest: &mut W) -> ed::Result<()> { + match self { + Self::ChunkId(instruction) => { + // write the marker then the len + let _ = dest.write_all(&[0_u8]); + dest.write_all(instruction.len().encode_var_vec().as_slice())?; + let instruction_as_binary: Vec = instruction + .iter() + .map(|v| if *v { 1_u8 } else { 0_u8 }) + .collect(); + dest.write_all(&instruction_as_binary)?; + } + Self::Chunk(chunk) => { + let _ = dest.write_all(&[1_u8]); + // chunk len represents the number of ops not the total encoding len of ops + dest.write_all(chunk.len().encode_var_vec().as_slice())?; + for op in chunk { + dest.write_all(&op.encode()?)?; + } + } + } + + Ok(()) + } + + fn encoding_length(&self) -> ed::Result { + Ok(match self { + Self::ChunkId(instruction) => { + 1 + instruction.len().encode_var_vec().len() + instruction.len() + } + Self::Chunk(chunk) => { + 1 + chunk.len().encode_var_vec().len() + chunk.encoding_length()? 
+ } + }) + } +} + +impl Decode for ChunkOp { + fn decode(input: R) -> ed::Result { + let mut chunk_op = ChunkOp::ChunkId(vec![]); + Self::decode_into(&mut chunk_op, input)?; + Ok(chunk_op) + } + + fn decode_into(&mut self, mut input: R) -> ed::Result<()> { + let mut marker = [0_u8; 1]; + input.read_exact(&mut marker)?; + + match marker[0] { + 0 => { + let length = input.read_varint()?; + let mut instruction_as_binary = vec![0_u8; length]; + input.read_exact(&mut instruction_as_binary)?; + + let instruction: Vec = instruction_as_binary + .into_iter() + .map(|v| v == 1_u8) + .collect(); + + *self = ChunkOp::ChunkId(instruction); + } + 1 => { + let ops_length = input.read_varint()?; + let mut chunk = Vec::with_capacity(ops_length); + + for _ in 0..ops_length { + let op = Decode::decode(&mut input)?; + chunk.push(op); + } + + *self = ChunkOp::Chunk(chunk); + } + _ => return Err(ed::Error::UnexpectedByte(marker[0])), + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use ed::{Decode, Encode}; + + use crate::proofs::{ + chunk::{ + chunk::{LEFT, RIGHT}, + chunk_op::ChunkOp, + }, + Node, Op, + }; + + #[test] + fn test_chunk_op_encoding() { + let chunk_op = ChunkOp::ChunkId(vec![LEFT, RIGHT]); + let encoded_chunk_op = chunk_op.encode().unwrap(); + assert_eq!(encoded_chunk_op, vec![0, 2, 1, 0]); + assert_eq!(encoded_chunk_op.len(), chunk_op.encoding_length().unwrap()); + + let chunk_op = ChunkOp::Chunk(vec![Op::Push(Node::Hash([0; 32])), Op::Child]); + let encoded_chunk_op = chunk_op.encode().unwrap(); + let mut expected_encoding = vec![1, 2]; + expected_encoding.extend(Op::Push(Node::Hash([0; 32])).encode().unwrap()); + expected_encoding.extend(Op::Child.encode().unwrap()); + assert_eq!(encoded_chunk_op, expected_encoding); + assert_eq!(encoded_chunk_op.len(), chunk_op.encoding_length().unwrap()); + } + + #[test] + fn test_chunk_op_decoding() { + let encoded_chunk_op = vec![0, 3, 1, 0, 1]; + let decoded_chunk_op = ChunkOp::decode(encoded_chunk_op.as_slice()).unwrap(); + 
assert_eq!(decoded_chunk_op, ChunkOp::ChunkId(vec![LEFT, RIGHT, LEFT])); + + let mut encoded_chunk_op = vec![1, 2]; + encoded_chunk_op.extend(Op::Push(Node::Hash([1; 32])).encode().unwrap()); + encoded_chunk_op.extend(Op::Push(Node::KV(vec![1], vec![2])).encode().unwrap()); + let decoded_chunk_op = ChunkOp::decode(encoded_chunk_op.as_slice()).unwrap(); + assert_eq!( + decoded_chunk_op, + ChunkOp::Chunk(vec![ + Op::Push(Node::Hash([1; 32])), + Op::Push(Node::KV(vec![1], vec![2])) + ]) + ); + } +} diff --git a/merk/src/proofs/chunk/error.rs b/merk/src/proofs/chunk/error.rs new file mode 100644 index 000000000..bd482666c --- /dev/null +++ b/merk/src/proofs/chunk/error.rs @@ -0,0 +1,79 @@ +// MIT LICENSE +// +// Copyright (c) 2021 Dash Core Group +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +#[derive(Debug, thiserror::Error)] +/// Chunk related errors +pub enum ChunkError { + /// Limit too small for first chunk, cannot make progress + #[error("overflow error {0}")] + LimitTooSmall(&'static str), + + /// Chunk index out of bounds + #[error("chunk index out of bounds: {0}")] + OutOfBounds(&'static str), + + /// Empty tree contains no chunks + #[error("chunk from empty tree: {0}")] + EmptyTree(&'static str), + + /// Invalid traversal instruction (points to no element) + #[error("traversal instruction invalid {0}")] + BadTraversalInstruction(&'static str), + + /// Expected ChunkId when parsing chunk ops + #[error("expected chunk id when parsing chunk op")] + ExpectedChunkId, + + /// Expected Chunk when parsing chunk ops + #[error("expected chunk when parsing chunk op")] + ExpectedChunk, + + // Restoration Errors + /// Chunk restoration starts from the root chunk, this lead to a set of + /// root hash values to verify other chunks .... + /// Hence before you can verify a child you need to have verified it's + /// parent. + #[error("unexpected chunk: cannot verify chunk because verification hash is not in memory")] + UnexpectedChunk, + + /// Invalid chunk proof when verifying chunk + #[error("invalid chunk proof: {0}")] + InvalidChunkProof(&'static str), + + /// Invalid multi chunk + #[error("invalid multi chunk: {0}")] + InvalidMultiChunk(&'static str), + + #[error("called finalize too early still expecting chunks")] + RestorationNotComplete, + + /// Internal error, this should never surface + /// if it does, it means wrong assumption in code + #[error("internal error {0}")] + InternalError(&'static str), +} diff --git a/merk/src/proofs/chunk/util.rs b/merk/src/proofs/chunk/util.rs new file mode 100644 index 000000000..3cbc09424 --- /dev/null +++ b/merk/src/proofs/chunk/util.rs @@ -0,0 +1,683 @@ +//! Collection of state independent algorithms needed for facilitate chunk +//! 
production and restoration + +use std::io::Write; + +// TODO: figure out better nomenclature +use crate::{proofs::chunk::binary_range::BinaryRange, Error}; +use crate::{ + proofs::chunk::{ + chunk::{LEFT, RIGHT}, + error::{ChunkError, ChunkError::BadTraversalInstruction}, + }, + Error::InternalError, +}; + +/// Represents the height as a linear combination of 3 amd 2 +/// of the form 3x + 2y +/// this breaks the tree into layers of height 3 or 2 +/// the minimum chunk height is 2, so if tree height is less than 2 +/// we just return a single layer of height 2 +fn chunk_height_per_layer(height: usize) -> Vec { + let mut two_count = 0; + let mut three_count = height / 3; + + if height == 0 { + return vec![]; + } + + // minimum chunk height is 2, if tree height is less than 2 + // return a single layer with chunk height 2 + if height < 2 { + two_count = 1; + } else { + match height % 3 { + 0 => { /* do nothing */ } + 1 => { + // reduce the three_count by 1 + // so the remainder becomes 3 + 1 + // which is equivalent to 2 + 2 + three_count -= 1; + two_count += 2; + } + 2 => { + // remainder is a factor of 2 + // just increase the two_count + two_count += 1; + } + // this is unreachable because height is a positive number + // remainder set after diving by 3 is fixed to [0,1,2] + _ => unreachable!(""), + } + } + + let mut layer_heights = vec![3; three_count]; + layer_heights.extend(vec![2; two_count]); + + layer_heights +} + +/// Return the layer a chunk subtree belongs to +pub fn chunk_layer(height: usize, chunk_id: usize) -> Result { + // remaining depth tells us how deep in the tree the specified chunk is + let mut remaining_depth = generate_traversal_instruction(height, chunk_id)?.len() + 1; + let layer_heights = chunk_height_per_layer(height); + + let mut layer = 1; + + while remaining_depth > 1 { + // remaining depth will always larger than the next layer height + // if it is not already 1 + // this is because a every chunk always starts at a layer boundary + // 
and remaining depth points to a chunk + debug_assert!(remaining_depth > layer_heights[layer - 1]); + + remaining_depth -= layer_heights[layer - 1]; + layer += 1; + } + + Ok(layer - 1) +} + +/// Return the depth of a chunk given the height +/// and chunk id +pub fn chunk_height(height: usize, chunk_id: usize) -> Result { + let chunk_layer = chunk_layer(height, chunk_id)?; + let layer_heights = chunk_height_per_layer(height); + + Ok(layer_heights[chunk_layer]) +} + +/// Given a tree of height h, return the number of chunks needed +/// to completely represent the tree +pub fn number_of_chunks(height: usize) -> usize { + let layer_heights = chunk_height_per_layer(height); + number_of_chunks_internal(layer_heights) +} + +/// Locates the subtree represented by a chunk id and returns +/// the number of chunks under that subtree +pub fn number_of_chunks_under_chunk_id(height: usize, chunk_id: usize) -> Result { + let chunk_layer = chunk_layer(height, chunk_id)?; + let layer_heights = chunk_height_per_layer(height); + + // we only care about the layer heights after the chunk layer + // as we are getting the number of chunks under a subtree and not + // the entire tree of height h + Ok(number_of_chunks_internal( + layer_heights[chunk_layer..].to_vec(), + )) +} + +/// Given the heights of a tree per layer, return the total number of chunks in +/// that tree +fn number_of_chunks_internal(layer_heights: Vec) -> usize { + // a layer consists of 1 or more subtrees of a given height + // here we figure out number of exit nodes from a single subtree for each layer + let mut single_subtree_exits_per_layer = layer_heights + .into_iter() + .map(exit_node_count) + .collect::>(); + + // we don't care about exit nodes from the last layer + // as that points to non-existent subtrees + single_subtree_exits_per_layer.pop(); + + // now we get the total exit nodes per layer + // by multiplying the exits per subtree with the number of subtrees on that + // layer + let mut 
chunk_counts_per_layer = vec![1]; + for i in 0..single_subtree_exits_per_layer.len() { + let previous_layer_chunk_count = chunk_counts_per_layer[i]; + let current_layer_chunk_count = + previous_layer_chunk_count * single_subtree_exits_per_layer[i]; + chunk_counts_per_layer.push(current_layer_chunk_count); + } + + chunk_counts_per_layer.into_iter().sum() +} + +/// Calculates the maximum number of exit nodes for a tree of height h. +fn exit_node_count(height: usize) -> usize { + 2_usize.pow(height as u32) +} + +/// Generate instruction for traversing to a given chunk index in a binary tree +pub fn generate_traversal_instruction( + height: usize, + chunk_index: usize, +) -> Result, Error> { + let mut instructions = vec![]; + + let total_chunk_count = number_of_chunks(height); + + // out of bounds + if chunk_index < 1 || chunk_index > total_chunk_count { + return Err(Error::ChunkingError(ChunkError::OutOfBounds( + "chunk id out of bounds", + ))); + } + + let mut chunk_range = BinaryRange::new(1, total_chunk_count).map_err(|_| { + Error::ChunkingError(ChunkError::InternalError( + "failed to initialize chunk range", + )) + })?; + + // total chunk count will always be odd because + // from the initial chunk (1) we have an even number of + // exit nodes, and they have even numbers of exit nodes ... 
+ // so total_chunk_count = 1 + some_even_number = odd + debug_assert!(chunk_range.odd()); + + // bisect and reduce the chunk range until we get to the desired chunk + // we keep track of every left right decision we make + while chunk_range.len() > 1 { + if chunk_range.odd() { + // checks if we last decision we made got us to the desired chunk id + let advance_result = chunk_range.advance_range_start().unwrap(); + chunk_range = advance_result.0; + if advance_result.1 == chunk_index { + return Ok(instructions); + } + } else { + // for even chunk range, we are at the decision point + // we can either go left or right + // we first check which half the desired chunk is + // then follow that path + let chunk_id_half = chunk_range + .which_half(chunk_index) + .expect("chunk id must exist in range"); + instructions.push(chunk_id_half); + chunk_range = chunk_range + .get_half(chunk_id_half) + .expect("confirmed range is not odd"); + } + } + + // chunk range len is exactly 1 + // this must be the desired chunk id + // return instructions that got us here + Ok(instructions) +} + +/// Determine the chunk index given the traversal instruction and the max height +/// of the tree +pub fn chunk_index_from_traversal_instruction( + traversal_instruction: &[bool], + height: usize, +) -> Result { + // empty traversal instruction points to the first chunk + if traversal_instruction.is_empty() { + return Ok(1); + } + + let mut chunk_count = number_of_chunks(height); + let mut current_chunk_index = 1; + + let mut layer_heights = chunk_height_per_layer(height); + let last_layer_height = layer_heights.pop().expect("confirmed not empty"); + + // traversal instructions should only point to the root node of chunks (chunk + // boundaries) the layer heights represent the height of each chunk layer + // the last chunk layer is at height = total_height - last_chunk_height + 1 + // traversal instructions require 1 less than height to address it + // e.g. 
height 1 is represented by [] - len of 0 + // height 2 is represented by [left] or [right] len of 1 + // therefore last chunk root node is address with total_height - + // last_chunk_height + if traversal_instruction.len() > height - last_layer_height { + return Err(Error::ChunkingError(BadTraversalInstruction( + "traversal instruction should not address nodes past the root of the last layer chunks", + ))); + } + + // verify that the traversal instruction points to a chunk boundary + let mut traversal_length = traversal_instruction.len(); + let mut relevant_layer_heights = vec![]; + for layer_height in layer_heights { + // the traversal_length should be a perfect sum of a subset of the layer_height + // if the traversal_length is not 0, it should be larger than or equal to the + // next layer height. + if traversal_length < layer_height { + return Err(Error::ChunkingError(BadTraversalInstruction( + "traversal instruction should point to a chunk boundary", + ))); + } + + traversal_length -= layer_height; + relevant_layer_heights.push(layer_height); + + if traversal_length == 0 { + break; + } + } + + // take layer_height instructions and determine the updated chunk id + let mut start_index = 0; + for layer_height in relevant_layer_heights { + let end_index = start_index + layer_height; + let subset_instructions = &traversal_instruction[start_index..end_index]; + + // offset multiplier determines what subchunk we are on based on the given + // instruction offset multiplier just converts the binary instruction to + // decimal, taking left as 0 and right as 1 i.e [left, left, left] = 0 + // means we are at subchunk 0 + let mut offset_multiplier = 0; + for (i, instruction) in subset_instructions.iter().enumerate() { + offset_multiplier += 2_usize.pow((subset_instructions.len() - i - 1) as u32) + * (1 - *instruction as usize); + } + + if chunk_count % 2 != 0 { + // remove the current chunk from the chunk count + chunk_count -= 1; + } + + chunk_count /= 
exit_node_count(layer_height); + + current_chunk_index = current_chunk_index + offset_multiplier * chunk_count + 1; + + start_index = end_index; + } + + Ok(current_chunk_index) +} + +/// Determine the chunk index given the traversal instruction and the max height +/// of the tree. This can recover from traversal instructions not pointing to a +/// chunk boundary, in such a case, it backtracks until it hits a chunk +/// boundary. +pub fn chunk_index_from_traversal_instruction_with_recovery( + traversal_instruction: &[bool], + height: usize, +) -> Result { + let chunk_index_result = chunk_index_from_traversal_instruction(traversal_instruction, height); + if chunk_index_result.is_err() { + return chunk_index_from_traversal_instruction_with_recovery( + &traversal_instruction[0..traversal_instruction.len() - 1], + height, + ); + } + chunk_index_result +} + +/// Generate instruction for traversing to a given chunk index in a binary tree, +/// returns vec bytes representation +pub fn generate_traversal_instruction_as_vec_bytes( + height: usize, + chunk_index: usize, +) -> Result, Error> { + let instruction = generate_traversal_instruction(height, chunk_index)?; + Ok(traversal_instruction_as_vec_bytes(&instruction)) +} + +/// Convert traversal instruction to bytes vec +/// 1 represents left (true) +/// 0 represents right (false) +pub fn traversal_instruction_as_vec_bytes(instruction: &[bool]) -> Vec { + instruction + .iter() + .map(|v| if *v { 1u8 } else { 0u8 }) + .collect() +} + +/// Converts a vec bytes that represents a traversal instruction +/// to a vec of bool, true = left and false = right +pub fn vec_bytes_as_traversal_instruction( + instruction_vec_bytes: &[u8], +) -> Result, Error> { + instruction_vec_bytes + .iter() + .map(|byte| match byte { + 1u8 => Ok(LEFT), + 0u8 => Ok(RIGHT), + _ => Err(Error::ChunkingError(ChunkError::BadTraversalInstruction( + "failed to parse instruction vec bytes", + ))), + }) + .collect() +} + +pub fn write_to_vec(dest: &mut W, value: 
&[u8]) -> Result<(), Error> { + dest.write_all(value) + .map_err(|_e| InternalError("failed to write to vector")) + } + + #[cfg(test)] + mod test { + + use super::*; + use crate::proofs::chunk::chunk::{LEFT, RIGHT}; + + #[test] + fn test_chunk_height_per_layer() { + let layer_heights = chunk_height_per_layer(10); + assert_eq!(layer_heights.iter().sum::(), 10); + assert_eq!(layer_heights, [3, 3, 2, 2]); + + let layer_heights = chunk_height_per_layer(45); + assert_eq!(layer_heights.iter().sum::(), 45); + assert_eq!(layer_heights, [3; 15]); + + let layer_heights = chunk_height_per_layer(2); + assert_eq!(layer_heights.iter().sum::(), 2); + assert_eq!(layer_heights, [2]); + + // height less than 2 + let layer_heights = chunk_height_per_layer(1); + assert_eq!(layer_heights.iter().sum::(), 2); + assert_eq!(layer_heights, [2]); + + let layer_heights = chunk_height_per_layer(0); + assert_eq!(layer_heights.iter().sum::(), 0); + assert_eq!(layer_heights, Vec::::new()); + } + + #[test] + fn test_exit_node_count() { + // tree with just one node has 2 exit nodes + assert_eq!(exit_node_count(1), 2); + + // tree with height 2 has 4 exit nodes + assert_eq!(exit_node_count(2), 4); + + // tree with height 6 has 64 exit nodes + assert_eq!(exit_node_count(6), 64); + } + + #[test] + fn test_number_of_chunks() { + // given a chunk of height less than 3 chunk count should be 1 + assert_eq!(number_of_chunks(1), 1); + assert_eq!(number_of_chunks(2), 1); + + // tree with height 4 should have 5 chunks + // we split the tree into 2 layers of chunk height 2 each + // first layer contains just one chunk (1), but has 4 exit nodes + // hence total chunk count = 1 + 4 = 5 + assert_eq!(number_of_chunks(4), 5); + + // tree with height 6 should have 9 chunks + // will be split into two layers of chunk height 3 = [3,3] + // first chunk takes 1, has 2^3 = 8 exit nodes + // total chunks = 1 + 8 = 9 + assert_eq!(number_of_chunks(6), 9); + + // tree with height 10 should have 329 chunks + // will be split 
into 4 layers = [3, 3, 2, 2] + // first layer has just 1 chunk, exit nodes = 2^3 = 8 + // second layer has 8 chunks, exit nodes = 2^3 * 8 = 64 + // third layer has 64 chunks, exit nodes = 2^2 * 64 = 256 + // fourth layer has 256 chunks + // total chunks = 1 + 8 + 64 + 256 = 329 chunks + assert_eq!(number_of_chunks(10), 329); + } + + #[test] + fn test_number_of_chunks_under_chunk_id() { + // tree with height less than 3 should have just 1 chunk + assert_eq!(number_of_chunks_under_chunk_id(1, 1).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(2, 1).unwrap(), 1); + + // asking for chunk out of bounds should return error + assert!(number_of_chunks_under_chunk_id(1, 3).is_err()); + + // tree with height 4 should have 5 chunks at chunk id 1 + // but 1 chunk at id 2 - 5 + assert_eq!(number_of_chunks_under_chunk_id(4, 1).unwrap(), 5); + assert_eq!(number_of_chunks_under_chunk_id(4, 2).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(4, 3).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(4, 4).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(4, 5).unwrap(), 1); + + // tree with height 10 should have 329 chunks + // layer_heights = [3, 3, 2, 2] + // chunk_id 1 = 329 + // chunk_id 2 = 41 i.e (329 - 1) / 2^3 + // chunk_id 3 = 5 i.e (41 - 1) / 2^3 + // chunk_id 4 = 1 i.e (5 - 1) / 2^2 + // chunk_id 5 = 1 on the same layer as 4 + // chunk_id 43 = 41 as chunk 43 should wrap back to the same layer as chunk_id 2 + // chunk_id 44 = mirrors chunk_id 3 + // chunk_id 45 = mirrors chunk_id 4 + // chunk_id 46 = mirrors chunk_id 5 + assert_eq!(number_of_chunks_under_chunk_id(10, 1).unwrap(), 329); + assert_eq!(number_of_chunks_under_chunk_id(10, 2).unwrap(), 41); + assert_eq!(number_of_chunks_under_chunk_id(10, 3).unwrap(), 5); + assert_eq!(number_of_chunks_under_chunk_id(10, 4).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(10, 5).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(10, 43).unwrap(), 41); + 
assert_eq!(number_of_chunks_under_chunk_id(10, 44).unwrap(), 5); + assert_eq!(number_of_chunks_under_chunk_id(10, 45).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(10, 46).unwrap(), 1); + } + + #[test] + fn test_traversal_instruction_generation() { + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // / \ \ + // 4 6 9 + // height: 4 + // layer_height: 3, 3 + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // ............................ + // / \ \ + // 4 6 9 + // 5 chunks + // chunk 1 entry - 3 + // chunk 2 entry - 0 + // chunk 3 entry - 2 + // chunk 4 entry - 5 + // chunk 5 entry - 8 + + // chunk 1 entry - 3 is at the top of the tree so empty instruction set + let instruction = + generate_traversal_instruction(4, 1).expect("should generate traversal instruction"); + let empty_instruction: &[bool] = &[]; + assert_eq!(instruction, empty_instruction); + + // chunk 2 entry - 0 + // go left twice from root i.e 3 left -> 1 left -> 0 + let instruction = + generate_traversal_instruction(4, 2).expect("should generate traversal instruction"); + assert_eq!(instruction, &[LEFT, LEFT]); + + // chunk 3 entry - 2 + // go left then right from root i.e 3 left -> 1 right -> 2 + let instruction = + generate_traversal_instruction(4, 3).expect("should generate traversal instruction"); + assert_eq!(instruction, &[LEFT, RIGHT]); + + // chunk 4 entry - 5 + // go right then left i.e 3 right -> 7 left -> 5 + let instruction = + generate_traversal_instruction(4, 4).expect("should generate traversal instruction"); + assert_eq!(instruction, &[RIGHT, LEFT]); + + // chunk 5 entry - 8 + // go right twice i.e 3 right -> 7 right -> 8 + let instruction = + generate_traversal_instruction(4, 5).expect("should generate traversal instruction"); + assert_eq!(instruction, &[RIGHT, RIGHT]); + + // out of bound tests + assert!(generate_traversal_instruction(4, 6).is_err()); + assert!(generate_traversal_instruction(4, 0).is_err()); + } + + #[test] + fn test_chunk_height() { + // tree of 
height 6 + // all chunks have the same height + // since layer height = [3,3] + // we have 9 chunks in a tree of this height + for i in 1..=9 { + assert_eq!(chunk_height(6, i).unwrap(), 3); + } + + // tree of height 5 + // layer_height = [3, 2] + // we have 9 chunks, just the first chunk is of height 3 + // the rest are of height 2 + assert_eq!(chunk_height(5, 1).unwrap(), 3); + for i in 2..=9 { + assert_eq!(chunk_height(5, i).unwrap(), 2); + } + + // tree of height 10 + // layer_height = [3, 3, 2, 2] + // just going to check chunk 1 - 5 + assert_eq!(chunk_height(10, 1).unwrap(), 3); + assert_eq!(chunk_height(10, 2).unwrap(), 3); + assert_eq!(chunk_height(10, 3).unwrap(), 2); + assert_eq!(chunk_height(10, 4).unwrap(), 2); + assert_eq!(chunk_height(10, 5).unwrap(), 2); + } + + #[test] + fn test_traversal_instruction_as_string() { + assert_eq!(traversal_instruction_as_vec_bytes(&[]), Vec::::new()); + assert_eq!(traversal_instruction_as_vec_bytes(&[LEFT]), vec![1u8]); + assert_eq!(traversal_instruction_as_vec_bytes(&[RIGHT]), vec![0u8]); + assert_eq!( + traversal_instruction_as_vec_bytes(&[RIGHT, LEFT, LEFT, RIGHT]), + vec![0u8, 1u8, 1u8, 0u8] + ); + } + + #[test] + fn test_instruction_string_to_traversal_instruction() { + assert_eq!( + vec_bytes_as_traversal_instruction(&[1u8]).unwrap(), + vec![LEFT] + ); + assert_eq!( + vec_bytes_as_traversal_instruction(&[0u8]).unwrap(), + vec![RIGHT] + ); + assert_eq!( + vec_bytes_as_traversal_instruction(&[0u8, 0u8, 1u8]).unwrap(), + vec![RIGHT, RIGHT, LEFT] + ); + assert!(vec_bytes_as_traversal_instruction(&[0u8, 0u8, 2u8]).is_err()); + assert_eq!( + vec_bytes_as_traversal_instruction(&[]).unwrap(), + Vec::::new() + ); + } + + #[test] + fn test_chunk_id_from_traversal_instruction() { + // tree of height 4 + let traversal_instruction = generate_traversal_instruction(4, 1).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + 1 + ); + let traversal_instruction = 
generate_traversal_instruction(4, 2).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + 2 + ); + let traversal_instruction = generate_traversal_instruction(4, 3).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + 3 + ); + let traversal_instruction = generate_traversal_instruction(4, 4).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + 4 + ); + + // tree of height 6 + let traversal_instruction = generate_traversal_instruction(6, 1).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 1 + ); + let traversal_instruction = generate_traversal_instruction(6, 2).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 2 + ); + let traversal_instruction = generate_traversal_instruction(6, 3).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 3 + ); + let traversal_instruction = generate_traversal_instruction(6, 4).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 4 + ); + let traversal_instruction = generate_traversal_instruction(6, 5).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 5 + ); + let traversal_instruction = generate_traversal_instruction(6, 6).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 6 + ); + let traversal_instruction = generate_traversal_instruction(6, 7).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 7 + ); + let traversal_instruction = generate_traversal_instruction(6, 8).unwrap(); + assert_eq!( + 
chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 8 + ); + let traversal_instruction = generate_traversal_instruction(6, 9).unwrap(); + assert_eq!( + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 9 + ); + } + + #[test] + fn test_chunk_id_from_traversal_instruction_with_recovery() { + // tree of height 5 + // layer heights = [3, 2] + // first chunk boundary is at instruction len 0 e.g. [] + // second chunk boundary is at instruction len 3 e.g. [left, left, left] + // anything outside of this should return an error with regular chunk_id + // function with recovery we expect this to backtrack to the last chunk + // boundary e.g. [left] should backtrack to [] + // [left, left, right, left] should backtrack to [left, left, right] + assert!(chunk_index_from_traversal_instruction(&[LEFT], 5).is_err()); + assert_eq!( + chunk_index_from_traversal_instruction_with_recovery(&[LEFT], 5).unwrap(), + 1 + ); + assert_eq!( + chunk_index_from_traversal_instruction_with_recovery(&[LEFT, LEFT], 5).unwrap(), + 1 + ); + assert_eq!( + chunk_index_from_traversal_instruction_with_recovery(&[LEFT, LEFT, RIGHT], 5).unwrap(), + 3 + ); + assert_eq!( + chunk_index_from_traversal_instruction_with_recovery(&[LEFT, LEFT, RIGHT, LEFT], 5) + .unwrap(), + 3 + ); + assert_eq!( + chunk_index_from_traversal_instruction_with_recovery(&[LEFT; 50], 5).unwrap(), + 2 + ); + } +} diff --git a/merk/src/proofs/encoding.rs b/merk/src/proofs/encoding.rs index b0e828338..eb1c055b2 100644 --- a/merk/src/proofs/encoding.rs +++ b/merk/src/proofs/encoding.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, 
sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Proofs encoding #[cfg(any(feature = "full", feature = "verify"))] @@ -464,8 +436,9 @@ impl<'a> Iterator for Decoder<'a> { mod test { use super::super::{Node, Op}; use crate::{ + proofs::Decoder, tree::HASH_LENGTH, - TreeFeatureType::{BasicMerk, SummedMerk}, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; #[test] @@ -567,7 +540,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - BasicMerk, + BasicMerkNode, )); assert_eq!(op.encoding_length(), 43); @@ -585,7 +558,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - SummedMerk(6), + SummedMerkNode(6), )); assert_eq!(op.encoding_length(), 44); @@ -683,7 +656,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - BasicMerk, + BasicMerkNode, )); assert_eq!(op.encoding_length(), 43); @@ -701,7 +674,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - SummedMerk(5), + SummedMerkNode(5), )); assert_eq!(op.encoding_length(), 44); @@ -860,7 +833,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - BasicMerk + BasicMerkNode )) ); @@ -875,7 +848,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - SummedMerk(6) + SummedMerkNode(6) )) ); } @@ -960,7 +933,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], 
[0; 32], - BasicMerk + BasicMerkNode )) ); @@ -975,7 +948,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - SummedMerk(6) + SummedMerkNode(6) )) ); } @@ -994,6 +967,24 @@ mod test { assert_eq!(op, Op::Child); } + #[test] + fn decode_multiple_child() { + let bytes = [0x11, 0x11, 0x11, 0x10]; + let decoder = Decoder { + bytes: &bytes, + offset: 0, + }; + + let mut vecop = vec![]; + for op in decoder { + match op { + Ok(op) => vecop.push(op), + Err(e) => eprintln!("Error decoding: {:?}", e), + } + } + assert_eq!(vecop, vec![Op::Child, Op::Child, Op::Child, Op::Parent]); + } + #[test] fn decode_parent_inverted() { let bytes = [0x12]; diff --git a/merk/src/proofs/mod.rs b/merk/src/proofs/mod.rs index 1bedeec5e..45f4b2e9d 100644 --- a/merk/src/proofs/mod.rs +++ b/merk/src/proofs/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk proofs #[cfg(feature = "full")] @@ -104,9 +76,58 @@ pub enum Node { KVValueHash(Vec, Vec, CryptoHash), /// Represents, the key, value, value_hash and feature_type of a tree node + /// Used by Sum trees KVValueHashFeatureType(Vec, Vec, CryptoHash, TreeFeatureType), /// Represents the key, value of some referenced node and value_hash of /// current tree node KVRefValueHash(Vec, Vec, CryptoHash), } + +use std::fmt; + +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for Node { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let node_string = match self { + Node::Hash(hash) => format!("Hash(HASH[{}])", hex::encode(hash)), + Node::KVHash(kv_hash) => format!("KVHash(HASH[{}])", hex::encode(kv_hash)), + Node::KV(key, value) => { + format!("KV({}, {})", hex_to_ascii(key), hex_to_ascii(value)) + } + Node::KVValueHash(key, value, value_hash) => format!( + "KVValueHash({}, {}, HASH[{}])", + hex_to_ascii(key), + hex_to_ascii(value), + hex::encode(value_hash) + ), + Node::KVDigest(key, value_hash) => format!( + "KVDigest({}, HASH[{}])", + hex_to_ascii(key), + hex::encode(value_hash) + ), + Node::KVRefValueHash(key, value, value_hash) => format!( + "KVRefValueHash({}, {}, HASH[{}])", + hex_to_ascii(key), + hex_to_ascii(value), + hex::encode(value_hash) + ), + Node::KVValueHashFeatureType(key, value, value_hash, feature_type) => format!( + "KVValueHashFeatureType({}, {}, HASH[{}], {:?})", + hex_to_ascii(key), + hex_to_ascii(value), + hex::encode(value_hash), + feature_type + ), + }; + write!(f, "{}", node_string) + } +} + +fn hex_to_ascii(hex_value: &[u8]) -> String { + if hex_value.len() == 1 && hex_value[0] < b"0"[0] { + hex::encode(hex_value) + } else { + String::from_utf8(hex_value.to_vec()).unwrap_or_else(|_| hex::encode(hex_value)) + } +} diff --git a/merk/src/proofs/query/map.rs b/merk/src/proofs/query/map.rs index 9eb716ed9..9e741ceab 100644 --- a/merk/src/proofs/query/map.rs +++ b/merk/src/proofs/query/map.rs @@ -1,31 +1,3 @@ -// 
MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Query #![allow(unstable_name_collisions)] @@ -396,35 +368,3 @@ mod tests { assert_eq!(range.next().unwrap().unwrap(), (&[1][..], &[1][..])); } } - -#[cfg(feature = "full")] -/// `BTreeMapExtras` provides extra functionality to work with `BTreeMap` that -/// either missed or unstable -/// NOTE: We can easily remove this when the following feature will be rolled -/// out into stable rust: https://github.com/rust-lang/rust/issues/62924 -trait BTreeMapExtras { - type K; - type V; - - /// Returns `None` if `BTreeMap` is empty otherwise the first key-value pair - /// in the map. The key in this pair is the minimum key in the map. - fn first_key_value(&self) -> Option<(&Self::K, &Self::V)>; - - /// Returns `None` if `BTreeMap` is empty otherwise the last key-value pair - /// in the map. 
The key in this pair is the maximum key in the map. - fn last_key_value(&self) -> Option<(&Self::K, &Self::V)>; -} - -#[cfg(feature = "full")] -impl BTreeMapExtras for BTreeMap { - type K = KK; - type V = VV; - - fn first_key_value(&self) -> Option<(&Self::K, &Self::V)> { - self.iter().next() - } - - fn last_key_value(&self) -> Option<(&Self::K, &Self::V)> { - self.iter().next_back() - } -} diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index d63d2d478..669940cc2 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Query proofs #[cfg(feature = "full")] @@ -42,12 +14,13 @@ pub mod query_item; #[cfg(any(feature = "full", feature = "verify"))] mod verify; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(feature = "full")] use std::cmp::Ordering; -use std::collections::HashSet; +use std::{collections::HashSet, fmt, ops::RangeFull}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(feature = "full")] use grovedb_costs::{cost_return_on_error, CostContext, CostResult, CostsExt, OperationCost}; +use grovedb_version::version::GroveVersion; #[cfg(any(feature = "full", feature = "verify"))] use indexmap::IndexMap; #[cfg(feature = "full")] @@ -56,17 +29,22 @@ pub use map::*; pub use query_item::intersect::QueryItemIntersectionResult; #[cfg(any(feature = "full", feature = "verify"))] pub use query_item::QueryItem; +#[cfg(feature = "full")] +use verify::ProofAbsenceLimit; #[cfg(any(feature = "full", feature = "verify"))] -use verify::ProofAbsenceLimitOffset; +pub use verify::VerifyOptions; #[cfg(any(feature = "full", feature = "verify"))] -pub use verify::{execute_proof, verify_query, ProofVerificationResult, ProvedKeyValue}; +pub use verify::{ProofVerificationResult, ProvedKeyOptionalValue, ProvedKeyValue}; #[cfg(feature = "full")] use {super::Op, std::collections::LinkedList}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(feature = "full")] use super::Node; #[cfg(any(feature = "full", feature = "verify"))] use crate::error::Error; +use crate::proofs::hex_to_ascii; +#[cfg(feature = "full")] +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::tree::{Fetch, Link, RefWalker}; @@ -94,7 +72,7 @@ pub struct SubqueryBranch { #[cfg(any(feature = "full", feature = "verify"))] /// `Query` represents one or more keys or ranges of keys, which can be used to -/// resolve a proof which will include all of the requested values. +/// resolve a proof which will include all the requested values. 
#[derive(Debug, Default, Clone, PartialEq)] pub struct Query { /// Items @@ -107,6 +85,57 @@ pub struct Query { pub left_to_right: bool, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for SubqueryBranch { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SubqueryBranch {{ ")?; + if let Some(path) = &self.subquery_path { + write!(f, "subquery_path: [")?; + for (i, path_part) in path.iter().enumerate() { + if i > 0 { + write!(f, ", ")? + } + write!(f, "{}", hex_to_ascii(path_part))?; + } + write!(f, "], ")?; + } else { + write!(f, "subquery_path: None ")?; + } + if let Some(subquery) = &self.subquery { + write!(f, "subquery: {} ", subquery)?; + } else { + write!(f, "subquery: None ")?; + } + write!(f, "}}") + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Query {{")?; + writeln!(f, " items: [")?; + for item in &self.items { + writeln!(f, " {},", item)?; + } + writeln!(f, " ],")?; + writeln!( + f, + " default_subquery_branch: {},", + self.default_subquery_branch + )?; + if let Some(conditional_branches) = &self.conditional_subquery_branches { + writeln!(f, " conditional_subquery_branches: {{")?; + for (item, branch) in conditional_branches { + writeln!(f, " {}: {},", item, branch)?; + } + writeln!(f, " }},")?; + } + writeln!(f, " left_to_right: {},", self.left_to_right)?; + write!(f, "}}") + } +} + #[cfg(any(feature = "full", feature = "verify"))] impl Query { /// Creates a new query which contains no items. @@ -114,6 +143,15 @@ impl Query { Self::new_with_direction(true) } + /// Creates a new query which contains all items. + pub fn new_range_full() -> Self { + Self { + items: vec![QueryItem::RangeFull(RangeFull)], + left_to_right: true, + ..Self::default() + } + } + /// Creates a new query which contains only one key. 
pub fn new_single_key(key: Vec) -> Self { Self { @@ -153,6 +191,37 @@ impl Query { } } + pub fn has_subquery_on_key(&self, key: &[u8], in_path: bool) -> bool { + if in_path || self.default_subquery_branch.subquery.is_some() { + return true; + } + if let Some(conditional_subquery_branches) = self.conditional_subquery_branches.as_ref() { + for (query_item, subquery) in conditional_subquery_branches { + if query_item.contains(key) { + return subquery.subquery.is_some(); + } + } + } + false + } + + pub fn has_subquery_or_subquery_path_on_key(&self, key: &[u8], in_path: bool) -> bool { + if in_path + || self.default_subquery_branch.subquery.is_some() + || self.default_subquery_branch.subquery_path.is_some() + { + return true; + } + if let Some(conditional_subquery_branches) = self.conditional_subquery_branches.as_ref() { + for query_item in conditional_subquery_branches.keys() { + if query_item.contains(key) { + return true; + } + } + } + false + } + /// Pushes terminal key paths and keys to `result`, no more than /// `max_results`. Returns the number of terminal keys added. 
/// @@ -173,14 +242,16 @@ impl Query { // unbounded ranges can not be supported if conditional_query_item.is_unbounded_range() { return Err(Error::NotSupported( - "terminal keys are not supported with conditional unbounded ranges", + "terminal keys are not supported with conditional unbounded ranges" + .to_string(), )); } let conditional_keys = conditional_query_item.keys()?; for key in conditional_keys.into_iter() { if current_len > max_results { return Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded for conditional subqueries, set max is \ + {max_results}, current length is {current_len}", ))); } already_added_keys.insert(key.clone()); @@ -193,14 +264,15 @@ impl Query { // push the subquery path to the path path.extend(subquery_path.iter().cloned()); // recurse onto the lower level - let added_here = - subquery.terminal_keys(path, max_results - current_len, result)?; + let added_here = subquery.terminal_keys(path, max_results, result)?; added += added_here; current_len += added_here; } else { if current_len == max_results { return Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded when subquery path but no \ + subquery, set max is {max_results}, current length is \ + {current_len}", ))); } // a subquery path but no subquery @@ -235,7 +307,7 @@ impl Query { for item in self.items.iter() { if item.is_unbounded_range() { return Err(Error::NotSupported( - "terminal keys are not supported with unbounded ranges", + "terminal keys are not supported with unbounded ranges".to_string(), )); } let keys = item.keys()?; @@ -246,7 +318,8 @@ impl Query { } if current_len > max_results { return Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded for items, set max is {max_results}, \ + current len is {current_len}", ))); } let mut 
path = current_path.clone(); @@ -258,14 +331,14 @@ impl Query { // push the subquery path to the path path.extend(subquery_path.iter().cloned()); // recurse onto the lower level - let added_here = - subquery.terminal_keys(path, max_results - current_len, result)?; + let added_here = subquery.terminal_keys(path, max_results, result)?; added += added_here; current_len += added_here; } else { if current_len == max_results { return Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded when subquery path but no subquery, \ + set max is {max_results}, current len is {current_len}", ))); } // a subquery path but no subquery @@ -289,14 +362,14 @@ impl Query { // push the key to the path path.push(key); // recurse onto the lower level - let added_here = - subquery.terminal_keys(path, max_results - current_len, result)?; + let added_here = subquery.terminal_keys(path, max_results, result)?; added += added_here; current_len += added_here; } else { if current_len == max_results { return Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded without subquery or subquery path, set \ + max is {max_results}, current len is {current_len}", ))); } result.push((path, key)); @@ -386,7 +459,7 @@ impl Query { } } - /// Check if has subquery + /// Check if there is a subquery pub fn has_subquery(&self) -> bool { // checks if a query has subquery items if self.default_subquery_branch.subquery.is_some() @@ -398,7 +471,7 @@ impl Query { false } - /// Check if has only keys + /// Check if there are only keys pub fn has_only_keys(&self) -> bool { // checks if all searched for items are keys self.items.iter().all(|a| a.is_key()) @@ -507,19 +580,6 @@ where self.tree().hash().map(Node::Hash) } - #[cfg(feature = "full")] - #[allow(dead_code)] // TODO: remove when proofs will be enabled - /// Create a full proof - pub(crate) fn 
create_full_proof( - &mut self, - query: &[QueryItem], - limit: Option, - offset: Option, - left_to_right: bool, - ) -> CostResult { - self.create_proof(query, limit, offset, left_to_right) - } - /// Generates a proof for the list of queried keys. Returns a tuple /// containing the generated proof operators, and a tuple representing if /// any keys were queried were less than the left edge or greater than the @@ -531,9 +591,9 @@ where &mut self, query: &[QueryItem], limit: Option, - offset: Option, left_to_right: bool, - ) -> CostResult { + grove_version: &GroveVersion, + ) -> CostResult { let mut cost = OperationCost::default(); // TODO: don't copy into vec, support comparing QI to byte slice @@ -551,8 +611,6 @@ where let current_node_in_query: bool; let mut node_on_non_inclusive_bounds = false; - // becomes true if the offset exists and is non zero - let mut skip_current_node = false; let (mut left_items, mut right_items) = match search { Ok(index) => { @@ -596,78 +654,77 @@ where } }; - if offset.is_none() || offset == Some(0) { - // when the limit hits zero, the rest of the query batch should be cleared - // so empty the left, right query batch, and set the current node to not found - if let Some(current_limit) = limit { - if current_limit == 0 { - left_items = &[]; - search = Err(Default::default()); - right_items = &[]; - } + // when the limit hits zero, the rest of the query batch should be cleared + // so empty the left, right query batch, and set the current node to not found + if let Some(current_limit) = limit { + if current_limit == 0 { + left_items = &[]; + search = Err(Default::default()); + right_items = &[]; } } let proof_direction = left_to_right; // signifies what direction the DFS should go - let (mut proof, left_absence, mut new_limit, mut new_offset) = if left_to_right { + let (mut proof, left_absence, mut new_limit) = if left_to_right { cost_return_on_error!( &mut cost, - self.create_child_proof(proof_direction, left_items, limit, offset, 
left_to_right) + self.create_child_proof( + proof_direction, + left_items, + limit, + left_to_right, + grove_version + ) ) } else { cost_return_on_error!( &mut cost, - self.create_child_proof(proof_direction, right_items, limit, offset, left_to_right) + self.create_child_proof( + proof_direction, + right_items, + limit, + left_to_right, + grove_version + ) ) }; - if let Some(current_offset) = new_offset { - if current_offset > 0 && current_node_in_query && !node_on_non_inclusive_bounds { - // reserve offset slot for current node before generating proof for right - // subtree - new_offset = Some(current_offset - 1); - skip_current_node = true; - } - } - - if !skip_current_node && (new_offset.is_none() || new_offset == Some(0)) { - if let Some(current_limit) = new_limit { - // if after generating proof for the left subtree, the limit becomes 0 - // clear the current node and clear the right batch - if current_limit == 0 { + if let Some(current_limit) = new_limit { + // if after generating proof for the left subtree, the limit becomes 0 + // clear the current node and clear the right batch + if current_limit == 0 { + if left_to_right { + right_items = &[]; + } else { + left_items = &[]; + } + search = Err(Default::default()); + } else if current_node_in_query && !node_on_non_inclusive_bounds { + // if limit is not zero, reserve a limit slot for the current node + // before generating proof for the right subtree + new_limit = Some(current_limit - 1); + // if after limit slot reservation, limit becomes 0, right query + // should be cleared + if current_limit - 1 == 0 { if left_to_right { right_items = &[]; } else { left_items = &[]; } - search = Err(Default::default()); - } else if current_node_in_query && !node_on_non_inclusive_bounds { - // if limit is not zero, reserve a limit slot for the current node - // before generating proof for the right subtree - new_limit = Some(current_limit - 1); - // if after limit slot reservation, limit becomes 0, right query - // 
should be cleared - if current_limit - 1 == 0 { - if left_to_right { - right_items = &[]; - } else { - left_items = &[]; - } - } } } } let proof_direction = !proof_direction; // search the opposite path on second pass - let (mut right_proof, right_absence, new_limit, new_offset) = if left_to_right { + let (mut right_proof, right_absence, new_limit) = if left_to_right { cost_return_on_error!( &mut cost, self.create_child_proof( proof_direction, right_items, new_limit, - new_offset, left_to_right, + grove_version ) ) } else { @@ -677,8 +734,8 @@ where proof_direction, left_items, new_limit, - new_offset, left_to_right, + grove_version ) ) }; @@ -687,7 +744,7 @@ where proof.push_back(match search { Ok(_) => { - if node_on_non_inclusive_bounds || skip_current_node { + if node_on_non_inclusive_bounds { if left_to_right { Op::Push(self.to_kvdigest_node()) } else { @@ -731,13 +788,7 @@ where } } - Ok(( - proof, - (left_absence.0, right_absence.1), - new_limit, - new_offset, - )) - .wrap_with_cost(cost) + Ok((proof, (left_absence.0, right_absence.1), new_limit)).wrap_with_cost(cost) } /// Similar to `create_proof`. 
Recurses into the child on the given side and @@ -748,16 +799,20 @@ where left: bool, query: &[QueryItem], limit: Option, - offset: Option, left_to_right: bool, - ) -> CostResult { + grove_version: &GroveVersion, + ) -> CostResult { if !query.is_empty() { - self.walk(left).flat_map_ok(|child_opt| { + self.walk( + left, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .flat_map_ok(|child_opt| { if let Some(mut child) = child_opt { - child.create_proof(query, limit, offset, left_to_right) + child.create_proof(query, limit, left_to_right, grove_version) } else { - Ok((LinkedList::new(), (true, true), limit, offset)) - .wrap_with_cost(Default::default()) + Ok((LinkedList::new(), (true, true), limit)).wrap_with_cost(Default::default()) } }) } else if let Some(link) = self.tree().link(left) { @@ -767,10 +822,9 @@ where } else { Op::PushInverted(link.to_hash_node()) }); - Ok((proof, (false, false), limit, offset)).wrap_with_cost(Default::default()) + Ok((proof, (false, false), limit)).wrap_with_cost(Default::default()) } else { - Ok((LinkedList::new(), (false, false), limit, offset)) - .wrap_with_cost(Default::default()) + Ok((LinkedList::new(), (false, false), limit)).wrap_with_cost(Default::default()) } } } @@ -779,98 +833,85 @@ where #[allow(deprecated)] #[cfg(test)] mod test { - use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; + + macro_rules! 
compare_result_tuples_not_optional { + ($result_set:expr, $expected_result_set:expr) => { + assert_eq!( + $expected_result_set.len(), + $result_set.len(), + "Result set lengths do not match" + ); + for i in 0..$expected_result_set.len() { + assert_eq!( + $expected_result_set[i].0, $result_set[i].key, + "Key mismatch at index {}", + i + ); + assert_eq!( + &$expected_result_set[i].1, + $result_set[i].value.as_ref().expect("expected value"), + "Value mismatch at index {}", + i + ); + } + }; + } use super::{ super::{encoding::encode_into, *}, *, }; use crate::{ - proofs::query::{ - query_item::QueryItem::RangeAfter, - verify, - verify::{verify_query, ProvedKeyValue}, - }, + proofs::query::verify, test_utils::make_tree_seq, - tree::{NoopCommit, PanicSource, RefWalker, Tree}, - TreeFeatureType::BasicMerk, + tree::{NoopCommit, PanicSource, RefWalker, TreeNode}, + TreeFeatureType::BasicMerkNode, }; - fn compare_result_tuples( - result_set: Vec, - expected_result_set: Vec<(Vec, Vec)>, - ) { - assert_eq!(expected_result_set.len(), result_set.len()); - for i in 0..expected_result_set.len() { - assert_eq!(expected_result_set[i].0, result_set[i].key); - assert_eq!(expected_result_set[i].1, result_set[i].value); - } - } - - fn make_3_node_tree() -> Tree { - let mut tree = Tree::new(vec![5], vec![5], None, BasicMerk) + fn make_3_node_tree() -> TreeNode { + let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![3], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![3], vec![3], None, BasicMerkNode).unwrap()), ) .attach( false, - Some(Tree::new(vec![7], vec![7], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![7], vec![7], None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() 
+ .expect("commit failed"); tree } - fn make_6_node_tree() -> Tree { - let two_tree = Tree::new(vec![2], vec![2], None, BasicMerk).unwrap(); - let four_tree = Tree::new(vec![4], vec![4], None, BasicMerk).unwrap(); - let mut three_tree = Tree::new(vec![3], vec![3], None, BasicMerk) + fn make_6_node_tree() -> TreeNode { + let two_tree = TreeNode::new(vec![2], vec![2], None, BasicMerkNode).unwrap(); + let four_tree = TreeNode::new(vec![4], vec![4], None, BasicMerkNode).unwrap(); + let mut three_tree = TreeNode::new(vec![3], vec![3], None, BasicMerkNode) .unwrap() .attach(true, Some(two_tree)) .attach(false, Some(four_tree)); three_tree - .commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) + .commit(&mut NoopCommit {}, &|_, _| Ok(0)) .unwrap() .expect("commit failed"); - let seven_tree = Tree::new(vec![7], vec![7], None, BasicMerk).unwrap(); - let mut eight_tree = Tree::new(vec![8], vec![8], None, BasicMerk) + let seven_tree = TreeNode::new(vec![7], vec![7], None, BasicMerkNode).unwrap(); + let mut eight_tree = TreeNode::new(vec![8], vec![8], None, BasicMerkNode) .unwrap() .attach(true, Some(seven_tree)); eight_tree - .commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) + .commit(&mut NoopCommit {}, &|_, _| Ok(0)) .unwrap() .expect("commit failed"); - let mut root_tree = Tree::new(vec![5], vec![5], None, BasicMerk) + let mut root_tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode) .unwrap() .attach(true, Some(three_tree)) .attach(false, Some(eight_tree)); root_tree - .commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) + .commit(&mut NoopCommit {}, &|_, _| Ok(0)) .unwrap() .expect("commit failed"); @@ -878,19 +919,20 @@ mod test { } fn verify_keys_test(keys: Vec>, expected_result: 
Vec>>) { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, ..) = walker - .create_full_proof( + .create_proof( keys.clone() .into_iter() .map(QueryItem::Key) .collect::>() .as_slice(), None, - None, true, + grove_version, ) .unwrap() .expect("failed to create proof"); @@ -907,7 +949,8 @@ mod test { query.insert_key(key.clone()); } - let result = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let result = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .expect("verify failed"); @@ -919,7 +962,10 @@ mod test { } for (key, expected_value) in keys.iter().zip(expected_result.iter()) { - assert_eq!(values.get(key), expected_value.as_ref()); + assert_eq!( + values.get(key).and_then(|a| a.as_ref()), + expected_value.as_ref() + ); } } @@ -1115,11 +1161,12 @@ mod test { #[test] fn empty_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, absence, ..) = walker - .create_full_proof(vec![].as_slice(), None, None, true) + .create_proof(vec![].as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1152,27 +1199,22 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); - let res = verify_query( - bytes.as_slice(), - &Query::new(), - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); + let res = Query::new() + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); assert!(res.result_set.is_empty()); } #[test] fn root_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![5])]; + let query_items = vec![QueryItem::Key(vec![5])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1210,30 +1252,25 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![5], vec![5])]); } #[test] fn leaf_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![3])]; + let query_items = vec![QueryItem::Key(vec![3])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1271,30 +1308,25 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![3], vec![3])]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![3], vec![3])]); } #[test] fn double_leaf_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![3]), QueryItem::Key(vec![7])]; + let query_items = vec![QueryItem::Key(vec![3]), QueryItem::Key(vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1336,34 +1368,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![3], vec![3]), (vec![7], vec![7])]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![3], vec![3]), (vec![7], vec![7])] + ); } #[test] fn all_nodes_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![ + let query_items = vec![ QueryItem::Key(vec![3]), QueryItem::Key(vec![5]), QueryItem::Key(vec![7]), ]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1409,33 +1439,28 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![3], vec![3]), (vec![5], vec![5]), (vec![7], vec![7])], + vec![(vec![3], vec![3]), (vec![5], vec![5]), (vec![7], vec![7])] ); } #[test] fn global_edge_absence_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![8])]; + let query_items = vec![QueryItem::Key(vec![8])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1472,30 +1497,25 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, Vec::<(Vec, Vec)>::new()); } #[test] fn absence_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![6])]; + let query_items = vec![QueryItem::Key(vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1535,43 +1555,41 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, Vec::<(Vec, Vec)>::new()); } #[test] fn doc_proof() { - let mut tree = Tree::new(vec![5], vec![5], None, BasicMerk) + let grove_version = GroveVersion::latest(); + let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode) .unwrap() .attach( true, Some( - Tree::new(vec![2], vec![2], None, BasicMerk) + TreeNode::new(vec![2], vec![2], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![1], vec![1], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![1], vec![1], None, BasicMerkNode).unwrap()), ) .attach( false, Some( - Tree::new(vec![4], vec![4], None, BasicMerk) + TreeNode::new(vec![4], vec![4], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![3], vec![3], None, BasicMerk).unwrap()), + Some( + TreeNode::new(vec![3], vec![3], None, BasicMerkNode) + .unwrap(), + ), ), ), ), @@ -1580,57 +1598,59 @@ mod test { .attach( false, Some( - Tree::new(vec![9], vec![9], None, BasicMerk) + TreeNode::new(vec![9], vec![9], None, BasicMerkNode) .unwrap() .attach( true, Some( - Tree::new(vec![7], vec![7], None, BasicMerk) + TreeNode::new(vec![7], vec![7], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![6], vec![6], None, BasicMerk).unwrap()), + Some( + TreeNode::new(vec![6], vec![6], None, 
BasicMerkNode) + .unwrap(), + ), ) .attach( false, - Some(Tree::new(vec![8], vec![8], None, BasicMerk).unwrap()), + Some( + TreeNode::new(vec![8], vec![8], None, BasicMerkNode) + .unwrap(), + ), ), ), ) .attach( false, Some( - Tree::new(vec![11], vec![11], None, BasicMerk) + TreeNode::new(vec![11], vec![11], None, BasicMerkNode) .unwrap() .attach( true, Some( - Tree::new(vec![10], vec![10], None, BasicMerk).unwrap(), + TreeNode::new(vec![10], vec![10], None, BasicMerkNode) + .unwrap(), ), ), ), ), ), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .unwrap(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![ + let query_items = vec![ QueryItem::Key(vec![1]), QueryItem::Key(vec![2]), QueryItem::Key(vec![3]), QueryItem::Key(vec![4]), ]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1724,27 +1744,21 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![1], vec![1]), (vec![2], vec![2]), (vec![3], vec![3]), (vec![4], vec![4]), - ], + ] ); } @@ -1793,14 +1807,15 @@ mod test { #[test] fn range_proof() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, 
grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Range( + let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1877,175 +1892,64 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::Range( - vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( - res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])], - ); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::Range( - vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::Range( - vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(198)); // right to left test - let mut tree = make_tree_seq(10); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Range( + let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { + let mut query = Query::new_with_direction(false); + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], + ] ); } #[test] fn range_proof_inclusive() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = 
vec![QueryItem::RangeInclusive( + let query_items = vec![QueryItem::RangeInclusive( vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2123,214 +2027,65 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); - // skip 1 element - let mut tree = make_tree_seq(10); + // right_to_left proof + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeInclusive( + let query_items = vec![QueryItem::RangeInclusive( vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( - res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])], - ); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeInclusive( - vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) .unwrap() - .expect("create_proof errored"); + .unwrap(); - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60])], + vec![ + (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), + (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), + (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), + ] ); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); + } - // skip all elements - let mut tree = make_tree_seq(10); + #[test] + fn range_from_proof() { + let grove_version = GroveVersion::latest(); + let mut tree = 
make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeInclusive( - vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(197)); - - // right_to_left proof - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeInclusive( - vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, false) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - - compare_result_tuples( - res.result_set, - vec![ - (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], - ); - - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeInclusive( - vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, Some(2), false) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - None, - Some(2), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - - compare_result_tuples( - res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60])], - ); - assert_eq!(res.limit, None); - assert_eq!(res.offset, Some(0)); - } - - #[test] - fn range_from_proof() { - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, true) + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; + let (proof, absence, ..) = walker + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2384,39 +2139,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8])], + vec![(vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8])] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = 
vec![QueryItem::RangeFrom(vec![5]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::Key(vec![5])]; + let equivalent_query_items = vec![QueryItem::Key(vec![5])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2426,40 +2174,33 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![5], vec![5])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![ + let equivalent_query_items = vec![ QueryItem::Key(vec![5]), QueryItem::Key(vec![6]), QueryItem::Key(vec![7]), ]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2469,36 +2210,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5]), (vec![7], vec![7])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![5], vec![5]), (vec![7], vec![7])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; + let equivalent_query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2508,123 +2245,26 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8])], + vec![(vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8])] ); assert_eq!(res.limit, Some(97)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![7], vec![7])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![8], vec![8])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(197)); // right_to_left test let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -2633,64 +2273,28 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5])], + vec![(vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5])] ); - - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), Some(1), false) - .unwrap() - .expect("create_proof errored"); - - assert_eq!(absence, (true, false)); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - Some(1), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![7], vec![7]), (vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); } #[test] fn range_to_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2767,44 +2371,37 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), (vec![3], vec![3]), (vec![4], vec![4]), (vec![5], vec![5]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![2])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![2])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2814,36 +2411,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![2], vec![2])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![3])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![3])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2853,36 +2443,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2]), (vec![3], vec![3])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![2], vec![2]), (vec![3], vec![3])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let equivalent_query_items = vec![QueryItem::RangeTo(..vec![6])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2892,128 +2478,31 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), (vec![3], vec![3]), (vec![4], vec![4]), (vec![5], vec![5]), - ], + ] ); assert_eq!(res.limit, Some(96)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![3], vec![3])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(196)); // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3022,35 +2511,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![5], vec![5]), (vec![4], vec![4]), (vec![3], vec![3]), (vec![2], vec![2]), - ], + ] ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(2), None, false) + .create_proof(query_items.as_slice(), Some(2), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3059,32 +2542,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5]), (vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![5], vec![5]), (vec![4], vec![4])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); } #[test] fn range_to_proof_inclusive() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3161,44 +2641,37 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), (vec![3], vec![3]), (vec![4], vec![4]), (vec![5], vec![5]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![2])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![2])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3208,36 +2681,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![2], vec![2])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![3])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![3])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3247,36 +2713,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2]), (vec![3], vec![3])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![2], vec![2]), (vec![3], vec![3])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3286,128 +2748,31 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), (vec![3], vec![3]), (vec![4], vec![4]), (vec![5], vec![5]), - ], + ] ); assert_eq!(res.limit, Some(96)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![3], vec![3])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(196)); // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3416,35 +2781,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![5], vec![5]), (vec![4], vec![4]), (vec![3], vec![3]), (vec![2], vec![2]), - ], + ] ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), false) + .create_proof(query_items.as_slice(), Some(1), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3453,32 +2812,26 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![5], vec![5])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); } #[test] fn range_after_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3555,44 +2908,37 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3602,36 +2948,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![4], vec![4])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3641,36 +2980,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; + let equivalent_query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3680,128 +3015,31 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8]), - ], + ] ); assert_eq!(res.limit, Some(96)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![7], vec![7])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(196)); // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3810,35 +3048,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5]), (vec![4], vec![4]), - ], + ] ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(3), None, false) + .create_proof(query_items.as_slice(), Some(3), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3847,35 +3079,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(3), - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(3), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5])], + vec![(vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5])] ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); } #[test] fn range_after_to_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3947,36 +3173,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3986,36 +3208,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![4], vec![4])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4025,36 +3240,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let equivalent_query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4064,120 +3275,26 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); - assert_eq!(res.limit, Some(98)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(198)); + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); + assert_eq!(res.limit, Some(98)); // right_to_left let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -4186,27 +3303,24 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5]), (vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![5], vec![5]), (vec![4], vec![4])] + ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(300), Some(1), false) + .create_proof(query_items.as_slice(), Some(300), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -4215,32 +3329,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(300), - Some(1), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); - assert_eq!(res.limit, Some(299)); - assert_eq!(res.offset, Some(0)); + let res = query + .verify_proof(bytes.as_slice(), Some(300), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![5], vec![5]), (vec![4], vec![4])] + ); + assert_eq!(res.limit, Some(298)); } #[test] fn range_after_to_proof_inclusive() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4311,39 +3422,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7])], + vec![(vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7])] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4353,36 +3457,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![4], vec![4])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4392,36 +3489,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4431,159 +3524,56 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7])], + vec![(vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7])] ); assert_eq!(res.limit, Some(97)); - assert_eq!(res.offset, None); - // skip 1 element + // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let (proof, absence, ..) 
= walker + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); + assert_eq!(absence, (false, false)); let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![7], vec![7])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(197)); - - // right_to_left proof - // let mut tree = make_6_node_tree(); - // let mut walker = RefWalker::new(&mut tree, PanicSource {}); - // - // let queryitems = - // vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; - // let (proof, absence, ..) = walker - // .create_full_proof(queryitems.as_slice(), None, None, false) - // .unwrap() - // .expect("create_proof errored"); - // - // assert_eq!(absence, (false, false)); - // - // let mut bytes = vec![]; - // encode_into(proof.iter(), &mut bytes); - // let mut query = Query::new(); - // for item in queryitems { - // query.insert_item(item); - // } - // let res = verify_query( - // bytes.as_slice(), - // &query, - // None, - // None, - // false, - // tree.hash().unwrap(), - // ) - // .unwrap() - // .unwrap(); - // compare_result_tuples( - // res.result_set, - // vec![(vec![7], vec![7]), (vec![5], vec![5]), (vec![4], vec![4])], - // ); + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![7], vec![7]), (vec![5], vec![5]), (vec![4], vec![4])] + ); } #[test] fn range_full_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4666,20 +3656,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), @@ -4688,24 +3672,23 @@ mod test { (vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![2])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![2])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4715,36 +3698,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![2], vec![2])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![3])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![3])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4754,36 +3730,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2]), (vec![3], vec![3])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![2], vec![2]), (vec![3], vec![3])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeFull(..)]; + let equivalent_query_items = vec![QueryItem::RangeFull(..)]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4793,20 +3765,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), @@ -4815,111 +3781,17 @@ mod test { (vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8]), - ], + ] ); assert_eq!(res.limit, Some(94)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFull(..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(3), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(3), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( - res.result_set, - vec![(vec![3], vec![3]), (vec![4], vec![4]), (vec![5], vec![5])], - ); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFull(..)]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(2), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFull(..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(194)); // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -4928,20 +3800,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![8], vec![8]), @@ -4950,15 +3816,15 @@ mod test { (vec![4], vec![4]), (vec![3], vec![3]), (vec![2], vec![2]), - ], + ] ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(2), Some(2), false) + .create_proof(query_items.as_slice(), Some(2), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -4967,38 +3833,34 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - Some(2), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5]), (vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![8], vec![8]), (vec![7], vec![7])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); } #[test] fn proof_with_limit() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![2]..)]; - let (proof, _, limit, offset) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + let query_items = vec![QueryItem::RangeFrom(vec![2]..)]; + let (proof, _, limit) = walker + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); // TODO: Add this test for other range types assert_eq!(limit, Some(0)); - assert_eq!(offset, None); let mut iter = proof.iter(); assert_eq!( @@ -5049,116 +3911,26 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, 
vec![(vec![2], vec![2])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); - } - - #[test] - fn proof_with_offset() { - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![2]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) .unwrap() - .expect("create_proof errored"); - - let mut iter = proof.iter(); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVDigest( - vec![2], - [ - 183, 215, 112, 4, 15, 120, 14, 157, 239, 246, 188, 3, 138, 190, 166, 110, 16, - 139, 136, 208, 152, 209, 109, 36, 205, 116, 134, 235, 103, 16, 96, 178 - ] - ))) - ); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVDigest( - vec![3], - [ - 210, 173, 26, 11, 185, 253, 244, 69, 11, 216, 113, 81, 192, 139, 153, 104, 205, - 4, 107, 218, 102, 84, 170, 189, 186, 36, 48, 176, 169, 129, 231, 144 - ] - ))) - ); - assert_eq!(iter.next(), Some(&Op::Parent)); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVValueHash( - vec![4], - vec![4], - [ - 198, 129, 51, 156, 134, 199, 7, 21, 172, 89, 146, 71, 4, 16, 82, 205, 89, 51, - 227, 215, 139, 195, 237, 202, 159, 191, 209, 172, 156, 38, 239, 192 - ] - ))) - ); - assert_eq!(iter.next(), Some(&Op::Child)); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVHash([ - 61, 233, 169, 61, 231, 15, 78, 53, 219, 99, 131, 45, 44, 165, 68, 87, 7, 52, 238, - 68, 142, 211, 110, 161, 111, 220, 108, 11, 17, 31, 88, 197 - ]))) - ); - assert_eq!(iter.next(), Some(&Op::Parent)); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::Hash([ - 133, 188, 175, 131, 60, 89, 221, 135, 133, 53, 205, 110, 58, 56, 128, 58, 1, 227, - 75, 122, 83, 20, 125, 44, 149, 44, 62, 130, 252, 134, 105, 200 - ]))) - ); - assert_eq!(iter.next(), Some(&Op::Child)); - assert!(iter.next().is_none()); - - let mut bytes = vec![]; - 
encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![2], vec![2])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); } #[test] fn right_to_left_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![3]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -5236,21 +4008,15 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { + let mut query = Query::new_with_direction(false); + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![8], vec![8]), @@ -5258,20 +4024,21 @@ mod test { (vec![5], vec![5]), (vec![4], vec![4]), (vec![3], vec![3]), - ], + ] ); } #[test] fn range_proof_missing_upper_bound() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = 
vec![QueryItem::Range( + let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 6, 5], )]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -5348,39 +4115,34 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), - ], + ] ); } #[test] fn range_proof_missing_lower_bound() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![ + let query_items = vec![ // 7 is not inclusive QueryItem::Range(vec![0, 0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7]), ]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -5456,28 +4218,23 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])], + vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])] ); } #[test] fn subset_proof() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, grove_version); let expected_hash = tree.hash().unwrap().to_owned(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); @@ -5486,7 +4243,7 @@ mod test { query.insert_all(); let (proof, ..) 
= walker - .create_full_proof(query.items.as_slice(), None, None, true) + .create_proof(query.items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -5497,14 +4254,15 @@ mod test { let mut query = Query::new(); query.insert_key(vec![0, 0, 0, 0, 0, 0, 0, 6]); - let res = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 1); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])], + vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])] ); // 1..10 prove (2..=5, 7..10) subset (3..=4, 7..=8) @@ -5512,7 +4270,7 @@ mod test { query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 2]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); query.insert_range(vec![0, 0, 0, 0, 0, 0, 0, 7]..vec![0, 0, 0, 0, 0, 0, 0, 10]); let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), None, None, true) + .create_proof(query.items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -5522,19 +4280,20 @@ mod test { let mut query = Query::new(); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 3]..=vec![0, 0, 0, 0, 0, 0, 0, 4]); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 7]..=vec![0, 0, 0, 0, 0, 0, 0, 8]); - let res = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 4); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 3], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 8], vec![123; 60]), - ], + ] ); // 1..10 prove (2..=5, 6..10) subset (4..=8) @@ -5542,7 +4301,7 @@ mod test { 
query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 2]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); query.insert_range(vec![0, 0, 0, 0, 0, 0, 0, 6]..vec![0, 0, 0, 0, 0, 0, 0, 10]); let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), None, None, true) + .create_proof(query.items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -5551,12 +4310,13 @@ mod test { let mut query = Query::new(); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 4]..=vec![0, 0, 0, 0, 0, 0, 0, 8]); - let res = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 5); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), @@ -5564,7 +4324,7 @@ mod test { (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 8], vec![123; 60]), - ], + ] ); // 1..10 prove (1..=3, 2..=5) subset (1..=5) @@ -5572,7 +4332,7 @@ mod test { query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 1]..=vec![0, 0, 0, 0, 0, 0, 0, 3]); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 2]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); let (proof, ..) 
= walker - .create_full_proof(query.items.as_slice(), None, None, true) + .create_proof(query.items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -5581,12 +4341,13 @@ mod test { let mut query = Query::new(); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 1]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); - let res = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 5); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 1], vec![123; 60]), @@ -5594,14 +4355,14 @@ mod test { (vec![0, 0, 0, 0, 0, 0, 0, 3], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], + ] ); // 1..10 prove full (..) limit to 5, subset (1..=5) let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), Some(5), None, true) + .create_proof(query.items.as_slice(), Some(5), true, grove_version) .unwrap() .expect("create_proof errored"); @@ -5610,12 +4371,13 @@ mod test { let mut query = Query::new(); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 1]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); - let res = verify_query(bytes.as_slice(), &query, Some(5), None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), Some(5), true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 5); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 1], vec![123; 60]), @@ -5623,40 +4385,13 @@ mod test { (vec![0, 0, 0, 0, 0, 0, 0, 3], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], - ); - - // 1..10 prove full (..) 
limit to 5, subset (1..=5) - let mut query = Query::new(); - query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); - let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), None, Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - - let mut query = Query::new(); - query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 1]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); - let res = verify_query(bytes.as_slice(), &query, None, Some(1), true, expected_hash) - .unwrap() - .unwrap(); - - assert_eq!(res.result_set.len(), 4); - compare_result_tuples( - res.result_set, - vec![ - (vec![0, 0, 0, 0, 0, 0, 0, 2], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 3], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], + ] ); } #[test] fn break_subset_proof() { + let grove_version = GroveVersion::latest(); // TODO: move this to where you'd set the constraints for this definition // goal is to show that ones limit and offset values are involved // whether a query is subset or not now also depends on the state @@ -5666,7 +4401,7 @@ mod test { // with limit and offset the nodes a query highlights now depends on state // hence it's impossible to know if something is subset at definition time - let mut tree = make_tree_seq(10); + let mut tree = make_tree_seq(10, grove_version); let expected_hash = tree.hash().unwrap().to_owned(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); @@ -5674,7 +4409,7 @@ mod test { let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); let (proof, ..) 
= walker - .create_full_proof(query.items.as_slice(), Some(3), None, true) + .create_proof(query.items.as_slice(), Some(3), true, grove_version) .unwrap() .expect("create_proof errored"); @@ -5684,38 +4419,35 @@ mod test { // Try to query 4 let mut query = Query::new(); query.insert_key(vec![0, 0, 0, 0, 0, 0, 0, 4]); - assert!( - verify_query(bytes.as_slice(), &query, Some(3), None, true, expected_hash) - .unwrap() - .is_err() - ); + assert!(query + .verify_proof(bytes.as_slice(), Some(3), true, expected_hash) + .unwrap() + .is_err()); // if limit offset parameters are different from generation then proof // verification returns an error Try superset proof with increased limit let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); - assert!( - verify_query(bytes.as_slice(), &query, Some(4), None, true, expected_hash) - .unwrap() - .is_err() - ); + assert!(query + .verify_proof(bytes.as_slice(), Some(4), true, expected_hash) + .unwrap() + .is_err()); // Try superset proof with less limit let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); - assert!( - verify_query(bytes.as_slice(), &query, Some(2), None, true, expected_hash) - .unwrap() - .is_err() - ); + assert!(query + .verify_proof(bytes.as_slice(), Some(2), true, expected_hash) + .unwrap() + .is_err()); } #[test] fn query_from_vec() { - let queryitems = vec![QueryItem::Range( + let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; - let query = Query::from(queryitems); + let query = Query::from(query_items); let mut expected = Vec::new(); expected.push(QueryItem::Range( @@ -5731,7 +4463,7 @@ mod test { vec![0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )); let query_vec: Vec = query.into(); - let expected = vec![QueryItem::Range( + let expected = [QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; assert_eq!( @@ -5746,8 +4478,8 @@ mod test { #[test] fn 
query_item_from_vec_u8() { - let queryitems: Vec = vec![42]; - let query = QueryItem::from(queryitems); + let query_items: Vec = vec![42]; + let query = QueryItem::from(query_items); let expected = QueryItem::Key(vec![42]); assert_eq!(query, expected); @@ -5755,21 +4487,22 @@ mod test { #[test] fn verify_ops() { - let mut tree = Tree::new(vec![5], vec![5], None, BasicMerk).unwrap(); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + let grove_version = GroveVersion::latest(); + let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode).unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); let root_hash = tree.hash().unwrap(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, ..) = walker - .create_full_proof(vec![QueryItem::Key(vec![5])].as_slice(), None, None, true) + .create_proof( + vec![QueryItem::Key(vec![5])].as_slice(), + None, + true, + grove_version, + ) .unwrap() .expect("failed to create proof"); let mut bytes = vec![]; @@ -5786,20 +4519,21 @@ mod test { #[test] #[should_panic(expected = "verify failed")] fn verify_ops_mismatched_hash() { - let mut tree = Tree::new(vec![5], vec![5], None, BasicMerk).unwrap(); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + let grove_version = GroveVersion::latest(); + let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode).unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, ..) 
= walker - .create_full_proof(vec![QueryItem::Key(vec![5])].as_slice(), None, None, true) + .create_proof( + vec![QueryItem::Key(vec![5])].as_slice(), + None, + true, + grove_version, + ) .unwrap() .expect("failed to create proof"); let mut bytes = vec![]; @@ -5814,19 +4548,20 @@ mod test { #[test] #[should_panic(expected = "verify failed")] fn verify_query_mismatched_hash() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let keys = vec![vec![5], vec![7]]; let (proof, ..) = walker - .create_full_proof( + .create_proof( keys.clone() .into_iter() .map(QueryItem::Key) .collect::>() .as_slice(), None, - None, true, + grove_version, ) .unwrap() .expect("failed to create proof"); @@ -5838,7 +4573,8 @@ mod test { query.insert_key(key.clone()); } - let _result = verify_query(bytes.as_slice(), &query, None, None, true, [42; 32]) + let _result = query + .verify_proof(bytes.as_slice(), None, true, [42; 32]) .unwrap() .expect("verify failed"); } diff --git a/merk/src/proofs/query/query_item/mod.rs b/merk/src/proofs/query/query_item/mod.rs index e950df81a..7c81a27e4 100644 --- a/merk/src/proofs/query/query_item/mod.rs +++ b/merk/src/proofs/query/query_item/mod.rs @@ -5,17 +5,19 @@ mod merge; use std::{ cmp, cmp::Ordering, + fmt, hash::Hash, ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}, }; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(feature = "full")] use grovedb_costs::{CostContext, CostsExt, OperationCost}; #[cfg(feature = "full")] use grovedb_storage::RawIterator; #[cfg(any(feature = "full", feature = "verify"))] use crate::error::Error; +use crate::proofs::hex_to_ascii; #[cfg(any(feature = "full", feature = "verify"))] /// A `QueryItem` represents a key or range of keys to be included in a proof. 
@@ -33,6 +35,50 @@ pub enum QueryItem { RangeAfterToInclusive(RangeInclusive>), } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for QueryItem { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + QueryItem::Key(key) => write!(f, "Key({})", hex_to_ascii(key)), + QueryItem::Range(range) => write!( + f, + "Range({} .. {})", + hex_to_ascii(&range.start), + hex_to_ascii(&range.end) + ), + QueryItem::RangeInclusive(range) => write!( + f, + "RangeInclusive({} ..= {})", + hex_to_ascii(range.start()), + hex_to_ascii(range.end()) + ), + QueryItem::RangeFull(_) => write!(f, "RangeFull"), + QueryItem::RangeFrom(range) => { + write!(f, "RangeFrom({} ..)", hex_to_ascii(&range.start)) + } + QueryItem::RangeTo(range) => write!(f, "RangeTo(.. {})", hex_to_ascii(&range.end)), + QueryItem::RangeToInclusive(range) => { + write!(f, "RangeToInclusive(..= {})", hex_to_ascii(&range.end)) + } + QueryItem::RangeAfter(range) => { + write!(f, "RangeAfter({} <..)", hex_to_ascii(&range.start)) + } + QueryItem::RangeAfterTo(range) => write!( + f, + "RangeAfterTo({} <.. {})", + hex_to_ascii(&range.start), + hex_to_ascii(&range.end) + ), + QueryItem::RangeAfterToInclusive(range) => write!( + f, + "RangeAfterToInclusive({} <..= {})", + hex_to_ascii(range.start()), + hex_to_ascii(range.end()) + ), + } + } +} + #[cfg(any(feature = "full", feature = "verify"))] impl Hash for QueryItem { fn hash(&self, state: &mut H) { @@ -295,11 +341,13 @@ impl QueryItem { iter.seek(end).flat_map(|_| iter.prev()) } } - QueryItem::RangeInclusive(range_inclusive) => iter.seek(if left_to_right { - range_inclusive.start() - } else { - range_inclusive.end() - }), + QueryItem::RangeInclusive(range_inclusive) => { + if left_to_right { + iter.seek(range_inclusive.start()) + } else { + iter.seek_for_prev(range_inclusive.end()) + } + } QueryItem::RangeFull(..) 
=> { if left_to_right { iter.seek_to_first() @@ -379,7 +427,7 @@ impl QueryItem { } #[cfg(any(feature = "full", feature = "verify"))] - fn compare(a: &[u8], b: &[u8]) -> cmp::Ordering { + pub fn compare(a: &[u8], b: &[u8]) -> cmp::Ordering { for (ai, bi) in a.iter().zip(b.iter()) { match ai.cmp(bi) { Ordering::Equal => continue, diff --git a/merk/src/proofs/query/verify.rs b/merk/src/proofs/query/verify.rs index 39ff471ad..e1d565114 100644 --- a/merk/src/proofs/query/verify.rs +++ b/merk/src/proofs/query/verify.rs @@ -1,18 +1,23 @@ +#[cfg(feature = "full")] use std::collections::LinkedList; +use std::fmt; use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; #[cfg(feature = "full")] -use crate::proofs::query::{Map, MapBuilder}; +use crate::proofs::{ + query::{Map, MapBuilder}, + Op, +}; use crate::{ error::Error, - proofs::{tree::execute, Decoder, Node, Op, Query}, + proofs::{hex_to_ascii, tree::execute, Decoder, Node, Query}, tree::value_hash, CryptoHash as MerkHash, CryptoHash, }; -#[cfg(any(feature = "full", feature = "verify"))] -pub type ProofAbsenceLimitOffset = (LinkedList, (bool, bool), Option, Option); +#[cfg(feature = "full")] +pub type ProofAbsenceLimit = (LinkedList, (bool, bool), Option); #[cfg(feature = "full")] /// Verify proof against expected hash @@ -37,266 +42,406 @@ pub fn verify(bytes: &[u8], expected_hash: MerkHash) -> CostResult { }) } -#[cfg(any(feature = "full", feature = "verify"))] -/// Verifies the encoded proof with the given query -/// -/// Every key in `keys` is checked to either have a key/value pair in the proof, -/// or to have its absence in the tree proven. -/// -/// Returns `Err` if the proof is invalid, or a list of proven values associated -/// with `keys`. For example, if `keys` contains keys `A` and `B`, the returned -/// list will contain 2 elements, the value of `A` and the value of `B`. 
Keys -/// proven to be absent in the tree will have an entry of `None`, keys that have -/// a proven value will have an entry of `Some(value)`. -pub fn execute_proof( - bytes: &[u8], - query: &Query, - limit: Option, - offset: Option, - left_to_right: bool, -) -> CostResult<(MerkHash, ProofVerificationResult), Error> { - let mut cost = OperationCost::default(); - - let mut output = Vec::with_capacity(query.len()); - let mut last_push = None; - let mut query = query.directional_iter(left_to_right).peekable(); - let mut in_range = false; - let mut current_limit = limit; - let mut current_offset = offset; +#[derive(Copy, Clone, Debug)] +pub struct VerifyOptions { + /// When set to true, this will give back absence proofs for any query items + /// that are keys. This means QueryItem::Key(), and not the ranges. + pub absence_proofs_for_non_existing_searched_keys: bool, + /// Verifies that we have all the data. Todo: verify that this works + /// properly + pub verify_proof_succinctness: bool, + /// Should return empty trees in the result? + pub include_empty_trees_in_result: bool, +} - let ops = Decoder::new(bytes); +impl Default for VerifyOptions { + fn default() -> Self { + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: true, + verify_proof_succinctness: true, + include_empty_trees_in_result: false, + } + } +} - let root_wrapped = execute(ops, true, |node| { - let mut execute_node = |key: &Vec, - value: Option<&Vec>, - value_hash: CryptoHash| - -> Result<_, Error> { - while let Some(item) = query.peek() { - // get next item in query - let query_item = *item; - let (lower_bound, start_non_inclusive) = query_item.lower_bound(); - let (upper_bound, end_inclusive) = query_item.upper_bound(); - - // terminate if we encounter a node before the current query item. - // this means a node less than the current query item for left to right. - // and a node greater than the current query item for right to left. 
- let terminate = if left_to_right { - // if the query item is lower unbounded, then a node cannot be less than it. - // checks that the lower bound of the query item not greater than the key - // if they are equal make sure the start is inclusive - !query_item.lower_unbounded() - && ((lower_bound.expect("confirmed not unbounded") > key.as_slice()) - || (start_non_inclusive - && lower_bound.expect("confirmed not unbounded") == key.as_slice())) +impl Query { + #[cfg(any(feature = "full", feature = "verify"))] + /// Verifies the encoded proof with the given query + /// + /// Every key in `keys` is checked to either have a key/value pair in the + /// proof, or to have its absence in the tree proven. + /// + /// Returns `Err` if the proof is invalid, or a list of proven values + /// associated with `keys`. For example, if `keys` contains keys `A` and + /// `B`, the returned list will contain 2 elements, the value of `A` and + /// the value of `B`. Keys proven to be absent in the tree will have an + /// entry of `None`, keys that have a proven value will have an entry of + /// `Some(value)`. 
+ pub fn execute_proof( + &self, + bytes: &[u8], + limit: Option, + left_to_right: bool, + ) -> CostResult<(MerkHash, ProofVerificationResult), Error> { + #[cfg(feature = "proof_debug")] + { + println!( + "executing proof with limit {:?} going {} using query {}", + limit, + if left_to_right { + "left to right" } else { - !query_item.upper_unbounded() - && ((upper_bound.expect("confirmed not unbounded") < key.as_slice()) - || (!end_inclusive - && upper_bound.expect("confirmed not unbounded") == key.as_slice())) - }; - if terminate { - break; - } + "right to left" + }, + self + ); + } + let mut cost = OperationCost::default(); - if !in_range { - // this is the first data we have encountered for this query item - if left_to_right { - // ensure lower bound of query item is proven - match last_push { - // lower bound is proven - we have an exact match - // ignoring the case when the lower bound is unbounded - // as it's not possible the get an exact key match for - // an unbounded value - _ if Some(key.as_slice()) == query_item.lower_bound().0 => {} - - // lower bound is proven - this is the leftmost node - // in the tree - None => {} - - // lower bound is proven - the preceding tree node - // is lower than the bound - Some(Node::KV(..)) => {} - Some(Node::KVDigest(..)) => {} - Some(Node::KVRefValueHash(..)) => {} - Some(Node::KVValueHash(..)) => {} - - // cannot verify lower bound - we have an abridged - // tree so we cannot tell what the preceding key was - Some(_) => { - return Err(Error::InvalidProofError( - "Cannot verify lower bound of queried range".to_string(), - )); - } - } + let mut output = Vec::with_capacity(self.len()); + let mut last_push = None; + let mut query = self.directional_iter(left_to_right).peekable(); + let mut in_range = false; + let original_limit = limit; + let mut current_limit = limit; + + let ops = Decoder::new(bytes); + + let root_wrapped = execute(ops, true, |node| { + let mut execute_node = |key: &Vec, + value: Option<&Vec>, + 
value_hash: CryptoHash| + -> Result<_, Error> { + while let Some(item) = query.peek() { + // get next item in query + let query_item = *item; + let (lower_bound, start_non_inclusive) = query_item.lower_bound(); + let (upper_bound, end_inclusive) = query_item.upper_bound(); + + // terminate if we encounter a node before the current query item. + // this means a node less than the current query item for left to right. + // and a node greater than the current query item for right to left. + let terminate = if left_to_right { + // if the query item is lower unbounded, then a node cannot be less than it. + // checks that the lower bound of the query item not greater than the key + // if they are equal make sure the start is inclusive + !query_item.lower_unbounded() + && ((lower_bound.expect("confirmed not unbounded") > key.as_slice()) + || (start_non_inclusive + && lower_bound.expect("confirmed not unbounded") + == key.as_slice())) } else { - // ensure upper bound of query item is proven - match last_push { - // upper bound is proven - we have an exact match - // ignoring the case when the upper bound is unbounded - // as it's not possible the get an exact key match for - // an unbounded value - _ if Some(key.as_slice()) == query_item.upper_bound().0 => {} - - // lower bound is proven - this is the rightmost node - // in the tree - None => {} - - // upper bound is proven - the preceding tree node - // is greater than the bound - Some(Node::KV(..)) => {} - Some(Node::KVDigest(..)) => {} - Some(Node::KVRefValueHash(..)) => {} - Some(Node::KVValueHash(..)) => {} - - // cannot verify upper bound - we have an abridged - // tree so we cannot tell what the previous key was - Some(_) => { - return Err(Error::InvalidProofError( - "Cannot verify upper bound of queried range".to_string(), - )); + !query_item.upper_unbounded() + && ((upper_bound.expect("confirmed not unbounded") < key.as_slice()) + || (!end_inclusive + && upper_bound.expect("confirmed not unbounded") + == 
key.as_slice())) + }; + if terminate { + break; + } + + if !in_range { + // this is the first data we have encountered for this query item + if left_to_right { + // ensure lower bound of query item is proven + match last_push { + // lower bound is proven - we have an exact match + // ignoring the case when the lower bound is unbounded + // as it's not possible the get an exact key match for + // an unbounded value + _ if Some(key.as_slice()) == query_item.lower_bound().0 => {} + + // lower bound is proven - this is the leftmost node + // in the tree + None => {} + + // lower bound is proven - the preceding tree node + // is lower than the bound + Some(Node::KV(..)) => {} + Some(Node::KVDigest(..)) => {} + Some(Node::KVRefValueHash(..)) => {} + Some(Node::KVValueHash(..)) => {} + + // cannot verify lower bound - we have an abridged + // tree so we cannot tell what the preceding key was + Some(_) => { + return Err(Error::InvalidProofError( + "Cannot verify lower bound of queried range".to_string(), + )); + } + } + } else { + // ensure upper bound of query item is proven + match last_push { + // upper bound is proven - we have an exact match + // ignoring the case when the upper bound is unbounded + // as it's not possible the get an exact key match for + // an unbounded value + _ if Some(key.as_slice()) == query_item.upper_bound().0 => {} + + // lower bound is proven - this is the rightmost node + // in the tree + None => {} + + // upper bound is proven - the preceding tree node + // is greater than the bound + Some(Node::KV(..)) => {} + Some(Node::KVDigest(..)) => {} + Some(Node::KVRefValueHash(..)) => {} + Some(Node::KVValueHash(..)) => {} + + // cannot verify upper bound - we have an abridged + // tree so we cannot tell what the previous key was + Some(_) => { + return Err(Error::InvalidProofError( + "Cannot verify upper bound of queried range".to_string(), + )); + } } } } - } - if left_to_right { - if query_item.upper_bound().0.is_some() - && Some(key.as_slice()) 
>= query_item.upper_bound().0 + if left_to_right { + if query_item.upper_bound().0.is_some() + && Some(key.as_slice()) >= query_item.upper_bound().0 + { + // at or past upper bound of range (or this was an exact + // match on a single-key queryitem), advance to next query + // item + query.next(); + in_range = false; + } else { + // have not reached upper bound, we expect more values + // to be proven in the range (and all pushes should be + // unabridged until we reach end of range) + in_range = true; + } + } else if query_item.lower_bound().0.is_some() + && Some(key.as_slice()) <= query_item.lower_bound().0 { - // at or past upper bound of range (or this was an exact + // at or before lower bound of range (or this was an exact // match on a single-key queryitem), advance to next query // item query.next(); in_range = false; } else { - // have not reached upper bound, we expect more values + // have not reached lower bound, we expect more values // to be proven in the range (and all pushes should be // unabridged until we reach end of range) in_range = true; } - } else if query_item.lower_bound().0.is_some() - && Some(key.as_slice()) <= query_item.lower_bound().0 - { - // at or before lower bound of range (or this was an exact - // match on a single-key queryitem), advance to next query - // item - query.next(); - in_range = false; - } else { - // have not reached lower bound, we expect more values - // to be proven in the range (and all pushes should be - // unabridged until we reach end of range) - in_range = true; - } - // this push matches the queried item - if query_item.contains(key) { - // if there are still offset slots, and node is of type kvdigest - // reduce the offset counter - // also, verify that a kv node was not pushed before offset is exhausted - if let Some(offset) = current_offset { - if offset > 0 && value.is_none() { - current_offset = Some(offset - 1); + // this push matches the queried item + if query_item.contains(key) { + if let Some(val) 
= value { + if let Some(limit) = current_limit { + if limit == 0 { + return Err(Error::InvalidProofError(format!( + "Proof returns more data than limit {:?}", + original_limit + ))); + } else { + current_limit = Some(limit - 1); + if current_limit == Some(0) { + in_range = false; + } + } + } + #[cfg(feature = "proof_debug")] + { + println!( + "pushing {}", + ProvedKeyOptionalValue { + key: key.clone(), + value: Some(val.clone()), + proof: value_hash, + } + ); + } + // add data to output + output.push(ProvedKeyOptionalValue { + key: key.clone(), + value: Some(val.clone()), + proof: value_hash, + }); + + // continue to next push break; - } else if offset > 0 && value.is_some() { - // inserting a kv node before exhausting offset + } else { return Err(Error::InvalidProofError( - "Proof returns data before offset is exhausted".to_string(), + "Proof is missing data for query".to_string(), )); } } + {} + // continue to next queried item + } + Ok(()) + }; - // offset is equal to zero or none - if let Some(val) = value { - if let Some(limit) = current_limit { - if limit == 0 { - return Err(Error::InvalidProofError( - "Proof returns more data than limit".to_string(), - )); - } else { - current_limit = Some(limit - 1); - if current_limit == Some(0) { - in_range = false; - } - } - } - // add data to output - output.push(ProvedKeyValue { - key: key.clone(), - value: val.clone(), - proof: value_hash, - }); - - // continue to next push - break; - } else { - return Err(Error::InvalidProofError( - "Proof is missing data for query".to_string(), - )); + match node { + Node::KV(key, value) => { + #[cfg(feature = "proof_debug")] + { + println!("Processing KV node"); + } + execute_node(key, Some(value), value_hash(value).unwrap())?; + } + Node::KVValueHash(key, value, value_hash) => { + #[cfg(feature = "proof_debug")] + { + println!("Processing KVValueHash node"); + } + execute_node(key, Some(value), *value_hash)?; + } + Node::KVDigest(key, value_hash) => { + #[cfg(feature = 
"proof_debug")] + { + println!("Processing KVDigest node"); + } + execute_node(key, None, *value_hash)?; + } + Node::KVRefValueHash(key, value, value_hash) => { + #[cfg(feature = "proof_debug")] + { + println!("Processing KVRefValueHash node"); + } + execute_node(key, Some(value), *value_hash)?; + } + Node::Hash(_) | Node::KVHash(_) | Node::KVValueHashFeatureType(..) => { + if in_range { + return Err(Error::InvalidProofError(format!( + "Proof is missing data for query range. Encountered unexpected node \ + type: {}", + node + ))); } } - {} - // continue to next queried item } - Ok(()) - }; - if let Node::KV(key, value) = node { - execute_node(key, Some(value), value_hash(value).unwrap())?; - } else if let Node::KVValueHash(key, value, value_hash) = node { - execute_node(key, Some(value), *value_hash)?; - } else if let Node::KVDigest(key, value_hash) = node { - execute_node(key, None, *value_hash)?; - } else if let Node::KVRefValueHash(key, value, value_hash) = node { - execute_node(key, Some(value), *value_hash)?; - } else if in_range { - // we encountered a queried range but the proof was abridged (saw a - // non-KV push), we are missing some part of the range - return Err(Error::InvalidProofError( - "Proof is missing data for query for range".to_string(), - )); - } + last_push = Some(node.clone()); - last_push = Some(node.clone()); + Ok(()) + }); - Ok(()) - }); + let root = cost_return_on_error!(&mut cost, root_wrapped); - let root = cost_return_on_error!(&mut cost, root_wrapped); + // we have remaining query items, check absence proof against right edge of + // tree + if query.peek().is_some() { + if current_limit == Some(0) { + } else { + match last_push { + // last node in tree was less than queried item + Some(Node::KV(..)) => {} + Some(Node::KVDigest(..)) => {} + Some(Node::KVRefValueHash(..)) => {} + Some(Node::KVValueHash(..)) => {} - // we have remaining query items, check absence proof against right edge of - // tree - if query.peek().is_some() { - if 
current_limit == Some(0) { - } else { - match last_push { - // last node in tree was less than queried item - Some(Node::KV(..)) => {} - Some(Node::KVDigest(..)) => {} - Some(Node::KVRefValueHash(..)) => {} - Some(Node::KVValueHash(..)) => {} - - // proof contains abridged data so we cannot verify absence of - // remaining query items - _ => { - return Err(Error::InvalidProofError( - "Proof is missing data for query".to_string(), - )) - .wrap_with_cost(cost) + // proof contains abridged data so we cannot verify absence of + // remaining query items + _ => { + return Err(Error::InvalidProofError( + "Proof is missing data for query".to_string(), + )) + .wrap_with_cost(cost) + } } } } + + Ok(( + root.hash().unwrap_add_cost(&mut cost), + ProofVerificationResult { + result_set: output, + limit: current_limit, + }, + )) + .wrap_with_cost(cost) } - Ok(( - root.hash().unwrap_add_cost(&mut cost), - ProofVerificationResult { - result_set: output, - limit: current_limit, - offset: current_offset, - }, - )) - .wrap_with_cost(cost) + #[cfg(any(feature = "full", feature = "verify"))] + /// Verifies the encoded proof with the given query and expected hash + pub fn verify_proof( + &self, + bytes: &[u8], + limit: Option, + left_to_right: bool, + expected_hash: MerkHash, + ) -> CostResult { + self.execute_proof(bytes, limit, left_to_right) + .map_ok(|(root_hash, verification_result)| { + if root_hash == expected_hash { + Ok(verification_result) + } else { + Err(Error::InvalidProofError(format!( + "Proof did not match expected hash\n\tExpected: \ + {expected_hash:?}\n\tActual: {root_hash:?}" + ))) + } + }) + .flatten() + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +#[derive(PartialEq, Eq, Debug)] +/// Proved key-value +pub struct ProvedKeyOptionalValue { + /// Key + pub key: Vec, + /// Value + pub value: Option>, + /// Proof + pub proof: CryptoHash, +} + +impl From for ProvedKeyOptionalValue { + fn from(value: ProvedKeyValue) -> Self { + let ProvedKeyValue { key, value, 
proof } = value; + + ProvedKeyOptionalValue { + key, + value: Some(value), + proof, + } + } +} + +impl TryFrom for ProvedKeyValue { + type Error = Error; + + fn try_from(value: ProvedKeyOptionalValue) -> Result { + let ProvedKeyOptionalValue { key, value, proof } = value; + let value = value.ok_or(Error::InvalidProofError(format!( + "expected {}", + hex_to_ascii(&key) + )))?; + Ok(ProvedKeyValue { key, value, proof }) + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for ProvedKeyOptionalValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let key_string = if self.key.len() == 1 && self.key[0] < b"0"[0] { + hex::encode(&self.key) + } else { + String::from_utf8(self.key.clone()).unwrap_or_else(|_| hex::encode(&self.key)) + }; + write!( + f, + "ProvedKeyOptionalValue {{ key: {}, value: {}, proof: {} }}", + key_string, + if let Some(value) = &self.value { + hex::encode(value) + } else { + "None".to_string() + }, + hex::encode(self.proof) + ) + } } #[cfg(any(feature = "full", feature = "verify"))] @@ -311,38 +456,39 @@ pub struct ProvedKeyValue { pub proof: CryptoHash, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for ProvedKeyValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "ProvedKeyValue {{ key: {}, value: {}, proof: {} }}", + String::from_utf8(self.key.clone()).unwrap_or_else(|_| hex::encode(&self.key)), + hex::encode(&self.value), + hex::encode(self.proof) + ) + } +} + #[cfg(any(feature = "full", feature = "verify"))] #[derive(PartialEq, Eq, Debug)] /// Proof verification result pub struct ProofVerificationResult { /// Result set - pub result_set: Vec, + pub result_set: Vec, /// Limit pub limit: Option, - /// Offset - pub offset: Option, } #[cfg(any(feature = "full", feature = "verify"))] -/// Verifies the encoded proof with the given query and expected hash -pub fn verify_query( - bytes: &[u8], - query: &Query, - limit: Option, - offset: Option, - 
left_to_right: bool, - expected_hash: MerkHash, -) -> CostResult { - execute_proof(bytes, query, limit, offset, left_to_right) - .map_ok(|(root_hash, verification_result)| { - if root_hash == expected_hash { - Ok(verification_result) - } else { - Err(Error::InvalidProofError(format!( - "Proof did not match expected hash\n\tExpected: {expected_hash:?}\n\tActual: \ - {root_hash:?}" - ))) - } - }) - .flatten() +impl fmt::Display for ProofVerificationResult { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "ProofVerificationResult {{")?; + writeln!(f, " result_set: [")?; + for (index, proved_key_value) in self.result_set.iter().enumerate() { + writeln!(f, " {}: {},", index, proved_key_value)?; + } + writeln!(f, " ],")?; + writeln!(f, " limit: {:?}", self.limit)?; + write!(f, "}}") + } } diff --git a/merk/src/proofs/tree.rs b/merk/src/proofs/tree.rs index 819fd43b1..16655a6dc 100644 --- a/merk/src/proofs/tree.rs +++ b/merk/src/proofs/tree.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Tree proofs #[cfg(feature = "full")] @@ -43,6 +15,12 @@ use super::{Node, Op}; use crate::tree::{combine_hash, kv_digest_to_kv_hash, kv_hash, node_hash, value_hash, NULL_HASH}; #[cfg(any(feature = "full", feature = "verify"))] use crate::{error::Error, tree::CryptoHash}; +#[cfg(feature = "full")] +use crate::{ + proofs::chunk::chunk::{LEFT, RIGHT}, + Link, + TreeFeatureType::SummedMerkNode, +}; #[cfg(any(feature = "full", feature = "verify"))] /// Contains a tree's child node and its hash. The hash can always be assumed to @@ -55,6 +33,36 @@ pub struct Child { pub hash: CryptoHash, } +impl Child { + #[cfg(feature = "full")] + pub fn as_link(&self) -> Link { + let (key, sum) = match &self.tree.node { + Node::KV(key, _) | Node::KVValueHash(key, ..) => (key.as_slice(), None), + Node::KVValueHashFeatureType(key, _, _, feature_type) => { + let sum_value = match feature_type { + SummedMerkNode(sum) => Some(*sum), + _ => None, + }; + (key.as_slice(), sum_value) + } + // for the connection between the trunk and leaf chunks, we don't + // have the child key so we must first write in an empty one. once + // the leaf gets verified, we can write in this key to its parent + _ => (&[] as &[u8], None), + }; + + Link::Reference { + hash: self.hash, + sum, + child_heights: ( + self.tree.child_heights.0 as u8, + self.tree.child_heights.1 as u8, + ), + key: key.to_vec(), + } + } +} + #[cfg(any(feature = "full", feature = "verify"))] /// A binary tree data structure used to represent a select subset of a tree /// when verifying Merkle proofs. 
@@ -68,6 +76,8 @@ pub struct Tree { pub right: Option, /// Height pub height: usize, + /// Child Heights + pub child_heights: (usize, usize), } #[cfg(any(feature = "full", feature = "verify"))] @@ -79,6 +89,7 @@ impl From for Tree { left: None, right: None, height: 1, + child_heights: (0, 0), } } } @@ -167,6 +178,42 @@ impl Tree { Ok(()) } + #[cfg(feature = "full")] + /// Does an in-order traversal over references to all the nodes in the tree, + /// calling `visit_node` for each with the current traversal path. + pub fn visit_refs_track_traversal_and_parent< + F: FnMut(&Self, &mut Vec, Option<&[u8]>) -> Result<(), Error>, + >( + &self, + base_traversal_instruction: &mut Vec, + parent_key: Option<&[u8]>, + visit_node: &mut F, + ) -> Result<(), Error> { + if let Some(child) = &self.left { + base_traversal_instruction.push(LEFT); + child.tree.visit_refs_track_traversal_and_parent( + base_traversal_instruction, + Some(self.key()), + visit_node, + )?; + base_traversal_instruction.pop(); + } + + visit_node(self, base_traversal_instruction, parent_key)?; + + if let Some(child) = &self.right { + base_traversal_instruction.push(RIGHT); + child.tree.visit_refs_track_traversal_and_parent( + base_traversal_instruction, + Some(self.key()), + visit_node, + )?; + base_traversal_instruction.pop(); + } + + Ok(()) + } + /// Returns an immutable reference to the child on the given side, if any. 
#[cfg(any(feature = "full", feature = "verify"))] pub const fn child(&self, left: bool) -> Option<&Child> { @@ -202,6 +249,13 @@ impl Tree { self.height = self.height.max(child.height + 1); + // update child height + if left { + self.child_heights.0 = child.height; + } else { + self.child_heights.1 = child.height; + } + let hash = child.hash().unwrap_add_cost(&mut cost); let tree = Box::new(child); *self.child_mut(left) = Some(Child { tree, hash }); @@ -238,13 +292,24 @@ impl Tree { _ => panic!("Expected node to be type KV"), } } + + #[cfg(feature = "full")] + pub(crate) fn sum(&self) -> Option { + match self.node { + Node::KVValueHashFeatureType(.., feature_type) => match feature_type { + SummedMerkNode(sum) => Some(sum), + _ => None, + }, + _ => panic!("Expected node to be type KVValueHashFeatureType"), + } + } } #[cfg(feature = "full")] /// `LayerIter` iterates over the nodes in a `Tree` at a given depth. Nodes are /// visited in order. pub struct LayerIter<'a> { - stack: Vec<&'a Tree>, + stack: Vec<(&'a Tree, usize)>, depth: usize, } @@ -257,25 +322,9 @@ impl<'a> LayerIter<'a> { depth, }; - iter.traverse_to_start(tree, depth); + iter.stack.push((tree, 0)); iter } - - /// Builds up the stack by traversing through left children to the desired - /// depth. 
- fn traverse_to_start(&mut self, tree: &'a Tree, remaining_depth: usize) { - self.stack.push(tree); - - if remaining_depth == 0 { - return; - } - - if let Some(child) = tree.child(true) { - self.traverse_to_start(&child.tree, remaining_depth - 1) - } else { - panic!("Could not traverse to given layer") - } - } } #[cfg(feature = "full")] @@ -283,32 +332,20 @@ impl<'a> Iterator for LayerIter<'a> { type Item = &'a Tree; fn next(&mut self) -> Option { - let item = self.stack.pop(); - let mut popped = item; - - loop { - if self.stack.is_empty() { - return item; - } - - let parent = self.stack.last().unwrap(); - let left_child = parent.child(true).unwrap(); - let right_child = parent.child(false).unwrap(); - - if left_child.tree.as_ref() == popped.unwrap() { - self.stack.push(&right_child.tree); - - while self.stack.len() - 1 < self.depth { - let parent = self.stack.last().unwrap(); - let left_child = parent.child(true).unwrap(); - self.stack.push(&left_child.tree); + while let Some((item, item_depth)) = self.stack.pop() { + if item_depth != self.depth { + if let Some(right_child) = item.child(false) { + self.stack.push((&right_child.tree, item_depth + 1)) + } + if let Some(left_child) = item.child(true) { + self.stack.push((&left_child.tree, item_depth + 1)) } - - return item; } else { - popped = self.stack.pop(); + return Some(item); } } + + None } } @@ -471,7 +508,19 @@ where .wrap_with_cost(cost); } - Ok(stack.pop().unwrap()).wrap_with_cost(cost) + let tree = stack.pop().unwrap(); + + if tree.child_heights.0.max(tree.child_heights.1) + - tree.child_heights.0.min(tree.child_heights.1) + > 1 + { + return Err(Error::InvalidProofError( + "Expected proof to result in a valid avl tree".to_string(), + )) + .wrap_with_cost(cost); + } + + Ok(tree).wrap_with_cost(cost) } #[cfg(feature = "full")] @@ -555,4 +604,104 @@ mod test { } assert!(iter.next().is_none()); } + + #[test] + fn execute_non_avl_tree() { + let non_avl_tree_proof = vec![ + Op::Push(Node::KV(vec![1], vec![1])), 
+ Op::Push(Node::KV(vec![2], vec![2])), + Op::Parent, + Op::Push(Node::KV(vec![3], vec![3])), + Op::Parent, + ]; + let execution_result = + execute(non_avl_tree_proof.into_iter().map(Ok), false, |_| Ok(())).unwrap(); + assert!(execution_result.is_err()); + } + + #[test] + fn child_to_link() { + let basic_merk_tree = vec![ + Op::Push(Node::KV(vec![1], vec![1])), + Op::Push(Node::KV(vec![2], vec![2])), + Op::Parent, + Op::Push(Node::KV(vec![3], vec![3])), + Op::Child, + ]; + let tree = execute(basic_merk_tree.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .unwrap(); + + let left_link = tree.left.as_ref().unwrap().as_link(); + let right_link = tree.right.as_ref().unwrap().as_link(); + + assert_eq!( + left_link, + Link::Reference { + hash: tree.left.as_ref().map(|node| node.hash).unwrap(), + sum: None, + child_heights: (0, 0), + key: vec![1] + } + ); + + assert_eq!( + right_link, + Link::Reference { + hash: tree.right.as_ref().map(|node| node.hash).unwrap(), + sum: None, + child_heights: (0, 0), + key: vec![3] + } + ); + + let sum_merk_tree = vec![ + Op::Push(Node::KVValueHashFeatureType( + vec![1], + vec![1], + [0; 32], + SummedMerkNode(3), + )), + Op::Push(Node::KVValueHashFeatureType( + vec![2], + vec![2], + [0; 32], + SummedMerkNode(1), + )), + Op::Parent, + Op::Push(Node::KVValueHashFeatureType( + vec![3], + vec![3], + [0; 32], + SummedMerkNode(1), + )), + Op::Child, + ]; + let tree = execute(sum_merk_tree.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .unwrap(); + + let left_link = tree.left.as_ref().unwrap().as_link(); + let right_link = tree.right.as_ref().unwrap().as_link(); + + assert_eq!( + left_link, + Link::Reference { + hash: tree.left.as_ref().map(|node| node.hash).unwrap(), + sum: Some(3), + child_heights: (0, 0), + key: vec![1] + } + ); + + assert_eq!( + right_link, + Link::Reference { + hash: tree.right.as_ref().map(|node| node.hash).unwrap(), + sum: Some(1), + child_heights: (0, 0), + key: vec![3] + } + ); + } } diff --git 
a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index b75181589..397ad13a2 100644 --- a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -35,17 +35,21 @@ use std::{convert::TryInto, ops::Range}; use grovedb_costs::storage_cost::removal::StorageRemovedBytes::BasicStorageRemoval; use grovedb_path::SubtreePath; use grovedb_storage::{Storage, StorageBatch}; +use grovedb_version::version::GroveVersion; use rand::prelude::*; pub use temp_merk::TempMerk; use crate::{ - tree::{kv::KV, BatchEntry, MerkBatch, NoopCommit, Op, PanicSource, Tree, Walker}, + tree::{ + kv::{ValueDefinedCostType, KV}, + BatchEntry, MerkBatch, NoopCommit, Op, PanicSource, TreeNode, Walker, + }, Merk, - TreeFeatureType::{BasicMerk, SummedMerk}, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; /// Assert tree invariants -pub fn assert_tree_invariants(tree: &Tree) { +pub fn assert_tree_invariants(tree: &TreeNode) { assert!(tree.balance_factor().abs() < 2); let maybe_left = tree.link(true); @@ -71,7 +75,11 @@ pub fn assert_tree_invariants(tree: &Tree) { /// Apply given batch to given tree and commit using memory only. /// Used by `apply_memonly` which also performs checks using /// `assert_tree_invariants`. Return Tree. 
-pub fn apply_memonly_unchecked(tree: Tree, batch: &MerkBatch>) -> Tree { +pub fn apply_memonly_unchecked( + tree: TreeNode, + batch: &MerkBatch>, + grove_version: &GroveVersion, +) -> TreeNode { let is_sum_node = tree.is_sum_node(); let walker = Walker::::new(tree, PanicSource {}); let mut tree = Walker::::apply_to( @@ -85,35 +93,28 @@ pub fn apply_memonly_unchecked(tree: Tree, batch: &MerkBatch>) -> Tree { is_sum_node, )) }, + None::<&fn(&[u8], &GroveVersion) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") .0 .expect("expected tree"); let is_sum_node = tree.is_sum_node(); - tree.commit( - &mut NoopCommit {}, - &|key, value| { - Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( - key.len() as u32, - value.len() as u32, - is_sum_node, - )) - }, - &mut |_, _, _| Ok((false, None)), - &mut |_, key_bytes_to_remove, value_bytes_to_remove| { - Ok(( - BasicStorageRemoval(key_bytes_to_remove), - BasicStorageRemoval(value_bytes_to_remove), - )) - }, - ) + tree.commit(&mut NoopCommit {}, &|key, value| { + Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( + key.len() as u32, + value.len() as u32, + is_sum_node, + )) + }) .unwrap() .expect("commit failed"); tree @@ -121,8 +122,12 @@ pub fn apply_memonly_unchecked(tree: Tree, batch: &MerkBatch>) -> Tree { /// Apply given batch to given tree and commit using memory only. /// Perform checks using `assert_tree_invariants`. Return Tree. 
-pub fn apply_memonly(tree: Tree, batch: &MerkBatch>) -> Tree { - let tree = apply_memonly_unchecked(tree, batch); +pub fn apply_memonly( + tree: TreeNode, + batch: &MerkBatch>, + grove_version: &GroveVersion, +) -> TreeNode { + let tree = apply_memonly_unchecked(tree, batch, grove_version); assert_tree_invariants(&tree); tree } @@ -130,10 +135,11 @@ pub fn apply_memonly(tree: Tree, batch: &MerkBatch>) -> Tree { /// Applies given batch to given tree or creates a new tree to apply to and /// commits to memory only. pub fn apply_to_memonly( - maybe_tree: Option, + maybe_tree: Option, batch: &MerkBatch>, is_sum_tree: bool, -) -> Option { + grove_version: &GroveVersion, +) -> Option { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); Walker::::apply_to( maybe_walker, @@ -146,38 +152,30 @@ pub fn apply_to_memonly( is_sum_tree, )) }, + None::<&fn(&[u8], &GroveVersion) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") .0 .map(|mut tree| { let is_sum_node = tree.is_sum_node(); - tree.commit( - &mut NoopCommit {}, - &|key, value| { - Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( - key.len() as u32, - value.len() as u32, - is_sum_node, - )) - }, - &mut |_, _, _| Ok((false, None)), - &mut |_, key_bytes_to_remove, value_bytes_to_remove| { - Ok(( - BasicStorageRemoval(key_bytes_to_remove), - BasicStorageRemoval(value_bytes_to_remove), - )) - }, - ) + tree.commit(&mut NoopCommit {}, &|key, value| { + Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( + key.len() as u32, + value.len() as u32, + is_sum_node, + )) + }) .unwrap() .expect("commit failed"); - println!("{:?}", &tree); assert_tree_invariants(&tree); tree }) @@ -190,7 +188,7 @@ pub const fn seq_key(n: u64) -> [u8; 8] { /// Create batch entry with Put op using 
key n and a fixed value pub fn put_entry(n: u64) -> BatchEntry> { - (seq_key(n).to_vec(), Op::Put(vec![123; 60], BasicMerk)) + (seq_key(n).to_vec(), Op::Put(vec![123; 60], BasicMerkNode)) } /// Create batch entry with Delete op using key n @@ -248,24 +246,25 @@ pub fn make_tree_rand( batch_size: u64, initial_seed: u64, is_sum_tree: bool, -) -> Tree { + grove_version: &GroveVersion, +) -> TreeNode { assert!(node_count >= batch_size); assert_eq!((node_count % batch_size), 0); let value = vec![123; 60]; let feature_type = if is_sum_tree { - SummedMerk(0) + SummedMerkNode(0) } else { - BasicMerk + BasicMerkNode }; - let mut tree = Tree::new(vec![0; 20], value, None, feature_type).unwrap(); + let mut tree = TreeNode::new(vec![0; 20], value, None, feature_type).unwrap(); let mut seed = initial_seed; let batch_count = node_count / batch_size; for _ in 0..batch_count { let batch = make_batch_rand(batch_size, seed); - tree = apply_memonly(tree, &batch); + tree = apply_memonly(tree, &batch, grove_version); seed += 1; } @@ -274,7 +273,19 @@ pub fn make_tree_rand( /// Create tree with initial fixed values and apply `node count` Put ops using /// sequential keys using memory only -pub fn make_tree_seq(node_count: u64) -> Tree { +/// starting tree node is [0; 20] +pub fn make_tree_seq(node_count: u64, grove_version: &GroveVersion) -> TreeNode { + make_tree_seq_with_start_key(node_count, [0; 20].to_vec(), grove_version) +} + +/// Create tree with initial fixed values and apply `node count` Put ops using +/// sequential keys using memory only +/// requires a starting key vector +pub fn make_tree_seq_with_start_key( + node_count: u64, + start_key: Vec, + grove_version: &GroveVersion, +) -> TreeNode { let batch_size = if node_count >= 10_000 { assert_eq!(node_count % 10_000, 0); 10_000 @@ -283,21 +294,22 @@ pub fn make_tree_seq(node_count: u64) -> Tree { }; let value = vec![123; 60]; - let mut tree = Tree::new(vec![0; 20], value, None, BasicMerk).unwrap(); + + let mut tree = 
TreeNode::new(start_key, value, None, BasicMerkNode).unwrap(); let batch_count = node_count / batch_size; for i in 0..batch_count { let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size)); - tree = apply_memonly(tree, &batch); + tree = apply_memonly(tree, &batch, grove_version); } tree } - /// Shortcut to open a Merk with a provided storage and batch pub fn empty_path_merk<'db, S>( storage: &'db S, batch: &'db StorageBatch, + grove_version: &GroveVersion, ) -> Merk<>::BatchStorageContext> where S: Storage<'db>, @@ -307,6 +319,8 @@ where .get_storage_context(SubtreePath::empty(), Some(batch)) .unwrap(), false, + None:: Option>, + grove_version, ) .unwrap() .unwrap() @@ -315,6 +329,7 @@ where /// Shortcut to open a Merk for read only pub fn empty_path_merk_read_only<'db, S>( storage: &'db S, + grove_version: &GroveVersion, ) -> Merk<>::BatchStorageContext> where S: Storage<'db>, @@ -324,6 +339,8 @@ where .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None:: Option>, + grove_version, ) .unwrap() .unwrap() diff --git a/merk/src/test_utils/temp_merk.rs b/merk/src/test_utils/temp_merk.rs index 0fb4724ed..69e5b5550 100644 --- a/merk/src/test_utils/temp_merk.rs +++ b/merk/src/test_utils/temp_merk.rs @@ -38,7 +38,9 @@ use grovedb_storage::{ rocksdb_storage::{test_utils::TempStorage, PrefixedRocksDbStorageContext}, Storage, }; +use grovedb_version::version::GroveVersion; +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::Merk; @@ -54,7 +56,7 @@ pub struct TempMerk { impl TempMerk { /// Opens a `TempMerk` at the given file path, creating a new one if it /// does not exist. 
- pub fn new() -> Self { + pub fn new(grove_version: &GroveVersion) -> Self { let storage = Box::leak(Box::new(TempStorage::new())); let batch = Box::leak(Box::new(StorageBatch::new())); @@ -62,7 +64,14 @@ impl TempMerk { .get_storage_context(SubtreePath::empty(), Some(batch)) .unwrap(); - let merk = Merk::open_base(context, false).unwrap().unwrap(); + let merk = Merk::open_base( + context, + false, + None:: Option>, + grove_version, + ) + .unwrap() + .unwrap(); TempMerk { storage, merk, @@ -71,7 +80,7 @@ impl TempMerk { } /// Commits pending batch operations. - pub fn commit(&mut self) { + pub fn commit(&mut self, grove_version: &GroveVersion) { let batch = unsafe { Box::from_raw(self.batch as *const _ as *mut StorageBatch) }; self.storage .commit_multi_context_batch(*batch, None) @@ -82,7 +91,14 @@ impl TempMerk { .storage .get_storage_context(SubtreePath::empty(), Some(self.batch)) .unwrap(); - self.merk = Merk::open_base(context, false).unwrap().unwrap(); + self.merk = Merk::open_base( + context, + false, + None:: Option>, + grove_version, + ) + .unwrap() + .unwrap(); } } @@ -100,7 +116,7 @@ impl Drop for TempMerk { #[cfg(feature = "full")] impl Default for TempMerk { fn default() -> Self { - Self::new() + Self::new(GroveVersion::latest()) } } diff --git a/merk/src/tree/commit.rs b/merk/src/tree/commit.rs index ccecfb48f..31b0df5c6 100644 --- a/merk/src/tree/commit.rs +++ b/merk/src/tree/commit.rs @@ -1,41 +1,9 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above 
copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Merk tree commit #[cfg(feature = "full")] -use grovedb_costs::storage_cost::{removal::StorageRemovedBytes, StorageCost}; - -#[cfg(feature = "full")] -use super::Tree; +use super::TreeNode; #[cfg(feature = "full")] use crate::error::Error; -use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] /// To be used when committing a tree (writing it to a store after applying the @@ -45,31 +13,15 @@ pub trait Commit { /// backing store or cache. fn write( &mut self, - tree: &mut Tree, + tree: &mut TreeNode, old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, ) -> Result<(), Error>; /// Called once per node after writing a node and its children. The returned /// tuple specifies whether or not to prune the left and right child nodes, /// respectively. For example, returning `(true, true)` will prune both /// nodes, removing them from memory. 
- fn prune(&self, _tree: &Tree) -> (bool, bool) { + fn prune(&self, _tree: &TreeNode) -> (bool, bool) { (true, true) } } @@ -83,29 +35,13 @@ pub struct NoopCommit {} impl Commit for NoopCommit { fn write( &mut self, - _tree: &mut Tree, + _tree: &mut TreeNode, _old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - _update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - _section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, ) -> Result<(), Error> { Ok(()) } - fn prune(&self, _tree: &Tree) -> (bool, bool) { + fn prune(&self, _tree: &TreeNode) -> (bool, bool) { (false, false) } } diff --git a/merk/src/tree/debug.rs b/merk/src/tree/debug.rs index 33889ebf0..3e88c60b9 100644 --- a/merk/src/tree/debug.rs +++ b/merk/src/tree/debug.rs @@ -32,15 +32,15 @@ use std::fmt::{Debug, Formatter, Result}; use colored::Colorize; -use super::{Link, Tree}; +use super::{Link, TreeNode}; #[cfg(feature = "full")] -impl Debug for Tree { +impl Debug for TreeNode { // TODO: unwraps should be results that bubble up fn fmt(&self, f: &mut Formatter) -> Result { fn traverse( f: &mut Formatter, - cursor: &Tree, + cursor: &TreeNode, stack: &mut Vec<(Vec, Vec)>, left: bool, ) { diff --git a/merk/src/tree/encoding.rs b/merk/src/tree/encoding.rs index 98f6a7c60..cd10937d9 100644 --- a/merk/src/tree/encoding.rs +++ b/merk/src/tree/encoding.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do 
so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Merk tree encoding #[cfg(feature = "full")] @@ -36,26 +8,42 @@ use grovedb_costs::{ }; #[cfg(feature = "full")] use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] -use super::Tree; +use super::TreeNode; +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::{ error::{Error, Error::EdError}, - tree::TreeInner, + tree::TreeNodeInner, Error::StorageError, }; #[cfg(feature = "full")] -impl Tree { +impl TreeNode { /// Decode given bytes and set as Tree fields. Set key to value of given /// key. - pub fn decode_raw(bytes: &[u8], key: Vec) -> Result { - Tree::decode(key, bytes).map_err(EdError) + pub fn decode_raw( + bytes: &[u8], + key: Vec, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> Result { + TreeNode::decode(key, bytes, value_defined_cost_fn, grove_version).map_err(EdError) } /// Get value from storage given key. 
- pub(crate) fn get<'db, S, K>(storage: &S, key: K) -> CostResult, Error> + pub(crate) fn get<'db, S, K>( + storage: &S, + key: K, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult, Error> where S: StorageContext<'db>, K: AsRef<[u8]>, @@ -66,7 +54,12 @@ impl Tree { let tree_opt = cost_return_on_error_no_add!( &cost, tree_bytes - .map(|x| Tree::decode_raw(&x, key.as_ref().to_vec())) + .map(|x| TreeNode::decode_raw( + &x, + key.as_ref().to_vec(), + value_defined_cost_fn, + grove_version + )) .transpose() ); @@ -75,7 +68,7 @@ impl Tree { } #[cfg(feature = "full")] -impl Tree { +impl TreeNode { #[inline] /// Encode pub fn encode(&self) -> Vec { @@ -111,19 +104,42 @@ impl Tree { #[inline] /// Decode bytes from reader, set as Tree fields and set key to given key - pub fn decode_into(&mut self, key: Vec, input: &[u8]) -> ed::Result<()> { - let mut tree_inner: TreeInner = Decode::decode(input)?; + pub fn decode_into( + &mut self, + key: Vec, + input: &[u8], + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> ed::Result<()> { + let mut tree_inner: TreeNodeInner = Decode::decode(input)?; tree_inner.kv.key = key; + if let Some(value_defined_cost_fn) = value_defined_cost_fn { + tree_inner.kv.value_defined_cost = + value_defined_cost_fn(tree_inner.kv.value.as_slice(), grove_version); + } self.inner = Box::new(tree_inner); Ok(()) } #[inline] /// Decode input and set as Tree fields. Set the key as the given key. 
- pub fn decode(key: Vec, input: &[u8]) -> ed::Result { - let mut tree_inner: TreeInner = Decode::decode(input)?; + pub fn decode( + key: Vec, + input: &[u8], + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> ed::Result { + let mut tree_inner: TreeNodeInner = Decode::decode(input)?; tree_inner.kv.key = key; - Ok(Tree::new_with_tree_inner(tree_inner)) + if let Some(value_defined_cost_fn) = value_defined_cost_fn { + tree_inner.kv.value_defined_cost = + value_defined_cost_fn(tree_inner.kv.value.as_slice(), grove_version); + } + Ok(TreeNode::new_with_tree_inner(tree_inner)) } } @@ -131,11 +147,12 @@ impl Tree { #[cfg(test)] mod tests { use super::{super::Link, *}; - use crate::TreeFeatureType::{BasicMerk, SummedMerk}; + use crate::TreeFeatureType::{BasicMerkNode, SummedMerkNode}; #[test] fn encode_leaf_tree() { - let tree = Tree::from_fields(vec![0], vec![1], [55; 32], None, None, BasicMerk).unwrap(); + let tree = + TreeNode::from_fields(vec![0], vec![1], [55; 32], None, None, BasicMerkNode).unwrap(); assert_eq!(tree.encoding_length(), 68); assert_eq!( tree.value_encoding_length_with_parent_to_child_reference(), @@ -155,17 +172,17 @@ mod tests { #[test] #[should_panic] fn encode_modified_tree() { - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( vec![0], vec![1], [55; 32], Some(Link::Modified { pending_writes: 1, child_heights: (123, 124), - tree: Tree::new(vec![2], vec![3], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap(), }), None, - BasicMerk, + BasicMerkNode, ) .unwrap(); tree.encode(); @@ -173,7 +190,7 @@ mod tests { #[test] fn encode_loaded_tree() { - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( vec![0], vec![1], [55; 32], @@ -181,10 +198,10 @@ mod tests { hash: [66; 32], sum: None, child_heights: (123, 124), - tree: Tree::new(vec![2], vec![3], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![2], 
vec![3], None, BasicMerkNode).unwrap(), }), None, - BasicMerk, + BasicMerkNode, ) .unwrap(); assert_eq!( @@ -202,7 +219,7 @@ mod tests { #[test] fn encode_uncommitted_tree() { - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( vec![0], vec![1], [55; 32], @@ -210,10 +227,10 @@ mod tests { hash: [66; 32], sum: Some(10), child_heights: (123, 124), - tree: Tree::new(vec![2], vec![3], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap(), }), None, - SummedMerk(5), + SummedMerkNode(5), ) .unwrap(); assert_eq!( @@ -231,7 +248,7 @@ mod tests { #[test] fn encode_reference_tree() { - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( vec![0], vec![1], [55; 32], @@ -242,7 +259,7 @@ mod tests { key: vec![2], }), None, - BasicMerk, + BasicMerkNode, ) .unwrap(); assert_eq!( @@ -269,20 +286,28 @@ mod tests { #[test] fn decode_leaf_tree() { + let grove_version = GroveVersion::latest(); let bytes = vec![ 0, 0, 0, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 32, 34, 236, 157, 87, 27, 167, 116, 207, 158, 131, 208, 25, 73, 98, 245, 209, 227, 170, 26, 72, 212, 134, 166, 126, 39, 98, 166, 199, 149, 144, 21, 1, ]; - let tree = Tree::decode(vec![0], bytes.as_slice()).expect("should decode correctly"); + let tree = TreeNode::decode( + vec![0], + bytes.as_slice(), + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .expect("should decode correctly"); assert_eq!(tree.key(), &[0]); assert_eq!(tree.value_as_slice(), &[1]); - assert_eq!(tree.inner.kv.feature_type, BasicMerk); + assert_eq!(tree.inner.kv.feature_type, BasicMerkNode); } #[test] fn decode_reference_tree() { + let grove_version = GroveVersion::latest(); let bytes = vec![ 1, 1, 2, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 123, 124, 0, 0, 0, 55, 55, 55, 55, @@ -290,7 +315,13 @@ mod tests 
{ 55, 55, 55, 55, 55, 55, 32, 34, 236, 157, 87, 27, 167, 116, 207, 158, 131, 208, 25, 73, 98, 245, 209, 227, 170, 26, 72, 212, 134, 166, 126, 39, 98, 166, 199, 149, 144, 21, 1, ]; - let tree = Tree::decode(vec![0], bytes.as_slice()).expect("should decode correctly"); + let tree = TreeNode::decode( + vec![0], + bytes.as_slice(), + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .expect("should decode correctly"); assert_eq!(tree.key(), &[0]); assert_eq!(tree.value_as_slice(), &[1]); if let Some(Link::Reference { @@ -310,8 +341,14 @@ mod tests { #[test] fn decode_invalid_bytes_as_tree() { + let grove_version = GroveVersion::latest(); let bytes = vec![2, 3, 4, 5]; - let tree = Tree::decode(vec![0], bytes.as_slice()); - assert!(matches!(tree, Err(_))); + let tree = TreeNode::decode( + vec![0], + bytes.as_slice(), + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ); + assert!(tree.is_err()); } } diff --git a/merk/src/tree/fuzz_tests.rs b/merk/src/tree/fuzz_tests.rs index cd9d22d50..eb026f56a 100644 --- a/merk/src/tree/fuzz_tests.rs +++ b/merk/src/tree/fuzz_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Fuzz tests #![cfg(tests)] @@ -71,7 +43,13 @@ fn fuzz_396148930387069749() { fn fuzz_case(seed: u64, using_sum_trees: bool) { let mut rng: SmallRng = SeedableRng::seed_from_u64(seed); let initial_size = (rng.gen::() % 10) + 1; - let tree = make_tree_rand(initial_size, initial_size, seed, using_sum_trees); + let tree = make_tree_rand( + initial_size, + initial_size, + seed, + using_sum_trees, + grove_version, + ); let mut map = Map::from_iter(tree.iter()); let mut maybe_tree = Some(tree); println!("====== MERK FUZZ ======"); @@ -83,7 +61,7 @@ fn fuzz_case(seed: u64, using_sum_trees: bool) { let batch = make_batch(maybe_tree.as_ref(), batch_size, rng.gen::()); println!("BATCH {}", j); println!("{:?}", batch); - maybe_tree = apply_to_memonly(maybe_tree, &batch, using_sum_trees); + maybe_tree = apply_to_memonly(maybe_tree, &batch, using_sum_trees, grove_version); apply_to_map(&mut map, &batch); assert_map(maybe_tree.as_ref(), &map); if let Some(tree) = &maybe_tree { @@ -95,7 +73,7 @@ fn fuzz_case(seed: u64, using_sum_trees: bool) { } #[cfg(feature = "full")] -fn make_batch(maybe_tree: Option<&Tree>, size: u64, seed: u64) -> Vec { +fn make_batch(maybe_tree: Option<&TreeNode>, size: u64, seed: u64) -> Vec { let rng: RefCell = RefCell::new(SeedableRng::seed_from_u64(seed)); let mut batch = Vec::with_capacity(size as usize); @@ -170,7 +148,7 @@ fn apply_to_map(map: &mut Map, batch: &Batch) { } #[cfg(feature = "full")] -fn assert_map(maybe_tree: Option<&Tree>, map: &Map) { +fn 
assert_map(maybe_tree: Option<&TreeNode>, map: &Map) { if map.is_empty() { assert!(maybe_tree.is_none(), "expected tree to be None"); return; diff --git a/merk/src/tree/hash.rs b/merk/src/tree/hash.rs index d6d45c9f2..e23566a9a 100644 --- a/merk/src/tree/hash.rs +++ b/merk/src/tree/hash.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree hash #[cfg(any(feature = "full", feature = "verify"))] diff --git a/merk/src/tree/iter.rs b/merk/src/tree/iter.rs index 2daa5a027..03cca6eaf 100644 --- a/merk/src/tree/iter.rs +++ b/merk/src/tree/iter.rs @@ -1,35 +1,7 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Merk tree iterator #[cfg(feature = "full")] -use super::Tree; +use super::TreeNode; #[cfg(feature = "full")] /// An entry stored on an `Iter`'s stack, containing a reference to a `Tree`, @@ -38,7 +10,7 @@ use super::Tree; /// The `traversed` field represents whether or not the left child, self, and /// right child have been visited, respectively (`(left, self, right)`). struct StackItem<'a> { - tree: &'a Tree, + tree: &'a TreeNode, traversed: (bool, bool, bool), } @@ -47,7 +19,7 @@ impl<'a> StackItem<'a> { /// Creates a new `StackItem` for the given tree. 
The `traversed` state will /// be `false` since the children and self have not been visited yet, but /// will default to `true` for sides that do not have a child. - const fn new(tree: &'a Tree) -> Self { + const fn new(tree: &'a TreeNode) -> Self { StackItem { tree, traversed: ( @@ -77,14 +49,14 @@ pub struct Iter<'a> { #[cfg(feature = "full")] impl<'a> Iter<'a> { /// Creates a new iterator for the given tree. - pub fn new(tree: &'a Tree) -> Self { + pub fn new(tree: &'a TreeNode) -> Self { let stack = vec![StackItem::new(tree)]; Iter { stack } } } #[cfg(feature = "full")] -impl<'a> Tree { +impl<'a> TreeNode { /// Creates an iterator which yields `(key, value)` tuples for all of the /// tree's nodes which are retained in memory (skipping pruned subtrees). pub fn iter(&'a self) -> Iter<'a> { diff --git a/merk/src/tree/just_in_time_value_update.rs b/merk/src/tree/just_in_time_value_update.rs new file mode 100644 index 000000000..20861ec4d --- /dev/null +++ b/merk/src/tree/just_in_time_value_update.rs @@ -0,0 +1,82 @@ +use grovedb_costs::storage_cost::{ + removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, + StorageCost, +}; + +use crate::{ + merk::defaults::MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES, + tree::{kv::ValueDefinedCostType, TreeNode}, + Error, +}; + +impl TreeNode { + pub(in crate::tree) fn just_in_time_tree_node_value_update( + &mut self, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> Result<(), Error> { + let (mut current_tree_plus_hook_size, mut storage_costs) = + self.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)?; + let mut i = 0; + + if let Some(old_value) = self.old_value.clone() { + // At this point the tree value can be 
updated based on client requirements + // For example to store the costs + loop { + let (flags_changed, value_defined_cost) = update_tree_value_based_on_costs( + &storage_costs.value_storage_cost, + &old_value, + self.value_mut_ref(), + )?; + if !flags_changed { + break; + } else { + self.inner.kv.value_defined_cost = value_defined_cost; + let after_update_tree_plus_hook_size = + self.value_encoding_length_with_parent_to_child_reference(); + if after_update_tree_plus_hook_size == current_tree_plus_hook_size { + break; + } + let new_size_and_storage_costs = + self.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)?; + current_tree_plus_hook_size = new_size_and_storage_costs.0; + storage_costs = new_size_and_storage_costs.1; + } + if i > MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES { + return Err(Error::CyclicError( + "updated value based on costs too many times", + )); + } + i += 1; + } + + if let BasicStorageRemoval(removed_bytes) = + storage_costs.value_storage_cost.removed_bytes + { + let (_, value_removed_bytes) = section_removal_bytes(&old_value, 0, removed_bytes)?; + storage_costs.value_storage_cost.removed_bytes = value_removed_bytes; + } + } + + // Update old tree size after generating value storage_cost cost + self.old_value = Some(self.value_ref().clone()); + self.known_storage_cost = Some(storage_costs); + + Ok(()) + } +} diff --git a/merk/src/tree/kv.rs b/merk/src/tree/kv.rs index 064b18a94..b10733fc3 100644 --- a/merk/src/tree/kv.rs +++ b/merk/src/tree/kv.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished 
to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Merk tree key-values #[cfg(feature = "full")] @@ -45,7 +17,7 @@ use crate::tree::kv::ValueDefinedCostType::{LayeredValueDefinedCost, Specialized use crate::{ tree::{ hash::{combine_hash, kv_digest_to_kv_hash, value_hash, HASH_LENGTH_X2}, - tree_feature_type::{TreeFeatureType, TreeFeatureType::BasicMerk}, + tree_feature_type::{TreeFeatureType, TreeFeatureType::BasicMerkNode}, }, Link, HASH_LENGTH_U32, HASH_LENGTH_U32_X2, }; @@ -197,60 +169,57 @@ impl KV { } } + /// Replaces the `KV`'s value with the given value, does not update the hash + /// or value hash. + #[inline] + pub fn put_value_no_update_of_hashes(mut self, value: Vec) -> Self { + self.value = value; + self + } + /// Replaces the `KV`'s value with the given value, updates the hash, /// value hash and returns the modified `KV`. #[inline] pub fn put_value_then_update(mut self, value: Vec) -> CostContext { - let mut cost = OperationCost::default(); - // TODO: length check? 
self.value = value; - self.value_hash = value_hash(self.value_as_slice()).unwrap_add_cost(&mut cost); - self.hash = kv_digest_to_kv_hash(self.key(), self.value_hash()).unwrap_add_cost(&mut cost); - self.wrap_with_cost(cost) + self.update_hashes() } - /// Replaces the `KV`'s value with the given value, updates the hash, - /// value hash and returns the modified `KV`. - /// This is used when we want a fixed cost, for example in sum trees + /// Updates the hash, value hash and returns the modified `KV`. #[inline] - pub fn put_value_with_fixed_cost_then_update( - mut self, - value: Vec, - value_cost: u32, - ) -> CostContext { - self.value_defined_cost = Some(SpecializedValueDefinedCost(value_cost)); - self.put_value_then_update(value) + pub fn update_hashes(mut self) -> CostContext { + let mut cost = OperationCost::default(); + self.value_hash = value_hash(self.value_as_slice()).unwrap_add_cost(&mut cost); + self.hash = kv_digest_to_kv_hash(self.key(), self.value_hash()).unwrap_add_cost(&mut cost); + self.wrap_with_cost(cost) } - /// Replaces the `KV`'s value with the given value and value hash, - /// updates the hash and returns the modified `KV`. + /// Updates the hashes and returns the modified `KV`. 
#[inline] - pub fn put_value_and_reference_value_hash_then_update( + pub fn update_hashes_using_reference_value_hash( mut self, - value: Vec, reference_value_hash: CryptoHash, ) -> CostContext { let mut cost = OperationCost::default(); - let actual_value_hash = value_hash(value.as_slice()).unwrap_add_cost(&mut cost); + let actual_value_hash = value_hash(self.value_as_slice()).unwrap_add_cost(&mut cost); let combined_value_hash = combine_hash(&actual_value_hash, &reference_value_hash).unwrap_add_cost(&mut cost); - self.value = value; self.value_hash = combined_value_hash; self.hash = kv_digest_to_kv_hash(self.key(), self.value_hash()).unwrap_add_cost(&mut cost); self.wrap_with_cost(cost) } - /// Replaces the `KV`'s value with the given value and value hash, - /// updates the hash and returns the modified `KV`. + /// Replaces the `KV`'s value with the given value, does not update the + /// hashes, value hash and returns the modified `KV`. + /// This is used when we want a fixed cost, for example in sum trees #[inline] - pub fn put_value_with_reference_value_hash_and_value_cost_then_update( + pub fn put_value_with_fixed_cost_no_update_of_hashes( mut self, value: Vec, - reference_value_hash: CryptoHash, - value_cost: u32, - ) -> CostContext { - self.value_defined_cost = Some(LayeredValueDefinedCost(value_cost)); - self.put_value_and_reference_value_hash_then_update(value, reference_value_hash) + value_cost: ValueDefinedCostType, + ) -> Self { + self.value_defined_cost = Some(value_cost); + self.put_value_no_update_of_hashes(value) } /// Returns the key as a slice. 
@@ -532,7 +501,7 @@ impl Decode for KV { let mut kv = Self { key: Vec::with_capacity(0), value: Vec::with_capacity(128), - feature_type: BasicMerk, + feature_type: BasicMerkNode, value_defined_cost: None, hash: NULL_HASH, value_hash: NULL_HASH, @@ -563,11 +532,11 @@ impl Terminated for KV {} #[cfg(test)] mod test { use super::*; - use crate::tree::tree_feature_type::TreeFeatureType::SummedMerk; + use crate::tree::tree_feature_type::TreeFeatureType::SummedMerkNode; #[test] fn new_kv() { - let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerk).unwrap(); + let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerkNode).unwrap(); assert_eq!(kv.key(), &[1, 2, 3]); assert_eq!(kv.value_as_slice(), &[4, 5, 6]); @@ -576,7 +545,7 @@ mod test { #[test] fn with_value() { - let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerk) + let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerkNode) .unwrap() .put_value_then_update(vec![7, 8, 9]) .unwrap(); @@ -588,7 +557,7 @@ mod test { #[test] fn encode_and_decode_kv() { - let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerk).unwrap(); + let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerkNode).unwrap(); let mut encoded_kv = vec![]; kv.encode_into(&mut encoded_kv).expect("encoded"); let mut decoded_kv = KV::decode(encoded_kv.as_slice()).unwrap(); @@ -596,7 +565,7 @@ mod test { assert_eq!(kv, decoded_kv); - let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, SummedMerk(20)).unwrap(); + let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, SummedMerkNode(20)).unwrap(); let mut encoded_kv = vec![]; kv.encode_into(&mut encoded_kv).expect("encoded"); let mut decoded_kv = KV::decode(encoded_kv.as_slice()).unwrap(); diff --git a/merk/src/tree/link.rs b/merk/src/tree/link.rs index 56d9f1b0f..f445dd11b 100644 --- a/merk/src/tree/link.rs +++ b/merk/src/tree/link.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to 
any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Merk tree link #[cfg(feature = "full")] @@ -37,7 +9,7 @@ use ed::{Decode, Encode, Result, Terminated}; use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; #[cfg(feature = "full")] -use super::{hash::CryptoHash, Tree}; +use super::{hash::CryptoHash, TreeNode}; #[cfg(feature = "full")] use crate::HASH_LENGTH_U32; @@ -46,7 +18,7 @@ use crate::HASH_LENGTH_U32; #[cfg(feature = "full")] /// Represents a reference to a child tree node. Links may or may not contain /// the child's `Tree` instance (storing its key if not). -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub enum Link { /// Represents a child tree node which has been pruned from memory, only /// retaining a reference to it (its key). 
The child node can always be @@ -72,7 +44,7 @@ pub enum Link { /// Child heights child_heights: (u8, u8), /// Tree - tree: Tree + tree: TreeNode }, /// Represents a tree node which has been modified since the `Tree`'s last @@ -84,7 +56,7 @@ pub enum Link { /// Child heights child_heights: (u8, u8), /// Tree - tree: Tree, + tree: TreeNode, /// Sum sum: Option, }, @@ -97,7 +69,7 @@ pub enum Link { /// Child heights child_heights: (u8, u8), /// Tree - tree: Tree, + tree: TreeNode, /// Sum sum: Option, }, @@ -107,7 +79,7 @@ pub enum Link { impl Link { /// Creates a `Link::Modified` from the given `Tree`. #[inline] - pub const fn from_modified_tree(tree: Tree) -> Self { + pub const fn from_modified_tree(tree: TreeNode) -> Self { let pending_writes = 1 + tree.child_pending_writes(true) + tree.child_pending_writes(false); Self::Modified { @@ -119,7 +91,7 @@ impl Link { /// Creates a `Link::Modified` from the given tree, if any. If `None`, /// returns `None`. - pub fn maybe_from_modified_tree(maybe_tree: Option) -> Option { + pub fn maybe_from_modified_tree(maybe_tree: Option) -> Option { maybe_tree.map(Self::from_modified_tree) } @@ -161,7 +133,7 @@ impl Link { /// Returns the `Tree` instance of the tree referenced by the link. If the /// link is of variant `Link::Reference`, the returned value will be `None`. #[inline] - pub const fn tree(&self) -> Option<&Tree> { + pub const fn tree(&self) -> Option<&TreeNode> { match self { // TODO: panic for Reference, don't return Option? Link::Reference { .. 
} => None, @@ -483,14 +455,14 @@ fn read_u8(mut input: R) -> Result { #[cfg(test)] mod test { use super::{ - super::{hash::NULL_HASH, Tree}, + super::{hash::NULL_HASH, TreeNode}, *, }; - use crate::TreeFeatureType::BasicMerk; + use crate::TreeFeatureType::BasicMerkNode; #[test] fn from_modified_tree() { - let tree = Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); let link = Link::from_modified_tree(tree); assert!(link.is_modified()); assert_eq!(link.height(), 1); @@ -507,7 +479,7 @@ mod test { let link = Link::maybe_from_modified_tree(None); assert!(link.is_none()); - let tree = Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); let link = Link::maybe_from_modified_tree(Some(tree)); assert!(link.expect("expected link").is_modified()); } @@ -519,7 +491,7 @@ mod test { let child_heights = (0, 0); let pending_writes = 1; let key = vec![0]; - let tree = || Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = || TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); let reference = Link::Reference { hash, @@ -585,7 +557,7 @@ mod test { Link::Modified { pending_writes: 1, child_heights: (1, 1), - tree: Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(), } .hash(); } @@ -596,7 +568,7 @@ mod test { Link::Modified { pending_writes: 1, child_heights: (1, 1), - tree: Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(), } .into_reference(); } @@ -608,7 +580,7 @@ mod test { hash: [1; 32], sum: None, child_heights: (1, 1), - tree: Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(), } .into_reference(); } diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index 
c497b8939..6b2710b6a 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk trees #[cfg(feature = "full")] @@ -37,10 +9,12 @@ mod encoding; #[cfg(feature = "full")] mod fuzz_tests; #[cfg(any(feature = "full", feature = "verify"))] -mod hash; +pub mod hash; #[cfg(feature = "full")] mod iter; #[cfg(feature = "full")] +mod just_in_time_value_update; +#[cfg(feature = "full")] pub mod kv; #[cfg(feature = "full")] mod link; @@ -68,6 +42,7 @@ use grovedb_costs::{ }, CostContext, CostResult, CostsExt, OperationCost, }; +use grovedb_version::version::GroveVersion; #[cfg(any(feature = "full", feature = "verify"))] pub use hash::{ combine_hash, kv_digest_to_kv_hash, kv_hash, node_hash, value_hash, CryptoHash, HASH_LENGTH, @@ -91,6 +66,8 @@ pub use walk::{Fetch, RefWalker, Walker}; #[cfg(feature = "full")] use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] +use crate::tree::kv::ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}; +#[cfg(feature = "full")] use crate::{error::Error, Error::Overflow}; // TODO: remove need for `TreeInner`, and just use `Box` receiver for @@ -98,15 +75,15 @@ use crate::{error::Error, Error::Overflow}; #[cfg(feature = "full")] /// The fields of the `Tree` type, stored on the heap. -#[derive(Clone, Encode, Decode, Debug)] -pub struct TreeInner { +#[derive(Clone, Encode, Decode, Debug, PartialEq)] +pub struct TreeNodeInner { pub(crate) left: Option, pub(crate) right: Option, pub(crate) kv: KV, } #[cfg(feature = "full")] -impl TreeInner { +impl TreeNodeInner { /// Get the value as owned of the key value struct pub fn value_as_owned(self) -> Vec { self.kv.value @@ -129,7 +106,7 @@ impl TreeInner { } #[cfg(feature = "full")] -impl Terminated for Box {} +impl Terminated for Box {} #[cfg(feature = "full")] /// A binary AVL tree data structure, with Merkle hashes. 
@@ -137,15 +114,15 @@ impl Terminated for Box {} /// Trees' inner fields are stored on the heap so that nodes can recursively /// link to each other, and so we can detach nodes from their parents, then /// reattach without allocating or freeing heap memory. -#[derive(Clone)] -pub struct Tree { - pub(crate) inner: Box, - pub(crate) old_size_with_parent_to_child_hook: u32, +#[derive(Clone, PartialEq)] +pub struct TreeNode { + pub(crate) inner: Box, pub(crate) old_value: Option>, + pub(crate) known_storage_cost: Option, } #[cfg(feature = "full")] -impl Tree { +impl TreeNode { /// Creates a new `Tree` with the given key and value, and no children. /// /// Hashes the key/value pair and initializes the `kv_hash` field. @@ -156,24 +133,23 @@ impl Tree { feature_type: TreeFeatureType, ) -> CostContext { KV::new(key, value, value_defined_cost, feature_type).map(|kv| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv, left: None, right: None, }), - old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }) } /// Creates a new `Tree` given an inner tree - pub fn new_with_tree_inner(inner_tree: TreeInner) -> Self { - let decode_size = inner_tree.kv.value_byte_cost_size(); + pub fn new_with_tree_inner(inner_tree: TreeNodeInner) -> Self { let old_value = inner_tree.kv.value.clone(); Self { inner: Box::new(inner_tree), - old_size_with_parent_to_child_hook: decode_size, old_value: Some(old_value), + known_storage_cost: None, } } @@ -218,7 +194,7 @@ impl Tree { let key_value_storage_cost = KeyValueStorageCost { key_storage_cost, // the key storage cost is added later value_storage_cost, - new_node: self.old_size_with_parent_to_child_hook == 0, + new_node: self.old_value.is_none(), needs_value_verification: self.inner.kv.value_defined_cost.is_none(), }; @@ -234,10 +210,10 @@ impl Tree { ) -> Result<(u32, KeyValueStorageCost), Error> { let current_value_byte_cost = self.value_encoding_length_with_parent_to_child_reference(); - let 
old_cost = if self.inner.kv.value_defined_cost.is_some() && self.old_value.is_some() { - old_tree_cost(self.key_as_ref(), self.old_value.as_ref().unwrap()) + let old_cost = if let Some(old_value) = self.old_value.as_ref() { + old_tree_cost(self.key_as_ref(), old_value) } else { - Ok(self.old_size_with_parent_to_child_hook) + Ok(0) // there was no old value, hence old cost would be 0 }?; self.kv_with_parent_hook_size_and_storage_cost_from_old_cost( @@ -257,13 +233,13 @@ impl Tree { feature_type: TreeFeatureType, ) -> CostContext { KV::new_with_value_hash(key, value, value_hash, feature_type).map(|kv| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv, left: None, right: None, }), - old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }) } @@ -277,13 +253,13 @@ impl Tree { feature_type: TreeFeatureType, ) -> CostContext { KV::new_with_combined_value_hash(key, value, value_hash, feature_type).map(|kv| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv, left: None, right: None, }), - old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }) } @@ -299,13 +275,13 @@ impl Tree { ) -> CostContext { KV::new_with_layered_value_hash(key, value, value_cost, value_hash, feature_type).map( |kv| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv, left: None, right: None, }), - old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }, ) } @@ -321,13 +297,13 @@ impl Tree { feature_type: TreeFeatureType, ) -> CostContext { value_hash(value.as_slice()).map(|vh| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv: KV::from_fields(key, value, kv_hash, vh, feature_type), left, right, }), - old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }) } @@ -485,8 +461,8 @@ impl Tree { #[inline] pub fn sum(&self) -> Result, Error> { match self.inner.kv.feature_type { - 
TreeFeatureType::BasicMerk => Ok(None), - TreeFeatureType::SummedMerk(value) => value + TreeFeatureType::BasicMerkNode => Ok(None), + TreeFeatureType::SummedMerkNode(value) => value .checked_add(self.child_sum(true)) .and_then(|a| a.checked_add(self.child_sum(false))) .ok_or(Overflow("sum is overflowing")) @@ -653,15 +629,49 @@ impl Tree { /// Replaces the root node's value with the given value and returns the /// modified `Tree`. #[inline] - pub fn put_value(mut self, value: Vec, feature_type: TreeFeatureType) -> CostContext { + pub fn put_value( + mut self, + value: Vec, + feature_type: TreeFeatureType, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.inner.kv = self - .inner - .kv - .put_value_then_update(value) - .unwrap_add_cost(&mut cost); + + self.inner.kv = self.inner.kv.put_value_no_update_of_hashes(value); self.inner.kv.feature_type = feature_type; - self.wrap_with_cost(cost) + + if self.old_value.is_some() { + // we are replacing a value + // in this case there is a possibility that the client would want to update the + // element flags based on the change of values + cost_return_on_error_no_add!( + &cost, + self.just_in_time_tree_node_value_update( + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ); + } + + self.inner.kv = self.inner.kv.update_hashes().unwrap_add_cost(&mut cost); + Ok(self).wrap_with_cost(cost) } /// Replaces the root node's value with the given value and returns the @@ -672,15 +682,47 @@ impl Tree { value: Vec, value_fixed_cost: u32, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + 
update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.inner.kv = self - .inner - .kv - .put_value_with_fixed_cost_then_update(value, value_fixed_cost) - .unwrap_add_cost(&mut cost); + self.inner.kv = self.inner.kv.put_value_with_fixed_cost_no_update_of_hashes( + value, + SpecializedValueDefinedCost(value_fixed_cost), + ); self.inner.kv.feature_type = feature_type; - self.wrap_with_cost(cost) + + if self.old_value.is_some() { + // we are replacing a value + // in this case there is a possibility that the client would want to update the + // element flags based on the change of values + cost_return_on_error_no_add!( + &cost, + self.just_in_time_tree_node_value_update( + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ); + } + + self.inner.kv = self.inner.kv.update_hashes().unwrap_add_cost(&mut cost); + Ok(self).wrap_with_cost(cost) } /// Replaces the root node's value with the given value and value hash @@ -691,15 +733,49 @@ impl Tree { value: Vec, value_hash: CryptoHash, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); + + self.inner.kv = self.inner.kv.put_value_no_update_of_hashes(value); + self.inner.kv.feature_type = feature_type; + + if self.old_value.is_some() { + // we are replacing a value + // in this case there is a possibility that the client would 
want to update the + // element flags based on the change of values + cost_return_on_error_no_add!( + &cost, + self.just_in_time_tree_node_value_update( + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ); + } + self.inner.kv = self .inner .kv - .put_value_and_reference_value_hash_then_update(value, value_hash) + .update_hashes_using_reference_value_hash(value_hash) .unwrap_add_cost(&mut cost); - self.inner.kv.feature_type = feature_type; - self.wrap_with_cost(cost) + Ok(self).wrap_with_cost(cost) } /// Replaces the root node's value with the given value and value hash @@ -711,17 +787,52 @@ impl Tree { value_hash: CryptoHash, value_cost: u32, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); + + self.inner.kv = self.inner.kv.put_value_with_fixed_cost_no_update_of_hashes( + value, + LayeredValueDefinedCost(value_cost), + ); + self.inner.kv.feature_type = feature_type; + + if self.old_value.is_some() { + // we are replacing a value + // in this case there is a possibility that the client would want to update the + // element flags based on the change of values + cost_return_on_error_no_add!( + &cost, + self.just_in_time_tree_node_value_update( + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ); + } + self.inner.kv = self .inner .kv - .put_value_with_reference_value_hash_and_value_cost_then_update( - value, value_hash, value_cost, - ) + .update_hashes_using_reference_value_hash(value_hash) .unwrap_add_cost(&mut cost); - self.inner.kv.feature_type = feature_type; - self.wrap_with_cost(cost) + 
Ok(self).wrap_with_cost(cost) } // TODO: add compute_hashes method @@ -737,22 +848,6 @@ impl Tree { &mut self, c: &mut C, old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, ) -> CostResult<(), Error> { // TODO: make this method less ugly // TODO: call write in-order for better performance in writing batch to db? @@ -769,15 +864,7 @@ impl Tree { }) = self.inner.left.take() { // println!("key is {}", std::str::from_utf8(tree.key()).unwrap()); - cost_return_on_error!( - &mut cost, - tree.commit( - c, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes - ) - ); + cost_return_on_error!(&mut cost, tree.commit(c, old_specialized_cost,)); let sum = cost_return_on_error_default!(tree.sum()); self.inner.left = Some(Link::Loaded { @@ -800,15 +887,7 @@ impl Tree { }) = self.inner.right.take() { // println!("key is {}", std::str::from_utf8(tree.key()).unwrap()); - cost_return_on_error!( - &mut cost, - tree.commit( - c, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes - ) - ); + cost_return_on_error!(&mut cost, tree.commit(c, old_specialized_cost,)); let sum = cost_return_on_error_default!(tree.sum()); self.inner.right = Some(Link::Loaded { hash: tree.hash().unwrap_add_cost(&mut cost), @@ -821,15 +900,7 @@ impl Tree { } } - cost_return_on_error_no_add!( - &cost, - c.write( - self, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes - ) - ); + cost_return_on_error_no_add!(&cost, c.write(self, old_specialized_cost,)); // println!("done committing {}", std::str::from_utf8(self.key()).unwrap()); @@ -847,7 +918,16 @@ impl Tree { /// Fetches the child on the given side using the given data source, and /// places it 
in the child slot (upgrading the link from `Link::Reference` /// to `Link::Loaded`). - pub fn load(&mut self, left: bool, source: &S) -> CostResult<(), Error> { + pub fn load( + &mut self, + left: bool, + source: &S, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> + where + V: Fn(&[u8], &GroveVersion) -> Option, + { // TODO: return Err instead of panic? let link = self.link(left).expect("Expected link"); let (child_heights, hash, sum) = match link { @@ -861,7 +941,10 @@ impl Tree { }; let mut cost = OperationCost::default(); - let tree = cost_return_on_error!(&mut cost, source.fetch(link)); + let tree = cost_return_on_error!( + &mut cost, + source.fetch(link, value_defined_cost_fn, grove_version) + ); debug_assert_eq!(tree.key(), link.key()); *self.slot_mut(left) = Some(Link::Loaded { tree, @@ -886,14 +969,15 @@ pub const fn side_to_str(left: bool) -> &'static str { #[cfg(feature = "full")] #[cfg(test)] mod test { - use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; - use super::{commit::NoopCommit, hash::NULL_HASH, Tree}; - use crate::tree::{tree_feature_type::TreeFeatureType::SummedMerk, TreeFeatureType::BasicMerk}; + use super::{commit::NoopCommit, hash::NULL_HASH, TreeNode}; + use crate::tree::{ + tree_feature_type::TreeFeatureType::SummedMerkNode, TreeFeatureType::BasicMerkNode, + }; #[test] fn build_tree() { - let tree = Tree::new(vec![1], vec![101], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![1], vec![101], None, BasicMerkNode).unwrap(); assert_eq!(tree.key(), &[1]); assert_eq!(tree.value_as_slice(), &[101]); assert!(tree.child(true).is_none()); @@ -905,13 +989,13 @@ mod test { let tree = tree.attach( true, - Some(Tree::new(vec![2], vec![102], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![102], None, BasicMerkNode).unwrap()), ); assert_eq!(tree.key(), &[1]); assert_eq!(tree.child(true).unwrap().key(), &[2]); assert!(tree.child(false).is_none()); - 
let tree = Tree::new(vec![3], vec![103], None, BasicMerk) + let tree = TreeNode::new(vec![3], vec![103], None, BasicMerkNode) .unwrap() .attach(false, Some(tree)); assert_eq!(tree.key(), &[3]); @@ -922,29 +1006,29 @@ mod test { #[should_panic] #[test] fn attach_existing() { - Tree::new(vec![0], vec![1], None, BasicMerk) + TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ) .attach( true, - Some(Tree::new(vec![4], vec![5], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![4], vec![5], None, BasicMerkNode).unwrap()), ); } #[test] fn modify() { - let tree = Tree::new(vec![0], vec![1], None, BasicMerk) + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ) .attach( false, - Some(Tree::new(vec![4], vec![5], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![4], vec![5], None, BasicMerkNode).unwrap()), ); let tree = tree.walk(true, |left_opt| { @@ -956,7 +1040,7 @@ mod test { let tree = tree.walk(true, |left_opt| { assert!(left_opt.is_none()); - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()) + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()) }); assert_eq!(tree.link(true).unwrap().key(), &[2]); @@ -970,25 +1054,20 @@ mod test { #[test] fn child_and_link() { - let mut tree = Tree::new(vec![0], vec![1], None, BasicMerk) + let mut tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); assert!(tree.link(true).expect("expected link").is_modified()); assert!(tree.child(true).is_some()); assert!(tree.link(false).is_none()); 
assert!(tree.child(false).is_none()); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); assert!(tree.link(true).expect("expected link").is_stored()); assert!(tree.child(true).is_some()); @@ -1003,20 +1082,15 @@ mod test { #[test] fn child_hash() { - let mut tree = Tree::new(vec![0], vec![1], None, BasicMerk) + let mut tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); assert_eq!( tree.child_hash(true), &[ @@ -1029,7 +1103,7 @@ mod test { #[test] fn hash() { - let tree = Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); assert_eq!( tree.hash().unwrap(), [ @@ -1041,13 +1115,13 @@ mod test { #[test] fn child_pending_writes() { - let tree = Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); assert_eq!(tree.child_pending_writes(true), 0); assert_eq!(tree.child_pending_writes(false), 0); let tree = tree.attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); assert_eq!(tree.child_pending_writes(true), 1); assert_eq!(tree.child_pending_writes(false), 0); @@ -1055,7 +1129,7 @@ mod test { #[test] fn height_and_balance() { - let tree = 
Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); assert_eq!(tree.height(), 1); assert_eq!(tree.child_height(true), 0); assert_eq!(tree.child_height(false), 0); @@ -1063,7 +1137,7 @@ mod test { let tree = tree.attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); assert_eq!(tree.height(), 2); assert_eq!(tree.child_height(true), 1); @@ -1080,40 +1154,30 @@ mod test { #[test] fn commit() { - let mut tree = Tree::new(vec![0], vec![1], None, BasicMerk) + let mut tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( false, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); assert!(tree.link(false).expect("expected link").is_stored()); } #[test] fn sum_tree() { - let mut tree = Tree::new(vec![0], vec![1], None, SummedMerk(3)) + let mut tree = TreeNode::new(vec![0], vec![1], None, SummedMerkNode(3)) .unwrap() .attach( false, - Some(Tree::new(vec![2], vec![3], None, SummedMerk(5)).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, SummedMerkNode(5)).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); assert_eq!(Some(8), tree.sum().expect("expected to get sum from tree")); } diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index 1b161cdd0..156c7e602 100644 --- 
a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree ops #[cfg(feature = "full")] @@ -44,16 +16,20 @@ use grovedb_costs::{ }, CostContext, CostResult, CostsExt, OperationCost, }; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use integer_encoding::VarInt; #[cfg(feature = "full")] use Op::*; #[cfg(feature = "full")] -use super::{Fetch, Link, Tree, Walker}; +use super::{Fetch, Link, TreeNode, Walker}; #[cfg(feature = "full")] use crate::{error::Error, tree::tree_feature_type::TreeFeatureType, CryptoHash, HASH_LENGTH_U32}; -use crate::{merk::KeyUpdates, tree::kv::ValueDefinedCostType::SpecializedValueDefinedCost}; +use crate::{ + merk::KeyUpdates, + tree::kv::{ValueDefinedCostType, ValueDefinedCostType::SpecializedValueDefinedCost}, +}; #[cfg(feature = "full")] /// An operation to be applied to a key in the store. @@ -65,18 +41,18 @@ pub enum Op { /// cost into the Merk tree. This is ideal for sum items where /// we want sizes to always be fixed PutWithSpecializedCost(Vec, u32, TreeFeatureType), - /// Combined references include the value in the node hash + /// `Combined references` include the value in the node hash /// because the value is independent of the reference hash /// In GroveDB this is used for references PutCombinedReference(Vec, CryptoHash, TreeFeatureType), - /// Layered references include the value in the node hash + /// `Layered references` include the value in the node hash /// because the value is independent of the reference hash /// In GroveDB this is used for trees /// A layered reference does not pay for the tree's value, /// instead providing a cost for the value PutLayeredReference(Vec, u32, CryptoHash, TreeFeatureType), /// Replacing a layered reference is slightly more efficient - /// than putting it as the replace will not modify the size + /// than putting it as the replace operation will not modify the size /// hence there is no need to calculate a difference in /// costs ReplaceLayeredReference(Vec, u32, CryptoHash, TreeFeatureType), @@ -147,7 +123,14 @@ 
pub struct PanicSource {} #[cfg(feature = "full")] impl Fetch for PanicSource { - fn fetch(&self, _link: &Link) -> CostResult { + fn fetch( + &self, + _link: &Link, + _value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + _grove_version: &GroveVersion, + ) -> CostResult { unreachable!("'fetch' should not have been called") } } @@ -162,15 +145,24 @@ where /// not require a non-empty tree. /// /// Keys in batch must be sorted and unique. - pub fn apply_to, C, R>( + pub fn apply_to, C, V, U, R>( maybe_tree: Option, batch: &MerkBatch, source: S, old_tree_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, - ) -> CostContext, KeyUpdates), Error>> + grove_version: &GroveVersion, + ) -> CostContext, KeyUpdates), Error>> where C: Fn(&Vec, &Vec) -> Result, + V: Fn(&[u8], &GroveVersion) -> Option, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { let mut cost = OperationCost::default(); @@ -188,28 +180,42 @@ where } else { match maybe_tree { None => { - return Self::build(batch, source, old_tree_cost, section_removal_bytes).map_ok( - |tree| { - let new_keys: BTreeSet> = batch - .iter() - .map(|batch_entry| batch_entry.0.as_ref().to_vec()) - .collect(); - ( - tree, - KeyUpdates::new( - new_keys, - BTreeSet::default(), - LinkedList::default(), - None, - ), - ) - }, + return Self::build( + batch, + source, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version, ) + .map_ok(|tree| { + let new_keys: BTreeSet> = batch + .iter() + .map(|batch_entry| batch_entry.0.as_ref().to_vec()) + .collect(); + ( + tree, + KeyUpdates::new( + new_keys, + BTreeSet::default(), + LinkedList::default(), + None, + ), + ) + }) } Some(tree) => { cost_return_on_error!( &mut cost, - tree.apply_sorted(batch, old_tree_cost, 
section_removal_bytes) + tree.apply_sorted( + batch, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version + ) ) } } @@ -222,14 +228,23 @@ where /// Builds a `Tree` from a batch of operations. /// /// Keys in batch must be sorted and unique. - fn build, C, R>( + fn build, C, V, U, R>( batch: &MerkBatch, source: S, old_tree_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, - ) -> CostResult, Error> + grove_version: &GroveVersion, + ) -> CostResult, Error> where C: Fn(&Vec, &Vec) -> Result, + V: Fn(&[u8], &GroveVersion) -> Option, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { let mut cost = OperationCost::default(); @@ -251,7 +266,10 @@ where left_batch, source.clone(), old_tree_cost, - section_removal_bytes + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version ) ) .map(|tree| Self::new(tree, source.clone())); @@ -259,7 +277,14 @@ where Some(tree) => { cost_return_on_error!( &mut cost, - tree.apply_sorted(right_batch, old_tree_cost, section_removal_bytes) + tree.apply_sorted( + right_batch, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version + ) ) .0 } @@ -269,7 +294,10 @@ where right_batch, source.clone(), old_tree_cost, - section_removal_bytes + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version ) ) .map(|tree| Self::new(tree, source.clone())), @@ -286,21 +314,21 @@ where // TODO: take from batch so we don't have to clone let mid_tree = match mid_op { - Put(..) => Tree::new( + Put(..) 
=> TreeNode::new( mid_key.as_ref().to_vec(), mid_value.to_vec(), None, mid_feature_type.to_owned(), ) .unwrap_add_cost(&mut cost), - PutWithSpecializedCost(_, value_cost, _) => Tree::new( + PutWithSpecializedCost(_, value_cost, _) => TreeNode::new( mid_key.as_ref().to_vec(), mid_value.to_vec(), Some(SpecializedValueDefinedCost(*value_cost)), mid_feature_type.to_owned(), ) .unwrap_add_cost(&mut cost), - PutCombinedReference(_, referenced_value, _) => Tree::new_with_combined_value_hash( + PutCombinedReference(_, referenced_value, _) => TreeNode::new_with_combined_value_hash( mid_key.as_ref().to_vec(), mid_value, referenced_value.to_owned(), @@ -309,7 +337,7 @@ where .unwrap_add_cost(&mut cost), PutLayeredReference(_, value_cost, referenced_value, _) | ReplaceLayeredReference(_, value_cost, referenced_value, _) => { - Tree::new_with_layered_value_hash( + TreeNode::new_with_layered_value_hash( mid_key.as_ref().to_vec(), mid_value, *value_cost, @@ -338,7 +366,10 @@ where None ), old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) ) .0 @@ -350,16 +381,20 @@ where fn apply_sorted_without_costs>( self, batch: &MerkBatch, + grove_version: &GroveVersion, ) -> CostResult<(Option, KeyUpdates), Error> { self.apply_sorted( batch, &|_, _| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) } @@ -367,14 +402,23 @@ where /// `Walker::apply`_to, but requires a populated tree. /// /// Keys in batch must be sorted and unique. 
- fn apply_sorted, C, R>( + fn apply_sorted, C, V, U, R>( self, batch: &MerkBatch, old_specialized_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, + grove_version: &GroveVersion, ) -> CostResult<(Option, KeyUpdates), Error> where C: Fn(&Vec, &Vec) -> Result, + V: Fn(&[u8], &GroveVersion) -> Option, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { let mut cost = OperationCost::default(); @@ -382,7 +426,7 @@ where let key_vec = self.tree().key().to_vec(); // binary search to see if this node's key is in the batch, and to split // into left and right batches - let search = batch.binary_search_by(|(key, _op)| key.as_ref().cmp(self.tree().key())); + let search = batch.binary_search_by(|(key, _op)| key.as_ref().cmp(&key_vec)); let tree = if let Ok(index) = search { let (_, op) = &batch[index]; @@ -390,58 +434,95 @@ where // a key matches this node's key, apply op to this node match op { // TODO: take vec from batch so we don't need to clone - Put(value, feature_type) => self - .put_value(value.to_vec(), feature_type.to_owned()) - .unwrap_add_cost(&mut cost), - PutWithSpecializedCost(value, value_cost, feature_type) => self - .put_value_with_fixed_cost(value.to_vec(), *value_cost, feature_type.to_owned()) - .unwrap_add_cost(&mut cost), - PutCombinedReference(value, referenced_value, feature_type) => self - .put_value_and_reference_value_hash( - value.to_vec(), - referenced_value.to_owned(), - feature_type.to_owned(), + Put(value, feature_type) => { + cost_return_on_error!( + &mut cost, + self.put_value( + value.to_vec(), + feature_type.to_owned(), + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes, + ) + ) + } + + PutWithSpecializedCost(value, value_cost, feature_type) => { + cost_return_on_error!( + &mut cost, + 
self.put_value_with_fixed_cost( + value.to_vec(), + *value_cost, + feature_type.to_owned(), + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ) + } + PutCombinedReference(value, referenced_value, feature_type) => { + cost_return_on_error!( + &mut cost, + self.put_value_and_reference_value_hash( + value.to_vec(), + referenced_value.to_owned(), + feature_type.to_owned(), + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes, + ) ) - .unwrap_add_cost(&mut cost), + } PutLayeredReference(value, value_cost, referenced_value, feature_type) | ReplaceLayeredReference(value, value_cost, referenced_value, feature_type) => { - self.put_value_with_reference_value_hash_and_value_cost( - value.to_vec(), - referenced_value.to_owned(), - *value_cost, - feature_type.to_owned(), + cost_return_on_error!( + &mut cost, + self.put_value_with_reference_value_hash_and_value_cost( + value.to_vec(), + referenced_value.to_owned(), + *value_cost, + feature_type.to_owned(), + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes, + ) ) - .unwrap_add_cost(&mut cost) } Delete | DeleteLayered | DeleteLayeredMaybeSpecialized | DeleteMaybeSpecialized => { - // TODO: we shouldn't have to do this as 2 different calls to apply let source = self.clone_source(); - let wrap = |maybe_tree: Option| { - maybe_tree.map(|tree| Self::new(tree, source.clone())) - }; - let key = self.tree().key().to_vec(); - let key_len = key.len() as u32; - - let prefixed_key_len = HASH_LENGTH_U32 + key_len; - let total_key_len = prefixed_key_len + prefixed_key_len.required_space() as u32; - let value = self.tree().value_ref(); - let old_cost = match &batch[index].1 { - Delete => self.tree().inner.kv.value_byte_cost_size(), - DeleteLayered | DeleteLayeredMaybeSpecialized => { - cost_return_on_error_no_add!(&cost, old_specialized_cost(&key, value)) - } - DeleteMaybeSpecialized => { - cost_return_on_error_no_add!(&cost, 
old_specialized_cost(&key, value)) - } - _ => 0, // can't get here anyways + let (r_key_cost, r_value_cost) = { + let value = self.tree().value_ref(); + + let old_cost = match &batch[index].1 { + Delete => self.tree().inner.kv.value_byte_cost_size(), + DeleteLayered | DeleteLayeredMaybeSpecialized => { + cost_return_on_error_no_add!( + &cost, + old_specialized_cost(&key_vec, value) + ) + } + DeleteMaybeSpecialized => { + cost_return_on_error_no_add!( + &cost, + old_specialized_cost(&key_vec, value) + ) + } + _ => 0, // can't get here anyway + }; + + let key_len = key_vec.len() as u32; + + let prefixed_key_len = HASH_LENGTH_U32 + key_len; + let total_key_len = + prefixed_key_len + prefixed_key_len.required_space() as u32; + let value = self.tree().value_ref(); + cost_return_on_error_no_add!( + &cost, + section_removal_bytes(value, total_key_len, old_cost) + ) }; - - let (r_key_cost, r_value_cost) = cost_return_on_error_no_add!( - &cost, - section_removal_bytes(value, total_key_len, old_cost) - ); - let deletion_cost = Some(KeyValueStorageCost { + let deletion_cost = KeyValueStorageCost { key_storage_cost: StorageCost { added_bytes: 0, replaced_bytes: 0, @@ -454,35 +535,128 @@ where }, new_node: false, needs_value_verification: false, - }); - - let maybe_tree = cost_return_on_error!(&mut cost, self.remove()); + }; - #[rustfmt::skip] - let (maybe_tree, mut key_updates) - = cost_return_on_error!( + let maybe_tree_walker = cost_return_on_error!( &mut cost, - Self::apply_to( - maybe_tree, - &batch[..index], - source.clone(), - old_specialized_cost, - section_removal_bytes - ) + self.remove(value_defined_cost_fn, grove_version) ); - let maybe_walker = wrap(maybe_tree); - let (maybe_tree, mut key_updates_right) = cost_return_on_error!( - &mut cost, - Self::apply_to( - maybe_walker, - &batch[index + 1..], - source.clone(), - old_specialized_cost, - section_removal_bytes + // If there are no more batch updates to the left this means that the index is 0 + // There would be no 
key updates to the left of this part of the tree. + + let (maybe_tree_walker, mut key_updates) = if index == 0 { + ( + maybe_tree_walker, + KeyUpdates::new( + BTreeSet::default(), + BTreeSet::default(), + LinkedList::default(), + None, + ), ) - ); - let maybe_walker = wrap(maybe_tree); + } else { + match maybe_tree_walker { + None => { + let new_tree_node = cost_return_on_error!( + &mut cost, + Self::build( + &batch[..index], + source.clone(), + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version, + ) + ); + let new_keys: BTreeSet> = batch[..index] + .iter() + .map(|batch_entry| batch_entry.0.as_ref().to_vec()) + .collect(); + ( + new_tree_node.map(|tree| Self::new(tree, source.clone())), + KeyUpdates::new( + new_keys, + BTreeSet::default(), + LinkedList::default(), + None, + ), + ) + } + Some(tree) => { + cost_return_on_error!( + &mut cost, + tree.apply_sorted( + &batch[..index], + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version + ) + ) + } + } + }; + + // We not have a new top tree node, and a set of batch operations to the right + // of the node + + let (maybe_tree_walker, mut key_updates_right) = if index == batch.len() - 1 { + ( + maybe_tree_walker, + KeyUpdates::new( + BTreeSet::default(), + BTreeSet::default(), + LinkedList::default(), + None, + ), + ) + } else { + match maybe_tree_walker { + None => { + let new_tree_node = cost_return_on_error!( + &mut cost, + Self::build( + &batch[index + 1..], + source.clone(), + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version, + ) + ); + let new_keys: BTreeSet> = batch[index + 1..] 
+ .iter() + .map(|batch_entry| batch_entry.0.as_ref().to_vec()) + .collect(); + ( + new_tree_node.map(|tree| Self::new(tree, source)), + KeyUpdates::new( + new_keys, + BTreeSet::default(), + LinkedList::default(), + None, + ), + ) + } + Some(tree) => { + cost_return_on_error!( + &mut cost, + tree.apply_sorted( + &batch[index + 1..], + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version + ) + ) + } + } + }; key_updates.new_keys.append(&mut key_updates_right.new_keys); key_updates @@ -491,10 +665,12 @@ where key_updates .deleted_keys .append(&mut key_updates_right.deleted_keys); - key_updates.deleted_keys.push_back((key, deletion_cost)); + key_updates + .deleted_keys + .push_back((key_vec.clone(), deletion_cost)); key_updates.updated_root_key_from = Some(key_vec); - return Ok((maybe_walker, key_updates)).wrap_with_cost(cost); + return Ok((maybe_tree_walker, key_updates)).wrap_with_cost(cost); } } } else { @@ -520,7 +696,10 @@ where exclusive, KeyUpdates::new(new_keys, updated_keys, LinkedList::default(), None), old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) .add_cost(cost) } @@ -530,17 +709,26 @@ where /// /// This recursion executes serially in the same thread, but in the future /// will be dispatched to workers in other threads. 
- fn recurse, C, R>( + fn recurse, C, V, U, R>( self, batch: &MerkBatch, mid: usize, exclusive: bool, mut key_updates: KeyUpdates, old_tree_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, + grove_version: &GroveVersion, ) -> CostResult<(Option, KeyUpdates), Error> where C: Fn(&Vec, &Vec) -> Result, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, + V: Fn(&[u8], &GroveVersion) -> Option, R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { let mut cost = OperationCost::default(); @@ -558,25 +746,33 @@ where let source = self.clone_source(); cost_return_on_error!( &mut cost, - self.walk(true, |maybe_left| { - Self::apply_to( - maybe_left, - left_batch, - source, - old_tree_cost, - section_removal_bytes, - ) - .map_ok(|(maybe_left, mut key_updates_left)| { - key_updates.new_keys.append(&mut key_updates_left.new_keys); - key_updates - .updated_keys - .append(&mut key_updates_left.updated_keys); - key_updates - .deleted_keys - .append(&mut key_updates_left.deleted_keys); - maybe_left - }) - }) + self.walk( + true, + |maybe_left| { + Self::apply_to( + maybe_left, + left_batch, + source, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version, + ) + .map_ok(|(maybe_left, mut key_updates_left)| { + key_updates.new_keys.append(&mut key_updates_left.new_keys); + key_updates + .updated_keys + .append(&mut key_updates_left.updated_keys); + key_updates + .deleted_keys + .append(&mut key_updates_left.deleted_keys); + maybe_left + }) + }, + value_defined_cost_fn, + grove_version, + ) ) } else { self @@ -586,31 +782,42 @@ where let source = tree.clone_source(); cost_return_on_error!( &mut cost, - tree.walk(false, |maybe_right| { - Self::apply_to( - maybe_right, - right_batch, - source, - old_tree_cost, - section_removal_bytes, - ) - .map_ok(|(maybe_right, mut 
key_updates_right)| { - key_updates.new_keys.append(&mut key_updates_right.new_keys); - key_updates - .updated_keys - .append(&mut key_updates_right.updated_keys); - key_updates - .deleted_keys - .append(&mut key_updates_right.deleted_keys); - maybe_right - }) - }) + tree.walk( + false, + |maybe_right| { + Self::apply_to( + maybe_right, + right_batch, + source, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + grove_version, + ) + .map_ok(|(maybe_right, mut key_updates_right)| { + key_updates.new_keys.append(&mut key_updates_right.new_keys); + key_updates + .updated_keys + .append(&mut key_updates_right.updated_keys); + key_updates + .deleted_keys + .append(&mut key_updates_right.deleted_keys); + maybe_right + }) + }, + value_defined_cost_fn, + grove_version + ) ) } else { tree }; - let tree = cost_return_on_error!(&mut cost, tree.maybe_balance()); + let tree = cost_return_on_error!( + &mut cost, + tree.maybe_balance(value_defined_cost_fn, grove_version) + ); let new_root_key = tree.tree().key(); @@ -631,9 +838,16 @@ where } /// Checks if the tree is unbalanced and if so, applies AVL tree rotation(s) - /// to rebalance the tree and its subtrees. Returns the root node of the + /// to re-balance the tree and its subtrees. Returns the root node of the /// balanced tree after applying the rotations. 
- fn maybe_balance(self) -> CostResult { + fn maybe_balance( + self, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult + where + V: Fn(&[u8], &GroveVersion) -> Option, + { let mut cost = OperationCost::default(); let balance_factor = self.balance_factor(); @@ -647,37 +861,69 @@ where let tree = if left == (self.tree().link(left).unwrap().balance_factor() > 0) { cost_return_on_error!( &mut cost, - self.walk_expect(left, |child| child.rotate(!left).map_ok(Option::Some)) + self.walk_expect( + left, + |child| child + .rotate(!left, value_defined_cost_fn, grove_version) + .map_ok(Some), + value_defined_cost_fn, + grove_version, + ) ) } else { self }; - let rotate = tree.rotate(left).unwrap_add_cost(&mut cost); + let rotate = tree + .rotate(left, value_defined_cost_fn, grove_version) + .unwrap_add_cost(&mut cost); rotate.wrap_with_cost(cost) } /// Applies an AVL tree rotation, a constant-time operation which only needs - /// to swap pointers in order to rebalance a tree. - fn rotate(self, left: bool) -> CostResult { + /// to swap pointers in order to re-balance a tree. 
+ fn rotate( + self, + left: bool, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult + where + V: Fn(&[u8], &GroveVersion) -> Option, + { let mut cost = OperationCost::default(); - let (tree, child) = cost_return_on_error!(&mut cost, self.detach_expect(left)); - let (child, maybe_grandchild) = cost_return_on_error!(&mut cost, child.detach(!left)); + let (tree, child) = cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ); + let (child, maybe_grandchild) = cost_return_on_error!( + &mut cost, + child.detach(!left, value_defined_cost_fn, grove_version) + ); // attach grandchild to self tree.attach(left, maybe_grandchild) - .maybe_balance() + .maybe_balance(value_defined_cost_fn, grove_version) .flat_map_ok(|tree| { // attach self to child, return child - child.attach(!left, Some(tree)).maybe_balance() + child + .attach(!left, Some(tree)) + .maybe_balance(value_defined_cost_fn, grove_version) }) .add_cost(cost) } - /// Removes the root node from the tree. Rearranges and rebalances + /// Removes the root node from the tree. Rearranges and re-balances /// descendants (if any) in order to maintain a valid tree. 
- pub fn remove(self) -> CostResult, Error> { + pub fn remove( + self, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult, Error> + where + V: Fn(&[u8], &GroveVersion) -> Option, + { let mut cost = OperationCost::default(); let tree = self.tree(); @@ -687,14 +933,28 @@ where let maybe_tree = if has_left && has_right { // two children, promote edge of taller child - let (tree, tall_child) = cost_return_on_error!(&mut cost, self.detach_expect(left)); - let (_, short_child) = cost_return_on_error!(&mut cost, tree.detach_expect(!left)); - let promoted = - cost_return_on_error!(&mut cost, tall_child.promote_edge(!left, short_child)); + let (tree, tall_child) = cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ); + let (_, short_child) = cost_return_on_error!( + &mut cost, + tree.detach_expect(!left, value_defined_cost_fn, grove_version) + ); + let promoted = cost_return_on_error!( + &mut cost, + tall_child.promote_edge(!left, short_child, value_defined_cost_fn, grove_version) + ); Some(promoted) } else if has_left || has_right { // single child, promote it - Some(cost_return_on_error!(&mut cost, self.detach_expect(left)).1) + Some( + cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ) + .1, + ) } else { // no child None @@ -707,31 +967,55 @@ where /// reattaches it at the top in order to fill in a gap when removing a root /// node from a tree with both left and right children. Attaches `attach` on /// the opposite side. Returns the promoted node. 
- fn promote_edge(self, left: bool, attach: Self) -> CostResult { - self.remove_edge(left).flat_map_ok(|(edge, maybe_child)| { - edge.attach(!left, maybe_child) - .attach(left, Some(attach)) - .maybe_balance() - }) + fn promote_edge( + self, + left: bool, + attach: Self, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult + where + V: Fn(&[u8], &GroveVersion) -> Option, + { + self.remove_edge(left, value_defined_cost_fn, grove_version) + .flat_map_ok(|(edge, maybe_child)| { + edge.attach(!left, maybe_child) + .attach(left, Some(attach)) + .maybe_balance(value_defined_cost_fn, grove_version) + }) } /// Traverses to the tree's edge on the given side and detaches it /// (reattaching its child, if any, to its former parent). Return value is /// `(edge, maybe_updated_tree)`. - fn remove_edge(self, left: bool) -> CostResult<(Self, Option), Error> { + fn remove_edge( + self, + left: bool, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult<(Self, Option), Error> + where + V: Fn(&[u8], &GroveVersion) -> Option, + { let mut cost = OperationCost::default(); if self.tree().link(left).is_some() { // this node is not the edge, recurse - let (tree, child) = cost_return_on_error!(&mut cost, self.detach_expect(left)); - let (edge, maybe_child) = cost_return_on_error!(&mut cost, child.remove_edge(left)); + let (tree, child) = cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ); + let (edge, maybe_child) = cost_return_on_error!( + &mut cost, + child.remove_edge(left, value_defined_cost_fn, grove_version) + ); tree.attach(left, maybe_child) - .maybe_balance() + .maybe_balance(value_defined_cost_fn, grove_version) .map_ok(|tree| (edge, Some(tree))) .add_cost(cost) } else { // this node is the edge, detach its child if present - self.detach(!left) + self.detach(!left, value_defined_cost_fn, grove_version) } } } @@ -742,15 +1026,16 @@ mod test { use super::*; use 
crate::{ test_utils::{apply_memonly, assert_tree_invariants, del_entry, make_tree_seq, seq_key}, - tree::{tree_feature_type::TreeFeatureType::BasicMerk, *}, + tree::{tree_feature_type::TreeFeatureType::BasicMerkNode, *}, }; #[test] fn simple_insert() { - let batch = [(b"foo2".to_vec(), Op::Put(b"bar2".to_vec(), BasicMerk))]; - let tree = Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap(); + let grove_version = GroveVersion::latest(); + let batch = [(b"foo2".to_vec(), Put(b"bar2".to_vec(), BasicMerkNode))]; + let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); let walker = maybe_walker.expect("should be Some"); @@ -763,10 +1048,11 @@ mod test { #[test] fn simple_update() { - let batch = [(b"foo".to_vec(), Op::Put(b"bar2".to_vec(), BasicMerk))]; - let tree = Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap(); + let grove_version = GroveVersion::latest(); + let batch = [(b"foo".to_vec(), Put(b"bar2".to_vec(), BasicMerkNode))]; + let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); let walker = maybe_walker.expect("should be Some"); @@ -780,8 +1066,9 @@ mod test { #[test] fn simple_delete() { - let batch = [(b"foo2".to_vec(), Op::Delete)]; - let tree = Tree::from_fields( + let grove_version = GroveVersion::latest(); + let batch = [(b"foo2".to_vec(), Delete)]; + let tree = TreeNode::from_fields( b"foo".to_vec(), b"bar".to_vec(), [123; 32], @@ -790,13 +1077,14 @@ mod test { hash: [123; 32], sum: None, child_heights: (0, 0), - tree: Tree::new(b"foo2".to_vec(), b"bar2".to_vec(), 
None, BasicMerk).unwrap(), + tree: TreeNode::new(b"foo2".to_vec(), b"bar2".to_vec(), None, BasicMerkNode) + .unwrap(), }), - BasicMerk, + BasicMerkNode, ) .unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); let walker = maybe_walker.expect("should be Some"); @@ -814,20 +1102,22 @@ mod test { #[test] fn delete_non_existent() { - let batch = [(b"foo2".to_vec(), Op::Delete)]; - let tree = Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap(); + let grove_version = GroveVersion::latest(); + let batch = [(b"foo2".to_vec(), Delete)]; + let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .unwrap(); } #[test] fn delete_only_node() { - let batch = [(b"foo".to_vec(), Op::Delete)]; - let tree = Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap(); + let grove_version = GroveVersion::latest(); + let batch = [(b"foo".to_vec(), Delete)]; + let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); assert!(maybe_walker.is_none()); @@ -841,10 +1131,11 @@ mod test { #[test] fn delete_deep() { - let tree = make_tree_seq(50); + let grove_version = GroveVersion::latest(); + let tree = make_tree_seq(50, grove_version); let batch = [del_entry(5)]; let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); maybe_walker.expect("should be Some"); @@ -858,10 +1149,11 @@ mod test { 
#[test] fn delete_recursive() { - let tree = make_tree_seq(50); + let grove_version = GroveVersion::latest(); + let tree = make_tree_seq(50, grove_version); let batch = [del_entry(29), del_entry(34)]; let (maybe_walker, mut key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); maybe_walker.expect("should be Some"); @@ -879,10 +1171,11 @@ mod test { #[test] fn delete_recursive_2() { - let tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let tree = make_tree_seq(10, grove_version); let batch = [del_entry(7), del_entry(9)]; let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); maybe_walker.expect("should be Some"); @@ -895,17 +1188,21 @@ mod test { #[test] fn apply_empty_none() { - let (maybe_tree, key_updates) = Walker::::apply_to::, _, _>( + let grove_version = GroveVersion::latest(); + let (maybe_tree, key_updates) = Walker::::apply_to::, _, _, _, _>( None, &[], PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -916,18 +1213,22 @@ mod test { #[test] fn insert_empty_single() { - let batch = vec![(vec![0], Op::Put(vec![1], BasicMerk))]; + let grove_version = GroveVersion::latest(); + let batch = vec![(vec![0], Put(vec![1], BasicMerkNode))]; let (maybe_tree, key_updates) = Walker::::apply_to( None, &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( 
BasicStorageRemoval(key_bytes_to_remove), BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -941,18 +1242,22 @@ mod test { #[test] fn insert_updated_single() { - let batch = vec![(vec![0], Op::Put(vec![1], BasicMerk))]; + let grove_version = GroveVersion::latest(); + let batch = vec![(vec![0], Put(vec![1], BasicMerkNode))]; let (maybe_tree, key_updates) = Walker::::apply_to( None, &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -961,20 +1266,23 @@ mod test { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); let batch = vec![ - (vec![0], Op::Put(vec![2], BasicMerk)), - (vec![1], Op::Put(vec![2], BasicMerk)), + (vec![0], Put(vec![2], BasicMerkNode)), + (vec![1], Put(vec![2], BasicMerkNode)), ]; let (maybe_tree, key_updates) = Walker::::apply_to( maybe_walker, &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -987,22 +1295,26 @@ mod test { #[test] fn insert_updated_multiple() { + let grove_version = GroveVersion::latest(); let batch = vec![ - (vec![0], Op::Put(vec![1], BasicMerk)), - (vec![1], Op::Put(vec![2], BasicMerk)), - (vec![2], Op::Put(vec![3], BasicMerk)), + (vec![0], Put(vec![1], BasicMerkNode)), + (vec![1], Put(vec![2], BasicMerkNode)), + (vec![2], Put(vec![3], BasicMerkNode)), ]; let (maybe_tree, key_updates) = Walker::::apply_to( None, &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8], 
&GroveVersion) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -1011,21 +1323,24 @@ mod test { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); let batch = vec![ - (vec![0], Op::Put(vec![5], BasicMerk)), - (vec![1], Op::Put(vec![8], BasicMerk)), - (vec![2], Op::Delete), + (vec![0], Put(vec![5], BasicMerkNode)), + (vec![1], Put(vec![8], BasicMerkNode)), + (vec![2], Delete), ]; let (maybe_tree, key_updates) = Walker::::apply_to( maybe_walker, &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8], &GroveVersion) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -1039,9 +1354,10 @@ mod test { #[test] fn insert_root_single() { - let tree = Tree::new(vec![5], vec![123], None, BasicMerk).unwrap(); - let batch = vec![(vec![6], Op::Put(vec![123], BasicMerk))]; - let tree = apply_memonly(tree, &batch); + let grove_version = GroveVersion::latest(); + let tree = TreeNode::new(vec![5], vec![123], None, BasicMerkNode).unwrap(); + let batch = vec![(vec![6], Put(vec![123], BasicMerkNode))]; + let tree = apply_memonly(tree, &batch, grove_version); assert_eq!(tree.key(), &[5]); assert!(tree.child(true).is_none()); assert_eq!(tree.child(false).expect("expected child").key(), &[6]); @@ -1049,12 +1365,13 @@ mod test { #[test] fn insert_root_double() { - let tree = Tree::new(vec![5], vec![123], None, BasicMerk).unwrap(); + let grove_version = GroveVersion::latest(); + let tree = TreeNode::new(vec![5], vec![123], None, BasicMerkNode).unwrap(); let batch = vec![ - (vec![4], Op::Put(vec![123], BasicMerk)), - (vec![6], 
Op::Put(vec![123], BasicMerk)), + (vec![4], Put(vec![123], BasicMerkNode)), + (vec![6], Put(vec![123], BasicMerkNode)), ]; - let tree = apply_memonly(tree, &batch); + let tree = apply_memonly(tree, &batch, grove_version); assert_eq!(tree.key(), &[5]); assert_eq!(tree.child(true).expect("expected child").key(), &[4]); assert_eq!(tree.child(false).expect("expected child").key(), &[6]); @@ -1062,13 +1379,14 @@ mod test { #[test] fn insert_rebalance() { - let tree = Tree::new(vec![5], vec![123], None, BasicMerk).unwrap(); + let grove_version = GroveVersion::latest(); + let tree = TreeNode::new(vec![5], vec![123], None, BasicMerkNode).unwrap(); - let batch = vec![(vec![6], Op::Put(vec![123], BasicMerk))]; - let tree = apply_memonly(tree, &batch); + let batch = vec![(vec![6], Put(vec![123], BasicMerkNode))]; + let tree = apply_memonly(tree, &batch, grove_version); - let batch = vec![(vec![7], Op::Put(vec![123], BasicMerk))]; - let tree = apply_memonly(tree, &batch); + let batch = vec![(vec![7], Put(vec![123], BasicMerkNode))]; + let tree = apply_memonly(tree, &batch, grove_version); assert_eq!(tree.key(), &[6]); assert_eq!(tree.child(true).expect("expected child").key(), &[5]); @@ -1077,11 +1395,12 @@ mod test { #[test] fn insert_100_sequential() { - let mut tree = Tree::new(vec![0], vec![123], None, BasicMerk).unwrap(); + let grove_version = GroveVersion::latest(); + let mut tree = TreeNode::new(vec![0], vec![123], None, BasicMerkNode).unwrap(); for i in 0..100 { - let batch = vec![(vec![i + 1], Op::Put(vec![123], BasicMerk))]; - tree = apply_memonly(tree, &batch); + let batch = vec![(vec![i + 1], Put(vec![123], BasicMerkNode))]; + tree = apply_memonly(tree, &batch, grove_version); } assert_eq!(tree.key(), &[63]); diff --git a/merk/src/tree/tree_feature_type.rs b/merk/src/tree/tree_feature_type.rs index e99ca3106..c47fb0d60 100644 --- a/merk/src/tree/tree_feature_type.rs +++ b/merk/src/tree/tree_feature_type.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 
Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree feature type #[cfg(any(feature = "full", feature = "verify"))] @@ -39,16 +11,16 @@ use ed::{Decode, Encode}; use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; #[cfg(any(feature = "full", feature = "verify"))] -use crate::tree::tree_feature_type::TreeFeatureType::{BasicMerk, SummedMerk}; +use crate::tree::tree_feature_type::TreeFeatureType::{BasicMerkNode, SummedMerkNode}; #[cfg(any(feature = "full", feature = "verify"))] #[derive(Copy, Clone, PartialEq, Eq, Debug)] /// Basic or summed pub enum TreeFeatureType { - /// Basic Merk - BasicMerk, - /// Summed Merk - SummedMerk(i64), + /// Basic Merk Tree Node + BasicMerkNode, + /// Summed Merk Tree Node + SummedMerkNode(i64), } #[cfg(feature = "full")] @@ -57,23 +29,23 @@ impl TreeFeatureType { /// Get length of encoded SummedMerk pub fn sum_length(&self) -> Option { match self { - BasicMerk => None, - SummedMerk(m) => Some(m.encode_var_vec().len() as u32), + BasicMerkNode => None, + SummedMerkNode(m) => Some(m.encode_var_vec().len() as u32), } } #[inline] /// Is sum feature? 
pub fn is_sum_feature(&self) -> bool { - matches!(self, SummedMerk(_)) + matches!(self, SummedMerkNode(_)) } #[inline] /// Get encoding cost of self pub(crate) fn encoding_cost(&self) -> usize { match self { - BasicMerk => 1, - SummedMerk(_sum) => 9, + BasicMerkNode => 1, + SummedMerkNode(_sum) => 9, } } } @@ -85,11 +57,11 @@ impl Encode for TreeFeatureType { #[inline] fn encode_into(&self, dest: &mut W) -> ed::Result<()> { match self { - BasicMerk => { + BasicMerkNode => { dest.write_all(&[0])?; Ok(()) } - SummedMerk(sum) => { + SummedMerkNode(sum) => { dest.write_all(&[1])?; dest.write_varint(sum.to_owned())?; Ok(()) @@ -100,8 +72,8 @@ impl Encode for TreeFeatureType { #[inline] fn encoding_length(&self) -> ed::Result { match self { - BasicMerk => Ok(1), - SummedMerk(sum) => { + BasicMerkNode => Ok(1), + SummedMerkNode(sum) => { let encoded_sum = sum.encode_var_vec(); // 1 for the enum type // encoded_sum.len() for the length of the encoded vector @@ -118,10 +90,10 @@ impl Decode for TreeFeatureType { let mut feature_type: [u8; 1] = [0]; input.read_exact(&mut feature_type)?; match feature_type { - [0] => Ok(BasicMerk), + [0] => Ok(BasicMerkNode), [1] => { let encoded_sum: i64 = input.read_varint()?; - Ok(SummedMerk(encoded_sum)) + Ok(SummedMerkNode(encoded_sum)) } _ => Err(ed::Error::UnexpectedByte(55)), } diff --git a/merk/src/tree/walk/fetch.rs b/merk/src/tree/walk/fetch.rs index 94a083afc..0ba657f2e 100644 --- a/merk/src/tree/walk/fetch.rs +++ b/merk/src/tree/walk/fetch.rs @@ -1,40 +1,15 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is 
furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Walk #[cfg(feature = "full")] use grovedb_costs::CostResult; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] -use super::super::{Link, Tree}; +use super::super::{Link, TreeNode}; #[cfg(feature = "full")] use crate::error::Error; +#[cfg(feature = "full")] +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] /// A source of data to be used by the tree when encountering a pruned node. @@ -43,5 +18,12 @@ use crate::error::Error; pub trait Fetch { /// Called when the tree needs to fetch a node with the given `Link`. The /// `link` value will always be a `Link::Reference` variant. 
- fn fetch(&self, link: &Link) -> CostResult; + fn fetch( + &self, + link: &Link, + value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, + ) -> CostResult; } diff --git a/merk/src/tree/walk/mod.rs b/merk/src/tree/walk/mod.rs index 9cc5bb163..a84a1d4c2 100644 --- a/merk/src/tree/walk/mod.rs +++ b/merk/src/tree/walk/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree walk #[cfg(feature = "full")] @@ -36,12 +8,18 @@ mod ref_walker; #[cfg(feature = "full")] pub use fetch::Fetch; #[cfg(feature = "full")] -use grovedb_costs::{cost_return_on_error, CostContext, CostResult, CostsExt, OperationCost}; +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; +use grovedb_costs::{ + cost_return_on_error_no_add, + storage_cost::{removal::StorageRemovedBytes, StorageCost}, +}; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] pub use ref_walker::RefWalker; #[cfg(feature = "full")] -use super::{Link, Tree}; +use super::{Link, TreeNode}; +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::{owner::Owner, tree::tree_feature_type::TreeFeatureType, CryptoHash, Error}; @@ -52,7 +30,7 @@ pub struct Walker where S: Fetch + Sized + Clone, { - tree: Owner, + tree: Owner, source: S, } @@ -62,7 +40,7 @@ where S: Fetch + Sized + Clone, { /// Creates a `Walker` with the given tree and source. - pub fn new(tree: Tree, source: S) -> Self { + pub fn new(tree: TreeNode, source: S) -> Self { Self { tree: Owner::new(tree), source, @@ -72,7 +50,15 @@ where /// Similar to `Tree#detach`, but yields a `Walker` which fetches from the /// same source as `self`. Returned tuple is `(updated_self, /// maybe_child_walker)`. - pub fn detach(mut self, left: bool) -> CostResult<(Self, Option), Error> { + pub fn detach( + mut self, + left: bool, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult<(Self, Option), Error> + where + V: Fn(&[u8], &GroveVersion) -> Option, + { let mut cost = OperationCost::default(); let link = match self.tree.link(left) { @@ -91,7 +77,11 @@ where Some(Link::Reference { .. 
}) => (), _ => unreachable!("Expected Some(Link::Reference)"), } - cost_return_on_error!(&mut cost, self.source.fetch(&link.unwrap())) + cost_return_on_error!( + &mut cost, + self.source + .fetch(&link.unwrap(), value_defined_cost_fn, grove_version) + ) }; let child = self.wrap(child); @@ -101,29 +91,48 @@ where /// Similar to `Tree#detach_expect`, but yields a `Walker` which fetches /// from the same source as `self`. Returned tuple is `(updated_self, /// child_walker)`. - pub fn detach_expect(self, left: bool) -> CostResult<(Self, Self), Error> { - self.detach(left).map_ok(|(walker, maybe_child)| { - if let Some(child) = maybe_child { - (walker, child) - } else { - panic!( - "Expected {} child, got None", - if left { "left" } else { "right" } - ); - } - }) + pub fn detach_expect( + self, + left: bool, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult<(Self, Self), Error> + where + V: Fn(&[u8], &GroveVersion) -> Option, + { + self.detach(left, value_defined_cost_fn, grove_version) + .map_ok(|(walker, maybe_child)| { + if let Some(child) = maybe_child { + (walker, child) + } else { + panic!( + "Expected {} child, got None", + if left { "left" } else { "right" } + ); + } + }) } /// Similar to `Tree#walk`, but yields a `Walker` which fetches from the /// same source as `self`. 
- pub fn walk(self, left: bool, f: F) -> CostResult + pub fn walk( + self, + left: bool, + f: F, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult where F: FnOnce(Option) -> CostResult, Error>, - T: Into, + T: Into, + V: Fn(&[u8], &GroveVersion) -> Option, { let mut cost = OperationCost::default(); - let (mut walker, maybe_child) = cost_return_on_error!(&mut cost, self.detach(left)); + let (mut walker, maybe_child) = cost_return_on_error!( + &mut cost, + self.detach(left, value_defined_cost_fn, grove_version) + ); let new_child = match f(maybe_child).unwrap_add_cost(&mut cost) { Ok(x) => x.map(|t| t.into()), Err(e) => return Err(e).wrap_with_cost(cost), @@ -134,14 +143,24 @@ where /// Similar to `Tree#walk_expect` but yields a `Walker` which fetches from /// the same source as `self`. - pub fn walk_expect(self, left: bool, f: F) -> CostResult + pub fn walk_expect( + self, + left: bool, + f: F, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult where F: FnOnce(Self) -> CostResult, Error>, - T: Into, + T: Into, + V: Fn(&[u8], &GroveVersion) -> Option, { let mut cost = OperationCost::default(); - let (mut walker, child) = cost_return_on_error!(&mut cost, self.detach_expect(left)); + let (mut walker, child) = cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ); let new_child = match f(child).unwrap_add_cost(&mut cost) { Ok(x) => x.map(|t| t.into()), Err(e) => return Err(e).wrap_with_cost(cost), @@ -151,18 +170,18 @@ where } /// Returns an immutable reference to the `Tree` wrapped by this walker. - pub fn tree(&self) -> &Tree { + pub fn tree(&self) -> &TreeNode { &self.tree } /// Consumes the `Walker` and returns the `Tree` it wraps. - pub fn into_inner(self) -> Tree { + pub fn into_inner(self) -> TreeNode { self.tree.into_inner() } /// Takes a `Tree` and returns a `Walker` which fetches from the same source /// as `self`. 
- fn wrap(&self, tree: Tree) -> Self { + fn wrap(&self, tree: TreeNode) -> Self { Self::new(tree, self.source.clone()) } @@ -175,75 +194,180 @@ where /// implements `Into`. pub fn attach(mut self, left: bool, maybe_child: Option) -> Self where - T: Into, + T: Into, { self.tree .own(|t| t.attach(left, maybe_child.map(|t| t.into()))); self } - /// Similar to `Tree#with_value`. - pub fn put_value(mut self, value: Vec, feature_type: TreeFeatureType) -> CostContext { + /// Similar to `Tree#put_value`. + pub fn put_value( + mut self, + value: Vec, + feature_type: TreeFeatureType, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.tree - .own(|t| t.put_value(value, feature_type).unwrap_add_cost(&mut cost)); - self.wrap_with_cost(cost) + cost_return_on_error_no_add!( + &cost, + self.tree.own_result(|t| t + .put_value( + value, + feature_type, + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + .unwrap_add_cost(&mut cost)) + ); + Ok(self).wrap_with_cost(cost) } - /// Similar to `Tree#with_value`. + /// Similar to `Tree#put_value_with_fixed_cost`. 
pub fn put_value_with_fixed_cost( mut self, value: Vec, value_fixed_cost: u32, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.tree.own(|t| { - t.put_value_with_fixed_cost(value, value_fixed_cost, feature_type) - .unwrap_add_cost(&mut cost) - }); - self.wrap_with_cost(cost) + cost_return_on_error_no_add!( + &cost, + self.tree.own_result(|t| t + .put_value_with_fixed_cost( + value, + value_fixed_cost, + feature_type, + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + .unwrap_add_cost(&mut cost)) + ); + Ok(self).wrap_with_cost(cost) } - /// Similar to `Tree#with_value_and_value_hash`. + /// Similar to `Tree#put_value_and_reference_value_hash`. 
pub fn put_value_and_reference_value_hash( mut self, value: Vec, value_hash: CryptoHash, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.tree.own(|t| { - t.put_value_and_reference_value_hash(value, value_hash, feature_type) - .unwrap_add_cost(&mut cost) - }); - self.wrap_with_cost(cost) + cost_return_on_error_no_add!( + &cost, + self.tree.own_result(|t| t + .put_value_and_reference_value_hash( + value, + value_hash, + feature_type, + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + .unwrap_add_cost(&mut cost)) + ); + Ok(self).wrap_with_cost(cost) } - /// Similar to `Tree#with_value_and_value_hash`. + /// Similar to `Tree#put_value_with_reference_value_hash_and_value_cost`. 
pub fn put_value_with_reference_value_hash_and_value_cost( mut self, value: Vec, value_hash: CryptoHash, value_fixed_cost: u32, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.tree.own(|t| { - t.put_value_with_reference_value_hash_and_value_cost( - value, - value_hash, - value_fixed_cost, - feature_type, - ) - .unwrap_add_cost(&mut cost) - }); - self.wrap_with_cost(cost) + cost_return_on_error_no_add!( + &cost, + self.tree.own_result(|t| t + .put_value_with_reference_value_hash_and_value_cost( + value, + value_hash, + value_fixed_cost, + feature_type, + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + .unwrap_add_cost(&mut cost)) + ); + Ok(self).wrap_with_cost(cost) } } #[cfg(feature = "full")] -impl From> for Tree +impl From> for TreeNode where S: Fetch + Sized + Clone, { @@ -255,37 +379,51 @@ where #[cfg(feature = "full")] #[cfg(test)] mod test { - use grovedb_costs::{storage_cost::removal::StorageRemovedBytes::NoStorageRemoval, CostsExt}; + use grovedb_costs::CostsExt; + use grovedb_version::version::GroveVersion; use super::{super::NoopCommit, *}; - use crate::tree::{Tree, TreeFeatureType::BasicMerk}; + use crate::tree::{TreeFeatureType::BasicMerkNode, TreeNode}; #[derive(Clone)] struct MockSource {} impl Fetch for MockSource { - fn fetch(&self, link: &Link) -> CostResult { - Tree::new(link.key().to_vec(), b"foo".to_vec(), None, BasicMerk).map(Ok) + fn fetch( + &self, + link: &Link, + _value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + _grove_version: &GroveVersion, + ) -> CostResult { + 
TreeNode::new(link.key().to_vec(), b"foo".to_vec(), None, BasicMerkNode).map(Ok) } } #[test] fn walk_modified() { - let tree = Tree::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerk) + let grove_version = GroveVersion::latest(); + let tree = TreeNode::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap()), + Some(TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap()), ); let source = MockSource {}; let walker = Walker::new(tree, source); let walker = walker - .walk(true, |child| -> CostResult, Error> { - assert_eq!(child.expect("should have child").tree().key(), b"foo"); - Ok(None).wrap_with_cost(Default::default()) - }) + .walk( + true, + |child| -> CostResult, Error> { + assert_eq!(child.expect("should have child").tree().key(), b"foo"); + Ok(None).wrap_with_cost(Default::default()) + }, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("walk failed"); assert!(walker.into_inner().child(true).is_none()); @@ -293,29 +431,30 @@ mod test { #[test] fn walk_stored() { - let mut tree = Tree::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerk) + let grove_version = GroveVersion::latest(); + let mut tree = TreeNode::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap()), + Some(TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); let source = MockSource {}; let walker = Walker::new(tree, source); let walker = walker - .walk(true, |child| -> CostResult, Error> { - 
assert_eq!(child.expect("should have child").tree().key(), b"foo"); - Ok(None).wrap_with_cost(Default::default()) - }) + .walk( + true, + |child| -> CostResult, Error> { + assert_eq!(child.expect("should have child").tree().key(), b"foo"); + Ok(None).wrap_with_cost(Default::default()) + }, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("walk failed"); assert!(walker.into_inner().child(true).is_none()); @@ -323,7 +462,8 @@ mod test { #[test] fn walk_pruned() { - let tree = Tree::from_fields( + let grove_version = GroveVersion::latest(); + let tree = TreeNode::from_fields( b"test".to_vec(), b"abc".to_vec(), Default::default(), @@ -334,7 +474,7 @@ mod test { sum: None, }), None, - BasicMerk, + BasicMerkNode, ) .unwrap(); @@ -342,10 +482,15 @@ mod test { let walker = Walker::new(tree, source); let walker = walker - .walk_expect(true, |child| -> CostResult, Error> { - assert_eq!(child.tree().key(), b"foo"); - Ok(None).wrap_with_cost(Default::default()) - }) + .walk_expect( + true, + |child| -> CostResult, Error> { + assert_eq!(child.tree().key(), b"foo"); + Ok(None).wrap_with_cost(Default::default()) + }, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .expect("walk failed"); assert!(walker.into_inner().child(true).is_none()); @@ -353,16 +498,22 @@ mod test { #[test] fn walk_none() { - let tree = Tree::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerk).unwrap(); + let grove_version = GroveVersion::latest(); + let tree = TreeNode::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerkNode).unwrap(); let source = MockSource {}; let walker = Walker::new(tree, source); walker - .walk(true, |child| -> CostResult, Error> { - assert!(child.is_none()); - Ok(None).wrap_with_cost(Default::default()) - }) + .walk( + true, + |child| -> CostResult, Error> { + assert!(child.is_none()); + Ok(None).wrap_with_cost(Default::default()) + }, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) 
.unwrap() .expect("walk failed"); } diff --git a/merk/src/tree/walk/ref_walker.rs b/merk/src/tree/walk/ref_walker.rs index a6d7e4f0b..189bc7eeb 100644 --- a/merk/src/tree/walk/ref_walker.rs +++ b/merk/src/tree/walk/ref_walker.rs @@ -1,41 +1,15 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk reference walker #[cfg(feature = "full")] use grovedb_costs::{CostResult, CostsExt, OperationCost}; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use super::{ - super::{Link, Tree}, + super::{Link, TreeNode}, Fetch, }; +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::Error; @@ -50,7 +24,7 @@ pub struct RefWalker<'a, S> where S: Fetch + Sized + Clone, { - tree: &'a mut Tree, + tree: &'a mut TreeNode, source: S, } @@ -60,20 +34,28 @@ where S: Fetch + Sized + Clone, { /// Creates a `RefWalker` with the given tree and source. - pub fn new(tree: &'a mut Tree, source: S) -> Self { + pub fn new(tree: &'a mut TreeNode, source: S) -> Self { // TODO: check if tree has modified links, panic if so RefWalker { tree, source } } /// Gets an immutable reference to the `Tree` wrapped by this `RefWalker`. - pub fn tree(&self) -> &Tree { + pub fn tree(&self) -> &TreeNode { self.tree } /// Traverses to the child on the given side (if any), fetching from the /// source if pruned. When fetching, the link is upgraded from /// `Link::Reference` to `Link::Loaded`. - pub fn walk(&mut self, left: bool) -> CostResult>, Error> { + pub fn walk( + &mut self, + left: bool, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult>, Error> + where + V: Fn(&[u8], &GroveVersion) -> Option, + { let link = match self.tree.link(left) { None => return Ok(None).wrap_with_cost(Default::default()), Some(link) => link, @@ -84,7 +66,7 @@ where Link::Reference { .. 
} => { let load_res = self .tree - .load(left, &self.source) + .load(left, &self.source, value_defined_cost_fn, grove_version) .unwrap_add_cost(&mut cost); if let Err(e) = load_res { return Err(e).wrap_with_cost(cost); diff --git a/merk/src/visualize.rs b/merk/src/visualize.rs index da0bec444..0235f92dd 100644 --- a/merk/src/visualize.rs +++ b/merk/src/visualize.rs @@ -33,7 +33,7 @@ use std::io::{Result, Write}; use grovedb_storage::StorageContext; use grovedb_visualize::{Drawer, Visualize}; -use crate::{tree::Tree, Merk}; +use crate::{tree::TreeNode, Merk}; /// Visualizeable Merk pub struct VisualizeableMerk<'a, S, F> { @@ -52,12 +52,12 @@ impl<'a, S, F> VisualizeableMerk<'a, S, F> { } struct VisualizableTree<'a, F> { - tree: &'a Tree, + tree: &'a TreeNode, deserialize_fn: F, } impl<'a, F> VisualizableTree<'a, F> { - fn new(tree: &'a Tree, deserialize_fn: F) -> Self { + fn new(tree: &'a TreeNode, deserialize_fn: F) -> Self { Self { tree, deserialize_fn, @@ -87,9 +87,9 @@ impl<'a, 'db, S: StorageContext<'db>, T: Visualize, F: Fn(&[u8]) -> T + Copy> Vi impl<'a, T: Visualize, F: Fn(&[u8]) -> T + Copy> Visualize for VisualizableTree<'a, F> { fn visualize(&self, mut drawer: Drawer) -> Result> { drawer.write(b"[key: ")?; - drawer = self.tree.inner.key_as_slice().visualize(drawer)?; + drawer = self.tree.inner.kv.key_as_ref().visualize(drawer)?; drawer.write(b", value: ")?; - drawer = (self.deserialize_fn)(self.tree.inner.value_as_slice()).visualize(drawer)?; + drawer = (self.deserialize_fn)(self.tree.inner.kv.value_as_slice()).visualize(drawer)?; drawer.down(); drawer.write(b"\n")?; diff --git a/node-grove/Cargo.toml b/node-grove/Cargo.toml index 1e18a6f3a..7656ea7d6 100644 --- a/node-grove/Cargo.toml +++ b/node-grove/Cargo.toml @@ -11,6 +11,7 @@ crate-type = ["cdylib"] [dependencies] grovedb = { path = "../grovedb", features = ["full", "estimated_costs"] } +grovedb-version = { path = "../grovedb-version" } [dependencies.neon] version = "0.10.1" diff --git 
a/node-grove/src/lib.rs b/node-grove/src/lib.rs index 26eb5188a..e9e4ac0ac 100644 --- a/node-grove/src/lib.rs +++ b/node-grove/src/lib.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
GroveDB binding for Node.JS #![deny(missing_docs)] @@ -35,6 +7,7 @@ mod converter; use std::{option::Option::None, path::Path, sync::mpsc, thread}; use grovedb::{GroveDb, Transaction, TransactionArg}; +use grovedb_version::version::GroveVersion; use neon::prelude::*; type DbCallback = Box FnOnce(&'a GroveDb, TransactionArg, &Channel) + Send>; @@ -348,6 +321,7 @@ impl GroveDbWrapper { path.as_slice(), &key, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs @@ -397,6 +371,7 @@ impl GroveDbWrapper { &key, None, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs; @@ -446,6 +421,7 @@ impl GroveDbWrapper { element, None, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs; @@ -488,6 +464,7 @@ impl GroveDbWrapper { &key, element, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs; @@ -663,7 +640,10 @@ impl GroveDbWrapper { .query_item_value( &path_query, allows_cache, + true, + true, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs; @@ -758,7 +738,10 @@ impl GroveDbWrapper { db.send_to_db_thread(move |grove_db: &GroveDb, transaction, channel| { let result = grove_db - .root_hash(using_transaction.then_some(transaction).flatten()) + .root_hash( + using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), + ) .unwrap(); // Todo: Costs; channel.send(move |mut task_context| { diff --git a/path/Cargo.toml b/path/Cargo.toml index c627f855c..bae126750 100644 --- a/path/Cargo.toml +++ b/path/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-path" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" edition = "2021" license = "MIT" description = "Path extension crate for GroveDB" diff --git a/path/src/lib.rs b/path/src/lib.rs index b79c7deb9..548dc4100 100644 --- a/path/src/lib.rs +++ 
b/path/src/lib.rs @@ -44,7 +44,7 @@ mod tests { use super::*; use crate::util::calculate_hash; - fn assert_path_properties<'b, B>(path: SubtreePath<'b, B>, reference: Vec>) + fn assert_path_properties(path: SubtreePath<'_, B>, reference: Vec>) where B: AsRef<[u8]> + std::fmt::Debug, { @@ -62,6 +62,7 @@ mod tests { let subtree_path_builder = subtree_path_ref.derive_owned(); assert_eq!(calculate_hash(&path), calculate_hash(&subtree_path_ref)); assert_eq!(calculate_hash(&path), calculate_hash(&subtree_path_builder)); + assert_eq!(path.len(), reference.len()); } #[test] diff --git a/path/src/subtree_path.rs b/path/src/subtree_path.rs index 1752c7fff..437f911af 100644 --- a/path/src/subtree_path.rs +++ b/path/src/subtree_path.rs @@ -146,6 +146,26 @@ impl SubtreePath<'static, [u8; 0]> { } } +impl SubtreePath<'_, B> { + /// Returns the length of the subtree path. + pub fn len(&self) -> usize { + match &self.ref_variant { + SubtreePathInner::Slice(s) => s.len(), + SubtreePathInner::SubtreePath(path) => path.len(), + SubtreePathInner::SubtreePathIter(path_iter) => path_iter.len(), + } + } + + /// Returns whether the path is empty (the root tree). + pub fn is_empty(&self) -> bool { + match &self.ref_variant { + SubtreePathInner::Slice(s) => s.is_empty(), + SubtreePathInner::SubtreePath(path) => path.is_empty(), + SubtreePathInner::SubtreePathIter(path_iter) => path_iter.is_empty(), + } + } +} + impl<'b, B: AsRef<[u8]>> SubtreePath<'b, B> { /// Get a derived path that will reuse this [Self] as it's base path and /// capable of owning data. 
@@ -241,17 +261,17 @@ mod tests { let parent = builder.derive_parent().unwrap().0; let as_vec = parent.to_vec(); - assert_eq!( - as_vec, - vec![ - b"one".to_vec(), - b"two".to_vec(), - b"three".to_vec(), - b"four".to_vec(), - b"five".to_vec(), - b"six".to_vec(), - b"seven".to_vec(), - ], - ); + let reference_vec = vec![ + b"one".to_vec(), + b"two".to_vec(), + b"three".to_vec(), + b"four".to_vec(), + b"five".to_vec(), + b"six".to_vec(), + b"seven".to_vec(), + ]; + + assert_eq!(as_vec, reference_vec); + assert_eq!(parent.len(), reference_vec.len()); } } diff --git a/path/src/subtree_path_builder.rs b/path/src/subtree_path_builder.rs index c3e868e66..4ef25f0a1 100644 --- a/path/src/subtree_path_builder.rs +++ b/path/src/subtree_path_builder.rs @@ -107,6 +107,20 @@ pub(crate) enum SubtreePathRelative<'r> { Multi(CompactBytes), } +impl SubtreePathRelative<'_> { + pub fn len(&self) -> usize { + match self { + SubtreePathRelative::Empty => 0, + SubtreePathRelative::Single(_) => 1, + SubtreePathRelative::Multi(cb) => cb.len(), + } + } + + pub fn is_empty(&self) -> bool { + matches!(self, SubtreePathRelative::Empty) + } +} + impl Hash for SubtreePathRelative<'_> { fn hash(&self, state: &mut H) { match self { @@ -135,6 +149,18 @@ impl Default for SubtreePathBuilder<'static, [u8; 0]> { } } +impl SubtreePathBuilder<'_, B> { + /// Returns the length of the subtree path. + pub fn len(&self) -> usize { + self.base.len() + self.relative.len() + } + + /// Returns whether the path is empty (the root tree). + pub fn is_empty(&self) -> bool { + self.base.is_empty() && self.relative.is_empty() + } +} + impl<'b, B: AsRef<[u8]>> SubtreePathBuilder<'b, B> { /// Get a derived path that will use another subtree path (or reuse the base /// slice) as it's base, then could be edited in place. 
@@ -266,17 +292,17 @@ mod tests { builder.push_segment(b"seven"); let as_vec = builder.to_vec(); - assert_eq!( - as_vec, - vec![ - b"one".to_vec(), - b"two".to_vec(), - b"three".to_vec(), - b"four".to_vec(), - b"five".to_vec(), - b"six".to_vec(), - b"seven".to_vec(), - ], - ); + let reference_vec = vec![ + b"one".to_vec(), + b"two".to_vec(), + b"three".to_vec(), + b"four".to_vec(), + b"five".to_vec(), + b"six".to_vec(), + b"seven".to_vec(), + ]; + + assert_eq!(as_vec, reference_vec); + assert_eq!(builder.len(), reference_vec.len()); } } diff --git a/path/src/subtree_path_iter.rs b/path/src/subtree_path_iter.rs index 78f3e12da..2ca658660 100644 --- a/path/src/subtree_path_iter.rs +++ b/path/src/subtree_path_iter.rs @@ -52,6 +52,10 @@ impl<'b, B> Clone for SubtreePathIter<'b, B> { } impl<'b, B> SubtreePathIter<'b, B> { + pub(crate) fn len(&self) -> usize { + self.current_iter.len() + self.next_subtree_path.map(|p| p.len()).unwrap_or_default() + } + pub(crate) fn new(iter: I) -> Self where I: Into>, @@ -133,6 +137,16 @@ pub(crate) enum CurrentSubtreePathIter<'b, B> { OwnedBytes(CompactBytesIter<'b>), } +impl CurrentSubtreePathIter<'_, B> { + pub fn len(&self) -> usize { + match self { + CurrentSubtreePathIter::Single(_) => 1, + CurrentSubtreePathIter::Slice(s) => s.len(), + CurrentSubtreePathIter::OwnedBytes(cb) => cb.len(), + } + } +} + impl<'b, B> Clone for CurrentSubtreePathIter<'b, B> { fn clone(&self) -> Self { match self { diff --git a/path/src/util/compact_bytes.rs b/path/src/util/compact_bytes.rs index d14e26445..1e4362cb5 100644 --- a/path/src/util/compact_bytes.rs +++ b/path/src/util/compact_bytes.rs @@ -61,7 +61,6 @@ impl CompactBytes { } } - #[cfg(test)] pub fn len(&self) -> usize { self.n_segments } diff --git a/storage/Cargo.toml b/storage/Cargo.toml index 96c7522a2..7d7030bfd 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-storage" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" edition = "2021" license 
= "MIT" description = "Storage extension crate for GroveDB" @@ -10,17 +10,17 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] lazy_static = { version = "1.4.0", optional = true } -num_cpus = { version = "1.14.0", optional = true } -tempfile = { version = "3.3.0", optional = true } -blake3 = { version = "1.3.3", optional = true } -integer-encoding = { version = "3.0.4", optional = true } -grovedb-visualize = { version = "1.0.0-rc.1", path = "../visualize" } -strum = { version = "0.24.1", features = ["derive"] } -grovedb-costs = { version = "1.0.0-rc.1", path = "../costs" } -thiserror = "1.0.37" -rocksdb = { version = "0.21.0", optional = true } +num_cpus = { version = "1.16.0", optional = true } +tempfile = { version = "3.10.1", optional = true } +blake3 = { version = "1.5.1", optional = true } +integer-encoding = { version = "4.0.0", optional = true } +grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize" } +strum = { version = "0.26.2", features = ["derive"] } +grovedb-costs = { version = "1.0.0-rc.2", path = "../costs" } +thiserror = "1.0.59" +rocksdb = { version = "0.22.0", optional = true } hex = "0.4.3" -grovedb-path = { version = "1.0.0-rc.1", path = "../path" } +grovedb-path = { version = "1.0.0-rc.2", path = "../path" } [features] rocksdb_storage = ["rocksdb", "num_cpus", "lazy_static", "tempfile", "blake3", "integer-encoding"] diff --git a/storage/src/rocksdb_storage.rs b/storage/src/rocksdb_storage.rs index 90d0cc216..14c4df5ac 100644 --- a/storage/src/rocksdb_storage.rs +++ b/storage/src/rocksdb_storage.rs @@ -28,7 +28,7 @@ //! GroveDB storage layer implemented over RocksDB backend. 
mod storage; -mod storage_context; +pub mod storage_context; pub mod test_utils; #[cfg(test)] mod tests; diff --git a/storage/src/rocksdb_storage/storage.rs b/storage/src/rocksdb_storage/storage.rs index 96d17a7e1..a396b75fe 100644 --- a/storage/src/rocksdb_storage/storage.rs +++ b/storage/src/rocksdb_storage/storage.rs @@ -41,7 +41,7 @@ use integer_encoding::VarInt; use lazy_static::lazy_static; use rocksdb::{ checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, OptimisticTransactionDB, - Transaction, WriteBatchWithTransaction, + Transaction, WriteBatchWithTransaction, DEFAULT_COLUMN_FAMILY_NAME, }; use super::{ @@ -112,7 +112,6 @@ impl RocksDbStorage { ], ) .map_err(RocksDBError)?; - Ok(RocksDbStorage { db }) } @@ -405,6 +404,34 @@ impl RocksDbStorage { .wrap_with_cost(OperationCost::default()) } } + + /// Destroys the OptimisticTransactionDB and drops instance + pub fn wipe(&self) -> Result<(), Error> { + // TODO: fix this + // very inefficient way of doing this, time complexity is O(n) + // we can do O(1) + self.wipe_column_family(DEFAULT_COLUMN_FAMILY_NAME)?; + self.wipe_column_family(ROOTS_CF_NAME)?; + self.wipe_column_family(AUX_CF_NAME)?; + self.wipe_column_family(META_CF_NAME)?; + Ok(()) + } + + fn wipe_column_family(&self, column_family_name: &str) -> Result<(), Error> { + let cf_handle = self + .db + .cf_handle(column_family_name) + .ok_or(Error::StorageError( + "failed to get column family handle".to_string(), + ))?; + let mut iter = self.db.raw_iterator_cf(&cf_handle); + iter.seek_to_first(); + while iter.valid() { + self.db.delete(iter.key().expect("should have key"))?; + iter.next() + } + Ok(()) + } } impl<'db> Storage<'db> for RocksDbStorage { diff --git a/storage/src/rocksdb_storage/storage_context.rs b/storage/src/rocksdb_storage/storage_context.rs index 7481fc130..0611d51c1 100644 --- a/storage/src/rocksdb_storage/storage_context.rs +++ b/storage/src/rocksdb_storage/storage_context.rs @@ -29,7 +29,7 @@ //! 
Implementation of prefixed storage context. mod batch; -mod context_immediate; +pub mod context_immediate; mod context_no_tx; mod context_tx; mod raw_iterator; diff --git a/storage/src/rocksdb_storage/storage_context/context_no_tx.rs b/storage/src/rocksdb_storage/storage_context/context_no_tx.rs index 20cb65c17..fd639a5a6 100644 --- a/storage/src/rocksdb_storage/storage_context/context_no_tx.rs +++ b/storage/src/rocksdb_storage/storage_context/context_no_tx.rs @@ -265,7 +265,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbStorageContext<'db> { fn new_batch(&self) -> Self::Batch { PrefixedMultiContextBatchPart { - prefix: self.prefix.clone(), + prefix: self.prefix, batch: StorageBatch::new(), } } @@ -279,7 +279,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbStorageContext<'db> { fn raw_iter(&self) -> Self::RawIterator { PrefixedRocksDbRawIterator { - prefix: self.prefix.clone(), + prefix: self.prefix, raw_iterator: self.storage.raw_iterator(), } } diff --git a/storage/src/rocksdb_storage/storage_context/context_tx.rs b/storage/src/rocksdb_storage/storage_context/context_tx.rs index 045cd982d..d5a480c38 100644 --- a/storage/src/rocksdb_storage/storage_context/context_tx.rs +++ b/storage/src/rocksdb_storage/storage_context/context_tx.rs @@ -296,7 +296,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbTransactionContext<'db> { fn new_batch(&self) -> Self::Batch { PrefixedMultiContextBatchPart { - prefix: self.prefix.clone(), + prefix: self.prefix, batch: StorageBatch::new(), } } @@ -311,7 +311,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbTransactionContext<'db> { fn raw_iter(&self) -> Self::RawIterator { PrefixedRocksDbRawIterator { - prefix: self.prefix.clone(), + prefix: self.prefix, raw_iterator: self.transaction.raw_iterator(), } } diff --git a/storage/src/rocksdb_storage/storage_context/raw_iterator.rs b/storage/src/rocksdb_storage/storage_context/raw_iterator.rs index 4ee36510d..a9d6cf4fe 100644 --- 
a/storage/src/rocksdb_storage/storage_context/raw_iterator.rs +++ b/storage/src/rocksdb_storage/storage_context/raw_iterator.rs @@ -48,7 +48,7 @@ pub struct PrefixedRocksDbRawIterator { impl<'a> RawIterator for PrefixedRocksDbRawIterator> { fn seek_to_first(&mut self) -> CostContext<()> { - self.raw_iterator.seek(&self.prefix); + self.raw_iterator.seek(self.prefix); ().wrap_with_cost(OperationCost::with_seek_count(1)) } @@ -169,7 +169,7 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator RawIterator for PrefixedRocksDbRawIterator>> { fn seek_to_first(&mut self) -> CostContext<()> { - self.raw_iterator.seek(&self.prefix); + self.raw_iterator.seek(self.prefix); ().wrap_with_cost(OperationCost::with_seek_count(1)) } diff --git a/tutorials/Cargo.toml b/tutorials/Cargo.toml index ec220b446..409a1c64e 100644 --- a/tutorials/Cargo.toml +++ b/tutorials/Cargo.toml @@ -7,9 +7,14 @@ default-run = "tutorials" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -grovedb = { git = "https://github.com/dashpay/grovedb.git" } -path = { path = "../path" } +#grovedb = { git = "https://github.com/dashpay/grovedb.git" } +grovedb = { path = "../grovedb" } +grovedb-merk = { path = "../merk" } +grovedb-storage = { path = "../storage" } +grovedb-visualize = { path = "../visualize" } +grovedb-path = { path = "../path" } rand = "0.8.5" +hex = "0.4" [workspace] diff --git a/tutorials/src/bin/delete.rs b/tutorials/src/bin/delete.rs index 5ff6beab3..063243fd0 100644 --- a/tutorials/src/bin/delete.rs +++ b/tutorials/src/bin/delete.rs @@ -20,6 +20,7 @@ fn main() { Element::Item(val1.to_vec(), None), None, None, + grove_version, ) .unwrap() .expect("successful key1 insert"); @@ -31,13 +32,14 @@ fn main() { Element::Item(val2.to_vec(), None), None, None, + grove_version, ) .unwrap() .expect("successful key2 insert"); // Check the key-values are there - let result1 = db.get(root_path, key1, None).unwrap(); - let result2 = 
db.get(root_path, key2, None).unwrap(); + let result1 = db.get(root_path, key1, None, grove_version).unwrap(); + let result2 = db.get(root_path, key2, None, grove_version).unwrap(); println!("Before deleting, we have key1: {:?}", result1); println!("Before deleting, we have key2: {:?}", result2); @@ -50,8 +52,8 @@ fn main() { .expect("successfully deleted key2"); // Check the key-values again - let result3 = db.get(root_path, key1, None).unwrap(); - let result4 = db.get(root_path, key2, None).unwrap(); + let result3 = db.get(root_path, key1, None, grove_version).unwrap(); + let result4 = db.get(root_path, key2, None, grove_version).unwrap(); println!("After deleting, we have key1: {:?}", result3); println!("After deleting, we have key2: {:?}", result4); } diff --git a/tutorials/src/bin/insert.rs b/tutorials/src/bin/insert.rs index 5b1a4cd1f..3d9f9b2ad 100644 --- a/tutorials/src/bin/insert.rs +++ b/tutorials/src/bin/insert.rs @@ -20,6 +20,7 @@ fn main() { Element::Item(val1.to_vec(), None), None, None, + grove_version, ) .unwrap() .expect("successful key1 insert"); @@ -31,6 +32,7 @@ fn main() { Element::Item(val2.to_vec(), None), None, None, + grove_version, ) .unwrap() .expect("successful key2 insert"); @@ -42,10 +44,10 @@ fn main() { // function to get them from the RocksDB backing store. // Get value 1 - let result1 = db.get(root_path, key1, None).unwrap(); + let result1 = db.get(root_path, key1, None, grove_version).unwrap(); // Get value 2 - let result2 = db.get(root_path, key2, None).unwrap(); + let result2 = db.get(root_path, key2, None, grove_version).unwrap(); // Print the values to terminal println!("{:?}", result1); diff --git a/tutorials/src/bin/proofs.rs b/tutorials/src/bin/proofs.rs index e62fb17b4..025969197 100644 --- a/tutorials/src/bin/proofs.rs +++ b/tutorials/src/bin/proofs.rs @@ -28,19 +28,19 @@ fn main() { let path_query = PathQuery::new_unsized(path, query.clone()); // Execute the query and collect the result items in "elements". 
let (_elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, false, true, None) .unwrap() .expect("expected successful get_path_query"); // Generate proof. - let proof = db.prove_query(&path_query).unwrap().unwrap(); + let proof = db.prove_query(&path_query, None, grove_version).unwrap().unwrap(); // Get hash from query proof and print to terminal along with GroveDB root hash. - let (hash, _result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); + let (hash, _result_set) = GroveDb::verify_query(&proof, &path_query, grove_version).unwrap(); // See if the query proof hash matches the GroveDB root hash println!("Does the hash generated from the query proof match the GroveDB root hash?"); - if hash == db.root_hash(None).unwrap().unwrap() { + if hash == db.root_hash(None, grove_version).unwrap().unwrap() { println!("Yes"); } else { println!("No"); @@ -52,13 +52,13 @@ fn populate(db: &GroveDb) { // Put an empty subtree into the root tree nodes at KEY1. // Call this SUBTREE1. - db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE1 insert"); // Put an empty subtree into subtree1 at KEY2. // Call this SUBTREE2. 
- db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE2 insert"); @@ -71,6 +71,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values"); diff --git a/tutorials/src/bin/query-complex.rs b/tutorials/src/bin/query-complex.rs index a101bb372..b4fb78cfb 100644 --- a/tutorials/src/bin/query-complex.rs +++ b/tutorials/src/bin/query-complex.rs @@ -66,7 +66,7 @@ fn main() { // Execute the path query and collect the result items in "elements". let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, false, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -78,13 +78,13 @@ fn populate(db: &GroveDb) { let root_path: &[&[u8]] = &[]; // Put an empty subtree into the root tree nodes at KEY1. // Call this SUBTREE1. - db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE1 insert"); // Put an empty subtree into subtree1 at KEY2. // Call this SUBTREE2. 
- db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE2 insert"); @@ -97,6 +97,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values in SUBTREE2"); @@ -115,6 +116,7 @@ fn populate(db: &GroveDb) { Element::empty_tree(), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successful SUBTREE3 insert"); @@ -128,6 +130,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values in SUBTREE3"); @@ -141,6 +144,7 @@ fn populate(db: &GroveDb) { Element::empty_tree(), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successful SUBTREE4 insert"); @@ -153,6 +157,7 @@ fn populate(db: &GroveDb) { Element::empty_tree(), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successful SUBTREE5 insert"); @@ -166,6 +171,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values in SUBTREE5"); diff --git a/tutorials/src/bin/query-simple.rs b/tutorials/src/bin/query-simple.rs index 05ac62649..ab888873d 100644 --- a/tutorials/src/bin/query-simple.rs +++ b/tutorials/src/bin/query-simple.rs @@ -36,7 +36,7 @@ fn main() { // Execute the query and collect the result items in "elements". let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, false, true,None) .unwrap() .expect("expected successful get_path_query"); @@ -48,13 +48,13 @@ fn populate(db: &GroveDb) { let root_path: &[&[u8]] = &[]; // Put an empty subtree into the root tree nodes at KEY1. // Call this SUBTREE1. 
- db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE1 insert"); // Put an empty subtree into subtree1 at KEY2. // Call this SUBTREE2. - db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE2 insert"); @@ -67,6 +67,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values"); diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs new file mode 100644 index 000000000..5ed6ab5ba --- /dev/null +++ b/tutorials/src/bin/replication.rs @@ -0,0 +1,267 @@ +use std::collections::VecDeque; +use std::path::Path; +use grovedb::{operations::insert::InsertOptions, Element, GroveDb, PathQuery, Query, Transaction}; +use grovedb::reference_path::ReferencePathType; +use rand::{distributions::Alphanumeric, Rng, }; +use grovedb::element::SumValue; +use grovedb::replication::CURRENT_STATE_SYNC_VERSION; +use grovedb::replication::MultiStateSyncInfo; + +const MAIN_ΚΕΥ: &[u8] = b"key_main"; +const MAIN_ΚΕΥ_EMPTY: &[u8] = b"key_main_empty"; + +const KEY_INT_0: &[u8] = b"key_int_0"; +const KEY_INT_1: &[u8] = b"key_int_1"; +const KEY_INT_2: &[u8] = b"key_int_2"; +const KEY_INT_REF_0: &[u8] = b"key_int_ref_0"; +const KEY_INT_A: &[u8] = b"key_sum_0"; +const ROOT_PATH: &[&[u8]] = &[]; + +// Allow insertions to overwrite trees +// This is necessary so the tutorial can be rerun easily +const INSERT_OPTIONS: Option = Some(InsertOptions { + validate_insertion_does_not_override: false, + validate_insertion_does_not_override_tree: false, + base_root_storage_is_free: true, +}); + +fn populate_db(grovedb_path: String) -> GroveDb { + let db = GroveDb::open(grovedb_path).unwrap(); + + 
insert_empty_tree_db(&db, ROOT_PATH, MAIN_ΚΕΥ); + insert_empty_tree_db(&db, ROOT_PATH, MAIN_ΚΕΥ_EMPTY); + insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_0); + insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_1); + insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_2); + + let tx = db.start_transaction(); + let batch_size = 50; + for i in 0..=5 { + insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_0], i * batch_size, i * batch_size + batch_size - 1, &tx); + } + let _ = db.commit_transaction(tx); + + let tx = db.start_transaction(); + let batch_size = 50; + for i in 0..=5 { + insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_1], i * batch_size, i * batch_size + batch_size - 1, &tx); + } + let _ = db.commit_transaction(tx); + + let tx = db.start_transaction(); + let batch_size = 50; + for i in 0..=5 { + insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_2], i * batch_size, i * batch_size + batch_size - 1, &tx); + } + let _ = db.commit_transaction(tx); + + insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_REF_0); + + let tx_2 = db.start_transaction(); + insert_range_ref_double_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_REF_0], KEY_INT_0, 1, 50, &tx_2); + let _ = db.commit_transaction(tx_2); + + insert_empty_sum_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_A); + + let tx_3 = db.start_transaction(); + insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_A], 1, 500, &tx_3); + insert_sum_element_db(&db, &[MAIN_ΚΕΥ, KEY_INT_A], 501, 550, &tx_3); + let _ = db.commit_transaction(tx_3); + db +} + +fn create_empty_db(grovedb_path: String) -> GroveDb { + let db = GroveDb::open(grovedb_path).unwrap(); + db +} + +fn main() { + let path_source = generate_random_path("../tutorial-storage/", "/db_0", 24); + let db_source = populate_db(path_source.clone()); + + let checkpoint_dir = path_source + "/checkpoint"; + let path_checkpoint = Path::new(checkpoint_dir.as_str()); + + db_source.create_checkpoint(&path_checkpoint).expect("cannot create checkpoint"); + let db_checkpoint_0 = GroveDb::open(path_checkpoint).expect("cannot 
open groveDB from checkpoint"); + + let path_destination = generate_random_path("../tutorial-storage/", "/db_copy", 24); + let db_destination = create_empty_db(path_destination.clone()); + + println!("\n######### root_hashes:"); + let root_hash_source = db_source.root_hash(None, grove_version).unwrap().unwrap(); + println!("root_hash_source: {:?}", hex::encode(root_hash_source)); + let root_hash_checkpoint_0 = db_checkpoint_0.root_hash(None, grove_version).unwrap().unwrap(); + println!("root_hash_checkpoint_0: {:?}", hex::encode(root_hash_checkpoint_0)); + let root_hash_destination = db_destination.root_hash(None, grove_version).unwrap().unwrap(); + println!("root_hash_destination: {:?}", hex::encode(root_hash_destination)); + + println!("\n######### source_subtree_metadata of db_source"); + let subtrees_metadata_source = db_source.get_subtrees_metadata(None, grove_version).unwrap(); + println!("{:?}", subtrees_metadata_source); + + println!("\n######### db_checkpoint_0 -> db_destination state sync"); + let state_info = MultiStateSyncInfo::default(); + let tx = db_destination.start_transaction(); + sync_db_demo(&db_checkpoint_0, &db_destination, state_info, &tx).unwrap(); + db_destination.commit_transaction(tx).unwrap().expect("expected to commit transaction"); + + println!("\n######### verify db_destination"); + let incorrect_hashes = db_destination.verify_grovedb(None, grove_version).unwrap(); + if incorrect_hashes.len() > 0 { + println!("DB verification failed!"); + } + else { + println!("DB verification success"); + } + + println!("\n######### root_hashes:"); + let root_hash_source = db_source.root_hash(None, grove_version).unwrap().unwrap(); + println!("root_hash_source: {:?}", hex::encode(root_hash_source)); + let root_hash_checkpoint_0 = db_checkpoint_0.root_hash(None, grove_version).unwrap().unwrap(); + println!("root_hash_checkpoint_0: {:?}", hex::encode(root_hash_checkpoint_0)); + let root_hash_destination = db_destination.root_hash(None, 
grove_version).unwrap().unwrap(); + println!("root_hash_destination: {:?}", hex::encode(root_hash_destination)); + + let query_path = &[MAIN_KEY, KEY_INT_0]; + let query_key = (20487u32).to_be_bytes().to_vec(); + println!("\n######## Query on db_checkpoint_0:"); + query_db(&db_checkpoint_0, query_path, query_key.clone()); + println!("\n######## Query on db_destination:"); + query_db(&db_destination, query_path, query_key.clone()); + + return; + +} + +fn insert_empty_tree_db(db: &GroveDb, path: &[&[u8]], key: &[u8]) +{ + db.insert(path, key, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) + .unwrap() + .expect("successfully inserted tree"); +} +fn insert_range_values_db(db: &GroveDb, path: &[&[u8]], min_i: u32, max_i: u32, transaction: &Transaction) +{ + for i in min_i..=max_i { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + path, + &i_vec, + Element::new_item(i_vec.to_vec()), + INSERT_OPTIONS, + Some(&transaction), + grove_version, + ) + .unwrap() + .expect("successfully inserted values"); + } +} + +fn insert_range_ref_double_values_db(db: &GroveDb, path: &[&[u8]], ref_key: &[u8], min_i: u32, max_i: u32, transaction: &Transaction) +{ + for i in min_i..=max_i { + let i_vec = i.to_be_bytes().to_vec(); + let value = i * 2; + let value_vec = value.to_be_bytes().to_vec(); + db.insert( + path, + &i_vec, + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + MAIN_KEY.to_vec(), + ref_key.to_vec(), + value_vec.to_vec() + ])), + INSERT_OPTIONS, + Some(&transaction), + grove_version, + ) + .unwrap() + .expect("successfully inserted values"); + } +} + +fn insert_empty_sum_tree_db(db: &GroveDb, path: &[&[u8]], key: &[u8]) +{ + db.insert(path, key, Element::empty_sum_tree(), INSERT_OPTIONS, None, grove_version) + .unwrap() + .expect("successfully inserted tree"); +} +fn insert_sum_element_db(db: &GroveDb, path: &[&[u8]], min_i: u32, max_i: u32, transaction: &Transaction) +{ + for i in min_i..=max_i { + //let value : u32 = i; + let value = i 
as u64; + //let value: u64 = 1; + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + path, + &i_vec, + Element::new_sum_item(value as SumValue), + INSERT_OPTIONS, + Some(&transaction), + grove_version, + ) + .unwrap() + .expect("successfully inserted values"); + } +} +fn generate_random_path(prefix: &str, suffix: &str, len: usize) -> String { + let random_string: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(len) + .map(char::from) + .collect(); + format!("{}{}{}", prefix, random_string, suffix) +} + +fn query_db(db: &GroveDb, path: &[&[u8]], key: Vec<u8>) { + let path_vec: Vec<Vec<u8>> = path.iter() + .map(|&slice| slice.to_vec()) + .collect(); + + let mut query = Query::new(); + query.insert_key(key); + + let path_query = PathQuery::new_unsized(path_vec, query.clone()); + + let (elements, _) = db + .query_item_value(&path_query, true, false, true, None) + .unwrap() + .expect("expected successful get_path_query"); + for e in elements.into_iter() { + println!(">> {:?}", e); + } + + let proof = db.prove_query(&path_query, None, grove_version).unwrap().unwrap(); + // Get hash from query proof and print to terminal along with GroveDB root hash. 
+ let (verify_hash, _) = GroveDb::verify_query(&proof, &path_query, grove_version).unwrap(); + println!("verify_hash: {:?}", hex::encode(verify_hash)); + if verify_hash == db.root_hash(None, grove_version).unwrap().unwrap() { + println!("Query verified"); + } else { println!("Verification FAILED"); }; +} + +fn sync_db_demo( + source_db: &GroveDb, + target_db: &GroveDb, + state_sync_info: MultiStateSyncInfo, + target_tx: &Transaction, +) -> Result<(), grovedb::Error> { + let app_hash = source_db.root_hash(None, grove_version).value.unwrap(); + let mut state_sync_info = target_db.start_snapshot_syncing(state_sync_info, app_hash, target_tx, CURRENT_STATE_SYNC_VERSION, grove_version)?; + + let mut chunk_queue : VecDeque<Vec<u8>> = VecDeque::new(); + + // The very first chunk to fetch is always identified by the root app_hash + chunk_queue.push_back(app_hash.to_vec()); + + while let Some(chunk_id) = chunk_queue.pop_front() { + let ops = source_db.fetch_chunk(chunk_id.as_slice(), None, CURRENT_STATE_SYNC_VERSION, grove_version)?; + let (more_chunks, new_state_sync_info) = target_db.apply_chunk(state_sync_info, chunk_id.as_slice(), ops, target_tx, CURRENT_STATE_SYNC_VERSION, grove_version)?; + state_sync_info = new_state_sync_info; + chunk_queue.extend(more_chunks); + } + + Ok(()) +} + diff --git a/visualize/Cargo.toml b/visualize/Cargo.toml index a1fc2fcbb..ac93bff0b 100644 --- a/visualize/Cargo.toml +++ b/visualize/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-visualize" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" edition = "2021" license = "MIT" description = "Visualizer extension crate for GroveDB" @@ -12,4 +12,4 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] hex = "0.4.3" -itertools = "0.12.1"