From 401c8e802bbf4b846b4f454762c394005a5eda7b Mon Sep 17 00:00:00 2001 From: Wisdom Ogwu <40731160+iammadab@users.noreply.github.com> Date: Thu, 13 Jul 2023 09:57:02 +0100 Subject: [PATCH 01/37] feat: grovedb data wipe (#264) * v1 of storage wipe * implement guarded option * cleanup wipe function * delete guarded option type * fmt * get rid of self.db() * use on one rocksdb constructor function * fmt --- grovedb/src/lib.rs | 6 ++++ grovedb/src/tests/mod.rs | 38 ++++++++++++++++++++++++++ storage/src/rocksdb_storage/storage.rs | 14 ++++++++-- 3 files changed, 55 insertions(+), 3 deletions(-) diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 8dac7975..fbf9b0b5 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -249,6 +249,12 @@ impl GroveDb { Ok(GroveDb { db }) } + /// Deletes GroveDB folder from disk and drop GroveDB instance + pub fn wipe(self) -> Result<(), Error> { + self.db.wipe()?; + Ok(()) + } + /// Opens the transactional Merk at the given path. Returns CostResult. 
fn open_transactional_merk_at_path<'db, 'b, B>( &'db self, diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index c67b119e..74b993e1 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -2907,3 +2907,41 @@ fn test_tree_value_exists_method_tx() { .unwrap()); assert!(db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); } + +#[test] +fn test_storage_wipe() { + let db = make_test_grovedb(); + let path = db._tmp_dir.path(); + + // Test keys in non-root tree + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::new_item(b"ayy".to_vec()), + None, + None, + ) + .unwrap() + .expect("cannot insert item"); + + // retrieve key before wipe + let elem = db + .get(&[TEST_LEAF.as_ref()], b"key", None) + .unwrap() + .unwrap(); + assert_eq!(elem, Element::new_item(b"ayy".to_vec())); + + // wipe the database + db.grove_db.wipe().unwrap(); + + // re-open database + let db = GroveDb::open(path).unwrap(); + + // retrieve key after wipe + let elem_result = db.get(&[TEST_LEAF.as_ref()], b"key", None).unwrap(); + assert!(elem_result.is_err()); + assert!(matches!( + elem_result, + Err(Error::PathParentLayerNotFound(..)) + )); +} diff --git a/storage/src/rocksdb_storage/storage.rs b/storage/src/rocksdb_storage/storage.rs index 96d17a7e..7cd869a7 100644 --- a/storage/src/rocksdb_storage/storage.rs +++ b/storage/src/rocksdb_storage/storage.rs @@ -40,8 +40,8 @@ use grovedb_path::SubtreePath; use integer_encoding::VarInt; use lazy_static::lazy_static; use rocksdb::{ - checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, OptimisticTransactionDB, - Transaction, WriteBatchWithTransaction, + checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, OptimisticTransactionDB, Options, + Transaction, WriteBatchWithTransaction, DB, }; use super::{ @@ -53,6 +53,7 @@ use crate::{ error::Error::{CostError, RocksDBError}, storage::AbstractBatchOperation, worst_case_costs::WorstKeyLength, + Error::StorageError, Storage, StorageBatch, }; @@ -112,7 +113,6 
@@ impl RocksDbStorage { ], ) .map_err(RocksDBError)?; - Ok(RocksDbStorage { db }) } @@ -405,6 +405,14 @@ impl RocksDbStorage { .wrap_with_cost(OperationCost::default()) } } + + /// Destroys the OptimisticTransactionDB and drops instance + pub fn wipe(self) -> Result<(), Error> { + let path = self.db.path().to_path_buf(); + drop(self); + DB::destroy(&Options::default(), path).map_err(|e| StorageError(e.into_string()))?; + Ok(()) + } } impl<'db> Storage<'db> for RocksDbStorage { From cc1b577227525b2cfa9114590283b1a4a4d91661 Mon Sep 17 00:00:00 2001 From: Wisdom Ogwu <40731160+iammadab@users.noreply.github.com> Date: Fri, 21 Jul 2023 11:07:39 +0100 Subject: [PATCH 02/37] feat: raw iter data wipe (#265) * raw iterator delete * wipe grovedb data with &self * update comment * cleanup --- grovedb/src/lib.rs | 4 ++-- grovedb/src/tests/mod.rs | 3 --- storage/src/rocksdb_storage/storage.rs | 30 +++++++++++++++++++++----- 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index fbf9b0b5..87ca2443 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -249,8 +249,8 @@ impl GroveDb { Ok(GroveDb { db }) } - /// Deletes GroveDB folder from disk and drop GroveDB instance - pub fn wipe(self) -> Result<(), Error> { + /// Uses raw iter to delete GroveDB key values pairs from rocksdb + pub fn wipe(&self) -> Result<(), Error> { self.db.wipe()?; Ok(()) } diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index 74b993e1..e078237d 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -2934,9 +2934,6 @@ fn test_storage_wipe() { // wipe the database db.grove_db.wipe().unwrap(); - // re-open database - let db = GroveDb::open(path).unwrap(); - // retrieve key after wipe let elem_result = db.get(&[TEST_LEAF.as_ref()], b"key", None).unwrap(); assert!(elem_result.is_err()); diff --git a/storage/src/rocksdb_storage/storage.rs b/storage/src/rocksdb_storage/storage.rs index 7cd869a7..b386ecfc 100644 --- 
a/storage/src/rocksdb_storage/storage.rs +++ b/storage/src/rocksdb_storage/storage.rs @@ -41,7 +41,7 @@ use integer_encoding::VarInt; use lazy_static::lazy_static; use rocksdb::{ checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, OptimisticTransactionDB, Options, - Transaction, WriteBatchWithTransaction, DB, + Transaction, WriteBatchWithTransaction, DB, DEFAULT_COLUMN_FAMILY_NAME, }; use super::{ @@ -407,10 +407,30 @@ impl RocksDbStorage { } /// Destroys the OptimisticTransactionDB and drops instance - pub fn wipe(self) -> Result<(), Error> { - let path = self.db.path().to_path_buf(); - drop(self); - DB::destroy(&Options::default(), path).map_err(|e| StorageError(e.into_string()))?; + pub fn wipe(&self) -> Result<(), Error> { + // TODO: fix this + // very inefficient way of doing this, time complexity is O(n) + // we can do O(1) + self.wipe_column_family(DEFAULT_COLUMN_FAMILY_NAME)?; + self.wipe_column_family(ROOTS_CF_NAME)?; + self.wipe_column_family(AUX_CF_NAME)?; + self.wipe_column_family(META_CF_NAME)?; + Ok(()) + } + + fn wipe_column_family(&self, column_family_name: &str) -> Result<(), Error> { + let cf_handle = self + .db + .cf_handle(column_family_name) + .ok_or(Error::StorageError( + "failed to get column family handle".to_string(), + ))?; + let mut iter = self.db.raw_iterator_cf(&cf_handle); + iter.seek_to_first(); + while iter.valid() { + self.db.delete(iter.key().expect("should have key")); + iter.next() + } Ok(()) } } From 8380363e3c473c015b2bf70fcba60b9f47c9df35 Mon Sep 17 00:00:00 2001 From: Wisdom Ogwu <40731160+iammadab@users.noreply.github.com> Date: Mon, 4 Sep 2023 21:04:30 +0100 Subject: [PATCH 03/37] add last key to subquery path only after subquery path verification (#268) --- grovedb/src/operations/proof/verify.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index 60b954dc..ac13306c 100644 --- 
a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -678,7 +678,6 @@ impl ProofVerifier { ProofTokenType::Merk | ProofTokenType::SizedMerk => { let mut key_as_query = Query::new(); key_as_query.insert_key(last_key.to_owned()); - current_path.push(last_key); let verification_result = self.execute_merk_proof( proof_token_type, @@ -688,6 +687,8 @@ impl ProofVerifier { current_path.to_owned(), )?; + current_path.push(last_key); + Ok((verification_result.0, verification_result.1, false)) } _ => Err(Error::InvalidProof( From 75b4df1177a830a7a280d65a8c179c93c225fe73 Mon Sep 17 00:00:00 2001 From: Wisdom Ogwu <40731160+iammadab@users.noreply.github.com> Date: Mon, 25 Sep 2023 20:32:48 +0100 Subject: [PATCH 04/37] feat: Improve verify grovedb + better debug statements (#269) * Improve verify grovedb + better debug statements * clean up * return error rather than panic for verify_grovedb * fix compile errors --- grovedb/src/batch/mod.rs | 55 +++++++++------- grovedb/src/lib.rs | 137 ++++++++++++++++++++++++++++++++++----- 2 files changed, 152 insertions(+), 40 deletions(-) diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index f6120209..1ea50305 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -371,32 +371,42 @@ impl fmt::Debug for GroveDbOp { let op_dbg = match &self.op { Op::Insert { element } => match element { - Element::Item(..) => "Insert Item", - Element::Reference(..) => "Insert Ref", - Element::Tree(..) => "Insert Tree", - Element::SumTree(..) => "Insert Sum Tree", - Element::SumItem(..) => "Insert Sum Item", + Element::Item(..) => "Insert Item".to_string(), + Element::Reference(..) => "Insert Ref".to_string(), + Element::Tree(..) => "Insert Tree".to_string(), + Element::SumTree(..) => "Insert Sum Tree".to_string(), + Element::SumItem(..) => "Insert Sum Item".to_string(), }, Op::Replace { element } => match element { - Element::Item(..) => "Replace Item", - Element::Reference(..) 
=> "Replace Ref", - Element::Tree(..) => "Replace Tree", - Element::SumTree(..) => "Replace Sum Tree", - Element::SumItem(..) => "Replace Sum Item", + Element::Item(..) => "Replace Item".to_string(), + Element::Reference(..) => "Replace Ref".to_string(), + Element::Tree(..) => "Replace Tree".to_string(), + Element::SumTree(..) => "Replace Sum Tree".to_string(), + Element::SumItem(..) => "Replace Sum Item".to_string(), }, Op::Patch { element, .. } => match element { - Element::Item(..) => "Patch Item", - Element::Reference(..) => "Patch Ref", - Element::Tree(..) => "Patch Tree", - Element::SumTree(..) => "Patch Sum Tree", - Element::SumItem(..) => "Patch Sum Item", + Element::Item(..) => "Patch Item".to_string(), + Element::Reference(..) => "Patch Ref".to_string(), + Element::Tree(..) => "Patch Tree".to_string(), + Element::SumTree(..) => "Patch Sum Tree".to_string(), + Element::SumItem(..) => "Patch Sum Item".to_string(), }, - Op::RefreshReference { .. } => "Refresh Reference", - Op::Delete => "Delete", - Op::DeleteTree => "Delete Tree", - Op::DeleteSumTree => "Delete Sum Tree", - Op::ReplaceTreeRootKey { .. } => "Replace Tree Hash and Root Key", - Op::InsertTreeWithRootHash { .. } => "Insert Tree Hash and Root Key", + Op::RefreshReference { + reference_path_type, + max_reference_hop, + trust_refresh_reference, + .. + } => { + format!( + "Refresh Reference: path {:?}, max_hop {:?}, trust_reference {} ", + reference_path_type, max_reference_hop, trust_refresh_reference + ) + } + Op::Delete => "Delete".to_string(), + Op::DeleteTree => "Delete Tree".to_string(), + Op::DeleteSumTree => "Delete Sum Tree".to_string(), + Op::ReplaceTreeRootKey { .. } => "Replace Tree Hash and Root Key".to_string(), + Op::InsertTreeWithRootHash { .. 
} => "Insert Tree Hash and Root Key".to_string(), }; f.debug_struct("GroveDbOp") @@ -1150,7 +1160,8 @@ where let Element::Reference(path_reference, max_reference_hop, _) = &element else { return Err(Error::InvalidInput( "trying to refresh a an element that is not a reference", - )).wrap_with_cost(cost) + )) + .wrap_with_cost(cost); }; let merk_feature_type = if is_sum_tree { diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 87ca2443..050d6b28 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -224,6 +224,7 @@ pub use crate::error::Error; use crate::helpers::raw_decode; #[cfg(feature = "full")] use crate::util::{root_merk_optional_tx, storage_context_optional_tx}; +use crate::Error::MerkError; #[cfg(feature = "full")] type Hash = [u8; 32]; @@ -783,8 +784,11 @@ impl GroveDb { } /// Method to visualize hash mismatch after verification - pub fn visualize_verify_grovedb(&self) -> HashMap { - self.verify_grovedb() + pub fn visualize_verify_grovedb( + &self, + ) -> Result, Error> { + Ok(self + .verify_grovedb(None)? .iter() .map(|(path, (root_hash, expected, actual))| { ( @@ -799,27 +803,41 @@ impl GroveDb { ), ) }) - .collect() + .collect()) } /// Method to check that the value_hash of Element::Tree nodes are computed /// correctly. 
- pub fn verify_grovedb(&self) -> HashMap>, (CryptoHash, CryptoHash, CryptoHash)> { - let root_merk = self - .open_non_transactional_merk_at_path(SubtreePath::empty(), None) - .unwrap() - .expect("should exist"); - self.verify_merk_and_submerks(root_merk, &SubtreePath::empty(), None) + pub fn verify_grovedb( + &self, + transaction: TransactionArg, + ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { + if let Some(transaction) = transaction { + let root_merk = self + .open_transactional_merk_at_path(SubtreePath::empty(), transaction, None) + .unwrap()?; + self.verify_merk_and_submerks_in_transaction( + root_merk, + &SubtreePath::empty(), + None, + transaction, + ) + } else { + let root_merk = self + .open_non_transactional_merk_at_path(SubtreePath::empty(), None) + .unwrap()?; + self.verify_merk_and_submerks(root_merk, &SubtreePath::empty(), None) + } } /// Verifies that the root hash of the given merk and all submerks match /// those of the merk and submerks at the given path. Returns any issues. - fn verify_merk_and_submerks<'db, B: AsRef<[u8]>>( + fn verify_merk_and_submerks<'db, B: AsRef<[u8]>, S: StorageContext<'db>>( &'db self, - merk: Merk, + merk: Merk, path: &SubtreePath, batch: Option<&'db StorageBatch>, - ) -> HashMap>, (CryptoHash, CryptoHash, CryptoHash)> { + ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { let mut all_query = Query::new(); all_query.insert_all(); @@ -828,20 +846,83 @@ impl GroveDb { let mut element_iterator = KVIterator::new(merk.storage.raw_iter(), &all_query).unwrap(); while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { - let element = raw_decode(&element_value).unwrap(); + let element = raw_decode(&element_value)?; if element.is_tree() { let (kv_value, element_value_hash) = merk .get_value_and_value_hash(&key, true) .unwrap() - .unwrap() - .unwrap(); + .map_err(MerkError)? 
+ .ok_or(Error::CorruptedData( + "expected merk to contain value at key".to_string(), + ))?; let new_path = path.derive_owned_with_child(key); let new_path_ref = SubtreePath::from(&new_path); let inner_merk = self .open_non_transactional_merk_at_path(new_path_ref.clone(), batch) + .unwrap()?; + let root_hash = inner_merk.root_hash().unwrap(); + + let actual_value_hash = value_hash(&kv_value).unwrap(); + let combined_value_hash = combine_hash(&actual_value_hash, &root_hash).unwrap(); + + if combined_value_hash != element_value_hash { + issues.insert( + new_path.to_vec(), + (root_hash, combined_value_hash, element_value_hash), + ); + } + issues.extend(self.verify_merk_and_submerks(inner_merk, &new_path_ref, batch)?); + } else if element.is_item() { + let (kv_value, element_value_hash) = merk + .get_value_and_value_hash(&key, true) + .unwrap() + .map_err(MerkError)? + .ok_or(Error::CorruptedData( + "expected merk to contain value at key".to_string(), + ))?; + let actual_value_hash = value_hash(&kv_value).unwrap(); + if actual_value_hash != element_value_hash { + issues.insert( + path.derive_owned_with_child(key).to_vec(), + (actual_value_hash, element_value_hash, actual_value_hash), + ); + } + } + } + Ok(issues) + } + + fn verify_merk_and_submerks_in_transaction<'db, B: AsRef<[u8]>, S: StorageContext<'db>>( + &'db self, + merk: Merk, + path: &SubtreePath, + batch: Option<&'db StorageBatch>, + transaction: &Transaction, + ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { + let mut all_query = Query::new(); + all_query.insert_all(); + + let _in_sum_tree = merk.is_sum_tree; + let mut issues = HashMap::new(); + let mut element_iterator = KVIterator::new(merk.storage.raw_iter(), &all_query).unwrap(); + + while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { + let element = raw_decode(&element_value)?; + if element.is_tree() { + let (kv_value, element_value_hash) = merk + .get_value_and_value_hash(&key, true) .unwrap() - .expect("should 
exist"); + .map_err(MerkError)? + .ok_or(Error::CorruptedData( + "expected merk to contain value at key".to_string(), + ))?; + let new_path = path.derive_owned_with_child(key); + let new_path_ref = SubtreePath::from(&new_path); + + let inner_merk = self + .open_transactional_merk_at_path(new_path_ref.clone(), transaction, batch) + .unwrap()?; let root_hash = inner_merk.root_hash().unwrap(); let actual_value_hash = value_hash(&kv_value).unwrap(); @@ -853,9 +934,29 @@ impl GroveDb { (root_hash, combined_value_hash, element_value_hash), ); } - issues.extend(self.verify_merk_and_submerks(inner_merk, &new_path_ref, batch)); + issues.extend(self.verify_merk_and_submerks_in_transaction( + inner_merk, + &new_path_ref, + batch, + transaction, + )?); + } else if element.is_item() { + let (kv_value, element_value_hash) = merk + .get_value_and_value_hash(&key, true) + .unwrap() + .map_err(MerkError)? + .ok_or(Error::CorruptedData( + "expected merk to contain value at key".to_string(), + ))?; + let actual_value_hash = value_hash(&kv_value).unwrap(); + if actual_value_hash != element_value_hash { + issues.insert( + path.derive_owned_with_child(key).to_vec(), + (actual_value_hash, element_value_hash, actual_value_hash), + ); + } } } - issues + Ok(issues) } } From 5bed4f5696e2c693685453047ed3b285e9aa6478 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Thu, 28 Sep 2023 01:02:00 +0700 Subject: [PATCH 05/37] fix: just in time update (#270) * some refactoring * tree to tree node * removed just in time updates from commit time * more work * more work * removed a comma * added a callback method to be able to get the value defined costs * more fixes * grovedb working again * fixed proof feature * fixed bench * clippy fixes * fmt fixes * updated to rc2 * updated caching system * removed cache * readded cache * merge --- .github/workflows/grovedb.yml | 8 + .github/workflows/nodejs.yml | 64 - costs/Cargo.toml | 2 +- costs/src/storage_cost/removal.rs | 9 +- grovedb/Cargo.toml | 12 +- 
.../estimated_costs/average_case_costs.rs | 12 +- grovedb/src/batch/mod.rs | 107 +- grovedb/src/batch/single_insert_cost_tests.rs | 5 +- .../single_sum_item_insert_cost_tests.rs | 4 +- grovedb/src/element/delete.rs | 15 +- grovedb/src/element/exists.rs | 7 +- grovedb/src/element/get.rs | 36 +- grovedb/src/element/helpers.rs | 47 +- grovedb/src/element/insert.rs | 3 + .../src/estimated_costs/average_case_costs.rs | 18 +- .../src/estimated_costs/worst_case_costs.rs | 15 +- grovedb/src/lib.rs | 107 +- grovedb/src/operations/delete/average_case.rs | 2 +- grovedb/src/operations/delete/mod.rs | 7 +- grovedb/src/operations/delete/worst_case.rs | 2 +- grovedb/src/operations/insert/mod.rs | 23 +- grovedb/src/operations/proof/generate.rs | 21 +- grovedb/src/operations/proof/verify.rs | 2 +- grovedb/src/replication.rs | 1 + grovedb/src/tests/mod.rs | 46 +- grovedb/src/tests/sum_tree_tests.rs | 151 ++- grovedb/src/tests/tree_hashes_tests.rs | 76 +- grovedb/src/util.rs | 20 +- grovedb/src/versioning.rs | 9 +- merk/Cargo.toml | 10 +- merk/benches/merk.rs | 68 +- merk/benches/ops.rs | 7 +- .../src/estimated_costs/average_case_costs.rs | 24 +- merk/src/estimated_costs/worst_case_costs.rs | 6 +- merk/src/lib.rs | 5 +- merk/src/merk/apply.rs | 321 +++++ merk/src/merk/chunks.rs | 4 + merk/src/merk/clear.rs | 32 + merk/src/merk/committer.rs | 59 + merk/src/merk/get.rs | 326 +++++ merk/src/merk/mod.rs | 1199 ++--------------- merk/src/merk/open.rs | 185 +++ merk/src/merk/prove.rs | 147 ++ merk/src/merk/restore.rs | 73 +- merk/src/merk/source.rs | 49 + merk/src/owner.rs | 21 + merk/src/proofs/chunk.rs | 94 +- merk/src/proofs/encoding.rs | 18 +- merk/src/proofs/query/mod.rs | 147 +- merk/src/test_utils/mod.rs | 86 +- merk/src/test_utils/temp_merk.rs | 17 +- merk/src/tree/commit.rs | 46 +- merk/src/tree/debug.rs | 6 +- merk/src/tree/encoding.rs | 103 +- merk/src/tree/fuzz_tests.rs | 4 +- merk/src/tree/iter.rs | 10 +- merk/src/tree/just_in_time_value_update.rs | 82 ++ merk/src/tree/kv.rs | 
69 +- merk/src/tree/link.rs | 30 +- merk/src/tree/mod.rs | 393 +++--- merk/src/tree/ops.rs | 467 +++++-- merk/src/tree/tree_feature_type.rs | 32 +- merk/src/tree/walk/fetch.rs | 10 +- merk/src/tree/walk/mod.rs | 345 +++-- merk/src/tree/walk/ref_walker.rs | 20 +- merk/src/visualize.rs | 6 +- path/Cargo.toml | 2 +- path/src/lib.rs | 2 +- storage/Cargo.toml | 8 +- storage/src/rocksdb_storage/storage.rs | 7 +- .../storage_context/context_no_tx.rs | 4 +- .../storage_context/context_tx.rs | 4 +- .../storage_context/raw_iterator.rs | 4 +- visualize/Cargo.toml | 2 +- 74 files changed, 3189 insertions(+), 2196 deletions(-) delete mode 100644 .github/workflows/nodejs.yml create mode 100644 merk/src/merk/apply.rs create mode 100644 merk/src/merk/clear.rs create mode 100644 merk/src/merk/committer.rs create mode 100644 merk/src/merk/get.rs create mode 100644 merk/src/merk/open.rs create mode 100644 merk/src/merk/prove.rs create mode 100644 merk/src/merk/source.rs create mode 100644 merk/src/tree/just_in_time_value_update.rs diff --git a/.github/workflows/grovedb.yml b/.github/workflows/grovedb.yml index af4242de..118beafb 100644 --- a/.github/workflows/grovedb.yml +++ b/.github/workflows/grovedb.yml @@ -27,6 +27,8 @@ jobs: - name: Enable Rust cache uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: "false" - run: cargo test --workspace --all-features @@ -52,6 +54,8 @@ jobs: - name: Enable Rust cache uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: "false" - uses: actions-rs/clippy-check@v1 with: @@ -78,6 +82,8 @@ jobs: - name: Enable Rust cache uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: "false" - run: exit `cargo +nightly fmt --check | wc -l` @@ -100,6 +106,8 @@ jobs: - name: Enable Rust cache uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: "false" - run: cargo check diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml deleted file mode 100644 index bac693cb..00000000 --- a/.github/workflows/nodejs.yml +++ /dev/null @@ 
-1,64 +0,0 @@ -on: - workflow_dispatch: - pull_request: - branches: - - master - -name: Node.JS binding - -jobs: - test: - name: Tests - runs-on: ubuntu-latest - steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - - uses: actions/checkout@v2 - - - name: Setup Node.JS - uses: actions/setup-node@v2 - with: - node-version: '16' - - - name: Setup Rust toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - default: true - - - name: Install NPM deps - run: npm ci - - - name: Run tests - run: npm test - - linting: - name: Linting - runs-on: ubuntu-latest - steps: - - name: Cancel previous runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - - uses: actions/checkout@v2 - - - name: Setup Node.JS - uses: actions/setup-node@v2 - with: - node-version: '16' - - - name: Setup Rust toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - default: true - - - name: Install NPM deps - run: npm ci - - - name: Run ES linter - run: npm run lint \ No newline at end of file diff --git a/costs/Cargo.toml b/costs/Cargo.toml index 5d9784d9..6aaa6ece 100644 --- a/costs/Cargo.toml +++ b/costs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-costs" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" edition = "2021" license = "MIT" description = "Costs extension crate for GroveDB" diff --git a/costs/src/storage_cost/removal.rs b/costs/src/storage_cost/removal.rs index 6d36a57b..9fa7af99 100644 --- a/costs/src/storage_cost/removal.rs +++ b/costs/src/storage_cost/removal.rs @@ -49,9 +49,10 @@ pub const UNKNOWN_EPOCH: u64 = u64::MAX; pub type StorageRemovalPerEpochByIdentifier = BTreeMap>; /// Removal bytes -#[derive(Debug, PartialEq, Clone, Eq)] +#[derive(Debug, PartialEq, Clone, Eq, Default)] pub enum StorageRemovedBytes { /// No storage removal + #[default] NoStorageRemoval, /// Basic storage removal BasicStorageRemoval(u32), @@ -59,12 +60,6 
@@ pub enum StorageRemovedBytes { SectionedStorageRemoval(StorageRemovalPerEpochByIdentifier), } -impl Default for StorageRemovedBytes { - fn default() -> Self { - NoStorageRemoval - } -} - impl Add for StorageRemovedBytes { type Output = Self; diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index a03f48e2..a7fb382b 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "grovedb" description = "Fully featured database using balanced hierarchical authenticated data structures" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" authors = ["Samuel Westrich ", "Wisdom Ogwu "] edition = "2021" license = "MIT" @@ -12,21 +12,21 @@ documentation = "https://docs.rs/grovedb" [dependencies] -grovedb-merk = { version = "1.0.0-rc.1", path = "../merk", optional = true, default-features = false } +grovedb-merk = { version = "1.0.0-rc.2", path = "../merk", optional = true, default-features = false } thiserror = { version = "1.0.37", optional = true } tempfile = { version = "3.3.0", optional = true } bincode = { version = "1.3.3", optional = true } serde = { version = "1.0.149", optional = true } -grovedb-storage = { version = "1.0.0-rc.1", path = "../storage", optional = true } -grovedb-visualize = { version = "1.0.0-rc.1", path = "../visualize", optional = true } +grovedb-storage = { version = "1.0.0-rc.2", path = "../storage", optional = true } +grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize", optional = true } hex = { version = "0.4.3", optional = true } itertools = { version = "0.10.5", optional = true } integer-encoding = { version = "3.0.4", optional = true } -grovedb-costs = { version = "1.0.0-rc.1", path = "../costs", optional = true } +grovedb-costs = { version = "1.0.0-rc.2", path = "../costs", optional = true } nohash-hasher = { version = "0.2.0", optional = true } indexmap = { version = "1.9.2", optional = true } intmap = { version = "2.0.0", optional = true } -grovedb-path = { version = "1.0.0-rc.1", path = 
"../path" } +grovedb-path = { version = "1.0.0-rc.2", path = "../path" } [dev-dependencies] rand = "0.8.5" diff --git a/grovedb/src/batch/estimated_costs/average_case_costs.rs b/grovedb/src/batch/estimated_costs/average_case_costs.rs index e6e58975..0a8d573d 100644 --- a/grovedb/src/batch/estimated_costs/average_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/average_case_costs.rs @@ -212,8 +212,8 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { self.paths.get(path).ok_or_else(|| { let paths = self .paths - .iter() - .map(|(k, _v)| k.0.iter().map(|k| hex::encode(k.as_slice())).join("/")) + .keys() + .map(|k| k.0.iter().map(|k| hex::encode(k.as_slice())).join("/")) .join(" | "); Error::PathNotFoundInCacheForEstimatedCosts(format!( "required path {} not found in paths {}", @@ -234,8 +234,8 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { self.paths.get(path).ok_or_else(|| { let paths = self .paths - .iter() - .map(|(k, _v)| k.0.iter().map(|k| hex::encode(k.as_slice())).join("/")) + .keys() + .map(|k| k.0.iter().map(|k| hex::encode(k.as_slice())).join("/")) .join(" | "); Error::PathNotFoundInCacheForEstimatedCosts(format!( "required path for estimated merk caching {} not found in paths {}", @@ -576,7 +576,7 @@ mod tests { seek_count: 5, // todo: why is this 5 storage_cost: StorageCost { added_bytes: 115, - replaced_bytes: 106, + replaced_bytes: 75, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 109, @@ -723,7 +723,7 @@ mod tests { seek_count: 41, storage_cost: StorageCost { added_bytes: 0, - replaced_bytes: 5625, + replaced_bytes: 5594, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 7669, diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index 1ea50305..a3b2d502 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -80,7 +80,7 @@ use grovedb_merk::{ value_hash, NULL_HASH, }, CryptoHash, Error as MerkError, Merk, MerkType, RootHashKeyAndSum, - TreeFeatureType::{BasicMerk, SummedMerk}, + 
TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; use grovedb_path::SubtreePath; use grovedb_storage::{ @@ -776,8 +776,12 @@ where if recursions_allowed == 1 { let referenced_element_value_hash_opt = cost_return_on_error!( &mut cost, - merk.get_value_hash(key.as_ref(), true) - .map_err(|e| Error::CorruptedData(e.to_string())) + merk.get_value_hash( + key.as_ref(), + true, + Some(Element::value_defined_cost_for_serialized_value) + ) + .map_err(|e| Error::CorruptedData(e.to_string())) ); let referenced_element_value_hash = cost_return_on_error!( @@ -816,8 +820,12 @@ where // change in the batch. let referenced_element = cost_return_on_error!( &mut cost, - merk.get(key.as_ref(), true) - .map_err(|e| Error::CorruptedData(e.to_string())) + merk.get( + key.as_ref(), + true, + Some(Element::value_defined_cost_for_serialized_value) + ) + .map_err(|e| Error::CorruptedData(e.to_string())) ); let referenced_element = cost_return_on_error_no_add!( @@ -1140,14 +1148,18 @@ where } else { let value = cost_return_on_error!( &mut cost, - merk.get(key_info.as_slice(), true) - .map(|result_value| result_value - .map_err(Error::MerkError) - .and_then(|maybe_value| maybe_value.ok_or( - Error::InvalidInput( - "trying to refresh a non existing reference", - ) - ))) + merk.get( + key_info.as_slice(), + true, + Some(Element::value_defined_cost_for_serialized_value) + ) + .map( + |result_value| result_value.map_err(Error::MerkError).and_then( + |maybe_value| maybe_value.ok_or(Error::InvalidInput( + "trying to refresh a non existing reference", + )) + ) + ) ); cost_return_on_error_no_add!( &cost, @@ -1165,9 +1177,9 @@ where }; let merk_feature_type = if is_sum_tree { - SummedMerk(0) + SummedMerkNode(0) } else { - BasicMerk + BasicMerkNode }; let path_reference = cost_return_on_error!( @@ -1286,7 +1298,7 @@ where } cost_return_on_error!( &mut cost, - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch_operations, &[], 
Some(batch_apply_options.as_merk_options()), @@ -1294,6 +1306,7 @@ where Element::specialized_costs_for_key_value(key, value, is_sum_tree) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), &mut |storage_costs, old_value, new_value| { // todo: change the flags without full deserialization let old_element = Element::deserialize(old_value.as_slice()) @@ -1578,9 +1591,7 @@ impl GroveDb { // we need to pause the batch execution return Ok(Some(ops_by_level_paths)).wrap_with_cost(cost); } - if current_level > 0 { - current_level -= 1; - } + current_level = current_level.saturating_sub(1); } Ok(None).wrap_with_cost(cost) } @@ -1789,13 +1800,16 @@ impl GroveDb { ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData( - "cannot open a subtree with given root key".to_owned(), - ) - }) - .add_cost(cost) + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .add_cost(cost) } else { Err(Error::CorruptedPath( "cannot open a subtree as parent exists but is not a tree", @@ -1803,14 +1817,16 @@ impl GroveDb { .wrap_with_cost(OperationCost::default()) } } + } else if new_merk { + Ok(Merk::open_empty(storage, MerkType::BaseMerk, false)).wrap_with_cost(cost) } else { - if new_merk { - Ok(Merk::open_empty(storage, MerkType::BaseMerk, false)).wrap_with_cost(cost) - } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) - } + Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|_| Error::CorruptedData("cannot open a 
the root subtree".to_owned())) + .add_cost(cost) } } @@ -1845,11 +1861,16 @@ impl GroveDb { ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(local_cost) + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .add_cost(local_cost) } else { Err(Error::CorruptedData( "cannot open a subtree as parent exists but is not a tree".to_owned(), @@ -1857,9 +1878,13 @@ impl GroveDb { .wrap_with_cost(local_cost) } } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a subtree".to_owned())) - .add_cost(local_cost) + Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|_| Error::CorruptedData("cannot open a subtree".to_owned())) + .add_cost(local_cost) } } diff --git a/grovedb/src/batch/single_insert_cost_tests.rs b/grovedb/src/batch/single_insert_cost_tests.rs index c7f79146..1dd2d43c 100644 --- a/grovedb/src/batch/single_insert_cost_tests.rs +++ b/grovedb/src/batch/single_insert_cost_tests.rs @@ -335,9 +335,6 @@ mod tests { // Replaced bytes // 37 + 36 = 74 (key is not replaced) //needs update - // We instead are getting 106, because we are paying for (+ hash - key byte - // size) this means 31 extra bytes. - // In reality though we really are replacing 106 bytes. TBD what to do. 
// Hash node calls 8 // 1 to get tree hash @@ -359,7 +356,7 @@ mod tests { seek_count: 5, storage_cost: StorageCost { added_bytes: 115, - replaced_bytes: 106, // todo: this should actually be less + replaced_bytes: 75, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 71, // todo: verify and explain diff --git a/grovedb/src/batch/single_sum_item_insert_cost_tests.rs b/grovedb/src/batch/single_sum_item_insert_cost_tests.rs index 09ca5ee2..d1e13fea 100644 --- a/grovedb/src/batch/single_sum_item_insert_cost_tests.rs +++ b/grovedb/src/batch/single_sum_item_insert_cost_tests.rs @@ -209,7 +209,7 @@ mod tests { seek_count: 5, storage_cost: StorageCost { added_bytes: 124, - replaced_bytes: 106, // todo: this should actually be less + replaced_bytes: 75, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 71, // todo: verify and explain @@ -287,7 +287,7 @@ mod tests { seek_count: 5, storage_cost: StorageCost { added_bytes: 124, - replaced_bytes: 107, // todo: this should actually be less + replaced_bytes: 84, removed_bytes: NoStorageRemoval, }, storage_loaded_bytes: 72, // todo: verify and explain diff --git a/grovedb/src/element/delete.rs b/grovedb/src/element/delete.rs index 9766c546..92087bc4 100644 --- a/grovedb/src/element/delete.rs +++ b/grovedb/src/element/delete.rs @@ -57,10 +57,16 @@ impl Element { }; let batch = [(key, op)]; let uses_sum_nodes = merk.is_sum_tree; - merk.apply_with_specialized_costs::<_, Vec>(&batch, &[], merk_options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) - }) + merk.apply_with_specialized_costs::<_, Vec>( + &batch, + &[], + merk_options, + &|key, value| { + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + }, + Some(&Element::value_defined_cost_for_serialized_value), + ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -97,6 +103,7 @@ 
impl Element { Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), &mut |_costs, _old_value, _value| Ok((false, None)), sectioned_removal, ) diff --git a/grovedb/src/element/exists.rs b/grovedb/src/element/exists.rs index d0bf5ecb..c3bf61fa 100644 --- a/grovedb/src/element/exists.rs +++ b/grovedb/src/element/exists.rs @@ -48,7 +48,10 @@ impl Element { merk: &mut Merk, key: K, ) -> CostResult { - merk.exists(key.as_ref()) - .map_err(|e| Error::CorruptedData(e.to_string())) + merk.exists( + key.as_ref(), + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|e| Error::CorruptedData(e.to_string())) } } diff --git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index eae7baa0..b6f75b10 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -37,7 +37,7 @@ use grovedb_merk::tree::kv::KV; #[cfg(feature = "full")] use grovedb_merk::Merk; #[cfg(feature = "full")] -use grovedb_merk::{ed::Decode, tree::TreeInner}; +use grovedb_merk::{ed::Decode, tree::TreeNodeInner}; #[cfg(feature = "full")] use grovedb_storage::StorageContext; use integer_encoding::VarInt; @@ -78,8 +78,12 @@ impl Element { let value_opt = cost_return_on_error!( &mut cost, - merk.get(key.as_ref(), allow_cache) - .map_err(|e| Error::CorruptedData(e.to_string())) + merk.get( + key.as_ref(), + allow_cache, + Some(&Element::value_defined_cost_for_serialized_value) + ) + .map_err(|e| Error::CorruptedData(e.to_string())) ); let element = cost_return_on_error_no_add!( &cost, @@ -129,7 +133,7 @@ impl Element { .get(key_ref) .map_err(|e| Error::CorruptedData(e.to_string())) ); - let maybe_tree_inner: Option = cost_return_on_error_no_add!( + let maybe_tree_inner: Option = cost_return_on_error_no_add!( &cost, node_value_opt .map(|node_value| { @@ -230,8 +234,12 @@ impl Element { let value_hash = cost_return_on_error!( &mut cost, - 
merk.get_value_hash(key.as_ref(), allow_cache) - .map_err(|e| Error::CorruptedData(e.to_string())) + merk.get_value_hash( + key.as_ref(), + allow_cache, + Some(&Element::value_defined_cost_for_serialized_value) + ) + .map_err(|e| Error::CorruptedData(e.to_string())) ); Ok(value_hash).wrap_with_cost(cost) @@ -253,7 +261,13 @@ mod tests { let ctx = storage .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(); - let mut merk = Merk::open_base(ctx, false).unwrap().unwrap(); + let mut merk = Merk::open_base( + ctx, + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .unwrap(); Element::empty_tree() .insert(&mut merk, b"mykey", None) .unwrap() @@ -271,7 +285,13 @@ mod tests { let ctx = storage .get_storage_context(SubtreePath::empty(), None) .unwrap(); - let mut merk = Merk::open_base(ctx, false).unwrap().unwrap(); + let mut merk = Merk::open_base( + ctx, + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .unwrap(); assert_eq!( Element::get(&merk, b"another-key", true) diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index c049e7ba..91d57fe3 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -29,11 +29,16 @@ //! Helpers //! 
Implements helper functions in Element +#[cfg(feature = "full")] +use grovedb_merk::tree::kv::{ + ValueDefinedCostType, + ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}, +}; #[cfg(feature = "full")] use grovedb_merk::{ - tree::{kv::KV, Tree}, + tree::{kv::KV, TreeNode}, TreeFeatureType, - TreeFeatureType::{BasicMerk, SummedMerk}, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; #[cfg(feature = "full")] use integer_encoding::VarInt; @@ -114,8 +119,8 @@ impl Element { /// Get the tree feature type pub fn get_feature_type(&self, parent_is_sum_tree: bool) -> Result { match parent_is_sum_tree { - true => Ok(SummedMerk(self.sum_value_or_default())), - false => Ok(BasicMerk), + true => Ok(SummedMerkNode(self.sum_value_or_default())), + false => Ok(BasicMerkNode), } } @@ -307,12 +312,44 @@ impl Element { )), } } + + #[cfg(feature = "full")] + /// Get the value defined cost for a serialized value + pub fn value_defined_cost(&self) -> Option { + let Some(value_cost) = self.get_specialized_cost().ok() else { + return None; + }; + + let cost = value_cost + + self.get_flags().as_ref().map_or(0, |flags| { + let flags_len = flags.len() as u32; + flags_len + flags_len.required_space() as u32 + }); + match self { + Element::Tree(..) => Some(LayeredValueDefinedCost(cost)), + Element::SumTree(..) => Some(LayeredValueDefinedCost(cost)), + Element::SumItem(..) 
=> Some(SpecializedValueDefinedCost(cost)), + _ => None, + } + } + + #[cfg(feature = "full")] + /// Get the value defined cost for a serialized value + pub fn value_defined_cost_for_serialized_value(value: &[u8]) -> Option { + let element = Element::deserialize(value).ok()?; + element.value_defined_cost() + } } #[cfg(feature = "full")] /// Decode from bytes pub fn raw_decode(bytes: &[u8]) -> Result { - let tree = Tree::decode_raw(bytes, vec![]).map_err(|e| Error::CorruptedData(e.to_string()))?; + let tree = TreeNode::decode_raw( + bytes, + vec![], + Some(Element::value_defined_cost_for_serialized_value), + ) + .map_err(|e| Error::CorruptedData(e.to_string()))?; let element: Element = Element::deserialize(tree.value_as_slice())?; Ok(element) } diff --git a/grovedb/src/element/insert.rs b/grovedb/src/element/insert.rs index e500b41a..2ba7f92d 100644 --- a/grovedb/src/element/insert.rs +++ b/grovedb/src/element/insert.rs @@ -92,6 +92,7 @@ impl Element { Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -287,6 +288,7 @@ impl Element { Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -356,6 +358,7 @@ impl Element { Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, + Some(&Element::value_defined_cost_for_serialized_value), ) .map_err(|e| Error::CorruptedData(e.to_string())) } diff --git a/grovedb/src/estimated_costs/average_case_costs.rs b/grovedb/src/estimated_costs/average_case_costs.rs index 919d4d5a..d93b6451 100644 --- a/grovedb/src/estimated_costs/average_case_costs.rs +++ 
b/grovedb/src/estimated_costs/average_case_costs.rs @@ -40,7 +40,7 @@ use grovedb_merk::{ add_average_case_merk_replace_layered, EstimatedLayerInformation, }, }, - tree::Tree, + tree::TreeNode, HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; @@ -68,7 +68,7 @@ impl GroveDb { match path.last() { None => {} Some(key) => { - cost.storage_loaded_bytes += Tree::average_case_encoded_tree_size( + cost.storage_loaded_bytes += TreeNode::average_case_encoded_tree_size( key.max_length() as u32, HASH_LENGTH as u32, is_sum_tree, @@ -359,7 +359,7 @@ impl GroveDb { estimated_element_size: u32, in_parent_tree_using_sums: bool, ) { - let value_size = Tree::average_case_encoded_tree_size( + let value_size = TreeNode::average_case_encoded_tree_size( key.max_length() as u32, estimated_element_size, in_parent_tree_using_sums, @@ -443,7 +443,7 @@ impl GroveDb { estimated_references_sizes: Vec, ) { // todo: verify - let value_size: u32 = Tree::average_case_encoded_tree_size( + let value_size: u32 = TreeNode::average_case_encoded_tree_size( key.max_length() as u32, estimated_element_size, in_parent_tree_using_sums, @@ -461,7 +461,7 @@ mod test { use grovedb_costs::OperationCost; use grovedb_merk::{ estimated_costs::average_case_costs::add_average_case_get_merk_node, - test_utils::make_batch_seq, Merk, + test_utils::make_batch_seq, tree::kv::ValueDefinedCostType, Merk, }; use grovedb_storage::{ rocksdb_storage::RocksDbStorage, worst_case_costs::WorstKeyLength, Storage, StorageBatch, @@ -487,6 +487,7 @@ mod test { .get_storage_context(EMPTY_PATH, Some(&batch)) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -505,6 +506,7 @@ mod test { let merk = Merk::open_base( storage.get_storage_context(EMPTY_PATH, None).unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -514,7 +516,11 @@ mod test { // 2. Left link exists // 3. 
Right link exists // Based on merk's avl rotation algorithm node is key 8 satisfies this - let node_result = merk.get(&8_u64.to_be_bytes(), true); + let node_result = merk.get( + &8_u64.to_be_bytes(), + true, + None::<&fn(&[u8]) -> Option>, + ); // By tweaking the max element size, we can adapt the average case function to // this scenario. make_batch_seq creates values that are 60 bytes in size diff --git a/grovedb/src/estimated_costs/worst_case_costs.rs b/grovedb/src/estimated_costs/worst_case_costs.rs index d84b3df2..5a72a405 100644 --- a/grovedb/src/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/estimated_costs/worst_case_costs.rs @@ -42,7 +42,7 @@ use grovedb_merk::{ MERK_BIGGEST_VALUE_SIZE, }, }, - tree::Tree, + tree::TreeNode, HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; @@ -67,7 +67,7 @@ impl GroveDb { match path.last() { None => {} Some(key) => { - cost.storage_loaded_bytes += Tree::worst_case_encoded_tree_size( + cost.storage_loaded_bytes += TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, HASH_LENGTH as u32, is_sum_tree, @@ -333,7 +333,7 @@ impl GroveDb { max_element_size: u32, in_parent_tree_using_sums: bool, ) { - let value_size = Tree::worst_case_encoded_tree_size( + let value_size = TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, max_element_size, in_parent_tree_using_sums, @@ -392,7 +392,7 @@ impl GroveDb { max_references_sizes: Vec, ) { // todo: verify - let value_size: u32 = Tree::worst_case_encoded_tree_size( + let value_size: u32 = TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, max_element_size, in_parent_tree_using_sums, @@ -411,6 +411,7 @@ mod test { use grovedb_merk::{ estimated_costs::worst_case_costs::add_worst_case_get_merk_node, test_utils::{empty_path_merk, empty_path_merk_read_only, make_batch_seq}, + tree::kv::ValueDefinedCostType, }; use grovedb_storage::{ rocksdb_storage::{test_utils::TempStorage, RocksDbStorage}, @@ -451,7 +452,11 @@ mod 
test { // 2. Left link exists // 3. Right link exists // Based on merk's avl rotation algorithm node is key 8 satisfies this - let node_result = merk.get(&8_u64.to_be_bytes(), true); + let node_result = merk.get( + &8_u64.to_be_bytes(), + true, + None::<&fn(&[u8]) -> Option>, + ); // By tweaking the max element size, we can adapt the worst case function to // this scenario. make_batch_seq creates values that are 60 bytes in size diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 050d6b28..9ea95513 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -194,6 +194,8 @@ pub use grovedb_merk::proofs::query::query_item::QueryItem; #[cfg(any(feature = "full", feature = "verify"))] pub use grovedb_merk::proofs::Query; #[cfg(feature = "full")] +use grovedb_merk::tree::kv::ValueDefinedCostType; +#[cfg(feature = "full")] use grovedb_merk::{ self, tree::{combine_hash, value_hash}, @@ -290,11 +292,16 @@ impl GroveDb { ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) 
= element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(cost) + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .add_cost(cost) } else { Err(Error::CorruptedPath( "cannot open a subtree as parent exists but is not a tree", @@ -302,9 +309,13 @@ impl GroveDb { .wrap_with_cost(cost) } } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) + Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) + .add_cost(cost) } } @@ -341,20 +352,29 @@ impl GroveDb { .unwrap()?; let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) 
= element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .unwrap() + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .unwrap() } else { Err(Error::CorruptedPath( "cannot open a subtree as parent exists but is not a tree", )) } } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .unwrap() + Merk::open_base( + storage, + false, + None::<&fn(&[u8]) -> Option>, + ) + .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) + .unwrap() } } @@ -392,11 +412,16 @@ impl GroveDb { ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { - Merk::open_layered_with_root_key(storage, root_key, is_sum_tree) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(cost) + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .add_cost(cost) } else { Err(Error::CorruptedPath( "cannot open a subtree as parent exists but is not a tree", @@ -404,9 +429,13 @@ impl GroveDb { .wrap_with_cost(cost) } } else { - Merk::open_base(storage, false) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) + Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) + .add_cost(cost) } } @@ -676,7 +705,11 @@ impl GroveDb { key: 
K, ) -> CostResult { subtree - .get(key.as_ref(), true) + .get( + key.as_ref(), + true, + Some(&Element::value_defined_cost_for_serialized_value), + ) .map_err(|_| { Error::InvalidPath("can't find subtree in parent during propagation".to_owned()) }) @@ -849,7 +882,11 @@ impl GroveDb { let element = raw_decode(&element_value)?; if element.is_tree() { let (kv_value, element_value_hash) = merk - .get_value_and_value_hash(&key, true) + .get_value_and_value_hash( + &key, + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .map_err(MerkError)? .ok_or(Error::CorruptedData( @@ -875,7 +912,11 @@ impl GroveDb { issues.extend(self.verify_merk_and_submerks(inner_merk, &new_path_ref, batch)?); } else if element.is_item() { let (kv_value, element_value_hash) = merk - .get_value_and_value_hash(&key, true) + .get_value_and_value_hash( + &key, + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .map_err(MerkError)? .ok_or(Error::CorruptedData( @@ -911,7 +952,11 @@ impl GroveDb { let element = raw_decode(&element_value)?; if element.is_tree() { let (kv_value, element_value_hash) = merk - .get_value_and_value_hash(&key, true) + .get_value_and_value_hash( + &key, + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .map_err(MerkError)? .ok_or(Error::CorruptedData( @@ -942,7 +987,11 @@ impl GroveDb { )?); } else if element.is_item() { let (kv_value, element_value_hash) = merk - .get_value_and_value_hash(&key, true) + .get_value_and_value_hash( + &key, + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .map_err(MerkError)? 
.ok_or(Error::CorruptedData( diff --git a/grovedb/src/operations/delete/average_case.rs b/grovedb/src/operations/delete/average_case.rs index ce3d141a..5b1dba7c 100644 --- a/grovedb/src/operations/delete/average_case.rs +++ b/grovedb/src/operations/delete/average_case.rs @@ -73,7 +73,7 @@ impl GroveDb { let mut used_path = path.0.as_slice(); let mut ops = vec![]; let path_len = path.len() as u16; - for height in (stop_path_height..(path_len as u16)).rev() { + for height in (stop_path_height..path_len).rev() { let ( path_at_level, key_at_level, diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index fb2ce5ce..512a33ef 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -269,7 +269,7 @@ impl GroveDb { .map_err(|e| MerkError::ClientCorruptionError(e.to_string())), } }, - &batch, + batch, ) } @@ -495,7 +495,8 @@ impl GroveDb { Merk::open_layered_with_root_key( storage, subtree_to_delete_from.root_key(), - element.is_sum_tree() + element.is_sum_tree(), + Some(&Element::value_defined_cost_for_serialized_value), ) .map_err(|_| { Error::CorruptedData("cannot open a subtree with given root key".to_owned()) @@ -521,7 +522,7 @@ impl GroveDb { cost_return_on_error!( &mut cost, self.propagate_changes_with_batch_transaction( - &batch, + batch, merk_cache, &path, transaction diff --git a/grovedb/src/operations/delete/worst_case.rs b/grovedb/src/operations/delete/worst_case.rs index 60699f59..a887a469 100644 --- a/grovedb/src/operations/delete/worst_case.rs +++ b/grovedb/src/operations/delete/worst_case.rs @@ -68,7 +68,7 @@ impl GroveDb { let mut used_path = path.0.as_slice(); let mut ops = vec![]; let path_len = path.len() as u16; - for height in (stop_path_height..(path_len as u16)).rev() { + for height in (stop_path_height..path_len).rev() { let ( path_at_level, key_at_level, diff --git a/grovedb/src/operations/insert/mod.rs b/grovedb/src/operations/insert/mod.rs index 9b83ff87..513e2098 100644 
--- a/grovedb/src/operations/insert/mod.rs +++ b/grovedb/src/operations/insert/mod.rs @@ -218,7 +218,11 @@ impl GroveDb { let maybe_element_bytes = cost_return_on_error!( &mut cost, subtree_to_insert_into - .get(key, true) + .get( + key, + true, + Some(&Element::value_defined_cost_for_serialized_value) + ) .map_err(|e| Error::CorruptedData(e.to_string())) ); if let Some(element_bytes) = maybe_element_bytes { @@ -353,7 +357,11 @@ impl GroveDb { let maybe_element_bytes = cost_return_on_error!( &mut cost, subtree_to_insert_into - .get(key, true) + .get( + key, + true, + Some(&Element::value_defined_cost_for_serialized_value) + ) .map_err(|e| Error::CorruptedData(e.to_string())) ); if let Some(element_bytes) = maybe_element_bytes { @@ -867,13 +875,18 @@ mod tests { // Child Heights 2 // Total 37 + 85 + 48 = 170 + + // replaced bytes + // 133 for key1 (higher node/same merk level) + // ? + assert_eq!( cost, OperationCost { seek_count: 7, storage_cost: StorageCost { added_bytes: 170, - replaced_bytes: 209, // todo: verify + replaced_bytes: 217, removed_bytes: NoStorageRemoval }, storage_loaded_bytes: 232, @@ -942,7 +955,7 @@ mod tests { seek_count: 7, storage_cost: StorageCost { added_bytes: 170, - replaced_bytes: 211, // todo: verify + replaced_bytes: 217, // todo: verify removed_bytes: NoStorageRemoval }, storage_loaded_bytes: 237, @@ -1707,7 +1720,7 @@ mod tests { seek_count: 9, // todo: verify this storage_cost: StorageCost { added_bytes: 0, - replaced_bytes: 405, // todo: verify this + replaced_bytes: 409, // todo: verify this removed_bytes: NoStorageRemoval }, storage_loaded_bytes: 487, // todo verify this diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index 22bf49a9..292b8cbf 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -32,40 +32,31 @@ // that supports multiple implementations for verbose and non-verbose // generation -use 
grovedb_costs::cost_return_on_error_default; -#[cfg(feature = "full")] use grovedb_costs::{ - cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, + cost_return_on_error, cost_return_on_error_default, cost_return_on_error_no_add, CostResult, + CostsExt, OperationCost, }; -#[cfg(feature = "full")] use grovedb_merk::{ proofs::{encode_into, Node, Op}, tree::value_hash, KVIterator, Merk, ProofWithoutEncodingResult, }; use grovedb_path::SubtreePath; -#[cfg(feature = "full")] use grovedb_storage::StorageContext; -#[cfg(feature = "full")] -use crate::element::helpers::raw_decode; -#[cfg(feature = "full")] use crate::{ + element::helpers::raw_decode, operations::proof::util::{ - reduce_limit_and_offset_by, write_to_vec, ProofTokenType, EMPTY_TREE_HASH, + reduce_limit_and_offset_by, write_slice_of_slice_to_slice, write_slice_to_vec, + write_to_vec, ProofTokenType, EMPTY_TREE_HASH, }, reference_path::path_from_reference_path_type, - Element, Error, GroveDb, PathQuery, Query, -}; -use crate::{ - operations::proof::util::{write_slice_of_slice_to_slice, write_slice_to_vec}, versioning::{prepend_version_to_bytes, PROOF_VERSION}, + Element, Error, GroveDb, PathQuery, Query, }; -#[cfg(feature = "full")] type LimitOffset = (Option, Option); -#[cfg(feature = "full")] impl GroveDb { /// Prove one or more path queries. 
/// If we more than one path query, we merge into a single path query before diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index ac13306c..9e8c6e44 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -260,7 +260,7 @@ impl ProofVerifier { query: &PathQuery, is_verbose: bool, ) -> Result<[u8; 32], Error> { - let (proof_version, proof) = read_and_consume_proof_version(proof)?; + let (_proof_version, proof) = read_and_consume_proof_version(proof)?; let mut proof_reader = ProofReader::new_with_verbose_status(proof, is_verbose); let path_slices = query.path.iter().map(|x| x.as_slice()).collect::>(); diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 86c1c3f0..898f5ff1 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -181,6 +181,7 @@ impl<'db> Restorer<'db> { .get_immediate_storage_context(SubtreePath::empty(), tx) .unwrap(), false, + Some(&Element::value_defined_cost_for_serialized_value), ) .unwrap() .map_err(|e| RestorerError(e.to_string()))?, diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index e078237d..451b2307 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -2365,9 +2365,13 @@ fn test_find_subtrees() { fn test_root_subtree_has_root_key() { let db = make_test_grovedb(); let storage = db.db.get_storage_context(EMPTY_PATH, None).unwrap(); - let root_merk = Merk::open_base(storage, false) - .unwrap() - .expect("expected to get root merk"); + let root_merk = Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .expect("expected to get root merk"); let (_, root_key, _) = root_merk .root_hash_key_and_sum() .unwrap() @@ -2457,10 +2461,14 @@ fn test_get_subtree() { .db .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) .unwrap(); - let subtree = - Merk::open_layered_with_root_key(subtree_storage, 
Some(b"key3".to_vec()), false) - .unwrap() - .expect("cannot open merk"); + let subtree = Merk::open_layered_with_root_key( + subtree_storage, + Some(b"key3".to_vec()), + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .expect("cannot open merk"); let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); } @@ -2497,9 +2505,14 @@ fn test_get_subtree() { &transaction, ) .unwrap(); - let subtree = Merk::open_layered_with_root_key(subtree_storage, Some(b"key4".to_vec()), false) - .unwrap() - .expect("cannot open merk"); + let subtree = Merk::open_layered_with_root_key( + subtree_storage, + Some(b"key4".to_vec()), + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .expect("cannot open merk"); let result_element = Element::get(&subtree, b"key4", true).unwrap().unwrap(); assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); @@ -2509,9 +2522,14 @@ fn test_get_subtree() { .db .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) .unwrap(); - let subtree = Merk::open_layered_with_root_key(subtree_storage, Some(b"key3".to_vec()), false) - .unwrap() - .expect("cannot open merk"); + let subtree = Merk::open_layered_with_root_key( + subtree_storage, + Some(b"key3".to_vec()), + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .expect("cannot open merk"); let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); } @@ -2911,7 +2929,7 @@ fn test_tree_value_exists_method_tx() { #[test] fn test_storage_wipe() { let db = make_test_grovedb(); - let path = db._tmp_dir.path(); + let _path = db._tmp_dir.path(); // Test keys in non-root tree db.insert( diff --git a/grovedb/src/tests/sum_tree_tests.rs b/grovedb/src/tests/sum_tree_tests.rs index 3bc6896e..6c4a7589 100644 --- 
a/grovedb/src/tests/sum_tree_tests.rs +++ b/grovedb/src/tests/sum_tree_tests.rs @@ -30,7 +30,8 @@ use grovedb_merk::{ proofs::Query, - TreeFeatureType::{BasicMerk, SummedMerk}, + tree::kv::ValueDefinedCostType, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; use grovedb_storage::StorageBatch; @@ -266,28 +267,44 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { .unwrap() .expect("should open tree"); assert!(matches!( - merk.get_feature_type(b"item1", true) - .unwrap() - .expect("node should exist"), - Some(SummedMerk(30)) + merk.get_feature_type( + b"item1", + true, + None::<&fn(&[u8]) -> Option> + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(30)) )); assert!(matches!( - merk.get_feature_type(b"item2", true) - .unwrap() - .expect("node should exist"), - Some(SummedMerk(10)) + merk.get_feature_type( + b"item2", + true, + None::<&fn(&[u8]) -> Option> + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(10)) )); assert!(matches!( - merk.get_feature_type(b"item3", true) - .unwrap() - .expect("node should exist"), - Some(SummedMerk(0)) + merk.get_feature_type( + b"item3", + true, + None::<&fn(&[u8]) -> Option> + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(0)) )); assert!(matches!( - merk.get_feature_type(b"item4", true) - .unwrap() - .expect("node should exist"), - Some(SummedMerk(0)) + merk.get_feature_type( + b"item4", + true, + None::<&fn(&[u8]) -> Option> + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(0)) )); assert_eq!(merk.sum().expect("expected to get sum"), Some(40)); @@ -326,16 +343,24 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { .unwrap() .expect("should open tree"); assert!(matches!( - merk.get_feature_type(b"item1", true) - .unwrap() - .expect("node should exist"), - Some(BasicMerk) + merk.get_feature_type( + b"item1", + true, + Some(&Element::value_defined_cost_for_serialized_value) + ) + .unwrap() + .expect("node should 
exist"), + Some(BasicMerkNode) )); assert!(matches!( - merk.get_feature_type(b"item2", true) - .unwrap() - .expect("node should exist"), - Some(BasicMerk) + merk.get_feature_type( + b"item2", + true, + Some(&Element::value_defined_cost_for_serialized_value) + ) + .unwrap() + .expect("node should exist"), + Some(BasicMerkNode) )); assert_eq!(merk.sum().expect("expected to get sum"), None); } @@ -582,10 +607,14 @@ fn test_sum_tree_propagation() { .expect("should open tree"); assert!(matches!( test_leaf_merk - .get_feature_type(b"key", true) + .get_feature_type( + b"key", + true, + Some(&Element::value_defined_cost_for_serialized_value) + ) .unwrap() .expect("node should exist"), - Some(BasicMerk) + Some(BasicMerkNode) )); let parent_sum_tree = db @@ -594,12 +623,16 @@ fn test_sum_tree_propagation() { .expect("should open tree"); assert!(matches!( parent_sum_tree - .get_feature_type(b"tree2", true) + .get_feature_type( + b"tree2", + true, + Some(&Element::value_defined_cost_for_serialized_value) + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(15)) /* 15 because the child sum tree has one sum item of - * value 5 and - * another of value 10 */ + Some(SummedMerkNode(15)) /* 15 because the child sum tree has one sum item of + * value 5 and + * another of value 10 */ )); let child_sum_tree = db @@ -611,33 +644,49 @@ fn test_sum_tree_propagation() { .expect("should open tree"); assert!(matches!( child_sum_tree - .get_feature_type(b"item1", true) + .get_feature_type( + b"item1", + true, + None::<&fn(&[u8]) -> Option> + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(0)) + Some(SummedMerkNode(0)) )); assert!(matches!( child_sum_tree - .get_feature_type(b"sumitem1", true) + .get_feature_type( + b"sumitem1", + true, + None::<&fn(&[u8]) -> Option> + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(5)) + Some(SummedMerkNode(5)) )); assert!(matches!( child_sum_tree - .get_feature_type(b"sumitem2", true) + .get_feature_type( + b"sumitem2", + 
true, + None::<&fn(&[u8]) -> Option> + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(10)) + Some(SummedMerkNode(10)) )); // TODO: should references take the sum of the referenced element?? assert!(matches!( child_sum_tree - .get_feature_type(b"item2", true) + .get_feature_type( + b"item2", + true, + None::<&fn(&[u8]) -> Option> + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(0)) + Some(SummedMerkNode(0)) )); } @@ -673,17 +722,25 @@ fn test_sum_tree_with_batches() { assert!(matches!( sum_tree - .get_feature_type(b"a", true) + .get_feature_type( + b"a", + true, + Some(&Element::value_defined_cost_for_serialized_value) + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(0)) + Some(SummedMerkNode(0)) )); assert!(matches!( sum_tree - .get_feature_type(b"b", true) + .get_feature_type( + b"b", + true, + Some(&Element::value_defined_cost_for_serialized_value) + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(10)) + Some(SummedMerkNode(10)) )); // Create new batch to use existing tree @@ -703,10 +760,14 @@ fn test_sum_tree_with_batches() { .expect("should open tree"); assert!(matches!( sum_tree - .get_feature_type(b"c", true) + .get_feature_type( + b"c", + true, + None::<&fn(&[u8]) -> Option> + ) .unwrap() .expect("node should exist"), - Some(SummedMerk(10)) + Some(SummedMerkNode(10)) )); assert_eq!(sum_tree.sum().expect("expected to get sum"), Some(20)); diff --git a/grovedb/src/tests/tree_hashes_tests.rs b/grovedb/src/tests/tree_hashes_tests.rs index b67d1cf9..d2418132 100644 --- a/grovedb/src/tests/tree_hashes_tests.rs +++ b/grovedb/src/tests/tree_hashes_tests.rs @@ -28,7 +28,9 @@ //! 
Tree hashes tests -use grovedb_merk::tree::{combine_hash, kv_digest_to_kv_hash, node_hash, value_hash, NULL_HASH}; +use grovedb_merk::tree::{ + combine_hash, kv::ValueDefinedCostType, kv_digest_to_kv_hash, node_hash, value_hash, NULL_HASH, +}; use grovedb_storage::StorageBatch; use crate::{ @@ -58,19 +60,31 @@ fn test_node_hashes_when_inserting_item() { .expect("should open merk"); let (elem_value, elem_value_hash) = test_leaf_merk - .get_value_and_value_hash(b"key1", true) + .get_value_and_value_hash( + b"key1", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let elem_kv_hash = test_leaf_merk - .get_kv_hash(b"key1", true) + .get_kv_hash( + b"key1", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let elem_node_hash = test_leaf_merk - .get_hash(b"key1", true) + .get_hash( + b"key1", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); @@ -110,19 +124,31 @@ fn test_tree_hashes_when_inserting_empty_tree() { .expect("should open merk"); let (elem_value, elem_value_hash) = test_leaf_merk - .get_value_and_value_hash(b"key1", true) + .get_value_and_value_hash( + b"key1", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let elem_kv_hash = test_leaf_merk - .get_kv_hash(b"key1", true) + .get_kv_hash( + b"key1", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let elem_node_hash = test_leaf_merk - .get_hash(b"key1", true) + .get_hash( + b"key1", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); @@ -187,7 +213,11 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { // Let's first verify that the lowest nodes 
hashes are as we expect let (elem_value, elem_value_hash) = middle_merk_key1 - .get_value_and_value_hash(b"key2", true) + .get_value_and_value_hash( + b"key2", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); @@ -210,7 +240,11 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { assert_eq!(elem_value_hash, combined_value_hash_key2); let elem_kv_hash = middle_merk_key1 - .get_kv_hash(b"key2", true) + .get_kv_hash( + b"key2", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get kv hash") .expect("value hash should be some"); @@ -220,7 +254,11 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { assert_eq!(elem_kv_hash, kv_hash_key2); let elem_node_hash = middle_merk_key1 - .get_hash(b"key2", true) + .get_hash( + b"key2", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get kv hash") .expect("value hash should be some"); @@ -238,7 +276,11 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { assert_eq!(root_hash, node_hash_key2); let (middle_elem_value_key1, middle_elem_value_hash_key1) = under_top_merk - .get_value_and_value_hash(b"key1", true) + .get_value_and_value_hash( + b"key1", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); @@ -276,7 +318,11 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { ); let middle_elem_kv_hash_key1 = under_top_merk - .get_kv_hash(b"key1", true) + .get_kv_hash( + b"key1", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); @@ -290,7 +336,11 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { ); let middle_elem_node_hash_key1 = under_top_merk - .get_hash(b"key1", true) + .get_hash( + b"key1", + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() 
.expect("should get value hash") .expect("value hash should be some"); diff --git a/grovedb/src/util.rs b/grovedb/src/util.rs index da478128..cbedc9af 100644 --- a/grovedb/src/util.rs +++ b/grovedb/src/util.rs @@ -198,8 +198,11 @@ macro_rules! merk_optional_tx { { let $subtree = cost_return_on_error!( &mut $cost, - ::grovedb_merk::Merk::open_base(storage.unwrap_add_cost(&mut $cost), false) - .map(|merk_res| + ::grovedb_merk::Merk::open_base( + storage.unwrap_add_cost(&mut $cost), + false, + Some(&Element::value_defined_cost_for_serialized_value) + ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( "cannot open a subtree".to_owned() @@ -226,7 +229,8 @@ macro_rules! merk_optional_tx { ::grovedb_merk::Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( @@ -271,7 +275,8 @@ macro_rules! merk_optional_tx_path_not_empty { ::grovedb_merk::Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( @@ -308,8 +313,11 @@ macro_rules! 
root_merk_optional_tx { { let $subtree = cost_return_on_error!( &mut $cost, - ::grovedb_merk::Merk::open_base(storage.unwrap_add_cost(&mut $cost), false) - .map(|merk_res| + ::grovedb_merk::Merk::open_base( + storage.unwrap_add_cost(&mut $cost), + false, + Some(&Element::value_defined_cost_for_serialized_value) + ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( "cannot open a subtree".to_owned() diff --git a/grovedb/src/versioning.rs b/grovedb/src/versioning.rs index a241b1e4..a041b3d8 100644 --- a/grovedb/src/versioning.rs +++ b/grovedb/src/versioning.rs @@ -1,8 +1,8 @@ use std::io::Cursor; -use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; +use integer_encoding::{VarInt, VarIntReader}; -use crate::{Error, Error::InternalError}; +use crate::Error; pub(crate) const PROOF_VERSION: u32 = 1; @@ -37,7 +37,6 @@ pub(crate) fn prepend_version_to_bytes(mut bytes: Vec, version: u32) -> Resu #[cfg(test)] mod tests { - use integer_encoding::VarIntWriter; use crate::versioning::{ prepend_version_to_bytes, read_and_consume_proof_version, read_proof_version, @@ -45,11 +44,11 @@ mod tests { #[test] fn read_correct_version() { - let mut data = vec![1, 2, 3]; + let data = vec![1, 2, 3]; let version = 500_u32; // prepend the version information to the data vector - let mut new_data = prepend_version_to_bytes(data, version).unwrap(); + let new_data = prepend_version_to_bytes(data, version).unwrap(); assert_eq!(new_data, [244, 3, 1, 2, 3]); // show that read_version doesn't consume diff --git a/merk/Cargo.toml b/merk/Cargo.toml index 4fec012e..8cfab2db 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "grovedb-merk" description = "Merkle key/value store adapted for GroveDB" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" authors = ["Samuel Westrich ", "Wisdom Ogwu ", "Matt Bell "] edition = "2021" license = "MIT" @@ -12,13 +12,13 @@ documentation = "https://docs.rs/grovedb-merk" [dependencies] thiserror = "1.0.37" 
-grovedb-storage = { version = "1.0.0-rc.1", path = "../storage", optional = true } +grovedb-storage = { version = "1.0.0-rc.2", path = "../storage", optional = true } failure = "0.1.8" integer-encoding = "3.0.4" indexmap = "1.9.2" -grovedb-costs = { version = "1.0.0-rc.1", path = "../costs" } -grovedb-visualize = { version = "1.0.0-rc.1", path = "../visualize" } -grovedb-path = { version = "1.0.0-rc.1", path = "../path" } +grovedb-costs = { version = "1.0.0-rc.2", path = "../costs" } +grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize" } +grovedb-path = { version = "1.0.0-rc.2", path = "../path" } [dependencies.time] version = "0.3.17" diff --git a/merk/benches/merk.rs b/merk/benches/merk.rs index 7846a78c..b0f9cca4 100644 --- a/merk/benches/merk.rs +++ b/merk/benches/merk.rs @@ -30,9 +30,14 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use grovedb_costs::storage_cost::removal::StorageRemovedBytes::BasicStorageRemoval; +use grovedb_merk::{ + proofs, + test_utils::{make_batch_rand, make_batch_seq, make_del_batch_rand, TempMerk}, + tree::kv::ValueDefinedCostType, + Merk, +}; use grovedb_path::SubtreePath; use grovedb_storage::{rocksdb_storage::test_utils::TempStorage, Storage}; -use merk::{proofs::encode_into as encode_proof_into, test_utils::*, Merk}; use rand::prelude::*; /// 1 million gets in 2k batches @@ -46,11 +51,12 @@ pub fn get(c: &mut Criterion) { let mut batches = vec![]; for i in 0..num_batches { let batch = make_batch_rand(batch_size, i); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -72,7 +78,9 @@ pub fn get(c: &mut Criterion) { let key_index = (i / num_batches) as usize; let key = &batches[batch_index][key_index].0; - merk.get(key, true).unwrap().expect("get failed"); + merk.get(key, 
true, None:: Option>) + .unwrap() + .expect("get failed"); i = (i + 1) % initial_size; }) @@ -98,11 +106,12 @@ pub fn insert_1m_2k_seq(c: &mut Criterion) { b.iter_with_large_drop(|| { let batch = &batches[i % n_batches]; - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -137,11 +146,12 @@ pub fn insert_1m_2k_rand(c: &mut Criterion) { b.iter_with_large_drop(|| { let batch = &batches[i % n_batches]; - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -169,11 +179,12 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { for i in 0..n_batches { let batch = make_batch_seq(((i * batch_size) as u64)..((i + 1) * batch_size) as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -193,11 +204,12 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { b.iter_with_large_drop(|| { let batch = &batches[i % n_batches]; - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -225,11 +237,12 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], 
None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -249,11 +262,12 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { b.iter_with_large_drop(|| { let batch = &batches[i % n_batches]; - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -283,11 +297,12 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); let delete_batch = make_del_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -311,11 +326,12 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { // Merk tree is kept with 1m elements before each bench iteration for more or // less same inputs. 
- merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( insert_batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -328,11 +344,12 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { .expect("apply failed"); b.iter_with_large_drop(|| { - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( delete_batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -361,11 +378,12 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -378,7 +396,7 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { .expect("apply failed"); let mut prove_keys = Vec::with_capacity(batch_size); for (key, _) in batch.iter() { - prove_keys.push(merk::proofs::query::query_item::QueryItem::Key(key.clone())); + prove_keys.push(proofs::query::query_item::QueryItem::Key(key.clone())); } prove_keys_per_batch.push(prove_keys); batches.push(batch); @@ -409,11 +427,12 @@ pub fn build_trunk_chunk_1m_2k_rand(c: &mut Criterion) { for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -434,7 +453,7 @@ pub fn build_trunk_chunk_1m_2k_rand(c: &mut Criterion) { let 
(ops, _) = merk.walk(|walker| walker.unwrap().create_trunk_proof().unwrap().unwrap()); - encode_proof_into(ops.iter(), &mut bytes); + proofs::encode_into(ops.iter(), &mut bytes); }); }); } @@ -450,11 +469,12 @@ pub fn chunkproducer_rand_1m_1_rand(c: &mut Criterion) { for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -489,11 +509,12 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -530,11 +551,12 @@ pub fn restore_500_1(c: &mut Criterion) { let mut merk = TempMerk::new(); let batch = make_batch_rand(merk_size as u64, 0_u64); - merk.apply_unchecked::<_, Vec, _, _, _>( + merk.apply_unchecked::<_, Vec, _, _, _, _>( &batch, &[], None, &|_k, _v| Ok(0), + None::<&fn(&[u8]) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -560,7 +582,13 @@ pub fn restore_500_1(c: &mut Criterion) { .0 .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(); - let m = Merk::open_standalone(ctx, false).unwrap().unwrap(); + let m = Merk::open_standalone( + ctx, + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); let mut restorer = Merk::restore(m, root_hash); for chunk in data.1 { diff --git a/merk/benches/ops.rs b/merk/benches/ops.rs index a1fa8bf2..f9576fba 100644 --- a/merk/benches/ops.rs +++ b/merk/benches/ops.rs @@ -29,7 +29,12 @@ 
//! Merk benches ops use criterion::{criterion_group, criterion_main, Criterion}; -use merk::{owner::Owner, test_utils::*}; +use grovedb_merk::{ + owner::Owner, + test_utils::{ + apply_memonly_unchecked, make_batch_rand, make_batch_seq, make_tree_rand, make_tree_seq, + }, +}; /// 1m sequential inserts in 10k batches, memonly fn insert_1m_10k_seq_memonly(c: &mut Criterion) { diff --git a/merk/src/estimated_costs/average_case_costs.rs b/merk/src/estimated_costs/average_case_costs.rs index 23501844..b92222ac 100644 --- a/merk/src/estimated_costs/average_case_costs.rs +++ b/merk/src/estimated_costs/average_case_costs.rs @@ -37,7 +37,7 @@ use integer_encoding::VarInt; use crate::{ error::Error, estimated_costs::LAYER_COST_SIZE, - tree::{kv::KV, Link, Tree}, + tree::{kv::KV, Link, TreeNode}, HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, HASH_LENGTH, HASH_LENGTH_U32, }; @@ -57,8 +57,10 @@ pub type Weight = u8; #[cfg(feature = "full")] #[derive(Clone, PartialEq, Eq, Debug)] /// Estimated number of sum trees +#[derive(Default)] pub enum EstimatedSumTrees { /// No sum trees + #[default] NoSumTrees, /// Some sum trees SomeSumTrees { @@ -72,12 +74,6 @@ pub enum EstimatedSumTrees { } #[cfg(feature = "full")] -impl Default for EstimatedSumTrees { - fn default() -> Self { - EstimatedSumTrees::NoSumTrees - } -} - #[cfg(feature = "full")] impl EstimatedSumTrees { fn estimated_size(&self) -> Result { @@ -318,7 +314,7 @@ impl EstimatedLayerCount { } #[cfg(feature = "full")] -impl Tree { +impl TreeNode { /// Return estimate of average encoded tree size pub fn average_case_encoded_tree_size( not_prefixed_key_len: u32, @@ -347,7 +343,7 @@ pub fn add_average_case_get_merk_node( // To write a node to disk, the left link, right link and kv nodes are encoded. // worst case, the node has both the left and right link present. 
- cost.storage_loaded_bytes += Tree::average_case_encoded_tree_size( + cost.storage_loaded_bytes += TreeNode::average_case_encoded_tree_size( not_prefixed_key_len, approximate_element_size, is_sum_tree, @@ -461,16 +457,16 @@ pub fn add_average_case_merk_propagate( estimated_sum_trees, average_flags_size, ) => { - let flags_len = average_flags_size.unwrap_or(0); - // it is normal to have LAYER_COST_SIZE here, as we add estimated sum tree // additions right after - let value_len = LAYER_COST_SIZE + flags_len; + let value_len = LAYER_COST_SIZE + + average_flags_size + .map_or(0, |flags_len| flags_len + flags_len.required_space() as u32); // in order to simplify calculations we get the estimated size and remove the // cost for the basic merk let sum_tree_addition = estimated_sum_trees.estimated_size()?; nodes_updated - * (KV::value_byte_cost_size_for_key_and_raw_value_lengths( + * (KV::layered_value_byte_cost_size_for_key_and_value_lengths( *average_key_size as u32, value_len, *is_sum_tree, @@ -520,7 +516,7 @@ pub fn add_average_case_merk_propagate( let flags_len = average_flags_size.unwrap_or(0); let value_len = LAYER_COST_SIZE + flags_len; let sum_tree_addition = estimated_sum_trees.estimated_size()?; - let cost = KV::value_byte_cost_size_for_key_and_raw_value_lengths( + let cost = KV::layered_value_byte_cost_size_for_key_and_value_lengths( *average_key_size as u32, value_len, in_sum_tree, diff --git a/merk/src/estimated_costs/worst_case_costs.rs b/merk/src/estimated_costs/worst_case_costs.rs index 42911b37..407e2c70 100644 --- a/merk/src/estimated_costs/worst_case_costs.rs +++ b/merk/src/estimated_costs/worst_case_costs.rs @@ -37,7 +37,7 @@ use grovedb_costs::{CostResult, CostsExt, OperationCost}; use crate::{ error::Error, merk::defaults::MAX_PREFIXED_KEY_SIZE, - tree::{kv::KV, Link, Tree}, + tree::{kv::KV, Link, TreeNode}, HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, HASH_LENGTH, }; @@ -52,7 +52,7 @@ pub enum WorstCaseLayerInformation { } #[cfg(feature = "full")] -impl 
Tree { +impl TreeNode { /// Return worst case size of encoded tree pub fn worst_case_encoded_tree_size( not_prefixed_key_len: u32, @@ -82,7 +82,7 @@ pub fn add_worst_case_get_merk_node( // To write a node to disk, the left link, right link and kv nodes are encoded. // worst case, the node has both the left and right link present. cost.storage_loaded_bytes += - Tree::worst_case_encoded_tree_size(not_prefixed_key_len, max_element_size, is_sum_node); + TreeNode::worst_case_encoded_tree_size(not_prefixed_key_len, max_element_size, is_sum_node); } #[cfg(feature = "full")] diff --git a/merk/src/lib.rs b/merk/src/lib.rs index b780b6f4..caf3837c 100644 --- a/merk/src/lib.rs +++ b/merk/src/lib.rs @@ -86,8 +86,9 @@ pub use tree::{CryptoHash, TreeFeatureType}; #[cfg(feature = "full")] pub use crate::merk::{ - defaults::ROOT_KEY_KEY, IsSumTree, KVIterator, Merk, MerkType, ProofConstructionResult, - ProofWithoutEncodingResult, RootHashKeyAndSum, + defaults::ROOT_KEY_KEY, + prove::{ProofConstructionResult, ProofWithoutEncodingResult}, + IsSumTree, KVIterator, Merk, MerkType, RootHashKeyAndSum, }; #[cfg(feature = "full")] pub use crate::visualize::VisualizeableMerk; diff --git a/merk/src/merk/apply.rs b/merk/src/merk/apply.rs new file mode 100644 index 00000000..a33dfcc4 --- /dev/null +++ b/merk/src/merk/apply.rs @@ -0,0 +1,321 @@ +use std::cmp::Ordering; + +use grovedb_costs::{ + storage_cost::{ + removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, + StorageCost, + }, + CostResult, CostsExt, +}; +use grovedb_storage::StorageContext; + +use crate::{ + tree::{ + kv::{ValueDefinedCostType, KV}, + AuxMerkBatch, Walker, + }, + Error, Merk, MerkBatch, MerkOptions, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Applies a batch of operations (puts and deletes) to the tree. + /// + /// This will fail if the keys in `batch` are not sorted and unique. 
This + /// check creates some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `apply_unchecked` for a small performance + /// gain. + /// + /// # Example + /// ``` + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); + /// # store.apply::<_, Vec<_>>(&[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], &[], None) + /// .unwrap().expect(""); + /// + /// use grovedb_merk::Op; + /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// + /// let batch = &[ + /// // puts value [4,5,6] to key[1,2,3] + /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode)), + /// // deletes key [4,5,6] + /// (vec![4, 5, 6], Op::Delete), + /// ]; + /// store.apply::<_, Vec<_>>(batch, &[], None).unwrap().expect(""); + /// ``` + pub fn apply( + &mut self, + batch: &MerkBatch, + aux: &AuxMerkBatch, + options: Option, + ) -> CostResult<(), Error> + where + KB: AsRef<[u8]>, + KA: AsRef<[u8]>, + { + let use_sum_nodes = self.is_sum_tree; + self.apply_with_costs_just_in_time_value_update( + batch, + aux, + options, + &|key, value| { + Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( + key.len() as u32, + value.len() as u32, + use_sum_nodes, + )) + }, + None::<&fn(&[u8]) -> Option>, + &mut |_costs, _old_value, _value| Ok((false, None)), + &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { + Ok(( + BasicStorageRemoval(key_bytes_to_remove), + BasicStorageRemoval(value_bytes_to_remove), + )) + }, + ) + } + + /// Applies a batch of operations (puts and deletes) to the tree. + /// + /// This will fail if the keys in `batch` are not sorted and unique. This + /// check creates some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `apply_unchecked` for a small performance + /// gain. 
+ /// + /// # Example + /// ``` + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); + /// # store.apply::<_, Vec<_>>(&[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], &[], None) + /// .unwrap().expect(""); + /// + /// use grovedb_merk::Op; + /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// + /// let batch = &[ + /// // puts value [4,5,6] to key[1,2,3] + /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode)), + /// // deletes key [4,5,6] + /// (vec![4, 5, 6], Op::Delete), + /// ]; + /// store.apply::<_, Vec<_>>(batch, &[], None).unwrap().expect(""); + /// ``` + pub fn apply_with_specialized_costs( + &mut self, + batch: &MerkBatch, + aux: &AuxMerkBatch, + options: Option, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + ) -> CostResult<(), Error> + where + KB: AsRef<[u8]>, + KA: AsRef<[u8]>, + { + self.apply_with_costs_just_in_time_value_update( + batch, + aux, + options, + old_specialized_cost, + value_defined_cost_fn, + &mut |_costs, _old_value, _value| Ok((false, None)), + &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { + Ok(( + BasicStorageRemoval(key_bytes_to_remove), + BasicStorageRemoval(value_bytes_to_remove), + )) + }, + ) + } + + /// Applies a batch of operations (puts and deletes) to the tree with the + /// ability to update values based on costs. + /// + /// This will fail if the keys in `batch` are not sorted and unique. This + /// check creates some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `apply_unchecked` for a small performance + /// gain. 
+ /// + /// # Example + /// ``` + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); + /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( + /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], + /// &[], + /// None, + /// &|k, v| Ok(0), + /// None::<&fn(&[u8]) -> Option>, + /// &mut |s, v, o| Ok((false, None)), + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) + /// ).unwrap().expect(""); + /// + /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; + /// use grovedb_merk::Op; + /// use grovedb_merk::tree::kv::ValueDefinedCostType; + /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// + /// let batch = &[ + /// // puts value [4,5,6] to key[1,2,3] + /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode)), + /// // deletes key [4,5,6] + /// (vec![4, 5, 6], Op::Delete), + /// ]; + /// + /// store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( + /// batch, + /// &[], + /// None, + /// &|k, v| Ok(0), + /// None::<&fn(&[u8]) -> Option>, + /// &mut |s, v, o| Ok((false, None)), + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) + /// ).unwrap().expect(""); + /// ``` + pub fn apply_with_costs_just_in_time_value_update( + &mut self, + batch: &MerkBatch, + aux: &AuxMerkBatch, + options: Option, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult<(), Error> + where + KB: AsRef<[u8]>, + KA: AsRef<[u8]>, + { + // ensure keys in batch are sorted and unique + let mut maybe_prev_key: Option<&KB> = None; + for (key, ..) 
in batch.iter() { + if let Some(prev_key) = maybe_prev_key { + match prev_key.as_ref().cmp(key.as_ref()) { + Ordering::Greater => { + return Err(Error::InvalidInputError("Keys in batch must be sorted")) + .wrap_with_cost(Default::default()) + } + Ordering::Equal => { + return Err(Error::InvalidInputError("Keys in batch must be unique")) + .wrap_with_cost(Default::default()) + } + _ => (), + } + } + maybe_prev_key = Some(key); + } + + self.apply_unchecked( + batch, + aux, + options, + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + ) + } + + /// Applies a batch of operations (puts and deletes) to the tree. + /// + /// # Safety + /// This is unsafe because the keys in `batch` must be sorted and unique - + /// if they are not, there will be undefined behavior. For a safe version of + /// this method which checks to ensure the batch is sorted and unique, see + /// `apply`. + /// + /// # Example + /// ``` + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); + /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( + /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], + /// &[], + /// None, + /// &|k, v| Ok(0), + /// None::<&fn(&[u8]) -> Option>, + /// &mut |s, o, v| Ok((false, None)), + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) + /// ).unwrap().expect(""); + /// + /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; + /// use grovedb_merk::Op; + /// use grovedb_merk::tree::kv::ValueDefinedCostType; + /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// + /// let batch = &[ + /// // puts value [4,5,6] to key [1,2,3] + /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode)), + /// // deletes key [4,5,6] + /// (vec![4, 5, 6], Op::Delete), + /// ]; + /// unsafe { store.apply_unchecked::<_, Vec<_>, _, _, _, _>( /// /// /// + /// batch, + /// &[], + /// None, + /// &|k, v| Ok(0), + /// None::<&fn(&[u8]) -> Option>, + /// &mut 
|s, o, v| Ok((false, None)), + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) + /// ).unwrap().expect(""); + /// } + /// ``` + pub fn apply_unchecked( + &mut self, + batch: &MerkBatch, + aux: &AuxMerkBatch, + options: Option, + old_specialized_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, + section_removal_bytes: &mut R, + ) -> CostResult<(), Error> + where + KB: AsRef<[u8]>, + KA: AsRef<[u8]>, + C: Fn(&Vec, &Vec) -> Result, + V: Fn(&[u8]) -> Option, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, + R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, + { + let maybe_walker = self + .tree + .take() + .take() + .map(|tree| Walker::new(tree, self.source())); + + Walker::apply_to( + maybe_walker, + batch, + self.source(), + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + ) + .flat_map_ok(|(maybe_tree, key_updates)| { + // we set the new root node of the merk tree + self.tree.set(maybe_tree); + // commit changes to db + self.commit(key_updates, aux, options, old_specialized_cost) + }) + } +} diff --git a/merk/src/merk/chunks.rs b/merk/src/merk/chunks.rs index 7e8c588e..4f6564ef 100644 --- a/merk/src/merk/chunks.rs +++ b/merk/src/merk/chunks.rs @@ -214,6 +214,7 @@ mod tests { use crate::{ proofs::chunk::{verify_leaf, verify_trunk}, test_utils::*, + tree::kv::ValueDefinedCostType, }; #[test] @@ -275,6 +276,7 @@ mod tests { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .unwrap(); @@ -293,6 +295,7 @@ mod tests { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .unwrap(); @@ -311,6 +314,7 @@ mod tests { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .unwrap(); diff --git 
a/merk/src/merk/clear.rs b/merk/src/merk/clear.rs new file mode 100644 index 00000000..0de28f6a --- /dev/null +++ b/merk/src/merk/clear.rs @@ -0,0 +1,32 @@ +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; +use grovedb_storage::{Batch, RawIterator, StorageContext}; + +use crate::{Error, Error::StorageError, Merk}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Deletes tree data + pub fn clear(&mut self) -> CostResult<(), Error> { + let mut cost = OperationCost::default(); + + let mut iter = self.storage.raw_iter(); + iter.seek_to_first().unwrap_add_cost(&mut cost); + + let mut to_delete = self.storage.new_batch(); + while iter.valid().unwrap_add_cost(&mut cost) { + if let Some(key) = iter.key().unwrap_add_cost(&mut cost) { + // todo: deal with cost reimbursement + to_delete.delete(key, None); + } + iter.next().unwrap_add_cost(&mut cost); + } + cost_return_on_error!( + &mut cost, + self.storage.commit_batch(to_delete).map_err(StorageError) + ); + self.tree.set(None); + Ok(()).wrap_with_cost(cost) + } +} diff --git a/merk/src/merk/committer.rs b/merk/src/merk/committer.rs new file mode 100644 index 00000000..9fb02987 --- /dev/null +++ b/merk/src/merk/committer.rs @@ -0,0 +1,59 @@ +use crate::{ + merk::BatchValue, + tree::{Commit, TreeNode}, + Error, +}; + +pub struct MerkCommitter { + /// The batch has a key, maybe a value, with the value bytes, maybe the left + /// child size and maybe the right child size, then the + /// key_value_storage_cost + pub(in crate::merk) batch: Vec, + pub(in crate::merk) height: u8, + pub(in crate::merk) levels: u8, +} + +impl MerkCommitter { + pub(in crate::merk) fn new(height: u8, levels: u8) -> Self { + Self { + batch: Vec::with_capacity(10000), + height, + levels, + } + } +} + +impl Commit for MerkCommitter { + fn write( + &mut self, + tree: &mut TreeNode, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + ) -> Result<(), Error> { + let tree_size = tree.encoding_length(); + let 
storage_costs = if let Some(storage_costs) = tree.known_storage_cost.take() { + storage_costs + } else { + tree.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)? + .1 + }; + + let mut buf = Vec::with_capacity(tree_size); + tree.encode_into(&mut buf); + + let left_child_sizes = tree.child_ref_and_sum_size(true); + let right_child_sizes = tree.child_ref_and_sum_size(false); + self.batch.push(( + tree.key().to_vec(), + tree.feature_type().sum_length(), + Some((buf, left_child_sizes, right_child_sizes)), + storage_costs, + )); + Ok(()) + } + + fn prune(&self, tree: &TreeNode) -> (bool, bool) { + // keep N top levels of tree + let prune = (self.height - tree.height()) >= self.levels; + (prune, prune) + } +} diff --git a/merk/src/merk/get.rs b/merk/src/merk/get.rs new file mode 100644 index 00000000..f0c25b42 --- /dev/null +++ b/merk/src/merk/get.rs @@ -0,0 +1,326 @@ +use grovedb_costs::{CostContext, CostResult, CostsExt, OperationCost}; +use grovedb_storage::StorageContext; + +use crate::{ + tree::{kv::ValueDefinedCostType, TreeNode}, + CryptoHash, Error, + Error::StorageError, + Merk, TreeFeatureType, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Gets an auxiliary value. + pub fn get_aux(&self, key: &[u8]) -> CostResult>, Error> { + self.storage.get_aux(key).map_err(StorageError) + } + + /// Returns if the value at the given key exists + /// + /// Note that this is essentially the same as a normal RocksDB `get`, so + /// should be a fast operation and has almost no tree overhead. + pub fn exists( + &self, + key: &[u8], + value_defined_cost_fn: Option Option>, + ) -> CostResult { + self.has_node_direct(key, value_defined_cost_fn) + } + + /// Returns if the value at the given key exists + /// + /// Note that this is essentially the same as a normal RocksDB `get`, so + /// should be a fast operation and has almost no tree overhead. 
+ /// Contrary to a simple exists, this traverses the tree and can be faster + /// if the tree is cached, but slower if it is not + pub fn exists_by_traversing_tree( + &self, + key: &[u8], + value_defined_cost_fn: Option Option>, + ) -> CostResult { + self.has_node(key, value_defined_cost_fn) + } + + /// Gets a value for the given key. If the key is not found, `None` is + /// returned. + /// + /// Note that this is essentially the same as a normal RocksDB `get`, so + /// should be a fast operation and has almost no tree overhead. + pub fn get( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option Option>, + ) -> CostResult>, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| { + node.value_as_slice() + .to_vec() + .wrap_with_cost(Default::default()) + }, + value_defined_cost_fn, + ) + } else { + self.get_node_direct_fn( + key, + |node| { + node.value_as_slice() + .to_vec() + .wrap_with_cost(Default::default()) + }, + value_defined_cost_fn, + ) + } + } + + /// Returns the feature type for the node at the given key. + pub fn get_feature_type( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option Option>, + ) -> CostResult, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| node.feature_type().wrap_with_cost(Default::default()), + value_defined_cost_fn, + ) + } else { + self.get_node_direct_fn( + key, + |node| node.feature_type().wrap_with_cost(Default::default()), + value_defined_cost_fn, + ) + } + } + + /// Gets a hash of a node by a given key, `None` is returned in case + /// when node not found by the key. 
+ pub fn get_hash( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option Option>, + ) -> CostResult, Error> { + if allow_cache { + self.get_node_fn(key, |node| node.hash(), value_defined_cost_fn) + } else { + self.get_node_direct_fn(key, |node| node.hash(), value_defined_cost_fn) + } + } + + /// Gets the value hash of a node by a given key, `None` is returned in case + /// when node not found by the key. + pub fn get_value_hash( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option Option>, + ) -> CostResult, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| (*node.value_hash()).wrap_with_cost(OperationCost::default()), + value_defined_cost_fn, + ) + } else { + self.get_node_direct_fn( + key, + |node| (*node.value_hash()).wrap_with_cost(OperationCost::default()), + value_defined_cost_fn, + ) + } + } + + /// Gets a hash of a node by a given key, `None` is returned in case + /// when node not found by the key. + pub fn get_kv_hash( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option Option>, + ) -> CostResult, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()), + value_defined_cost_fn, + ) + } else { + self.get_node_direct_fn( + key, + |node| (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()), + value_defined_cost_fn, + ) + } + } + + /// Gets the value and value hash of a node by a given key, `None` is + /// returned in case when node not found by the key. 
+ pub fn get_value_and_value_hash( + &self, + key: &[u8], + allow_cache: bool, + value_defined_cost_fn: Option Option>, + ) -> CostResult, CryptoHash)>, Error> { + if allow_cache { + self.get_node_fn( + key, + |node| { + (node.value_as_slice().to_vec(), *node.value_hash()) + .wrap_with_cost(OperationCost::default()) + }, + value_defined_cost_fn, + ) + } else { + self.get_node_direct_fn( + key, + |node| { + (node.value_as_slice().to_vec(), *node.value_hash()) + .wrap_with_cost(OperationCost::default()) + }, + value_defined_cost_fn, + ) + } + } + + /// See if a node's field exists + fn has_node_direct( + &self, + key: &[u8], + value_defined_cost_fn: Option Option>, + ) -> CostResult { + TreeNode::get(&self.storage, key, value_defined_cost_fn).map_ok(|x| x.is_some()) + } + + /// See if a node's field exists + fn has_node( + &self, + key: &[u8], + value_defined_cost_fn: Option Option>, + ) -> CostResult { + self.use_tree(move |maybe_tree| { + let mut cursor = match maybe_tree { + None => return Ok(false).wrap_with_cost(Default::default()), // empty tree + Some(tree) => tree, + }; + + loop { + if key == cursor.key() { + return Ok(true).wrap_with_cost(OperationCost::default()); + } + + let left = key < cursor.key(); + let link = match cursor.link(left) { + None => return Ok(false).wrap_with_cost(Default::default()), // not found + Some(link) => link, + }; + + let maybe_child = link.tree(); + match maybe_child { + None => { + // fetch from RocksDB + break self.has_node_direct(key, value_defined_cost_fn); + } + Some(child) => cursor = child, // traverse to child + } + } + }) + } + + /// Generic way to get a node's field + fn get_node_direct_fn( + &self, + key: &[u8], + f: F, + value_defined_cost_fn: Option Option>, + ) -> CostResult, Error> + where + F: FnOnce(&TreeNode) -> CostContext, + { + TreeNode::get(&self.storage, key, value_defined_cost_fn).flat_map_ok(|maybe_node| { + let mut cost = OperationCost::default(); + Ok(maybe_node.map(|node| f(&node).unwrap_add_cost(&mut 
cost))).wrap_with_cost(cost) + }) + } + + /// Generic way to get a node's field + fn get_node_fn( + &self, + key: &[u8], + f: F, + value_defined_cost_fn: Option Option>, + ) -> CostResult, Error> + where + F: FnOnce(&TreeNode) -> CostContext, + { + self.use_tree(move |maybe_tree| { + let mut cursor = match maybe_tree { + None => return Ok(None).wrap_with_cost(Default::default()), // empty tree + Some(tree) => tree, + }; + + loop { + if key == cursor.key() { + return f(cursor).map(|x| Ok(Some(x))); + } + + let left = key < cursor.key(); + let link = match cursor.link(left) { + None => return Ok(None).wrap_with_cost(Default::default()), // not found + Some(link) => link, + }; + + let maybe_child = link.tree(); + match maybe_child { + None => { + // fetch from RocksDB + break self.get_node_direct_fn(key, f, value_defined_cost_fn); + } + Some(child) => cursor = child, // traverse to child + } + } + }) + } +} + +#[cfg(test)] +mod test { + use crate::{ + test_utils::TempMerk, tree::kv::ValueDefinedCostType, Op, TreeFeatureType::BasicMerkNode, + }; + + #[test] + fn test_has_node_with_empty_tree() { + let mut merk = TempMerk::new(); + + let key = b"something"; + + let result = merk + .has_node(key, None::<&fn(&[u8]) -> Option>) + .unwrap() + .unwrap(); + + assert!(!result); + + let batch_entry = (key, Op::Put(vec![123; 60], BasicMerkNode)); + + let batch = vec![batch_entry]; + + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("should ..."); + + let result = merk + .has_node(key, None::<&fn(&[u8]) -> Option>) + .unwrap() + .unwrap(); + + assert!(result); + } +} diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index 37276b65..93c052a4 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -34,92 +34,46 @@ pub(crate) mod defaults; pub mod options; +pub mod apply; +pub mod clear; +pub mod committer; +pub mod get; +pub mod open; +pub mod prove; pub mod restore; +pub mod source; use std::{ cell::Cell, - cmp::Ordering, collections::{BTreeSet, 
LinkedList}, fmt, }; +use committer::MerkCommitter; use grovedb_costs::{ cost_return_on_error, cost_return_on_error_default, cost_return_on_error_no_add, - storage_cost::{ - key_value_cost::KeyValueStorageCost, - removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, - StorageCost, - }, - ChildrenSizesWithValue, CostContext, CostResult, CostsExt, FeatureSumLength, OperationCost, + storage_cost::key_value_cost::KeyValueStorageCost, ChildrenSizesWithValue, CostContext, + CostResult, CostsExt, FeatureSumLength, OperationCost, }; use grovedb_storage::{self, Batch, RawIterator, StorageContext}; +use source::MerkSource; use crate::{ error::Error, - merk::{ - defaults::{MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES, ROOT_KEY_KEY}, - options::MerkOptions, - }, - proofs::{encode_into, query::query_item::QueryItem, Op as ProofOp, Query}, + merk::{defaults::ROOT_KEY_KEY, options::MerkOptions}, + proofs::{query::query_item::QueryItem, Query}, tree::{ - kv::{ValueDefinedCostType, KV}, - AuxMerkBatch, Commit, CryptoHash, Fetch, Link, MerkBatch, Op, RefWalker, Tree, Walker, - NULL_HASH, + kv::ValueDefinedCostType, AuxMerkBatch, CryptoHash, Op, RefWalker, TreeNode, NULL_HASH, }, Error::{CostsError, EdError, StorageError}, MerkType::{BaseMerk, LayeredMerk, StandaloneMerk}, - TreeFeatureType, }; -type Proof = (LinkedList, Option, Option); - -/// Proof construction result -pub struct ProofConstructionResult { - /// Proof - pub proof: Vec, - /// Limit - pub limit: Option, - /// Offset - pub offset: Option, -} - -impl ProofConstructionResult { - /// New ProofConstructionResult - pub fn new(proof: Vec, limit: Option, offset: Option) -> Self { - Self { - proof, - limit, - offset, - } - } -} - -/// Proof without encoding result -pub struct ProofWithoutEncodingResult { - /// Proof - pub proof: LinkedList, - /// Limit - pub limit: Option, - /// Offset - pub offset: Option, -} - -impl ProofWithoutEncodingResult { - /// New ProofWithoutEncodingResult - pub fn new(proof: LinkedList, 
limit: Option, offset: Option) -> Self { - Self { - proof, - limit, - offset, - } - } -} - /// Key update types pub struct KeyUpdates { pub new_keys: BTreeSet>, pub updated_keys: BTreeSet>, - pub deleted_keys: LinkedList<(Vec, Option)>, + pub deleted_keys: LinkedList<(Vec, KeyValueStorageCost)>, pub updated_root_key_from: Option>, } @@ -128,7 +82,7 @@ impl KeyUpdates { pub fn new( new_keys: BTreeSet>, updated_keys: BTreeSet>, - deleted_keys: LinkedList<(Vec, Option)>, + deleted_keys: LinkedList<(Vec, KeyValueStorageCost)>, updated_root_key_from: Option>, ) -> Self { Self { @@ -145,7 +99,7 @@ pub type BatchValue = ( Vec, Option, ChildrenSizesWithValue, - Option, + KeyValueStorageCost, ); /// A bool type @@ -272,7 +226,7 @@ impl MerkType { /// A handle to a Merkle key/value store backed by RocksDB. pub struct Merk { - pub(crate) tree: Cell>, + pub(crate) tree: Cell>, pub(crate) root_tree_key: Cell>>, /// Storage pub storage: S, @@ -294,7 +248,7 @@ pub type UseTreeMutResult = CostResult< Vec, Option, ChildrenSizesWithValue, - Option, + KeyValueStorageCost, )>, Error, >; @@ -303,291 +257,6 @@ impl<'db, S> Merk where S: StorageContext<'db>, { - /// Open empty tree - pub fn open_empty(storage: S, merk_type: MerkType, is_sum_tree: bool) -> Self { - Self { - tree: Cell::new(None), - root_tree_key: Cell::new(None), - storage, - merk_type, - is_sum_tree, - } - } - - /// Open standalone tree - pub fn open_standalone(storage: S, is_sum_tree: bool) -> CostResult { - let mut merk = Self { - tree: Cell::new(None), - root_tree_key: Cell::new(None), - storage, - merk_type: StandaloneMerk, - is_sum_tree, - }; - - merk.load_base_root().map_ok(|_| merk) - } - - /// Open base tree - pub fn open_base(storage: S, is_sum_tree: bool) -> CostResult { - let mut merk = Self { - tree: Cell::new(None), - root_tree_key: Cell::new(None), - storage, - merk_type: BaseMerk, - is_sum_tree, - }; - - merk.load_base_root().map_ok(|_| merk) - } - - /// Open layered tree with root key - pub fn 
open_layered_with_root_key( - storage: S, - root_key: Option>, - is_sum_tree: bool, - ) -> CostResult { - let mut merk = Self { - tree: Cell::new(None), - root_tree_key: Cell::new(root_key), - storage, - merk_type: LayeredMerk, - is_sum_tree, - }; - - merk.load_root().map_ok(|_| merk) - } - - /// Deletes tree data - pub fn clear(&mut self) -> CostResult<(), Error> { - let mut cost = OperationCost::default(); - - let mut iter = self.storage.raw_iter(); - iter.seek_to_first().unwrap_add_cost(&mut cost); - - let mut to_delete = self.storage.new_batch(); - while iter.valid().unwrap_add_cost(&mut cost) { - if let Some(key) = iter.key().unwrap_add_cost(&mut cost) { - // todo: deal with cost reimbursement - to_delete.delete(key, None); - } - iter.next().unwrap_add_cost(&mut cost); - } - cost_return_on_error!( - &mut cost, - self.storage.commit_batch(to_delete).map_err(StorageError) - ); - self.tree.set(None); - Ok(()).wrap_with_cost(cost) - } - - /// Gets an auxiliary value. - pub fn get_aux(&self, key: &[u8]) -> CostResult>, Error> { - self.storage.get_aux(key).map_err(StorageError) - } - - /// Returns if the value at the given key exists - /// - /// Note that this is essentially the same as a normal RocksDB `get`, so - /// should be a fast operation and has almost no tree overhead. - pub fn exists(&self, key: &[u8]) -> CostResult { - self.has_node_direct(key) - } - - /// Returns if the value at the given key exists - /// - /// Note that this is essentially the same as a normal RocksDB `get`, so - /// should be a fast operation and has almost no tree overhead. - /// Contrary to a simple exists, this traverses the tree and can be faster - /// if the tree is cached, but slower if it is not - pub fn exists_by_traversing_tree(&self, key: &[u8]) -> CostResult { - self.has_node(key) - } - - /// Gets a value for the given key. If the key is not found, `None` is - /// returned. 
- /// - /// Note that this is essentially the same as a normal RocksDB `get`, so - /// should be a fast operation and has almost no tree overhead. - pub fn get(&self, key: &[u8], allow_cache: bool) -> CostResult>, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - node.value_as_slice() - .to_vec() - .wrap_with_cost(Default::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - node.value_as_slice() - .to_vec() - .wrap_with_cost(Default::default()) - }) - } - } - - /// Returns the feature type for the node at the given key. - pub fn get_feature_type( - &self, - key: &[u8], - allow_cache: bool, - ) -> CostResult, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - node.feature_type().wrap_with_cost(Default::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - node.feature_type().wrap_with_cost(Default::default()) - }) - } - } - - /// Gets a hash of a node by a given key, `None` is returned in case - /// when node not found by the key. - pub fn get_hash(&self, key: &[u8], allow_cache: bool) -> CostResult, Error> { - if allow_cache { - self.get_node_fn(key, |node| node.hash()) - } else { - self.get_node_direct_fn(key, |node| node.hash()) - } - } - - /// Gets the value hash of a node by a given key, `None` is returned in case - /// when node not found by the key. - pub fn get_value_hash( - &self, - key: &[u8], - allow_cache: bool, - ) -> CostResult, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - (*node.value_hash()).wrap_with_cost(OperationCost::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - (*node.value_hash()).wrap_with_cost(OperationCost::default()) - }) - } - } - - /// Gets a hash of a node by a given key, `None` is returned in case - /// when node not found by the key. 
- pub fn get_kv_hash( - &self, - key: &[u8], - allow_cache: bool, - ) -> CostResult, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()) - }) - } - } - - /// Gets the value and value hash of a node by a given key, `None` is - /// returned in case when node not found by the key. - pub fn get_value_and_value_hash( - &self, - key: &[u8], - allow_cache: bool, - ) -> CostResult, CryptoHash)>, Error> { - if allow_cache { - self.get_node_fn(key, |node| { - (node.value_as_slice().to_vec(), *node.value_hash()) - .wrap_with_cost(OperationCost::default()) - }) - } else { - self.get_node_direct_fn(key, |node| { - (node.value_as_slice().to_vec(), *node.value_hash()) - .wrap_with_cost(OperationCost::default()) - }) - } - } - - /// See if a node's field exists - fn has_node_direct(&self, key: &[u8]) -> CostResult { - Tree::get(&self.storage, key).map_ok(|x| x.is_some()) - } - - /// See if a node's field exists - fn has_node(&self, key: &[u8]) -> CostResult { - self.use_tree(move |maybe_tree| { - let mut cursor = match maybe_tree { - None => return Ok(false).wrap_with_cost(Default::default()), // empty tree - Some(tree) => tree, - }; - - loop { - if key == cursor.key() { - return Ok(true).wrap_with_cost(OperationCost::default()); - } - - let left = key < cursor.key(); - let link = match cursor.link(left) { - None => return Ok(false).wrap_with_cost(Default::default()), // not found - Some(link) => link, - }; - - let maybe_child = link.tree(); - match maybe_child { - None => { - // fetch from RocksDB - break self.has_node_direct(key); - } - Some(child) => cursor = child, // traverse to child - } - } - }) - } - - /// Generic way to get a node's field - fn get_node_direct_fn(&self, key: &[u8], f: F) -> CostResult, Error> - where - F: FnOnce(&Tree) -> CostContext, - { - 
Tree::get(&self.storage, key).flat_map_ok(|maybe_node| { - let mut cost = OperationCost::default(); - Ok(maybe_node.map(|node| f(&node).unwrap_add_cost(&mut cost))).wrap_with_cost(cost) - }) - } - - /// Generic way to get a node's field - fn get_node_fn(&self, key: &[u8], f: F) -> CostResult, Error> - where - F: FnOnce(&Tree) -> CostContext, - { - self.use_tree(move |maybe_tree| { - let mut cursor = match maybe_tree { - None => return Ok(None).wrap_with_cost(Default::default()), // empty tree - Some(tree) => tree, - }; - - loop { - if key == cursor.key() { - return f(cursor).map(|x| Ok(Some(x))); - } - - let left = key < cursor.key(); - let link = match cursor.link(left) { - None => return Ok(None).wrap_with_cost(Default::default()), // not found - Some(link) => link, - }; - - let maybe_child = link.tree(); - match maybe_child { - None => { - // fetch from RocksDB - break self.get_node_direct_fn(key, f); - } - Some(child) => cursor = child, // traverse to child - } - } - }) - } - /// Returns the root hash of the tree (a digest for the entire store which /// proofs can be checked against). If the tree is empty, returns the null /// hash (zero-filled). @@ -625,384 +294,6 @@ where }) } - /// Applies a batch of operations (puts and deletes) to the tree. - /// - /// This will fail if the keys in `batch` are not sorted and unique. This - /// check creates some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `apply_unchecked` for a small performance - /// gain. 
- /// - /// # Example - /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply::<_, Vec<_>>(&[(vec![4,5,6], Op::Put(vec![0], BasicMerk))], &[], None) - /// .unwrap().expect(""); - /// - /// use grovedb_merk::Op; - /// use grovedb_merk::TreeFeatureType::BasicMerk; - /// - /// let batch = &[ - /// // puts value [4,5,6] to key[1,2,3] - /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk)), - /// // deletes key [4,5,6] - /// (vec![4, 5, 6], Op::Delete), - /// ]; - /// store.apply::<_, Vec<_>>(batch, &[], None).unwrap().expect(""); - /// ``` - pub fn apply( - &mut self, - batch: &MerkBatch, - aux: &AuxMerkBatch, - options: Option, - ) -> CostResult<(), Error> - where - KB: AsRef<[u8]>, - KA: AsRef<[u8]>, - { - let use_sum_nodes = self.is_sum_tree; - self.apply_with_costs_just_in_time_value_update( - batch, - aux, - options, - &|key, value| { - Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( - key.len() as u32, - value.len() as u32, - use_sum_nodes, - )) - }, - &mut |_costs, _old_value, _value| Ok((false, None)), - &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { - Ok(( - BasicStorageRemoval(key_bytes_to_remove), - BasicStorageRemoval(value_bytes_to_remove), - )) - }, - ) - } - - /// Applies a batch of operations (puts and deletes) to the tree. - /// - /// This will fail if the keys in `batch` are not sorted and unique. This - /// check creates some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `apply_unchecked` for a small performance - /// gain. 
- /// - /// # Example - /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply::<_, Vec<_>>(&[(vec![4,5,6], Op::Put(vec![0], BasicMerk))], &[], None) - /// .unwrap().expect(""); - /// - /// use grovedb_merk::Op; - /// use grovedb_merk::TreeFeatureType::BasicMerk; - /// - /// let batch = &[ - /// // puts value [4,5,6] to key[1,2,3] - /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk)), - /// // deletes key [4,5,6] - /// (vec![4, 5, 6], Op::Delete), - /// ]; - /// store.apply::<_, Vec<_>>(batch, &[], None).unwrap().expect(""); - /// ``` - pub fn apply_with_specialized_costs( - &mut self, - batch: &MerkBatch, - aux: &AuxMerkBatch, - options: Option, - old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - ) -> CostResult<(), Error> - where - KB: AsRef<[u8]>, - KA: AsRef<[u8]>, - { - self.apply_with_costs_just_in_time_value_update( - batch, - aux, - options, - old_specialized_cost, - &mut |_costs, _old_value, _value| Ok((false, None)), - &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { - Ok(( - BasicStorageRemoval(key_bytes_to_remove), - BasicStorageRemoval(value_bytes_to_remove), - )) - }, - ) - } - - /// Applies a batch of operations (puts and deletes) to the tree with the - /// ability to update values based on costs. - /// - /// This will fail if the keys in `batch` are not sorted and unique. This - /// check creates some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `apply_unchecked` for a small performance - /// gain. 
- /// - /// # Example - /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( - /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerk))], - /// &[], - /// None, - /// &|k, v| Ok(0), - /// &mut |s, v, o| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) - /// ).unwrap().expect(""); - /// - /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; - /// use grovedb_merk::Op; - /// use grovedb_merk::TreeFeatureType::BasicMerk; - /// - /// let batch = &[ - /// // puts value [4,5,6] to key[1,2,3] - /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk)), - /// // deletes key [4,5,6] - /// (vec![4, 5, 6], Op::Delete), - /// ]; - /// - /// store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( - /// batch, - /// &[], - /// None, - /// &|k, v| Ok(0), - /// &mut |s, v, o| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) - /// ).unwrap().expect(""); - /// ``` - pub fn apply_with_costs_just_in_time_value_update( - &mut self, - batch: &MerkBatch, - aux: &AuxMerkBatch, - options: Option, - old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, - ) -> CostResult<(), Error> - where - KB: AsRef<[u8]>, - KA: AsRef<[u8]>, - { - // ensure keys in batch are sorted and unique - let mut maybe_prev_key: Option<&KB> = None; - for (key, ..) 
in batch.iter() { - if let Some(prev_key) = maybe_prev_key { - match prev_key.as_ref().cmp(key.as_ref()) { - Ordering::Greater => { - return Err(Error::InvalidInputError("Keys in batch must be sorted")) - .wrap_with_cost(Default::default()) - } - Ordering::Equal => { - return Err(Error::InvalidInputError("Keys in batch must be unique")) - .wrap_with_cost(Default::default()) - } - _ => (), - } - } - maybe_prev_key = Some(key); - } - - self.apply_unchecked( - batch, - aux, - options, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes, - ) - } - - /// Applies a batch of operations (puts and deletes) to the tree. - /// - /// # Safety - /// This is unsafe because the keys in `batch` must be sorted and unique - - /// if they are not, there will be undefined behavior. For a safe version of - /// this method which checks to ensure the batch is sorted and unique, see - /// `apply`. - /// - /// # Example - /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( - /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerk))], - /// &[], - /// None, - /// &|k, v| Ok(0), - /// &mut |s, o, v| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) - /// ).unwrap().expect(""); - /// - /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; - /// use grovedb_merk::Op; - /// use grovedb_merk::TreeFeatureType::BasicMerk; - /// - /// let batch = &[ - /// // puts value [4,5,6] to key [1,2,3] - /// (vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk)), - /// // deletes key [4,5,6] - /// (vec![4, 5, 6], Op::Delete), - /// ]; - /// unsafe { store.apply_unchecked::<_, Vec<_>, _, _, _>( /// /// /// - /// batch, - /// &[], - /// None, - /// &|k, v| Ok(0), - /// &mut |s, o, v| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) - /// ).unwrap().expect(""); - /// } - /// ``` - pub fn apply_unchecked( - 
&mut self, - batch: &MerkBatch, - aux: &AuxMerkBatch, - options: Option, - old_specialized_cost: &C, - update_tree_value_based_on_costs: &mut U, - section_removal_bytes: &mut R, - ) -> CostResult<(), Error> - where - KB: AsRef<[u8]>, - KA: AsRef<[u8]>, - C: Fn(&Vec, &Vec) -> Result, - U: FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result<(bool, Option), Error>, - R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, - { - let maybe_walker = self - .tree - .take() - .take() - .map(|tree| Walker::new(tree, self.source())); - - Walker::apply_to( - maybe_walker, - batch, - self.source(), - old_specialized_cost, - section_removal_bytes, - ) - .flat_map_ok(|(maybe_tree, key_updates)| { - // we set the new root node of the merk tree - self.tree.set(maybe_tree); - // commit changes to db - self.commit( - key_updates, - aux, - options, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes, - ) - }) - } - - /// Creates a Merkle proof for the list of queried keys. For each key in the - /// query, if the key is found in the store then the value will be proven to - /// be in the tree. For each key in the query that does not exist in the - /// tree, its absence will be proven by including boundary keys. - /// - /// The proof returned is in an encoded format which can be verified with - /// `merk::verify`. - /// - /// This will fail if the keys in `query` are not sorted and unique. This - /// check adds some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `prove_unchecked` for a small performance - /// gain. 
- pub fn prove( - &self, - query: Query, - limit: Option, - offset: Option, - ) -> CostResult { - let left_to_right = query.left_to_right; - self.prove_unchecked(query, limit, offset, left_to_right) - .map_ok(|(proof, limit, offset)| { - let mut bytes = Vec::with_capacity(128); - encode_into(proof.iter(), &mut bytes); - ProofConstructionResult::new(bytes, limit, offset) - }) - } - - /// Creates a Merkle proof for the list of queried keys. For each key in the - /// query, if the key is found in the store then the value will be proven to - /// be in the tree. For each key in the query that does not exist in the - /// tree, its absence will be proven by including boundary keys. - /// - /// The proof returned is in an intermediate format to be later encoded - /// - /// This will fail if the keys in `query` are not sorted and unique. This - /// check adds some overhead, so if you are sure your batch is sorted and - /// unique you can use the unsafe `prove_unchecked` for a small performance - /// gain. - pub fn prove_without_encoding( - &self, - query: Query, - limit: Option, - offset: Option, - ) -> CostResult { - let left_to_right = query.left_to_right; - self.prove_unchecked(query, limit, offset, left_to_right) - .map_ok(|(proof, limit, offset)| ProofWithoutEncodingResult::new(proof, limit, offset)) - } - - /// Creates a Merkle proof for the list of queried keys. For each key in - /// the query, if the key is found in the store then the value will be - /// proven to be in the tree. For each key in the query that does not - /// exist in the tree, its absence will be proven by including - /// boundary keys. - /// The proof returned is in an encoded format which can be verified with - /// `merk::verify`. - /// - /// This is unsafe because the keys in `query` must be sorted and unique - - /// if they are not, there will be undefined behavior. For a safe version - /// of this method which checks to ensure the batch is sorted and - /// unique, see `prove`. 
- pub fn prove_unchecked( - &self, - query: I, - limit: Option, - offset: Option, - left_to_right: bool, - ) -> CostResult - where - Q: Into, - I: IntoIterator, - { - let query_vec: Vec = query.into_iter().map(Into::into).collect(); - - self.use_tree_mut(|maybe_tree| { - maybe_tree - .ok_or(Error::CorruptedCodeExecution( - "Cannot create proof for empty tree", - )) - .wrap_with_cost(Default::default()) - .flat_map_ok(|tree| { - let mut ref_walker = RefWalker::new(tree, self.source()); - ref_walker.create_proof(query_vec.as_slice(), limit, offset, left_to_right) - }) - .map_ok(|(proof, _, limit, offset, ..)| (proof, limit, offset)) - }) - } - /// Commit tree changes pub fn commit( &mut self, @@ -1010,22 +301,6 @@ where aux: &AuxMerkBatch, options: Option, old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, ) -> CostResult<(), Error> where K: AsRef<[u8]>, @@ -1042,12 +317,7 @@ where let mut committer = MerkCommitter::new(tree.height(), 100); cost_return_on_error!( &mut inner_cost, - tree.commit( - &mut committer, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes - ) + tree.commit(&mut committer, old_specialized_cost) ); let tree_key = tree.key(); @@ -1109,7 +379,7 @@ where to_batch.push((key, None, None, maybe_cost)); } to_batch.sort_by(|a, b| a.0.cmp(&b.0)); - for (key, maybe_sum_tree_cost, maybe_value, maybe_cost) in to_batch { + for (key, maybe_sum_tree_cost, maybe_value, storage_cost) in to_batch { if let Some((value, left_size, right_size)) = maybe_value { cost_return_on_error_no_add!( &cost, @@ -1118,12 +388,12 @@ where &key, &value, Some((maybe_sum_tree_cost, left_size, right_size)), - maybe_cost + Some(storage_cost) ) .map_err(CostsError) ); } else { - 
batch.delete(&key, maybe_cost); + batch.delete(&key, Some(storage_cost)); } } @@ -1186,22 +456,15 @@ where true.wrap_with_cost(cost) } - fn source(&self) -> MerkSource { - MerkSource { - storage: &self.storage, - is_sum_tree: self.is_sum_tree, - } - } - /// Use tree - pub(crate) fn use_tree(&self, f: impl FnOnce(Option<&Tree>) -> T) -> T { + pub(crate) fn use_tree(&self, f: impl FnOnce(Option<&TreeNode>) -> T) -> T { let tree = self.tree.take(); let res = f(tree.as_ref()); self.tree.set(tree); res } - fn use_tree_mut(&self, mut f: impl FnMut(Option<&mut Tree>) -> T) -> T { + fn use_tree_mut(&self, mut f: impl FnMut(Option<&mut TreeNode>) -> T) -> T { let mut tree = self.tree.take(); let res = f(tree.as_mut()); self.tree.set(tree); @@ -1228,7 +491,10 @@ where /// Loads the Merk from the base root key /// The base root key should only be used if the Merk tree is independent /// Meaning that it doesn't have a parent Merk - pub(crate) fn load_base_root(&mut self) -> CostResult<(), Error> { + pub(crate) fn load_base_root( + &mut self, + value_defined_cost_fn: Option Option>, + ) -> CostResult<(), Error> { self.storage .get_root(ROOT_KEY_KEY) .map(|root_result| root_result.map_err(Error::StorageError)) @@ -1237,12 +503,14 @@ where if let Some(tree_root_key) = tree_root_key_opt { // Trying to build a tree out of it, costs will be accumulated because // `Tree::get` returns `CostContext` and this call happens inside `flat_map_ok`. 
- Tree::get(&self.storage, tree_root_key).map_ok(|tree| { - if let Some(t) = tree.as_ref() { - self.root_tree_key = Cell::new(Some(t.key().to_vec())); - } - self.tree = Cell::new(tree); - }) + TreeNode::get(&self.storage, tree_root_key, value_defined_cost_fn).map_ok( + |tree| { + if let Some(t) = tree.as_ref() { + self.root_tree_key = Cell::new(Some(t.key().to_vec())); + } + self.tree = Cell::new(tree); + }, + ) } else { Ok(()).wrap_with_cost(Default::default()) } @@ -1252,12 +520,15 @@ where /// Loads the Merk from it's parent root key /// The base root key should only be used if the Merk tree is independent /// Meaning that it doesn't have a parent Merk - pub(crate) fn load_root(&mut self) -> CostResult<(), Error> { + pub(crate) fn load_root( + &mut self, + value_defined_cost_fn: Option Option>, + ) -> CostResult<(), Error> { // In case of successful seek for root key check if it exists if let Some(tree_root_key) = self.root_tree_key.get_mut() { // Trying to build a tree out of it, costs will be accumulated because // `Tree::get` returns `CostContext` and this call happens inside `flat_map_ok`. - Tree::get(&self.storage, tree_root_key).map_ok(|tree| { + TreeNode::get(&self.storage, tree_root_key, value_defined_cost_fn).map_ok(|tree| { self.tree = Cell::new(tree); }) } else { @@ -1267,182 +538,37 @@ where } } -fn fetch_node<'db>(db: &impl StorageContext<'db>, key: &[u8]) -> Result, Error> { +fn fetch_node<'db>( + db: &impl StorageContext<'db>, + key: &[u8], + value_defined_cost_fn: Option Option>, +) -> Result, Error> { let bytes = db.get(key).unwrap().map_err(StorageError)?; // TODO: get_pinned ? 
if let Some(bytes) = bytes { - Ok(Some(Tree::decode(key.to_vec(), &bytes).map_err(EdError)?)) + Ok(Some( + TreeNode::decode(key.to_vec(), &bytes, value_defined_cost_fn).map_err(EdError)?, + )) } else { Ok(None) } } -// impl Clone for Merk { -// fn clone(&self) -> Self { -// let tree_clone = match self.tree.take() { -// None => None, -// Some(tree) => { -// let clone = tree.clone(); -// self.tree.set(Some(tree)); -// Some(clone) -// } -// }; -// Self { -// tree: Cell::new(tree_clone), -// storage_cost: self.storage_cost.clone(), -// } -// } -// } - // // TODO: get rid of Fetch/source and use GroveDB storage_cost abstraction -#[derive(Debug)] -pub struct MerkSource<'s, S> { - storage: &'s S, - is_sum_tree: bool, -} - -impl<'s, S> Clone for MerkSource<'s, S> { - fn clone(&self) -> Self { - MerkSource { - storage: self.storage, - is_sum_tree: self.is_sum_tree, - } - } -} - -impl<'s, 'db, S> Fetch for MerkSource<'s, S> -where - S: StorageContext<'db>, -{ - fn fetch(&self, link: &Link) -> CostResult { - Tree::get(self.storage, link.key()) - .map_ok(|x| x.ok_or(Error::KeyNotFoundError("Key not found for fetch"))) - .flatten() - } -} - -struct MerkCommitter { - /// The batch has a key, maybe a value, with the value bytes, maybe the left - /// child size and maybe the right child size, then the - /// key_value_storage_cost - batch: Vec, - height: u8, - levels: u8, -} - -impl MerkCommitter { - fn new(height: u8, levels: u8) -> Self { - Self { - batch: Vec::with_capacity(10000), - height, - levels, - } - } -} - -impl Commit for MerkCommitter { - fn write( - &mut self, - tree: &mut Tree, - old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, - ) -> Result<(), Error> { - let tree_size = 
tree.encoding_length(); - let (mut current_tree_plus_hook_size, mut storage_costs) = - tree.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)?; - let mut i = 0; - - if let Some(old_value) = tree.old_value.clone() { - // At this point the tree value can be updated based on client requirements - // For example to store the costs - loop { - let (flags_changed, value_defined_cost) = update_tree_value_based_on_costs( - &storage_costs.value_storage_cost, - &old_value, - tree.value_mut_ref(), - )?; - if !flags_changed { - break; - } else { - tree.inner.kv.value_defined_cost = value_defined_cost; - let after_update_tree_plus_hook_size = - tree.value_encoding_length_with_parent_to_child_reference(); - if after_update_tree_plus_hook_size == current_tree_plus_hook_size { - break; - } - let new_size_and_storage_costs = - tree.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)?; - current_tree_plus_hook_size = new_size_and_storage_costs.0; - storage_costs = new_size_and_storage_costs.1; - } - if i > MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES { - return Err(Error::CyclicError( - "updated value based on costs too many times", - )); - } - i += 1; - } - - if let BasicStorageRemoval(removed_bytes) = - storage_costs.value_storage_cost.removed_bytes - { - let (_, value_removed_bytes) = section_removal_bytes(&old_value, 0, removed_bytes)?; - storage_costs.value_storage_cost.removed_bytes = value_removed_bytes; - } - } - - // Update old tree size after generating value storage_cost cost - tree.old_size_with_parent_to_child_hook = current_tree_plus_hook_size; - tree.old_value = Some(tree.value_ref().clone()); - - let mut buf = Vec::with_capacity(tree_size); - tree.encode_into(&mut buf); - - let left_child_sizes = tree.child_ref_and_sum_size(true); - let right_child_sizes = tree.child_ref_and_sum_size(false); - self.batch.push(( - tree.key().to_vec(), - tree.feature_type().sum_length(), - Some((buf, left_child_sizes, right_child_sizes)), - Some(storage_costs), - )); 
- Ok(()) - } - - fn prune(&self, tree: &Tree) -> (bool, bool) { - // keep N top levels of tree - let prune = (self.height - tree.height()) >= self.levels; - (prune, prune) - } -} - #[cfg(test)] mod test { - use grovedb_costs::OperationCost; use grovedb_path::SubtreePath; use grovedb_storage::{ - rocksdb_storage::{test_utils::TempStorage, PrefixedRocksDbStorageContext, RocksDbStorage}, + rocksdb_storage::{PrefixedRocksDbStorageContext, RocksDbStorage}, RawIterator, Storage, StorageBatch, StorageContext, }; use tempfile::TempDir; - use super::{Merk, MerkSource, RefWalker}; - use crate::{test_utils::*, Op, TreeFeatureType::BasicMerk}; + use super::{Merk, RefWalker}; + use crate::{ + merk::source::MerkSource, test_utils::*, tree::kv::ValueDefinedCostType, Op, + TreeFeatureType::BasicMerkNode, + }; // TODO: Close and then reopen test @@ -1453,97 +579,6 @@ mod test { }) } - #[test] - fn test_reopen_root_hash() { - let tmp_dir = TempDir::new().expect("cannot open tempdir"); - let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) - .expect("cannot open rocksdb storage"); - let test_prefix = [b"ayy"]; - - let batch = StorageBatch::new(); - let mut merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::from(test_prefix.as_ref()), Some(&batch)) - .unwrap(), - false, - ) - .unwrap() - .unwrap(); - - merk.apply::<_, Vec<_>>( - &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk))], - &[], - None, - ) - .unwrap() - .expect("apply failed"); - - let root_hash = merk.root_hash(); - - storage - .commit_multi_context_batch(batch, None) - .unwrap() - .expect("cannot commit batch"); - - let merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::from(test_prefix.as_ref()), None) - .unwrap(), - false, - ) - .unwrap() - .unwrap(); - assert_eq!(merk.root_hash(), root_hash); - } - - #[test] - fn test_open_fee() { - let storage = TempStorage::new(); - let batch = StorageBatch::new(); - - let merk_fee_context = Merk::open_base( - storage - 
.get_storage_context(SubtreePath::empty(), Some(&batch)) - .unwrap(), - false, - ); - - // Opening not existing merk should cost only root key seek (except context - // creation) - assert!(matches!( - merk_fee_context.cost(), - OperationCost { seek_count: 1, .. } - )); - - let mut merk = merk_fee_context.unwrap().unwrap(); - merk.apply::<_, Vec<_>>( - &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk))], - &[], - None, - ) - .unwrap() - .expect("apply failed"); - - storage - .commit_multi_context_batch(batch, None) - .unwrap() - .expect("cannot commit batch"); - - let merk_fee_context = Merk::open_base( - storage - .get_storage_context(SubtreePath::empty(), None) - .unwrap(), - false, - ); - - // Opening existing merk should cost two seeks. (except context creation) - assert!(matches!( - merk_fee_context.cost(), - OperationCost { seek_count: 2, .. } - )); - assert!(merk_fee_context.cost().storage_loaded_bytes > 0); - } - #[test] fn simple_insert_apply() { let batch_size = 20; @@ -1581,29 +616,6 @@ mod test { assert_invariants(&merk); } - #[test] - fn test_has_node_with_empty_tree() { - let mut merk = TempMerk::new(); - - let key = b"something"; - - let result = merk.has_node(key).unwrap().unwrap(); - - assert!(!result); - - let batch_entry = (key, Op::Put(vec![123; 60], BasicMerk)); - - let batch = vec![batch_entry]; - - merk.apply::<_, Vec<_>>(&batch, &[], None) - .unwrap() - .expect("should ..."); - - let result = merk.has_node(key).unwrap().unwrap(); - - assert!(result); - } - #[test] fn insert_two() { let tree_size = 2; @@ -1656,7 +668,7 @@ mod test { let mut merk = TempMerk::new(); merk.apply::, _>( &[], - &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerk), None)], + &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode), None)], None, ) .unwrap() @@ -1672,27 +684,55 @@ mod test { let mut merk = TempMerk::new(); // no root - assert!(merk.get(&[1, 2, 3], true).unwrap().unwrap().is_none()); + assert!(merk + .get( + &[1, 2, 3], + true, + None::<&fn(&[u8]) -> 
Option> + ) + .unwrap() + .unwrap() + .is_none()); // cached - merk.apply::<_, Vec<_>>(&[(vec![5, 5, 5], Op::Put(vec![], BasicMerk))], &[], None) + merk.apply::<_, Vec<_>>( + &[(vec![5, 5, 5], Op::Put(vec![], BasicMerkNode))], + &[], + None, + ) + .unwrap() + .unwrap(); + assert!(merk + .get( + &[1, 2, 3], + true, + None::<&fn(&[u8]) -> Option> + ) .unwrap() - .unwrap(); - assert!(merk.get(&[1, 2, 3], true).unwrap().unwrap().is_none()); + .unwrap() + .is_none()); // uncached merk.apply::<_, Vec<_>>( &[ - (vec![0, 0, 0], Op::Put(vec![], BasicMerk)), - (vec![1, 1, 1], Op::Put(vec![], BasicMerk)), - (vec![2, 2, 2], Op::Put(vec![], BasicMerk)), + (vec![0, 0, 0], Op::Put(vec![], BasicMerkNode)), + (vec![1, 1, 1], Op::Put(vec![], BasicMerkNode)), + (vec![2, 2, 2], Op::Put(vec![], BasicMerkNode)), ], &[], None, ) .unwrap() .unwrap(); - assert!(merk.get(&[3, 3, 3], true).unwrap().unwrap().is_none()); + assert!(merk + .get( + &[3, 3, 3], + true, + None::<&fn(&[u8]) -> Option> + ) + .unwrap() + .unwrap() + .is_none()); } // TODO: what this test should do? 
@@ -1706,6 +746,7 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -1729,6 +770,7 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -1746,10 +788,18 @@ mod test { nodes: &mut Vec>, ) { nodes.push(node.tree().encode()); - if let Some(c) = node.walk(true).unwrap().unwrap() { + if let Some(c) = node + .walk(true, None::<&fn(&[u8]) -> Option>) + .unwrap() + .unwrap() + { collect(c, nodes); } - if let Some(c) = node.walk(false).unwrap().unwrap() { + if let Some(c) = node + .walk(false, None::<&fn(&[u8]) -> Option>) + .unwrap() + .unwrap() + { collect(c, nodes); } } @@ -1765,6 +815,7 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -1782,6 +833,7 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -1801,6 +853,7 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -1818,10 +871,7 @@ mod test { #[test] fn reopen_iter() { - fn collect<'db, 'ctx>( - iter: PrefixedStorageIter<'db, 'ctx>, - nodes: &mut Vec<(Vec, Vec)>, - ) { + fn collect(iter: PrefixedStorageIter<'_, '_>, nodes: &mut Vec<(Vec, Vec)>) { while iter.valid().unwrap() { nodes.push(( iter.key().unwrap().unwrap().to_vec(), @@ -1841,6 +891,7 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -1860,6 +911,7 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -1873,6 +925,7 
@@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); @@ -1894,19 +947,20 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); merk.apply::<_, Vec<_>>( - &[(b"9".to_vec(), Op::Put(b"a".to_vec(), BasicMerk))], + &[(b"9".to_vec(), Op::Put(b"a".to_vec(), BasicMerkNode))], &[], None, ) .unwrap() .expect("should insert successfully"); merk.apply::<_, Vec<_>>( - &[(b"10".to_vec(), Op::Put(b"a".to_vec(), BasicMerk))], + &[(b"10".to_vec(), Op::Put(b"a".to_vec(), BasicMerkNode))], &[], None, ) @@ -1914,21 +968,29 @@ mod test { .expect("should insert successfully"); let result = merk - .get(b"10".as_slice(), true) + .get( + b"10".as_slice(), + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get successfully"); assert_eq!(result, Some(b"a".to_vec())); // Update the node merk.apply::<_, Vec<_>>( - &[(b"10".to_vec(), Op::Put(b"b".to_vec(), BasicMerk))], + &[(b"10".to_vec(), Op::Put(b"b".to_vec(), BasicMerkNode))], &[], None, ) .unwrap() .expect("should insert successfully"); let result = merk - .get(b"10".as_slice(), true) + .get( + b"10".as_slice(), + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get successfully"); assert_eq!(result, Some(b"b".to_vec())); @@ -1943,20 +1005,25 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .expect("cannot open merk"); // Update the node after dropping merk merk.apply::<_, Vec<_>>( - &[(b"10".to_vec(), Op::Put(b"c".to_vec(), BasicMerk))], + &[(b"10".to_vec(), Op::Put(b"c".to_vec(), BasicMerkNode))], &[], None, ) .unwrap() .expect("should insert successfully"); let result = merk - .get(b"10".as_slice(), true) + .get( + b"10".as_slice(), + true, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("should get 
successfully"); assert_eq!(result, Some(b"c".to_vec())); diff --git a/merk/src/merk/open.rs b/merk/src/merk/open.rs new file mode 100644 index 00000000..af15d596 --- /dev/null +++ b/merk/src/merk/open.rs @@ -0,0 +1,185 @@ +use std::cell::Cell; + +use grovedb_costs::CostResult; +use grovedb_storage::StorageContext; + +use crate::{ + tree::kv::ValueDefinedCostType, + Error, Merk, MerkType, + MerkType::{BaseMerk, LayeredMerk, StandaloneMerk}, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Open empty tree + pub fn open_empty(storage: S, merk_type: MerkType, is_sum_tree: bool) -> Self { + Self { + tree: Cell::new(None), + root_tree_key: Cell::new(None), + storage, + merk_type, + is_sum_tree, + } + } + + /// Open standalone tree + pub fn open_standalone( + storage: S, + is_sum_tree: bool, + value_defined_cost_fn: Option Option>, + ) -> CostResult { + let mut merk = Self { + tree: Cell::new(None), + root_tree_key: Cell::new(None), + storage, + merk_type: StandaloneMerk, + is_sum_tree, + }; + + merk.load_base_root(value_defined_cost_fn).map_ok(|_| merk) + } + + /// Open base tree + pub fn open_base( + storage: S, + is_sum_tree: bool, + value_defined_cost_fn: Option Option>, + ) -> CostResult { + let mut merk = Self { + tree: Cell::new(None), + root_tree_key: Cell::new(None), + storage, + merk_type: BaseMerk, + is_sum_tree, + }; + + merk.load_base_root(value_defined_cost_fn).map_ok(|_| merk) + } + + /// Open layered tree with root key + pub fn open_layered_with_root_key( + storage: S, + root_key: Option>, + is_sum_tree: bool, + value_defined_cost_fn: Option Option>, + ) -> CostResult { + let mut merk = Self { + tree: Cell::new(None), + root_tree_key: Cell::new(root_key), + storage, + merk_type: LayeredMerk, + is_sum_tree, + }; + + merk.load_root(value_defined_cost_fn).map_ok(|_| merk) + } +} + +#[cfg(test)] +mod test { + use grovedb_costs::OperationCost; + use grovedb_path::SubtreePath; + use grovedb_storage::{ + 
rocksdb_storage::{test_utils::TempStorage, RocksDbStorage}, + Storage, StorageBatch, + }; + use tempfile::TempDir; + + use crate::{tree::kv::ValueDefinedCostType, Merk, Op, TreeFeatureType::BasicMerkNode}; + + #[test] + fn test_reopen_root_hash() { + let tmp_dir = TempDir::new().expect("cannot open tempdir"); + let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) + .expect("cannot open rocksdb storage"); + let test_prefix = [b"ayy"]; + + let batch = StorageBatch::new(); + let mut merk = Merk::open_base( + storage + .get_storage_context(SubtreePath::from(test_prefix.as_ref()), Some(&batch)) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + + merk.apply::<_, Vec<_>>( + &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode))], + &[], + None, + ) + .unwrap() + .expect("apply failed"); + + let root_hash = merk.root_hash(); + + storage + .commit_multi_context_batch(batch, None) + .unwrap() + .expect("cannot commit batch"); + + let merk = Merk::open_base( + storage + .get_storage_context(SubtreePath::from(test_prefix.as_ref()), None) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + assert_eq!(merk.root_hash(), root_hash); + } + + #[test] + fn test_open_fee() { + let storage = TempStorage::new(); + let batch = StorageBatch::new(); + + let merk_fee_context = Merk::open_base( + storage + .get_storage_context(SubtreePath::empty(), Some(&batch)) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ); + // Opening not existing merk should cost only root key seek (except context + // creation) + assert!(matches!( + merk_fee_context.cost(), + OperationCost { seek_count: 1, .. 
} + )); + + let mut merk = merk_fee_context.unwrap().unwrap(); + merk.apply::<_, Vec<_>>( + &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode))], + &[], + None, + ) + .unwrap() + .expect("apply failed"); + + storage + .commit_multi_context_batch(batch, None) + .unwrap() + .expect("cannot commit batch"); + + let merk_fee_context = Merk::open_base( + storage + .get_storage_context(SubtreePath::empty(), None) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ); + + // Opening existing merk should cost two seeks. (except context creation) + assert!(matches!( + merk_fee_context.cost(), + OperationCost { seek_count: 2, .. } + )); + assert!(merk_fee_context.cost().storage_loaded_bytes > 0); + } +} diff --git a/merk/src/merk/prove.rs b/merk/src/merk/prove.rs new file mode 100644 index 00000000..7f295534 --- /dev/null +++ b/merk/src/merk/prove.rs @@ -0,0 +1,147 @@ +use std::collections::LinkedList; + +use grovedb_costs::{CostResult, CostsExt}; +use grovedb_storage::StorageContext; + +use crate::{ + proofs::{encode_into, query::QueryItem, Op as ProofOp, Query}, + tree::RefWalker, + Error, Merk, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + /// Creates a Merkle proof for the list of queried keys. For each key in the + /// query, if the key is found in the store then the value will be proven to + /// be in the tree. For each key in the query that does not exist in the + /// tree, its absence will be proven by including boundary keys. + /// + /// The proof returned is in an encoded format which can be verified with + /// `merk::verify`. + /// + /// This will fail if the keys in `query` are not sorted and unique. This + /// check adds some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `prove_unchecked` for a small performance + /// gain. 
+ pub fn prove( + &self, + query: Query, + limit: Option, + offset: Option, + ) -> CostResult { + let left_to_right = query.left_to_right; + self.prove_unchecked(query, limit, offset, left_to_right) + .map_ok(|(proof, limit, offset)| { + let mut bytes = Vec::with_capacity(128); + encode_into(proof.iter(), &mut bytes); + ProofConstructionResult::new(bytes, limit, offset) + }) + } + + /// Creates a Merkle proof for the list of queried keys. For each key in the + /// query, if the key is found in the store then the value will be proven to + /// be in the tree. For each key in the query that does not exist in the + /// tree, its absence will be proven by including boundary keys. + /// + /// The proof returned is in an intermediate format to be later encoded + /// + /// This will fail if the keys in `query` are not sorted and unique. This + /// check adds some overhead, so if you are sure your batch is sorted and + /// unique you can use the unsafe `prove_unchecked` for a small performance + /// gain. + pub fn prove_without_encoding( + &self, + query: Query, + limit: Option, + offset: Option, + ) -> CostResult { + let left_to_right = query.left_to_right; + self.prove_unchecked(query, limit, offset, left_to_right) + .map_ok(|(proof, limit, offset)| ProofWithoutEncodingResult::new(proof, limit, offset)) + } + + /// Creates a Merkle proof for the list of queried keys. For each key in + /// the query, if the key is found in the store then the value will be + /// proven to be in the tree. For each key in the query that does not + /// exist in the tree, its absence will be proven by including + /// boundary keys. + /// The proof returned is in an encoded format which can be verified with + /// `merk::verify`. + /// + /// This is unsafe because the keys in `query` must be sorted and unique - + /// if they are not, there will be undefined behavior. For a safe version + /// of this method which checks to ensure the batch is sorted and + /// unique, see `prove`. 
+ pub fn prove_unchecked( + &self, + query: I, + limit: Option, + offset: Option, + left_to_right: bool, + ) -> CostResult + where + Q: Into, + I: IntoIterator, + { + let query_vec: Vec = query.into_iter().map(Into::into).collect(); + + self.use_tree_mut(|maybe_tree| { + maybe_tree + .ok_or(Error::CorruptedCodeExecution( + "Cannot create proof for empty tree", + )) + .wrap_with_cost(Default::default()) + .flat_map_ok(|tree| { + let mut ref_walker = RefWalker::new(tree, self.source()); + ref_walker.create_proof(query_vec.as_slice(), limit, offset, left_to_right) + }) + .map_ok(|(proof, _, limit, offset, ..)| (proof, limit, offset)) + }) + } +} + +type Proof = (LinkedList, Option, Option); + +/// Proof construction result +pub struct ProofConstructionResult { + /// Proof + pub proof: Vec, + /// Limit + pub limit: Option, + /// Offset + pub offset: Option, +} + +impl ProofConstructionResult { + /// New ProofConstructionResult + pub fn new(proof: Vec, limit: Option, offset: Option) -> Self { + Self { + proof, + limit, + offset, + } + } +} + +/// Proof without encoding result +pub struct ProofWithoutEncodingResult { + /// Proof + pub proof: LinkedList, + /// Limit + pub limit: Option, + /// Offset + pub offset: Option, +} + +impl ProofWithoutEncodingResult { + /// New ProofWithoutEncodingResult + pub fn new(proof: LinkedList, limit: Option, offset: Option) -> Self { + Self { + proof, + limit, + offset, + } + } +} diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index 23cef703..e6ac22e2 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -38,18 +38,20 @@ use grovedb_storage::{Batch, StorageContext}; #[cfg(feature = "full")] use super::Merk; #[cfg(feature = "full")] +use crate::merk::source::MerkSource; +use crate::tree::kv::ValueDefinedCostType; +#[cfg(feature = "full")] use crate::{ error::Error, - merk::MerkSource, proofs::{ chunk::{verify_leaf, verify_trunk, MIN_TRUNK_HEIGHT}, tree::{Child, Tree as ProofTree}, Node, Op, }, - 
tree::{combine_hash, value_hash, Link, RefWalker, Tree}, + tree::{combine_hash, value_hash, Link, RefWalker, TreeNode}, CryptoHash, Error::{CostsError, EdError, StorageError}, - TreeFeatureType::BasicMerk, + TreeFeatureType::BasicMerkNode, }; #[cfg(feature = "full")] @@ -115,7 +117,9 @@ impl<'db, S: StorageContext<'db>> Restorer { self.rewrite_trunk_child_heights()?; } - self.merk.load_base_root().unwrap()?; + self.merk + .load_base_root(None:: Option>) + .unwrap()?; Ok(self.merk) } @@ -135,16 +139,21 @@ impl<'db, S: StorageContext<'db>> Restorer { tree.visit_refs(&mut |proof_node| { if let Some((mut node, key)) = match &proof_node.node { Node::KV(key, value) => Some(( - Tree::new(key.clone(), value.clone(), None, BasicMerk).unwrap(), + TreeNode::new(key.clone(), value.clone(), None, BasicMerkNode).unwrap(), key, )), Node::KVValueHash(key, value, value_hash) => Some(( - Tree::new_with_value_hash(key.clone(), value.clone(), *value_hash, BasicMerk) - .unwrap(), + TreeNode::new_with_value_hash( + key.clone(), + value.clone(), + *value_hash, + BasicMerkNode, + ) + .unwrap(), key, )), Node::KVValueHashFeatureType(key, value, value_hash, feature_type) => Some(( - Tree::new_with_value_hash( + TreeNode::new_with_value_hash( key.clone(), value.clone(), *value_hash, @@ -264,8 +273,12 @@ impl<'db, S: StorageContext<'db>> Restorer { fn rewrite_parent_link(&mut self, leaf: &ProofTree) -> Result<(), Error> { let parent_keys = self.parent_keys.as_mut().unwrap(); let parent_key = parent_keys.peek().unwrap().clone(); - let mut parent = crate::merk::fetch_node(&self.merk.storage, parent_key.as_slice())? - .expect("Could not find parent of leaf chunk"); + let mut parent = crate::merk::fetch_node( + &self.merk.storage, + parent_key.as_slice(), + None:: Option>, + )? + .expect("Could not find parent of leaf chunk"); let is_left_child = self.remaining_chunks_unchecked() % 2 == 0; if let Some(Link::Reference { ref mut key, .. 
}) = parent.link_mut(is_left_child) { @@ -299,16 +312,25 @@ impl<'db, S: StorageContext<'db>> Restorer { return Ok(node.tree().child_heights()); } - let mut cloned_node = - Tree::decode(node.tree().key().to_vec(), node.tree().encode().as_slice()) - .map_err(EdError)?; + let mut cloned_node = TreeNode::decode( + node.tree().key().to_vec(), + node.tree().encode().as_slice(), + None:: Option>, + ) + .map_err(EdError)?; - let left_child = node.walk(true).unwrap()?.unwrap(); + let left_child = node + .walk(true, None::<&fn(&[u8]) -> Option>) + .unwrap()? + .unwrap(); let left_child_heights = recurse(left_child, remaining_depth - 1, batch)?; let left_height = left_child_heights.0.max(left_child_heights.1) + 1; *cloned_node.link_mut(true).unwrap().child_heights_mut() = left_child_heights; - let right_child = node.walk(false).unwrap()?.unwrap(); + let right_child = node + .walk(false, None::<&fn(&[u8]) -> Option>) + .unwrap()? + .unwrap(); let right_child_heights = recurse(right_child, remaining_depth - 1, batch)?; let right_height = right_child_heights.0.max(right_child_heights.1) + 1; *cloned_node.link_mut(false).unwrap().child_heights_mut() = right_child_heights; @@ -321,7 +343,9 @@ impl<'db, S: StorageContext<'db>> Restorer { Ok((left_height, right_height)) } - self.merk.load_base_root().unwrap()?; + self.merk + .load_base_root(None:: Option>) + .unwrap()?; let mut batch = self.merk.storage.new_batch(); @@ -409,6 +433,7 @@ mod tests { .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, + None::<&fn(&[u8]) -> Option>, ) .unwrap() .unwrap(); @@ -426,7 +451,13 @@ mod tests { let ctx = storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(); - let merk = Merk::open_base(ctx, false).unwrap().unwrap(); + let merk = Merk::open_base( + ctx, + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); let mut restorer = Merk::restore(merk, original.root_hash().unwrap()); assert_eq!(restorer.remaining_chunks(), None); @@ 
-460,8 +491,8 @@ mod tests { fn restore_2_left_heavy() { restore_test( &[ - &[(vec![0], Op::Put(vec![], BasicMerk))], - &[(vec![1], Op::Put(vec![], BasicMerk))], + &[(vec![0], Op::Put(vec![], BasicMerkNode))], + &[(vec![1], Op::Put(vec![], BasicMerkNode))], ], 2, ); @@ -471,8 +502,8 @@ mod tests { fn restore_2_right_heavy() { restore_test( &[ - &[(vec![1], Op::Put(vec![], BasicMerk))], - &[(vec![0], Op::Put(vec![], BasicMerk))], + &[(vec![1], Op::Put(vec![], BasicMerkNode))], + &[(vec![0], Op::Put(vec![], BasicMerkNode))], ], 2, ); diff --git a/merk/src/merk/source.rs b/merk/src/merk/source.rs new file mode 100644 index 00000000..46782bdc --- /dev/null +++ b/merk/src/merk/source.rs @@ -0,0 +1,49 @@ +use grovedb_costs::CostResult; +use grovedb_storage::StorageContext; + +use crate::{ + tree::{kv::ValueDefinedCostType, Fetch, TreeNode}, + Error, Link, Merk, +}; + +impl<'db, S> Merk +where + S: StorageContext<'db>, +{ + pub(in crate::merk) fn source(&self) -> MerkSource { + MerkSource { + storage: &self.storage, + is_sum_tree: self.is_sum_tree, + } + } +} + +#[derive(Debug)] +pub struct MerkSource<'s, S> { + storage: &'s S, + is_sum_tree: bool, +} + +impl<'s, S> Clone for MerkSource<'s, S> { + fn clone(&self) -> Self { + MerkSource { + storage: self.storage, + is_sum_tree: self.is_sum_tree, + } + } +} + +impl<'s, 'db, S> Fetch for MerkSource<'s, S> +where + S: StorageContext<'db>, +{ + fn fetch( + &self, + link: &Link, + value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + ) -> CostResult { + TreeNode::get(self.storage, link.key(), value_defined_cost_fn) + .map_ok(|x| x.ok_or(Error::KeyNotFoundError("Key not found for fetch"))) + .flatten() + } +} diff --git a/merk/src/owner.rs b/merk/src/owner.rs index d84917b7..18efb8f2 100644 --- a/merk/src/owner.rs +++ b/merk/src/owner.rs @@ -91,6 +91,27 @@ impl Owner { return_value } + /// Takes temporary ownership of the contained value by passing it to `f`. 
+ /// The function must return a result of the same type (the same value, or a + /// new value to take its place). + /// + /// Like `own`, but uses a tuple return type which allows specifying a value + /// to return from the call to `own_result` for convenience. + pub fn own_result(&mut self, f: F) -> Result<(), E> + where + F: FnOnce(T) -> Result, + { + let old_value = unwrap(self.inner.take()); + let new_value_result = f(old_value); + match new_value_result { + Ok(new_value) => { + self.inner = Some(new_value); + Ok(()) + } + Err(e) => Err(e), + } + } + /// Sheds the `Owner` container and returns the value it contained. pub fn into_inner(mut self) -> T { unwrap(self.inner.take()) diff --git a/merk/src/proofs/chunk.rs b/merk/src/proofs/chunk.rs index 48afe8f3..1e3b9fb1 100644 --- a/merk/src/proofs/chunk.rs +++ b/merk/src/proofs/chunk.rs @@ -38,17 +38,18 @@ use grovedb_storage::RawIterator; use { super::tree::{execute, Tree as ProofTree}, crate::tree::CryptoHash, - crate::tree::Tree, + crate::tree::TreeNode, }; #[cfg(feature = "full")] use super::{Node, Op}; +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::{ error::Error, tree::{Fetch, RefWalker}, Error::EdError, - TreeFeatureType::BasicMerk, + TreeFeatureType::BasicMerkNode, }; /// The minimum number of layers the trunk will be guaranteed to have before @@ -95,7 +96,10 @@ where depth: usize, ) -> CostResult { let mut cost = OperationCost::default(); - let maybe_left = match self.walk(true).unwrap_add_cost(&mut cost) { + let maybe_left = match self + .walk(true, None::<&fn(&[u8]) -> Option>) + .unwrap_add_cost(&mut cost) + { Ok(maybe_left) => maybe_left, Err(e) => { return Err(e).wrap_with_cost(cost); @@ -159,7 +163,11 @@ where // traverse left let has_left_child = self.tree().link(true).is_some(); if has_left_child { - let mut left = cost_return_on_error!(&mut cost, self.walk(true)).unwrap(); + let mut left = cost_return_on_error!( + &mut cost, + self.walk(true, None::<&fn(&[u8]) -> 
Option>) + ) + .unwrap(); cost_return_on_error!( &mut cost, left.traverse_for_trunk(proof, remaining_depth - 1, is_leftmost) @@ -174,7 +182,10 @@ where } // traverse right - if let Some(mut right) = cost_return_on_error!(&mut cost, self.walk(false)) { + if let Some(mut right) = cost_return_on_error!( + &mut cost, + self.walk(false, None::<&fn(&[u8]) -> Option>) + ) { cost_return_on_error!( &mut cost, right.traverse_for_trunk(proof, remaining_depth - 1, false) @@ -199,7 +210,7 @@ pub(crate) fn get_next_chunk( let mut chunk = Vec::with_capacity(512); let mut stack = Vec::with_capacity(32); - let mut node = Tree::new(vec![], vec![], None, BasicMerk).unwrap_add_cost(&mut cost); + let mut node = TreeNode::new(vec![], vec![], None, BasicMerkNode).unwrap_add_cost(&mut cost); while iter.valid().unwrap_add_cost(&mut cost) { let key = iter.key().unwrap_add_cost(&mut cost).unwrap(); @@ -213,7 +224,13 @@ pub(crate) fn get_next_chunk( let encoded_node = iter.value().unwrap_add_cost(&mut cost).unwrap(); cost_return_on_error_no_add!( &cost, - Tree::decode_into(&mut node, vec![], encoded_node).map_err(EdError) + TreeNode::decode_into( + &mut node, + vec![], + encoded_node, + None:: Option> + ) + .map_err(EdError) ); // TODO: Only use the KVValueHash if needed, saves 32 bytes @@ -380,13 +397,12 @@ pub(crate) fn verify_trunk>>( mod tests { use std::usize; - use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; use grovedb_storage::StorageContext; use super::{super::tree::Tree, *}; use crate::{ test_utils::*, - tree::{NoopCommit, PanicSource, Tree as BaseTree}, + tree::{NoopCommit, PanicSource, TreeNode as BaseTree}, }; #[derive(Default)] @@ -459,15 +475,10 @@ mod tests { #[test] fn one_node_tree_trunk_roundtrip() { - let mut tree = BaseTree::new(vec![0], vec![], None, BasicMerk).unwrap(); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - 
.unwrap(); + let mut tree = BaseTree::new(vec![0], vec![], None, BasicMerkNode).unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .unwrap(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); @@ -485,20 +496,15 @@ mod tests { // 0 // \ // 1 - let mut tree = BaseTree::new(vec![0], vec![], None, BasicMerk) + let mut tree = BaseTree::new(vec![0], vec![], None, BasicMerkNode) .unwrap() .attach( false, - Some(BaseTree::new(vec![1], vec![], None, BasicMerk).unwrap()), + Some(BaseTree::new(vec![1], vec![], None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .unwrap(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); assert!(!has_more); @@ -515,20 +521,15 @@ mod tests { // 1 // / // 0 - let mut tree = BaseTree::new(vec![1], vec![], None, BasicMerk) + let mut tree = BaseTree::new(vec![1], vec![], None, BasicMerkNode) .unwrap() .attach( true, - Some(BaseTree::new(vec![0], vec![], None, BasicMerk).unwrap()), + Some(BaseTree::new(vec![0], vec![], None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .unwrap(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); assert!(!has_more); @@ -545,24 +546,19 @@ mod tests { // 1 // / \ // 0 2 - let mut tree = BaseTree::new(vec![1], vec![], None, BasicMerk) + let mut tree = BaseTree::new(vec![1], vec![], None, BasicMerkNode) .unwrap() 
.attach( true, - Some(BaseTree::new(vec![0], vec![], None, BasicMerk).unwrap()), + Some(BaseTree::new(vec![0], vec![], None, BasicMerkNode).unwrap()), ) .attach( false, - Some(BaseTree::new(vec![2], vec![], None, BasicMerk).unwrap()), + Some(BaseTree::new(vec![2], vec![], None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .unwrap(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); diff --git a/merk/src/proofs/encoding.rs b/merk/src/proofs/encoding.rs index b0e82833..6b5f95b0 100644 --- a/merk/src/proofs/encoding.rs +++ b/merk/src/proofs/encoding.rs @@ -465,7 +465,7 @@ mod test { use super::super::{Node, Op}; use crate::{ tree::HASH_LENGTH, - TreeFeatureType::{BasicMerk, SummedMerk}, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; #[test] @@ -567,7 +567,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - BasicMerk, + BasicMerkNode, )); assert_eq!(op.encoding_length(), 43); @@ -585,7 +585,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - SummedMerk(6), + SummedMerkNode(6), )); assert_eq!(op.encoding_length(), 44); @@ -683,7 +683,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - BasicMerk, + BasicMerkNode, )); assert_eq!(op.encoding_length(), 43); @@ -701,7 +701,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - SummedMerk(5), + SummedMerkNode(5), )); assert_eq!(op.encoding_length(), 44); @@ -860,7 +860,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - BasicMerk + BasicMerkNode )) ); @@ -875,7 +875,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - SummedMerk(6) + SummedMerkNode(6) )) ); } @@ -960,7 +960,7 @@ mod test { vec![1, 2, 3], vec![4, 5, 6], [0; 32], - BasicMerk + BasicMerkNode )) ); @@ -975,7 +975,7 @@ mod test { 
vec![1, 2, 3], vec![4, 5, 6], [0; 32], - SummedMerk(6) + SummedMerkNode(6) )) ); } diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index d63d2d47..4ecc808b 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -68,6 +68,8 @@ use super::Node; #[cfg(any(feature = "full", feature = "verify"))] use crate::error::Error; #[cfg(feature = "full")] +use crate::tree::kv::ValueDefinedCostType; +#[cfg(feature = "full")] use crate::tree::{Fetch, Link, RefWalker}; #[cfg(any(feature = "full", feature = "verify"))] @@ -752,14 +754,15 @@ where left_to_right: bool, ) -> CostResult { if !query.is_empty() { - self.walk(left).flat_map_ok(|child_opt| { - if let Some(mut child) = child_opt { - child.create_proof(query, limit, offset, left_to_right) - } else { - Ok((LinkedList::new(), (true, true), limit, offset)) - .wrap_with_cost(Default::default()) - } - }) + self.walk(left, None::<&fn(&[u8]) -> Option>) + .flat_map_ok(|child_opt| { + if let Some(mut child) = child_opt { + child.create_proof(query, limit, offset, left_to_right) + } else { + Ok((LinkedList::new(), (true, true), limit, offset)) + .wrap_with_cost(Default::default()) + } + }) } else if let Some(link) = self.tree().link(left) { let mut proof = LinkedList::new(); proof.push_back(if left_to_right { @@ -779,7 +782,6 @@ where #[allow(deprecated)] #[cfg(test)] mod test { - use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; use super::{ super::{encoding::encode_into, *}, @@ -792,8 +794,8 @@ mod test { verify::{verify_query, ProvedKeyValue}, }, test_utils::make_tree_seq, - tree::{NoopCommit, PanicSource, RefWalker, Tree}, - TreeFeatureType::BasicMerk, + tree::{NoopCommit, PanicSource, RefWalker, TreeNode}, + TreeFeatureType::BasicMerkNode, }; fn compare_result_tuples( @@ -807,70 +809,50 @@ mod test { } } - fn make_3_node_tree() -> Tree { - let mut tree = Tree::new(vec![5], vec![5], None, BasicMerk) + fn make_3_node_tree() -> TreeNode { + let mut tree 
= TreeNode::new(vec![5], vec![5], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![3], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![3], vec![3], None, BasicMerkNode).unwrap()), ) .attach( false, - Some(Tree::new(vec![7], vec![7], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![7], vec![7], None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); tree } - fn make_6_node_tree() -> Tree { - let two_tree = Tree::new(vec![2], vec![2], None, BasicMerk).unwrap(); - let four_tree = Tree::new(vec![4], vec![4], None, BasicMerk).unwrap(); - let mut three_tree = Tree::new(vec![3], vec![3], None, BasicMerk) + fn make_6_node_tree() -> TreeNode { + let two_tree = TreeNode::new(vec![2], vec![2], None, BasicMerkNode).unwrap(); + let four_tree = TreeNode::new(vec![4], vec![4], None, BasicMerkNode).unwrap(); + let mut three_tree = TreeNode::new(vec![3], vec![3], None, BasicMerkNode) .unwrap() .attach(true, Some(two_tree)) .attach(false, Some(four_tree)); three_tree - .commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) + .commit(&mut NoopCommit {}, &|_, _| Ok(0)) .unwrap() .expect("commit failed"); - let seven_tree = Tree::new(vec![7], vec![7], None, BasicMerk).unwrap(); - let mut eight_tree = Tree::new(vec![8], vec![8], None, BasicMerk) + let seven_tree = TreeNode::new(vec![7], vec![7], None, BasicMerkNode).unwrap(); + let mut eight_tree = TreeNode::new(vec![8], vec![8], None, BasicMerkNode) .unwrap() .attach(true, Some(seven_tree)); eight_tree - .commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) + 
.commit(&mut NoopCommit {}, &|_, _| Ok(0)) .unwrap() .expect("commit failed"); - let mut root_tree = Tree::new(vec![5], vec![5], None, BasicMerk) + let mut root_tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode) .unwrap() .attach(true, Some(three_tree)) .attach(false, Some(eight_tree)); root_tree - .commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) + .commit(&mut NoopCommit {}, &|_, _| Ok(0)) .unwrap() .expect("commit failed"); @@ -1553,25 +1535,28 @@ mod test { #[test] fn doc_proof() { - let mut tree = Tree::new(vec![5], vec![5], None, BasicMerk) + let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode) .unwrap() .attach( true, Some( - Tree::new(vec![2], vec![2], None, BasicMerk) + TreeNode::new(vec![2], vec![2], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![1], vec![1], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![1], vec![1], None, BasicMerkNode).unwrap()), ) .attach( false, Some( - Tree::new(vec![4], vec![4], None, BasicMerk) + TreeNode::new(vec![4], vec![4], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![3], vec![3], None, BasicMerk).unwrap()), + Some( + TreeNode::new(vec![3], vec![3], None, BasicMerkNode) + .unwrap(), + ), ), ), ), @@ -1580,46 +1565,48 @@ mod test { .attach( false, Some( - Tree::new(vec![9], vec![9], None, BasicMerk) + TreeNode::new(vec![9], vec![9], None, BasicMerkNode) .unwrap() .attach( true, Some( - Tree::new(vec![7], vec![7], None, BasicMerk) + TreeNode::new(vec![7], vec![7], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![6], vec![6], None, BasicMerk).unwrap()), + Some( + TreeNode::new(vec![6], vec![6], None, BasicMerkNode) + .unwrap(), + ), ) .attach( false, - Some(Tree::new(vec![8], vec![8], None, BasicMerk).unwrap()), + Some( + TreeNode::new(vec![8], vec![8], None, BasicMerkNode) + .unwrap(), + ), ), ), ) .attach( false, Some( - Tree::new(vec![11], 
vec![11], None, BasicMerk) + TreeNode::new(vec![11], vec![11], None, BasicMerkNode) .unwrap() .attach( true, Some( - Tree::new(vec![10], vec![10], None, BasicMerk).unwrap(), + TreeNode::new(vec![10], vec![10], None, BasicMerkNode) + .unwrap(), ), ), ), ), ), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .unwrap(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); @@ -5755,15 +5742,10 @@ mod test { #[test] fn verify_ops() { - let mut tree = Tree::new(vec![5], vec![5], None, BasicMerk).unwrap(); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode).unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); let root_hash = tree.hash().unwrap(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); @@ -5786,15 +5768,10 @@ mod test { #[test] #[should_panic(expected = "verify failed")] fn verify_ops_mismatched_hash() { - let mut tree = Tree::new(vec![5], vec![5], None, BasicMerk).unwrap(); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode).unwrap(); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); let mut walker = RefWalker::new(&mut tree, PanicSource {}); diff --git a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index b7518158..6abe167e 100644 --- a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -39,13 +39,16 @@ use rand::prelude::*; pub use 
temp_merk::TempMerk; use crate::{ - tree::{kv::KV, BatchEntry, MerkBatch, NoopCommit, Op, PanicSource, Tree, Walker}, + tree::{ + kv::{ValueDefinedCostType, KV}, + BatchEntry, MerkBatch, NoopCommit, Op, PanicSource, TreeNode, Walker, + }, Merk, - TreeFeatureType::{BasicMerk, SummedMerk}, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; /// Assert tree invariants -pub fn assert_tree_invariants(tree: &Tree) { +pub fn assert_tree_invariants(tree: &TreeNode) { assert!(tree.balance_factor().abs() < 2); let maybe_left = tree.link(true); @@ -71,7 +74,7 @@ pub fn assert_tree_invariants(tree: &Tree) { /// Apply given batch to given tree and commit using memory only. /// Used by `apply_memonly` which also performs checks using /// `assert_tree_invariants`. Return Tree. -pub fn apply_memonly_unchecked(tree: Tree, batch: &MerkBatch>) -> Tree { +pub fn apply_memonly_unchecked(tree: TreeNode, batch: &MerkBatch>) -> TreeNode { let is_sum_node = tree.is_sum_node(); let walker = Walker::::new(tree, PanicSource {}); let mut tree = Walker::::apply_to( @@ -85,6 +88,8 @@ pub fn apply_memonly_unchecked(tree: Tree, batch: &MerkBatch>) -> Tree { is_sum_node, )) }, + None::<&fn(&[u8]) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), @@ -97,23 +102,13 @@ pub fn apply_memonly_unchecked(tree: Tree, batch: &MerkBatch>) -> Tree { .0 .expect("expected tree"); let is_sum_node = tree.is_sum_node(); - tree.commit( - &mut NoopCommit {}, - &|key, value| { - Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( - key.len() as u32, - value.len() as u32, - is_sum_node, - )) - }, - &mut |_, _, _| Ok((false, None)), - &mut |_, key_bytes_to_remove, value_bytes_to_remove| { - Ok(( - BasicStorageRemoval(key_bytes_to_remove), - BasicStorageRemoval(value_bytes_to_remove), - )) - }, - ) + tree.commit(&mut NoopCommit {}, &|key, value| { + 
Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( + key.len() as u32, + value.len() as u32, + is_sum_node, + )) + }) .unwrap() .expect("commit failed"); tree @@ -121,7 +116,7 @@ pub fn apply_memonly_unchecked(tree: Tree, batch: &MerkBatch>) -> Tree { /// Apply given batch to given tree and commit using memory only. /// Perform checks using `assert_tree_invariants`. Return Tree. -pub fn apply_memonly(tree: Tree, batch: &MerkBatch>) -> Tree { +pub fn apply_memonly(tree: TreeNode, batch: &MerkBatch>) -> TreeNode { let tree = apply_memonly_unchecked(tree, batch); assert_tree_invariants(&tree); tree @@ -130,10 +125,10 @@ pub fn apply_memonly(tree: Tree, batch: &MerkBatch>) -> Tree { /// Applies given batch to given tree or creates a new tree to apply to and /// commits to memory only. pub fn apply_to_memonly( - maybe_tree: Option, + maybe_tree: Option, batch: &MerkBatch>, is_sum_tree: bool, -) -> Option { +) -> Option { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); Walker::::apply_to( maybe_walker, @@ -146,6 +141,8 @@ pub fn apply_to_memonly( is_sum_tree, )) }, + None::<&fn(&[u8]) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), @@ -158,26 +155,15 @@ pub fn apply_to_memonly( .0 .map(|mut tree| { let is_sum_node = tree.is_sum_node(); - tree.commit( - &mut NoopCommit {}, - &|key, value| { - Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( - key.len() as u32, - value.len() as u32, - is_sum_node, - )) - }, - &mut |_, _, _| Ok((false, None)), - &mut |_, key_bytes_to_remove, value_bytes_to_remove| { - Ok(( - BasicStorageRemoval(key_bytes_to_remove), - BasicStorageRemoval(value_bytes_to_remove), - )) - }, - ) + tree.commit(&mut NoopCommit {}, &|key, value| { + Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( + key.len() as u32, + value.len() as u32, + is_sum_node, + )) + }) .unwrap() .expect("commit 
failed"); - println!("{:?}", &tree); assert_tree_invariants(&tree); tree }) @@ -190,7 +176,7 @@ pub const fn seq_key(n: u64) -> [u8; 8] { /// Create batch entry with Put op using key n and a fixed value pub fn put_entry(n: u64) -> BatchEntry> { - (seq_key(n).to_vec(), Op::Put(vec![123; 60], BasicMerk)) + (seq_key(n).to_vec(), Op::Put(vec![123; 60], BasicMerkNode)) } /// Create batch entry with Delete op using key n @@ -248,17 +234,17 @@ pub fn make_tree_rand( batch_size: u64, initial_seed: u64, is_sum_tree: bool, -) -> Tree { +) -> TreeNode { assert!(node_count >= batch_size); assert_eq!((node_count % batch_size), 0); let value = vec![123; 60]; let feature_type = if is_sum_tree { - SummedMerk(0) + SummedMerkNode(0) } else { - BasicMerk + BasicMerkNode }; - let mut tree = Tree::new(vec![0; 20], value, None, feature_type).unwrap(); + let mut tree = TreeNode::new(vec![0; 20], value, None, feature_type).unwrap(); let mut seed = initial_seed; @@ -274,7 +260,7 @@ pub fn make_tree_rand( /// Create tree with initial fixed values and apply `node count` Put ops using /// sequential keys using memory only -pub fn make_tree_seq(node_count: u64) -> Tree { +pub fn make_tree_seq(node_count: u64) -> TreeNode { let batch_size = if node_count >= 10_000 { assert_eq!(node_count % 10_000, 0); 10_000 @@ -283,7 +269,7 @@ pub fn make_tree_seq(node_count: u64) -> Tree { }; let value = vec![123; 60]; - let mut tree = Tree::new(vec![0; 20], value, None, BasicMerk).unwrap(); + let mut tree = TreeNode::new(vec![0; 20], value, None, BasicMerkNode).unwrap(); let batch_count = node_count / batch_size; for i in 0..batch_count { @@ -307,6 +293,7 @@ where .get_storage_context(SubtreePath::empty(), Some(batch)) .unwrap(), false, + None:: Option>, ) .unwrap() .unwrap() @@ -324,6 +311,7 @@ where .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, + None:: Option>, ) .unwrap() .unwrap() diff --git a/merk/src/test_utils/temp_merk.rs b/merk/src/test_utils/temp_merk.rs index 
0fb4724e..25e5b75c 100644 --- a/merk/src/test_utils/temp_merk.rs +++ b/merk/src/test_utils/temp_merk.rs @@ -39,6 +39,7 @@ use grovedb_storage::{ Storage, }; +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::Merk; @@ -62,7 +63,13 @@ impl TempMerk { .get_storage_context(SubtreePath::empty(), Some(batch)) .unwrap(); - let merk = Merk::open_base(context, false).unwrap().unwrap(); + let merk = Merk::open_base( + context, + false, + None:: Option>, + ) + .unwrap() + .unwrap(); TempMerk { storage, merk, @@ -82,7 +89,13 @@ impl TempMerk { .storage .get_storage_context(SubtreePath::empty(), Some(self.batch)) .unwrap(); - self.merk = Merk::open_base(context, false).unwrap().unwrap(); + self.merk = Merk::open_base( + context, + false, + None:: Option>, + ) + .unwrap() + .unwrap(); } } diff --git a/merk/src/tree/commit.rs b/merk/src/tree/commit.rs index ccecfb48..24c1d996 100644 --- a/merk/src/tree/commit.rs +++ b/merk/src/tree/commit.rs @@ -29,13 +29,9 @@ //! Merk tree commit #[cfg(feature = "full")] -use grovedb_costs::storage_cost::{removal::StorageRemovedBytes, StorageCost}; - -#[cfg(feature = "full")] -use super::Tree; +use super::TreeNode; #[cfg(feature = "full")] use crate::error::Error; -use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] /// To be used when committing a tree (writing it to a store after applying the @@ -45,31 +41,15 @@ pub trait Commit { /// backing store or cache. fn write( &mut self, - tree: &mut Tree, + tree: &mut TreeNode, old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, ) -> Result<(), Error>; /// Called once per node after writing a node and its children. 
The returned /// tuple specifies whether or not to prune the left and right child nodes, /// respectively. For example, returning `(true, true)` will prune both /// nodes, removing them from memory. - fn prune(&self, _tree: &Tree) -> (bool, bool) { + fn prune(&self, _tree: &TreeNode) -> (bool, bool) { (true, true) } } @@ -83,29 +63,13 @@ pub struct NoopCommit {} impl Commit for NoopCommit { fn write( &mut self, - _tree: &mut Tree, + _tree: &mut TreeNode, _old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - _update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - _section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, ) -> Result<(), Error> { Ok(()) } - fn prune(&self, _tree: &Tree) -> (bool, bool) { + fn prune(&self, _tree: &TreeNode) -> (bool, bool) { (false, false) } } diff --git a/merk/src/tree/debug.rs b/merk/src/tree/debug.rs index 33889ebf..3e88c60b 100644 --- a/merk/src/tree/debug.rs +++ b/merk/src/tree/debug.rs @@ -32,15 +32,15 @@ use std::fmt::{Debug, Formatter, Result}; use colored::Colorize; -use super::{Link, Tree}; +use super::{Link, TreeNode}; #[cfg(feature = "full")] -impl Debug for Tree { +impl Debug for TreeNode { // TODO: unwraps should be results that bubble up fn fmt(&self, f: &mut Formatter) -> Result { fn traverse( f: &mut Formatter, - cursor: &Tree, + cursor: &TreeNode, stack: &mut Vec<(Vec, Vec)>, left: bool, ) { diff --git a/merk/src/tree/encoding.rs b/merk/src/tree/encoding.rs index 98f6a7c6..29307246 100644 --- a/merk/src/tree/encoding.rs +++ b/merk/src/tree/encoding.rs @@ -38,24 +38,33 @@ use grovedb_costs::{ use grovedb_storage::StorageContext; #[cfg(feature = "full")] -use super::Tree; +use super::TreeNode; +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::{ error::{Error, Error::EdError}, - tree::TreeInner, + tree::TreeNodeInner, 
Error::StorageError, }; #[cfg(feature = "full")] -impl Tree { +impl TreeNode { /// Decode given bytes and set as Tree fields. Set key to value of given /// key. - pub fn decode_raw(bytes: &[u8], key: Vec) -> Result { - Tree::decode(key, bytes).map_err(EdError) + pub fn decode_raw( + bytes: &[u8], + key: Vec, + value_defined_cost_fn: Option Option>, + ) -> Result { + TreeNode::decode(key, bytes, value_defined_cost_fn).map_err(EdError) } /// Get value from storage given key. - pub(crate) fn get<'db, S, K>(storage: &S, key: K) -> CostResult, Error> + pub(crate) fn get<'db, S, K>( + storage: &S, + key: K, + value_defined_cost_fn: Option Option>, + ) -> CostResult, Error> where S: StorageContext<'db>, K: AsRef<[u8]>, @@ -66,7 +75,7 @@ impl Tree { let tree_opt = cost_return_on_error_no_add!( &cost, tree_bytes - .map(|x| Tree::decode_raw(&x, key.as_ref().to_vec())) + .map(|x| TreeNode::decode_raw(&x, key.as_ref().to_vec(), value_defined_cost_fn)) .transpose() ); @@ -75,7 +84,7 @@ impl Tree { } #[cfg(feature = "full")] -impl Tree { +impl TreeNode { #[inline] /// Encode pub fn encode(&self) -> Vec { @@ -111,19 +120,36 @@ impl Tree { #[inline] /// Decode bytes from reader, set as Tree fields and set key to given key - pub fn decode_into(&mut self, key: Vec, input: &[u8]) -> ed::Result<()> { - let mut tree_inner: TreeInner = Decode::decode(input)?; + pub fn decode_into( + &mut self, + key: Vec, + input: &[u8], + value_defined_cost_fn: Option Option>, + ) -> ed::Result<()> { + let mut tree_inner: TreeNodeInner = Decode::decode(input)?; tree_inner.kv.key = key; + if let Some(value_defined_cost_fn) = value_defined_cost_fn { + tree_inner.kv.value_defined_cost = + value_defined_cost_fn(tree_inner.kv.value.as_slice()); + } self.inner = Box::new(tree_inner); Ok(()) } #[inline] /// Decode input and set as Tree fields. Set the key as the given key. 
- pub fn decode(key: Vec, input: &[u8]) -> ed::Result { - let mut tree_inner: TreeInner = Decode::decode(input)?; + pub fn decode( + key: Vec, + input: &[u8], + value_defined_cost_fn: Option Option>, + ) -> ed::Result { + let mut tree_inner: TreeNodeInner = Decode::decode(input)?; tree_inner.kv.key = key; - Ok(Tree::new_with_tree_inner(tree_inner)) + if let Some(value_defined_cost_fn) = value_defined_cost_fn { + tree_inner.kv.value_defined_cost = + value_defined_cost_fn(tree_inner.kv.value.as_slice()); + } + Ok(TreeNode::new_with_tree_inner(tree_inner)) } } @@ -131,11 +157,12 @@ impl Tree { #[cfg(test)] mod tests { use super::{super::Link, *}; - use crate::TreeFeatureType::{BasicMerk, SummedMerk}; + use crate::TreeFeatureType::{BasicMerkNode, SummedMerkNode}; #[test] fn encode_leaf_tree() { - let tree = Tree::from_fields(vec![0], vec![1], [55; 32], None, None, BasicMerk).unwrap(); + let tree = + TreeNode::from_fields(vec![0], vec![1], [55; 32], None, None, BasicMerkNode).unwrap(); assert_eq!(tree.encoding_length(), 68); assert_eq!( tree.value_encoding_length_with_parent_to_child_reference(), @@ -155,17 +182,17 @@ mod tests { #[test] #[should_panic] fn encode_modified_tree() { - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( vec![0], vec![1], [55; 32], Some(Link::Modified { pending_writes: 1, child_heights: (123, 124), - tree: Tree::new(vec![2], vec![3], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap(), }), None, - BasicMerk, + BasicMerkNode, ) .unwrap(); tree.encode(); @@ -173,7 +200,7 @@ mod tests { #[test] fn encode_loaded_tree() { - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( vec![0], vec![1], [55; 32], @@ -181,10 +208,10 @@ mod tests { hash: [66; 32], sum: None, child_heights: (123, 124), - tree: Tree::new(vec![2], vec![3], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap(), }), None, - BasicMerk, + BasicMerkNode, ) .unwrap(); 
assert_eq!( @@ -202,7 +229,7 @@ mod tests { #[test] fn encode_uncommitted_tree() { - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( vec![0], vec![1], [55; 32], @@ -210,10 +237,10 @@ mod tests { hash: [66; 32], sum: Some(10), child_heights: (123, 124), - tree: Tree::new(vec![2], vec![3], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap(), }), None, - SummedMerk(5), + SummedMerkNode(5), ) .unwrap(); assert_eq!( @@ -231,7 +258,7 @@ mod tests { #[test] fn encode_reference_tree() { - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( vec![0], vec![1], [55; 32], @@ -242,7 +269,7 @@ mod tests { key: vec![2], }), None, - BasicMerk, + BasicMerkNode, ) .unwrap(); assert_eq!( @@ -275,10 +302,15 @@ mod tests { 131, 208, 25, 73, 98, 245, 209, 227, 170, 26, 72, 212, 134, 166, 126, 39, 98, 166, 199, 149, 144, 21, 1, ]; - let tree = Tree::decode(vec![0], bytes.as_slice()).expect("should decode correctly"); + let tree = TreeNode::decode( + vec![0], + bytes.as_slice(), + None::<&fn(&[u8]) -> Option>, + ) + .expect("should decode correctly"); assert_eq!(tree.key(), &[0]); assert_eq!(tree.value_as_slice(), &[1]); - assert_eq!(tree.inner.kv.feature_type, BasicMerk); + assert_eq!(tree.inner.kv.feature_type, BasicMerkNode); } #[test] @@ -290,7 +322,12 @@ mod tests { 55, 55, 55, 55, 55, 55, 32, 34, 236, 157, 87, 27, 167, 116, 207, 158, 131, 208, 25, 73, 98, 245, 209, 227, 170, 26, 72, 212, 134, 166, 126, 39, 98, 166, 199, 149, 144, 21, 1, ]; - let tree = Tree::decode(vec![0], bytes.as_slice()).expect("should decode correctly"); + let tree = TreeNode::decode( + vec![0], + bytes.as_slice(), + None::<&fn(&[u8]) -> Option>, + ) + .expect("should decode correctly"); assert_eq!(tree.key(), &[0]); assert_eq!(tree.value_as_slice(), &[1]); if let Some(Link::Reference { @@ -311,7 +348,11 @@ mod tests { #[test] fn decode_invalid_bytes_as_tree() { let bytes = vec![2, 3, 4, 5]; - let tree = Tree::decode(vec![0], 
bytes.as_slice()); - assert!(matches!(tree, Err(_))); + let tree = TreeNode::decode( + vec![0], + bytes.as_slice(), + None::<&fn(&[u8]) -> Option>, + ); + assert!(tree.is_err()); } } diff --git a/merk/src/tree/fuzz_tests.rs b/merk/src/tree/fuzz_tests.rs index cd9d22d5..631918ff 100644 --- a/merk/src/tree/fuzz_tests.rs +++ b/merk/src/tree/fuzz_tests.rs @@ -95,7 +95,7 @@ fn fuzz_case(seed: u64, using_sum_trees: bool) { } #[cfg(feature = "full")] -fn make_batch(maybe_tree: Option<&Tree>, size: u64, seed: u64) -> Vec { +fn make_batch(maybe_tree: Option<&TreeNode>, size: u64, seed: u64) -> Vec { let rng: RefCell = RefCell::new(SeedableRng::seed_from_u64(seed)); let mut batch = Vec::with_capacity(size as usize); @@ -170,7 +170,7 @@ fn apply_to_map(map: &mut Map, batch: &Batch) { } #[cfg(feature = "full")] -fn assert_map(maybe_tree: Option<&Tree>, map: &Map) { +fn assert_map(maybe_tree: Option<&TreeNode>, map: &Map) { if map.is_empty() { assert!(maybe_tree.is_none(), "expected tree to be None"); return; diff --git a/merk/src/tree/iter.rs b/merk/src/tree/iter.rs index 2daa5a02..6ca58df7 100644 --- a/merk/src/tree/iter.rs +++ b/merk/src/tree/iter.rs @@ -29,7 +29,7 @@ //! Merk tree iterator #[cfg(feature = "full")] -use super::Tree; +use super::TreeNode; #[cfg(feature = "full")] /// An entry stored on an `Iter`'s stack, containing a reference to a `Tree`, @@ -38,7 +38,7 @@ use super::Tree; /// The `traversed` field represents whether or not the left child, self, and /// right child have been visited, respectively (`(left, self, right)`). struct StackItem<'a> { - tree: &'a Tree, + tree: &'a TreeNode, traversed: (bool, bool, bool), } @@ -47,7 +47,7 @@ impl<'a> StackItem<'a> { /// Creates a new `StackItem` for the given tree. The `traversed` state will /// be `false` since the children and self have not been visited yet, but /// will default to `true` for sides that do not have a child. 
- const fn new(tree: &'a Tree) -> Self { + const fn new(tree: &'a TreeNode) -> Self { StackItem { tree, traversed: ( @@ -77,14 +77,14 @@ pub struct Iter<'a> { #[cfg(feature = "full")] impl<'a> Iter<'a> { /// Creates a new iterator for the given tree. - pub fn new(tree: &'a Tree) -> Self { + pub fn new(tree: &'a TreeNode) -> Self { let stack = vec![StackItem::new(tree)]; Iter { stack } } } #[cfg(feature = "full")] -impl<'a> Tree { +impl<'a> TreeNode { /// Creates an iterator which yields `(key, value)` tuples for all of the /// tree's nodes which are retained in memory (skipping pruned subtrees). pub fn iter(&'a self) -> Iter<'a> { diff --git a/merk/src/tree/just_in_time_value_update.rs b/merk/src/tree/just_in_time_value_update.rs new file mode 100644 index 00000000..20861ec4 --- /dev/null +++ b/merk/src/tree/just_in_time_value_update.rs @@ -0,0 +1,82 @@ +use grovedb_costs::storage_cost::{ + removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, + StorageCost, +}; + +use crate::{ + merk::defaults::MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES, + tree::{kv::ValueDefinedCostType, TreeNode}, + Error, +}; + +impl TreeNode { + pub(in crate::tree) fn just_in_time_tree_node_value_update( + &mut self, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> Result<(), Error> { + let (mut current_tree_plus_hook_size, mut storage_costs) = + self.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)?; + let mut i = 0; + + if let Some(old_value) = self.old_value.clone() { + // At this point the tree value can be updated based on client requirements + // For example to store the costs + loop { + let (flags_changed, value_defined_cost) = update_tree_value_based_on_costs( + 
&storage_costs.value_storage_cost, + &old_value, + self.value_mut_ref(), + )?; + if !flags_changed { + break; + } else { + self.inner.kv.value_defined_cost = value_defined_cost; + let after_update_tree_plus_hook_size = + self.value_encoding_length_with_parent_to_child_reference(); + if after_update_tree_plus_hook_size == current_tree_plus_hook_size { + break; + } + let new_size_and_storage_costs = + self.kv_with_parent_hook_size_and_storage_cost(old_specialized_cost)?; + current_tree_plus_hook_size = new_size_and_storage_costs.0; + storage_costs = new_size_and_storage_costs.1; + } + if i > MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES { + return Err(Error::CyclicError( + "updated value based on costs too many times", + )); + } + i += 1; + } + + if let BasicStorageRemoval(removed_bytes) = + storage_costs.value_storage_cost.removed_bytes + { + let (_, value_removed_bytes) = section_removal_bytes(&old_value, 0, removed_bytes)?; + storage_costs.value_storage_cost.removed_bytes = value_removed_bytes; + } + } + + // Update old tree size after generating value storage_cost cost + self.old_value = Some(self.value_ref().clone()); + self.known_storage_cost = Some(storage_costs); + + Ok(()) + } +} diff --git a/merk/src/tree/kv.rs b/merk/src/tree/kv.rs index 064b18a9..ff020abc 100644 --- a/merk/src/tree/kv.rs +++ b/merk/src/tree/kv.rs @@ -45,7 +45,7 @@ use crate::tree::kv::ValueDefinedCostType::{LayeredValueDefinedCost, Specialized use crate::{ tree::{ hash::{combine_hash, kv_digest_to_kv_hash, value_hash, HASH_LENGTH_X2}, - tree_feature_type::{TreeFeatureType, TreeFeatureType::BasicMerk}, + tree_feature_type::{TreeFeatureType, TreeFeatureType::BasicMerkNode}, }, Link, HASH_LENGTH_U32, HASH_LENGTH_U32_X2, }; @@ -197,60 +197,57 @@ impl KV { } } + /// Replaces the `KV`'s value with the given value, does not update the hash + /// or value hash. 
+ #[inline] + pub fn put_value_no_update_of_hashes(mut self, value: Vec) -> Self { + self.value = value; + self + } + /// Replaces the `KV`'s value with the given value, updates the hash, /// value hash and returns the modified `KV`. #[inline] pub fn put_value_then_update(mut self, value: Vec) -> CostContext { - let mut cost = OperationCost::default(); - // TODO: length check? self.value = value; - self.value_hash = value_hash(self.value_as_slice()).unwrap_add_cost(&mut cost); - self.hash = kv_digest_to_kv_hash(self.key(), self.value_hash()).unwrap_add_cost(&mut cost); - self.wrap_with_cost(cost) + self.update_hashes() } - /// Replaces the `KV`'s value with the given value, updates the hash, - /// value hash and returns the modified `KV`. - /// This is used when we want a fixed cost, for example in sum trees + /// Updates the hash, value hash and returns the modified `KV`. #[inline] - pub fn put_value_with_fixed_cost_then_update( - mut self, - value: Vec, - value_cost: u32, - ) -> CostContext { - self.value_defined_cost = Some(SpecializedValueDefinedCost(value_cost)); - self.put_value_then_update(value) + pub fn update_hashes(mut self) -> CostContext { + let mut cost = OperationCost::default(); + self.value_hash = value_hash(self.value_as_slice()).unwrap_add_cost(&mut cost); + self.hash = kv_digest_to_kv_hash(self.key(), self.value_hash()).unwrap_add_cost(&mut cost); + self.wrap_with_cost(cost) } - /// Replaces the `KV`'s value with the given value and value hash, - /// updates the hash and returns the modified `KV`. + /// Updates the hashes and returns the modified `KV`. 
#[inline] - pub fn put_value_and_reference_value_hash_then_update( + pub fn update_hashes_using_reference_value_hash( mut self, - value: Vec, reference_value_hash: CryptoHash, ) -> CostContext { let mut cost = OperationCost::default(); - let actual_value_hash = value_hash(value.as_slice()).unwrap_add_cost(&mut cost); + let actual_value_hash = value_hash(self.value_as_slice()).unwrap_add_cost(&mut cost); let combined_value_hash = combine_hash(&actual_value_hash, &reference_value_hash).unwrap_add_cost(&mut cost); - self.value = value; self.value_hash = combined_value_hash; self.hash = kv_digest_to_kv_hash(self.key(), self.value_hash()).unwrap_add_cost(&mut cost); self.wrap_with_cost(cost) } - /// Replaces the `KV`'s value with the given value and value hash, - /// updates the hash and returns the modified `KV`. + /// Replaces the `KV`'s value with the given value, does not update the + /// hashes, value hash and returns the modified `KV`. + /// This is used when we want a fixed cost, for example in sum trees #[inline] - pub fn put_value_with_reference_value_hash_and_value_cost_then_update( + pub fn put_value_with_fixed_cost_no_update_of_hashes( mut self, value: Vec, - reference_value_hash: CryptoHash, - value_cost: u32, - ) -> CostContext { - self.value_defined_cost = Some(LayeredValueDefinedCost(value_cost)); - self.put_value_and_reference_value_hash_then_update(value, reference_value_hash) + value_cost: ValueDefinedCostType, + ) -> Self { + self.value_defined_cost = Some(value_cost); + self.put_value_no_update_of_hashes(value) } /// Returns the key as a slice. 
@@ -532,7 +529,7 @@ impl Decode for KV { let mut kv = Self { key: Vec::with_capacity(0), value: Vec::with_capacity(128), - feature_type: BasicMerk, + feature_type: BasicMerkNode, value_defined_cost: None, hash: NULL_HASH, value_hash: NULL_HASH, @@ -563,11 +560,11 @@ impl Terminated for KV {} #[cfg(test)] mod test { use super::*; - use crate::tree::tree_feature_type::TreeFeatureType::SummedMerk; + use crate::tree::tree_feature_type::TreeFeatureType::SummedMerkNode; #[test] fn new_kv() { - let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerk).unwrap(); + let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerkNode).unwrap(); assert_eq!(kv.key(), &[1, 2, 3]); assert_eq!(kv.value_as_slice(), &[4, 5, 6]); @@ -576,7 +573,7 @@ mod test { #[test] fn with_value() { - let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerk) + let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerkNode) .unwrap() .put_value_then_update(vec![7, 8, 9]) .unwrap(); @@ -588,7 +585,7 @@ mod test { #[test] fn encode_and_decode_kv() { - let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerk).unwrap(); + let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, BasicMerkNode).unwrap(); let mut encoded_kv = vec![]; kv.encode_into(&mut encoded_kv).expect("encoded"); let mut decoded_kv = KV::decode(encoded_kv.as_slice()).unwrap(); @@ -596,7 +593,7 @@ mod test { assert_eq!(kv, decoded_kv); - let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, SummedMerk(20)).unwrap(); + let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6], None, SummedMerkNode(20)).unwrap(); let mut encoded_kv = vec![]; kv.encode_into(&mut encoded_kv).expect("encoded"); let mut decoded_kv = KV::decode(encoded_kv.as_slice()).unwrap(); diff --git a/merk/src/tree/link.rs b/merk/src/tree/link.rs index 56d9f1b0..ab26159b 100644 --- a/merk/src/tree/link.rs +++ b/merk/src/tree/link.rs @@ -37,7 +37,7 @@ use ed::{Decode, Encode, Result, Terminated}; use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; 
#[cfg(feature = "full")] -use super::{hash::CryptoHash, Tree}; +use super::{hash::CryptoHash, TreeNode}; #[cfg(feature = "full")] use crate::HASH_LENGTH_U32; @@ -72,7 +72,7 @@ pub enum Link { /// Child heights child_heights: (u8, u8), /// Tree - tree: Tree + tree: TreeNode }, /// Represents a tree node which has been modified since the `Tree`'s last @@ -84,7 +84,7 @@ pub enum Link { /// Child heights child_heights: (u8, u8), /// Tree - tree: Tree, + tree: TreeNode, /// Sum sum: Option, }, @@ -97,7 +97,7 @@ pub enum Link { /// Child heights child_heights: (u8, u8), /// Tree - tree: Tree, + tree: TreeNode, /// Sum sum: Option, }, @@ -107,7 +107,7 @@ pub enum Link { impl Link { /// Creates a `Link::Modified` from the given `Tree`. #[inline] - pub const fn from_modified_tree(tree: Tree) -> Self { + pub const fn from_modified_tree(tree: TreeNode) -> Self { let pending_writes = 1 + tree.child_pending_writes(true) + tree.child_pending_writes(false); Self::Modified { @@ -119,7 +119,7 @@ impl Link { /// Creates a `Link::Modified` from the given tree, if any. If `None`, /// returns `None`. - pub fn maybe_from_modified_tree(maybe_tree: Option) -> Option { + pub fn maybe_from_modified_tree(maybe_tree: Option) -> Option { maybe_tree.map(Self::from_modified_tree) } @@ -161,7 +161,7 @@ impl Link { /// Returns the `Tree` instance of the tree referenced by the link. If the /// link is of variant `Link::Reference`, the returned value will be `None`. #[inline] - pub const fn tree(&self) -> Option<&Tree> { + pub const fn tree(&self) -> Option<&TreeNode> { match self { // TODO: panic for Reference, don't return Option? Link::Reference { .. 
} => None, @@ -483,14 +483,14 @@ fn read_u8(mut input: R) -> Result { #[cfg(test)] mod test { use super::{ - super::{hash::NULL_HASH, Tree}, + super::{hash::NULL_HASH, TreeNode}, *, }; - use crate::TreeFeatureType::BasicMerk; + use crate::TreeFeatureType::BasicMerkNode; #[test] fn from_modified_tree() { - let tree = Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); let link = Link::from_modified_tree(tree); assert!(link.is_modified()); assert_eq!(link.height(), 1); @@ -507,7 +507,7 @@ mod test { let link = Link::maybe_from_modified_tree(None); assert!(link.is_none()); - let tree = Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); let link = Link::maybe_from_modified_tree(Some(tree)); assert!(link.expect("expected link").is_modified()); } @@ -519,7 +519,7 @@ mod test { let child_heights = (0, 0); let pending_writes = 1; let key = vec![0]; - let tree = || Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = || TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); let reference = Link::Reference { hash, @@ -585,7 +585,7 @@ mod test { Link::Modified { pending_writes: 1, child_heights: (1, 1), - tree: Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(), } .hash(); } @@ -596,7 +596,7 @@ mod test { Link::Modified { pending_writes: 1, child_heights: (1, 1), - tree: Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(), } .into_reference(); } @@ -608,7 +608,7 @@ mod test { hash: [1; 32], sum: None, child_heights: (1, 1), - tree: Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(), + tree: TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(), } .into_reference(); } diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index 
c497b893..cb732b56 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -41,6 +41,8 @@ mod hash; #[cfg(feature = "full")] mod iter; #[cfg(feature = "full")] +mod just_in_time_value_update; +#[cfg(feature = "full")] pub mod kv; #[cfg(feature = "full")] mod link; @@ -91,6 +93,8 @@ pub use walk::{Fetch, RefWalker, Walker}; #[cfg(feature = "full")] use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] +use crate::tree::kv::ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}; +#[cfg(feature = "full")] use crate::{error::Error, Error::Overflow}; // TODO: remove need for `TreeInner`, and just use `Box` receiver for @@ -99,14 +103,14 @@ use crate::{error::Error, Error::Overflow}; #[cfg(feature = "full")] /// The fields of the `Tree` type, stored on the heap. #[derive(Clone, Encode, Decode, Debug)] -pub struct TreeInner { +pub struct TreeNodeInner { pub(crate) left: Option, pub(crate) right: Option, pub(crate) kv: KV, } #[cfg(feature = "full")] -impl TreeInner { +impl TreeNodeInner { /// Get the value as owned of the key value struct pub fn value_as_owned(self) -> Vec { self.kv.value @@ -129,7 +133,7 @@ impl TreeInner { } #[cfg(feature = "full")] -impl Terminated for Box {} +impl Terminated for Box {} #[cfg(feature = "full")] /// A binary AVL tree data structure, with Merkle hashes. @@ -138,14 +142,14 @@ impl Terminated for Box {} /// link to each other, and so we can detach nodes from their parents, then /// reattach without allocating or freeing heap memory. #[derive(Clone)] -pub struct Tree { - pub(crate) inner: Box, - pub(crate) old_size_with_parent_to_child_hook: u32, +pub struct TreeNode { + pub(crate) inner: Box, pub(crate) old_value: Option>, + pub(crate) known_storage_cost: Option, } #[cfg(feature = "full")] -impl Tree { +impl TreeNode { /// Creates a new `Tree` with the given key and value, and no children. /// /// Hashes the key/value pair and initializes the `kv_hash` field. 
@@ -156,24 +160,23 @@ impl Tree { feature_type: TreeFeatureType, ) -> CostContext { KV::new(key, value, value_defined_cost, feature_type).map(|kv| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv, left: None, right: None, }), - old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }) } /// Creates a new `Tree` given an inner tree - pub fn new_with_tree_inner(inner_tree: TreeInner) -> Self { - let decode_size = inner_tree.kv.value_byte_cost_size(); + pub fn new_with_tree_inner(inner_tree: TreeNodeInner) -> Self { let old_value = inner_tree.kv.value.clone(); Self { inner: Box::new(inner_tree), - old_size_with_parent_to_child_hook: decode_size, old_value: Some(old_value), + known_storage_cost: None, } } @@ -218,7 +221,7 @@ impl Tree { let key_value_storage_cost = KeyValueStorageCost { key_storage_cost, // the key storage cost is added later value_storage_cost, - new_node: self.old_size_with_parent_to_child_hook == 0, + new_node: self.old_value.is_none(), needs_value_verification: self.inner.kv.value_defined_cost.is_none(), }; @@ -234,10 +237,10 @@ impl Tree { ) -> Result<(u32, KeyValueStorageCost), Error> { let current_value_byte_cost = self.value_encoding_length_with_parent_to_child_reference(); - let old_cost = if self.inner.kv.value_defined_cost.is_some() && self.old_value.is_some() { - old_tree_cost(self.key_as_ref(), self.old_value.as_ref().unwrap()) + let old_cost = if let Some(old_value) = self.old_value.as_ref() { + old_tree_cost(self.key_as_ref(), old_value) } else { - Ok(self.old_size_with_parent_to_child_hook) + Ok(0) // there was no old value, hence old cost would be 0 }?; self.kv_with_parent_hook_size_and_storage_cost_from_old_cost( @@ -257,13 +260,13 @@ impl Tree { feature_type: TreeFeatureType, ) -> CostContext { KV::new_with_value_hash(key, value, value_hash, feature_type).map(|kv| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv, left: None, right: None, }), - 
old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }) } @@ -277,13 +280,13 @@ impl Tree { feature_type: TreeFeatureType, ) -> CostContext { KV::new_with_combined_value_hash(key, value, value_hash, feature_type).map(|kv| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv, left: None, right: None, }), - old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }) } @@ -299,13 +302,13 @@ impl Tree { ) -> CostContext { KV::new_with_layered_value_hash(key, value, value_cost, value_hash, feature_type).map( |kv| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv, left: None, right: None, }), - old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }, ) } @@ -321,13 +324,13 @@ impl Tree { feature_type: TreeFeatureType, ) -> CostContext { value_hash(value.as_slice()).map(|vh| Self { - inner: Box::new(TreeInner { + inner: Box::new(TreeNodeInner { kv: KV::from_fields(key, value, kv_hash, vh, feature_type), left, right, }), - old_size_with_parent_to_child_hook: 0, old_value: None, + known_storage_cost: None, }) } @@ -485,8 +488,8 @@ impl Tree { #[inline] pub fn sum(&self) -> Result, Error> { match self.inner.kv.feature_type { - TreeFeatureType::BasicMerk => Ok(None), - TreeFeatureType::SummedMerk(value) => value + TreeFeatureType::BasicMerkNode => Ok(None), + TreeFeatureType::SummedMerkNode(value) => value .checked_add(self.child_sum(true)) .and_then(|a| a.checked_add(self.child_sum(false))) .ok_or(Overflow("sum is overflowing")) @@ -653,15 +656,49 @@ impl Tree { /// Replaces the root node's value with the given value and returns the /// modified `Tree`. 
#[inline] - pub fn put_value(mut self, value: Vec, feature_type: TreeFeatureType) -> CostContext { + pub fn put_value( + mut self, + value: Vec, + feature_type: TreeFeatureType, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.inner.kv = self - .inner - .kv - .put_value_then_update(value) - .unwrap_add_cost(&mut cost); + + self.inner.kv = self.inner.kv.put_value_no_update_of_hashes(value); self.inner.kv.feature_type = feature_type; - self.wrap_with_cost(cost) + + if self.old_value.is_some() { + // we are replacing a value + // in this case there is a possibility that the client would want to update the + // element flags based on the change of values + cost_return_on_error_no_add!( + &cost, + self.just_in_time_tree_node_value_update( + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ); + } + + self.inner.kv = self.inner.kv.update_hashes().unwrap_add_cost(&mut cost); + Ok(self).wrap_with_cost(cost) } /// Replaces the root node's value with the given value and returns the @@ -672,15 +709,47 @@ impl Tree { value: Vec, value_fixed_cost: u32, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.inner.kv = self - .inner - .kv - .put_value_with_fixed_cost_then_update(value, value_fixed_cost) - 
.unwrap_add_cost(&mut cost); + self.inner.kv = self.inner.kv.put_value_with_fixed_cost_no_update_of_hashes( + value, + SpecializedValueDefinedCost(value_fixed_cost), + ); self.inner.kv.feature_type = feature_type; - self.wrap_with_cost(cost) + + if self.old_value.is_some() { + // we are replacing a value + // in this case there is a possibility that the client would want to update the + // element flags based on the change of values + cost_return_on_error_no_add!( + &cost, + self.just_in_time_tree_node_value_update( + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ); + } + + self.inner.kv = self.inner.kv.update_hashes().unwrap_add_cost(&mut cost); + Ok(self).wrap_with_cost(cost) } /// Replaces the root node's value with the given value and value hash @@ -691,15 +760,49 @@ impl Tree { value: Vec, value_hash: CryptoHash, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); + + self.inner.kv = self.inner.kv.put_value_no_update_of_hashes(value); + self.inner.kv.feature_type = feature_type; + + if self.old_value.is_some() { + // we are replacing a value + // in this case there is a possibility that the client would want to update the + // element flags based on the change of values + cost_return_on_error_no_add!( + &cost, + self.just_in_time_tree_node_value_update( + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ); + } + self.inner.kv = self .inner .kv - .put_value_and_reference_value_hash_then_update(value, value_hash) + .update_hashes_using_reference_value_hash(value_hash) .unwrap_add_cost(&mut cost); - 
self.inner.kv.feature_type = feature_type; - self.wrap_with_cost(cost) + Ok(self).wrap_with_cost(cost) } /// Replaces the root node's value with the given value and value hash @@ -711,17 +814,52 @@ impl Tree { value_hash: CryptoHash, value_cost: u32, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); + + self.inner.kv = self.inner.kv.put_value_with_fixed_cost_no_update_of_hashes( + value, + LayeredValueDefinedCost(value_cost), + ); + self.inner.kv.feature_type = feature_type; + + if self.old_value.is_some() { + // we are replacing a value + // in this case there is a possibility that the client would want to update the + // element flags based on the change of values + cost_return_on_error_no_add!( + &cost, + self.just_in_time_tree_node_value_update( + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ); + } + self.inner.kv = self .inner .kv - .put_value_with_reference_value_hash_and_value_cost_then_update( - value, value_hash, value_cost, - ) + .update_hashes_using_reference_value_hash(value_hash) .unwrap_add_cost(&mut cost); - self.inner.kv.feature_type = feature_type; - self.wrap_with_cost(cost) + Ok(self).wrap_with_cost(cost) } // TODO: add compute_hashes method @@ -737,22 +875,6 @@ impl Tree { &mut self, c: &mut C, old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - update_tree_value_based_on_costs: &mut impl FnMut( - &StorageCost, - &Vec, - &mut Vec, - ) -> Result< - (bool, Option), - Error, - >, - section_removal_bytes: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - Error, - >, 
) -> CostResult<(), Error> { // TODO: make this method less ugly // TODO: call write in-order for better performance in writing batch to db? @@ -769,15 +891,7 @@ impl Tree { }) = self.inner.left.take() { // println!("key is {}", std::str::from_utf8(tree.key()).unwrap()); - cost_return_on_error!( - &mut cost, - tree.commit( - c, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes - ) - ); + cost_return_on_error!(&mut cost, tree.commit(c, old_specialized_cost,)); let sum = cost_return_on_error_default!(tree.sum()); self.inner.left = Some(Link::Loaded { @@ -800,15 +914,7 @@ impl Tree { }) = self.inner.right.take() { // println!("key is {}", std::str::from_utf8(tree.key()).unwrap()); - cost_return_on_error!( - &mut cost, - tree.commit( - c, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes - ) - ); + cost_return_on_error!(&mut cost, tree.commit(c, old_specialized_cost,)); let sum = cost_return_on_error_default!(tree.sum()); self.inner.right = Some(Link::Loaded { hash: tree.hash().unwrap_add_cost(&mut cost), @@ -821,15 +927,7 @@ impl Tree { } } - cost_return_on_error_no_add!( - &cost, - c.write( - self, - old_specialized_cost, - update_tree_value_based_on_costs, - section_removal_bytes - ) - ); + cost_return_on_error_no_add!(&cost, c.write(self, old_specialized_cost,)); // println!("done committing {}", std::str::from_utf8(self.key()).unwrap()); @@ -847,7 +945,15 @@ impl Tree { /// Fetches the child on the given side using the given data source, and /// places it in the child slot (upgrading the link from `Link::Reference` /// to `Link::Loaded`). - pub fn load(&mut self, left: bool, source: &S) -> CostResult<(), Error> { + pub fn load( + &mut self, + left: bool, + source: &S, + value_defined_cost_fn: Option<&V>, + ) -> CostResult<(), Error> + where + V: Fn(&[u8]) -> Option, + { // TODO: return Err instead of panic? 
let link = self.link(left).expect("Expected link"); let (child_heights, hash, sum) = match link { @@ -861,7 +967,7 @@ impl Tree { }; let mut cost = OperationCost::default(); - let tree = cost_return_on_error!(&mut cost, source.fetch(link)); + let tree = cost_return_on_error!(&mut cost, source.fetch(link, value_defined_cost_fn)); debug_assert_eq!(tree.key(), link.key()); *self.slot_mut(left) = Some(Link::Loaded { tree, @@ -886,14 +992,15 @@ pub const fn side_to_str(left: bool) -> &'static str { #[cfg(feature = "full")] #[cfg(test)] mod test { - use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; - use super::{commit::NoopCommit, hash::NULL_HASH, Tree}; - use crate::tree::{tree_feature_type::TreeFeatureType::SummedMerk, TreeFeatureType::BasicMerk}; + use super::{commit::NoopCommit, hash::NULL_HASH, TreeNode}; + use crate::tree::{ + tree_feature_type::TreeFeatureType::SummedMerkNode, TreeFeatureType::BasicMerkNode, + }; #[test] fn build_tree() { - let tree = Tree::new(vec![1], vec![101], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![1], vec![101], None, BasicMerkNode).unwrap(); assert_eq!(tree.key(), &[1]); assert_eq!(tree.value_as_slice(), &[101]); assert!(tree.child(true).is_none()); @@ -905,13 +1012,13 @@ mod test { let tree = tree.attach( true, - Some(Tree::new(vec![2], vec![102], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![102], None, BasicMerkNode).unwrap()), ); assert_eq!(tree.key(), &[1]); assert_eq!(tree.child(true).unwrap().key(), &[2]); assert!(tree.child(false).is_none()); - let tree = Tree::new(vec![3], vec![103], None, BasicMerk) + let tree = TreeNode::new(vec![3], vec![103], None, BasicMerkNode) .unwrap() .attach(false, Some(tree)); assert_eq!(tree.key(), &[3]); @@ -922,29 +1029,29 @@ mod test { #[should_panic] #[test] fn attach_existing() { - Tree::new(vec![0], vec![1], None, BasicMerk) + TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![2], 
vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ) .attach( true, - Some(Tree::new(vec![4], vec![5], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![4], vec![5], None, BasicMerkNode).unwrap()), ); } #[test] fn modify() { - let tree = Tree::new(vec![0], vec![1], None, BasicMerk) + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ) .attach( false, - Some(Tree::new(vec![4], vec![5], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![4], vec![5], None, BasicMerkNode).unwrap()), ); let tree = tree.walk(true, |left_opt| { @@ -956,7 +1063,7 @@ mod test { let tree = tree.walk(true, |left_opt| { assert!(left_opt.is_none()); - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()) + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()) }); assert_eq!(tree.link(true).unwrap().key(), &[2]); @@ -970,25 +1077,20 @@ mod test { #[test] fn child_and_link() { - let mut tree = Tree::new(vec![0], vec![1], None, BasicMerk) + let mut tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); assert!(tree.link(true).expect("expected link").is_modified()); assert!(tree.child(true).is_some()); assert!(tree.link(false).is_none()); assert!(tree.child(false).is_none()); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); assert!(tree.link(true).expect("expected link").is_stored()); assert!(tree.child(true).is_some()); @@ -1003,20 +1105,15 @@ mod test 
{ #[test] fn child_hash() { - let mut tree = Tree::new(vec![0], vec![1], None, BasicMerk) + let mut tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); assert_eq!( tree.child_hash(true), &[ @@ -1029,7 +1126,7 @@ mod test { #[test] fn hash() { - let tree = Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); assert_eq!( tree.hash().unwrap(), [ @@ -1041,13 +1138,13 @@ mod test { #[test] fn child_pending_writes() { - let tree = Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); assert_eq!(tree.child_pending_writes(true), 0); assert_eq!(tree.child_pending_writes(false), 0); let tree = tree.attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); assert_eq!(tree.child_pending_writes(true), 1); assert_eq!(tree.child_pending_writes(false), 0); @@ -1055,7 +1152,7 @@ mod test { #[test] fn height_and_balance() { - let tree = Tree::new(vec![0], vec![1], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(); assert_eq!(tree.height(), 1); assert_eq!(tree.child_height(true), 0); assert_eq!(tree.child_height(false), 0); @@ -1063,7 +1160,7 @@ mod test { let tree = tree.attach( true, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); assert_eq!(tree.height(), 
2); assert_eq!(tree.child_height(true), 1); @@ -1080,40 +1177,30 @@ mod test { #[test] fn commit() { - let mut tree = Tree::new(vec![0], vec![1], None, BasicMerk) + let mut tree = TreeNode::new(vec![0], vec![1], None, BasicMerkNode) .unwrap() .attach( false, - Some(Tree::new(vec![2], vec![3], None, BasicMerk).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); assert!(tree.link(false).expect("expected link").is_stored()); } #[test] fn sum_tree() { - let mut tree = Tree::new(vec![0], vec![1], None, SummedMerk(3)) + let mut tree = TreeNode::new(vec![0], vec![1], None, SummedMerkNode(3)) .unwrap() .attach( false, - Some(Tree::new(vec![2], vec![3], None, SummedMerk(5)).unwrap()), + Some(TreeNode::new(vec![2], vec![3], None, SummedMerkNode(5)).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); assert_eq!(Some(8), tree.sum().expect("expected to get sum from tree")); } diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index 1b161cdd..a55adbe9 100644 --- a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -50,10 +50,13 @@ use integer_encoding::VarInt; use Op::*; #[cfg(feature = "full")] -use super::{Fetch, Link, Tree, Walker}; +use super::{Fetch, Link, TreeNode, Walker}; #[cfg(feature = "full")] use crate::{error::Error, tree::tree_feature_type::TreeFeatureType, CryptoHash, HASH_LENGTH_U32}; -use crate::{merk::KeyUpdates, tree::kv::ValueDefinedCostType::SpecializedValueDefinedCost}; +use crate::{ + merk::KeyUpdates, 
+ tree::kv::{ValueDefinedCostType, ValueDefinedCostType::SpecializedValueDefinedCost}, +}; #[cfg(feature = "full")] /// An operation to be applied to a key in the store. @@ -147,7 +150,11 @@ pub struct PanicSource {} #[cfg(feature = "full")] impl Fetch for PanicSource { - fn fetch(&self, _link: &Link) -> CostResult { + fn fetch( + &self, + _link: &Link, + _value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + ) -> CostResult { unreachable!("'fetch' should not have been called") } } @@ -162,15 +169,23 @@ where /// not require a non-empty tree. /// /// Keys in batch must be sorted and unique. - pub fn apply_to, C, R>( + pub fn apply_to, C, V, U, R>( maybe_tree: Option, batch: &MerkBatch, source: S, old_tree_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, - ) -> CostContext, KeyUpdates), Error>> + ) -> CostContext, KeyUpdates), Error>> where C: Fn(&Vec, &Vec) -> Result, + V: Fn(&[u8]) -> Option, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { let mut cost = OperationCost::default(); @@ -188,28 +203,40 @@ where } else { match maybe_tree { None => { - return Self::build(batch, source, old_tree_cost, section_removal_bytes).map_ok( - |tree| { - let new_keys: BTreeSet> = batch - .iter() - .map(|batch_entry| batch_entry.0.as_ref().to_vec()) - .collect(); - ( - tree, - KeyUpdates::new( - new_keys, - BTreeSet::default(), - LinkedList::default(), - None, - ), - ) - }, + return Self::build( + batch, + source, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, ) + .map_ok(|tree| { + let new_keys: BTreeSet> = batch + .iter() + .map(|batch_entry| batch_entry.0.as_ref().to_vec()) + .collect(); + ( + tree, + KeyUpdates::new( + new_keys, + BTreeSet::default(), + LinkedList::default(), + None, + ), + ) + }) } Some(tree) => { 
cost_return_on_error!( &mut cost, - tree.apply_sorted(batch, old_tree_cost, section_removal_bytes) + tree.apply_sorted( + batch, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes + ) ) } } @@ -222,14 +249,22 @@ where /// Builds a `Tree` from a batch of operations. /// /// Keys in batch must be sorted and unique. - fn build, C, R>( + fn build, C, V, U, R>( batch: &MerkBatch, source: S, old_tree_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, - ) -> CostResult, Error> + ) -> CostResult, Error> where C: Fn(&Vec, &Vec) -> Result, + V: Fn(&[u8]) -> Option, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { let mut cost = OperationCost::default(); @@ -251,6 +286,8 @@ where left_batch, source.clone(), old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, section_removal_bytes ) ) @@ -259,7 +296,13 @@ where Some(tree) => { cost_return_on_error!( &mut cost, - tree.apply_sorted(right_batch, old_tree_cost, section_removal_bytes) + tree.apply_sorted( + right_batch, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes + ) ) .0 } @@ -269,6 +312,8 @@ where right_batch, source.clone(), old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, section_removal_bytes ) ) @@ -286,21 +331,21 @@ where // TODO: take from batch so we don't have to clone let mid_tree = match mid_op { - Put(..) => Tree::new( + Put(..) 
=> TreeNode::new( mid_key.as_ref().to_vec(), mid_value.to_vec(), None, mid_feature_type.to_owned(), ) .unwrap_add_cost(&mut cost), - PutWithSpecializedCost(_, value_cost, _) => Tree::new( + PutWithSpecializedCost(_, value_cost, _) => TreeNode::new( mid_key.as_ref().to_vec(), mid_value.to_vec(), Some(SpecializedValueDefinedCost(*value_cost)), mid_feature_type.to_owned(), ) .unwrap_add_cost(&mut cost), - PutCombinedReference(_, referenced_value, _) => Tree::new_with_combined_value_hash( + PutCombinedReference(_, referenced_value, _) => TreeNode::new_with_combined_value_hash( mid_key.as_ref().to_vec(), mid_value, referenced_value.to_owned(), @@ -309,7 +354,7 @@ where .unwrap_add_cost(&mut cost), PutLayeredReference(_, value_cost, referenced_value, _) | ReplaceLayeredReference(_, value_cost, referenced_value, _) => { - Tree::new_with_layered_value_hash( + TreeNode::new_with_layered_value_hash( mid_key.as_ref().to_vec(), mid_value, *value_cost, @@ -338,6 +383,8 @@ where None ), old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, section_removal_bytes, ) ) @@ -354,6 +401,8 @@ where self.apply_sorted( batch, &|_, _| Ok(0), + None::<&fn(&[u8]) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), @@ -367,14 +416,22 @@ where /// `Walker::apply`_to, but requires a populated tree. /// /// Keys in batch must be sorted and unique. 
- fn apply_sorted, C, R>( + fn apply_sorted, C, V, U, R>( self, batch: &MerkBatch, old_specialized_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, ) -> CostResult<(Option, KeyUpdates), Error> where C: Fn(&Vec, &Vec) -> Result, + V: Fn(&[u8]) -> Option, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { let mut cost = OperationCost::default(); @@ -390,33 +447,64 @@ where // a key matches this node's key, apply op to this node match op { // TODO: take vec from batch so we don't need to clone - Put(value, feature_type) => self - .put_value(value.to_vec(), feature_type.to_owned()) - .unwrap_add_cost(&mut cost), - PutWithSpecializedCost(value, value_cost, feature_type) => self - .put_value_with_fixed_cost(value.to_vec(), *value_cost, feature_type.to_owned()) - .unwrap_add_cost(&mut cost), - PutCombinedReference(value, referenced_value, feature_type) => self - .put_value_and_reference_value_hash( - value.to_vec(), - referenced_value.to_owned(), - feature_type.to_owned(), + Put(value, feature_type) => { + cost_return_on_error!( + &mut cost, + self.put_value( + value.to_vec(), + feature_type.to_owned(), + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) ) - .unwrap_add_cost(&mut cost), + } + + PutWithSpecializedCost(value, value_cost, feature_type) => { + cost_return_on_error!( + &mut cost, + self.put_value_with_fixed_cost( + value.to_vec(), + *value_cost, + feature_type.to_owned(), + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ) + } + PutCombinedReference(value, referenced_value, feature_type) => { + cost_return_on_error!( + &mut cost, + self.put_value_and_reference_value_hash( + value.to_vec(), + referenced_value.to_owned(), + feature_type.to_owned(), + old_specialized_cost, + 
update_tree_value_based_on_costs, + section_removal_bytes, + ) + ) + } PutLayeredReference(value, value_cost, referenced_value, feature_type) | ReplaceLayeredReference(value, value_cost, referenced_value, feature_type) => { - self.put_value_with_reference_value_hash_and_value_cost( - value.to_vec(), - referenced_value.to_owned(), - *value_cost, - feature_type.to_owned(), + cost_return_on_error!( + &mut cost, + self.put_value_with_reference_value_hash_and_value_cost( + value.to_vec(), + referenced_value.to_owned(), + *value_cost, + feature_type.to_owned(), + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes, + ) ) - .unwrap_add_cost(&mut cost) } Delete | DeleteLayered | DeleteLayeredMaybeSpecialized | DeleteMaybeSpecialized => { // TODO: we shouldn't have to do this as 2 different calls to apply let source = self.clone_source(); - let wrap = |maybe_tree: Option| { + let wrap = |maybe_tree: Option| { maybe_tree.map(|tree| Self::new(tree, source.clone())) }; let key = self.tree().key().to_vec(); @@ -441,7 +529,7 @@ where &cost, section_removal_bytes(value, total_key_len, old_cost) ); - let deletion_cost = Some(KeyValueStorageCost { + let deletion_cost = KeyValueStorageCost { key_storage_cost: StorageCost { added_bytes: 0, replaced_bytes: 0, @@ -454,9 +542,10 @@ where }, new_node: false, needs_value_verification: false, - }); + }; - let maybe_tree = cost_return_on_error!(&mut cost, self.remove()); + let maybe_tree = + cost_return_on_error!(&mut cost, self.remove(value_defined_cost_fn)); #[rustfmt::skip] let (maybe_tree, mut key_updates) @@ -467,6 +556,8 @@ where &batch[..index], source.clone(), old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, section_removal_bytes ) ); @@ -479,6 +570,8 @@ where &batch[index + 1..], source.clone(), old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, section_removal_bytes ) ); @@ -520,6 +613,8 @@ where exclusive, KeyUpdates::new(new_keys, 
updated_keys, LinkedList::default(), None), old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, section_removal_bytes, ) .add_cost(cost) @@ -530,17 +625,25 @@ where /// /// This recursion executes serially in the same thread, but in the future /// will be dispatched to workers in other threads. - fn recurse, C, R>( + fn recurse, C, V, U, R>( self, batch: &MerkBatch, mid: usize, exclusive: bool, mut key_updates: KeyUpdates, old_tree_cost: &C, + value_defined_cost_fn: Option<&V>, + update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, ) -> CostResult<(Option, KeyUpdates), Error> where C: Fn(&Vec, &Vec) -> Result, + U: FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result<(bool, Option), Error>, + V: Fn(&[u8]) -> Option, R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { let mut cost = OperationCost::default(); @@ -558,25 +661,31 @@ where let source = self.clone_source(); cost_return_on_error!( &mut cost, - self.walk(true, |maybe_left| { - Self::apply_to( - maybe_left, - left_batch, - source, - old_tree_cost, - section_removal_bytes, - ) - .map_ok(|(maybe_left, mut key_updates_left)| { - key_updates.new_keys.append(&mut key_updates_left.new_keys); - key_updates - .updated_keys - .append(&mut key_updates_left.updated_keys); - key_updates - .deleted_keys - .append(&mut key_updates_left.deleted_keys); - maybe_left - }) - }) + self.walk( + true, + |maybe_left| { + Self::apply_to( + maybe_left, + left_batch, + source, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + ) + .map_ok(|(maybe_left, mut key_updates_left)| { + key_updates.new_keys.append(&mut key_updates_left.new_keys); + key_updates + .updated_keys + .append(&mut key_updates_left.updated_keys); + key_updates + .deleted_keys + .append(&mut key_updates_left.deleted_keys); + maybe_left + }) + }, + value_defined_cost_fn + ) ) } else { self @@ -586,31 +695,37 @@ where let 
source = tree.clone_source(); cost_return_on_error!( &mut cost, - tree.walk(false, |maybe_right| { - Self::apply_to( - maybe_right, - right_batch, - source, - old_tree_cost, - section_removal_bytes, - ) - .map_ok(|(maybe_right, mut key_updates_right)| { - key_updates.new_keys.append(&mut key_updates_right.new_keys); - key_updates - .updated_keys - .append(&mut key_updates_right.updated_keys); - key_updates - .deleted_keys - .append(&mut key_updates_right.deleted_keys); - maybe_right - }) - }) + tree.walk( + false, + |maybe_right| { + Self::apply_to( + maybe_right, + right_batch, + source, + old_tree_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + ) + .map_ok(|(maybe_right, mut key_updates_right)| { + key_updates.new_keys.append(&mut key_updates_right.new_keys); + key_updates + .updated_keys + .append(&mut key_updates_right.updated_keys); + key_updates + .deleted_keys + .append(&mut key_updates_right.deleted_keys); + maybe_right + }) + }, + value_defined_cost_fn + ) ) } else { tree }; - let tree = cost_return_on_error!(&mut cost, tree.maybe_balance()); + let tree = cost_return_on_error!(&mut cost, tree.maybe_balance(value_defined_cost_fn)); let new_root_key = tree.tree().key(); @@ -633,7 +748,10 @@ where /// Checks if the tree is unbalanced and if so, applies AVL tree rotation(s) /// to rebalance the tree and its subtrees. Returns the root node of the /// balanced tree after applying the rotations. 
- fn maybe_balance(self) -> CostResult { + fn maybe_balance(self, value_defined_cost_fn: Option<&V>) -> CostResult + where + V: Fn(&[u8]) -> Option, + { let mut cost = OperationCost::default(); let balance_factor = self.balance_factor(); @@ -647,37 +765,55 @@ where let tree = if left == (self.tree().link(left).unwrap().balance_factor() > 0) { cost_return_on_error!( &mut cost, - self.walk_expect(left, |child| child.rotate(!left).map_ok(Option::Some)) + self.walk_expect( + left, + |child| child + .rotate(!left, value_defined_cost_fn) + .map_ok(Option::Some), + value_defined_cost_fn + ) ) } else { self }; - let rotate = tree.rotate(left).unwrap_add_cost(&mut cost); + let rotate = tree + .rotate(left, value_defined_cost_fn) + .unwrap_add_cost(&mut cost); rotate.wrap_with_cost(cost) } /// Applies an AVL tree rotation, a constant-time operation which only needs /// to swap pointers in order to rebalance a tree. - fn rotate(self, left: bool) -> CostResult { + fn rotate(self, left: bool, value_defined_cost_fn: Option<&V>) -> CostResult + where + V: Fn(&[u8]) -> Option, + { let mut cost = OperationCost::default(); - let (tree, child) = cost_return_on_error!(&mut cost, self.detach_expect(left)); - let (child, maybe_grandchild) = cost_return_on_error!(&mut cost, child.detach(!left)); + let (tree, child) = + cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)); + let (child, maybe_grandchild) = + cost_return_on_error!(&mut cost, child.detach(!left, value_defined_cost_fn)); // attach grandchild to self tree.attach(left, maybe_grandchild) - .maybe_balance() + .maybe_balance(value_defined_cost_fn) .flat_map_ok(|tree| { // attach self to child, return child - child.attach(!left, Some(tree)).maybe_balance() + child + .attach(!left, Some(tree)) + .maybe_balance(value_defined_cost_fn) }) .add_cost(cost) } /// Removes the root node from the tree. Rearranges and rebalances /// descendants (if any) in order to maintain a valid tree. 
- pub fn remove(self) -> CostResult, Error> { + pub fn remove(self, value_defined_cost_fn: Option<&V>) -> CostResult, Error> + where + V: Fn(&[u8]) -> Option, + { let mut cost = OperationCost::default(); let tree = self.tree(); @@ -687,14 +823,20 @@ where let maybe_tree = if has_left && has_right { // two children, promote edge of taller child - let (tree, tall_child) = cost_return_on_error!(&mut cost, self.detach_expect(left)); - let (_, short_child) = cost_return_on_error!(&mut cost, tree.detach_expect(!left)); - let promoted = - cost_return_on_error!(&mut cost, tall_child.promote_edge(!left, short_child)); + let (tree, tall_child) = + cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)); + let (_, short_child) = + cost_return_on_error!(&mut cost, tree.detach_expect(!left, value_defined_cost_fn)); + let promoted = cost_return_on_error!( + &mut cost, + tall_child.promote_edge(!left, short_child, value_defined_cost_fn) + ); Some(promoted) } else if has_left || has_right { // single child, promote it - Some(cost_return_on_error!(&mut cost, self.detach_expect(left)).1) + Some( + cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)).1, + ) } else { // no child None @@ -707,31 +849,49 @@ where /// reattaches it at the top in order to fill in a gap when removing a root /// node from a tree with both left and right children. Attaches `attach` on /// the opposite side. Returns the promoted node. 
- fn promote_edge(self, left: bool, attach: Self) -> CostResult { - self.remove_edge(left).flat_map_ok(|(edge, maybe_child)| { - edge.attach(!left, maybe_child) - .attach(left, Some(attach)) - .maybe_balance() - }) + fn promote_edge( + self, + left: bool, + attach: Self, + value_defined_cost_fn: Option<&V>, + ) -> CostResult + where + V: Fn(&[u8]) -> Option, + { + self.remove_edge(left, value_defined_cost_fn) + .flat_map_ok(|(edge, maybe_child)| { + edge.attach(!left, maybe_child) + .attach(left, Some(attach)) + .maybe_balance(value_defined_cost_fn) + }) } /// Traverses to the tree's edge on the given side and detaches it /// (reattaching its child, if any, to its former parent). Return value is /// `(edge, maybe_updated_tree)`. - fn remove_edge(self, left: bool) -> CostResult<(Self, Option), Error> { + fn remove_edge( + self, + left: bool, + value_defined_cost_fn: Option<&V>, + ) -> CostResult<(Self, Option), Error> + where + V: Fn(&[u8]) -> Option, + { let mut cost = OperationCost::default(); if self.tree().link(left).is_some() { // this node is not the edge, recurse - let (tree, child) = cost_return_on_error!(&mut cost, self.detach_expect(left)); - let (edge, maybe_child) = cost_return_on_error!(&mut cost, child.remove_edge(left)); + let (tree, child) = + cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)); + let (edge, maybe_child) = + cost_return_on_error!(&mut cost, child.remove_edge(left, value_defined_cost_fn)); tree.attach(left, maybe_child) - .maybe_balance() + .maybe_balance(value_defined_cost_fn) .map_ok(|tree| (edge, Some(tree))) .add_cost(cost) } else { // this node is the edge, detach its child if present - self.detach(!left) + self.detach(!left, value_defined_cost_fn) } } } @@ -742,13 +902,13 @@ mod test { use super::*; use crate::{ test_utils::{apply_memonly, assert_tree_invariants, del_entry, make_tree_seq, seq_key}, - tree::{tree_feature_type::TreeFeatureType::BasicMerk, *}, + 
tree::{tree_feature_type::TreeFeatureType::BasicMerkNode, *}, }; #[test] fn simple_insert() { - let batch = [(b"foo2".to_vec(), Op::Put(b"bar2".to_vec(), BasicMerk))]; - let tree = Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap(); + let batch = [(b"foo2".to_vec(), Op::Put(b"bar2".to_vec(), BasicMerkNode))]; + let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) .apply_sorted_without_costs(&batch) .unwrap() @@ -763,8 +923,8 @@ mod test { #[test] fn simple_update() { - let batch = [(b"foo".to_vec(), Op::Put(b"bar2".to_vec(), BasicMerk))]; - let tree = Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap(); + let batch = [(b"foo".to_vec(), Op::Put(b"bar2".to_vec(), BasicMerkNode))]; + let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) .apply_sorted_without_costs(&batch) .unwrap() @@ -781,7 +941,7 @@ mod test { #[test] fn simple_delete() { let batch = [(b"foo2".to_vec(), Op::Delete)]; - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( b"foo".to_vec(), b"bar".to_vec(), [123; 32], @@ -790,9 +950,10 @@ mod test { hash: [123; 32], sum: None, child_heights: (0, 0), - tree: Tree::new(b"foo2".to_vec(), b"bar2".to_vec(), None, BasicMerk).unwrap(), + tree: TreeNode::new(b"foo2".to_vec(), b"bar2".to_vec(), None, BasicMerkNode) + .unwrap(), }), - BasicMerk, + BasicMerkNode, ) .unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) @@ -815,7 +976,7 @@ mod test { #[test] fn delete_non_existent() { let batch = [(b"foo2".to_vec(), Op::Delete)]; - let tree = Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap(); + let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); Walker::new(tree, PanicSource {}) .apply_sorted_without_costs(&batch) .unwrap() @@ 
-825,7 +986,7 @@ mod test { #[test] fn delete_only_node() { let batch = [(b"foo".to_vec(), Op::Delete)]; - let tree = Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap(); + let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) .apply_sorted_without_costs(&batch) .unwrap() @@ -895,11 +1056,13 @@ mod test { #[test] fn apply_empty_none() { - let (maybe_tree, key_updates) = Walker::::apply_to::, _, _>( + let (maybe_tree, key_updates) = Walker::::apply_to::, _, _, _, _>( None, &[], PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8]) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), @@ -916,12 +1079,14 @@ mod test { #[test] fn insert_empty_single() { - let batch = vec![(vec![0], Op::Put(vec![1], BasicMerk))]; + let batch = vec![(vec![0], Op::Put(vec![1], BasicMerkNode))]; let (maybe_tree, key_updates) = Walker::::apply_to( None, &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8]) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), @@ -941,12 +1106,14 @@ mod test { #[test] fn insert_updated_single() { - let batch = vec![(vec![0], Op::Put(vec![1], BasicMerk))]; + let batch = vec![(vec![0], Op::Put(vec![1], BasicMerkNode))]; let (maybe_tree, key_updates) = Walker::::apply_to( None, &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8]) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), @@ -961,14 +1128,16 @@ mod test { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); let batch = vec![ - (vec![0], Op::Put(vec![2], BasicMerk)), - (vec![1], Op::Put(vec![2], BasicMerk)), + (vec![0], Op::Put(vec![2], 
BasicMerkNode)), + (vec![1], Op::Put(vec![2], BasicMerkNode)), ]; let (maybe_tree, key_updates) = Walker::::apply_to( maybe_walker, &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8]) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), @@ -988,15 +1157,17 @@ mod test { #[test] fn insert_updated_multiple() { let batch = vec![ - (vec![0], Op::Put(vec![1], BasicMerk)), - (vec![1], Op::Put(vec![2], BasicMerk)), - (vec![2], Op::Put(vec![3], BasicMerk)), + (vec![0], Op::Put(vec![1], BasicMerkNode)), + (vec![1], Op::Put(vec![2], BasicMerkNode)), + (vec![2], Op::Put(vec![3], BasicMerkNode)), ]; let (maybe_tree, key_updates) = Walker::::apply_to( None, &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8]) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), @@ -1011,8 +1182,8 @@ mod test { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); let batch = vec![ - (vec![0], Op::Put(vec![5], BasicMerk)), - (vec![1], Op::Put(vec![8], BasicMerk)), + (vec![0], Op::Put(vec![5], BasicMerkNode)), + (vec![1], Op::Put(vec![8], BasicMerkNode)), (vec![2], Op::Delete), ]; let (maybe_tree, key_updates) = Walker::::apply_to( @@ -1020,6 +1191,8 @@ mod test { &batch, PanicSource {}, &|_, _| Ok(0), + None::<&fn(&[u8]) -> Option>, + &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( BasicStorageRemoval(key_bytes_to_remove), @@ -1039,8 +1212,8 @@ mod test { #[test] fn insert_root_single() { - let tree = Tree::new(vec![5], vec![123], None, BasicMerk).unwrap(); - let batch = vec![(vec![6], Op::Put(vec![123], BasicMerk))]; + let tree = TreeNode::new(vec![5], vec![123], None, BasicMerkNode).unwrap(); + let batch = vec![(vec![6], Op::Put(vec![123], BasicMerkNode))]; let tree = apply_memonly(tree, &batch); 
assert_eq!(tree.key(), &[5]); assert!(tree.child(true).is_none()); @@ -1049,10 +1222,10 @@ mod test { #[test] fn insert_root_double() { - let tree = Tree::new(vec![5], vec![123], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![5], vec![123], None, BasicMerkNode).unwrap(); let batch = vec![ - (vec![4], Op::Put(vec![123], BasicMerk)), - (vec![6], Op::Put(vec![123], BasicMerk)), + (vec![4], Op::Put(vec![123], BasicMerkNode)), + (vec![6], Op::Put(vec![123], BasicMerkNode)), ]; let tree = apply_memonly(tree, &batch); assert_eq!(tree.key(), &[5]); @@ -1062,12 +1235,12 @@ mod test { #[test] fn insert_rebalance() { - let tree = Tree::new(vec![5], vec![123], None, BasicMerk).unwrap(); + let tree = TreeNode::new(vec![5], vec![123], None, BasicMerkNode).unwrap(); - let batch = vec![(vec![6], Op::Put(vec![123], BasicMerk))]; + let batch = vec![(vec![6], Op::Put(vec![123], BasicMerkNode))]; let tree = apply_memonly(tree, &batch); - let batch = vec![(vec![7], Op::Put(vec![123], BasicMerk))]; + let batch = vec![(vec![7], Op::Put(vec![123], BasicMerkNode))]; let tree = apply_memonly(tree, &batch); assert_eq!(tree.key(), &[6]); @@ -1077,10 +1250,10 @@ mod test { #[test] fn insert_100_sequential() { - let mut tree = Tree::new(vec![0], vec![123], None, BasicMerk).unwrap(); + let mut tree = TreeNode::new(vec![0], vec![123], None, BasicMerkNode).unwrap(); for i in 0..100 { - let batch = vec![(vec![i + 1], Op::Put(vec![123], BasicMerk))]; + let batch = vec![(vec![i + 1], Op::Put(vec![123], BasicMerkNode))]; tree = apply_memonly(tree, &batch); } diff --git a/merk/src/tree/tree_feature_type.rs b/merk/src/tree/tree_feature_type.rs index e99ca310..c1fceed3 100644 --- a/merk/src/tree/tree_feature_type.rs +++ b/merk/src/tree/tree_feature_type.rs @@ -39,16 +39,16 @@ use ed::{Decode, Encode}; use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; #[cfg(any(feature = "full", feature = "verify"))] -use crate::tree::tree_feature_type::TreeFeatureType::{BasicMerk, SummedMerk}; +use 
crate::tree::tree_feature_type::TreeFeatureType::{BasicMerkNode, SummedMerkNode}; #[cfg(any(feature = "full", feature = "verify"))] #[derive(Copy, Clone, PartialEq, Eq, Debug)] /// Basic or summed pub enum TreeFeatureType { - /// Basic Merk - BasicMerk, - /// Summed Merk - SummedMerk(i64), + /// Basic Merk Tree Node + BasicMerkNode, + /// Summed Merk Tree Node + SummedMerkNode(i64), } #[cfg(feature = "full")] @@ -57,23 +57,23 @@ impl TreeFeatureType { /// Get length of encoded SummedMerk pub fn sum_length(&self) -> Option { match self { - BasicMerk => None, - SummedMerk(m) => Some(m.encode_var_vec().len() as u32), + BasicMerkNode => None, + SummedMerkNode(m) => Some(m.encode_var_vec().len() as u32), } } #[inline] /// Is sum feature? pub fn is_sum_feature(&self) -> bool { - matches!(self, SummedMerk(_)) + matches!(self, SummedMerkNode(_)) } #[inline] /// Get encoding cost of self pub(crate) fn encoding_cost(&self) -> usize { match self { - BasicMerk => 1, - SummedMerk(_sum) => 9, + BasicMerkNode => 1, + SummedMerkNode(_sum) => 9, } } } @@ -85,11 +85,11 @@ impl Encode for TreeFeatureType { #[inline] fn encode_into(&self, dest: &mut W) -> ed::Result<()> { match self { - BasicMerk => { + BasicMerkNode => { dest.write_all(&[0])?; Ok(()) } - SummedMerk(sum) => { + SummedMerkNode(sum) => { dest.write_all(&[1])?; dest.write_varint(sum.to_owned())?; Ok(()) @@ -100,8 +100,8 @@ impl Encode for TreeFeatureType { #[inline] fn encoding_length(&self) -> ed::Result { match self { - BasicMerk => Ok(1), - SummedMerk(sum) => { + BasicMerkNode => Ok(1), + SummedMerkNode(sum) => { let encoded_sum = sum.encode_var_vec(); // 1 for the enum type // encoded_sum.len() for the length of the encoded vector @@ -118,10 +118,10 @@ impl Decode for TreeFeatureType { let mut feature_type: [u8; 1] = [0]; input.read_exact(&mut feature_type)?; match feature_type { - [0] => Ok(BasicMerk), + [0] => Ok(BasicMerkNode), [1] => { let encoded_sum: i64 = input.read_varint()?; - Ok(SummedMerk(encoded_sum)) + 
Ok(SummedMerkNode(encoded_sum)) } _ => Err(ed::Error::UnexpectedByte(55)), } diff --git a/merk/src/tree/walk/fetch.rs b/merk/src/tree/walk/fetch.rs index 94a083af..08b66d99 100644 --- a/merk/src/tree/walk/fetch.rs +++ b/merk/src/tree/walk/fetch.rs @@ -32,9 +32,11 @@ use grovedb_costs::CostResult; #[cfg(feature = "full")] -use super::super::{Link, Tree}; +use super::super::{Link, TreeNode}; #[cfg(feature = "full")] use crate::error::Error; +#[cfg(feature = "full")] +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] /// A source of data to be used by the tree when encountering a pruned node. @@ -43,5 +45,9 @@ use crate::error::Error; pub trait Fetch { /// Called when the tree needs to fetch a node with the given `Link`. The /// `link` value will always be a `Link::Reference` variant. - fn fetch(&self, link: &Link) -> CostResult; + fn fetch( + &self, + link: &Link, + value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + ) -> CostResult; } diff --git a/merk/src/tree/walk/mod.rs b/merk/src/tree/walk/mod.rs index 9cc5bb16..e5401814 100644 --- a/merk/src/tree/walk/mod.rs +++ b/merk/src/tree/walk/mod.rs @@ -36,12 +36,17 @@ mod ref_walker; #[cfg(feature = "full")] pub use fetch::Fetch; #[cfg(feature = "full")] -use grovedb_costs::{cost_return_on_error, CostContext, CostResult, CostsExt, OperationCost}; +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; +use grovedb_costs::{ + cost_return_on_error_no_add, + storage_cost::{removal::StorageRemovedBytes, StorageCost}, +}; #[cfg(feature = "full")] pub use ref_walker::RefWalker; #[cfg(feature = "full")] -use super::{Link, Tree}; +use super::{Link, TreeNode}; +use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::{owner::Owner, tree::tree_feature_type::TreeFeatureType, CryptoHash, Error}; @@ -52,7 +57,7 @@ pub struct Walker where S: Fetch + Sized + Clone, { - tree: Owner, + tree: Owner, source: S, } @@ -62,7 +67,7 @@ where S: Fetch + Sized + Clone, { 
/// Creates a `Walker` with the given tree and source. - pub fn new(tree: Tree, source: S) -> Self { + pub fn new(tree: TreeNode, source: S) -> Self { Self { tree: Owner::new(tree), source, @@ -72,7 +77,14 @@ where /// Similar to `Tree#detach`, but yields a `Walker` which fetches from the /// same source as `self`. Returned tuple is `(updated_self, /// maybe_child_walker)`. - pub fn detach(mut self, left: bool) -> CostResult<(Self, Option), Error> { + pub fn detach( + mut self, + left: bool, + value_defined_cost_fn: Option<&V>, + ) -> CostResult<(Self, Option), Error> + where + V: Fn(&[u8]) -> Option, + { let mut cost = OperationCost::default(); let link = match self.tree.link(left) { @@ -91,7 +103,10 @@ where Some(Link::Reference { .. }) => (), _ => unreachable!("Expected Some(Link::Reference)"), } - cost_return_on_error!(&mut cost, self.source.fetch(&link.unwrap())) + cost_return_on_error!( + &mut cost, + self.source.fetch(&link.unwrap(), value_defined_cost_fn) + ) }; let child = self.wrap(child); @@ -101,29 +116,44 @@ where /// Similar to `Tree#detach_expect`, but yields a `Walker` which fetches /// from the same source as `self`. Returned tuple is `(updated_self, /// child_walker)`. - pub fn detach_expect(self, left: bool) -> CostResult<(Self, Self), Error> { - self.detach(left).map_ok(|(walker, maybe_child)| { - if let Some(child) = maybe_child { - (walker, child) - } else { - panic!( - "Expected {} child, got None", - if left { "left" } else { "right" } - ); - } - }) + pub fn detach_expect( + self, + left: bool, + value_defined_cost_fn: Option<&V>, + ) -> CostResult<(Self, Self), Error> + where + V: Fn(&[u8]) -> Option, + { + self.detach(left, value_defined_cost_fn) + .map_ok(|(walker, maybe_child)| { + if let Some(child) = maybe_child { + (walker, child) + } else { + panic!( + "Expected {} child, got None", + if left { "left" } else { "right" } + ); + } + }) } /// Similar to `Tree#walk`, but yields a `Walker` which fetches from the /// same source as `self`. 
- pub fn walk(self, left: bool, f: F) -> CostResult + pub fn walk( + self, + left: bool, + f: F, + value_defined_cost_fn: Option<&V>, + ) -> CostResult where F: FnOnce(Option) -> CostResult, Error>, - T: Into, + T: Into, + V: Fn(&[u8]) -> Option, { let mut cost = OperationCost::default(); - let (mut walker, maybe_child) = cost_return_on_error!(&mut cost, self.detach(left)); + let (mut walker, maybe_child) = + cost_return_on_error!(&mut cost, self.detach(left, value_defined_cost_fn)); let new_child = match f(maybe_child).unwrap_add_cost(&mut cost) { Ok(x) => x.map(|t| t.into()), Err(e) => return Err(e).wrap_with_cost(cost), @@ -134,14 +164,21 @@ where /// Similar to `Tree#walk_expect` but yields a `Walker` which fetches from /// the same source as `self`. - pub fn walk_expect(self, left: bool, f: F) -> CostResult + pub fn walk_expect( + self, + left: bool, + f: F, + value_defined_cost_fn: Option<&V>, + ) -> CostResult where F: FnOnce(Self) -> CostResult, Error>, - T: Into, + T: Into, + V: Fn(&[u8]) -> Option, { let mut cost = OperationCost::default(); - let (mut walker, child) = cost_return_on_error!(&mut cost, self.detach_expect(left)); + let (mut walker, child) = + cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)); let new_child = match f(child).unwrap_add_cost(&mut cost) { Ok(x) => x.map(|t| t.into()), Err(e) => return Err(e).wrap_with_cost(cost), @@ -151,18 +188,18 @@ where } /// Returns an immutable reference to the `Tree` wrapped by this walker. - pub fn tree(&self) -> &Tree { + pub fn tree(&self) -> &TreeNode { &self.tree } /// Consumes the `Walker` and returns the `Tree` it wraps. - pub fn into_inner(self) -> Tree { + pub fn into_inner(self) -> TreeNode { self.tree.into_inner() } /// Takes a `Tree` and returns a `Walker` which fetches from the same source /// as `self`. 
- fn wrap(&self, tree: Tree) -> Self { + fn wrap(&self, tree: TreeNode) -> Self { Self::new(tree, self.source.clone()) } @@ -175,75 +212,180 @@ where /// implements `Into`. pub fn attach(mut self, left: bool, maybe_child: Option) -> Self where - T: Into, + T: Into, { self.tree .own(|t| t.attach(left, maybe_child.map(|t| t.into()))); self } - /// Similar to `Tree#with_value`. - pub fn put_value(mut self, value: Vec, feature_type: TreeFeatureType) -> CostContext { + /// Similar to `Tree#put_value`. + pub fn put_value( + mut self, + value: Vec, + feature_type: TreeFeatureType, + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.tree - .own(|t| t.put_value(value, feature_type).unwrap_add_cost(&mut cost)); - self.wrap_with_cost(cost) + cost_return_on_error_no_add!( + &cost, + self.tree.own_result(|t| t + .put_value( + value, + feature_type, + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + .unwrap_add_cost(&mut cost)) + ); + Ok(self).wrap_with_cost(cost) } - /// Similar to `Tree#with_value`. + /// Similar to `Tree#put_value_with_fixed_cost`. 
pub fn put_value_with_fixed_cost( mut self, value: Vec, value_fixed_cost: u32, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.tree.own(|t| { - t.put_value_with_fixed_cost(value, value_fixed_cost, feature_type) - .unwrap_add_cost(&mut cost) - }); - self.wrap_with_cost(cost) + cost_return_on_error_no_add!( + &cost, + self.tree.own_result(|t| t + .put_value_with_fixed_cost( + value, + value_fixed_cost, + feature_type, + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + .unwrap_add_cost(&mut cost)) + ); + Ok(self).wrap_with_cost(cost) } - /// Similar to `Tree#with_value_and_value_hash`. + /// Similar to `Tree#put_value_and_reference_value_hash`. 
pub fn put_value_and_reference_value_hash( mut self, value: Vec, value_hash: CryptoHash, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.tree.own(|t| { - t.put_value_and_reference_value_hash(value, value_hash, feature_type) - .unwrap_add_cost(&mut cost) - }); - self.wrap_with_cost(cost) + cost_return_on_error_no_add!( + &cost, + self.tree.own_result(|t| t + .put_value_and_reference_value_hash( + value, + value_hash, + feature_type, + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + .unwrap_add_cost(&mut cost)) + ); + Ok(self).wrap_with_cost(cost) } - /// Similar to `Tree#with_value_and_value_hash`. + /// Similar to `Tree#put_value_with_reference_value_hash_and_value_cost`. 
pub fn put_value_with_reference_value_hash_and_value_cost( mut self, value: Vec, value_hash: CryptoHash, value_fixed_cost: u32, feature_type: TreeFeatureType, - ) -> CostContext { + old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, + update_tree_value_based_on_costs: &mut impl FnMut( + &StorageCost, + &Vec, + &mut Vec, + ) -> Result< + (bool, Option), + Error, + >, + section_removal_bytes: &mut impl FnMut( + &Vec, + u32, + u32, + ) -> Result< + (StorageRemovedBytes, StorageRemovedBytes), + Error, + >, + ) -> CostResult { let mut cost = OperationCost::default(); - self.tree.own(|t| { - t.put_value_with_reference_value_hash_and_value_cost( - value, - value_hash, - value_fixed_cost, - feature_type, - ) - .unwrap_add_cost(&mut cost) - }); - self.wrap_with_cost(cost) + cost_return_on_error_no_add!( + &cost, + self.tree.own_result(|t| t + .put_value_with_reference_value_hash_and_value_cost( + value, + value_hash, + value_fixed_cost, + feature_type, + old_specialized_cost, + update_tree_value_based_on_costs, + section_removal_bytes + ) + .unwrap_add_cost(&mut cost)) + ); + Ok(self).wrap_with_cost(cost) } } #[cfg(feature = "full")] -impl From> for Tree +impl From> for TreeNode where S: Fetch + Sized + Clone, { @@ -255,37 +397,45 @@ where #[cfg(feature = "full")] #[cfg(test)] mod test { - use grovedb_costs::{storage_cost::removal::StorageRemovedBytes::NoStorageRemoval, CostsExt}; + use grovedb_costs::CostsExt; use super::{super::NoopCommit, *}; - use crate::tree::{Tree, TreeFeatureType::BasicMerk}; + use crate::tree::{TreeFeatureType::BasicMerkNode, TreeNode}; #[derive(Clone)] struct MockSource {} impl Fetch for MockSource { - fn fetch(&self, link: &Link) -> CostResult { - Tree::new(link.key().to_vec(), b"foo".to_vec(), None, BasicMerk).map(Ok) + fn fetch( + &self, + link: &Link, + _value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + ) -> CostResult { + TreeNode::new(link.key().to_vec(), b"foo".to_vec(), None, BasicMerkNode).map(Ok) } } #[test] fn 
walk_modified() { - let tree = Tree::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerk) + let tree = TreeNode::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap()), + Some(TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap()), ); let source = MockSource {}; let walker = Walker::new(tree, source); let walker = walker - .walk(true, |child| -> CostResult, Error> { - assert_eq!(child.expect("should have child").tree().key(), b"foo"); - Ok(None).wrap_with_cost(Default::default()) - }) + .walk( + true, + |child| -> CostResult, Error> { + assert_eq!(child.expect("should have child").tree().key(), b"foo"); + Ok(None).wrap_with_cost(Default::default()) + }, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("walk failed"); assert!(walker.into_inner().child(true).is_none()); @@ -293,29 +443,28 @@ mod test { #[test] fn walk_stored() { - let mut tree = Tree::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerk) + let mut tree = TreeNode::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerkNode) .unwrap() .attach( true, - Some(Tree::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerk).unwrap()), + Some(TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap()), ); - tree.commit( - &mut NoopCommit {}, - &|_, _| Ok(0), - &mut |_, _, _| Ok((false, None)), - &mut |_, _, _| Ok((NoStorageRemoval, NoStorageRemoval)), - ) - .unwrap() - .expect("commit failed"); + tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) + .unwrap() + .expect("commit failed"); let source = MockSource {}; let walker = Walker::new(tree, source); let walker = walker - .walk(true, |child| -> CostResult, Error> { - assert_eq!(child.expect("should have child").tree().key(), b"foo"); - Ok(None).wrap_with_cost(Default::default()) - }) + .walk( + true, + |child| -> CostResult, Error> { + assert_eq!(child.expect("should have child").tree().key(), b"foo"); + 
Ok(None).wrap_with_cost(Default::default()) + }, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("walk failed"); assert!(walker.into_inner().child(true).is_none()); @@ -323,7 +472,7 @@ mod test { #[test] fn walk_pruned() { - let tree = Tree::from_fields( + let tree = TreeNode::from_fields( b"test".to_vec(), b"abc".to_vec(), Default::default(), @@ -334,7 +483,7 @@ mod test { sum: None, }), None, - BasicMerk, + BasicMerkNode, ) .unwrap(); @@ -342,10 +491,14 @@ mod test { let walker = Walker::new(tree, source); let walker = walker - .walk_expect(true, |child| -> CostResult, Error> { - assert_eq!(child.tree().key(), b"foo"); - Ok(None).wrap_with_cost(Default::default()) - }) + .walk_expect( + true, + |child| -> CostResult, Error> { + assert_eq!(child.tree().key(), b"foo"); + Ok(None).wrap_with_cost(Default::default()) + }, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("walk failed"); assert!(walker.into_inner().child(true).is_none()); @@ -353,16 +506,20 @@ mod test { #[test] fn walk_none() { - let tree = Tree::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerk).unwrap(); + let tree = TreeNode::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerkNode).unwrap(); let source = MockSource {}; let walker = Walker::new(tree, source); walker - .walk(true, |child| -> CostResult, Error> { - assert!(child.is_none()); - Ok(None).wrap_with_cost(Default::default()) - }) + .walk( + true, + |child| -> CostResult, Error> { + assert!(child.is_none()); + Ok(None).wrap_with_cost(Default::default()) + }, + None::<&fn(&[u8]) -> Option>, + ) .unwrap() .expect("walk failed"); } diff --git a/merk/src/tree/walk/ref_walker.rs b/merk/src/tree/walk/ref_walker.rs index a6d7e4f0..d9fb1bcd 100644 --- a/merk/src/tree/walk/ref_walker.rs +++ b/merk/src/tree/walk/ref_walker.rs @@ -33,9 +33,10 @@ use grovedb_costs::{CostResult, CostsExt, OperationCost}; #[cfg(feature = "full")] use super::{ - super::{Link, Tree}, + super::{Link, TreeNode}, Fetch, }; +use 
crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] use crate::Error; @@ -50,7 +51,7 @@ pub struct RefWalker<'a, S> where S: Fetch + Sized + Clone, { - tree: &'a mut Tree, + tree: &'a mut TreeNode, source: S, } @@ -60,20 +61,27 @@ where S: Fetch + Sized + Clone, { /// Creates a `RefWalker` with the given tree and source. - pub fn new(tree: &'a mut Tree, source: S) -> Self { + pub fn new(tree: &'a mut TreeNode, source: S) -> Self { // TODO: check if tree has modified links, panic if so RefWalker { tree, source } } /// Gets an immutable reference to the `Tree` wrapped by this `RefWalker`. - pub fn tree(&self) -> &Tree { + pub fn tree(&self) -> &TreeNode { self.tree } /// Traverses to the child on the given side (if any), fetching from the /// source if pruned. When fetching, the link is upgraded from /// `Link::Reference` to `Link::Loaded`. - pub fn walk(&mut self, left: bool) -> CostResult>, Error> { + pub fn walk( + &mut self, + left: bool, + value_defined_cost_fn: Option<&V>, + ) -> CostResult>, Error> + where + V: Fn(&[u8]) -> Option, + { let link = match self.tree.link(left) { None => return Ok(None).wrap_with_cost(Default::default()), Some(link) => link, @@ -84,7 +92,7 @@ where Link::Reference { .. 
} => { let load_res = self .tree - .load(left, &self.source) + .load(left, &self.source, value_defined_cost_fn) .unwrap_add_cost(&mut cost); if let Err(e) = load_res { return Err(e).wrap_with_cost(cost); diff --git a/merk/src/visualize.rs b/merk/src/visualize.rs index da0bec44..4b3b2fb7 100644 --- a/merk/src/visualize.rs +++ b/merk/src/visualize.rs @@ -33,7 +33,7 @@ use std::io::{Result, Write}; use grovedb_storage::StorageContext; use grovedb_visualize::{Drawer, Visualize}; -use crate::{tree::Tree, Merk}; +use crate::{tree::TreeNode, Merk}; /// Visualizeable Merk pub struct VisualizeableMerk<'a, S, F> { @@ -52,12 +52,12 @@ impl<'a, S, F> VisualizeableMerk<'a, S, F> { } struct VisualizableTree<'a, F> { - tree: &'a Tree, + tree: &'a TreeNode, deserialize_fn: F, } impl<'a, F> VisualizableTree<'a, F> { - fn new(tree: &'a Tree, deserialize_fn: F) -> Self { + fn new(tree: &'a TreeNode, deserialize_fn: F) -> Self { Self { tree, deserialize_fn, diff --git a/path/Cargo.toml b/path/Cargo.toml index c627f855..bae12675 100644 --- a/path/Cargo.toml +++ b/path/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-path" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" edition = "2021" license = "MIT" description = "Path extension crate for GroveDB" diff --git a/path/src/lib.rs b/path/src/lib.rs index b79c7deb..0691874e 100644 --- a/path/src/lib.rs +++ b/path/src/lib.rs @@ -44,7 +44,7 @@ mod tests { use super::*; use crate::util::calculate_hash; - fn assert_path_properties<'b, B>(path: SubtreePath<'b, B>, reference: Vec>) + fn assert_path_properties(path: SubtreePath<'_, B>, reference: Vec>) where B: AsRef<[u8]> + std::fmt::Debug, { diff --git a/storage/Cargo.toml b/storage/Cargo.toml index 96c7522a..856888a7 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-storage" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" edition = "2021" license = "MIT" description = "Storage extension crate for GroveDB" @@ -14,13 +14,13 @@ num_cpus = { 
version = "1.14.0", optional = true } tempfile = { version = "3.3.0", optional = true } blake3 = { version = "1.3.3", optional = true } integer-encoding = { version = "3.0.4", optional = true } -grovedb-visualize = { version = "1.0.0-rc.1", path = "../visualize" } +grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize" } strum = { version = "0.24.1", features = ["derive"] } -grovedb-costs = { version = "1.0.0-rc.1", path = "../costs" } +grovedb-costs = { version = "1.0.0-rc.2", path = "../costs" } thiserror = "1.0.37" rocksdb = { version = "0.21.0", optional = true } hex = "0.4.3" -grovedb-path = { version = "1.0.0-rc.1", path = "../path" } +grovedb-path = { version = "1.0.0-rc.2", path = "../path" } [features] rocksdb_storage = ["rocksdb", "num_cpus", "lazy_static", "tempfile", "blake3", "integer-encoding"] diff --git a/storage/src/rocksdb_storage/storage.rs b/storage/src/rocksdb_storage/storage.rs index b386ecfc..a396b75f 100644 --- a/storage/src/rocksdb_storage/storage.rs +++ b/storage/src/rocksdb_storage/storage.rs @@ -40,8 +40,8 @@ use grovedb_path::SubtreePath; use integer_encoding::VarInt; use lazy_static::lazy_static; use rocksdb::{ - checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, OptimisticTransactionDB, Options, - Transaction, WriteBatchWithTransaction, DB, DEFAULT_COLUMN_FAMILY_NAME, + checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, OptimisticTransactionDB, + Transaction, WriteBatchWithTransaction, DEFAULT_COLUMN_FAMILY_NAME, }; use super::{ @@ -53,7 +53,6 @@ use crate::{ error::Error::{CostError, RocksDBError}, storage::AbstractBatchOperation, worst_case_costs::WorstKeyLength, - Error::StorageError, Storage, StorageBatch, }; @@ -428,7 +427,7 @@ impl RocksDbStorage { let mut iter = self.db.raw_iterator_cf(&cf_handle); iter.seek_to_first(); while iter.valid() { - self.db.delete(iter.key().expect("should have key")); + self.db.delete(iter.key().expect("should have key"))?; iter.next() } Ok(()) diff --git 
a/storage/src/rocksdb_storage/storage_context/context_no_tx.rs b/storage/src/rocksdb_storage/storage_context/context_no_tx.rs index 20cb65c1..fd639a5a 100644 --- a/storage/src/rocksdb_storage/storage_context/context_no_tx.rs +++ b/storage/src/rocksdb_storage/storage_context/context_no_tx.rs @@ -265,7 +265,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbStorageContext<'db> { fn new_batch(&self) -> Self::Batch { PrefixedMultiContextBatchPart { - prefix: self.prefix.clone(), + prefix: self.prefix, batch: StorageBatch::new(), } } @@ -279,7 +279,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbStorageContext<'db> { fn raw_iter(&self) -> Self::RawIterator { PrefixedRocksDbRawIterator { - prefix: self.prefix.clone(), + prefix: self.prefix, raw_iterator: self.storage.raw_iterator(), } } diff --git a/storage/src/rocksdb_storage/storage_context/context_tx.rs b/storage/src/rocksdb_storage/storage_context/context_tx.rs index 045cd982..d5a480c3 100644 --- a/storage/src/rocksdb_storage/storage_context/context_tx.rs +++ b/storage/src/rocksdb_storage/storage_context/context_tx.rs @@ -296,7 +296,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbTransactionContext<'db> { fn new_batch(&self) -> Self::Batch { PrefixedMultiContextBatchPart { - prefix: self.prefix.clone(), + prefix: self.prefix, batch: StorageBatch::new(), } } @@ -311,7 +311,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbTransactionContext<'db> { fn raw_iter(&self) -> Self::RawIterator { PrefixedRocksDbRawIterator { - prefix: self.prefix.clone(), + prefix: self.prefix, raw_iterator: self.transaction.raw_iterator(), } } diff --git a/storage/src/rocksdb_storage/storage_context/raw_iterator.rs b/storage/src/rocksdb_storage/storage_context/raw_iterator.rs index 4ee36510..a9d6cf4f 100644 --- a/storage/src/rocksdb_storage/storage_context/raw_iterator.rs +++ b/storage/src/rocksdb_storage/storage_context/raw_iterator.rs @@ -48,7 +48,7 @@ pub struct PrefixedRocksDbRawIterator { impl<'a> RawIterator for 
PrefixedRocksDbRawIterator> { fn seek_to_first(&mut self) -> CostContext<()> { - self.raw_iterator.seek(&self.prefix); + self.raw_iterator.seek(self.prefix); ().wrap_with_cost(OperationCost::with_seek_count(1)) } @@ -169,7 +169,7 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator RawIterator for PrefixedRocksDbRawIterator>> { fn seek_to_first(&mut self) -> CostContext<()> { - self.raw_iterator.seek(&self.prefix); + self.raw_iterator.seek(self.prefix); ().wrap_with_cost(OperationCost::with_seek_count(1)) } diff --git a/visualize/Cargo.toml b/visualize/Cargo.toml index a1fc2fcb..f27fe48e 100644 --- a/visualize/Cargo.toml +++ b/visualize/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-visualize" -version = "1.0.0-rc.1" +version = "1.0.0-rc.2" edition = "2021" license = "MIT" description = "Visualizer extension crate for GroveDB" From eade3e078a4ba93398d94aade9144f8305632a9b Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Thu, 28 Sep 2023 12:38:57 +0700 Subject: [PATCH 06/37] fix: improving merk deletion (#271) * small improvements to reduce the call stack on heavy deletion * small fix * fix --- merk/src/tree/ops.rs | 198 +++++++++++++++++++++++++++++++------------ 1 file changed, 144 insertions(+), 54 deletions(-) diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index a55adbe9..da481d0a 100644 --- a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -439,7 +439,7 @@ where let key_vec = self.tree().key().to_vec(); // binary search to see if this node's key is in the batch, and to split // into left and right batches - let search = batch.binary_search_by(|(key, _op)| key.as_ref().cmp(self.tree().key())); + let search = batch.binary_search_by(|(key, _op)| key.as_ref().cmp(&key_vec)); let tree = if let Ok(index) = search { let (_, op) = &batch[index]; @@ -502,33 +502,39 @@ where ) } Delete | DeleteLayered | DeleteLayeredMaybeSpecialized | DeleteMaybeSpecialized => { - // TODO: we shouldn't have to do this as 2 different calls to apply let source = 
self.clone_source(); - let wrap = |maybe_tree: Option| { - maybe_tree.map(|tree| Self::new(tree, source.clone())) - }; - let key = self.tree().key().to_vec(); - let key_len = key.len() as u32; - - let prefixed_key_len = HASH_LENGTH_U32 + key_len; - let total_key_len = prefixed_key_len + prefixed_key_len.required_space() as u32; - let value = self.tree().value_ref(); - let old_cost = match &batch[index].1 { - Delete => self.tree().inner.kv.value_byte_cost_size(), - DeleteLayered | DeleteLayeredMaybeSpecialized => { - cost_return_on_error_no_add!(&cost, old_specialized_cost(&key, value)) - } - DeleteMaybeSpecialized => { - cost_return_on_error_no_add!(&cost, old_specialized_cost(&key, value)) - } - _ => 0, // can't get here anyways + let (r_key_cost, r_value_cost) = { + let value = self.tree().value_ref(); + + let old_cost = match &batch[index].1 { + Delete => self.tree().inner.kv.value_byte_cost_size(), + DeleteLayered | DeleteLayeredMaybeSpecialized => { + cost_return_on_error_no_add!( + &cost, + old_specialized_cost(&key_vec, value) + ) + } + DeleteMaybeSpecialized => { + cost_return_on_error_no_add!( + &cost, + old_specialized_cost(&key_vec, value) + ) + } + _ => 0, // can't get here anyways + }; + + let key_len = key_vec.len() as u32; + + let prefixed_key_len = HASH_LENGTH_U32 + key_len; + let total_key_len = + prefixed_key_len + prefixed_key_len.required_space() as u32; + let value = self.tree().value_ref(); + cost_return_on_error_no_add!( + &cost, + section_removal_bytes(value, total_key_len, old_cost) + ) }; - - let (r_key_cost, r_value_cost) = cost_return_on_error_no_add!( - &cost, - section_removal_bytes(value, total_key_len, old_cost) - ); let deletion_cost = KeyValueStorageCost { key_storage_cost: StorageCost { added_bytes: 0, @@ -544,38 +550,120 @@ where needs_value_verification: false, }; - let maybe_tree = + let maybe_tree_walker = cost_return_on_error!(&mut cost, self.remove(value_defined_cost_fn)); - #[rustfmt::skip] - let (maybe_tree, mut 
key_updates) - = cost_return_on_error!( - &mut cost, - Self::apply_to( - maybe_tree, - &batch[..index], - source.clone(), - old_specialized_cost, - value_defined_cost_fn, - update_tree_value_based_on_costs, - section_removal_bytes + // If there are no more batch updates to the left this means that the index is 0 + // There would be no key updates to the left of this part of the tree. + + let (maybe_tree_walker, mut key_updates) = if index == 0 { + ( + maybe_tree_walker, + KeyUpdates::new( + BTreeSet::default(), + BTreeSet::default(), + LinkedList::default(), + None, + ), ) - ); - let maybe_walker = wrap(maybe_tree); + } else { + match maybe_tree_walker { + None => { + let new_tree_node = cost_return_on_error!( + &mut cost, + Self::build( + &batch[..index], + source.clone(), + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + ) + ); + let new_keys: BTreeSet> = batch[..index] + .iter() + .map(|batch_entry| batch_entry.0.as_ref().to_vec()) + .collect(); + ( + new_tree_node.map(|tree| Self::new(tree, source.clone())), + KeyUpdates::new( + new_keys, + BTreeSet::default(), + LinkedList::default(), + None, + ), + ) + } + Some(tree) => { + cost_return_on_error!( + &mut cost, + tree.apply_sorted( + &batch[..index], + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ) + } + } + }; - let (maybe_tree, mut key_updates_right) = cost_return_on_error!( - &mut cost, - Self::apply_to( - maybe_walker, - &batch[index + 1..], - source.clone(), - old_specialized_cost, - value_defined_cost_fn, - update_tree_value_based_on_costs, - section_removal_bytes + // We not have a new top tree node, and a set of batch operations to the right + // of the node + + let (maybe_tree_walker, mut key_updates_right) = if index == batch.len() - 1 { + ( + maybe_tree_walker, + KeyUpdates::new( + BTreeSet::default(), + BTreeSet::default(), + LinkedList::default(), + None, + ), ) - ); - 
let maybe_walker = wrap(maybe_tree); + } else { + match maybe_tree_walker { + None => { + let new_tree_node = cost_return_on_error!( + &mut cost, + Self::build( + &batch[index + 1..], + source.clone(), + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes, + ) + ); + let new_keys: BTreeSet> = batch[index + 1..] + .iter() + .map(|batch_entry| batch_entry.0.as_ref().to_vec()) + .collect(); + ( + new_tree_node.map(|tree| Self::new(tree, source)), + KeyUpdates::new( + new_keys, + BTreeSet::default(), + LinkedList::default(), + None, + ), + ) + } + Some(tree) => { + cost_return_on_error!( + &mut cost, + tree.apply_sorted( + &batch[index + 1..], + old_specialized_cost, + value_defined_cost_fn, + update_tree_value_based_on_costs, + section_removal_bytes + ) + ) + } + } + }; key_updates.new_keys.append(&mut key_updates_right.new_keys); key_updates @@ -584,10 +672,12 @@ where key_updates .deleted_keys .append(&mut key_updates_right.deleted_keys); - key_updates.deleted_keys.push_back((key, deletion_cost)); + key_updates + .deleted_keys + .push_back((key_vec.clone(), deletion_cost)); key_updates.updated_root_key_from = Some(key_vec); - return Ok((maybe_walker, key_updates)).wrap_with_cost(cost); + return Ok((maybe_tree_walker, key_updates)).wrap_with_cost(cost); } } } else { From 3eb67ebb45347cdf89d0f37c722033c710f453d6 Mon Sep 17 00:00:00 2001 From: Wisdom Ogwu <40731160+iammadab@users.noreply.github.com> Date: Fri, 29 Sep 2023 08:10:48 +0100 Subject: [PATCH 07/37] feat: clear subtree (#272) * wip * fmt * modifications --------- Co-authored-by: Quantum Explorer --- grovedb/src/error.rs | 4 + grovedb/src/operations/delete/mod.rs | 317 ++++++++++++++++++++++++++- 2 files changed, 319 insertions(+), 2 deletions(-) diff --git a/grovedb/src/error.rs b/grovedb/src/error.rs index d2db936f..d29feaa7 100644 --- a/grovedb/src/error.rs +++ b/grovedb/src/error.rs @@ -132,6 +132,10 @@ pub enum Error { /// Deleting non empty tree 
DeletingNonEmptyTree(&'static str), + #[error("clearing tree with subtrees not allowed error: {0}")] + /// Clearing tree with subtrees not allowed + ClearingTreeWithSubtreesNotAllowed(&'static str), + // Client allowed errors #[error("just in time element flags client error: {0}")] /// Just in time element flags client error diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index 512a33ef..490a4dab 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -46,6 +46,7 @@ use grovedb_costs::{ storage_cost::removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, CostResult, CostsExt, OperationCost, }; +use grovedb_merk::{proofs::Query, KVIterator}; #[cfg(feature = "full")] use grovedb_merk::{Error as MerkError, Merk, MerkOptions}; use grovedb_path::SubtreePath; @@ -55,13 +56,37 @@ use grovedb_storage::{ Storage, StorageBatch, StorageContext, }; -use crate::util::merk_optional_tx_path_not_empty; #[cfg(feature = "full")] use crate::{ batch::{GroveDbOp, Op}, util::{storage_context_optional_tx, storage_context_with_parent_optional_tx}, Element, ElementFlags, Error, GroveDb, Transaction, TransactionArg, }; +use crate::{raw_decode, util::merk_optional_tx_path_not_empty}; + +#[cfg(feature = "full")] +#[derive(Clone)] +/// Clear options +pub struct ClearOptions { + /// Check for Subtrees + pub check_for_subtrees: bool, + /// Allow deleting non empty trees if we check for subtrees + pub allow_deleting_subtrees: bool, + /// If we check for subtrees, and we don't allow deleting and there are + /// some, should we error? 
+ pub trying_to_clear_with_subtrees_returns_error: bool, +} + +#[cfg(feature = "full")] +impl Default for ClearOptions { + fn default() -> Self { + ClearOptions { + check_for_subtrees: true, + allow_deleting_subtrees: false, + trying_to_clear_with_subtrees_returns_error: true, + } + } +} #[cfg(feature = "full")] #[derive(Clone)] @@ -138,6 +163,180 @@ impl GroveDb { }) } + /// Delete all elements in a specified subtree + /// Returns if we successfully cleared the subtree + fn clear_subtree<'b, B, P>( + &self, + path: P, + options: Option, + transaction: TransactionArg, + ) -> Result + where + B: AsRef<[u8]> + 'b, + P: Into>, + { + self.clear_subtree_with_costs(path, options, transaction) + .unwrap() + } + + /// Delete all elements in a specified subtree and get back costs + /// Warning: The costs for this operation are not yet correct, hence we + /// should keep this private for now + /// Returns if we successfully cleared the subtree + fn clear_subtree_with_costs<'b, B, P>( + &self, + path: P, + options: Option, + transaction: TransactionArg, + ) -> CostResult + where + B: AsRef<[u8]> + 'b, + P: Into>, + { + let subtree_path: SubtreePath = path.into(); + let mut cost = OperationCost::default(); + let batch = StorageBatch::new(); + + let options = options.unwrap_or_default(); + + if let Some(transaction) = transaction { + let mut merk_to_clear = cost_return_on_error!( + &mut cost, + self.open_transactional_merk_at_path( + subtree_path.clone(), + transaction, + Some(&batch) + ) + ); + + if options.check_for_subtrees { + let mut all_query = Query::new(); + all_query.insert_all(); + + let mut element_iterator = + KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); + + // delete all nested subtrees + while let Some((key, element_value)) = + element_iterator.next_kv().unwrap_add_cost(&mut cost) + { + let element = raw_decode(&element_value).unwrap(); + if element.is_tree() { + if options.allow_deleting_subtrees { + cost_return_on_error!( + &mut cost, 
+ self.delete( + subtree_path.clone(), + key.as_slice(), + Some(DeleteOptions { + allow_deleting_non_empty_trees: true, + deleting_non_empty_trees_returns_error: false, + ..Default::default() + }), + Some(transaction), + ) + ); + } else if options.trying_to_clear_with_subtrees_returns_error { + return Err(Error::ClearingTreeWithSubtreesNotAllowed( + "options do not allow to clear this merk tree as it contains \ + subtrees", + )) + .wrap_with_cost(cost); + } else { + return Ok(false).wrap_with_cost(cost); + } + } + } + } + + // delete non subtree values + cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); + + // propagate changes + let mut merk_cache: HashMap, Merk> = + HashMap::default(); + merk_cache.insert(subtree_path.clone(), merk_to_clear); + cost_return_on_error!( + &mut cost, + self.propagate_changes_with_transaction( + merk_cache, + subtree_path.clone(), + transaction, + &batch, + ) + ); + } else { + let mut merk_to_clear = cost_return_on_error!( + &mut cost, + self.open_non_transactional_merk_at_path(subtree_path.clone(), Some(&batch)) + ); + + if options.check_for_subtrees { + let mut all_query = Query::new(); + all_query.insert_all(); + + let mut element_iterator = + KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); + + // delete all nested subtrees + while let Some((key, element_value)) = + element_iterator.next_kv().unwrap_add_cost(&mut cost) + { + let element = raw_decode(&element_value).unwrap(); + if options.allow_deleting_subtrees { + if element.is_tree() { + cost_return_on_error!( + &mut cost, + self.delete( + subtree_path.clone(), + key.as_slice(), + Some(DeleteOptions { + allow_deleting_non_empty_trees: true, + deleting_non_empty_trees_returns_error: false, + ..Default::default() + }), + None + ) + ); + } + } else if options.trying_to_clear_with_subtrees_returns_error { + return Err(Error::ClearingTreeWithSubtreesNotAllowed( + "options do not allow to clear this merk tree as it contains 
subtrees", + )) + .wrap_with_cost(cost); + } else { + return Ok(false).wrap_with_cost(cost); + } + } + } + + // delete non subtree values + cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); + + // propagate changes + let mut merk_cache: HashMap, Merk> = + HashMap::default(); + merk_cache.insert(subtree_path.clone(), merk_to_clear); + cost_return_on_error!( + &mut cost, + self.propagate_changes_without_transaction( + merk_cache, + subtree_path.clone(), + &batch, + ) + ); + } + + cost_return_on_error!( + &mut cost, + self.db + .commit_multi_context_batch(batch, transaction) + .map_err(Into::into) + ); + + Ok(true).wrap_with_cost(cost) + } + /// Delete element with sectional storage function pub fn delete_with_sectional_storage_function>( &self, @@ -738,7 +937,7 @@ mod tests { use pretty_assertions::assert_eq; use crate::{ - operations::delete::{delete_up_tree::DeleteUpTreeOptions, DeleteOptions}, + operations::delete::{delete_up_tree::DeleteUpTreeOptions, ClearOptions, DeleteOptions}, tests::{ common::EMPTY_PATH, make_empty_grovedb, make_test_grovedb, ANOTHER_TEST_LEAF, TEST_LEAF, }, @@ -1445,4 +1644,118 @@ mod tests { } ); } + + #[test] + fn test_subtree_clear() { + let element = Element::new_item(b"ayy".to_vec()); + + let db = make_test_grovedb(); + + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 2 insert"); + + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element, + None, + None, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key4", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 3 insert"); + + let key1_tree = db + 
.get([TEST_LEAF].as_ref(), b"key1", None) + .unwrap() + .unwrap(); + assert!(!matches!(key1_tree, Element::Tree(None, _))); + let key1_merk = db + .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), None) + .unwrap() + .unwrap(); + assert_ne!(key1_merk.root_hash().unwrap(), [0; 32]); + + let root_hash_before_clear = db.root_hash(None).unwrap().unwrap(); + db.clear_subtree([TEST_LEAF, b"key1"].as_ref(), None, None) + .expect_err("unable to delete subtree"); + + let success = db + .clear_subtree( + [TEST_LEAF, b"key1"].as_ref(), + Some(ClearOptions { + check_for_subtrees: true, + allow_deleting_subtrees: false, + trying_to_clear_with_subtrees_returns_error: false, + }), + None, + ) + .expect("expected no error"); + assert!(!success); + + let success = db + .clear_subtree( + [TEST_LEAF, b"key1"].as_ref(), + Some(ClearOptions { + check_for_subtrees: true, + allow_deleting_subtrees: true, + trying_to_clear_with_subtrees_returns_error: false, + }), + None, + ) + .expect("unable to delete subtree"); + + assert!(success); + + assert!(matches!( + db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + .unwrap(), + Err(Error::PathKeyNotFound(_)) + )); + assert!(matches!( + db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) + .unwrap(), + Err(Error::PathParentLayerNotFound(_)) + )); + let key1_tree = db + .get([TEST_LEAF].as_ref(), b"key1", None) + .unwrap() + .unwrap(); + assert!(matches!(key1_tree, Element::Tree(None, _))); + + let key1_merk = db + .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), None) + .unwrap() + .unwrap(); + assert_eq!(key1_merk.root_hash().unwrap(), [0; 32]); + + let root_hash_after_clear = db.root_hash(None).unwrap().unwrap(); + assert_ne!(root_hash_before_clear, root_hash_after_clear); + } } From 63b4ff1df767cabd92c4ad13a6f2ef87a1299a7d Mon Sep 17 00:00:00 2001 From: Quantum Explorer Date: Fri, 29 Sep 2023 14:27:42 +0700 Subject: [PATCH 08/37] fix: forgot to add pub for clear_subtree --- 
grovedb/src/operations/delete/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index 490a4dab..6d7a34d0 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -165,7 +165,7 @@ impl GroveDb { /// Delete all elements in a specified subtree /// Returns if we successfully cleared the subtree - fn clear_subtree<'b, B, P>( + pub fn clear_subtree<'b, B, P>( &self, path: P, options: Option, From f948538ab9e53cd83762aa4c57e50ec571d61758 Mon Sep 17 00:00:00 2001 From: Wisdom Ogwu <40731160+iammadab@users.noreply.github.com> Date: Wed, 11 Oct 2023 13:45:31 +0100 Subject: [PATCH 09/37] fix: early limit reduction (#273) * wip * factor offset values * fix clippy errors --- grovedb/src/batch/key_info.rs | 15 +-------------- grovedb/src/batch/mod.rs | 16 ++++++++-------- grovedb/src/operations/proof/generate.rs | 16 +++++++++++++--- grovedb/src/operations/proof/util.rs | 14 ++++++++++++++ 4 files changed, 36 insertions(+), 25 deletions(-) diff --git a/grovedb/src/batch/key_info.rs b/grovedb/src/batch/key_info.rs index 19bea9e2..a8eb50af 100644 --- a/grovedb/src/batch/key_info.rs +++ b/grovedb/src/batch/key_info.rs @@ -100,20 +100,7 @@ impl PartialEq<&[u8]> for KeyInfo { #[cfg(feature = "full")] impl PartialOrd for KeyInfo { fn partial_cmp(&self, other: &Self) -> Option { - match self.as_slice().partial_cmp(other.as_slice()) { - None => None, - Some(ord) => match ord { - Ordering::Less => Some(Ordering::Less), - Ordering::Equal => { - let other_len = other.max_length(); - match self.max_length().partial_cmp(&other_len) { - None => Some(Ordering::Equal), - Some(ord) => Some(ord), - } - } - Ordering::Greater => Some(Ordering::Greater), - }, - } + Some(self.cmp(other)) } } diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index a3b2d502..9724ea7b 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -168,19 
+168,19 @@ pub enum Op { impl PartialOrd for Op { fn partial_cmp(&self, other: &Self) -> Option { - match (self, other) { - (Op::Delete, Op::Insert { .. }) => Some(Ordering::Less), - (Op::Delete, Op::Replace { .. }) => Some(Ordering::Less), - (Op::Insert { .. }, Op::Delete) => Some(Ordering::Greater), - (Op::Replace { .. }, Op::Delete) => Some(Ordering::Greater), - _ => Some(Ordering::Equal), - } + Some(self.cmp(other)) } } impl Ord for Op { fn cmp(&self, other: &Self) -> Ordering { - self.partial_cmp(other).expect("all ops have order") + match (self, other) { + (Op::Delete, Op::Insert { .. }) => Ordering::Less, + (Op::Delete, Op::Replace { .. }) => Ordering::Less, + (Op::Insert { .. }, Op::Delete) => Ordering::Greater, + (Op::Replace { .. }, Op::Delete) => Ordering::Greater, + _ => Ordering::Equal, + } } } diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index 292b8cbf..82852359 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -47,8 +47,8 @@ use grovedb_storage::StorageContext; use crate::{ element::helpers::raw_decode, operations::proof::util::{ - reduce_limit_and_offset_by, write_slice_of_slice_to_slice, write_slice_to_vec, - write_to_vec, ProofTokenType, EMPTY_TREE_HASH, + increase_limit_and_offset_by, reduce_limit_and_offset_by, write_slice_of_slice_to_slice, + write_slice_to_vec, write_to_vec, ProofTokenType, EMPTY_TREE_HASH, }, reference_path::path_from_reference_path_type, versioning::{prepend_version_to_bytes, PROOF_VERSION}, @@ -216,6 +216,9 @@ impl GroveDb { let mut is_leaf_tree = true; + let mut offset_inc = 0; + let mut limit_inc = 0; + let mut kv_iterator = KVIterator::new(subtree.storage.raw_iter(), &query.query.query) .unwrap_add_cost(&mut cost); @@ -231,7 +234,13 @@ impl GroveDb { if subquery_value.is_none() && subquery_path.is_none() { // this element should be added to the result set // hence we have to update the limit and offset value - 
reduce_limit_and_offset_by(current_limit, current_offset, 1); + let reduced_offset = + reduce_limit_and_offset_by(current_limit, current_offset, 1); + if reduced_offset { + offset_inc += 1; + } else { + limit_inc += 1; + } continue; } @@ -411,6 +420,7 @@ impl GroveDb { if is_leaf_tree { // if no useful subtree, then we care about the result set of this subtree. // apply the sized query + increase_limit_and_offset_by(current_limit, current_offset, limit_inc, offset_inc); let limit_offset = cost_return_on_error!( &mut cost, self.generate_and_store_merk_proof( diff --git a/grovedb/src/operations/proof/util.rs b/grovedb/src/operations/proof/util.rs index a3802a62..0ea9ad26 100644 --- a/grovedb/src/operations/proof/util.rs +++ b/grovedb/src/operations/proof/util.rs @@ -333,6 +333,20 @@ pub fn reduce_limit_and_offset_by( skip_limit } +pub fn increase_limit_and_offset_by( + limit: &mut Option, + offset: &mut Option, + limit_inc: u16, + offset_inc: u16, +) { + if let Some(offset_value) = *offset { + *offset = Some(offset_value + offset_inc); + } + if let Some(limit_value) = *limit { + *limit = Some(limit_value + limit_inc); + } +} + /// Proved path-key-values pub type ProvedPathKeyValues = Vec; From 6e946239c514dedcfa97eb1a42792790d38cb3eb Mon Sep 17 00:00:00 2001 From: Wisdom Ogwu <40731160+iammadab@users.noreply.github.com> Date: Wed, 11 Oct 2023 16:31:34 +0100 Subject: [PATCH 10/37] fix: op ord implementation (#275) * update op ordering * fix comment * do fixed ordering, remove equality * fmt --- grovedb/src/batch/mod.rs | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index 9724ea7b..8f275095 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -166,6 +166,22 @@ pub enum Op { DeleteSumTree, } +impl Op { + fn to_u8(&self) -> u8 { + match self { + Op::DeleteTree => 0, + Op::DeleteSumTree => 1, + Op::Delete => 2, + Op::InsertTreeWithRootHash { .. 
} => 3, + Op::ReplaceTreeRootKey { .. } => 4, + Op::RefreshReference { .. } => 5, + Op::Replace { .. } => 6, + Op::Patch { .. } => 7, + Op::Insert { .. } => 8, + } + } +} + impl PartialOrd for Op { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) @@ -174,13 +190,7 @@ impl PartialOrd for Op { impl Ord for Op { fn cmp(&self, other: &Self) -> Ordering { - match (self, other) { - (Op::Delete, Op::Insert { .. }) => Ordering::Less, - (Op::Delete, Op::Replace { .. }) => Ordering::Less, - (Op::Insert { .. }, Op::Delete) => Ordering::Greater, - (Op::Replace { .. }, Op::Delete) => Ordering::Greater, - _ => Ordering::Equal, - } + self.to_u8().cmp(&other.to_u8()) } } From 11198471542442de286348d0936475c8e602f953 Mon Sep 17 00:00:00 2001 From: Wisdom Ogwu <40731160+iammadab@users.noreply.github.com> Date: Thu, 26 Oct 2023 17:22:52 +0100 Subject: [PATCH 11/37] fix: proof panic when proving absent path with intermediary empty tree. (#276) * update op ordering * prevent panic in merk proof construction * cleanup * fix proof construction * fix verification * add documentation * clippy fixes * fixed non used error * fmt --------- Co-authored-by: Quantum Explorer --- grovedb/src/operations/proof/generate.rs | 33 ++++++++++++++---------- grovedb/src/operations/proof/verify.rs | 20 +++++++++++++- grovedb/src/tests/query_tests.rs | 23 +++++++++++++++++ 3 files changed, 62 insertions(+), 14 deletions(-) diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index 82852359..fad64c84 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -510,10 +510,22 @@ impl GroveDb { let mut cost = OperationCost::default(); - let mut proof_result = subtree - .prove_without_encoding(query.clone(), limit_offset.0, limit_offset.1) - .unwrap() - .expect("should generate proof"); + // if the subtree is empty, return the EmptyTree proof op + if subtree.root_hash().unwrap() == EMPTY_TREE_HASH { + 
cost_return_on_error_no_add!( + &cost, + write_to_vec(proofs, &[ProofTokenType::EmptyTree.into()]) + ); + return Ok(limit_offset).wrap_with_cost(cost); + } + + let mut proof_result = cost_return_on_error_no_add!( + &cost, + subtree + .prove_without_encoding(query.clone(), limit_offset.0, limit_offset.1) + .unwrap() + .map_err(|_e| Error::InternalError("failed to generate proof")) + ); cost_return_on_error!(&mut cost, self.post_process_proof(path, &mut proof_result)); @@ -570,16 +582,11 @@ impl GroveDb { .open_non_transactional_merk_at_path(current_path.as_slice().into(), None) .unwrap_add_cost(&mut cost); - if subtree.is_err() { + let Ok(subtree) = subtree else { break; - } + }; - let has_item = Element::get( - subtree.as_ref().expect("confirmed not error above"), - key, - true, - ) - .unwrap_add_cost(&mut cost); + let has_item = Element::get(&subtree, key, true).unwrap_add_cost(&mut cost); let mut next_key_query = Query::new(); next_key_query.insert_key(key.to_vec()); @@ -587,7 +594,7 @@ impl GroveDb { &mut cost, self.generate_and_store_merk_proof( ¤t_path.as_slice().into(), - &subtree.expect("confirmed not error above"), + &subtree, &next_key_query, (None, None), ProofTokenType::Merk, diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index 9e8c6e44..a69935bf 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -708,7 +708,25 @@ impl ProofVerifier { for key in path_slices { let (proof_token_type, merk_proof, _) = proof_reader.read_proof()?; - if proof_token_type != ProofTokenType::Merk { + if proof_token_type == ProofTokenType::EmptyTree { + // when we encounter the empty tree op, we need to ensure + // that the expected tree hash is the combination of the + // Element_value_hash and the empty root hash [0; 32] + let combined_hash = combine_hash( + value_hash_fn(last_result_set[0].value.as_slice()).value(), + &[0; 32], + ) + .unwrap(); + if Some(combined_hash) != 
expected_child_hash { + return Err(Error::InvalidProof( + "proof invalid: could not verify empty subtree while generating absent \ + path proof", + )); + } else { + last_result_set = vec![]; + break; + } + } else if proof_token_type != ProofTokenType::Merk { return Err(Error::InvalidProof("expected a merk proof for absent path")); } diff --git a/grovedb/src/tests/query_tests.rs b/grovedb/src/tests/query_tests.rs index 0bb6a1f0..c8e53ff8 100644 --- a/grovedb/src/tests/query_tests.rs +++ b/grovedb/src/tests/query_tests.rs @@ -2658,3 +2658,26 @@ fn test_query_b_depends_on_query_a() { assert_eq!(age_result[0].2, Some(Element::new_item(vec![12]))); assert_eq!(age_result[1].2, Some(Element::new_item(vec![46]))); } + +#[test] +fn test_prove_absent_path_with_intermediate_emtpy_tree() { + // root + // test_leaf (empty) + let mut grovedb = make_test_grovedb(); + + // prove the absence of key "book" in ["test_leaf", "invalid"] + let mut query = Query::new(); + query.insert_key(b"book".to_vec()); + let mut path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"invalid".to_vec()], query); + + let proof = grovedb + .prove_query(&path_query) + .unwrap() + .expect("should generate proofs"); + + let (root_hash, result_set) = + GroveDb::verify_query(proof.as_slice(), &path_query).expect("should verify proof"); + assert_eq!(result_set.len(), 0); + assert_eq!(root_hash, grovedb.root_hash(None).unwrap().unwrap()); +} From 206bc63b291021ef1ad38d297572981f2236a8e7 Mon Sep 17 00:00:00 2001 From: Ivan Shumkov Date: Thu, 23 Nov 2023 18:47:16 +0700 Subject: [PATCH 12/37] chore: debug for op value and flags (#278) * chore: debug for op value and flags * style: fix formatting * style: fix clippy warnings --- grovedb/src/batch/mod.rs | 24 +++-------------------- grovedb/src/tests/query_tests.rs | 5 ++--- grovedb/src/visualize.rs | 33 ++++++++++++++++++++++++++++---- 3 files changed, 34 insertions(+), 28 deletions(-) diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs 
index 8f275095..f6a4e7ea 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -380,27 +380,9 @@ impl fmt::Debug for GroveDbOp { self.key.visualize(key_drawer).unwrap(); let op_dbg = match &self.op { - Op::Insert { element } => match element { - Element::Item(..) => "Insert Item".to_string(), - Element::Reference(..) => "Insert Ref".to_string(), - Element::Tree(..) => "Insert Tree".to_string(), - Element::SumTree(..) => "Insert Sum Tree".to_string(), - Element::SumItem(..) => "Insert Sum Item".to_string(), - }, - Op::Replace { element } => match element { - Element::Item(..) => "Replace Item".to_string(), - Element::Reference(..) => "Replace Ref".to_string(), - Element::Tree(..) => "Replace Tree".to_string(), - Element::SumTree(..) => "Replace Sum Tree".to_string(), - Element::SumItem(..) => "Replace Sum Item".to_string(), - }, - Op::Patch { element, .. } => match element { - Element::Item(..) => "Patch Item".to_string(), - Element::Reference(..) => "Patch Ref".to_string(), - Element::Tree(..) => "Patch Tree".to_string(), - Element::SumTree(..) => "Patch Sum Tree".to_string(), - Element::SumItem(..) => "Patch Sum Item".to_string(), - }, + Op::Insert { element } => format!("Insert {:?}", element), + Op::Replace { element } => format!("Replace {:?}", element), + Op::Patch { element, .. 
} => format!("Patch {:?}", element), Op::RefreshReference { reference_path_type, max_reference_hop, diff --git a/grovedb/src/tests/query_tests.rs b/grovedb/src/tests/query_tests.rs index c8e53ff8..7d5fc680 100644 --- a/grovedb/src/tests/query_tests.rs +++ b/grovedb/src/tests/query_tests.rs @@ -2663,13 +2663,12 @@ fn test_query_b_depends_on_query_a() { fn test_prove_absent_path_with_intermediate_emtpy_tree() { // root // test_leaf (empty) - let mut grovedb = make_test_grovedb(); + let grovedb = make_test_grovedb(); // prove the absence of key "book" in ["test_leaf", "invalid"] let mut query = Query::new(); query.insert_key(b"book".to_vec()); - let mut path_query = - PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"invalid".to_vec()], query); + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"invalid".to_vec()], query); let proof = grovedb .prove_query(&path_query) diff --git a/grovedb/src/visualize.rs b/grovedb/src/visualize.rs index 3abbbfd0..6bba2f83 100644 --- a/grovedb/src/visualize.rs +++ b/grovedb/src/visualize.rs @@ -44,12 +44,24 @@ use crate::{ impl Visualize for Element { fn visualize(&self, mut drawer: Drawer) -> Result> { match self { - Element::Item(value, _) => { + Element::Item(value, flags) => { drawer.write(b"item: ")?; drawer = value.visualize(drawer)?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } } - Element::SumItem(value, _) => { + Element::SumItem(value, flags) => { drawer.write(format!("sum_item: {value}").as_bytes())?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } } Element::Reference(_ref, ..) 
=> { drawer.write(b"ref")?; @@ -64,13 +76,26 @@ impl Visualize for Element { // } // drawer.write(b"]")?; } - Element::Tree(root_key, _) => { + Element::Tree(root_key, flags) => { drawer.write(b"tree: ")?; drawer = root_key.as_deref().visualize(drawer)?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } } - Element::SumTree(root_key, ..) => { + Element::SumTree(root_key, value, flags) => { drawer.write(b"sum_tree: ")?; drawer = root_key.as_deref().visualize(drawer)?; + drawer.write(format!(" {value}").as_bytes())?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } } } Ok(drawer) From b79c4630bd64c52ecac673165b10fda672fb4c00 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Thu, 22 Feb 2024 18:24:10 +0700 Subject: [PATCH 13/37] fix: remove unused jemalloc dependency (#284) --- merk/Cargo.toml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/merk/Cargo.toml b/merk/Cargo.toml index 8cfab2db..7d180f54 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -29,7 +29,7 @@ version = "0.4.3" optional = true [dependencies.colored] -version = "1.9.3" +version = "2.0.4" optional = true [dependencies.num_cpus] @@ -53,11 +53,6 @@ version = "0.8.5" features = ["small_rng"] optional = true -[dependencies.jemallocator] -version = "0.5.0" -features = ["disable_initial_exec_tls"] -optional = true - [features] default = ["full"] full = ["rand", @@ -68,7 +63,6 @@ full = ["rand", "byteorder", "ed", "blake3", - "jemallocator", "grovedb-storage", "grovedb-storage/rocksdb_storage" ] From d007bed7544bf837bdf155e696a530b90d5f82e5 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Thu, 22 Feb 2024 18:34:09 +0700 Subject: [PATCH 14/37] fix: limit will now decrease when subquery has no elements (#277) --- grovedb/src/element/query.rs | 107 ++++++++++++++++++++-------- grovedb/src/operations/get/query.rs | 68 +++++++++++++----- grovedb/src/query/mod.rs | 2 + 
grovedb/src/reference_path.rs | 2 +- grovedb/src/tests/mod.rs | 3 +- grovedb/src/tests/query_tests.rs | 70 +++++++++--------- node-grove/src/lib.rs | 1 + 7 files changed, 171 insertions(+), 82 deletions(-) diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index 8c412cdc..cc914949 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -59,6 +59,20 @@ use crate::{ #[cfg(any(feature = "full", feature = "verify"))] use crate::{Element, SizedQuery}; +#[cfg(feature = "full")] +#[derive(Copy, Clone, Debug)] +pub struct QueryOptions { + pub allow_get_raw: bool, + pub allow_cache: bool, + /// Should we decrease the limit of elements found when we have no + /// subelements in the subquery? This should generally be set to true, + /// as having it false could mean very expensive queries. The queries + /// would be expensive because we could go through many many trees where the + /// sub elements have no matches, hence the limit would not decrease and + /// hence we would continue on the increasingly expensive query. 
+ pub decrease_limit_on_range_with_no_sub_elements: bool, +} + #[cfg(feature = "full")] /// Path query push arguments pub struct PathQueryPushArgs<'db, 'ctx, 'a> @@ -73,8 +87,7 @@ where pub subquery_path: Option, pub subquery: Option, pub left_to_right: bool, - pub allow_get_raw: bool, - pub allow_cache: bool, + pub query_options: QueryOptions, pub result_type: QueryResultType, pub results: &'a mut Vec, pub limit: &'a mut Option, @@ -97,6 +110,7 @@ impl Element { merk_path, &sized_query, true, + true, result_type, transaction, ) @@ -139,8 +153,7 @@ impl Element { storage: &RocksDbStorage, path: &[&[u8]], sized_query: &SizedQuery, - allow_get_raw: bool, - allow_cache: bool, + query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, add_element_function: fn(PathQueryPushArgs) -> CostResult<(), Error>, @@ -166,8 +179,7 @@ impl Element { transaction, &mut limit, &mut offset, - allow_get_raw, - allow_cache, + query_options, result_type, add_element_function, ) @@ -189,8 +201,7 @@ impl Element { transaction, &mut limit, &mut offset, - allow_get_raw, - allow_cache, + query_options, result_type, add_element_function, ) @@ -216,6 +227,7 @@ impl Element { storage: &RocksDbStorage, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult<(QueryResultElements, u16), Error> { @@ -228,8 +240,11 @@ impl Element { storage, path_slices.as_slice(), &path_query.query, - false, - allow_cache, + QueryOptions { + allow_get_raw: false, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + }, result_type, transaction, Element::path_query_push, @@ -243,6 +258,7 @@ impl Element { storage: &RocksDbStorage, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult<(QueryResultElements, u16), Error> { @@ -255,8 +271,11 @@ impl 
Element { storage, path_slices.as_slice(), &path_query.query, - true, - allow_cache, + QueryOptions { + allow_get_raw: true, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + }, result_type, transaction, Element::path_query_push, @@ -270,6 +289,7 @@ impl Element { path: &[&[u8]], sized_query: &SizedQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult<(QueryResultElements, u16), Error> { @@ -277,8 +297,11 @@ impl Element { storage, path, sized_query, - false, - allow_cache, + QueryOptions { + allow_get_raw: false, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + }, result_type, transaction, Element::path_query_push, @@ -299,13 +322,17 @@ impl Element { subquery_path, subquery, left_to_right, - allow_get_raw, - allow_cache, + query_options, result_type, results, limit, offset, } = args; + let QueryOptions { + allow_get_raw, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + } = query_options; if element.is_tree() { let mut path_vec = path.to_vec(); let key = cost_return_on_error_no_add!( @@ -331,13 +358,19 @@ impl Element { storage, &inner_path_query, allow_cache, + decrease_limit_on_range_with_no_sub_elements, result_type, transaction ) ); if let Some(limit) = limit { - *limit = limit.saturating_sub(sub_elements.len() as u16); + if sub_elements.is_empty() && decrease_limit_on_range_with_no_sub_elements { + // we should decrease by 1 in this case + *limit = limit.saturating_sub(1); + } else { + *limit = limit.saturating_sub(sub_elements.len() as u16); + } } if let Some(offset) = offset { *offset = offset.saturating_sub(skipped); @@ -455,8 +488,7 @@ impl Element { subquery_path, subquery, left_to_right, - allow_get_raw, - allow_cache, + query_options, result_type, results, limit, @@ -483,8 +515,7 @@ impl Element { subquery_path, subquery, left_to_right, - allow_get_raw, - allow_cache, + query_options, result_type, results, 
limit, @@ -530,6 +561,12 @@ impl Element { (subquery_path, subquery) } + /// `decrease_limit_on_range_with_no_sub_elements` should generally be set + /// to true, as having it false could mean very expensive queries. + /// The queries would be expensive because we could go through many many + /// trees where the sub elements have no matches, hence the limit would + /// not decrease and hence we would continue on the increasingly + /// expensive query. #[cfg(feature = "full")] // TODO: refactor fn query_item( @@ -541,8 +578,7 @@ impl Element { transaction: TransactionArg, limit: &mut Option, offset: &mut Option, - allow_get_raw: bool, - allow_cache: bool, + query_options: QueryOptions, result_type: QueryResultType, add_element_function: fn(PathQueryPushArgs) -> CostResult<(), Error>, ) -> CostResult<(), Error> { @@ -560,7 +596,10 @@ impl Element { None, transaction, subtree, - { Element::get(&subtree, key, allow_cache).unwrap_add_cost(&mut cost) } + { + Element::get(&subtree, key, query_options.allow_cache) + .unwrap_add_cost(&mut cost) + } ); match element_res { Ok(element) => { @@ -575,8 +614,7 @@ impl Element { subquery_path, subquery, left_to_right: sized_query.query.left_to_right, - allow_get_raw, - allow_cache, + query_options, result_type, results, limit, @@ -630,8 +668,7 @@ impl Element { subquery_path, subquery, left_to_right: sized_query.query.left_to_right, - allow_get_raw, - allow_cache, + query_options, result_type, results, limit, @@ -939,6 +976,7 @@ mod tests { &[TEST_LEAF], &ascending_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -973,6 +1011,7 @@ mod tests { &[TEST_LEAF], &backwards_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1062,6 +1101,7 @@ mod tests { &[TEST_LEAF], &ascending_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1079,6 +1119,7 @@ mod tests { &[TEST_LEAF], &backwards_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1099,6 +1140,7 @@ mod tests { &[TEST_LEAF], 
&backwards_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1161,6 +1203,7 @@ mod tests { &[TEST_LEAF], &backwards_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1187,6 +1230,7 @@ mod tests { &[TEST_LEAF], &backwards_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1208,6 +1252,7 @@ mod tests { &[TEST_LEAF], &limit_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1229,6 +1274,7 @@ mod tests { &[TEST_LEAF], &limit_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1249,6 +1295,7 @@ mod tests { &[TEST_LEAF], &limit_offset_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1274,6 +1321,7 @@ mod tests { &[TEST_LEAF], &limit_offset_backwards_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1298,6 +1346,7 @@ mod tests { &[TEST_LEAF], &limit_full_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1323,6 +1372,7 @@ mod tests { &[TEST_LEAF], &limit_offset_backwards_query, true, + true, QueryKeyElementPairResultType, None, ) @@ -1348,6 +1398,7 @@ mod tests { &[TEST_LEAF], &limit_backwards_query, true, + true, QueryKeyElementPairResultType, None, ) diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index 2dbd89b8..efdcfa63 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -51,6 +51,7 @@ impl GroveDb { &self, path_queries: &[&PathQuery], allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, transaction: TransactionArg, ) -> CostResult>, Error> { let mut cost = OperationCost::default(); @@ -60,6 +61,7 @@ impl GroveDb { self.query_many_raw( path_queries, allow_cache, + decrease_limit_on_range_with_no_sub_elements, QueryResultType::QueryElementResultType, transaction ) @@ -109,6 +111,7 @@ impl GroveDb { &self, path_queries: &[&PathQuery], allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, result_type: QueryResultType, transaction: TransactionArg, 
) -> CostResult @@ -118,7 +121,13 @@ where { let query = cost_return_on_error_no_add!(&cost, PathQuery::merge(path_queries.to_vec())); let (result, _) = cost_return_on_error!( &mut cost, - self.query_raw(&query, allow_cache, result_type, transaction) + self.query_raw( + &query, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + result_type, + transaction + ) ); Ok(result).wrap_with_cost(cost) } @@ -189,6 +198,7 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult<(QueryResultElements, u16), Error> { @@ -196,7 +206,13 @@ where { let (elements, skipped) = cost_return_on_error!( &mut cost, - self.query_raw(path_query, allow_cache, result_type, transaction) + self.query_raw( + path_query, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + result_type, + transaction + ) ); let results_wrapped = elements @@ -218,6 +234,7 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, transaction: TransactionArg, ) -> CostResult<(Vec>, u16), Error> { let mut cost = OperationCost::default(); @@ -227,6 +244,7 @@ where { self.query_raw( path_query, allow_cache, + decrease_limit_on_range_with_no_sub_elements, QueryResultType::QueryElementResultType, transaction ) @@ -288,6 +306,7 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, transaction: TransactionArg, ) -> CostResult<(Vec, u16), Error> { let mut cost = OperationCost::default(); @@ -297,6 +316,7 @@ where { self.query_raw( path_query, allow_cache, + decrease_limit_on_range_with_no_sub_elements, QueryResultType::QueryElementResultType, transaction ) @@ -360,10 +380,18 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, result_type: QueryResultType, transaction: TransactionArg, ) -> 
CostResult<(QueryResultElements, u16), Error> { - Element::get_raw_path_query(&self.db, path_query, allow_cache, result_type, transaction) + Element::get_raw_path_query( + &self.db, + path_query, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + result_type, + transaction, + ) } /// Splits the result set of a path query by query path. @@ -372,6 +400,7 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, transaction: TransactionArg, ) -> CostResult, Error> { let max_results = cost_return_on_error_default!(path_query.query.limit.ok_or( @@ -393,6 +422,7 @@ where { self.query( path_query, allow_cache, + decrease_limit_on_range_with_no_sub_elements, QueryResultType::QueryPathKeyElementTrioResultType, transaction ) @@ -415,6 +445,7 @@ where { &self, path_query: &PathQuery, allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, transaction: TransactionArg, ) -> CostResult, Error> { let max_results = cost_return_on_error_default!(path_query.query.limit.ok_or( @@ -436,6 +467,7 @@ where { self.query_raw( path_query, allow_cache, + decrease_limit_on_range_with_no_sub_elements, QueryResultType::QueryPathKeyElementTrioResultType, transaction ) @@ -507,7 +539,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect("should get successfully"); @@ -563,7 +595,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect("should get successfully"); @@ -620,7 +652,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = 
PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect("should get successfully"); @@ -688,7 +720,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(4), None)); - db.query_raw_keys_optional(&path_query, true, None) + db.query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect_err("range a should error"); @@ -697,7 +729,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 3 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(3), None)); - db.query_raw_keys_optional(&path_query, true, None) + db.query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect("range b should not error"); @@ -706,7 +738,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 4 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(3), None)); - db.query_raw_keys_optional(&path_query, true, None) + db.query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect_err("range c should error"); @@ -715,7 +747,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 3 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(2), None)); - db.query_raw_keys_optional(&path_query, true, None) + db.query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect_err("range d should error"); @@ -723,7 +755,7 @@ mod tests { query.insert_range(b"z".to_vec()..b"10".to_vec()); let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - db.query_raw_keys_optional(&path_query, true, None) + db.query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect_err("range using 2 bytes should error"); } @@ -774,7 +806,7 @@ mod tests { 
let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect("range starting with null should not error"); @@ -857,7 +889,7 @@ mod tests { query.insert_range(b"".to_vec()..b"c".to_vec()); let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - db.query_keys_optional(&path_query, true, None) + db.query_keys_optional(&path_query, true, true, None) .unwrap() .expect_err("range should error because we didn't subquery"); @@ -867,7 +899,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect("query with subquery should not error"); @@ -975,7 +1007,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect("query with subquery should not error"); @@ -1123,7 +1155,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, None) .unwrap() .expect("query with subquery should not error"); @@ -1290,7 +1322,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, None) + .query_raw_keys_optional(&path_query, true, true, None) 
.unwrap() .expect("query with subquery should not error"); @@ -1470,7 +1502,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let result = db - .query_keys_optional(&path_query, true, None) + .query_keys_optional(&path_query, true, true, None) .unwrap() .expect("query with subquery should not error"); diff --git a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index 7563dc73..ce052cb9 100644 --- a/grovedb/src/query/mod.rs +++ b/grovedb/src/query/mod.rs @@ -534,6 +534,7 @@ mod tests { .query_raw( &merged_path_query, true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, ) @@ -826,6 +827,7 @@ mod tests { .query_raw( &merged_path_query, true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, ) diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index 41dd9b6b..359628e6 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -387,7 +387,7 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], query); let result = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("should query items"); assert_eq!(result.0.len(), 5); diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index 451b2307..3d905224 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -541,7 +541,7 @@ fn test_element_with_flags() { SizedQuery::new(query, None, None), ); let (flagged_ref_no_follow, _) = db - .query_raw(&path_query, true, QueryKeyElementPairResultType, None) + .query_raw(&path_query, true, true, QueryKeyElementPairResultType, None) .unwrap() .expect("should get successfully"); @@ -2622,6 +2622,7 @@ fn test_get_full_query() { db.query_many_raw( &[&path_query1, &path_query2], true, + true, QueryKeyElementPairResultType, None ) diff --git a/grovedb/src/tests/query_tests.rs 
b/grovedb/src/tests/query_tests.rs index 7d5fc680..830a18e0 100644 --- a/grovedb/src/tests/query_tests.rs +++ b/grovedb/src/tests/query_tests.rs @@ -365,7 +365,7 @@ fn test_get_range_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -402,7 +402,7 @@ fn test_get_range_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -437,7 +437,7 @@ fn test_get_range_query_with_unique_subquery_on_references() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -481,7 +481,7 @@ fn test_get_range_query_with_unique_subquery_with_non_unique_null_values() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -524,7 +524,7 @@ fn test_get_range_query_with_unique_subquery_ignore_non_unique_null_values() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -562,7 +562,7 @@ fn test_get_range_inclusive_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + 
.query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -602,7 +602,7 @@ fn test_get_range_inclusive_query_with_non_unique_subquery_on_references() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -642,7 +642,7 @@ fn test_get_range_inclusive_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -680,7 +680,7 @@ fn test_get_range_from_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -717,7 +717,7 @@ fn test_get_range_from_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -755,7 +755,7 @@ fn test_get_range_to_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -792,7 +792,7 @@ fn test_get_range_to_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful 
get_path_query"); @@ -830,7 +830,7 @@ fn test_get_range_to_inclusive_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -870,7 +870,7 @@ fn test_get_range_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bou let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -907,7 +907,7 @@ fn test_get_range_to_inclusive_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -945,7 +945,7 @@ fn test_get_range_after_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -985,7 +985,7 @@ fn test_get_range_after_to_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1027,7 +1027,7 @@ fn test_get_range_after_to_inclusive_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1069,7 +1069,7 @@ fn 
test_get_range_after_to_inclusive_query_with_non_unique_subquery_and_key_out_ let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1117,7 +1117,7 @@ fn test_get_range_inclusive_query_with_double_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1156,7 +1156,7 @@ fn test_get_range_query_with_limit_and_offset() { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1187,7 +1187,7 @@ fn test_get_range_query_with_limit_and_offset() { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1218,7 +1218,7 @@ fn test_get_range_query_with_limit_and_offset() { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(55), None)); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1249,7 +1249,7 @@ fn test_get_range_query_with_limit_and_offset() { ); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1287,7 +1287,7 @@ fn test_get_range_query_with_limit_and_offset() { ); let 
(elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1322,7 +1322,7 @@ fn test_get_range_query_with_limit_and_offset() { ); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1343,7 +1343,7 @@ fn test_get_range_query_with_limit_and_offset() { ); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1366,7 +1366,7 @@ fn test_get_range_query_with_limit_and_offset() { let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(5), Some(2))); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1609,7 +1609,7 @@ fn test_mixed_level_proofs() { let path_query = PathQuery::new_unsized(path.clone(), query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1625,7 +1625,7 @@ fn test_mixed_level_proofs() { // Test mixed element proofs with limit and offset let path_query = PathQuery::new_unsized(path.clone(), query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1642,7 +1642,7 @@ fn test_mixed_level_proofs() { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(1), None)); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1660,7 +1660,7 @@ fn 
test_mixed_level_proofs() { SizedQuery::new(query.clone(), Some(3), Some(0)), ); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1678,7 +1678,7 @@ fn test_mixed_level_proofs() { SizedQuery::new(query.clone(), Some(4), Some(0)), ); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1693,7 +1693,7 @@ fn test_mixed_level_proofs() { let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(10), Some(4))); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1789,6 +1789,7 @@ fn test_mixed_level_proofs_with_tree() { .query_raw( &path_query, true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, ) @@ -1811,6 +1812,7 @@ fn test_mixed_level_proofs_with_tree() { .query_raw( &path_query, true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, ) diff --git a/node-grove/src/lib.rs b/node-grove/src/lib.rs index 26eb5188..a955b744 100644 --- a/node-grove/src/lib.rs +++ b/node-grove/src/lib.rs @@ -663,6 +663,7 @@ impl GroveDbWrapper { .query_item_value( &path_query, allows_cache, + true, using_transaction.then_some(transaction).flatten(), ) .unwrap(); // Todo: Costs; From 9eae5457f18d307d673767c8d772630a9d0ad66d Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Thu, 4 Apr 2024 14:01:37 +0700 Subject: [PATCH 15/37] fix: element verify debug (#287) * fix: enable visualized debug of elements by enabling features --- grovedb/Cargo.toml | 5 ++++- grovedb/src/element/mod.rs | 3 ++- grovedb/src/reference_path.rs | 1 + 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index a7fb382b..c479b07c 100644 --- a/grovedb/Cargo.toml +++ 
b/grovedb/Cargo.toml @@ -47,7 +47,7 @@ full = [ "bincode", "serde/derive", "grovedb-storage/rocksdb_storage", - "grovedb-visualize", + "visualize", "hex", "itertools", "integer-encoding", @@ -56,6 +56,9 @@ full = [ "indexmap", "intmap" ] +visualize = [ + "grovedb-visualize", +] verify = [ "grovedb-merk/verify", "grovedb-costs", diff --git a/grovedb/src/element/mod.rs b/grovedb/src/element/mod.rs index 009f85ae..f674d2d8 100644 --- a/grovedb/src/element/mod.rs +++ b/grovedb/src/element/mod.rs @@ -94,6 +94,7 @@ pub type SumValue = i64; /// ONLY APPEND TO THIS LIST!!! Because /// of how serialization works. #[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[cfg_attr(not(any(feature = "full", feature = "visualize")), derive(Debug))] pub enum Element { /// An ordinary value Item(Vec, Option), @@ -109,7 +110,7 @@ pub enum Element { SumTree(Option>, SumValue, Option), } -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "visualize"))] impl fmt::Debug for Element { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut v = Vec::new(); diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index 359628e6..68e6e621 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -42,6 +42,7 @@ use serde::{Deserialize, Serialize}; use crate::Error; #[cfg(any(feature = "full", feature = "verify"))] +#[cfg_attr(not(any(feature = "full", feature = "visualize")), derive(Debug))] /// Reference path variants #[derive(Hash, Eq, PartialEq, Serialize, Deserialize, Clone)] pub enum ReferencePathType { From f5a3382c5537de7417e983cdf2132792aa9fdc9d Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Fri, 5 Apr 2024 11:11:18 +0700 Subject: [PATCH 16/37] refactor: switch to bincode 2, clean up dependencies (#288) --- costs/Cargo.toml | 4 +- grovedb/Cargo.toml | 21 ++++----- grovedb/src/element/mod.rs | 5 +-- grovedb/src/element/serialize.rs | 44 ++++++++----------- .../src/estimated_costs/average_case_costs.rs | 
11 ++--- .../src/estimated_costs/worst_case_costs.rs | 11 ++--- grovedb/src/reference_path.rs | 5 +-- grovedb/src/visualize.rs | 12 ++--- merk/Cargo.toml | 20 ++++----- storage/Cargo.toml | 12 ++--- visualize/Cargo.toml | 2 +- 11 files changed, 69 insertions(+), 78 deletions(-) diff --git a/costs/Cargo.toml b/costs/Cargo.toml index 6aaa6ece..4d62b58e 100644 --- a/costs/Cargo.toml +++ b/costs/Cargo.toml @@ -10,6 +10,6 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] -thiserror = "1.0.30" +thiserror = "1.0.58" intmap = "2.0.0" -integer-encoding = "3.0.3" +integer-encoding = "4.0.0" diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index c479b07c..a77ff4e9 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -13,26 +13,25 @@ documentation = "https://docs.rs/grovedb" [dependencies] grovedb-merk = { version = "1.0.0-rc.2", path = "../merk", optional = true, default-features = false } -thiserror = { version = "1.0.37", optional = true } -tempfile = { version = "3.3.0", optional = true } -bincode = { version = "1.3.3", optional = true } -serde = { version = "1.0.149", optional = true } +thiserror = { version = "1.0.58", optional = true } +tempfile = { version = "3.10.1", optional = true } +bincode = { version = "2.0.0-rc.3" } grovedb-storage = { version = "1.0.0-rc.2", path = "../storage", optional = true } grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize", optional = true } hex = { version = "0.4.3", optional = true } -itertools = { version = "0.10.5", optional = true } -integer-encoding = { version = "3.0.4", optional = true } +itertools = { version = "0.12.1", optional = true } +integer-encoding = { version = "4.0.0", optional = true } grovedb-costs = { version = "1.0.0-rc.2", path = "../costs", optional = true } nohash-hasher = { version = "0.2.0", optional = true } -indexmap = { version = "1.9.2", optional = true } +indexmap = { version = "2.2.6", optional = true } intmap = { version = "2.0.0", optional = true } 
grovedb-path = { version = "1.0.0-rc.2", path = "../path" } [dev-dependencies] rand = "0.8.5" -criterion = "0.4.0" +criterion = "0.5.1" hex = "0.4.3" -pretty_assertions = "1.3.0" +pretty_assertions = "1.4.0" [[bench]] name = "insertion_benchmark" @@ -44,8 +43,6 @@ full = [ "grovedb-merk/full", "thiserror", "tempfile", - "bincode", - "serde/derive", "grovedb-storage/rocksdb_storage", "visualize", "hex", @@ -63,8 +60,6 @@ verify = [ "grovedb-merk/verify", "grovedb-costs", "thiserror", - "serde/derive", - "bincode", "integer-encoding", ] estimated_costs = ["full"] diff --git a/grovedb/src/element/mod.rs b/grovedb/src/element/mod.rs index f674d2d8..a5573b4d 100644 --- a/grovedb/src/element/mod.rs +++ b/grovedb/src/element/mod.rs @@ -49,14 +49,13 @@ mod serialize; #[cfg(feature = "full")] use core::fmt; +use bincode::{Decode, Encode}; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_merk::estimated_costs::SUM_VALUE_EXTRA_COST; #[cfg(feature = "full")] use grovedb_merk::estimated_costs::{LAYER_COST_SIZE, SUM_LAYER_COST_SIZE}; #[cfg(feature = "full")] use grovedb_visualize::visualize_to_vec; -#[cfg(any(feature = "full", feature = "verify"))] -use serde::{Deserialize, Serialize}; #[cfg(any(feature = "full", feature = "verify"))] use crate::reference_path::ReferencePathType; @@ -93,7 +92,7 @@ pub type SumValue = i64; /// /// ONLY APPEND TO THIS LIST!!! Because /// of how serialization works. -#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[derive(Clone, Encode, Decode, PartialEq, Eq, Hash)] #[cfg_attr(not(any(feature = "full", feature = "visualize")), derive(Debug))] pub enum Element { /// An ordinary value diff --git a/grovedb/src/element/serialize.rs b/grovedb/src/element/serialize.rs index 730881d2..ab798054 100644 --- a/grovedb/src/element/serialize.rs +++ b/grovedb/src/element/serialize.rs @@ -29,8 +29,7 @@ //! Serialize //! 
Implements serialization functions in Element -#[cfg(any(feature = "full", feature = "verify"))] -use bincode::Options; +use bincode::config; #[cfg(any(feature = "full", feature = "verify"))] use crate::{Element, Error}; @@ -39,31 +38,26 @@ impl Element { #[cfg(feature = "full")] /// Serializes self. Returns vector of u8s. pub fn serialize(&self) -> Result, Error> { - bincode::DefaultOptions::default() - .with_varint_encoding() - .reject_trailing_bytes() - .serialize(self) - .map_err(|_| Error::CorruptedData(String::from("unable to serialize element"))) + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + bincode::encode_to_vec(self, config) + .map_err(|e| Error::CorruptedData(format!("unable to serialize element {}", e))) } #[cfg(feature = "full")] /// Serializes self. Returns usize. - pub fn serialized_size(&self) -> usize { - bincode::DefaultOptions::default() - .with_varint_encoding() - .reject_trailing_bytes() - .serialized_size(self) - .unwrap() as usize // this should not be able to error + pub fn serialized_size(&self) -> Result { + self.serialize().map(|serialized| serialized.len()) } #[cfg(any(feature = "full", feature = "verify"))] /// Deserializes given bytes and sets as self pub fn deserialize(bytes: &[u8]) -> Result { - bincode::DefaultOptions::default() - .with_varint_encoding() - .reject_trailing_bytes() - .deserialize(bytes) - .map_err(|_| Error::CorruptedData(String::from("unable to deserialize element"))) + let config = config::standard().with_big_endian().with_no_limit(); + Ok(bincode::decode_from_slice(bytes, config) + .map_err(|e| Error::CorruptedData(format!("unable to deserialize element {}", e)))? 
+ .0) } } @@ -80,20 +74,20 @@ mod tests { let empty_tree = Element::empty_tree(); let serialized = empty_tree.serialize().expect("expected to serialize"); assert_eq!(serialized.len(), 3); - assert_eq!(serialized.len(), empty_tree.serialized_size()); + assert_eq!(serialized.len(), empty_tree.serialized_size().unwrap()); // The tree is fixed length 32 bytes, so it's enum 2 then 32 bytes of zeroes assert_eq!(hex::encode(serialized), "020000"); let empty_tree = Element::new_tree_with_flags(None, Some(vec![5])); let serialized = empty_tree.serialize().expect("expected to serialize"); assert_eq!(serialized.len(), 5); - assert_eq!(serialized.len(), empty_tree.serialized_size()); + assert_eq!(serialized.len(), empty_tree.serialized_size().unwrap()); assert_eq!(hex::encode(serialized), "0200010105"); let item = Element::new_item(hex::decode("abcdef").expect("expected to decode")); let serialized = item.serialize().expect("expected to serialize"); assert_eq!(serialized.len(), 6); - assert_eq!(serialized.len(), item.serialized_size()); + assert_eq!(serialized.len(), item.serialized_size().unwrap()); // The item is variable length 3 bytes, so it's enum 2 then 32 bytes of zeroes assert_eq!(hex::encode(serialized), "0003abcdef00"); @@ -102,7 +96,7 @@ mod tests { let item = Element::new_sum_item(5); let serialized = item.serialize().expect("expected to serialize"); assert_eq!(serialized.len(), 3); - assert_eq!(serialized.len(), item.serialized_size()); + assert_eq!(serialized.len(), item.serialized_size().unwrap()); // The item is variable length 3 bytes, so it's enum 2 then 32 bytes of zeroes assert_eq!(hex::encode(serialized), "030a00"); @@ -112,7 +106,7 @@ mod tests { ); let serialized = item.serialize().expect("expected to serialize"); assert_eq!(serialized.len(), 8); - assert_eq!(serialized.len(), item.serialized_size()); + assert_eq!(serialized.len(), item.serialized_size().unwrap()); assert_eq!(hex::encode(serialized), "0003abcdef010101"); let reference = 
Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ @@ -122,7 +116,7 @@ mod tests { ])); let serialized = reference.serialize().expect("expected to serialize"); assert_eq!(serialized.len(), 12); - assert_eq!(serialized.len(), reference.serialized_size()); + assert_eq!(serialized.len(), reference.serialized_size().unwrap()); // The item is variable length 2 bytes, so it's enum 1 then 1 byte for length, // then 1 byte for 0, then 1 byte 02 for abcd, then 1 byte '1' for 05 assert_eq!(hex::encode(serialized), "010003010002abcd01050000"); @@ -137,7 +131,7 @@ mod tests { ); let serialized = reference.serialize().expect("expected to serialize"); assert_eq!(serialized.len(), 16); - assert_eq!(serialized.len(), reference.serialized_size()); + assert_eq!(serialized.len(), reference.serialized_size().unwrap()); assert_eq!(hex::encode(serialized), "010003010002abcd0105000103010203"); } } diff --git a/grovedb/src/estimated_costs/average_case_costs.rs b/grovedb/src/estimated_costs/average_case_costs.rs index d93b6451..8d803daf 100644 --- a/grovedb/src/estimated_costs/average_case_costs.rs +++ b/grovedb/src/estimated_costs/average_case_costs.rs @@ -208,7 +208,7 @@ impl GroveDb { _ => add_cost_case_merk_insert( &mut cost, key_len, - value.serialized_size() as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32, in_tree_using_sums, ), }; @@ -259,7 +259,7 @@ impl GroveDb { let sum_item_cost_size = if value.is_sum_item() { SUM_ITEM_COST_SIZE } else { - value.serialized_size() as u32 + cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32 }; let value_len = sum_item_cost_size + flags_len; add_cost_case_merk_replace_same_size( @@ -272,7 +272,7 @@ impl GroveDb { _ => add_cost_case_merk_replace_same_size( &mut cost, key_len, - value.serialized_size() as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32, in_tree_using_sums, ), }; @@ -303,7 +303,8 @@ impl GroveDb { flags_len + flags_len.required_space() as u32 
}); // Items need to be always the same serialized size for this to work - let item_cost_size = value.serialized_size() as u32; + let item_cost_size = + cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32; let value_len = item_cost_size + flags_len; add_cost_case_merk_patch( &mut cost, @@ -562,7 +563,7 @@ mod test { &mut average_case_has_raw_cost, &path, &key, - elem.serialized_size() as u32, + elem.serialized_size().expect("expected size") as u32, false, ); diff --git a/grovedb/src/estimated_costs/worst_case_costs.rs b/grovedb/src/estimated_costs/worst_case_costs.rs index 5a72a405..106c2bb6 100644 --- a/grovedb/src/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/estimated_costs/worst_case_costs.rs @@ -29,7 +29,7 @@ //! Worst case costs //! Implements worst case cost functions in GroveDb -use grovedb_costs::{CostResult, CostsExt, OperationCost}; +use grovedb_costs::{cost_return_on_error_no_add, CostResult, CostsExt, OperationCost}; use grovedb_merk::{ estimated_costs::{ add_cost_case_merk_insert, add_cost_case_merk_insert_layered, add_cost_case_merk_patch, @@ -195,7 +195,7 @@ impl GroveDb { _ => add_cost_case_merk_insert( &mut cost, key_len, - value.serialized_size() as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32, in_parent_tree_using_sums, ), }; @@ -253,7 +253,7 @@ impl GroveDb { _ => add_cost_case_merk_replace( &mut cost, key_len, - value.serialized_size() as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32, in_parent_tree_using_sums, ), }; @@ -284,7 +284,8 @@ impl GroveDb { flags_len + flags_len.required_space() as u32 }); // Items need to be always the same serialized size for this to work - let sum_item_cost_size = value.serialized_size() as u32; + let sum_item_cost_size = + cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32; let value_len = sum_item_cost_size + flags_len; add_cost_case_merk_patch( &mut cost, @@ -498,7 +499,7 @@ mod test { &mut 
worst_case_has_raw_cost, &path, &key, - elem.serialized_size() as u32, + elem.serialized_size().expect("expected size") as u32, false, ); diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index 68e6e621..2b9fb7ed 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -31,12 +31,11 @@ #[cfg(feature = "full")] use std::fmt; +use bincode::{Decode, Encode}; #[cfg(feature = "full")] use grovedb_visualize::visualize_to_vec; #[cfg(feature = "full")] use integer_encoding::VarInt; -#[cfg(any(feature = "full", feature = "verify"))] -use serde::{Deserialize, Serialize}; #[cfg(feature = "full")] use crate::Error; @@ -44,7 +43,7 @@ use crate::Error; #[cfg(any(feature = "full", feature = "verify"))] #[cfg_attr(not(any(feature = "full", feature = "visualize")), derive(Debug))] /// Reference path variants -#[derive(Hash, Eq, PartialEq, Serialize, Deserialize, Clone)] +#[derive(Hash, Eq, PartialEq, Encode, Decode, Clone)] pub enum ReferencePathType { /// Holds the absolute path to the element the reference points to AbsolutePathReference(Vec>), diff --git a/grovedb/src/visualize.rs b/grovedb/src/visualize.rs index 6bba2f83..6f1f1c0d 100644 --- a/grovedb/src/visualize.rs +++ b/grovedb/src/visualize.rs @@ -30,7 +30,10 @@ use std::io::{Result, Write}; -use bincode::Options; +use bincode::{ + config, + config::{BigEndian, Configuration}, +}; use grovedb_merk::{Merk, VisualizeableMerk}; use grovedb_path::SubtreePathBuilder; use grovedb_storage::StorageContext; @@ -239,11 +242,10 @@ impl Visualize for GroveDb { #[allow(dead_code)] pub fn visualize_merk_stdout<'db, S: StorageContext<'db>>(merk: &Merk) { visualize_stdout(&VisualizeableMerk::new(merk, |bytes: &[u8]| { - bincode::DefaultOptions::default() - .with_varint_encoding() - .reject_trailing_bytes() - .deserialize::(bytes) + let config = config::standard().with_big_endian().with_no_limit(); + bincode::decode_from_slice::>(bytes, config) .expect("unable to deserialize Element") + .0 
})); } diff --git a/merk/Cargo.toml b/merk/Cargo.toml index 7d180f54..e381f5ed 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -11,17 +11,17 @@ readme = "README.md" documentation = "https://docs.rs/grovedb-merk" [dependencies] -thiserror = "1.0.37" +thiserror = "1.0.58" grovedb-storage = { version = "1.0.0-rc.2", path = "../storage", optional = true } failure = "0.1.8" -integer-encoding = "3.0.4" -indexmap = "1.9.2" +integer-encoding = "4.0.0" +indexmap = "2.2.6" grovedb-costs = { version = "1.0.0-rc.2", path = "../costs" } grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize" } grovedb-path = { version = "1.0.0-rc.2", path = "../path" } [dependencies.time] -version = "0.3.17" +version = "0.3.34" optional = true [dependencies.hex] @@ -29,15 +29,15 @@ version = "0.4.3" optional = true [dependencies.colored] -version = "2.0.4" +version = "2.1.0" optional = true [dependencies.num_cpus] -version = "1.14.0" +version = "1.16.0" optional = true [dependencies.byteorder] -version = "1.4.3" +version = "1.5.0" optional = true [dependencies.ed] @@ -45,7 +45,7 @@ version = "0.2.2" optional = true [dependencies.blake3] -version = "1.3.3" +version = "1.5.1" optional = true [dependencies.rand] @@ -72,8 +72,8 @@ verify = [ ] [dev-dependencies] -tempfile = "3.3.0" -criterion = "0.4.0" +tempfile = "3.10.1" +criterion = "0.5.1" [[bench]] name = "merk" diff --git a/storage/Cargo.toml b/storage/Cargo.toml index 856888a7..8dc73fdc 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -10,15 +10,15 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] lazy_static = { version = "1.4.0", optional = true } -num_cpus = { version = "1.14.0", optional = true } -tempfile = { version = "3.3.0", optional = true } -blake3 = { version = "1.3.3", optional = true } -integer-encoding = { version = "3.0.4", optional = true } +num_cpus = { version = "1.16.0", optional = true } +tempfile = { version = "3.10.1", optional = true } +blake3 = { version = "1.5.1", 
optional = true } +integer-encoding = { version = "4.0.0", optional = true } grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize" } -strum = { version = "0.24.1", features = ["derive"] } +strum = { version = "0.26.2", features = ["derive"] } grovedb-costs = { version = "1.0.0-rc.2", path = "../costs" } thiserror = "1.0.37" -rocksdb = { version = "0.21.0", optional = true } +rocksdb = { version = "0.22.0", optional = true } hex = "0.4.3" grovedb-path = { version = "1.0.0-rc.2", path = "../path" } diff --git a/visualize/Cargo.toml b/visualize/Cargo.toml index f27fe48e..ac93bff0 100644 --- a/visualize/Cargo.toml +++ b/visualize/Cargo.toml @@ -12,4 +12,4 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] hex = "0.4.3" -itertools = "0.10.3" +itertools = "0.12.1" From 941dd86d70c86e7a236541919e789dba078abae6 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Wed, 24 Apr 2024 22:22:51 +0700 Subject: [PATCH 17/37] refactor: changed some str to Strings for better errors (#290) --- costs/Cargo.toml | 2 +- grovedb/Cargo.toml | 2 +- grovedb/src/error.rs | 36 +++----------------------- grovedb/src/operations/delete/mod.rs | 2 +- grovedb/src/operations/get/query.rs | 10 +++---- grovedb/src/operations/proof/verify.rs | 4 +-- grovedb/src/query/mod.rs | 5 ++-- merk/src/error.rs | 2 +- merk/src/proofs/query/mod.rs | 5 ++-- storage/Cargo.toml | 2 +- 10 files changed, 22 insertions(+), 48 deletions(-) diff --git a/costs/Cargo.toml b/costs/Cargo.toml index 4d62b58e..8178f839 100644 --- a/costs/Cargo.toml +++ b/costs/Cargo.toml @@ -10,6 +10,6 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] -thiserror = "1.0.58" +thiserror = "1.0.59" intmap = "2.0.0" integer-encoding = "4.0.0" diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index a77ff4e9..dc67a761 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/grovedb" [dependencies] grovedb-merk = { version = "1.0.0-rc.2", path = 
"../merk", optional = true, default-features = false } -thiserror = { version = "1.0.58", optional = true } +thiserror = { version = "1.0.59", optional = true } tempfile = { version = "3.10.1", optional = true } bincode = { version = "2.0.0-rc.3" } grovedb-storage = { version = "1.0.0-rc.2", path = "../storage", optional = true } diff --git a/grovedb/src/error.rs b/grovedb/src/error.rs index d29feaa7..d7f476af 100644 --- a/grovedb/src/error.rs +++ b/grovedb/src/error.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
GroveDB Errors /// GroveDB Errors @@ -139,15 +111,15 @@ pub enum Error { // Client allowed errors #[error("just in time element flags client error: {0}")] /// Just in time element flags client error - JustInTimeElementFlagsClientError(&'static str), + JustInTimeElementFlagsClientError(String), #[error("split removal bytes client error: {0}")] /// Split removal bytes client error - SplitRemovalBytesClientError(&'static str), + SplitRemovalBytesClientError(String), #[error("client returned non client error: {0}")] /// Client returned non client error - ClientReturnedNonClientError(&'static str), + ClientReturnedNonClientError(String), #[error("override not allowed error: {0}")] /// Override not allowed @@ -160,7 +132,7 @@ pub enum Error { // Support errors #[error("not supported: {0}")] /// Not supported - NotSupported(&'static str), + NotSupported(String), // Merk errors #[error("merk error: {0}")] diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index 6d7a34d0..a8c1c876 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -569,7 +569,7 @@ impl GroveDb { ))) } else { Err(Error::NotSupported( - "deletion operation for non empty tree not currently supported", + "deletion operation for non empty tree not currently supported".to_string(), )) }; result.wrap_with_cost(cost) diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index efdcfa63..deef7437 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -141,7 +141,7 @@ where { ) -> CostResult, Error> { if transaction.is_some() { Err(Error::NotSupported( - "transactions are not currently supported", + "transactions are not currently supported".to_string(), )) .wrap_with_cost(Default::default()) } else if is_verbose { @@ -404,11 +404,11 @@ where { transaction: TransactionArg, ) -> CostResult, Error> { let max_results = 
cost_return_on_error_default!(path_query.query.limit.ok_or( - Error::NotSupported("limits must be set in query_keys_optional",) + Error::NotSupported("limits must be set in query_keys_optional".to_string()) )) as usize; if path_query.query.offset.is_some() { return Err(Error::NotSupported( - "offsets are not supported in query_raw_keys_optional", + "offsets are not supported in query_raw_keys_optional".to_string(), )) .wrap_with_cost(OperationCost::default()); } @@ -449,11 +449,11 @@ where { transaction: TransactionArg, ) -> CostResult, Error> { let max_results = cost_return_on_error_default!(path_query.query.limit.ok_or( - Error::NotSupported("limits must be set in query_raw_keys_optional",) + Error::NotSupported("limits must be set in query_raw_keys_optional".to_string()) )) as usize; if path_query.query.offset.is_some() { return Err(Error::NotSupported( - "offsets are not supported in query_raw_keys_optional", + "offsets are not supported in query_raw_keys_optional".to_string(), )) .wrap_with_cost(OperationCost::default()); } diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index a69935bf..baea8735 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -165,13 +165,13 @@ impl GroveDb { { // must have a limit let max_results = query.query.limit.ok_or(Error::NotSupported( - "limits must be set in verify_query_with_absence_proof", + "limits must be set in verify_query_with_absence_proof".to_string(), ))? 
as usize; // must have no offset if query.query.offset.is_some() { return Err(Error::NotSupported( - "offsets are not supported for verify_query_with_absence_proof", + "offsets are not supported for verify_query_with_absence_proof".to_string(), )); } diff --git a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index ce052cb9..1b72e274 100644 --- a/grovedb/src/query/mod.rs +++ b/grovedb/src/query/mod.rs @@ -158,13 +158,14 @@ impl PathQuery { path_queries.into_iter().try_for_each(|path_query| { if path_query.query.offset.is_some() { return Err(Error::NotSupported( - "can not merge pathqueries with offsets", + "can not merge pathqueries with offsets".to_string(), )); } if path_query.query.limit.is_some() { return Err(Error::NotSupported( "can not merge pathqueries with limits, consider setting the limit after the \ - merge", + merge" + .to_string(), )); } path_query diff --git a/merk/src/error.rs b/merk/src/error.rs index 4455ef96..405fdeb1 100644 --- a/merk/src/error.rs +++ b/merk/src/error.rs @@ -87,7 +87,7 @@ pub enum Error { /// Not supported error #[error("not supported error {0}")] - NotSupported(&'static str), + NotSupported(String), /// Request amount exceeded error #[error("request amount exceeded error {0}")] diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index 4ecc808b..3a070919 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -175,7 +175,8 @@ impl Query { // unbounded ranges can not be supported if conditional_query_item.is_unbounded_range() { return Err(Error::NotSupported( - "terminal keys are not supported with conditional unbounded ranges", + "terminal keys are not supported with conditional unbounded ranges" + .to_string(), )); } let conditional_keys = conditional_query_item.keys()?; @@ -237,7 +238,7 @@ impl Query { for item in self.items.iter() { if item.is_unbounded_range() { return Err(Error::NotSupported( - "terminal keys are not supported with unbounded ranges", + "terminal keys are not 
supported with unbounded ranges".to_string(), )); } let keys = item.keys()?; diff --git a/storage/Cargo.toml b/storage/Cargo.toml index 8dc73fdc..7d7030bf 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -17,7 +17,7 @@ integer-encoding = { version = "4.0.0", optional = true } grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize" } strum = { version = "0.26.2", features = ["derive"] } grovedb-costs = { version = "1.0.0-rc.2", path = "../costs" } -thiserror = "1.0.37" +thiserror = "1.0.59" rocksdb = { version = "0.22.0", optional = true } hex = "0.4.3" grovedb-path = { version = "1.0.0-rc.2", path = "../path" } From d9292aa20bd8f3bda7c5d25d62db06ac341b0677 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Fri, 26 Apr 2024 15:09:07 +0700 Subject: [PATCH 18/37] fix!: empty parent paths should only error if we want them to error (#291) * fix: query with empty parent path should return no error * add in option to not error if intermediate paths dont exist * more verbose error * more verbose error * fix for terminal keys * fmt * fix * fix --- grovedb/src/element/mod.rs | 2 + grovedb/src/element/query.rs | 201 ++++++++++++----------- grovedb/src/operations/get/query.rs | 238 +++++++++++++++++++++++++--- grovedb/src/query/mod.rs | 2 + grovedb/src/reference_path.rs | 2 +- grovedb/src/tests/mod.rs | 10 +- grovedb/src/tests/query_tests.rs | 70 ++++---- merk/src/proofs/query/mod.rs | 25 +-- node-grove/src/lib.rs | 1 + 9 files changed, 382 insertions(+), 169 deletions(-) diff --git a/grovedb/src/element/mod.rs b/grovedb/src/element/mod.rs index a5573b4d..c71bb52f 100644 --- a/grovedb/src/element/mod.rs +++ b/grovedb/src/element/mod.rs @@ -45,6 +45,8 @@ mod insert; #[cfg(any(feature = "full", feature = "verify"))] mod query; #[cfg(any(feature = "full", feature = "verify"))] +pub use query::QueryOptions; +#[cfg(any(feature = "full", feature = "verify"))] mod serialize; #[cfg(feature = "full")] use core::fmt; diff --git a/grovedb/src/element/query.rs 
b/grovedb/src/element/query.rs index cc914949..eba5ae1f 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -59,7 +59,7 @@ use crate::{ #[cfg(any(feature = "full", feature = "verify"))] use crate::{Element, SizedQuery}; -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] #[derive(Copy, Clone, Debug)] pub struct QueryOptions { pub allow_get_raw: bool, @@ -71,6 +71,19 @@ pub struct QueryOptions { /// sub elements have no matches, hence the limit would not decrease and /// hence we would continue on the increasingly expensive query. pub decrease_limit_on_range_with_no_sub_elements: bool, + pub error_if_intermediate_path_tree_not_present: bool, +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl Default for QueryOptions { + fn default() -> Self { + QueryOptions { + allow_get_raw: false, + allow_cache: true, + decrease_limit_on_range_with_no_sub_elements: true, + error_if_intermediate_path_tree_not_present: true, + } + } } #[cfg(feature = "full")] @@ -101,6 +114,7 @@ impl Element { storage: &RocksDbStorage, merk_path: &[&[u8]], query: &Query, + query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult { @@ -109,8 +123,7 @@ impl Element { storage, merk_path, &sized_query, - true, - true, + query_options, result_type, transaction, ) @@ -123,12 +136,14 @@ impl Element { storage: &RocksDbStorage, merk_path: &[&[u8]], query: &Query, + query_options: QueryOptions, transaction: TransactionArg, ) -> CostResult, Error> { Element::get_query( storage, merk_path, query, + query_options, QueryElementResultType, transaction, ) @@ -226,39 +241,7 @@ impl Element { pub fn get_path_query( storage: &RocksDbStorage, path_query: &PathQuery, - allow_cache: bool, - decrease_limit_on_range_with_no_sub_elements: bool, - result_type: QueryResultType, - transaction: TransactionArg, - ) -> CostResult<(QueryResultElements, u16), Error> { - let path_slices = path_query - .path - .iter() - 
.map(|x| x.as_slice()) - .collect::>(); - Element::get_query_apply_function( - storage, - path_slices.as_slice(), - &path_query.query, - QueryOptions { - allow_get_raw: false, - allow_cache, - decrease_limit_on_range_with_no_sub_elements, - }, - result_type, - transaction, - Element::path_query_push, - ) - } - - #[cfg(feature = "full")] - /// Returns a vector of elements including trees, and the number of skipped - /// elements - pub fn get_raw_path_query( - storage: &RocksDbStorage, - path_query: &PathQuery, - allow_cache: bool, - decrease_limit_on_range_with_no_sub_elements: bool, + query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult<(QueryResultElements, u16), Error> { @@ -271,11 +254,7 @@ impl Element { storage, path_slices.as_slice(), &path_query.query, - QueryOptions { - allow_get_raw: true, - allow_cache, - decrease_limit_on_range_with_no_sub_elements, - }, + query_options, result_type, transaction, Element::path_query_push, @@ -288,8 +267,7 @@ impl Element { storage: &RocksDbStorage, path: &[&[u8]], sized_query: &SizedQuery, - allow_cache: bool, - decrease_limit_on_range_with_no_sub_elements: bool, + query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult<(QueryResultElements, u16), Error> { @@ -297,11 +275,7 @@ impl Element { storage, path, sized_query, - QueryOptions { - allow_get_raw: false, - allow_cache, - decrease_limit_on_range_with_no_sub_elements, - }, + query_options, result_type, transaction, Element::path_query_push, @@ -332,6 +306,7 @@ impl Element { allow_get_raw, allow_cache, decrease_limit_on_range_with_no_sub_elements, + .. 
} = query_options; if element.is_tree() { let mut path_vec = path.to_vec(); @@ -357,8 +332,7 @@ impl Element { Element::get_path_query( storage, &inner_path_query, - allow_cache, - decrease_limit_on_range_with_no_sub_elements, + query_options, result_type, transaction ) @@ -605,7 +579,7 @@ impl Element { Ok(element) => { let (subquery_path, subquery) = Self::subquery_paths_and_value_for_sized_query(sized_query, key); - add_element_function(PathQueryPushArgs { + match add_element_function(PathQueryPushArgs { storage, transaction, key: Some(key.as_slice()), @@ -621,9 +595,31 @@ impl Element { offset, }) .unwrap_add_cost(&mut cost) + { + Ok(_) => Ok(()), + Err(e) => { + if !query_options.error_if_intermediate_path_tree_not_present { + match e { + Error::PathParentLayerNotFound(_) => Ok(()), + _ => Err(e), + } + } else { + Err(e) + } + } + } } Err(Error::PathKeyNotFound(_)) => Ok(()), - Err(e) => Err(e), + Err(e) => { + if !query_options.error_if_intermediate_path_tree_not_present { + match e { + Error::PathParentLayerNotFound(_) => Ok(()), + _ => Err(e), + } + } else { + Err(e) + } + } } } else { Err(Error::InternalError( @@ -657,24 +653,36 @@ impl Element { .expect("key should exist"); let (subquery_path, subquery) = Self::subquery_paths_and_value_for_sized_query(sized_query, key); - cost_return_on_error!( - &mut cost, - add_element_function(PathQueryPushArgs { - storage, - transaction, - key: Some(key), - element, - path, - subquery_path, - subquery, - left_to_right: sized_query.query.left_to_right, - query_options, - result_type, - results, - limit, - offset, - }) - ); + let result_with_cost = add_element_function(PathQueryPushArgs { + storage, + transaction, + key: Some(key), + element, + path, + subquery_path, + subquery, + left_to_right: sized_query.query.left_to_right, + query_options, + result_type, + results, + limit, + offset, + }); + let result = result_with_cost.unwrap_add_cost(&mut cost); + match result { + Ok(x) => x, + Err(e) => { + if 
!query_options.error_if_intermediate_path_tree_not_present { + match e { + Error::PathKeyNotFound(_) + | Error::PathParentLayerNotFound(_) => (), + _ => return Err(e).wrap_with_cost(cost), + } + } else { + return Err(e).wrap_with_cost(cost); + } + } + } if sized_query.query.left_to_right { iter.next().unwrap_add_cost(&mut cost); } else { @@ -750,7 +758,7 @@ mod tests { use grovedb_storage::{Storage, StorageBatch}; use crate::{ - element::*, + element::{query::QueryOptions, *}, query_result_type::{ KeyElementPair, QueryResultElement, QueryResultElements, QueryResultType::{QueryKeyElementPairResultType, QueryPathKeyElementTrioResultType}, @@ -806,7 +814,7 @@ mod tests { query.insert_key(b"a".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, None) + Element::get_query_values(&db.db, &[TEST_LEAF], &query, QueryOptions::default(), None) .unwrap() .expect("expected successful get_query"), vec![ @@ -820,7 +828,7 @@ mod tests { query.insert_range(b"b".to_vec()..b"d".to_vec()); query.insert_range(b"a".to_vec()..b"c".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, None) + Element::get_query_values(&db.db, &[TEST_LEAF], &query, QueryOptions::default(), None) .unwrap() .expect("expected successful get_query"), vec![ @@ -835,7 +843,7 @@ mod tests { query.insert_range_inclusive(b"b".to_vec()..=b"d".to_vec()); query.insert_range(b"b".to_vec()..b"c".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, None) + Element::get_query_values(&db.db, &[TEST_LEAF], &query, QueryOptions::default(), None) .unwrap() .expect("expected successful get_query"), vec![ @@ -851,7 +859,7 @@ mod tests { query.insert_range(b"b".to_vec()..b"d".to_vec()); query.insert_range(b"a".to_vec()..b"c".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, None) + Element::get_query_values(&db.db, &[TEST_LEAF], &query, QueryOptions::default(), None) .unwrap() .expect("expected successful 
get_query"), vec![ @@ -912,6 +920,7 @@ mod tests { &db.db, &[TEST_LEAF], &query, + QueryOptions::default(), QueryPathKeyElementTrioResultType, None ) @@ -975,8 +984,7 @@ mod tests { storage, &[TEST_LEAF], &ascending_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1010,8 +1018,7 @@ mod tests { storage, &[TEST_LEAF], &backwards_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1100,8 +1107,7 @@ mod tests { storage, &[TEST_LEAF], &ascending_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1118,8 +1124,7 @@ mod tests { storage, &[TEST_LEAF], &backwards_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1139,8 +1144,7 @@ mod tests { storage, &[TEST_LEAF], &backwards_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1202,8 +1206,7 @@ mod tests { &db.db, &[TEST_LEAF], &backwards_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1229,8 +1232,7 @@ mod tests { &db.db, &[TEST_LEAF], &backwards_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1251,8 +1253,7 @@ mod tests { &db.db, &[TEST_LEAF], &limit_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1273,8 +1274,7 @@ mod tests { &db.db, &[TEST_LEAF], &limit_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1294,8 +1294,7 @@ mod tests { &db.db, &[TEST_LEAF], &limit_offset_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1320,8 +1319,7 @@ mod tests { &db.db, &[TEST_LEAF], &limit_offset_backwards_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1345,8 +1343,7 @@ mod tests { &db.db, &[TEST_LEAF], &limit_full_query, - true, - true, + QueryOptions::default(), 
QueryKeyElementPairResultType, None, ) @@ -1371,8 +1368,7 @@ mod tests { &db.db, &[TEST_LEAF], &limit_offset_backwards_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) @@ -1397,8 +1393,7 @@ mod tests { &db.db, &[TEST_LEAF], &limit_backwards_query, - true, - true, + QueryOptions::default(), QueryKeyElementPairResultType, None, ) diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index deef7437..29a581d9 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -36,7 +36,7 @@ use grovedb_costs::{ #[cfg(feature = "full")] use integer_encoding::VarInt; -use crate::query_result_type::PathKeyOptionalElementTrio; +use crate::{element::QueryOptions, query_result_type::PathKeyOptionalElementTrio}; #[cfg(feature = "full")] use crate::{ query_result_type::{QueryResultElement, QueryResultElements, QueryResultType}, @@ -52,6 +52,7 @@ impl GroveDb { path_queries: &[&PathQuery], allow_cache: bool, decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, ) -> CostResult>, Error> { let mut cost = OperationCost::default(); @@ -62,6 +63,7 @@ impl GroveDb { path_queries, allow_cache, decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, transaction ) @@ -112,6 +114,7 @@ impl GroveDb { path_queries: &[&PathQuery], allow_cache: bool, decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult @@ -125,6 +128,7 @@ where { &query, allow_cache, decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, result_type, transaction ) @@ -199,6 +203,7 @@ where { path_query: &PathQuery, allow_cache: bool, decrease_limit_on_range_with_no_sub_elements: bool, + 
error_if_intermediate_path_tree_not_present: bool, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult<(QueryResultElements, u16), Error> { @@ -210,6 +215,7 @@ where { path_query, allow_cache, decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, result_type, transaction ) @@ -235,6 +241,7 @@ where { path_query: &PathQuery, allow_cache: bool, decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, ) -> CostResult<(Vec>, u16), Error> { let mut cost = OperationCost::default(); @@ -245,6 +252,7 @@ where { path_query, allow_cache, decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, transaction ) @@ -307,6 +315,7 @@ where { path_query: &PathQuery, allow_cache: bool, decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, ) -> CostResult<(Vec, u16), Error> { let mut cost = OperationCost::default(); @@ -317,6 +326,7 @@ where { path_query, allow_cache, decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, transaction ) @@ -381,14 +391,19 @@ where { path_query: &PathQuery, allow_cache: bool, decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, result_type: QueryResultType, transaction: TransactionArg, ) -> CostResult<(QueryResultElements, u16), Error> { - Element::get_raw_path_query( + Element::get_path_query( &self.db, path_query, - allow_cache, - decrease_limit_on_range_with_no_sub_elements, + QueryOptions { + allow_get_raw: true, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, + }, result_type, transaction, ) @@ -401,6 +416,7 @@ where { path_query: &PathQuery, allow_cache: bool, 
decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, ) -> CostResult, Error> { let max_results = cost_return_on_error_default!(path_query.query.limit.ok_or( @@ -423,6 +439,7 @@ where { path_query, allow_cache, decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryPathKeyElementTrioResultType, transaction ) @@ -446,6 +463,7 @@ where { path_query: &PathQuery, allow_cache: bool, decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, ) -> CostResult, Error> { let max_results = cost_return_on_error_default!(path_query.query.limit.ok_or( @@ -468,6 +486,7 @@ where { path_query, allow_cache, decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, QueryResultType::QueryPathKeyElementTrioResultType, transaction ) @@ -539,7 +558,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("should get successfully"); @@ -595,7 +614,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("should get successfully"); @@ -652,7 +671,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("should get successfully"); 
@@ -720,7 +739,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(4), None)); - db.query_raw_keys_optional(&path_query, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect_err("range a should error"); @@ -729,7 +748,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 3 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(3), None)); - db.query_raw_keys_optional(&path_query, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("range b should not error"); @@ -738,7 +757,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 4 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(3), None)); - db.query_raw_keys_optional(&path_query, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect_err("range c should error"); @@ -747,7 +766,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 3 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(2), None)); - db.query_raw_keys_optional(&path_query, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect_err("range d should error"); @@ -755,7 +774,7 @@ mod tests { query.insert_range(b"z".to_vec()..b"10".to_vec()); let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - db.query_raw_keys_optional(&path_query, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect_err("range using 2 bytes should error"); } @@ -806,7 +825,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, 
true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("range starting with null should not error"); @@ -889,7 +908,7 @@ mod tests { query.insert_range(b"".to_vec()..b"c".to_vec()); let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - db.query_keys_optional(&path_query, true, true, None) + db.query_keys_optional(&path_query, true, true, true, None) .unwrap() .expect_err("range should error because we didn't subquery"); @@ -899,7 +918,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("query with subquery should not error"); @@ -1007,7 +1026,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("query with subquery should not error"); @@ -1057,6 +1076,187 @@ mod tests { ); // because we didn't query for it } + #[test] + fn test_query_raw_keys_options_with_subquery_having_intermediate_paths_missing() { + let db = make_test_grovedb(); + + db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"2", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"3", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("should 
insert subtree successfully"); + db.insert( + [TEST_LEAF, b"1"].as_ref(), + b"deep_1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"1", b"deep_1"].as_ref(), + b"deeper_1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"1", b"deep_1", b"deeper_1"].as_ref(), + b"2", + Element::new_item(b"found_me".to_vec()), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"2"].as_ref(), + b"1", + Element::new_item(b"1 in 2".to_vec()), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"2"].as_ref(), + b"5", + Element::new_item(b"5 in 2".to_vec()), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"2"].as_ref(), + b"2", + Element::new_item(b"2 in 2".to_vec()), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + + let mut sub_query = Query::new(); + sub_query.insert_key(b"1".to_vec()); + sub_query.insert_key(b"2".to_vec()); + let mut query = Query::new(); + query.insert_keys(vec![b"1".to_vec(), b"2".to_vec(), b"3".to_vec()]); + query.set_subquery_path(vec![b"deep_1".to_vec(), b"deeper_1".to_vec()]); + query.set_subquery(sub_query); + let path = vec![TEST_LEAF.to_vec()]; + let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); + + let raw_result = db + .query_raw_keys_optional(&path_query, true, true, true, None) + .unwrap() + .expect_err( + "query with subquery should error if error_if_intermediate_path_tree_not_present \ + is set to true", + ); + + let raw_result = db + .query_raw_keys_optional(&path_query, true, true, false, None) + .unwrap() + .expect("query with subquery should not error"); + + // because is 99 ascii, and we have empty too = 100 then x 2 + 
assert_eq!(raw_result.len(), 6); + + let expected_result = vec![ + ( + vec![ + b"test_leaf".to_vec(), + b"1".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"1".to_vec(), + None, + ), + ( + vec![ + b"test_leaf".to_vec(), + b"1".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"2".to_vec(), + Some(Element::new_item(b"found_me".to_vec())), + ), + ( + vec![ + b"test_leaf".to_vec(), + b"2".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"1".to_vec(), + None, + ), + ( + vec![ + b"test_leaf".to_vec(), + b"2".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"2".to_vec(), + None, + ), + ( + vec![ + b"test_leaf".to_vec(), + b"3".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"1".to_vec(), + None, + ), + ( + vec![ + b"test_leaf".to_vec(), + b"3".to_vec(), + b"deep_1".to_vec(), + b"deeper_1".to_vec(), + ], + b"2".to_vec(), + None, + ), + ]; + + assert_eq!(raw_result, expected_result); + } + #[test] fn test_query_raw_keys_options_with_subquery_and_subquery_path() { let db = make_test_grovedb(); @@ -1155,7 +1355,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("query with subquery should not error"); @@ -1322,7 +1522,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("query with subquery should not error"); @@ -1502,7 +1702,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let result = db - .query_keys_optional(&path_query, true, true, 
None) + .query_keys_optional(&path_query, true, true, true, None) .unwrap() .expect("query with subquery should not error"); diff --git a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index 1b72e274..db75144d 100644 --- a/grovedb/src/query/mod.rs +++ b/grovedb/src/query/mod.rs @@ -536,6 +536,7 @@ mod tests { &merged_path_query, true, true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, ) @@ -829,6 +830,7 @@ mod tests { &merged_path_query, true, true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, ) diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index 2b9fb7ed..52f07eb8 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -387,7 +387,7 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], query); let result = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("should query items"); assert_eq!(result.0.len(), 5); diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index 3d905224..09a38e6b 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -541,7 +541,14 @@ fn test_element_with_flags() { SizedQuery::new(query, None, None), ); let (flagged_ref_no_follow, _) = db - .query_raw(&path_query, true, true, QueryKeyElementPairResultType, None) + .query_raw( + &path_query, + true, + true, + true, + QueryKeyElementPairResultType, + None, + ) .unwrap() .expect("should get successfully"); @@ -2623,6 +2630,7 @@ fn test_get_full_query() { &[&path_query1, &path_query2], true, true, + true, QueryKeyElementPairResultType, None ) diff --git a/grovedb/src/tests/query_tests.rs b/grovedb/src/tests/query_tests.rs index 830a18e0..304042bd 100644 --- a/grovedb/src/tests/query_tests.rs +++ b/grovedb/src/tests/query_tests.rs @@ -365,7 +365,7 @@ fn test_get_range_query_with_non_unique_subquery() { let path_query = 
PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -402,7 +402,7 @@ fn test_get_range_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -437,7 +437,7 @@ fn test_get_range_query_with_unique_subquery_on_references() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -481,7 +481,7 @@ fn test_get_range_query_with_unique_subquery_with_non_unique_null_values() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -524,7 +524,7 @@ fn test_get_range_query_with_unique_subquery_ignore_non_unique_null_values() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -562,7 +562,7 @@ fn test_get_range_inclusive_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -602,7 +602,7 @@ fn 
test_get_range_inclusive_query_with_non_unique_subquery_on_references() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -642,7 +642,7 @@ fn test_get_range_inclusive_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -680,7 +680,7 @@ fn test_get_range_from_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -717,7 +717,7 @@ fn test_get_range_from_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -755,7 +755,7 @@ fn test_get_range_to_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -792,7 +792,7 @@ fn test_get_range_to_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -830,7 +830,7 @@ fn 
test_get_range_to_inclusive_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -870,7 +870,7 @@ fn test_get_range_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bou let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -907,7 +907,7 @@ fn test_get_range_to_inclusive_query_with_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -945,7 +945,7 @@ fn test_get_range_after_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -985,7 +985,7 @@ fn test_get_range_after_to_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1027,7 +1027,7 @@ fn test_get_range_after_to_inclusive_query_with_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful 
get_path_query"); @@ -1069,7 +1069,7 @@ fn test_get_range_after_to_inclusive_query_with_non_unique_subquery_and_key_out_ let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1117,7 +1117,7 @@ fn test_get_range_inclusive_query_with_double_non_unique_subquery() { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1156,7 +1156,7 @@ fn test_get_range_query_with_limit_and_offset() { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1187,7 +1187,7 @@ fn test_get_range_query_with_limit_and_offset() { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1218,7 +1218,7 @@ fn test_get_range_query_with_limit_and_offset() { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(55), None)); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1249,7 +1249,7 @@ fn test_get_range_query_with_limit_and_offset() { ); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() 
.expect("expected successful get_path_query"); @@ -1287,7 +1287,7 @@ fn test_get_range_query_with_limit_and_offset() { ); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1322,7 +1322,7 @@ fn test_get_range_query_with_limit_and_offset() { ); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1343,7 +1343,7 @@ fn test_get_range_query_with_limit_and_offset() { ); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1366,7 +1366,7 @@ fn test_get_range_query_with_limit_and_offset() { let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(5), Some(2))); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("expected successful get_path_query"); @@ -1609,7 +1609,7 @@ fn test_mixed_level_proofs() { let path_query = PathQuery::new_unsized(path.clone(), query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1625,7 +1625,7 @@ fn test_mixed_level_proofs() { // Test mixed element proofs with limit and offset let path_query = PathQuery::new_unsized(path.clone(), query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1642,7 +1642,7 @@ fn test_mixed_level_proofs() { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(1), None)); let 
(elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1660,7 +1660,7 @@ fn test_mixed_level_proofs() { SizedQuery::new(query.clone(), Some(3), Some(0)), ); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1678,7 +1678,7 @@ fn test_mixed_level_proofs() { SizedQuery::new(query.clone(), Some(4), Some(0)), ); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1693,7 +1693,7 @@ fn test_mixed_level_proofs() { let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(10), Some(4))); let (elements, _) = db - .query_item_value(&path_query, true, true, None) + .query_item_value(&path_query, true, true, true, None) .unwrap() .expect("successful get_path_query"); @@ -1790,6 +1790,7 @@ fn test_mixed_level_proofs_with_tree() { &path_query, true, true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, ) @@ -1813,6 +1814,7 @@ fn test_mixed_level_proofs_with_tree() { &path_query, true, true, + true, QueryResultType::QueryPathKeyElementTrioResultType, None, ) diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index 3a070919..9d485564 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -183,7 +183,8 @@ impl Query { for key in conditional_keys.into_iter() { if current_len > max_results { return Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded for conditional subqueries, set max is \ + {max_results}, current length is {current_len}", ))); } already_added_keys.insert(key.clone()); @@ -196,14 +197,15 @@ impl Query { // push the subquery 
path to the path path.extend(subquery_path.iter().cloned()); // recurse onto the lower level - let added_here = - subquery.terminal_keys(path, max_results - current_len, result)?; + let added_here = subquery.terminal_keys(path, max_results, result)?; added += added_here; current_len += added_here; } else { if current_len == max_results { return Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded when subquery path but no \ + subquery, set max is {max_results}, current length is \ + {current_len}", ))); } // a subquery path but no subquery @@ -249,7 +251,8 @@ impl Query { } if current_len > max_results { return Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded for items, set max is {max_results}, \ + current len is {current_len}", ))); } let mut path = current_path.clone(); @@ -261,14 +264,14 @@ impl Query { // push the subquery path to the path path.extend(subquery_path.iter().cloned()); // recurse onto the lower level - let added_here = - subquery.terminal_keys(path, max_results - current_len, result)?; + let added_here = subquery.terminal_keys(path, max_results, result)?; added += added_here; current_len += added_here; } else { if current_len == max_results { return Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded when subquery path but no subquery, \ + set max is {max_results}, current len is {current_len}", ))); } // a subquery path but no subquery @@ -292,14 +295,14 @@ impl Query { // push the key to the path path.push(key); // recurse onto the lower level - let added_here = - subquery.terminal_keys(path, max_results - current_len, result)?; + let added_here = subquery.terminal_keys(path, max_results, result)?; added += added_here; current_len += added_here; } else { if current_len == max_results { return 
Err(Error::RequestAmountExceeded(format!( - "terminal keys limit exceeded, set max is {max_results}", + "terminal keys limit exceeded without subquery or subquery path, set \ + max is {max_results}, current len is {current_len}", ))); } result.push((path, key)); diff --git a/node-grove/src/lib.rs b/node-grove/src/lib.rs index a955b744..d3d6e0a6 100644 --- a/node-grove/src/lib.rs +++ b/node-grove/src/lib.rs @@ -664,6 +664,7 @@ impl GroveDbWrapper { &path_query, allows_cache, true, + true, using_transaction.then_some(transaction).flatten(), ) .unwrap(); // Todo: Costs; From 7267fcf65e4d2e552ea566a3804d9b46ea621ccb Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Wed, 1 May 2024 15:57:20 +0300 Subject: [PATCH 19/37] feat: db state sync by merk chunking (#292) * wip finalize multi chunk with limit implement chunk op test chunk op encoding convert chunk op chunk id to string implement traversal instruction to string add chunking error + devoid multi subtree chunk from encoding work multi-subtree-chunk skeleton + return encoding length in multichunk make chunks fixed size height 2 test height proof implement height proof verifier update documentation verify height proof generation add documentation test no of chunk under chunk id fn implement number of chunks under chunk id function extract chunk layer function from chunk height seperate number_of_chunk into height and layer_height functions return multi chunk result enforce limit without storage overhead add test for encoding length check implement iterator for chunk producer remove cost from chunks fix the error type implement random chunk access fixes implement chunk height function add traverse then build chunk function to ref walker update comment implement chunk producer length init chunk producer struct implement merk tree height function update traversal generation instruction add instruction traversal test fix documentation implement binary range function clean up number of chunks function given a 
subtree of a given height return the exit node count documentation fixes implement chunk_height_per_layer verify that chunks produce expected root hash implement and test variable depth chunk creation restart chunking v2 * Squashed commit of the following: remove bad test rename files update documentation wip wip implement merk verifier + state building implement replication from multichunk fix chunk verification fixed implementation of chunkid from traversal instructions fix some tests make chunk_id from traversal instruction test resistant to changes in underlying chunking scheme add restoration logic test function returning the next chunk id when you call chunk use strings as communication interface between producer and restorer implement chunk id from traversal instruction add traversal instruction generation to direct string chunk producer returns next index as string for multi chunk clean up rewrite parent links restoration done successfully rough implementation of rewrite parent implement function to extract sum from node type wip chunk write logic + restorer finalization + parent key tracking new visit ref function that keeps track of traversal path implement instruction string to traversal instruction test child to link functionality for basic and sum merks implement node to link include sum wip implement and test chunk verification Fix layer iter function Previous implementation made a key assumption that nodes are unique including hash nodes, this made the layer iteration functionality depend on the contents of the tree, which shouldn't be the case. This adds a simpler implementation of the layer iter logic using breadth first search. 
add test to ensure chunks only contain hash and kvfeaturetype test for avl tree during proof op execution remove chunk_height_per_layer_lin_comb every chunk now has fixed height of 2 * wip * wip * wip * rename job * clippy fixes * feat: base state sync * dynamic chunk id calculation * more work * more work * more work * fix: help with lifetimes * more work * final work * cargo fmt * more fmt * clippy fixes * more fmt * fix for verify feature * more fmt * test fixes * more fmt * refactor * refactor * refactor * more refactoring * more refactoring * fmt * suggestions --------- Co-authored-by: Wisdom Ogwu --- Cargo.toml | 2 +- grovedb/Cargo.toml | 2 + grovedb/src/batch/mod.rs | 15 +- grovedb/src/lib.rs | 12 +- grovedb/src/operations/auxiliary.rs | 54 +- grovedb/src/operations/delete/mod.rs | 58 +- grovedb/src/replication.rs | 1324 +++++---------- grovedb/src/tests/mod.rs | 13 +- grovedb/src/tests/query_tests.rs | 20 +- grovedb/src/versioning.rs | 2 +- merk/src/error.rs | 26 +- merk/src/lib.rs | 2 +- merk/src/merk/chunks.rs | 1276 +++++++++++---- merk/src/merk/mod.rs | 190 ++- merk/src/merk/restore.rs | 1456 +++++++++++++---- merk/src/proofs/chunk.rs | 614 +------ merk/src/proofs/chunk/binary_range.rs | 239 +++ merk/src/proofs/chunk/chunk.rs | 662 ++++++++ merk/src/proofs/chunk/chunk_op.rs | 169 ++ merk/src/proofs/chunk/error.rs | 79 + merk/src/proofs/chunk/util.rs | 700 ++++++++ merk/src/proofs/tree.rs | 259 ++- merk/src/test_utils/mod.rs | 13 +- merk/src/tree/link.rs | 2 +- merk/src/tree/mod.rs | 6 +- merk/src/visualize.rs | 4 +- storage/src/rocksdb_storage.rs | 2 +- .../src/rocksdb_storage/storage_context.rs | 2 +- tutorials/Cargo.toml | 9 +- tutorials/src/bin/replication.rs | 244 +++ 30 files changed, 5055 insertions(+), 2401 deletions(-) create mode 100644 merk/src/proofs/chunk/binary_range.rs create mode 100644 merk/src/proofs/chunk/chunk.rs create mode 100644 merk/src/proofs/chunk/chunk_op.rs create mode 100644 merk/src/proofs/chunk/error.rs create mode 100644 
merk/src/proofs/chunk/util.rs create mode 100644 tutorials/src/bin/replication.rs diff --git a/Cargo.toml b/Cargo.toml index b0a38948..6ebd27d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,5 +6,5 @@ members = [ "node-grove", "storage", "visualize", - "path", + "path" ] diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index dc67a761..c932b138 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -26,6 +26,8 @@ nohash-hasher = { version = "0.2.0", optional = true } indexmap = { version = "2.2.6", optional = true } intmap = { version = "2.0.0", optional = true } grovedb-path = { version = "1.0.0-rc.2", path = "../path" } +blake3 = "1.4.0" +bitvec = "1" [dev-dependencies] rand = "0.8.5" diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index f6a4e7ea..8674672c 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -552,7 +552,7 @@ impl GroveDbOp { } /// Verify consistency of operations - pub fn verify_consistency_of_operations(ops: &Vec) -> GroveDbOpConsistencyResults { + pub fn verify_consistency_of_operations(ops: &[GroveDbOp]) -> GroveDbOpConsistencyResults { let ops_len = ops.len(); // operations should not have any duplicates let mut repeated_ops = vec![]; @@ -2424,8 +2424,8 @@ mod tests { Element::empty_tree(), ), ]; - assert!(matches!( - db.apply_batch( + assert!(db + .apply_batch( ops, Some(BatchApplyOptions { validate_insertion_does_not_override: false, @@ -2438,9 +2438,8 @@ mod tests { }), None ) - .unwrap(), - Ok(_) - )); + .unwrap() + .is_ok()); } #[test] @@ -3481,7 +3480,7 @@ mod tests { elem.clone(), ), ]; - assert!(matches!(db.apply_batch(batch, None, None).unwrap(), Ok(_))); + assert!(db.apply_batch(batch, None, None).unwrap().is_ok()); assert_eq!( db.get([TEST_LEAF].as_ref(), b"key1", None) .unwrap() @@ -3498,7 +3497,7 @@ mod tests { .unwrap() .expect("should generate proof"); let verification_result = GroveDb::verify_query_raw(&proof, &path_query); - assert!(matches!(verification_result, Ok(_))); + 
assert!(verification_result.is_ok()); // Hit reference limit when you specify max reference hop, lower than actual hop // count diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 9ea95513..fd11f10d 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -160,7 +160,7 @@ pub mod query_result_type; #[cfg(any(feature = "full", feature = "verify"))] pub mod reference_path; #[cfg(feature = "full")] -mod replication; +pub mod replication; #[cfg(all(test, feature = "full"))] mod tests; #[cfg(feature = "full")] @@ -172,8 +172,6 @@ mod visualize; #[cfg(feature = "full")] use std::{collections::HashMap, option::Option::None, path::Path}; -#[cfg(any(feature = "full", feature = "verify"))] -use element::helpers; #[cfg(any(feature = "full", feature = "verify"))] pub use element::Element; #[cfg(feature = "full")] @@ -217,14 +215,12 @@ use grovedb_storage::{Storage, StorageContext}; use grovedb_visualize::DebugByteVectors; #[cfg(any(feature = "full", feature = "verify"))] pub use query::{PathQuery, SizedQuery}; -#[cfg(feature = "full")] -pub use replication::{BufferedRestorer, Restorer, SiblingsChunkProducer, SubtreeChunkProducer}; +#[cfg(feature = "full")] +use crate::element::helpers::raw_decode; #[cfg(any(feature = "full", feature = "verify"))] pub use crate::error::Error; #[cfg(feature = "full")] -use crate::helpers::raw_decode; -#[cfg(feature = "full")] use crate::util::{root_merk_optional_tx, storage_context_optional_tx}; use crate::Error::MerkError; @@ -237,6 +233,8 @@ pub struct GroveDb { db: RocksDbStorage, } +pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; + /// Transaction #[cfg(feature = "full")] pub type Transaction<'db> = >::Transaction; diff --git a/grovedb/src/operations/auxiliary.rs b/grovedb/src/operations/auxiliary.rs index 0a29c510..1b6b884d 100644 --- a/grovedb/src/operations/auxiliary.rs +++ b/grovedb/src/operations/auxiliary.rs @@ -30,15 +30,17 @@ #[cfg(feature = "full")] use grovedb_costs::{ - cost_return_on_error_no_add, 
storage_cost::key_value_cost::KeyValueStorageCost, CostResult, - CostsExt, OperationCost, + cost_return_on_error, cost_return_on_error_no_add, + storage_cost::key_value_cost::KeyValueStorageCost, CostResult, CostsExt, OperationCost, }; +use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::StorageContext; use grovedb_storage::{Storage, StorageBatch}; +use crate::util::storage_context_optional_tx; #[cfg(feature = "full")] -use crate::{util::meta_storage_context_optional_tx, Error, GroveDb, TransactionArg}; +use crate::{util::meta_storage_context_optional_tx, Element, Error, GroveDb, TransactionArg}; #[cfg(feature = "full")] impl GroveDb { @@ -118,4 +120,50 @@ impl GroveDb { Ok(value).wrap_with_cost(cost) }) } + + // TODO: dumb traversal should not be tolerated + /// Finds keys which are trees for a given subtree recursively. + /// One element means a key of a `merk`, n > 1 elements mean relative path + /// for a deeply nested subtree. + pub fn find_subtrees>( + &self, + path: &SubtreePath, + transaction: TransactionArg, + ) -> CostResult>>, Error> { + let mut cost = OperationCost::default(); + + // TODO: remove conversion to vec; + // However, it's not easy for a reason: + // new keys to enqueue are taken from raw iterator which returns Vec; + // changing that to slice is hard as cursor should be moved for next iteration + // which requires exclusive (&mut) reference, also there is no guarantee that + // slice which points into storage internals will remain valid if raw + // iterator got altered so why that reference should be exclusive; + // + // Update: there are pinned views into RocksDB to return slices of data, perhaps + // there is something for iterators + + let mut queue: Vec>> = vec![path.to_vec()]; + let mut result: Vec>> = queue.clone(); + + while let Some(q) = queue.pop() { + let subtree_path: SubtreePath> = q.as_slice().into(); + // Get the correct subtree with q_ref as path + storage_context_optional_tx!(self.db, subtree_path, 
None, transaction, storage, { + let storage = storage.unwrap_add_cost(&mut cost); + let mut raw_iter = Element::iterator(storage.raw_iter()).unwrap_add_cost(&mut cost); + while let Some((key, value)) = + cost_return_on_error!(&mut cost, raw_iter.next_element()) + { + if value.is_tree() { + let mut sub_path = q.clone(); + sub_path.push(key.to_vec()); + queue.push(sub_path.clone()); + result.push(sub_path); + } + } + }) + } + Ok(result).wrap_with_cost(cost) + } } diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index a8c1c876..84d14652 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -59,7 +59,7 @@ use grovedb_storage::{ #[cfg(feature = "full")] use crate::{ batch::{GroveDbOp, Op}, - util::{storage_context_optional_tx, storage_context_with_parent_optional_tx}, + util::storage_context_with_parent_optional_tx, Element, ElementFlags, Error, GroveDb, Transaction, TransactionArg, }; use crate::{raw_decode, util::merk_optional_tx_path_not_empty}; @@ -879,52 +879,6 @@ impl GroveDb { Ok(true).wrap_with_cost(cost) } - - // TODO: dumb traversal should not be tolerated - /// Finds keys which are trees for a given subtree recursively. - /// One element means a key of a `merk`, n > 1 elements mean relative path - /// for a deeply nested subtree. 
- pub(crate) fn find_subtrees>( - &self, - path: &SubtreePath, - transaction: TransactionArg, - ) -> CostResult>>, Error> { - let mut cost = OperationCost::default(); - - // TODO: remove conversion to vec; - // However, it's not easy for a reason: - // new keys to enqueue are taken from raw iterator which returns Vec; - // changing that to slice is hard as cursor should be moved for next iteration - // which requires exclusive (&mut) reference, also there is no guarantee that - // slice which points into storage internals will remain valid if raw - // iterator got altered so why that reference should be exclusive; - // - // Update: there are pinned views into RocksDB to return slices of data, perhaps - // there is something for iterators - - let mut queue: Vec>> = vec![path.to_vec()]; - let mut result: Vec>> = queue.clone(); - - while let Some(q) = queue.pop() { - let subtree_path: SubtreePath> = q.as_slice().into(); - // Get the correct subtree with q_ref as path - storage_context_optional_tx!(self.db, subtree_path, None, transaction, storage, { - let storage = storage.unwrap_add_cost(&mut cost); - let mut raw_iter = Element::iterator(storage.raw_iter()).unwrap_add_cost(&mut cost); - while let Some((key, value)) = - cost_return_on_error!(&mut cost, raw_iter.next_element()) - { - if value.is_tree() { - let mut sub_path = q.clone(); - sub_path.push(key.to_vec()); - queue.push(sub_path.clone()); - result.push(sub_path); - } - } - }) - } - Ok(result).wrap_with_cost(cost) - } } #[cfg(feature = "full")] @@ -1029,10 +983,7 @@ mod tests { db.get([TEST_LEAF].as_ref(), b"key1", None).unwrap(), Err(Error::PathKeyNotFound(_)) )); - assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key4", None).unwrap(), - Ok(_) - )); + assert!(db.get([TEST_LEAF].as_ref(), b"key4", None).unwrap().is_ok()); } #[test] @@ -1397,10 +1348,7 @@ mod tests { db.get([TEST_LEAF].as_ref(), b"key1", None).unwrap(), Err(Error::PathKeyNotFound(_)) )); - assert!(matches!( - db.get([TEST_LEAF].as_ref(), 
b"key4", None).unwrap(), - Ok(_) - )); + assert!(db.get([TEST_LEAF].as_ref(), b"key4", None).unwrap().is_ok()); } #[test] diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 898f5ff1..0484cfa1 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -1,990 +1,470 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! 
Replication - use std::{ - collections::VecDeque, - iter::{empty, once}, + collections::{BTreeMap, BTreeSet}, + fmt, + str::Utf8Error, }; use grovedb_merk::{ - proofs::{Node, Op}, - Merk, TreeFeatureType, + merk::restore::Restorer, + proofs::Op, + tree::{hash::CryptoHash, kv::ValueDefinedCostType, value_hash}, + ChunkProducer, }; use grovedb_path::SubtreePath; -use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbImmediateStorageContext, PrefixedRocksDbStorageContext}, - Storage, StorageContext, -}; - -use crate::{Element, Error, GroveDb, Hash, Transaction}; - -const OPS_PER_CHUNK: usize = 128; - -impl GroveDb { - /// Creates a chunk producer to replicate GroveDb. - pub fn chunks(&self) -> SubtreeChunkProducer { - SubtreeChunkProducer::new(self) - } -} - -/// Subtree chunks producer. -pub struct SubtreeChunkProducer<'db> { - grove_db: &'db GroveDb, - cache: Option>, +use grovedb_storage::rocksdb_storage::RocksDbStorage; +#[rustfmt::skip] +use grovedb_storage::rocksdb_storage::storage_context::context_immediate::PrefixedRocksDbImmediateStorageContext; + +use crate::{replication, Error, GroveDb, Transaction, TransactionArg}; + +pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; + +// Struct governing state sync +pub struct StateSyncInfo<'db> { + // Current Chunk restorer + pub restorer: Option>>, + // Set of processed prefixes (Path digests) + pub processed_prefixes: BTreeSet, + // Current processed prefix (Path digest) + pub current_prefix: Option, + // Set of global chunk ids requested to be fetched and pending for processing. For the + // description of global chunk id check fetch_chunk(). 
+ pub pending_chunks: BTreeSet>, + // Number of processed chunks in current prefix (Path digest) + pub num_processed_chunks: usize, } -struct SubtreeChunkProducerCache<'db> { - current_merk_path: Vec>, - current_merk: Merk>, - // This needed to be an `Option` because it requires a reference on Merk but it's within the - // same struct and during struct init a referenced Merk would be moved inside a struct, - // using `Option` this init happens in two steps. - current_chunk_producer: - Option>>, +// Struct containing information about current subtrees found in GroveDB +pub struct SubtreesMetadata { + // Map of Prefix (Path digest) -> (Actual path, Parent Subtree actual_value_hash, Parent + // Subtree elem_value_hash) Note: Parent Subtree actual_value_hash, Parent Subtree + // elem_value_hash are needed when verifying the new constructed subtree after wards. + pub data: BTreeMap>, CryptoHash, CryptoHash)>, } -impl<'db> SubtreeChunkProducer<'db> { - fn new(storage: &'db GroveDb) -> Self { - SubtreeChunkProducer { - grove_db: storage, - cache: None, +impl SubtreesMetadata { + pub fn new() -> SubtreesMetadata { + SubtreesMetadata { + data: BTreeMap::new(), } } +} - /// Chunks in current producer - pub fn chunks_in_current_producer(&self) -> usize { - self.cache - .as_ref() - .and_then(|c| c.current_chunk_producer.as_ref().map(|p| p.len())) - .unwrap_or(0) +impl Default for SubtreesMetadata { + fn default() -> Self { + Self::new() } +} - /// Get chunk - pub fn get_chunk<'p, P>(&mut self, path: P, index: usize) -> Result, Error> - where - P: IntoIterator, -

::IntoIter: Clone + DoubleEndedIterator, - { - let path_iter = path.into_iter(); - - if let Some(SubtreeChunkProducerCache { - current_merk_path, .. - }) = &self.cache - { - if !itertools::equal(current_merk_path, path_iter.clone()) { - self.cache = None; - } - } - - if self.cache.is_none() { - let current_merk = self - .grove_db - .open_non_transactional_merk_at_path( - path_iter.clone().collect::>().as_slice().into(), - None, - ) - .unwrap()?; - - if current_merk.root_key().is_none() { - return Ok(Vec::new()); - } - - self.cache = Some(SubtreeChunkProducerCache { - current_merk_path: path_iter.map(|p| p.to_vec()).collect(), - current_merk, - current_chunk_producer: None, - }); - let cache = self.cache.as_mut().expect("exists at this point"); - cache.current_chunk_producer = Some( - grovedb_merk::ChunkProducer::new(&cache.current_merk) - .map_err(|e| Error::CorruptedData(e.to_string()))?, +impl fmt::Debug for SubtreesMetadata { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for (prefix, metadata) in self.data.iter() { + let metadata_path = &metadata.0; + let metadata_path_str = util_path_to_string(metadata_path); + writeln!( + f, + " prefix:{:?} -> path:{:?}\n", + hex::encode(prefix), + metadata_path_str ); } - - self.cache - .as_mut() - .expect("must exist at this point") - .current_chunk_producer - .as_mut() - .expect("must exist at this point") - .chunk(index) - .map_err(|e| Error::CorruptedData(e.to_string())) + Ok(()) } } -// TODO: make generic over storage_cost context -type MerkRestorer<'db> = grovedb_merk::Restorer>; - -type Path = Vec>; - -/// Structure to drive GroveDb restore process. 
-pub struct Restorer<'db> { - current_merk_restorer: Option>, - current_merk_chunk_index: usize, - current_merk_path: Path, - queue: VecDeque<(Path, Vec, Hash, TreeFeatureType)>, - grove_db: &'db GroveDb, - tx: &'db Transaction<'db>, +// Converts a path into a human-readable string (for debugging) +pub fn util_path_to_string(path: &[Vec]) -> Vec { + let mut subtree_path_str: Vec = vec![]; + for subtree in path { + let string = std::str::from_utf8(subtree).expect("should be able to convert path"); + subtree_path_str.push( + string + .parse() + .expect("should be able to parse path to string"), + ); + } + subtree_path_str } -/// Indicates what next piece of information `Restorer` expects or wraps a -/// successful result. -#[derive(Debug)] -pub enum RestorerResponse { - AwaitNextChunk { path: Vec>, index: usize }, - Ready, -} +// Splits the given global chunk id into [SUBTREE_PREFIX:CHUNK_ID] +pub fn util_split_global_chunk_id( + global_chunk_id: &[u8], +) -> Result<(crate::SubtreePrefix, String), Error> { + let chunk_prefix_length: usize = 32; + if global_chunk_id.len() < chunk_prefix_length { + return Err(Error::CorruptedData( + "expected global chunk id of at least 32 length".to_string(), + )); + } -#[derive(Debug)] -pub struct RestorerError(String); - -impl<'db> Restorer<'db> { - /// Create a GroveDb restorer using a backing storage_cost and root hash. 
- pub fn new( - grove_db: &'db GroveDb, - root_hash: Hash, - tx: &'db Transaction<'db>, - ) -> Result { - Ok(Restorer { - tx, - current_merk_restorer: Some(MerkRestorer::new( - Merk::open_base( - grove_db - .db - .get_immediate_storage_context(SubtreePath::empty(), tx) - .unwrap(), - false, - Some(&Element::value_defined_cost_for_serialized_value), - ) - .unwrap() - .map_err(|e| RestorerError(e.to_string()))?, - None, - root_hash, - )), - current_merk_chunk_index: 0, - current_merk_path: vec![], - queue: VecDeque::new(), - grove_db, - }) + let (chunk_prefix, chunk_id) = global_chunk_id.split_at(chunk_prefix_length); + let mut array = [0u8; 32]; + array.copy_from_slice(chunk_prefix); + let chunk_prefix_key: crate::SubtreePrefix = array; + let str_chunk_id = String::from_utf8(chunk_id.to_vec()); + match str_chunk_id { + Ok(s) => Ok((chunk_prefix_key, s)), + Err(_) => Err(Error::CorruptedData( + "unable to convert chunk id to string".to_string(), + )), } +} - /// Process next chunk and receive instruction on what to do next. - pub fn process_chunk( - &mut self, - chunk_ops: impl IntoIterator, - ) -> Result { - if self.current_merk_restorer.is_none() { - // Last restorer was consumed and no more Merks to process. - return Ok(RestorerResponse::Ready); +#[cfg(feature = "full")] +impl GroveDb { + pub fn create_state_sync_info(&self) -> StateSyncInfo { + let pending_chunks = BTreeSet::new(); + let processed_prefixes = BTreeSet::new(); + StateSyncInfo { + restorer: None, + processed_prefixes, + current_prefix: None, + pending_chunks, + num_processed_chunks: 0, } - // First we decode a chunk to take out info about nested trees to add them into - // todo list. 
- let mut ops = Vec::new(); - for op in chunk_ops { - ops.push(op); - match ops.last().expect("just inserted") { - Op::Push(Node::KVValueHashFeatureType( - key, - value_bytes, - value_hash, - feature_type, - )) - | Op::PushInverted(Node::KVValueHashFeatureType( - key, - value_bytes, - value_hash, - feature_type, - )) => { - if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = - Element::deserialize(value_bytes) - .map_err(|e| RestorerError(e.to_string()))? - { - if root_key.is_none() || self.current_merk_path.last() == Some(key) { - // We add only subtrees of the current subtree to queue, skipping - // itself; Also skipping empty Merks. - continue; + } + + // Returns the discovered subtrees found recursively along with their associated + // metadata Params: + // tx: Transaction. Function returns the data by opening merks at given tx. + // TODO: Add a SubTreePath as param and start searching from that path instead + // of root (as it is now) + pub fn get_subtrees_metadata(&self, tx: TransactionArg) -> Result { + let mut subtrees_metadata = crate::replication::SubtreesMetadata::new(); + + let subtrees_root = self.find_subtrees(&SubtreePath::empty(), tx).value?; + for subtree in subtrees_root.into_iter() { + let subtree_path: Vec<&[u8]> = subtree.iter().map(|vec| vec.as_slice()).collect(); + let path: &[&[u8]] = &subtree_path; + let prefix = RocksDbStorage::build_prefix(path.as_ref().into()).unwrap(); + + let current_path = SubtreePath::from(path); + + match (current_path.derive_parent(), subtree.last()) { + (Some((parent_path, _)), Some(parent_key)) => match tx { + None => { + let parent_merk = self + .open_non_transactional_merk_at_path(parent_path, None) + .value?; + if let Ok(Some((elem_value, elem_value_hash))) = parent_merk + .get_value_and_value_hash( + parent_key, + true, + None::<&fn(&[u8]) -> Option>, + ) + .value + { + let actual_value_hash = value_hash(&elem_value).unwrap(); + subtrees_metadata.data.insert( + prefix, + 
(current_path.to_vec(), actual_value_hash, elem_value_hash), + ); + } + } + Some(t) => { + let parent_merk = self + .open_transactional_merk_at_path(parent_path, t, None) + .value?; + if let Ok(Some((elem_value, elem_value_hash))) = parent_merk + .get_value_and_value_hash( + parent_key, + true, + None::<&fn(&[u8]) -> Option>, + ) + .value + { + let actual_value_hash = value_hash(&elem_value).unwrap(); + subtrees_metadata.data.insert( + prefix, + (current_path.to_vec(), actual_value_hash, elem_value_hash), + ); } - let mut path = self.current_merk_path.clone(); - path.push(key.clone()); - // The value hash is the root tree hash - self.queue.push_back(( - path, - value_bytes.to_owned(), - *value_hash, - *feature_type, - )); } + }, + _ => { + subtrees_metadata.data.insert( + prefix, + ( + current_path.to_vec(), + CryptoHash::default(), + CryptoHash::default(), + ), + ); } - _ => {} } } - - // Process chunk using Merk's possibilities. - let remaining = self - .current_merk_restorer - .as_mut() - .expect("restorer exists at this point") - .process_chunk(ops) - .map_err(|e| RestorerError(e.to_string()))?; - - self.current_merk_chunk_index += 1; - - if remaining == 0 { - // If no more chunks for this Merk required decide if we're done or take a next - // Merk to process. - self.current_merk_restorer - .take() - .expect("restorer exists at this point") - .finalize() - .map_err(|e| RestorerError(e.to_string()))?; - if let Some((next_path, combining_value, expected_hash, _)) = self.queue.pop_front() { - // Process next subtree. 
- let merk = self - .grove_db - .open_merk_for_replication(next_path.as_slice().into(), self.tx) - .map_err(|e| RestorerError(e.to_string()))?; - self.current_merk_restorer = Some(MerkRestorer::new( - merk, - Some(combining_value), - expected_hash, - )); - self.current_merk_chunk_index = 0; - self.current_merk_path = next_path; - - Ok(RestorerResponse::AwaitNextChunk { - path: self.current_merk_path.clone(), - index: self.current_merk_chunk_index, - }) - } else { - Ok(RestorerResponse::Ready) - } - } else { - // Request a chunk at the same path but with incremented index. - Ok(RestorerResponse::AwaitNextChunk { - path: self.current_merk_path.clone(), - index: self.current_merk_chunk_index, - }) - } - } -} - -/// Chunk producer wrapper which uses bigger messages that may include chunks of -/// requested subtree with its right siblings. -/// -/// Because `Restorer` builds GroveDb replica breadth-first way from top to -/// bottom it makes sense to send a subtree's siblings next instead of its own -/// subtrees. -pub struct SiblingsChunkProducer<'db> { - chunk_producer: SubtreeChunkProducer<'db>, -} - -#[derive(Debug)] -pub struct GroveChunk { - subtree_chunks: Vec<(usize, Vec)>, -} - -impl<'db> SiblingsChunkProducer<'db> { - /// New - pub fn new(chunk_producer: SubtreeChunkProducer<'db>) -> Self { - SiblingsChunkProducer { chunk_producer } + Ok(subtrees_metadata) } - /// Get a collection of chunks possibly from different Merks with the first - /// one as requested. - pub fn get_chunk<'p, P>(&mut self, path: P, index: usize) -> Result, Error> - where - P: IntoIterator, -

::IntoIter: Clone + DoubleEndedIterator + ExactSizeIterator, - { - let path_iter = path.into_iter(); - let mut result = Vec::new(); - let mut ops_count = 0; - - if path_iter.len() == 0 { - // We're at the root of GroveDb, no siblings here. - self.process_subtree_chunks(&mut result, &mut ops_count, empty(), index)?; - return Ok(result); - }; - - // Get siblings on the right to send chunks of multiple Merks if it meets the - // limit. - - let mut siblings_keys: VecDeque> = VecDeque::new(); - - let mut parent_path = path_iter; - let requested_key = parent_path.next_back(); - - let parent_ctx = self - .chunk_producer - .grove_db - .db - .get_storage_context( - parent_path.clone().collect::>().as_slice().into(), - None, - ) - .unwrap(); - let mut siblings_iter = Element::iterator(parent_ctx.raw_iter()).unwrap(); - - if let Some(key) = requested_key { - siblings_iter.fast_forward(key)?; - } - - while let Some(element) = siblings_iter.next_element().unwrap()? { - if let (key, Element::Tree(..)) | (key, Element::SumTree(..)) = element { - siblings_keys.push_back(key); - } - } - - let mut current_index = index; - // Process each subtree - while let Some(subtree_key) = siblings_keys.pop_front() { - #[allow(clippy::map_identity)] - let subtree_path = parent_path - .clone() - .map(|x| x) - .chain(once(subtree_key.as_slice())); - - self.process_subtree_chunks(&mut result, &mut ops_count, subtree_path, current_index)?; - // Going to a next sibling, should start from 0. - - if ops_count >= OPS_PER_CHUNK { - break; - } - current_index = 0; + // Fetch a chunk by global chunk id (should be called by ABCI when + // LoadSnapshotChunk method is called) Params: + // global_chunk_id: Global chunk id in the following format: + // [SUBTREE_PREFIX:CHUNK_ID] SUBTREE_PREFIX: 32 bytes (mandatory) (All zeros + // = Root subtree) CHUNK_ID: 0.. bytes (optional) Traversal instructions to + // the root of the given chunk. Traversal instructions are "1" for left, and + // "0" for right. 
TODO: Compact CHUNK_ID into bitset for size optimization + // as a subtree can be big hence traversal instructions for the deepest chunks + // tx: Transaction. Function returns the data by opening merks at given tx. + // Returns the Chunk proof operators for the requested chunk + pub fn fetch_chunk( + &self, + global_chunk_id: &[u8], + tx: TransactionArg, + ) -> Result, Error> { + let chunk_prefix_length: usize = 32; + if global_chunk_id.len() < chunk_prefix_length { + return Err(Error::CorruptedData( + "expected global chunk id of at least 32 length".to_string(), + )); } - Ok(result) - } + let (chunk_prefix, chunk_id) = global_chunk_id.split_at(chunk_prefix_length); - /// Process one subtree's chunks - fn process_subtree_chunks<'p, P>( - &mut self, - result: &mut Vec, - ops_count: &mut usize, - subtree_path: P, - from_index: usize, - ) -> Result<(), Error> - where - P: IntoIterator, -

::IntoIter: Clone + DoubleEndedIterator, - { - let path_iter = subtree_path.into_iter(); - - let mut current_index = from_index; - let mut subtree_chunks = Vec::new(); - - loop { - let ops = self - .chunk_producer - .get_chunk(path_iter.clone(), current_index)?; - - *ops_count += ops.len(); - subtree_chunks.push((current_index, ops)); - current_index += 1; - if current_index >= self.chunk_producer.chunks_in_current_producer() - || *ops_count >= OPS_PER_CHUNK - { - break; - } - } + let mut array = [0u8; 32]; + array.copy_from_slice(chunk_prefix); + let chunk_prefix_key: crate::SubtreePrefix = array; - result.push(GroveChunk { subtree_chunks }); + let subtrees_metadata = self.get_subtrees_metadata(tx)?; - Ok(()) - } -} + match subtrees_metadata.data.get(&chunk_prefix_key) { + Some(path_data) => { + let subtree = &path_data.0; + let subtree_path: Vec<&[u8]> = subtree.iter().map(|vec| vec.as_slice()).collect(); + let path: &[&[u8]] = &subtree_path; -/// `Restorer` wrapper that applies multiple chunks at once and eventually -/// returns less requests. It is named by analogy with IO types that do less -/// syscalls. -pub struct BufferedRestorer<'db> { - restorer: Restorer<'db>, -} + match tx { + None => { + let merk = self + .open_non_transactional_merk_at_path(path.into(), None) + .value?; -impl<'db> BufferedRestorer<'db> { - /// New - pub fn new(restorer: Restorer<'db>) -> Self { - BufferedRestorer { restorer } - } + if merk.is_empty_tree().unwrap() { + return Ok(vec![]); + } - /// Process next chunk and receive instruction on what to do next. 
- pub fn process_grove_chunks(&mut self, chunks: I) -> Result - where - I: IntoIterator + ExactSizeIterator, - { - let mut response = RestorerResponse::Ready; - - for c in chunks.into_iter() { - for ops in c.subtree_chunks.into_iter().map(|x| x.1) { - if !ops.is_empty() { - response = self.restorer.process_chunk(ops)?; - } - } - } + let chunk_producer_res = ChunkProducer::new(&merk); + match chunk_producer_res { + Ok(mut chunk_producer) => match std::str::from_utf8(chunk_id) { + Ok(chunk_id_str) => { + let chunk_res = chunk_producer.chunk(chunk_id_str); + match chunk_res { + Ok((chunk, _)) => Ok(chunk), + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), + } + } + Err(_) => Err(Error::CorruptedData( + "Unable to process chunk id".to_string(), + )), + }, + Err(_) => Err(Error::CorruptedData( + "Unable to create Chunk producer".to_string(), + )), + } + } + Some(t) => { + let merk = self + .open_transactional_merk_at_path(path.into(), &t, None) + .value?; - Ok(response) - } -} + if merk.is_empty_tree().unwrap() { + return Ok(vec![]); + } -#[cfg(test)] -mod test { - use rand::RngCore; - use tempfile::TempDir; - - use super::*; - use crate::{ - batch::GroveDbOp, - reference_path::ReferencePathType, - tests::{common::EMPTY_PATH, make_test_grovedb, TempGroveDb, ANOTHER_TEST_LEAF, TEST_LEAF}, - }; - - fn replicate(original_db: &GroveDb) -> TempDir { - let replica_tempdir = TempDir::new().unwrap(); - - { - let replica_db = GroveDb::open(replica_tempdir.path()).unwrap(); - let mut chunk_producer = original_db.chunks(); - let tx = replica_db.start_transaction(); - - let mut restorer = Restorer::new( - &replica_db, - original_db.root_hash(None).unwrap().unwrap(), - &tx, - ) - .expect("cannot create restorer"); - - // That means root tree chunk with index 0 - let mut next_chunk: (Vec>, usize) = (vec![], 0); - - loop { - let chunk = chunk_producer - .get_chunk(next_chunk.0.iter().map(|x| x.as_slice()), next_chunk.1) - .expect("cannot get 
next chunk"); - match restorer.process_chunk(chunk).expect("cannot process chunk") { - RestorerResponse::Ready => break, - RestorerResponse::AwaitNextChunk { path, index } => { - next_chunk = (path, index); + let chunk_producer_res = ChunkProducer::new(&merk); + match chunk_producer_res { + Ok(mut chunk_producer) => match std::str::from_utf8(chunk_id) { + Ok(chunk_id_str) => { + let chunk_res = chunk_producer.chunk(chunk_id_str); + match chunk_res { + Ok((chunk, _)) => Ok(chunk), + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), + } + } + Err(_) => Err(Error::CorruptedData( + "Unable to process chunk id".to_string(), + )), + }, + Err(_) => Err(Error::CorruptedData( + "Unable to create Chunk producer".to_string(), + )), + } } } } - - replica_db.commit_transaction(tx).unwrap().unwrap(); + None => Err(Error::CorruptedData("Prefix not found".to_string())), } - replica_tempdir } - fn replicate_bigger_messages(original_db: &GroveDb) -> TempDir { - let replica_tempdir = TempDir::new().unwrap(); - - { - let replica_grove_db = GroveDb::open(replica_tempdir.path()).unwrap(); - let mut chunk_producer = SiblingsChunkProducer::new(original_db.chunks()); - let tx = replica_grove_db.start_transaction(); - - let mut restorer = BufferedRestorer::new( - Restorer::new( - &replica_grove_db, - original_db.root_hash(None).unwrap().unwrap(), - &tx, - ) - .expect("cannot create restorer"), - ); - - // That means root tree chunk with index 0 - let mut next_chunk: (Vec>, usize) = (vec![], 0); - - loop { - let chunks = chunk_producer - .get_chunk(next_chunk.0.iter().map(|x| x.as_slice()), next_chunk.1) - .expect("cannot get next chunk"); - match restorer - .process_grove_chunks(chunks.into_iter()) - .expect("cannot process chunk") + // Starts a state sync process (should be called by ABCI when OfferSnapshot + // method is called) Params: + // state_sync_info: Consumed StateSyncInfo + // app_hash: Snapshot's AppHash + // tx: Transaction for the state 
sync + // Returns the first set of global chunk ids that can be fetched from sources (+ + // the StateSyncInfo transferring ownership back to the caller) + pub fn start_snapshot_syncing<'db>( + &'db self, + mut state_sync_info: StateSyncInfo<'db>, + app_hash: CryptoHash, + tx: &'db Transaction, + ) -> Result<(Vec>, StateSyncInfo), Error> { + let mut res = vec![]; + + match ( + &mut state_sync_info.restorer, + &state_sync_info.current_prefix, + ) { + (None, None) => { + if state_sync_info.pending_chunks.is_empty() + && state_sync_info.processed_prefixes.is_empty() { - RestorerResponse::Ready => break, - RestorerResponse::AwaitNextChunk { path, index } => { - next_chunk = (path, index); + let root_prefix = [0u8; 32]; + if let Ok(merk) = self.open_merk_for_replication(SubtreePath::empty(), tx) { + let restorer = Restorer::new(merk, app_hash, None); + state_sync_info.restorer = Some(restorer); + state_sync_info.current_prefix = Some(root_prefix); + state_sync_info.pending_chunks.insert(root_prefix.to_vec()); + + res.push(root_prefix.to_vec()); + } else { + return Err(Error::InternalError("Unable to open merk for replication")); } + } else { + return Err(Error::InternalError("Invalid internal state sync info")); } } - - replica_grove_db.commit_transaction(tx).unwrap().unwrap(); - } - - replica_tempdir - } - - fn test_replication_internal<'a, I, R, F>( - original_db: &TempGroveDb, - to_compare: I, - replicate_fn: F, - ) where - R: AsRef<[u8]> + 'a, - I: Iterator, - F: Fn(&GroveDb) -> TempDir, - { - let expected_root_hash = original_db.root_hash(None).unwrap().unwrap(); - - let replica_tempdir = replicate_fn(original_db); - - let replica = GroveDb::open(replica_tempdir.path()).unwrap(); - assert_eq!( - replica.root_hash(None).unwrap().unwrap(), - expected_root_hash - ); - - for full_path in to_compare { - let (key, path) = full_path.split_last().unwrap(); - assert_eq!( - original_db.get(path, key.as_ref(), None).unwrap().unwrap(), - replica.get(path, key.as_ref(), 
None).unwrap().unwrap() - ); - } - } - - fn test_replication<'a, I, R>(original_db: &TempGroveDb, to_compare: I) - where - R: AsRef<[u8]> + 'a, - I: Iterator + Clone, - { - test_replication_internal(original_db, to_compare.clone(), replicate); - test_replication_internal(original_db, to_compare, replicate_bigger_messages); - } - - #[test] - fn replicate_wrong_root_hash() { - let db = make_test_grovedb(); - let mut bad_hash = db.root_hash(None).unwrap().unwrap(); - bad_hash[0] = bad_hash[0].wrapping_add(1); - - let tmp_dir = TempDir::new().unwrap(); - let restored_db = GroveDb::open(tmp_dir.path()).unwrap(); - let tx = restored_db.start_transaction(); - let mut restorer = Restorer::new(&restored_db, bad_hash, &tx).unwrap(); - let mut chunks = db.chunks(); - assert!(restorer - .process_chunk(chunks.get_chunk([], 0).unwrap()) - .is_err()); - } - - #[test] - fn replicate_provide_wrong_tree() { - let db = make_test_grovedb(); - db.insert( - &[TEST_LEAF], - b"key1", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key1", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - - let expected_hash = db.root_hash(None).unwrap().unwrap(); - - let tmp_dir = TempDir::new().unwrap(); - let restored_db = GroveDb::open(tmp_dir.path()).unwrap(); - let tx = restored_db.start_transaction(); - let mut restorer = Restorer::new(&restored_db, expected_hash, &tx).unwrap(); - let mut chunks = db.chunks(); - - let next_op = restorer - .process_chunk(chunks.get_chunk([], 0).unwrap()) - .unwrap(); - match next_op { - RestorerResponse::AwaitNextChunk { path, index } => { - // Feed restorer a wrong Merk! 
- let chunk = if path == [TEST_LEAF] { - chunks.get_chunk([ANOTHER_TEST_LEAF], index).unwrap() - } else { - chunks.get_chunk([TEST_LEAF], index).unwrap() - }; - assert!(restorer.process_chunk(chunk).is_err()); + _ => { + return Err(Error::InternalError( + "GroveDB has already started a snapshot syncing", + )); } - _ => {} } - } - - #[test] - fn replicate_nested_grovedb() { - let db = make_test_grovedb(); - db.insert( - &[TEST_LEAF], - b"key1", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[TEST_LEAF], - b"key2", - Element::new_reference(ReferencePathType::SiblingReference(b"key1".to_vec())), - None, - None, - ) - .unwrap() - .expect("should insert reference"); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2"], - b"key3", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2", b"key3"], - b"key4", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - - let to_compare = [ - [TEST_LEAF].as_ref(), - [TEST_LEAF, b"key1"].as_ref(), - [TEST_LEAF, b"key2"].as_ref(), - [ANOTHER_TEST_LEAF].as_ref(), - [ANOTHER_TEST_LEAF, b"key2"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"key3"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"key3", b"key4"].as_ref(), - ]; - test_replication(&db, to_compare.into_iter()); - } - #[test] - fn replicate_nested_grovedb_with_sum_trees() { - let db = make_test_grovedb(); - db.insert( - &[TEST_LEAF], - b"key1", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[TEST_LEAF], - b"key2", - Element::new_reference(ReferencePathType::SiblingReference(b"key1".to_vec())), - None, - None, - ) - .unwrap() - .expect("should insert 
reference"); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key2", - Element::empty_sum_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2"], - b"sumitem", - Element::new_sum_item(15), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2"], - b"key3", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[ANOTHER_TEST_LEAF, b"key2", b"key3"], - b"key4", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - - let to_compare = [ - [TEST_LEAF].as_ref(), - [TEST_LEAF, b"key1"].as_ref(), - [TEST_LEAF, b"key2"].as_ref(), - [ANOTHER_TEST_LEAF].as_ref(), - [ANOTHER_TEST_LEAF, b"key2"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"sumitem"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"key3"].as_ref(), - [ANOTHER_TEST_LEAF, b"key2", b"key3", b"key4"].as_ref(), - ]; - test_replication(&db, to_compare.into_iter()); + Ok((res, state_sync_info)) } - // TODO: Highlights a bug in replication - #[test] - fn replicate_grovedb_with_sum_tree() { - let db = make_test_grovedb(); - db.insert(&[TEST_LEAF], b"key1", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[TEST_LEAF, b"key1"], - b"key2", - Element::new_item(vec![4]), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - db.insert( - &[TEST_LEAF, b"key1"], - b"key3", - Element::new_item(vec![10]), - None, - None, - ) - .unwrap() - .expect("cannot insert an element"); - - let to_compare = [ - [TEST_LEAF].as_ref(), - [ANOTHER_TEST_LEAF].as_ref(), - [TEST_LEAF, b"key1"].as_ref(), - [TEST_LEAF, b"key1", b"key2"].as_ref(), - [TEST_LEAF, b"key1", b"key3"].as_ref(), - ]; - test_replication(&db, to_compare.into_iter()); - } - - #[test] - fn replicate_a_big_one() { - const HEIGHT: usize = 3; - const SUBTREES_FOR_EACH: usize = 3; - const 
SCALARS_FOR_EACH: usize = 600; - - let db = make_test_grovedb(); - let mut to_compare = Vec::new(); - - let mut rng = rand::thread_rng(); - let mut subtrees: VecDeque> = VecDeque::new(); - - // Generate root tree leafs - for _ in 0..SUBTREES_FOR_EACH { - let mut bytes = [0; 8]; - rng.fill_bytes(&mut bytes); - db.insert(EMPTY_PATH, &bytes, Element::empty_tree(), None, None) - .unwrap() - .unwrap(); - subtrees.push_front(vec![bytes]); - to_compare.push(vec![bytes]); - } - - while let Some(path) = subtrees.pop_front() { - let mut batch = Vec::new(); - - if path.len() < HEIGHT { - for _ in 0..SUBTREES_FOR_EACH { - let mut bytes = [0; 8]; - rng.fill_bytes(&mut bytes); - - batch.push(GroveDbOp::insert_op( - path.iter().map(|x| x.to_vec()).collect(), - bytes.to_vec(), - Element::empty_tree(), + // Apply a chunk (should be called by ABCI when ApplySnapshotChunk method is + // called) Params: + // state_sync_info: Consumed StateSyncInfo + // chunk: (Global chunk id, Chunk proof operators) + // tx: Transaction for the state sync + // Returns the next set of global chunk ids that can be fetched from sources (+ + // the StateSyncInfo transferring ownership back to the caller) + pub fn apply_chunk<'db>( + &'db self, + mut state_sync_info: StateSyncInfo<'db>, + chunk: (&[u8], Vec), + tx: &'db Transaction, + ) -> Result<(Vec>, StateSyncInfo), Error> { + let mut res = vec![]; + + let (global_chunk_id, chunk_data) = chunk; + let (chunk_prefix, chunk_id) = replication::util_split_global_chunk_id(global_chunk_id)?; + + match ( + &mut state_sync_info.restorer, + &state_sync_info.current_prefix, + ) { + (Some(restorer), Some(ref current_prefix)) => { + if *current_prefix != chunk_prefix { + return Err(Error::InternalError("Invalid incoming prefix")); + } + if !state_sync_info.pending_chunks.contains(global_chunk_id) { + return Err(Error::InternalError( + "Incoming global_chunk_id not expected", )); - - let mut new_path = path.clone(); - new_path.push(bytes); - 
subtrees.push_front(new_path.clone()); - to_compare.push(new_path.clone()); + } + state_sync_info.pending_chunks.remove(global_chunk_id); + if !chunk_data.is_empty() { + match restorer.process_chunk(chunk_id.to_string(), chunk_data) { + Ok(next_chunk_ids) => { + state_sync_info.num_processed_chunks += 1; + for next_chunk_id in next_chunk_ids { + let mut next_global_chunk_id = chunk_prefix.to_vec(); + next_global_chunk_id.extend(next_chunk_id.as_bytes().to_vec()); + state_sync_info + .pending_chunks + .insert(next_global_chunk_id.clone()); + res.push(next_global_chunk_id); + } + } + _ => { + return Err(Error::InternalError("Unable to process incoming chunk")); + } + }; } } - - for _ in 0..SCALARS_FOR_EACH { - let mut bytes = [0; 8]; - let mut bytes_val = vec![]; - rng.fill_bytes(&mut bytes); - rng.fill_bytes(&mut bytes_val); - - batch.push(GroveDbOp::insert_op( - path.iter().map(|x| x.to_vec()).collect(), - bytes.to_vec(), - Element::new_item(bytes_val), - )); - - let mut new_path = path.clone(); - new_path.push(bytes); - to_compare.push(new_path.clone()); + _ => { + return Err(Error::InternalError("GroveDB is not in syncing mode")); } - - db.apply_batch(batch, None, None).unwrap().unwrap(); } - test_replication(&db, to_compare.iter().map(|x| x.as_slice())); - } - - #[test] - fn replicate_from_checkpoint() { - // Create a simple GroveDb first - let db = make_test_grovedb(); - db.insert( - &[TEST_LEAF], - b"key1", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key2", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - - // Save its state with checkpoint - let checkpoint_dir_parent = TempDir::new().unwrap(); - let checkpoint_dir = checkpoint_dir_parent.path().join("cp"); - db.create_checkpoint(&checkpoint_dir).unwrap(); - - // Alter the db to make difference between current state and checkpoint - db.delete(&[TEST_LEAF], b"key1", None, None) - .unwrap() - 
.unwrap(); - db.insert( - &[TEST_LEAF], - b"key3", - Element::new_item(b"ayyd".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - db.insert( - &[ANOTHER_TEST_LEAF], - b"key2", - Element::new_item(b"ayyc".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - - let checkpoint_db = GroveDb::open(&checkpoint_dir).unwrap(); - - // Ensure checkpoint differs from current state - assert_ne!( - checkpoint_db - .get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - db.get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - ); - - // Build a replica from checkpoint - let replica_dir = replicate(&checkpoint_db); - let replica_db = GroveDb::open(&replica_dir).unwrap(); - - assert_eq!( - checkpoint_db.root_hash(None).unwrap().unwrap(), - replica_db.root_hash(None).unwrap().unwrap() - ); + if res.is_empty() { + if !state_sync_info.pending_chunks.is_empty() { + return Ok((res, state_sync_info)); + } + match ( + state_sync_info.restorer.take(), + state_sync_info.current_prefix.take(), + ) { + (Some(restorer), Some(current_prefix)) => { + if (state_sync_info.num_processed_chunks > 0) && (restorer.finalize().is_err()) + { + return Err(Error::InternalError("Unable to finalize merk")); + } + state_sync_info.processed_prefixes.insert(current_prefix); + + let subtrees_metadata = self.get_subtrees_metadata(Some(tx))?; + if let Some(value) = subtrees_metadata.data.get(¤t_prefix) { + println!( + " path:{:?} done", + replication::util_path_to_string(&value.0) + ); + } - assert_eq!( - checkpoint_db - .get(&[TEST_LEAF], b"key1", None) - .unwrap() - .unwrap(), - replica_db - .get(&[TEST_LEAF], b"key1", None) - .unwrap() - .unwrap(), - ); - assert_eq!( - checkpoint_db - .get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - replica_db - .get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - ); - assert!(matches!( - replica_db.get(&[TEST_LEAF], b"key3", None).unwrap(), - Err(Error::PathKeyNotFound(_)) - )); + for (prefix, 
prefix_metadata) in &subtrees_metadata.data { + if !state_sync_info.processed_prefixes.contains(prefix) { + let (current_path, s_actual_value_hash, s_elem_value_hash) = + &prefix_metadata; + + let subtree_path: Vec<&[u8]> = + current_path.iter().map(|vec| vec.as_slice()).collect(); + let path: &[&[u8]] = &subtree_path; + + if let Ok(merk) = self.open_merk_for_replication(path.into(), tx) { + let restorer = Restorer::new( + merk, + *s_elem_value_hash, + Some(*s_actual_value_hash), + ); + state_sync_info.restorer = Some(restorer); + state_sync_info.current_prefix = Some(*prefix); + state_sync_info.num_processed_chunks = 0; + + let root_chunk_prefix = prefix.to_vec(); + state_sync_info + .pending_chunks + .insert(root_chunk_prefix.clone()); + res.push(root_chunk_prefix); + } else { + return Err(Error::InternalError( + "Unable to open merk for replication", + )); + } + break; + } + } + } + _ => { + return Err(Error::InternalError("Unable to finalize tree")); + } + } + } - // Drop original db and checkpoint dir too to ensure there is no dependency - drop(db); - drop(checkpoint_db); - drop(checkpoint_dir); - - assert_eq!( - replica_db - .get(&[ANOTHER_TEST_LEAF], b"key2", None) - .unwrap() - .unwrap(), - Element::new_item(b"ayyb".to_vec()) - ); + Ok((res, state_sync_info)) } } diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index 09a38e6b..95e0d2b1 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -465,7 +465,7 @@ fn test_element_with_flags() { let db = make_test_grovedb(); db.insert( - [TEST_LEAF.as_ref()].as_ref(), + [TEST_LEAF].as_ref(), b"key1", Element::empty_tree(), None, @@ -2812,7 +2812,7 @@ fn test_root_hash() { #[test] fn test_get_non_existing_root_leaf() { let db = make_test_grovedb(); - assert!(matches!(db.get(EMPTY_PATH, b"ayy", None).unwrap(), Err(_))); + assert!(db.get(EMPTY_PATH, b"ayy", None).unwrap().is_err()); } #[test] @@ -2839,7 +2839,7 @@ fn test_check_subtree_exists_function() { // Empty tree path means root 
always exist assert!(db - .check_subtree_exists_invalid_path(EMPTY_PATH.into(), None) + .check_subtree_exists_invalid_path(EMPTY_PATH, None) .unwrap() .is_ok()); @@ -2952,17 +2952,14 @@ fn test_storage_wipe() { .expect("cannot insert item"); // retrieve key before wipe - let elem = db - .get(&[TEST_LEAF.as_ref()], b"key", None) - .unwrap() - .unwrap(); + let elem = db.get(&[TEST_LEAF], b"key", None).unwrap().unwrap(); assert_eq!(elem, Element::new_item(b"ayy".to_vec())); // wipe the database db.grove_db.wipe().unwrap(); // retrieve key after wipe - let elem_result = db.get(&[TEST_LEAF.as_ref()], b"key", None).unwrap(); + let elem_result = db.get(&[TEST_LEAF], b"key", None).unwrap(); assert!(elem_result.is_err()); assert!(matches!( elem_result, diff --git a/grovedb/src/tests/query_tests.rs b/grovedb/src/tests/query_tests.rs index 304042bd..579b2e42 100644 --- a/grovedb/src/tests/query_tests.rs +++ b/grovedb/src/tests/query_tests.rs @@ -46,7 +46,7 @@ use crate::{ fn populate_tree_for_non_unique_range_subquery(db: &TempGroveDb) { // Insert a couple of subtrees first for i in 1985u32..2000 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + let i_vec = i.to_be_bytes().to_vec(); db.insert( [TEST_LEAF].as_ref(), &i_vec, @@ -70,7 +70,7 @@ fn populate_tree_for_non_unique_range_subquery(db: &TempGroveDb) { for j in 100u32..150 { let mut j_vec = i_vec.clone(); - j_vec.append(&mut (j as u32).to_be_bytes().to_vec()); + j_vec.append(&mut j.to_be_bytes().to_vec()); db.insert( [TEST_LEAF, i_vec.as_slice(), b"\0"].as_ref(), &j_vec.clone(), @@ -87,7 +87,7 @@ fn populate_tree_for_non_unique_range_subquery(db: &TempGroveDb) { fn populate_tree_for_non_unique_double_range_subquery(db: &TempGroveDb) { // Insert a couple of subtrees first for i in 0u32..10 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + let i_vec = i.to_be_bytes().to_vec(); db.insert( [TEST_LEAF].as_ref(), &i_vec, @@ -110,7 +110,7 @@ fn populate_tree_for_non_unique_double_range_subquery(db: &TempGroveDb) { 
.expect("successful subtree insert"); for j in 25u32..50 { - let j_vec = (j as u32).to_be_bytes().to_vec(); + let j_vec = j.to_be_bytes().to_vec(); db.insert( [TEST_LEAF, i_vec.as_slice(), b"a"].as_ref(), &j_vec, @@ -134,7 +134,7 @@ fn populate_tree_for_non_unique_double_range_subquery(db: &TempGroveDb) { .expect("successful subtree insert"); for k in 100u32..110 { - let k_vec = (k as u32).to_be_bytes().to_vec(); + let k_vec = k.to_be_bytes().to_vec(); db.insert( [TEST_LEAF, i_vec.as_slice(), b"a", &j_vec, b"\0"].as_ref(), &k_vec.clone(), @@ -173,7 +173,7 @@ fn populate_tree_by_reference_for_non_unique_range_subquery(db: &TempGroveDb) { .expect("successful subtree insert"); // Insert a couple of subtrees first for i in 1985u32..2000 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + let i_vec = i.to_be_bytes().to_vec(); db.insert( [TEST_LEAF, b"1"].as_ref(), &i_vec, @@ -198,7 +198,7 @@ fn populate_tree_by_reference_for_non_unique_range_subquery(db: &TempGroveDb) { for j in 100u32..150 { let random_key = rand::thread_rng().gen::<[u8; 32]>(); let mut j_vec = i_vec.clone(); - j_vec.append(&mut (j as u32).to_be_bytes().to_vec()); + j_vec.append(&mut j.to_be_bytes().to_vec()); // We should insert every item to the tree holding items db.insert( @@ -231,7 +231,7 @@ fn populate_tree_by_reference_for_non_unique_range_subquery(db: &TempGroveDb) { fn populate_tree_for_unique_range_subquery(db: &TempGroveDb) { // Insert a couple of subtrees first for i in 1985u32..2000 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + let i_vec = i.to_be_bytes().to_vec(); db.insert( [TEST_LEAF].as_ref(), &i_vec, @@ -278,7 +278,7 @@ fn populate_tree_by_reference_for_unique_range_subquery(db: &TempGroveDb) { .expect("successful subtree insert"); for i in 1985u32..2000 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + let i_vec = i.to_be_bytes().to_vec(); db.insert( [TEST_LEAF, b"1"].as_ref(), &i_vec, @@ -333,7 +333,7 @@ fn 
populate_tree_for_unique_range_subquery_with_non_unique_null_values(db: &mut .expect("successful subtree insert"); // Insert a couple of subtrees first for i in 100u32..200 { - let i_vec = (i as u32).to_be_bytes().to_vec(); + let i_vec = i.to_be_bytes().to_vec(); db.insert( [TEST_LEAF, &[], b"\0"].as_ref(), &i_vec, diff --git a/grovedb/src/versioning.rs b/grovedb/src/versioning.rs index a041b3d8..5a724afc 100644 --- a/grovedb/src/versioning.rs +++ b/grovedb/src/versioning.rs @@ -52,7 +52,7 @@ mod tests { assert_eq!(new_data, [244, 3, 1, 2, 3]); // show that read_version doesn't consume - assert_eq!(read_proof_version(&mut new_data.as_slice()).unwrap(), 500); + assert_eq!(read_proof_version(new_data.as_slice()).unwrap(), 500); assert_eq!(new_data, [244, 3, 1, 2, 3]); // show that we consume the version number and return the remaining vector diff --git a/merk/src/error.rs b/merk/src/error.rs index 405fdeb1..83fb3bde 100644 --- a/merk/src/error.rs +++ b/merk/src/error.rs @@ -27,6 +27,8 @@ // DEALINGS IN THE SOFTWARE. //! 
Errors +#[cfg(feature = "full")] +use crate::proofs::chunk::error::ChunkError; #[cfg(any(feature = "full", feature = "verify"))] #[derive(Debug, thiserror::Error)] @@ -57,13 +59,29 @@ pub enum Error { #[error("corrupted code execution error {0}")] CorruptedCodeExecution(&'static str), + /// Corrupted state + #[error("corrupted state: {0}")] + CorruptedState(&'static str), + /// Chunking error + #[cfg(feature = "full")] #[error("chunking error {0}")] - ChunkingError(&'static str), + ChunkingError(ChunkError), + + // TODO: remove + /// Old chunking error + #[error("chunking error {0}")] + OldChunkingError(&'static str), /// Chunk restoring error + #[cfg(feature = "full")] #[error("chunk restoring error {0}")] - ChunkRestoringError(String), + ChunkRestoringError(ChunkError), + + // TODO: remove + /// Chunk restoring error + #[error("chunk restoring error {0}")] + OldChunkRestoringError(String), /// Key not found error #[error("key not found error {0}")] @@ -97,6 +115,10 @@ pub enum Error { #[error("invalid operation error {0}")] InvalidOperation(&'static str), + /// Internal error + #[error("internal error {0}")] + InternalError(&'static str), + /// Specialized costs error #[error("specialized costs error {0}")] SpecializedCostsError(&'static str), diff --git a/merk/src/lib.rs b/merk/src/lib.rs index caf3837c..18255b27 100644 --- a/merk/src/lib.rs +++ b/merk/src/lib.rs @@ -35,7 +35,7 @@ extern crate core; /// The top-level store API. #[cfg(feature = "full")] -mod merk; +pub mod merk; #[cfg(feature = "full")] pub use crate::merk::{chunks::ChunkProducer, options::MerkOptions, restore::Restorer}; diff --git a/merk/src/merk/chunks.rs b/merk/src/merk/chunks.rs index 4f6564ef..8f840f91 100644 --- a/merk/src/merk/chunks.rs +++ b/merk/src/merk/chunks.rs @@ -26,479 +26,1039 @@ // IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Provides `ChunkProducer`, which creates chunk proofs for full replication of -//! a Merk. 
+use std::collections::VecDeque; -#[cfg(feature = "full")] -use grovedb_costs::CostsExt; -#[cfg(feature = "full")] -use grovedb_storage::{RawIterator, StorageContext}; +use ed::Encode; +use grovedb_storage::StorageContext; -#[cfg(feature = "full")] -use super::Merk; -#[cfg(feature = "full")] use crate::{ error::Error, - proofs::{chunk::get_next_chunk, Node, Op}, + proofs::{ + chunk::{ + chunk_op::ChunkOp, + error::ChunkError, + util::{ + chunk_height, chunk_id_from_traversal_instruction, + chunk_id_from_traversal_instruction_with_recovery, generate_traversal_instruction, + generate_traversal_instruction_as_string, number_of_chunks, + string_as_traversal_instruction, + }, + }, + Node, Op, + }, + Error::ChunkingError, + Merk, }; -#[cfg(feature = "full")] +/// ChunkProof for replication of a single subtree +#[derive(Debug)] +pub struct SubtreeChunk { + chunk: Vec, + next_index: Option, + remaining_limit: Option, +} + +impl SubtreeChunk { + pub fn new(chunk: Vec, next_index: Option, remaining_limit: Option) -> Self { + Self { + chunk, + next_index, + remaining_limit, + } + } +} + +/// ChunkProof for the replication of multiple subtrees. +#[derive(Debug)] +pub struct MultiChunk { + pub chunk: Vec, + pub next_index: Option, + pub remaining_limit: Option, +} + +impl MultiChunk { + pub fn new( + chunk: Vec, + next_index: Option, + remaining_limit: Option, + ) -> Self { + Self { + chunk, + next_index, + remaining_limit, + } + } +} + /// A `ChunkProducer` allows the creation of chunk proofs, used for trustlessly /// replicating entire Merk trees. Chunks can be generated on the fly in a /// random order, or iterated in order for slightly better performance. 
-pub struct ChunkProducer<'db, S: StorageContext<'db>> { - trunk: Vec, - chunk_boundaries: Vec>, - raw_iter: S::RawIterator, +pub struct ChunkProducer<'db, S> { + /// Represents the max height of the Merk tree + height: usize, + /// Represents the index of the next chunk index: usize, + merk: &'db Merk, } -#[cfg(feature = "full")] impl<'db, S> ChunkProducer<'db, S> where S: StorageContext<'db>, { - /// Creates a new `ChunkProducer` for the given `Merk` instance. In the - /// constructor, the first chunk (the "trunk") will be created. - pub fn new(merk: &Merk) -> Result { - let (trunk, has_more) = merk - .walk(|maybe_walker| match maybe_walker { - Some(mut walker) => walker.create_trunk_proof(), - None => Ok((vec![], false)).wrap_with_cost(Default::default()), - }) - .unwrap()?; - - let chunk_boundaries = if has_more { - trunk - .iter() - .filter_map(|op| match op { - Op::Push(Node::KVValueHashFeatureType(key, ..)) => Some(key.clone()), - _ => None, - }) - .collect() - } else { - vec![] - }; - - let mut raw_iter = merk.storage.raw_iter(); - raw_iter.seek_to_first().unwrap(); - - Ok(ChunkProducer { - trunk, - chunk_boundaries, - raw_iter, - index: 0, + /// Creates a new `ChunkProducer` for the given `Merk` instance + pub fn new(merk: &'db Merk) -> Result { + let tree_height = merk + .height() + .ok_or(Error::ChunkingError(ChunkError::EmptyTree( + "cannot create chunk producer for empty Merk", + )))?; + Ok(Self { + height: tree_height as usize, + index: 1, + merk, }) } /// Gets the chunk with the given index. Errors if the index is out of /// bounds or the tree is empty - the number of chunks can be checked by /// calling `producer.len()`. 
- pub fn chunk(&mut self, index: usize) -> Result, Error> { - if index >= self.len() { - return Err(Error::ChunkingError("Chunk index out-of-bounds")); + pub fn chunk_with_index( + &mut self, + chunk_index: usize, + ) -> Result<(Vec, Option), Error> { + let traversal_instructions = generate_traversal_instruction(self.height, chunk_index)?; + self.chunk_internal(chunk_index, traversal_instructions) + } + + /// Returns the chunk at a given chunk id. + pub fn chunk(&mut self, chunk_id: &str) -> Result<(Vec, Option), Error> { + let traversal_instructions = string_as_traversal_instruction(chunk_id)?; + let chunk_index = chunk_id_from_traversal_instruction_with_recovery( + traversal_instructions.as_slice(), + self.height, + )?; + let (chunk, next_index) = self.chunk_internal(chunk_index, traversal_instructions)?; + let index_string = next_index + .map(|index| generate_traversal_instruction_as_string(self.height, index)) + .transpose()?; + Ok((chunk, index_string)) + } + + /// Returns the chunk at the given index + /// Assumes index and traversal_instructions represents the same information + fn chunk_internal( + &mut self, + index: usize, + traversal_instructions: Vec, + ) -> Result<(Vec, Option), Error> { + // ensure that the chunk index is within bounds + let max_chunk_index = self.len(); + if index < 1 || index > max_chunk_index { + return Err(ChunkingError(ChunkError::OutOfBounds( + "chunk index out of bounds", + ))); } - self.index = index; + self.index = index + 1; + + let chunk_height = chunk_height(self.height, index).unwrap(); - if index == 0 || index == 1 { - self.raw_iter.seek_to_first().unwrap(); + let chunk = self.merk.walk(|maybe_walker| match maybe_walker { + Some(mut walker) => { + walker.traverse_and_build_chunk(&traversal_instructions, chunk_height) + } + None => Err(Error::ChunkingError(ChunkError::EmptyTree( + "cannot create chunk producer for empty Merk", + ))), + })?; + + // now we need to return the next index + // how do we know if we should 
return some or none + if self.index > max_chunk_index { + Ok((chunk, None)) } else { - let preceding_key = self.chunk_boundaries.get(index - 2).unwrap(); - self.raw_iter.seek(preceding_key).unwrap(); - self.raw_iter.next().unwrap(); + Ok((chunk, Some(self.index))) } + } - self.next_chunk() + /// Generate multichunk with chunk id + /// Multichunks accumulate as many chunks as they can until they have all + /// chunks or hit some optional limit + pub fn multi_chunk_with_limit( + &mut self, + chunk_id: &str, + limit: Option, + ) -> Result { + // we want to convert the chunk id to the index + let chunk_index = string_as_traversal_instruction(chunk_id).and_then(|instruction| { + chunk_id_from_traversal_instruction(instruction.as_slice(), self.height) + })?; + self.multi_chunk_with_limit_and_index(chunk_index, limit) } - /// Returns the total number of chunks for the underlying Merk tree. - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - let boundaries_len = self.chunk_boundaries.len(); - if boundaries_len == 0 { - 1 - } else { - boundaries_len + 2 + /// Generate multichunk with chunk index + /// Multichunks accumulate as many chunks as they can until they have all + /// chunks or hit some optional limit + pub fn multi_chunk_with_limit_and_index( + &mut self, + index: usize, + limit: Option, + ) -> Result { + // TODO: what happens if the vec is filled? + // we need to have some kind of hardhoc limit value if none is supplied. + // maybe we can just do something with the length to fix this? 
+ let mut chunk = vec![]; + + let mut current_index = Some(index); + let mut current_limit = limit; + + // generate as many subtree chunks as we can + // until we have exhausted all or hit a limit restriction + while current_index.is_some() { + let current_index_traversal_instruction = generate_traversal_instruction( + self.height, + current_index.expect("confirmed is Some"), + )?; + let chunk_id_op = ChunkOp::ChunkId(current_index_traversal_instruction); + + // factor in the ChunkId encoding length in limit calculations + let temp_limit = if let Some(limit) = current_limit { + let chunk_id_op_encoding_len = chunk_id_op.encoding_length().map_err(|_e| { + Error::ChunkingError(ChunkError::InternalError("cannot get encoding length")) + })?; + if limit >= chunk_id_op_encoding_len { + Some(limit - chunk_id_op_encoding_len) + } else { + Some(0) + } + } else { + None + }; + + let subtree_multi_chunk_result = self.subtree_multi_chunk_with_limit( + current_index.expect("confirmed is not None"), + temp_limit, + ); + + let limit_too_small_error = matches!( + subtree_multi_chunk_result, + Err(ChunkingError(ChunkError::LimitTooSmall(..))) + ); + + if limit_too_small_error { + if chunk.is_empty() { + // no progress, return limit too small error + return Err(Error::ChunkingError(ChunkError::LimitTooSmall( + "limit too small for initial chunk", + ))); + } else { + // made progress, send accumulated chunk + break; + } + } + + let subtree_multi_chunk = subtree_multi_chunk_result?; + + chunk.push(chunk_id_op); + chunk.push(ChunkOp::Chunk(subtree_multi_chunk.chunk)); + + // update loop parameters + current_index = subtree_multi_chunk.next_index; + current_limit = subtree_multi_chunk.remaining_limit; } + + let index_string = current_index + .map(|index| generate_traversal_instruction_as_string(self.height, index)) + .transpose()?; + + Ok(MultiChunk::new(chunk, index_string, current_limit)) } - /// Gets the next chunk based on the `ChunkProducer`'s internal index state. 
- /// This is mostly useful for letting `ChunkIter` yield the chunks in order, - /// optimizing throughput compared to random access. - fn next_chunk(&mut self) -> Result, Error> { - if self.index == 0 { - if self.trunk.is_empty() { - return Err(Error::ChunkingError( - "Attempted to fetch chunk on empty tree", - )); + /// Packs as many chunks as it can from a starting chunk index, into a + /// vector. Stops when we have exhausted all chunks or we have reached + /// some limit. + fn subtree_multi_chunk_with_limit( + &mut self, + index: usize, + limit: Option, + ) -> Result { + let max_chunk_index = number_of_chunks(self.height); + let mut chunk_index = index; + + // we first get the chunk at the given index + // TODO: use the returned chunk index rather than tracking + let (chunk_ops, _) = self.chunk_with_index(chunk_index)?; + let mut chunk_byte_length = chunk_ops.encoding_length().map_err(|_e| { + Error::ChunkingError(ChunkError::InternalError("can't get encoding length")) + })?; + chunk_index += 1; + + let mut chunk = VecDeque::from(chunk_ops); + + // ensure the limit is not less than first chunk byte length + // if it is we can't proceed and didn't make progress so we return an error + if let Some(limit) = limit { + if chunk_byte_length > limit { + return Err(Error::ChunkingError(ChunkError::LimitTooSmall( + "limit too small for initial chunk", + ))); } - self.index += 1; - return Ok(self.trunk.clone()); } - if self.index >= self.len() { - panic!("Called next_chunk after end"); + let mut iteration_index = 0; + while iteration_index < chunk.len() { + // we only perform replacements on Hash nodes + if matches!(chunk[iteration_index], Op::Push(Node::Hash(..))) { + // TODO: use the returned chunk index rather than tracking + let (replacement_chunk, _) = self.chunk_with_index(chunk_index)?; + + // calculate the new total + let new_total = replacement_chunk.encoding_length().map_err(|_e| { + Error::ChunkingError(ChunkError::InternalError("can't get encoding length")) 
+ })? + chunk_byte_length + - chunk[iteration_index].encoding_length().map_err(|_e| { + Error::ChunkingError(ChunkError::InternalError("can't get encoding length")) + })?; + + // verify that this chunk doesn't make use exceed the limit + if let Some(limit) = limit { + if new_total > limit { + let next_index = match chunk_index > max_chunk_index { + true => None, + _ => Some(chunk_index), + }; + + return Ok(SubtreeChunk::new( + chunk.into(), + next_index, + Some(limit - chunk_byte_length), + )); + } + } + + chunk_byte_length = new_total; + chunk_index += 1; + + chunk.remove(iteration_index); + for op in replacement_chunk.into_iter().rev() { + chunk.insert(iteration_index, op); + } + } else { + iteration_index += 1; + } } - let end_key = self.chunk_boundaries.get(self.index - 1); - let end_key_slice = end_key.as_ref().map(|k| k.as_slice()); + let remaining_limit = limit.map(|l| l - chunk_byte_length); + let next_index = match chunk_index > max_chunk_index { + true => None, + _ => Some(chunk_index), + }; - self.index += 1; + Ok(SubtreeChunk::new(chunk.into(), next_index, remaining_limit)) + } - get_next_chunk(&mut self.raw_iter, end_key_slice).unwrap() + /// Returns the total number of chunks for the underlying Merk tree. + pub fn len(&self) -> usize { + number_of_chunks(self.height) } -} -#[cfg(feature = "full")] -impl<'db, S> IntoIterator for ChunkProducer<'db, S> -where - S: StorageContext<'db>, -{ - type IntoIter = ChunkIter<'db, S>; - type Item = as Iterator>::Item; + pub fn is_empty(&self) -> bool { + number_of_chunks(self.height) == 0 + } - fn into_iter(self) -> Self::IntoIter { - ChunkIter(self) + /// Gets the next chunk based on the `ChunkProducer`'s internal index state. + /// This is mostly useful for letting `ChunkIter` yield the chunks in order, + /// optimizing throughput compared to random access. 
+ // TODO: this is not better than random access, as we are not keeping state + // that will make this more efficient, decide if this should be fixed or not + fn next_chunk(&mut self) -> Option, Option), Error>> { + let max_index = number_of_chunks(self.height); + if self.index > max_index { + return None; + } + + // get the chunk at the given index + // return the next index as a string + Some( + self.chunk_with_index(self.index) + .and_then(|(chunk, chunk_index)| { + chunk_index + .map(|index| generate_traversal_instruction_as_string(self.height, index)) + .transpose() + .map(|v| (chunk, v)) + }), + ) } } -#[cfg(feature = "full")] -/// A `ChunkIter` iterates through all the chunks for the underlying `Merk` -/// instance in order (the first chunk is the "trunk" chunk). Yields `None` -/// after all chunks have been yielded. -pub struct ChunkIter<'db, S>(ChunkProducer<'db, S>) -where - S: StorageContext<'db>; - -#[cfg(feature = "full")] -impl<'db, S> Iterator for ChunkIter<'db, S> +/// Iterate over each chunk, returning `None` after last chunk +impl<'db, S> Iterator for ChunkProducer<'db, S> where S: StorageContext<'db>, { - type Item = Result, Error>; - - fn size_hint(&self) -> (usize, Option) { - (self.0.len(), Some(self.0.len())) - } + type Item = Result<(Vec, Option), Error>; fn next(&mut self) -> Option { - if self.0.index >= self.0.len() { - None - } else { - Some(self.0.next_chunk()) - } + self.next_chunk() } } -#[cfg(feature = "full")] impl<'db, S> Merk where S: StorageContext<'db>, { /// Creates a `ChunkProducer` which can return chunk proofs for replicating /// the entire Merk tree. 
- pub fn chunks(&self) -> Result, Error> { + pub fn chunks(&'db self) -> Result, Error> { ChunkProducer::new(self) } } -#[cfg(feature = "full")] #[cfg(test)] -mod tests { - use grovedb_path::SubtreePath; - use grovedb_storage::{rocksdb_storage::RocksDbStorage, Storage, StorageBatch}; - use tempfile::TempDir; - +mod test { use super::*; use crate::{ - proofs::chunk::{verify_leaf, verify_trunk}, - test_utils::*, - tree::kv::ValueDefinedCostType, + proofs::{ + chunk::{ + chunk::{ + tests::{traverse_get_kv_feature_type, traverse_get_node_hash}, + LEFT, RIGHT, + }, + util::traversal_instruction_as_string, + }, + tree::execute, + Tree, + }, + test_utils::{make_batch_seq, TempMerk}, + tree::RefWalker, + PanicSource, }; - #[test] - fn len_small() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..256); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); - merk.commit(); + #[derive(Default)] + struct NodeCounts { + hash: usize, + kv_hash: usize, + kv: usize, + kv_value_hash: usize, + kv_digest: usize, + kv_ref_value_hash: usize, + kv_value_hash_feature_type: usize, + } + + impl NodeCounts { + fn sum(&self) -> usize { + self.hash + + self.kv_hash + + self.kv + + self.kv_value_hash + + self.kv_digest + + self.kv_ref_value_hash + + self.kv_value_hash_feature_type + } + } - let chunks = merk.chunks().unwrap(); - assert_eq!(chunks.len(), 1); - assert_eq!(chunks.into_iter().size_hint().0, 1); + fn count_node_types(tree: Tree) -> NodeCounts { + let mut counts = NodeCounts::default(); + + tree.visit_nodes(&mut |node| { + match node { + Node::Hash(_) => counts.hash += 1, + Node::KVHash(_) => counts.kv_hash += 1, + Node::KV(..) => counts.kv += 1, + Node::KVValueHash(..) => counts.kv_value_hash += 1, + Node::KVDigest(..) => counts.kv_digest += 1, + Node::KVRefValueHash(..) => counts.kv_ref_value_hash += 1, + Node::KVValueHashFeatureType(..) 
=> counts.kv_value_hash_feature_type += 1, + }; + }); + + counts } #[test] - fn len_big() { + fn test_merk_chunk_len() { + // Tree of height 5 - max of 31 elements, min of 16 elements + // 5 will be broken into 2 layers = [3, 2] + // exit nodes from first layer = 2^3 = 8 + // total_chunk = 1 + 8 = 9 chunks let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..10_000); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); - merk.commit(); + let batch = make_batch_seq(0..20); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(5)); + let chunk_producer = ChunkProducer::new(&merk).unwrap(); + assert_eq!(chunk_producer.len(), 9); - let chunks = merk.chunks().unwrap(); - assert_eq!(chunks.len(), 129); - assert_eq!(chunks.into_iter().size_hint().0, 129); + // Tree of height 10 - max of 1023 elements, min of 512 elements + // 4 layers -> [3,3,2,2] + // chunk_count_per_layer -> [1, 8, 64, 256] + // total = 341 chunks + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..1000); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(10)); + let chunk_producer = ChunkProducer::new(&merk).unwrap(); + assert_eq!(chunk_producer.len(), 329); } #[test] - fn generate_and_verify_chunks() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..10_000); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); - merk.commit(); + fn test_chunk_producer_iter() { + // tree with height 4 + // full tree + // 7 + // / \ + // 3 11 + // / \ / \ + // 1 5 9 13 + // / \ / \ / \ / \ + // 0 2 4 6 8 10 12 14 + // going to be broken into [2, 2] + // that's a total of 5 chunks - let mut chunks = merk.chunks().unwrap().into_iter().map(|x| x.unwrap()); + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + 
assert_eq!(merk.height(), Some(4)); - let chunk = chunks.next().unwrap(); - let (trunk, height) = verify_trunk(chunk.into_iter().map(Ok)).unwrap().unwrap(); - assert_eq!(height, 14); - assert_eq!(trunk.hash().unwrap(), merk.root_hash().unwrap()); + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); - assert_eq!(trunk.layer(7).count(), 128); + // build iterator from first chunk producer + let mut chunks = merk.chunks().expect("should return producer"); - for (ops, node) in chunks.zip(trunk.layer(height / 2)) { - verify_leaf(ops.into_iter().map(Ok), node.hash().unwrap()) - .unwrap() - .unwrap(); + // ensure that the chunks gotten from the iterator is the same + // as that from the chunk producer + for i in 1..=5 { + assert_eq!( + chunks.next().unwrap().unwrap().0, + chunk_producer.chunk_with_index(i).unwrap().0 + ); } + + // returns None after max + assert!(chunks.next().is_none()); } #[test] - fn chunks_from_reopen() { - let tmp_dir = TempDir::new().expect("cannot create tempdir"); - let original_chunks = { - let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) - .expect("cannot open rocksdb storage"); - let batch = StorageBatch::new(); - let mut merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) - .unwrap(), - false, - None::<&fn(&[u8]) -> Option>, - ) - .unwrap() - .unwrap(); - let merk_batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(&merk_batch, &[], None) - .unwrap() - .unwrap(); + fn test_random_chunk_access() { + // tree with height 4 + // full tree + // 7 + // / \ + // 3 11 + // / \ / \ + // 1 5 9 13 + // / \ / \ / \ / \ + // 0 2 4 6 8 10 12 14 + // going to be broken into [2, 2] + // that's a total of 5 chunks - storage - .commit_multi_context_batch(batch, None) - .unwrap() - .expect("cannot commit batch"); - - let merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::empty(), None) - .unwrap(), - false, - None::<&fn(&[u8]) -> 
Option>, - ) + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) .unwrap() - .unwrap(); + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); - merk.chunks() - .unwrap() - .into_iter() - .map(|x| x.unwrap()) - .collect::>() - .into_iter() - }; - let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) - .expect("cannot open rocksdb storage"); - let merk = Merk::open_base( - storage - .get_storage_context(SubtreePath::empty(), None) - .unwrap(), - false, - None::<&fn(&[u8]) -> Option>, - ) - .unwrap() - .unwrap(); - let reopen_chunks = merk.chunks().unwrap().into_iter().map(|x| x.unwrap()); + let mut inner_tree = merk.tree.take().expect("has inner tree"); + merk.tree.set(Some(inner_tree.clone())); - for (original, checkpoint) in original_chunks.zip(reopen_chunks) { - assert_eq!(original.len(), checkpoint.len()); - } - } + // TODO: should I be using panic source? + let mut tree_walker = RefWalker::new(&mut inner_tree, PanicSource {}); - // #[test] - // fn chunks_from_checkpoint() { - // let mut merk = TempMerk::new(); - // let batch = make_batch_seq(1..10); - // merk.apply(batch.as_slice(), &[]).unwrap(); + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + assert_eq!(chunk_producer.len(), 5); - // let path: std::path::PathBuf = - // "generate_and_verify_chunks_from_checkpoint.db".into(); if path. 
- // exists() { std::fs::remove_dir_all(&path).unwrap(); - // } - // let checkpoint = merk.checkpoint(&path).unwrap(); + // assert bounds + assert!(chunk_producer.chunk_with_index(0).is_err()); + assert!(chunk_producer.chunk_with_index(6).is_err()); - // let original_chunks = - // merk.chunks().unwrap().into_iter().map(Result::unwrap); - // let checkpoint_chunks = - // checkpoint.chunks().unwrap().into_iter().map(Result::unwrap); + // first chunk + // expected: + // 7 + // / \ + // 3 11 + // / \ / \ + // H(1) H(5) H(9) H(13) + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(1) + .expect("should generate chunk"); + assert_eq!(chunk.len(), 13); + assert_eq!(next_chunk, Some(2)); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_node_hash(&mut tree_walker, &[LEFT, LEFT])), + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[LEFT])), + Op::Parent, + Op::Push(traverse_get_node_hash(&mut tree_walker, &[LEFT, RIGHT])), + Op::Child, + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[])), + Op::Parent, + Op::Push(traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT])), + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[RIGHT])), + Op::Parent, + Op::Push(traverse_get_node_hash(&mut tree_walker, &[RIGHT, RIGHT])), + Op::Child, + Op::Child + ] + ); - // for (original, checkpoint) in original_chunks.zip(checkpoint_chunks) { - // assert_eq!(original.len(), checkpoint.len()); - // } + // second chunk + // expected: + // 1 + // / \ + // 0 2 + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(2) + .expect("should generate chunk"); + assert_eq!(chunk.len(), 5); + assert_eq!(next_chunk, Some(3)); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT] + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT, RIGHT] + )), + Op::Child + ] + ); - // 
std::fs::remove_dir_all(&path).unwrap(); - // } + // third chunk + // expected: + // 5 + // / \ + // 4 6 + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(3) + .expect("should generate chunk"); + assert_eq!(chunk.len(), 5); + assert_eq!(next_chunk, Some(4)); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT] + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT, RIGHT] + )), + Op::Child + ] + ); + + // third chunk + // expected: + // 9 + // / \ + // 8 10 + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(4) + .expect("should generate chunk"); + assert_eq!(chunk.len(), 5); + assert_eq!(next_chunk, Some(5)); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT] + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT] + )), + Op::Child + ] + ); + + // third chunk + // expected: + // 13 + // / \ + // 12 14 + let (chunk, next_chunk) = chunk_producer + .chunk_with_index(5) + .expect("should generate chunk"); + assert_eq!(chunk.len(), 5); + assert_eq!(next_chunk, None); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT] + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT, RIGHT] + )), + Op::Child + ] + ); + } #[test] - fn random_access_chunks() { + fn test_subtree_chunk_no_limit() { + // tree of height 4 + // 5 chunks let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..111); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); + let batch = 
make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + // generate multi chunk with no limit + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, None) + .expect("should generate chunk with limit"); - let chunks = merk - .chunks() + assert_eq!(chunk_result.remaining_limit, None); + assert_eq!(chunk_result.next_index, None); + + let tree = execute(chunk_result.chunk.into_iter().map(Ok), false, |_| Ok(())) .unwrap() - .into_iter() - .map(|x| x.unwrap()) - .collect::>(); - - let mut producer = merk.chunks().unwrap(); - for i in 0..chunks.len() * 2 { - let index = i % chunks.len(); - assert_eq!(producer.chunk(index).unwrap(), chunks[index]); - } + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + // assert that all nodes are of type kv_value_hash_feature_type + let node_counts = count_node_types(tree); + assert_eq!(node_counts.hash, 0); + assert_eq!(node_counts.kv_hash, 0); + assert_eq!(node_counts.kv, 0); + assert_eq!(node_counts.kv_value_hash, 0); + assert_eq!(node_counts.kv_digest, 0); + assert_eq!(node_counts.kv_ref_value_hash, 0); + assert_eq!(node_counts.kv_value_hash_feature_type, 15); } #[test] - #[should_panic(expected = "Attempted to fetch chunk on empty tree")] - fn test_chunk_empty() { - let merk = TempMerk::new(); + fn test_subtree_chunk_with_limit() { + // tree of height 4 + // 5 chunks + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); - let _chunks = merk - .chunks() + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + + // initial chunk is of size 453, so limit of 10 is too small + // should return an error + let 
chunk = chunk_producer.subtree_multi_chunk_with_limit(1, Some(10)); + assert!(chunk.is_err()); + + // get just the fist chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(453)) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, Some(2)); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 453); + assert_eq!(chunk.len(), 13); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 3); + assert_eq!(node_counts.hash, 4); + assert_eq!(node_counts.sum(), 4 + 3); + + // get up to second chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(737)) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, Some(3)); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 737); + assert_eq!(chunk.len(), 17); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) .unwrap() - .into_iter() - .map(|x| x.unwrap()) - .collect::>(); + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 6); + assert_eq!(node_counts.hash, 3); + assert_eq!(node_counts.sum(), 6 + 3); + + // get up to third chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(1021)) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, Some(4)); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 
1021); + assert_eq!(chunk.len(), 21); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 9); + assert_eq!(node_counts.hash, 2); + assert_eq!(node_counts.sum(), 9 + 2); + + // get up to fourth chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(1305)) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, Some(5)); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 1305); + assert_eq!(chunk.len(), 25); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 12); + assert_eq!(node_counts.hash, 1); + assert_eq!(node_counts.sum(), 12 + 1); + + // get up to fifth chunk + let chunk_result = chunk_producer + .subtree_multi_chunk_with_limit(1, Some(1589)) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(0)); + assert_eq!(chunk_result.next_index, None); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 1589); + assert_eq!(chunk.len(), 29); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 15); + assert_eq!(node_counts.hash, 0); + assert_eq!(node_counts.sum(), 15); + + // limit larger than total chunk + let chunk_result = chunk_producer + 
.subtree_multi_chunk_with_limit(1, Some(usize::MAX)) + .expect("should generate chunk with limit"); + assert_eq!(chunk_result.remaining_limit, Some(18446744073709550026)); + assert_eq!(chunk_result.next_index, None); + + let chunk = chunk_result.chunk; + assert_eq!(chunk.encoding_length().unwrap(), 1589); + assert_eq!(chunk.len(), 29); // op count + let tree = execute(chunk.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + + let node_counts = count_node_types(tree); + assert_eq!(node_counts.kv_value_hash_feature_type, 15); + assert_eq!(node_counts.hash, 0); + assert_eq!(node_counts.sum(), 15); } #[test] - #[should_panic(expected = "Chunk index out-of-bounds")] - fn test_chunk_index_oob() { + fn test_multi_chunk_with_no_limit_trunk() { + // tree of height 4 + // 5 chunks let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..42); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); - let mut producer = merk.chunks().unwrap(); - let _chunk = producer.chunk(50000).unwrap(); + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + + // we generate the chunk starting from index 1, this has no hash nodes + // so no multi chunk will be generated + let chunk_result = chunk_producer + .multi_chunk_with_limit_and_index(1, None) + .expect("should generate chunk with limit"); + + assert_eq!(chunk_result.remaining_limit, None); + assert_eq!(chunk_result.next_index, None); + + // should only contain 2 items, the starting chunk id and the entire tree + assert_eq!(chunk_result.chunk.len(), 2); + + // assert items + assert_eq!(chunk_result.chunk[0], ChunkOp::ChunkId(vec![])); + if let ChunkOp::Chunk(chunk) = &chunk_result.chunk[1] { + let tree = 
execute(chunk.clone().into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree.hash().unwrap(), merk.root_hash().unwrap()); + } else { + panic!("expected ChunkOp::Chunk"); + } } - // #[test] - // fn test_chunk_index_gt_1_access() { - // let mut merk = TempMerk::new(); - // let batch = make_batch_seq(1..513); - // merk.apply::<_, Vec<_>>(&batch, &[]).unwrap().unwrap(); - - // let mut producer = merk.chunks().unwrap(); - // println!("length: {}", producer.len()); - // let chunk = producer.chunk(2).unwrap(); - // assert_eq!( - // chunk, - // vec![ - // 3, 8, 0, 0, 0, 0, 0, 0, 0, 18, 0, 60, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 3, 8, 0, 0, 0, 0, 0, 0, 0, 19, 0, 60, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, - // 0, 0, 0, 0, 0, 20, 0, 60, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 17, 3, 8, 0, 0, 0, 0, 0, 0, 0, 21, 0, 60, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 
123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, 0, 0, 0, 0, 0, 22, - // 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 3, 8, 0, 0, - // 0, 0, 0, 0, 0, 23, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 16, 3, 8, 0, 0, 0, 0, 0, 0, 0, 24, 0, 60, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 17, 17, 3, 8, 0, 0, 0, 0, 0, 0, 0, 25, 0, - // 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, - // 0, 0, 0, 0, 0, 26, 0, 60, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 3, 8, 0, 0, 0, 0, 0, 0, 0, 27, 0, 60, 123, - // 123, 123, 123, 123, 123, 123, 
123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, 0, 0, - // 0, 0, 0, 28, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 17, 3, 8, 0, 0, 0, 0, 0, 0, 0, 29, 0, 60, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 16, 3, 8, 0, 0, 0, 0, 0, 0, 0, - // 30, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 3, 8, 0, 0, - // 0, 0, 0, 0, 0, 31, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 16, 3, 8, 0, 0, 0, 0, 0, 0, 0, 32, 0, 60, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 
123, 123, 123, - // 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, - // 123, 123, 123, 123, 123, 123, 17, 17, 17 - // ] - // ); - // } + #[test] + fn test_multi_chunk_with_no_limit_not_trunk() { + // tree of height 4 + // 5 chunks + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + + // we generate the chunk starting from index 2, this has no hash nodes + // so no multi chunk will be generated + let chunk_result = chunk_producer + .multi_chunk_with_limit_and_index(2, None) + .expect("should generate chunk with limit"); + + assert_eq!(chunk_result.remaining_limit, None); + assert_eq!(chunk_result.next_index, None); + + // chunk 2 - 5 will be considered separate subtrees + // each will have an accompanying chunk id, so 8 elements total + assert_eq!(chunk_result.chunk.len(), 8); + + // assert the chunk id's + assert_eq!(chunk_result.chunk[0], ChunkOp::ChunkId(vec![LEFT, LEFT])); + assert_eq!(chunk_result.chunk[2], ChunkOp::ChunkId(vec![LEFT, RIGHT])); + assert_eq!(chunk_result.chunk[4], ChunkOp::ChunkId(vec![RIGHT, LEFT])); + assert_eq!(chunk_result.chunk[6], ChunkOp::ChunkId(vec![RIGHT, RIGHT])); + + // assert the chunks + assert_eq!( + chunk_result.chunk[1], + ChunkOp::Chunk( + chunk_producer + .chunk_with_index(2) + .expect("should generate chunk") + .0 + ) + ); + assert_eq!( + chunk_result.chunk[3], + ChunkOp::Chunk( + chunk_producer + .chunk_with_index(3) + .expect("should generate chunk") + .0 + ) + ); + assert_eq!( + chunk_result.chunk[5], + ChunkOp::Chunk( + chunk_producer + .chunk_with_index(4) + .expect("should generate chunk") + .0 + ) + ); + assert_eq!( + chunk_result.chunk[7], + ChunkOp::Chunk( + chunk_producer + .chunk_with_index(5) + .expect("should generate chunk") + .0 + ) + ); + } #[test] 
- #[should_panic(expected = "Called next_chunk after end")] - fn test_next_chunk_index_oob() { + fn test_multi_chunk_with_limit() { + // tree of height 4 + // 5 chunks let mut merk = TempMerk::new(); - let batch = make_batch_seq(1..42); - merk.apply::<_, Vec<_>>(&batch, &[], None).unwrap().unwrap(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + + // ensure that the remaining limit, next index and values given are correct + // if limit is smaller than first chunk, we should get an error + let chunk_result = chunk_producer.multi_chunk_with_limit("", Some(5)); + assert!(matches!( + chunk_result, + Err(Error::ChunkingError(ChunkError::LimitTooSmall(..))) + )); + + // get chunk 2 + // data size of chunk 2 is exactly 317 + // chunk op encoding for chunk 2 = 321 + // hence limit of 317 will be insufficient + let chunk_result = chunk_producer.multi_chunk_with_limit_and_index(2, Some(317)); + assert!(matches!( + chunk_result, + Err(Error::ChunkingError(ChunkError::LimitTooSmall(..))) + )); - let mut producer = merk.chunks().unwrap(); - let _chunk1 = producer.next_chunk(); - let _chunk2 = producer.next_chunk(); + // get chunk 2 and 3 + // chunk 2 chunk op = 331 + // chunk 3 chunk op = 321 + // padding = 5 + let chunk_result = chunk_producer + .multi_chunk_with_limit_and_index(2, Some(321 + 321 + 5)) + .expect("should generate chunk"); + assert_eq!( + chunk_result.next_index, + Some(traversal_instruction_as_string( + &generate_traversal_instruction(4, 4).unwrap() + )) + ); + assert_eq!(chunk_result.remaining_limit, Some(5)); + assert_eq!(chunk_result.chunk.len(), 4); + assert_eq!(chunk_result.chunk[0], ChunkOp::ChunkId(vec![LEFT, LEFT])); + assert_eq!(chunk_result.chunk[2], ChunkOp::ChunkId(vec![LEFT, RIGHT])); } } diff --git a/merk/src/merk/mod.rs 
b/merk/src/merk/mod.rs index 93c052a4..94b99add 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -29,7 +29,6 @@ //! Merk pub mod chunks; - pub(crate) mod defaults; pub mod options; @@ -45,7 +44,7 @@ pub mod source; use std::{ cell::Cell, - collections::{BTreeSet, LinkedList}, + collections::{BTreeMap, BTreeSet, LinkedList}, fmt, }; @@ -61,11 +60,19 @@ use source::MerkSource; use crate::{ error::Error, merk::{defaults::ROOT_KEY_KEY, options::MerkOptions}, - proofs::{query::query_item::QueryItem, Query}, + proofs::{ + chunk::{ + chunk::{LEFT, RIGHT}, + util::traversal_instruction_as_string, + }, + query::query_item::QueryItem, + Query, + }, tree::{ kv::ValueDefinedCostType, AuxMerkBatch, CryptoHash, Op, RefWalker, TreeNode, NULL_HASH, }, Error::{CostsError, EdError, StorageError}, + Link, MerkType::{BaseMerk, LayeredMerk, StandaloneMerk}, }; @@ -276,6 +283,11 @@ where }) } + /// Returns the height of the Merk tree + pub fn height(&self) -> Option { + self.use_tree(|tree| tree.map(|tree| tree.height())) + } + /// Returns the root non-prefixed key of the tree. If the tree is empty, /// None. pub fn root_key(&self) -> Option> { @@ -536,6 +548,142 @@ where Ok(()).wrap_with_cost(Default::default()) } } + + /// Verifies the correctness of a merk tree + /// hash values are computed correctly, heights are accurate and links + /// consistent with backing store. 
+ // TODO: define the return types + pub fn verify( + &self, + skip_sum_checks: bool, + ) -> (BTreeMap, BTreeMap>) { + let tree = self.tree.take(); + + let mut bad_link_map: BTreeMap = BTreeMap::new(); + let mut parent_keys: BTreeMap> = BTreeMap::new(); + let mut root_traversal_instruction = vec![]; + + // TODO: remove clone + self.verify_tree( + // TODO: handle unwrap + &tree.clone().unwrap(), + &mut root_traversal_instruction, + &mut bad_link_map, + &mut parent_keys, + skip_sum_checks, + ); + self.tree.set(tree); + + (bad_link_map, parent_keys) + } + + fn verify_tree( + &self, + tree: &TreeNode, + traversal_instruction: &mut Vec, + bad_link_map: &mut BTreeMap, + parent_keys: &mut BTreeMap>, + skip_sum_checks: bool, + ) { + if let Some(link) = tree.link(LEFT) { + traversal_instruction.push(LEFT); + self.verify_link( + link, + tree.key(), + traversal_instruction, + bad_link_map, + parent_keys, + skip_sum_checks, + ); + traversal_instruction.pop(); + } + + if let Some(link) = tree.link(RIGHT) { + traversal_instruction.push(RIGHT); + self.verify_link( + link, + tree.key(), + traversal_instruction, + bad_link_map, + parent_keys, + skip_sum_checks, + ); + traversal_instruction.pop(); + } + } + + fn verify_link( + &self, + link: &Link, + parent_key: &[u8], + traversal_instruction: &mut Vec, + bad_link_map: &mut BTreeMap, + parent_keys: &mut BTreeMap>, + skip_sum_checks: bool, + ) { + let (hash, key, sum) = match link { + Link::Reference { hash, key, sum, .. } => { + (hash.to_owned(), key.to_owned(), sum.to_owned()) + } + Link::Modified { tree, .. 
} => ( + tree.hash().unwrap(), + tree.key().to_vec(), + tree.sum().unwrap(), + ), + Link::Loaded { + hash, + child_heights: _, + sum, + tree, + } => (hash.to_owned(), tree.key().to_vec(), sum.to_owned()), + _ => todo!(), + }; + + let instruction_id = traversal_instruction_as_string(traversal_instruction); + let node = TreeNode::get( + &self.storage, + key, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap(); + + if node.is_err() { + bad_link_map.insert(instruction_id.clone(), hash); + parent_keys.insert(instruction_id, parent_key.to_vec()); + return; + } + + let node = node.unwrap(); + if node.is_none() { + bad_link_map.insert(instruction_id.clone(), hash); + parent_keys.insert(instruction_id, parent_key.to_vec()); + return; + } + + let node = node.unwrap(); + if node.hash().unwrap() != hash { + bad_link_map.insert(instruction_id.clone(), hash); + parent_keys.insert(instruction_id, parent_key.to_vec()); + return; + } + + // Need to skip this when restoring a sum tree + if !skip_sum_checks && node.sum().unwrap() != sum { + bad_link_map.insert(instruction_id.clone(), hash); + parent_keys.insert(instruction_id, parent_key.to_vec()); + return; + } + + // TODO: check child heights + // all checks passed, recurse + self.verify_tree( + &node, + traversal_instruction, + bad_link_map, + parent_keys, + skip_sum_checks, + ); + } } fn fetch_node<'db>( @@ -557,6 +705,7 @@ fn fetch_node<'db>( #[cfg(test)] mod test { + use grovedb_path::SubtreePath; use grovedb_storage::{ rocksdb_storage::{PrefixedRocksDbStorageContext, RocksDbStorage}, @@ -598,6 +747,41 @@ mod test { ); } + #[test] + fn tree_height() { + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..1); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(1)); + + // height 2 + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..2); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + 
assert_eq!(merk.height(), Some(2)); + + // height 5 + // 2^5 - 1 = 31 (max number of elements in tree of height 5) + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..31); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(5)); + + // should still be height 5 for 29 elements + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..29); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(5)); + } + #[test] fn insert_uncached() { let batch_size = 20; diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index e6ac22e2..e2439f5c 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -29,263 +29,284 @@ //! Provides `Restorer`, which can create a replica of a Merk instance by //! receiving chunk proofs. -#[cfg(feature = "full")] -use std::{iter::Peekable, u8}; +use std::collections::BTreeMap; -#[cfg(feature = "full")] use grovedb_storage::{Batch, StorageContext}; -#[cfg(feature = "full")] -use super::Merk; -#[cfg(feature = "full")] -use crate::merk::source::MerkSource; -use crate::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] use crate::{ - error::Error, + merk, + merk::MerkSource, proofs::{ - chunk::{verify_leaf, verify_trunk, MIN_TRUNK_HEIGHT}, - tree::{Child, Tree as ProofTree}, + chunk::{ + chunk::{LEFT, RIGHT}, + chunk_op::ChunkOp, + error::{ChunkError, ChunkError::InternalError}, + util::{string_as_traversal_instruction, traversal_instruction_as_string}, + }, + tree::{execute, Child, Tree as ProofTree}, Node, Op, }, - tree::{combine_hash, value_hash, Link, RefWalker, TreeNode}, - CryptoHash, - Error::{CostsError, EdError, StorageError}, - TreeFeatureType::BasicMerkNode, + tree::{combine_hash, kv::ValueDefinedCostType, RefWalker, TreeNode}, + CryptoHash, Error, + Error::{CostsError, StorageError}, + Link, Merk, }; -#[cfg(feature = "full")] -/// A `Restorer` handles decoding, 
verifying, and storing chunk proofs to -/// replicate an entire Merk tree. It expects the chunks to be processed in -/// order, retrying the last chunk if verification fails. +/// Restorer handles verification of chunks and replication of Merk trees. +/// Chunks can be processed randomly as long as their parent has been processed +/// already. pub struct Restorer { - leaf_hashes: Option>>, - parent_keys: Option>>>, - trunk_height: Option, merk: Merk, - expected_root_hash: CryptoHash, - combining_value: Option>, + chunk_id_to_root_hash: BTreeMap, + parent_key_value_hash: Option, + // this is used to keep track of parents whose links need to be rewritten + parent_keys: BTreeMap>, } -#[cfg(feature = "full")] impl<'db, S: StorageContext<'db>> Restorer { - /// Creates a new `Restorer`, which will initialize a new Merk at the given - /// file path. The first chunk (the "trunk") will be compared against - /// `expected_root_hash`, then each subsequent chunk will be compared - /// against the hashes stored in the trunk, so that the restore process will - /// never allow malicious peers to send more than a single invalid chunk. + /// Initializes a new chunk restorer with the expected root hash for the + /// first chunk pub fn new( merk: Merk, - combining_value: Option>, expected_root_hash: CryptoHash, + parent_key_value_hash: Option, ) -> Self { + let mut chunk_id_to_root_hash = BTreeMap::new(); + chunk_id_to_root_hash.insert(traversal_instruction_as_string(&[]), expected_root_hash); Self { - expected_root_hash, - combining_value, - trunk_height: None, merk, - leaf_hashes: None, - parent_keys: None, + chunk_id_to_root_hash, + parent_key_value_hash, + parent_keys: BTreeMap::new(), } } - /// Verifies a chunk and writes it to the working RocksDB instance. Expects - /// to be called for each chunk in order. Returns the number of remaining - /// chunks. - /// - /// Once there are no remaining chunks to be processed, `finalize` should - /// be called. 
- pub fn process_chunk(&mut self, ops: impl IntoIterator) -> Result { - match self.leaf_hashes { - None => self.process_trunk(ops), - Some(_) => self.process_leaf(ops), + // TODO: consider converting chunk id to a vec + /// Processes a chunk at some chunk id, returns the chunks id's of chunks + /// that can be requested + pub fn process_chunk( + &mut self, + chunk_id: String, + chunk: Vec, + ) -> Result, Error> { + let expected_root_hash = self + .chunk_id_to_root_hash + .get(&chunk_id) + .ok_or(Error::ChunkRestoringError(ChunkError::UnexpectedChunk))?; + + let mut parent_key_value_hash: Option = None; + if chunk_id.is_empty() { + parent_key_value_hash = self.parent_key_value_hash; } - } + let chunk_tree = Self::verify_chunk(chunk, expected_root_hash, &parent_key_value_hash)?; - /// Consumes the `Restorer` and returns the newly-created, fully-populated - /// Merk instance. This method will return an error if called before - /// processing all chunks (e.g. `restorer.remaining_chunks()` is not equal - /// to 0). 
- pub fn finalize(mut self) -> Result, Error> { - if self.remaining_chunks().unwrap_or(0) != 0 { - return Err(Error::ChunkRestoringError( - "Called finalize before all chunks were processed".to_string(), - )); - } + let mut root_traversal_instruction = string_as_traversal_instruction(&chunk_id)?; - if self.trunk_height.unwrap() >= MIN_TRUNK_HEIGHT { - self.rewrite_trunk_child_heights()?; + if root_traversal_instruction.is_empty() { + let _ = self.merk.set_base_root_key(Some(chunk_tree.key().to_vec())); + } else { + // every non-root chunk has some associated parent with a placeholder link + // here we update the placeholder link to represent the true data + self.rewrite_parent_link(&chunk_id, &root_traversal_instruction, &chunk_tree)?; } - self.merk - .load_base_root(None:: Option>) - .unwrap()?; + // next up, we need to write the chunk and build the map again + let chunk_write_result = self.write_chunk(chunk_tree, &mut root_traversal_instruction); + if chunk_write_result.is_ok() { + // if we were able to successfully write the chunk, we can remove + // the chunk expected root hash from our chunk id map + self.chunk_id_to_root_hash.remove(&chunk_id); + } - Ok(self.merk) + chunk_write_result } - /// Returns the number of remaining chunks to be processed. If called before - /// the first chunk is processed, this method will return `None` since we do - /// not yet have enough information to know about the number of chunks.
- pub fn remaining_chunks(&self) -> Option { - self.leaf_hashes.as_ref().map(|lh| lh.len()) + /// Process multi chunks (space optimized chunk proofs that can contain + /// multiple singular chunks) + pub fn process_multi_chunk(&mut self, multi_chunk: Vec) -> Result, Error> { + let mut expect_chunk_id = true; + let mut chunk_ids = vec![]; + let mut current_chunk_id: String = "".to_string(); + + for chunk_op in multi_chunk { + if (matches!(chunk_op, ChunkOp::ChunkId(..)) && !expect_chunk_id) + || (matches!(chunk_op, ChunkOp::Chunk(..)) && expect_chunk_id) + { + return Err(Error::ChunkRestoringError(ChunkError::InvalidMultiChunk( + "invalid multi chunk ordering", + ))); + } + match chunk_op { + ChunkOp::ChunkId(instructions) => { + current_chunk_id = traversal_instruction_as_string(&instructions); + } + ChunkOp::Chunk(chunk) => { + // TODO: remove clone + let next_chunk_ids = self.process_chunk(current_chunk_id.clone(), chunk)?; + chunk_ids.extend(next_chunk_ids); + } + } + expect_chunk_id = !expect_chunk_id; + } + Ok(chunk_ids) } - /// Writes the data contained in `tree` (extracted from a verified chunk - /// proof) to the RocksDB. 
- fn write_chunk(&mut self, tree: ProofTree) -> Result<(), Error> { - let mut batch = self.merk.storage.new_batch(); - - tree.visit_refs(&mut |proof_node| { - if let Some((mut node, key)) = match &proof_node.node { - Node::KV(key, value) => Some(( - TreeNode::new(key.clone(), value.clone(), None, BasicMerkNode).unwrap(), - key, - )), - Node::KVValueHash(key, value, value_hash) => Some(( - TreeNode::new_with_value_hash( - key.clone(), - value.clone(), - *value_hash, - BasicMerkNode, - ) - .unwrap(), - key, - )), - Node::KVValueHashFeatureType(key, value, value_hash, feature_type) => Some(( - TreeNode::new_with_value_hash( - key.clone(), - value.clone(), - *value_hash, - *feature_type, - ) - .unwrap(), - key, - )), - _ => None, - } { - // TODO: encode tree node without cloning key/value - *node.slot_mut(true) = proof_node.left.as_ref().map(Child::as_link); - *node.slot_mut(false) = proof_node.right.as_ref().map(Child::as_link); - - let bytes = node.encode(); - batch.put(key, &bytes, None, None).map_err(CostsError) - } else { + /// Verifies the structure of a chunk and ensures the chunk matches the + /// expected root hash + fn verify_chunk( + chunk: Vec, + expected_root_hash: &CryptoHash, + parent_key_value_hash_opt: &Option, + ) -> Result { + let chunk_len = chunk.len(); + let mut kv_count = 0; + let mut hash_count = 0; + + // build tree from ops + // ensure only made of KvValueFeatureType and Hash nodes and count them + let tree = execute(chunk.clone().into_iter().map(Ok), false, |node| { + if matches!(node, Node::KVValueHashFeatureType(..)) { + kv_count += 1; Ok(()) + } else if matches!(node, Node::Hash(..)) { + hash_count += 1; + Ok(()) + } else { + Err(Error::ChunkRestoringError(ChunkError::InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + } + }) + .unwrap()?; + + // chunk len must be exactly equal to the kv_count + hash_count + + // parent_branch_count + child_branch_count + debug_assert_eq!(chunk_len, 
((kv_count + hash_count) * 2) - 1); + + // chunk structure verified, next verify root hash + match parent_key_value_hash_opt { + Some(val_hash) => { + let combined_hash = combine_hash(val_hash, &tree.hash().unwrap()).unwrap(); + if &combined_hash != expected_root_hash { + return Err(Error::ChunkRestoringError(ChunkError::InvalidChunkProof( + "chunk doesn't match expected root hash", + ))); + } } - })?; + None => { + if &tree.hash().unwrap() != expected_root_hash { + return Err(Error::ChunkRestoringError(ChunkError::InvalidChunkProof( + "chunk doesn't match expected root hash", + ))); + } + } + }; + Ok(tree) + } + + /// Write the verified chunk to storage + fn write_chunk( + &mut self, + chunk_tree: ProofTree, + traversal_instruction: &mut Vec, + ) -> Result, Error> { + // this contains all the elements we want to write to storage + let mut batch = self.merk.storage.new_batch(); + let mut new_chunk_ids = Vec::new(); + + chunk_tree.visit_refs_track_traversal_and_parent( + traversal_instruction, + None, + &mut |proof_node, node_traversal_instruction, parent_key| { + match &proof_node.node { + Node::KVValueHashFeatureType(key, value, value_hash, feature_type) => { + // build tree from node value + let mut tree = TreeNode::new_with_value_hash( + key.clone(), + value.clone(), + *value_hash, + *feature_type, + ) + .unwrap(); + + // update tree links + *tree.slot_mut(LEFT) = proof_node.left.as_ref().map(Child::as_link); + *tree.slot_mut(RIGHT) = proof_node.right.as_ref().map(Child::as_link); + + // encode the node and add it to the batch + let bytes = tree.encode(); + + batch.put(key, &bytes, None, None).map_err(CostsError) + } + Node::Hash(hash) => { + // the node hash points to the root of another chunk + // we get the chunk id and add the hash to restorer state + let chunk_id = traversal_instruction_as_string(node_traversal_instruction); + new_chunk_ids.push(chunk_id.clone()); + self.chunk_id_to_root_hash.insert(chunk_id.clone(), *hash); + // TODO: handle unwrap + 
self.parent_keys + .insert(chunk_id, parent_key.unwrap().to_owned()); + Ok(()) + } + _ => { + // we do nothing for other node types + // technically verify chunk will be called before this + // as such this should not be reached + Ok(()) + } + } + }, + )?; + + // write the batch self.merk .storage .commit_batch(batch) .unwrap() - .map_err(StorageError) - } - - /// Verifies the trunk then writes its data to the RocksDB. - fn process_trunk(&mut self, ops: impl IntoIterator) -> Result { - let (trunk, height) = verify_trunk(ops.into_iter().map(Ok)).unwrap()?; - - let root_hash = if self.combining_value.is_none() { - trunk.hash().unwrap() - } else { - combine_hash( - value_hash(self.combining_value.as_ref().expect("confirmed exists")).value(), - &trunk.hash().unwrap(), - ) - .value - }; - - if root_hash != self.expected_root_hash { - return Err(Error::ChunkRestoringError(format!( - "Proof did not match expected hash\n\tExpected: {:?}\n\tActual: {:?}", - self.expected_root_hash, - trunk.hash() - ))); - } - - let root_key = trunk.key().to_vec(); - - let trunk_height = height / 2; - self.trunk_height = Some(trunk_height); - - let chunks_remaining = if trunk_height >= MIN_TRUNK_HEIGHT { - let leaf_hashes = trunk - .layer(trunk_height) - .map(|node| node.hash().unwrap()) - .collect::>() - .into_iter() - .peekable(); - self.leaf_hashes = Some(leaf_hashes); - - let parent_keys = trunk - .layer(trunk_height - 1) - .map(|node| node.key().to_vec()) - .collect::>>() - .into_iter() - .peekable(); - self.parent_keys = Some(parent_keys); - assert_eq!( - self.parent_keys.as_ref().unwrap().len(), - self.leaf_hashes.as_ref().unwrap().len() / 2 - ); - - let chunks_remaining = (2_usize).pow(trunk_height as u32); - assert_eq!(self.remaining_chunks_unchecked(), chunks_remaining); - chunks_remaining - } else { - self.leaf_hashes = Some(vec![].into_iter().peekable()); - self.parent_keys = Some(vec![].into_iter().peekable()); - 0 - }; - - // note that these writes don't happen atomically, which
is fine here - // because if anything fails during the restore process we will just - // scrap the whole restore and start over - self.write_chunk(trunk)?; - self.merk.set_base_root_key(Some(root_key)).unwrap()?; - - Ok(chunks_remaining) - } - - /// Verifies a leaf chunk then writes it to the RocksDB. This needs to be - /// called in order, retrying the last chunk for any failed verifications. - fn process_leaf(&mut self, ops: impl IntoIterator) -> Result { - let leaf_hashes = self.leaf_hashes.as_mut().unwrap(); - let leaf_hash = leaf_hashes - .peek() - .expect("Received more chunks than expected"); - - let leaf = verify_leaf(ops.into_iter().map(Ok), *leaf_hash).unwrap()?; - self.rewrite_parent_link(&leaf)?; - self.write_chunk(leaf)?; - - let leaf_hashes = self.leaf_hashes.as_mut().unwrap(); - leaf_hashes.next(); + .map_err(StorageError)?; - Ok(self.remaining_chunks_unchecked()) + Ok(new_chunk_ids) } - /// The parent of the root node of the leaf does not know the key of its - /// children when it is first written. Now that we have verified this leaf, - /// we can write the key into the parent node's entry. Note that this does - /// not need to recalcuate hashes since it already had the child hash. - fn rewrite_parent_link(&mut self, leaf: &ProofTree) -> Result<(), Error> { - let parent_keys = self.parent_keys.as_mut().unwrap(); - let parent_key = parent_keys.peek().unwrap().clone(); - let mut parent = crate::merk::fetch_node( + /// When we process truncated chunks, the parents of Node::Hash have invalid + /// placeholder for links. + /// When we get the actual chunk associated with the Node::Hash, + /// we need to update the parent link to reflect the correct data. 
+ fn rewrite_parent_link( + &mut self, + chunk_id: &str, + traversal_instruction: &[bool], + chunk_tree: &ProofTree, + ) -> Result<(), Error> { + let parent_key = self + .parent_keys + .get(chunk_id) + .ok_or(Error::ChunkRestoringError(InternalError( + "after successful chunk verification parent key should exist", + )))?; + + let mut parent = merk::fetch_node( &self.merk.storage, parent_key.as_slice(), - None:: Option>, + None::<&fn(&[u8]) -> Option>, )? - .expect("Could not find parent of leaf chunk"); + .ok_or(Error::ChunkRestoringError(InternalError( + "cannot find expected parent in memory, most likely state corruption issue", + )))?; - let is_left_child = self.remaining_chunks_unchecked() % 2 == 0; - if let Some(Link::Reference { ref mut key, .. }) = parent.link_mut(is_left_child) { - *key = leaf.key().to_vec(); - } else { - panic!("Expected parent links to be type Link::Reference"); - }; + let is_left = traversal_instruction + .last() + .expect("rewrite is only called when traversal_instruction is not empty"); + + let updated_key = chunk_tree.key(); + let updated_sum = chunk_tree.sum(); + + if let Some(Link::Reference { key, sum, .. 
}) = parent.link_mut(*is_left) { + *key = updated_key.to_vec(); + *sum = updated_sum; + } let parent_bytes = parent.encode(); self.merk @@ -294,67 +315,66 @@ impl<'db, S: StorageContext<'db>> Restorer { .unwrap() .map_err(StorageError)?; - if !is_left_child { - let parent_keys = self.parent_keys.as_mut().unwrap(); - parent_keys.next(); - } + self.parent_keys + .remove(chunk_id) + .expect("confirmed parent key exists above"); Ok(()) } - fn rewrite_trunk_child_heights(&mut self) -> Result<(), Error> { - fn recurse<'s, 'db, S: StorageContext<'db>>( - mut node: RefWalker>, - remaining_depth: usize, + /// Each node's height is not added to state as such the producer could lie + /// about the height values after replication we need to verify the + /// heights and if invalid recompute the correct values + fn rewrite_heights(&mut self) -> Result<(), Error> { + fn rewrite_child_heights<'s, 'db, S: StorageContext<'db>>( + mut walker: RefWalker>, batch: &mut >::Batch, ) -> Result<(u8, u8), Error> { - if remaining_depth == 0 { - return Ok(node.tree().child_heights()); - } - + // TODO: remove unwrap let mut cloned_node = TreeNode::decode( - node.tree().key().to_vec(), - node.tree().encode().as_slice(), - None:: Option>, + walker.tree().key().to_vec(), + walker.tree().encode().as_slice(), + None::<&fn(&[u8]) -> Option>, ) - .map_err(EdError)?; + .unwrap(); + + let mut left_height = 0; + let mut right_height = 0; - let left_child = node - .walk(true, None::<&fn(&[u8]) -> Option>) + if let Some(left_walker) = walker + .walk(LEFT, None::<&fn(&[u8]) -> Option>) .unwrap()?
- .unwrap(); - let left_child_heights = recurse(left_child, remaining_depth - 1, batch)?; - let left_height = left_child_heights.0.max(left_child_heights.1) + 1; - *cloned_node.link_mut(true).unwrap().child_heights_mut() = left_child_heights; + { + let left_child_heights = rewrite_child_heights(left_walker, batch)?; + left_height = left_child_heights.0.max(left_child_heights.1) + 1; + *cloned_node.link_mut(LEFT).unwrap().child_heights_mut() = left_child_heights; + } - let right_child = node - .walk(false, None::<&fn(&[u8]) -> Option>) + if let Some(right_walker) = walker + .walk(RIGHT, None::<&fn(&[u8]) -> Option>) .unwrap()? - .unwrap(); - let right_child_heights = recurse(right_child, remaining_depth - 1, batch)?; - let right_height = right_child_heights.0.max(right_child_heights.1) + 1; - *cloned_node.link_mut(false).unwrap().child_heights_mut() = right_child_heights; + { + let right_child_heights = rewrite_child_heights(right_walker, batch)?; + right_height = right_child_heights.0.max(right_child_heights.1) + 1; + *cloned_node.link_mut(RIGHT).unwrap().child_heights_mut() = right_child_heights; + } let bytes = cloned_node.encode(); batch - .put(node.tree().key(), &bytes, None, None) + .put(walker.tree().key(), &bytes, None, None) .map_err(CostsError)?; Ok((left_height, right_height)) } - self.merk - .load_base_root(None:: Option>) - .unwrap()?; - let mut batch = self.merk.storage.new_batch(); + // TODO: deal with unwrap + let mut tree = self.merk.tree.take().unwrap(); + let walker = RefWalker::new(&mut tree, self.merk.source()); + + rewrite_child_heights(walker, &mut batch)?; - let depth = self.trunk_height.unwrap(); - self.merk.use_tree_mut(|maybe_tree| { - let tree = maybe_tree.unwrap(); - let walker = RefWalker::new(tree, self.merk.source()); - recurse(walker, depth, &mut batch) - })?; + self.merk.tree.set(Some(tree)); self.merk .storage @@ -363,72 +383,262 @@ impl<'db, S: StorageContext<'db>> Restorer { .map_err(StorageError) } - /// Returns the number of 
remaining chunks to be processed. This method will - /// panic if called before processing the first chunk (since that chunk - /// gives us the information to know how many chunks to expect). - pub fn remaining_chunks_unchecked(&self) -> usize { - self.leaf_hashes.as_ref().unwrap().len() - } -} + /// Rebuild restoration state from partial storage state + fn attempt_state_recovery(&mut self) -> Result<(), Error> { + // TODO: think about the return type some more + let (bad_link_map, parent_keys) = self.merk.verify(false); + if !bad_link_map.is_empty() { + self.chunk_id_to_root_hash = bad_link_map; + self.parent_keys = parent_keys; + } -#[cfg(feature = "full")] -impl<'db, S: StorageContext<'db>> Merk { - /// Creates a new `Restorer`, which can be used to verify chunk proofs to - /// replicate an entire Merk tree. A new Merk instance will be initialized - /// by creating a RocksDB at `path`. - pub fn restore(merk: Merk, expected_root_hash: CryptoHash) -> Restorer { - Restorer::new(merk, None, expected_root_hash) + Ok(()) } -} -#[cfg(feature = "full")] -impl ProofTree { - fn child_heights(&self) -> (u8, u8) { - ( - self.left.as_ref().map_or(0, |c| c.tree.height as u8), - self.right.as_ref().map_or(0, |c| c.tree.height as u8), - ) + /// Consumes the `Restorer` and returns a newly created, fully populated + /// Merk instance. This method will return an error if called before + /// processing all chunks. 
+ pub fn finalize(mut self) -> Result, Error> { + // ensure all chunks have been processed + if !self.chunk_id_to_root_hash.is_empty() || !self.parent_keys.is_empty() { + return Err(Error::ChunkRestoringError( + ChunkError::RestorationNotComplete, + )); + } + + // get the latest version of the root node + let _ = self + .merk + .load_base_root(None::<&fn(&[u8]) -> Option>); + + // if height values are wrong, rewrite height + if self.verify_height().is_err() { + let _ = self.rewrite_heights(); + // update the root node after height rewrite + let _ = self + .merk + .load_base_root(None::<&fn(&[u8]) -> Option>); + } + + if !self.merk.verify(self.merk.is_sum_tree).0.is_empty() { + return Err(Error::ChunkRestoringError(ChunkError::InternalError( + "restored tree invalid", + ))); + } + + Ok(self.merk) } -} -#[cfg(feature = "full")] -impl Child { - fn as_link(&self) -> Link { - let key = match &self.tree.node { - Node::KV(key, _) - | Node::KVValueHash(key, ..) - | Node::KVValueHashFeatureType(key, ..) => key.as_slice(), - // for the connection between the trunk and leaf chunks, we don't - // have the child key so we must first write in an empty one. 
once - // the leaf gets verified, we can write in this key to its parent - _ => &[], + /// Verify that the child heights of the merk tree links correctly represent + /// the tree + fn verify_height(&self) -> Result<(), Error> { + let tree = self.merk.tree.take(); + let height_verification_result = if let Some(tree) = &tree { + self.verify_tree_height(tree, tree.height()) + } else { + Ok(()) }; + self.merk.tree.set(tree); + height_verification_result + } - Link::Reference { - hash: self.hash, - sum: None, - child_heights: self.tree.child_heights(), - key: key.to_vec(), + fn verify_tree_height(&self, tree: &TreeNode, parent_height: u8) -> Result<(), Error> { + let (left_height, right_height) = tree.child_heights(); + + if (left_height.abs_diff(right_height)) > 1 { + return Err(Error::CorruptedState( + "invalid child heights, difference greater than 1 for AVL tree", + )); + } + + let max_child_height = left_height.max(right_height); + if parent_height <= max_child_height || parent_height - max_child_height != 1 { + return Err(Error::CorruptedState( + "invalid child heights, parent height is not 1 less than max child height", + )); } + + let left_link = tree.link(LEFT); + let right_link = tree.link(RIGHT); + + if (left_height == 0 && left_link.is_some()) || (right_height == 0 && right_link.is_some()) + { + return Err(Error::CorruptedState( + "invalid child heights node has child height 0, but hash child", + )); + } + + if let Some(link) = left_link { + let left_tree = link.tree(); + if left_tree.is_none() { + let left_tree = TreeNode::get( + &self.merk.storage, + link.key(), + None::<&fn(&[u8]) -> Option>, + ) + .unwrap()? 
+ .ok_or(Error::CorruptedState("link points to non-existent node"))?; + self.verify_tree_height(&left_tree, left_height)?; + } else { + self.verify_tree_height(left_tree.unwrap(), left_height)?; + } + } + + if let Some(link) = right_link { + let right_tree = link.tree(); + if right_tree.is_none() { + let right_tree = TreeNode::get( + &self.merk.storage, + link.key(), + None::<&fn(&[u8]) -> Option>, + ) + .unwrap()? + .ok_or(Error::CorruptedState("link points to non-existent node"))?; + self.verify_tree_height(&right_tree, right_height)?; + } else { + self.verify_tree_height(right_tree.unwrap(), right_height)?; + } + } + + Ok(()) } } -#[cfg(feature = "full")] #[cfg(test)] mod tests { use grovedb_path::SubtreePath; use grovedb_storage::{ - rocksdb_storage::{test_utils::TempStorage, PrefixedRocksDbImmediateStorageContext}, + rocksdb_storage::{ + test_utils::TempStorage, PrefixedRocksDbImmediateStorageContext, + PrefixedRocksDbStorageContext, + }, RawIterator, Storage, }; use super::*; - use crate::{test_utils::*, tree::Op, MerkBatch}; + use crate::{ + merk::chunks::ChunkProducer, + proofs::chunk::{ + chunk::tests::traverse_get_node_hash, error::ChunkError::InvalidChunkProof, + }, + test_utils::{make_batch_seq, TempMerk}, + Error::ChunkRestoringError, + Merk, PanicSource, + }; + + #[test] + fn test_chunk_verification_non_avl_tree() { + let non_avl_tree_proof = vec![ + Op::Push(Node::KV(vec![1], vec![1])), + Op::Push(Node::KV(vec![2], vec![2])), + Op::Parent, + Op::Push(Node::KV(vec![3], vec![3])), + Op::Parent, + ]; + assert!(Restorer::::verify_chunk( + non_avl_tree_proof, + &[0; 32], + &None + ) + .is_err()); + } + + #[test] + fn test_chunk_verification_only_kv_feature_and_hash() { + // should not accept kv + let invalid_chunk_proof = vec![Op::Push(Node::KV(vec![1], vec![1]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); + assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + 
"expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + + // should not accept kvhash + let invalid_chunk_proof = vec![Op::Push(Node::KVHash([0; 32]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); + assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + + // should not accept kvdigest + let invalid_chunk_proof = vec![Op::Push(Node::KVDigest(vec![0], [0; 32]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); + assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + + // should not accept kvvaluehash + let invalid_chunk_proof = vec![Op::Push(Node::KVValueHash(vec![0], vec![0], [0; 32]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); + assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + + // should not accept kvrefvaluehash + let invalid_chunk_proof = vec![Op::Push(Node::KVRefValueHash(vec![0], vec![0], [0; 32]))]; + let verification_result = Restorer::::verify_chunk( + invalid_chunk_proof, + &[0; 32], + &None, + ); + assert!(matches!( + verification_result, + Err(ChunkRestoringError(InvalidChunkProof( + "expected chunk proof to contain only kvvaluefeaturetype or hash nodes", + ))) + )); + } + + fn get_node_hash(node: Node) -> Result { + match node { + Node::Hash(hash) => Ok(hash), + _ => Err("expected node hash".to_string()), + } + } + + #[test] + fn test_process_chunk_correct_chunk_id_map() { + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + 
.unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let mut merk_tree = merk.tree.take().expect("should have inner tree"); + merk.tree.set(Some(merk_tree.clone())); + let mut tree_walker = RefWalker::new(&mut merk_tree, PanicSource {}); - fn restore_test(batches: &[&MerkBatch>], expected_nodes: usize) { let storage = TempStorage::new(); let tx = storage.start_transaction(); - let mut original = Merk::open_base( + let restoration_merk = Merk::open_base( storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), @@ -437,83 +647,136 @@ mod tests { ) .unwrap() .unwrap(); - for batch in batches { - original - .apply::, Vec<_>>(batch, &[], None) - .unwrap() - .unwrap(); - } - let chunks = original.chunks().unwrap(); + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); - let storage = TempStorage::default(); - let _tx2 = storage.start_transaction(); - let ctx = storage - .get_immediate_storage_context(SubtreePath::empty(), &tx) - .unwrap(); - let merk = Merk::open_base( - ctx, - false, - None::<&fn(&[u8]) -> Option>, - ) - .unwrap() - .unwrap(); - let mut restorer = Merk::restore(merk, original.root_hash().unwrap()); - - assert_eq!(restorer.remaining_chunks(), None); + // at the start both merks should have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); - let mut expected_remaining = chunks.len(); - for chunk in chunks { - let remaining = restorer.process_chunk(chunk.unwrap()).unwrap(); + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); - expected_remaining -= 1; - assert_eq!(remaining, expected_remaining); - assert_eq!(restorer.remaining_chunks().unwrap(), expected_remaining); - } - assert_eq!(expected_remaining, 0); - - let restored = restorer.finalize().unwrap(); - 
assert_eq!(restored.root_hash(), original.root_hash()); - assert_raw_db_entries_eq(&restored, &original, expected_nodes); - } + // initial restorer state should contain just the root hash of the source merk + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!( + restorer.chunk_id_to_root_hash.get(""), + Some(merk.root_hash().unwrap()).as_ref() + ); - #[test] - fn restore_10000() { - restore_test(&[&make_batch_seq(0..10_000)], 10_000); - } + // generate first chunk + let (chunk, _) = chunk_producer.chunk_with_index(1).unwrap(); + // apply first chunk + let new_chunk_ids = restorer + .process_chunk(traversal_instruction_as_string(&vec![]), chunk) + .expect("should process chunk successfully"); + assert_eq!(new_chunk_ids.len(), 4); + + // after first chunk application + // the chunk_map should contain 4 items + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + // assert all the chunk hash values + assert_eq!( + restorer.chunk_id_to_root_hash.get("11"), + Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[LEFT, LEFT])).unwrap()) + .as_ref() + ); + assert_eq!( + restorer.chunk_id_to_root_hash.get("10"), + Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[LEFT, RIGHT])).unwrap()) + .as_ref() + ); + assert_eq!( + restorer.chunk_id_to_root_hash.get("01"), + Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT])).unwrap()) + .as_ref() + ); + assert_eq!( + restorer.chunk_id_to_root_hash.get("00"), + Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[RIGHT, RIGHT])).unwrap()) + .as_ref() + ); - #[test] - fn restore_3() { - restore_test(&[&make_batch_seq(0..3)], 3); - } + // generate second chunk + let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap(); + // apply second chunk + let new_chunk_ids = restorer + .process_chunk(traversal_instruction_as_string(&vec![LEFT, LEFT]), chunk) + .unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + // chunk_map should have 1 less element + 
assert_eq!(restorer.chunk_id_to_root_hash.len(), 3); + assert_eq!(restorer.chunk_id_to_root_hash.get("11"), None); + + // let's try to apply the second chunk again, should not work + let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap(); + // apply second chunk + let chunk_process_result = + restorer.process_chunk(traversal_instruction_as_string(&vec![LEFT, LEFT]), chunk); + assert!(chunk_process_result.is_err()); + assert!(matches!( + chunk_process_result, + Err(Error::ChunkRestoringError(ChunkError::UnexpectedChunk)) + )); + + // next let's get a random but expected chunk and work with that e.g. chunk 4 + // but let's apply it to the wrong place + let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap(); + let chunk_process_result = + restorer.process_chunk(traversal_instruction_as_string(&vec![LEFT, RIGHT]), chunk); + assert!(chunk_process_result.is_err()); + assert!(matches!( + chunk_process_result, + Err(Error::ChunkRestoringError(ChunkError::InvalidChunkProof( + .. + ))) + )); + + // correctly apply chunk 5 + let (chunk, _) = chunk_producer.chunk_with_index(5).unwrap(); + // apply second chunk + let new_chunk_ids = restorer + .process_chunk(traversal_instruction_as_string(&vec![RIGHT, RIGHT]), chunk) + .unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + // chunk_map should have 1 less element + assert_eq!(restorer.chunk_id_to_root_hash.len(), 2); + assert_eq!(restorer.chunk_id_to_root_hash.get("00"), None); + + // correctly apply chunk 3 + let (chunk, _) = chunk_producer.chunk_with_index(3).unwrap(); + // apply second chunk + let new_chunk_ids = restorer + .process_chunk(traversal_instruction_as_string(&vec![LEFT, RIGHT]), chunk) + .unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + // chunk_map should have 1 less element + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!(restorer.chunk_id_to_root_hash.get("10"), None); + + // correctly apply chunk 4 + let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap(); + // apply second 
chunk + let new_chunk_ids = restorer + .process_chunk(traversal_instruction_as_string(&vec![RIGHT, LEFT]), chunk) + .unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + // chunk_map should have 1 less element + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.chunk_id_to_root_hash.get("01"), None); - #[test] - fn restore_2_left_heavy() { - restore_test( - &[ - &[(vec![0], Op::Put(vec![], BasicMerkNode))], - &[(vec![1], Op::Put(vec![], BasicMerkNode))], - ], - 2, - ); - } + // finalize merk + let restored_merk = restorer.finalize().expect("should finalized successfully"); - #[test] - fn restore_2_right_heavy() { - restore_test( - &[ - &[(vec![1], Op::Put(vec![], BasicMerkNode))], - &[(vec![0], Op::Put(vec![], BasicMerkNode))], - ], - 2, + assert_eq!( + restored_merk.root_hash().unwrap(), + merk.root_hash().unwrap() ); } - #[test] - fn restore_1() { - restore_test(&[&make_batch_seq(0..1)], 1); - } - fn assert_raw_db_entries_eq( restored: &Merk, original: &Merk, @@ -528,7 +791,10 @@ mod tests { let mut i = 0; loop { - assert_eq!(restored_entries.valid(), original_entries.valid()); + assert_eq!( + restored_entries.valid().unwrap(), + original_entries.valid().unwrap() + ); if !restored_entries.valid().unwrap() { break; } @@ -544,4 +810,474 @@ mod tests { assert_eq!(i, length); } + + // Builds a source merk with batch_size number of elements + // attempts restoration on some empty merk + // verifies that restoration was performed correctly. 
+ fn test_restoration_single_chunk_strategy(batch_size: u64) { + // build the source merk + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let mut source_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + let batch = make_batch_seq(0..batch_size); + source_merk + .apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + + // build the restoration merk + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + + // at the start + // restoration merk should have empty root hash + // and source merk should have a different root hash + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + assert_ne!( + source_merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + // instantiate chunk producer and restorer + let mut chunk_producer = + ChunkProducer::new(&source_merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, source_merk.root_hash().unwrap(), None); + + // perform chunk production and processing + let mut chunk_id_opt = Some("".to_string()); + while let Some(chunk_id) = chunk_id_opt { + let (chunk, next_chunk_id) = chunk_producer + .chunk(chunk_id.as_str()) + .expect("should get chunk"); + restorer + .process_chunk(chunk_id.to_string(), chunk) + .expect("should process chunk successfully"); + chunk_id_opt = next_chunk_id; + } + + // after chunk processing we should be able to finalize + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.parent_keys.len(), 0); + let restored_merk = restorer.finalize().expect("should finalize"); + + // compare root hash values + assert_eq!( + 
source_merk.root_hash().unwrap(), + restored_merk.root_hash().unwrap() + ); + + assert_raw_db_entries_eq(&restored_merk, &source_merk, batch_size as usize); + } + + #[test] + fn restore_single_chunk_20() { + test_restoration_single_chunk_strategy(20); + } + + #[test] + fn restore_single_chunk_1000() { + test_restoration_single_chunk_strategy(1000); + } + + #[test] + fn test_process_multi_chunk_no_limit() { + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + + // at the start both merks should have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!( + restorer.chunk_id_to_root_hash.get(""), + Some(merk.root_hash().unwrap()).as_ref() + ); + + // generate multi chunk from root with no limit + let chunk = chunk_producer + .multi_chunk_with_limit("", None) + .expect("should generate multichunk"); + + assert_eq!(chunk.chunk.len(), 2); + assert_eq!(chunk.next_index, None); + assert_eq!(chunk.remaining_limit, None); + + let next_ids = restorer + .process_multi_chunk(chunk.chunk) + .expect("should process chunk"); + // should have replicated all chunks + assert_eq!(next_ids.len(), 0); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + 
assert_eq!(restorer.parent_keys.len(), 0); + + let restored_merk = restorer.finalize().expect("should be able to finalize"); + + // compare root hash values + assert_eq!( + restored_merk.root_hash().unwrap(), + merk.root_hash().unwrap() + ); + } + + #[test] + fn test_process_multi_chunk_no_limit_but_non_root() { + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + + // at the start both merks should have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!( + restorer.chunk_id_to_root_hash.get(""), + Some(merk.root_hash().unwrap()).as_ref() + ); + + // first restore the first chunk + let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1).unwrap(); + let new_chunk_ids = restorer + .process_chunk(traversal_instruction_as_string(&vec![]), chunk) + .expect("should process chunk"); + assert_eq!(new_chunk_ids.len(), 4); + assert_eq!(next_chunk_index, Some(2)); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + assert_eq!(restorer.parent_keys.len(), 4); + + // generate multi chunk from the 2nd chunk with no limit + let multi_chunk = chunk_producer + .multi_chunk_with_limit_and_index(next_chunk_index.unwrap(), None) + .unwrap(); + // tree 
of height 4 has 5 chunks + // we have restored the first leaving 4 chunks + // each chunk has an extra chunk id, since they are disjoint + // hence the size of the multi chunk should be 8 + assert_eq!(multi_chunk.chunk.len(), 8); + let new_chunk_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); + assert_eq!(new_chunk_ids.len(), 0); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.parent_keys.len(), 0); + + let restored_merk = restorer.finalize().expect("should be able to finalize"); + + // compare root hash values + assert_eq!( + restored_merk.root_hash().unwrap(), + merk.root_hash().unwrap() + ); + } + + #[test] + fn test_process_multi_chunk_with_limit() { + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + + // at the start both merks should have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + // build multi chunk with with limit of 325 + let multi_chunk = chunk_producer + .multi_chunk_with_limit("", Some(600)) + .unwrap(); + // should only contain the first chunk + assert_eq!(multi_chunk.chunk.len(), 2); + // should point to chunk 2 + assert_eq!(multi_chunk.next_index, Some("11".to_string())); + let next_ids = 
restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); + assert_eq!(next_ids.len(), 4); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + assert_eq!(restorer.parent_keys.len(), 4); + + // subsequent chunks are of size 321 + // with limit just above 642 should get 2 chunks (2 and 3) + // disjoint, so multi chunk len should be 4 + let multi_chunk = chunk_producer + .multi_chunk_with_limit(multi_chunk.next_index.unwrap().as_str(), Some(645)) + .unwrap(); + assert_eq!(multi_chunk.chunk.len(), 4); + assert_eq!(multi_chunk.next_index, Some("01".to_string())); + let next_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); + // chunks 2 and 3 are leaf chunks + assert_eq!(next_ids.len(), 0); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 2); + assert_eq!(restorer.parent_keys.len(), 2); + + // get the last 2 chunks + let multi_chunk = chunk_producer + .multi_chunk_with_limit(multi_chunk.next_index.unwrap().as_str(), Some(645)) + .unwrap(); + assert_eq!(multi_chunk.chunk.len(), 4); + assert_eq!(multi_chunk.next_index, None); + let next_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); + // chunks 2 and 3 are leaf chunks + assert_eq!(next_ids.len(), 0); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.parent_keys.len(), 0); + + // finalize merk + let restored_merk = restorer.finalize().unwrap(); + + // compare root hash values + assert_eq!( + restored_merk.root_hash().unwrap(), + merk.root_hash().unwrap() + ); + } + + // Builds a source merk with batch_size number of elements + // attempts restoration on some empty merk, with multi chunks + // verifies that restoration was performed correctly. 
+ fn test_restoration_multi_chunk_strategy(batch_size: u64, limit: Option) { + // build the source merk + let mut source_merk = TempMerk::new(); + let batch = make_batch_seq(0..batch_size); + source_merk + .apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + + // build the restoration merk + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + + // at the start + // restoration merk should have empty root hash + // and source merk should have a different root hash + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + assert_ne!( + source_merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + // instantiate chunk producer and restorer + let mut chunk_producer = + ChunkProducer::new(&source_merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, source_merk.root_hash().unwrap(), None); + + // perform chunk production and processing + let mut chunk_id_opt = Some("".to_string()); + while let Some(chunk_id) = chunk_id_opt { + let multi_chunk = chunk_producer + .multi_chunk_with_limit(chunk_id.as_str(), limit) + .expect("should get chunk"); + restorer + .process_multi_chunk(multi_chunk.chunk) + .expect("should process chunk successfully"); + chunk_id_opt = multi_chunk.next_index; + } + + // after chunk processing we should be able to finalize + assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); + assert_eq!(restorer.parent_keys.len(), 0); + let restored_merk = restorer.finalize().expect("should finalize"); + + // compare root hash values + assert_eq!( + source_merk.root_hash().unwrap(), + restored_merk.root_hash().unwrap() + ); + } + + #[test] + fn restore_multi_chunk_20_no_limit() { + test_restoration_multi_chunk_strategy(20, None); + } + + #[test] + 
#[should_panic] + fn restore_multi_chunk_20_tiny_limit() { + test_restoration_multi_chunk_strategy(20, Some(1)); + } + + #[test] + fn restore_multi_chunk_20_limit() { + test_restoration_multi_chunk_strategy(20, Some(1200)); + } + + #[test] + fn restore_multi_chunk_10000_limit() { + test_restoration_multi_chunk_strategy(10000, Some(1200)); + } + + #[test] + fn test_restoration_interruption() { + let mut merk = TempMerk::new(); + let batch = make_batch_seq(0..15); + merk.apply::<_, Vec<_>>(&batch, &[], None) + .unwrap() + .expect("apply failed"); + assert_eq!(merk.height(), Some(4)); + + let storage = TempStorage::new(); + let tx = storage.start_transaction(); + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + + // restorer root hash should be empty + assert_eq!(restoration_merk.root_hash().unwrap(), [0; 32]); + + // at the start both merks should have different root hash values + assert_ne!( + merk.root_hash().unwrap(), + restoration_merk.root_hash().unwrap() + ); + + let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!( + restorer.chunk_id_to_root_hash.get(""), + Some(merk.root_hash().unwrap()).as_ref() + ); + + // first restore the first chunk + let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1).unwrap(); + let new_chunk_ids = restorer + .process_chunk(traversal_instruction_as_string(&vec![]), chunk) + .expect("should process chunk"); + assert_eq!(new_chunk_ids.len(), 4); + assert_eq!(next_chunk_index, Some(2)); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + assert_eq!(restorer.parent_keys.len(), 4); + + // store old state for later reference + let old_chunk_id_to_root_hash = 
restorer.chunk_id_to_root_hash.clone(); + let old_parent_keys = restorer.parent_keys.clone(); + + // drop the restorer and the restoration merk + drop(restorer); + // open the restoration merk again and build a restorer from it + let restoration_merk = Merk::open_base( + storage + .get_immediate_storage_context(SubtreePath::empty(), &tx) + .unwrap(), + false, + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap(); + let mut restorer = Restorer::new(restoration_merk, merk.root_hash().unwrap(), None); + + // assert the state of the restorer + assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); + assert_eq!(restorer.parent_keys.len(), 0); + + // recover state + let recovery_attempt = restorer.attempt_state_recovery(); + assert!(recovery_attempt.is_ok()); + assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); + assert_eq!(restorer.parent_keys.len(), 4); + + // assert equality to old state + assert_eq!(old_chunk_id_to_root_hash, restorer.chunk_id_to_root_hash); + assert_eq!(old_parent_keys, restorer.parent_keys); + } } diff --git a/merk/src/proofs/chunk.rs b/merk/src/proofs/chunk.rs index 1e3b9fb1..22334688 100644 --- a/merk/src/proofs/chunk.rs +++ b/merk/src/proofs/chunk.rs @@ -28,614 +28,10 @@ //! Chunk proofs +mod binary_range; #[cfg(feature = "full")] -use grovedb_costs::{ - cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, -}; +pub mod chunk; +pub mod chunk_op; +pub mod error; #[cfg(feature = "full")] -use grovedb_storage::RawIterator; -#[cfg(feature = "full")] -use { - super::tree::{execute, Tree as ProofTree}, - crate::tree::CryptoHash, - crate::tree::TreeNode, -}; - -#[cfg(feature = "full")] -use super::{Node, Op}; -use crate::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] -use crate::{ - error::Error, - tree::{Fetch, RefWalker}, - Error::EdError, - TreeFeatureType::BasicMerkNode, -}; - -/// The minimum number of layers the trunk will be guaranteed to have before -/// splitting into multiple chunks. 
If the tree's height is less than double -/// this value, the trunk should be verified as a leaf chunk. -#[cfg(feature = "full")] -pub const MIN_TRUNK_HEIGHT: usize = 5; - -#[cfg(feature = "full")] -impl<'a, S> RefWalker<'a, S> -where - S: Fetch + Sized + Clone, -{ - /// Generates a trunk proof by traversing the tree. - /// - /// Returns a tuple containing the produced proof, and a boolean indicating - /// whether or not there will be more chunks to follow. If the chunk - /// contains the entire tree, the boolean will be `false`, if the chunk - /// is abridged and will be connected to leaf chunks, it will be `true`. - pub fn create_trunk_proof(&mut self) -> CostResult<(Vec, bool), Error> { - let approx_size = 2usize.pow((self.tree().height() / 2) as u32) * 3; - let mut proof = Vec::with_capacity(approx_size); - - self.traverse_for_height_proof(&mut proof, 1) - .flat_map_ok(|trunk_height| { - if trunk_height < MIN_TRUNK_HEIGHT { - proof.clear(); - self.traverse_for_trunk(&mut proof, usize::MAX, true) - .map_ok(|_| Ok((proof, false))) - } else { - self.traverse_for_trunk(&mut proof, trunk_height, true) - .map_ok(|_| Ok((proof, true))) - } - }) - .flatten() - } - - /// Traverses down the left edge of the tree and pushes ops to the proof, to - /// act as a proof of the height of the tree. This is the first step in - /// generating a trunk proof. 
- fn traverse_for_height_proof( - &mut self, - proof: &mut Vec, - depth: usize, - ) -> CostResult { - let mut cost = OperationCost::default(); - let maybe_left = match self - .walk(true, None::<&fn(&[u8]) -> Option>) - .unwrap_add_cost(&mut cost) - { - Ok(maybe_left) => maybe_left, - Err(e) => { - return Err(e).wrap_with_cost(cost); - } - }; - let has_left_child = maybe_left.is_some(); - - let trunk_height = if let Some(mut left) = maybe_left { - match left - .traverse_for_height_proof(proof, depth + 1) - .unwrap_add_cost(&mut cost) - { - Ok(x) => x, - Err(e) => return Err(e).wrap_with_cost(cost), - } - } else { - depth / 2 - }; - - if depth > trunk_height { - proof.push(Op::Push(self.to_kvhash_node())); - - if has_left_child { - proof.push(Op::Parent); - } - - if let Some(right) = self.tree().link(false) { - proof.push(Op::Push(Node::Hash(*right.hash()))); - proof.push(Op::Child); - } - } - - Ok(trunk_height).wrap_with_cost(cost) - } - - /// Traverses down the tree and adds KV push ops for all nodes up to a - /// certain depth. This expects the proof to contain a height proof as - /// generated by `traverse_for_height_proof`. 
- fn traverse_for_trunk( - &mut self, - proof: &mut Vec, - remaining_depth: usize, - is_leftmost: bool, - ) -> CostResult<(), Error> { - let mut cost = OperationCost::default(); - - if remaining_depth == 0 { - // return early if we have reached bottom of trunk - - // for leftmost node, we already have height proof - if is_leftmost { - return Ok(()).wrap_with_cost(cost); - } - - // add this node's hash - proof.push(Op::Push(self.to_hash_node().unwrap_add_cost(&mut cost))); - - return Ok(()).wrap_with_cost(cost); - } - - // traverse left - let has_left_child = self.tree().link(true).is_some(); - if has_left_child { - let mut left = cost_return_on_error!( - &mut cost, - self.walk(true, None::<&fn(&[u8]) -> Option>) - ) - .unwrap(); - cost_return_on_error!( - &mut cost, - left.traverse_for_trunk(proof, remaining_depth - 1, is_leftmost) - ); - } - - // add this node's data - proof.push(Op::Push(self.to_kv_value_hash_feature_type_node())); - - if has_left_child { - proof.push(Op::Parent); - } - - // traverse right - if let Some(mut right) = cost_return_on_error!( - &mut cost, - self.walk(false, None::<&fn(&[u8]) -> Option>) - ) { - cost_return_on_error!( - &mut cost, - right.traverse_for_trunk(proof, remaining_depth - 1, false) - ); - proof.push(Op::Child); - } - - Ok(()).wrap_with_cost(cost) - } -} - -/// Builds a chunk proof by iterating over values in a RocksDB, ending the chunk -/// when a node with key `end_key` is encountered. -/// -/// Advances the iterator for all nodes in the chunk and the `end_key` (if any). 
-#[cfg(feature = "full")] -pub(crate) fn get_next_chunk( - iter: &mut impl RawIterator, - end_key: Option<&[u8]>, -) -> CostResult, Error> { - let mut cost = OperationCost::default(); - - let mut chunk = Vec::with_capacity(512); - let mut stack = Vec::with_capacity(32); - let mut node = TreeNode::new(vec![], vec![], None, BasicMerkNode).unwrap_add_cost(&mut cost); - - while iter.valid().unwrap_add_cost(&mut cost) { - let key = iter.key().unwrap_add_cost(&mut cost).unwrap(); - - if let Some(end_key) = end_key { - if key == end_key { - break; - } - } - - let encoded_node = iter.value().unwrap_add_cost(&mut cost).unwrap(); - cost_return_on_error_no_add!( - &cost, - TreeNode::decode_into( - &mut node, - vec![], - encoded_node, - None:: Option> - ) - .map_err(EdError) - ); - - // TODO: Only use the KVValueHash if needed, saves 32 bytes - // only needed when dealing with references and trees - let kv = Node::KVValueHashFeatureType( - key.to_vec(), - node.value_ref().to_vec(), - *node.value_hash(), - node.feature_type(), - ); - - chunk.push(Op::Push(kv)); - - if node.link(true).is_some() { - chunk.push(Op::Parent); - } - - if let Some(child) = node.link(false) { - stack.push(child.key().to_vec()); - } else { - while let Some(top_key) = stack.last() { - if key < top_key.as_slice() { - break; - } - stack.pop(); - chunk.push(Op::Child); - } - } - - iter.next().unwrap_add_cost(&mut cost); - } - - if iter.valid().unwrap_add_cost(&mut cost) { - iter.next().unwrap_add_cost(&mut cost); - } - - Ok(chunk).wrap_with_cost(cost) -} - -/// Verifies a leaf chunk proof by executing its operators. Checks that there -/// were no abridged nodes (Hash or KVHash) and the proof hashes to -/// `expected_hash`. -#[cfg(feature = "full")] -#[allow(dead_code)] // TODO: remove when proofs will be enabled -pub(crate) fn verify_leaf>>( - ops: I, - expected_hash: CryptoHash, -) -> CostResult { - execute(ops, false, |node| match node { - Node::KVValueHash(..) | Node::KV(..) 
| Node::KVValueHashFeatureType(..) => Ok(()), - _ => Err(Error::ChunkRestoringError( - "Leaf chunks must contain full subtree".to_string(), - )), - }) - .flat_map_ok(|tree| { - tree.hash().map(|hash| { - if hash != expected_hash { - Error::ChunkRestoringError(format!( - "Leaf chunk proof did not match expected hash\n\tExpected: {:?}\n\tActual: \ - {:?}", - expected_hash, - tree.hash() - )); - } - Ok(tree) - }) - }) -} - -/// Verifies a trunk chunk proof by executing its operators. Ensures the -/// resulting tree contains a valid height proof, the trunk is the correct -/// height, and all of its inner nodes are not abridged. Returns the tree and -/// the height given by the height proof. -#[cfg(feature = "full")] -pub(crate) fn verify_trunk>>( - ops: I, -) -> CostResult<(ProofTree, usize), Error> { - let mut cost = OperationCost::default(); - - fn verify_height_proof(tree: &ProofTree) -> Result { - Ok(match tree.child(true) { - Some(child) => { - if let Node::Hash(_) = child.tree.node { - return Err(Error::ChunkRestoringError( - "Expected height proof to only contain KV and KVHash nodes".to_string(), - )); - } - verify_height_proof(&child.tree)? + 1 - } - None => 1, - }) - } - - fn verify_completeness( - tree: &ProofTree, - remaining_depth: usize, - leftmost: bool, - ) -> Result<(), Error> { - let recurse = |left, leftmost| { - if let Some(child) = tree.child(left) { - verify_completeness(&child.tree, remaining_depth - 1, left && leftmost)?; - } - Ok(()) - }; - - if remaining_depth > 0 { - match tree.node { - Node::KVValueHash(..) | Node::KV(..) | Node::KVValueHashFeatureType(..) 
=> {} - _ => { - return Err(Error::ChunkRestoringError( - "Expected trunk inner nodes to contain keys and values".to_string(), - )) - } - } - recurse(true, leftmost)?; - recurse(false, false) - } else if !leftmost { - match tree.node { - Node::Hash(_) => Ok(()), - _ => Err(Error::ChunkRestoringError( - "Expected trunk leaves to contain Hash nodes".to_string(), - )), - } - } else { - match &tree.node { - Node::KVHash(_) => Ok(()), - _ => Err(Error::ChunkRestoringError( - "Expected leftmost trunk leaf to contain KVHash node".to_string(), - )), - } - } - } - - let mut kv_only = true; - let tree = cost_return_on_error!( - &mut cost, - execute(ops, false, |node| { - kv_only &= matches!(node, Node::KVValueHash(..)) - || matches!(node, Node::KV(..)) - || matches!(node, Node::KVValueHashFeatureType(..)); - Ok(()) - }) - ); - - let height = cost_return_on_error_no_add!(&cost, verify_height_proof(&tree)); - let trunk_height = height / 2; - - if trunk_height < MIN_TRUNK_HEIGHT { - if !kv_only { - return Err(Error::ChunkRestoringError( - "Leaf chunks must contain full subtree".to_string(), - )) - .wrap_with_cost(cost); - } - } else { - cost_return_on_error_no_add!(&cost, verify_completeness(&tree, trunk_height, true)); - } - - Ok((tree, height)).wrap_with_cost(cost) -} - -#[cfg(feature = "full")] -#[cfg(test)] -mod tests { - use std::usize; - - use grovedb_storage::StorageContext; - - use super::{super::tree::Tree, *}; - use crate::{ - test_utils::*, - tree::{NoopCommit, PanicSource, TreeNode as BaseTree}, - }; - - #[derive(Default)] - struct NodeCounts { - hash: usize, - kv_hash: usize, - kv: usize, - kv_value_hash: usize, - kv_digest: usize, - kv_ref_value_hash: usize, - kv_value_hash_feature_type: usize, - } - - fn count_node_types(tree: Tree) -> NodeCounts { - let mut counts = NodeCounts::default(); - - tree.visit_nodes(&mut |node| { - match node { - Node::Hash(_) => counts.hash += 1, - Node::KVHash(_) => counts.kv_hash += 1, - Node::KV(..) 
=> counts.kv += 1, - Node::KVValueHash(..) => counts.kv_value_hash += 1, - Node::KVDigest(..) => counts.kv_digest += 1, - Node::KVRefValueHash(..) => counts.kv_ref_value_hash += 1, - Node::KVValueHashFeatureType(..) => counts.kv_value_hash_feature_type += 1, - }; - }); - - counts - } - - #[test] - fn small_trunk_roundtrip() { - let mut tree = make_tree_seq(31); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - // println!("{:?}", &proof); - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 32); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn big_trunk_roundtrip() { - let mut tree = make_tree_seq(2u64.pow(MIN_TRUNK_HEIGHT as u32 * 2 + 1) - 1); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(has_more); - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - - let counts = count_node_types(trunk); - // are these formulas correct for all values of `MIN_TRUNK_HEIGHT`? 
🤔 - assert_eq!( - counts.hash, - 2usize.pow(MIN_TRUNK_HEIGHT as u32) + MIN_TRUNK_HEIGHT - 1 - ); - assert_eq!( - counts.kv_value_hash_feature_type, - 2usize.pow(MIN_TRUNK_HEIGHT as u32) - 1 - ); - assert_eq!(counts.kv_hash, MIN_TRUNK_HEIGHT + 1); - } - - #[test] - fn one_node_tree_trunk_roundtrip() { - let mut tree = BaseTree::new(vec![0], vec![], None, BasicMerkNode).unwrap(); - tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) - .unwrap() - .unwrap(); - - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 1); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn two_node_right_heavy_tree_trunk_roundtrip() { - // 0 - // \ - // 1 - let mut tree = BaseTree::new(vec![0], vec![], None, BasicMerkNode) - .unwrap() - .attach( - false, - Some(BaseTree::new(vec![1], vec![], None, BasicMerkNode).unwrap()), - ); - tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) - .unwrap() - .unwrap(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 2); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn two_node_left_heavy_tree_trunk_roundtrip() { - // 1 - // / - // 0 - let mut tree = BaseTree::new(vec![1], vec![], None, BasicMerkNode) - .unwrap() - .attach( - true, - Some(BaseTree::new(vec![0], vec![], None, BasicMerkNode).unwrap()), - ); - tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) - .unwrap() - .unwrap(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let (proof, 
has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 2); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn three_node_tree_trunk_roundtrip() { - // 1 - // / \ - // 0 2 - let mut tree = BaseTree::new(vec![1], vec![], None, BasicMerkNode) - .unwrap() - .attach( - true, - Some(BaseTree::new(vec![0], vec![], None, BasicMerkNode).unwrap()), - ) - .attach( - false, - Some(BaseTree::new(vec![2], vec![], None, BasicMerkNode).unwrap()), - ); - tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) - .unwrap() - .unwrap(); - - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let (proof, has_more) = walker.create_trunk_proof().unwrap().unwrap(); - assert!(!has_more); - - let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap().unwrap(); - let counts = count_node_types(trunk); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_value_hash_feature_type, 3); - assert_eq!(counts.kv_hash, 0); - } - - #[test] - fn leaf_chunk_roundtrip() { - let mut merk = TempMerk::new(); - let batch = make_batch_seq(0..31); - merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None) - .unwrap() - .unwrap(); - - merk.commit(); - - let root_node = merk.tree.take(); - let root_key = root_node.as_ref().unwrap().key().to_vec(); - merk.tree.set(root_node); - - // whole tree as 1 leaf - let mut iter = merk.storage.raw_iter(); - iter.seek_to_first().unwrap(); - let chunk = get_next_chunk(&mut iter, None).unwrap().unwrap(); - let ops = chunk.into_iter().map(Ok); - let chunk = verify_leaf(ops, merk.root_hash().unwrap()) - .unwrap() - .unwrap(); - let counts = count_node_types(chunk); - assert_eq!(counts.kv_value_hash_feature_type, 31); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_hash, 0); - drop(iter); - - let mut iter = merk.storage.raw_iter(); - 
iter.seek_to_first().unwrap(); - - // left leaf - let chunk = get_next_chunk(&mut iter, Some(root_key.as_slice())) - .unwrap() - .unwrap(); - let ops = chunk.into_iter().map(Ok); - let chunk = verify_leaf( - ops, - [ - 78, 230, 25, 188, 163, 2, 169, 185, 254, 174, 196, 206, 162, 187, 245, 188, 74, 70, - 220, 160, 35, 78, 120, 122, 61, 90, 241, 105, 35, 180, 133, 98, - ], - ) - .unwrap() - .unwrap(); - let counts = count_node_types(chunk); - assert_eq!(counts.kv_value_hash_feature_type, 15); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_hash, 0); - - // right leaf - let chunk = get_next_chunk(&mut iter, None).unwrap().unwrap(); - let ops = chunk.into_iter().map(Ok); - let chunk = verify_leaf( - ops, - [ - 21, 147, 223, 29, 106, 19, 23, 38, 233, 134, 245, 44, 246, 179, 48, 19, 111, 50, - 19, 191, 134, 37, 165, 5, 35, 111, 233, 213, 212, 5, 92, 45, - ], - ) - .unwrap() - .unwrap(); - let counts = count_node_types(chunk); - assert_eq!(counts.kv_value_hash_feature_type, 15); - assert_eq!(counts.hash, 0); - assert_eq!(counts.kv_hash, 0); - } -} +pub mod util; diff --git a/merk/src/proofs/chunk/binary_range.rs b/merk/src/proofs/chunk/binary_range.rs new file mode 100644 index 00000000..2acaa728 --- /dev/null +++ b/merk/src/proofs/chunk/binary_range.rs @@ -0,0 +1,239 @@ +// MIT LICENSE +// +// Copyright (c) 2021 Dash Core Group +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +const LEFT: bool = true; +const RIGHT: bool = false; + +/// Utility type for range bisection and advancement +#[derive(Debug)] +pub(crate) struct BinaryRange { + start: usize, + end: usize, +} + +impl BinaryRange { + /// Returns a new BinaryRange and ensures that start < end + /// and min start value is 1 + pub fn new(start: usize, end: usize) -> Result { + // start should be less than or equal to end + if start > end { + return Err(String::from("start value cannot be greater than end value")); + } + + // the minimum value for start should be 1 + // that way the length of the maximum length + // of the range is usize::MAX and not + // usize::MAX + 1 + if start < 1 { + return Err(String::from( + "minimum start value should be 1 to avoid len overflow", + )); + } + + Ok(Self { start, end }) + } + + /// Returns the len of the current range + pub fn len(&self) -> usize { + self.end - self.start + 1 + } + + /// Returns true when the len of the range is odd + pub fn odd(&self) -> bool { + (self.len() % 2) != 0 + } + + /// Determines if a value belongs to the left half or right half of a range + /// returns true for left and false for right + /// returns None if value is outside the range or range len is odd + pub fn which_half(&self, value: usize) -> Option { + // return None if value is not in the range + if value < self.start || value > self.end { + return None; + } + + // can't divide the range into equal halves + // when odd, so return None + if self.odd() { + return None; 
+ } + + let half_size = self.len() / 2; + let second_half_start = self.start + half_size; + + if value >= second_half_start { + return Some(RIGHT); + } + + Some(LEFT) + } + + /// Returns a new range that only contains elements on the specified half + /// returns an error if range is not odd + pub fn get_half(&self, left: bool) -> Result { + if self.odd() { + return Err(String::from("cannot break odd range in half")); + } + + let half_size = self.len() / 2; + let second_half_start = self.start + half_size; + + Ok(if left { + Self { + start: self.start, + end: second_half_start - 1, + } + } else { + Self { + start: second_half_start, + end: self.end, + } + }) + } + + /// Returns a new range that increments the start value + /// also return the previous start value + /// returns an error if the operation will cause start to be larger than end + pub fn advance_range_start(&self) -> Result<(Self, usize), String> { + // check if operation will cause start > end + if self.start == self.end { + return Err(String::from( + "can't advance start when start is equal to end", + )); + } + + Ok(( + Self { + start: self.start + 1, + end: self.end, + }, + self.start, + )) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn cannot_create_invalid_range() { + let invalid_range = BinaryRange::new(5, 3); + assert!(invalid_range.is_err()); + } + + #[test] + fn can_get_range_len() { + let range = BinaryRange::new(2, 5).expect("should create range"); + assert_eq!(range.len(), 4); + assert!(!range.odd()); + + let range = BinaryRange::new(2, 2).expect("should create range"); + assert_eq!(range.len(), 1); + assert!(range.odd()); + } + + #[test] + fn can_determine_correct_half() { + let range = BinaryRange::new(3, 7).expect("should create range"); + assert_eq!(range.len(), 5); + assert!(range.odd()); + + // cannot determine half for value outside a range + assert!(range.which_half(1).is_none()); + assert!(range.which_half(7).is_none()); + + // cannot determine half when range is 
odd + assert!(range.which_half(3).is_none()); + + let range = BinaryRange::new(3, 6).expect("should create range"); + assert_eq!(range.len(), 4); + assert!(!range.odd()); + + assert_eq!(range.which_half(3), Some(LEFT)); + assert_eq!(range.which_half(4), Some(LEFT)); + assert_eq!(range.which_half(5), Some(RIGHT)); + assert_eq!(range.which_half(6), Some(RIGHT)); + } + + #[test] + fn can_advance_start_range() { + let range = BinaryRange::new(2, 5).expect("should create range"); + assert_eq!(range.len(), 4); + assert_eq!(range.start, 2); + + // advance the range + let (range, prev_start) = range.advance_range_start().expect("should advance range"); + assert_eq!(prev_start, 2); + assert_eq!(range.len(), 3); + assert_eq!(range.start, 3); + + // advance range + let (range, prev_start) = range.advance_range_start().expect("should advance range"); + assert_eq!(prev_start, 3); + assert_eq!(range.len(), 2); + assert_eq!(range.start, 4); + + // advance range + let (range, prev_start) = range.advance_range_start().expect("should advance range"); + assert_eq!(prev_start, 4); + assert_eq!(range.len(), 1); + assert_eq!(range.start, 5); + + // should not be allowed to advance the range anymore + let advance_result = range.advance_range_start(); + assert!(advance_result.is_err()); + } + + #[test] + fn can_break_range_into_halves() { + let range = BinaryRange::new(2, 10).expect("should create range"); + assert_eq!(range.len(), 9); + assert!(range.odd()); + assert!(range.get_half(LEFT).is_err()); + + let range = BinaryRange::new(2, 11).expect("should create range"); + assert_eq!(range.len(), 10); + assert!(!range.odd()); + + let left_range = range.get_half(LEFT).expect("should get sub range"); + assert_eq!(left_range.start, 2); + assert_eq!(left_range.end, 6); + + let right_range = range.get_half(RIGHT).expect("should get sub range"); + assert_eq!(right_range.start, 7); + assert_eq!(right_range.end, 11); + + // right_range is false, advance to make even + let (right_range, _prev) = 
right_range.advance_range_start().expect("should advance"); + let right_left_range = right_range.get_half(LEFT).expect("should get sub range"); + assert_eq!(right_left_range.len(), 2); + assert_eq!(right_left_range.start, 8); + assert_eq!(right_left_range.end, 9); + } +} diff --git a/merk/src/proofs/chunk/chunk.rs b/merk/src/proofs/chunk/chunk.rs new file mode 100644 index 00000000..95d888ec --- /dev/null +++ b/merk/src/proofs/chunk/chunk.rs @@ -0,0 +1,662 @@ +// MIT LICENSE +// +// Copyright (c) 2021 Dash Core Group +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; + +// TODO: add copyright comment +use crate::proofs::{Node, Op, Tree}; +use crate::{ + proofs::{chunk::error::ChunkError, tree::execute}, + tree::{kv::ValueDefinedCostType, Fetch, RefWalker}, + CryptoHash, Error, +}; + +pub const LEFT: bool = true; +pub const RIGHT: bool = false; + +impl<'a, S> RefWalker<'a, S> +where + S: Fetch + Sized + Clone, +{ + /// Returns a chunk of a given depth from a RefWalker + pub fn create_chunk(&mut self, depth: usize) -> Result, Error> { + // build the proof vector + let mut proof = vec![]; + + self.create_chunk_internal(&mut proof, depth)?; + + Ok(proof) + } + + fn create_chunk_internal( + &mut self, + proof: &mut Vec, + remaining_depth: usize, + ) -> Result<(), Error> { + // at some point we will reach the depth + // here we need to put the node hash + if remaining_depth == 0 { + proof.push(Op::Push(self.to_hash_node().unwrap())); + return Ok(()); + } + + // traverse left + let has_left_child = self.tree().link(true).is_some(); + if has_left_child { + let mut left = self + .walk(true, None::<&fn(&[u8]) -> Option>) + .unwrap()? + .expect("confirmed is some"); + left.create_chunk_internal(proof, remaining_depth - 1)?; + } + + // add current node's data + proof.push(Op::Push(self.to_kv_value_hash_feature_type_node())); + + if has_left_child { + proof.push(Op::Parent); + } + + // traverse right + if let Some(mut right) = self + .walk(false, None::<&fn(&[u8]) -> Option>) + .unwrap()? 
+ { + right.create_chunk_internal(proof, remaining_depth - 1)?; + + proof.push(Op::Child); + } + + Ok(()) + } + + /// Returns a chunk of a given depth after applying some traversal + /// instruction to the RefWalker + pub fn traverse_and_build_chunk( + &mut self, + instructions: &[bool], + depth: usize, + ) -> Result, Error> { + // base case + if instructions.is_empty() { + // we are at the desired node + return self.create_chunk(depth); + } + + // link must exist + let has_link = self.tree().link(instructions[0]).is_some(); + if !has_link { + return Err(Error::ChunkingError(ChunkError::BadTraversalInstruction( + "no node found at given traversal instruction", + ))); + } + + // grab child + let mut child = self + .walk( + instructions[0], + None::<&fn(&[u8]) -> Option>, + ) + .unwrap()? + .expect("confirmed link exists so cannot be none"); + + // recurse on child + child.traverse_and_build_chunk(&instructions[1..], depth) + } + + /// Returns the smallest amount of tree ops, that can convince + /// a verifier of the tree height + /// the generated subtree is of this form + /// kv_hash + /// / \ + /// kv_hash node_hash + /// / \ + /// kv_hash node_hash + /// . + /// . + /// . 
+ pub fn generate_height_proof(&mut self, proof: &mut Vec) -> CostResult<(), Error> { + // TODO: look into making height proofs more efficient + // they will always be used in the context of some + // existing chunk, we don't want to repeat nodes unnecessarily + let mut cost = OperationCost::default(); + + let maybe_left = cost_return_on_error!( + &mut cost, + self.walk(LEFT, None::<&fn(&[u8]) -> Option>) + ); + let has_left_child = maybe_left.is_some(); + + // recurse to leftmost element + if let Some(mut left) = maybe_left { + cost_return_on_error!(&mut cost, left.generate_height_proof(proof)) + } + + proof.push(Op::Push(self.to_kvhash_node())); + + if has_left_child { + proof.push(Op::Parent); + } + + if let Some(right) = self.tree().link(RIGHT) { + proof.push(Op::Push(Node::Hash(*right.hash()))); + proof.push(Op::Child); + } + + Ok(()).wrap_with_cost(cost) + } +} + +// TODO: add documentation +pub fn verify_height_proof(proof: Vec, expected_root_hash: CryptoHash) -> Result { + // todo: remove unwrap + let height_proof_tree = execute(proof.into_iter().map(Ok), false, |_| Ok(())).unwrap()?; + + // todo: deal with cost + // todo: deal with old chunk restoring error + if height_proof_tree.hash().unwrap() != expected_root_hash { + return Err(Error::OldChunkRestoringError( + "invalid height proof: root hash mismatch".to_string(), + )); + } + + verify_height_tree(&height_proof_tree) +} + +// TODO: add documentation +pub fn verify_height_tree(height_proof_tree: &Tree) -> Result { + return Ok(match height_proof_tree.child(LEFT) { + Some(child) => { + if !matches!(child.tree.node, Node::KVHash(..)) { + // todo deal with old chunk restoring error + return Err(Error::OldChunkRestoringError( + "Expected left nodes in height proofs to be kvhash nodes".to_string(), + )); + } + verify_height_tree(&child.tree)? 
+ 1 + } + None => 1, + }); +} + +#[cfg(test)] +pub mod tests { + use ed::Encode; + + use crate::{ + proofs::{ + chunk::chunk::{verify_height_proof, LEFT, RIGHT}, + tree::execute, + Node, Op, + }, + test_utils::make_tree_seq_with_start_key, + tree::{kv::ValueDefinedCostType, RefWalker, TreeNode}, + PanicSource, TreeFeatureType, + }; + + fn build_tree_10_nodes() -> TreeNode { + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // / \ \ + // 4 6 9 + make_tree_seq_with_start_key(10, [0; 8].to_vec()) + } + + /// Traverses a tree to a certain node and returns the node hash of that + /// node + pub fn traverse_get_node_hash( + walker: &mut RefWalker, + traverse_instructions: &[bool], + ) -> Node { + traverse_and_apply(walker, traverse_instructions, |walker| { + walker.to_hash_node().unwrap() + }) + } + + /// Traverses a tree to a certain node and returns the kv_feature_type of + /// that node + pub fn traverse_get_kv_feature_type( + walker: &mut RefWalker, + traverse_instructions: &[bool], + ) -> Node { + traverse_and_apply(walker, traverse_instructions, |walker| { + walker.to_kv_value_hash_feature_type_node() + }) + } + /// Traverses a tree to a certain node and returns the kv_hash of + /// that node + pub fn traverse_get_kv_hash( + walker: &mut RefWalker, + traverse_instructions: &[bool], + ) -> Node { + traverse_and_apply(walker, traverse_instructions, |walker| { + walker.to_kvhash_node() + }) + } + + /// Traverses a tree to a certain node and returns the result of applying + /// some arbitrary function + pub fn traverse_and_apply( + walker: &mut RefWalker, + traverse_instructions: &[bool], + apply_fn: T, + ) -> Node + where + T: Fn(&mut RefWalker) -> Node, + { + if traverse_instructions.is_empty() { + return apply_fn(walker); + } + + let mut child = walker + .walk( + traverse_instructions[0], + None::<&fn(&[u8]) -> Option>, + ) + .unwrap() + .unwrap() + .unwrap(); + traverse_and_apply(&mut child, &traverse_instructions[1..], apply_fn) + } + + #[test] + fn 
build_chunk_from_root_depth_0() { + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // should return the node hash of the root node + let chunk = tree_walker.create_chunk(0).expect("should build chunk"); + assert_eq!(chunk.len(), 1); + assert_eq!( + chunk[0], + Op::Push(traverse_get_node_hash(&mut tree_walker, &[])) + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(computed_tree.hash().unwrap(), tree.hash().unwrap()); + } + + #[test] + fn build_chunk_from_root_depth_1() { + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // build chunk for depth 1 + // expected: + // 3 + // / \ + // Hash(1) Hash(7) + let chunk = tree_walker.create_chunk(1).expect("should build chunk"); + assert_eq!(chunk.len(), 5); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_node_hash(&mut tree_walker, &[LEFT])), + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[])), + Op::Parent, + Op::Push(traverse_get_node_hash(&mut tree_walker, &[RIGHT])), + Op::Child + ] + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(computed_tree.hash().unwrap(), tree.hash().unwrap()); + } + + #[test] + fn build_chunk_from_root_depth_3() { + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // build chunk for depth 3 + // expected: + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // / \ \ + // H(4) H(6) H(9) + let chunk = tree_walker.create_chunk(3).expect("should build chunk"); + assert_eq!(chunk.len(), 19); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[LEFT])), + Op::Parent, + 
Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT] + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[])), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT] + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT] + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[RIGHT])), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT] + )), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, RIGHT, RIGHT] + )), + Op::Child, + Op::Child, + Op::Child + ] + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(computed_tree.hash().unwrap(), tree.hash().unwrap()); + } + + #[test] + fn build_chunk_from_root_depth_max_depth() { + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // build chunk for entire tree (depth 4) + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // / \ \ + // 4 6 9 + let chunk = tree_walker.create_chunk(4).expect("should build chunk"); + assert_eq!(chunk.len(), 19); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[LEFT])), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT, RIGHT] + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[])), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT] + )), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT] + )), + 
Op::Child, + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[RIGHT])), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT] + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT, RIGHT] + )), + Op::Child, + Op::Child, + Op::Child + ] + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(computed_tree.hash().unwrap(), tree.hash().unwrap()); + } + + #[test] + fn chunk_greater_than_max_should_equal_max_depth() { + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // build chunk with depth greater than tree + // we should get the same result as building with the exact depth + let large_depth_chunk = tree_walker.create_chunk(100).expect("should build chunk"); + let exact_depth_chunk = tree_walker.create_chunk(4).expect("should build chunk"); + assert_eq!(large_depth_chunk, exact_depth_chunk); + + let tree_a = execute(large_depth_chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + let tree_b = execute(exact_depth_chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!(tree_a.hash().unwrap(), tree_b.hash().unwrap()); + } + + #[test] + fn build_chunk_after_traversal_depth_2() { + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // traverse to the right first then build chunk + // expected + // 7 + // / \ + // 5 8 + // / \ \ + // H(4) H(6) H(9) + + // right traversal + let chunk = tree_walker + .traverse_and_build_chunk(&[RIGHT], 2) + .expect("should build chunk"); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT] + )), + Op::Parent, + 
Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT] + )), + Op::Child, + Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[RIGHT])), + Op::Parent, + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, RIGHT] + )), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, RIGHT, RIGHT] + )), + Op::Child, + Op::Child, + ] + ); + + // the hash of the tree computed from the chunk + // should be the same as the node_hash of the element + // on the right + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!( + Node::Hash(computed_tree.hash().unwrap()), + traverse_get_node_hash(&mut tree_walker, &[RIGHT]) + ); + } + + #[test] + fn build_chunk_after_traversal_depth_1() { + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + // traverse with [right, left] and then build chunk of depth 1 + // expected + // 5 + // / \ + // H(4) H(6) + + // instruction traversal + let chunk = tree_walker + .traverse_and_build_chunk(&[RIGHT, LEFT], 1) + .expect("should build chunk"); + assert_eq!( + chunk, + vec![ + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, LEFT] + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT, LEFT] + )), + Op::Parent, + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT, RIGHT] + )), + Op::Child, + ] + ); + + let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) + .unwrap() + .expect("should reconstruct tree"); + assert_eq!( + Node::Hash(computed_tree.hash().unwrap()), + traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT]) + ); + } + + #[test] + fn test_chunk_encoding() { + let chunk = vec![ + Op::Push(Node::Hash([0; 32])), + Op::Push(Node::KVValueHashFeatureType( + vec![1], + vec![2], + [0; 32], + TreeFeatureType::BasicMerkNode, + )), + ]; + let encoded_chunk = 
chunk.encode().expect("should encode"); + assert_eq!(encoded_chunk.len(), 33 + 39); + assert_eq!( + encoded_chunk.len(), + chunk.encoding_length().expect("should get encoding length") + ); + } + + #[test] + fn test_height_proof_generation() { + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + let mut height_proof = vec![]; + tree_walker + .generate_height_proof(&mut height_proof) + .unwrap() + .expect("should generate height proof"); + + assert_eq!(height_proof.len(), 9); + assert_eq!( + height_proof, + vec![ + Op::Push(traverse_get_kv_hash(&mut tree_walker, &[LEFT, LEFT])), + Op::Push(traverse_get_kv_hash(&mut tree_walker, &[LEFT])), + Op::Parent, + Op::Push(traverse_get_node_hash(&mut tree_walker, &[LEFT, RIGHT])), + Op::Child, + Op::Push(traverse_get_kv_hash(&mut tree_walker, &[])), + Op::Parent, + Op::Push(traverse_get_node_hash(&mut tree_walker, &[RIGHT])), + Op::Child, + ] + ); + } + + #[test] + fn test_height_proof_verification() { + let mut tree = build_tree_10_nodes(); + let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); + + let mut height_proof = vec![]; + tree_walker + .generate_height_proof(&mut height_proof) + .unwrap() + .expect("should generate height proof"); + + let verified_height = verify_height_proof(height_proof, tree.hash().unwrap()) + .expect("should verify height proof"); + + // doesn't represent the max height of the tree + assert_eq!(verified_height, 3); + } +} diff --git a/merk/src/proofs/chunk/chunk_op.rs b/merk/src/proofs/chunk/chunk_op.rs new file mode 100644 index 00000000..6d0d08cd --- /dev/null +++ b/merk/src/proofs/chunk/chunk_op.rs @@ -0,0 +1,169 @@ +// MIT LICENSE +// +// Copyright (c) 2021 Dash Core Group +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights 
to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::io::{Read, Write}; + +use ed::{Decode, Encode}; +use integer_encoding::{VarInt, VarIntReader}; + +use crate::proofs::Op; + +/// Represents the chunk generated from a given starting chunk id +#[derive(PartialEq, Debug)] +pub enum ChunkOp { + ChunkId(Vec), + Chunk(Vec), +} + +impl Encode for ChunkOp { + fn encode_into(&self, dest: &mut W) -> ed::Result<()> { + match self { + Self::ChunkId(instruction) => { + // write the marker then the len + let _ = dest.write_all(&[0_u8]); + dest.write_all(instruction.len().encode_var_vec().as_slice())?; + let instruction_as_binary: Vec = instruction + .iter() + .map(|v| if *v { 1_u8 } else { 0_u8 }) + .collect(); + dest.write_all(&instruction_as_binary)?; + } + Self::Chunk(chunk) => { + let _ = dest.write_all(&[1_u8]); + // chunk len represents the number of ops not the total encoding len of ops + dest.write_all(chunk.len().encode_var_vec().as_slice())?; + for op in chunk { + dest.write_all(&op.encode()?)?; + } + } + } + + Ok(()) + } + + fn encoding_length(&self) -> ed::Result { + Ok(match self { + Self::ChunkId(instruction) => { + 1 + 
instruction.len().encode_var_vec().len() + instruction.len() + } + Self::Chunk(chunk) => { + 1 + chunk.len().encode_var_vec().len() + chunk.encoding_length()? + } + }) + } +} + +impl Decode for ChunkOp { + fn decode(input: R) -> ed::Result { + let mut chunk_op = ChunkOp::ChunkId(vec![]); + Self::decode_into(&mut chunk_op, input)?; + Ok(chunk_op) + } + + fn decode_into(&mut self, mut input: R) -> ed::Result<()> { + let mut marker = [0_u8; 1]; + input.read_exact(&mut marker)?; + + match marker[0] { + 0 => { + let length = input.read_varint()?; + let mut instruction_as_binary = vec![0_u8; length]; + input.read_exact(&mut instruction_as_binary)?; + + let instruction: Vec = instruction_as_binary + .into_iter() + .map(|v| v == 1_u8) + .collect(); + + *self = ChunkOp::ChunkId(instruction); + } + 1 => { + let ops_length = input.read_varint()?; + let mut chunk = Vec::with_capacity(ops_length); + + for _ in 0..ops_length { + let op = Decode::decode(&mut input)?; + chunk.push(op); + } + + *self = ChunkOp::Chunk(chunk); + } + _ => return Err(ed::Error::UnexpectedByte(marker[0])), + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use ed::{Decode, Encode}; + + use crate::proofs::{ + chunk::{ + chunk::{LEFT, RIGHT}, + chunk_op::ChunkOp, + }, + Node, Op, + }; + + #[test] + fn test_chunk_op_encoding() { + let chunk_op = ChunkOp::ChunkId(vec![LEFT, RIGHT]); + let encoded_chunk_op = chunk_op.encode().unwrap(); + assert_eq!(encoded_chunk_op, vec![0, 2, 1, 0]); + assert_eq!(encoded_chunk_op.len(), chunk_op.encoding_length().unwrap()); + + let chunk_op = ChunkOp::Chunk(vec![Op::Push(Node::Hash([0; 32])), Op::Child]); + let encoded_chunk_op = chunk_op.encode().unwrap(); + let mut expected_encoding = vec![1, 2]; + expected_encoding.extend(Op::Push(Node::Hash([0; 32])).encode().unwrap()); + expected_encoding.extend(Op::Child.encode().unwrap()); + assert_eq!(encoded_chunk_op, expected_encoding); + assert_eq!(encoded_chunk_op.len(), chunk_op.encoding_length().unwrap()); + } + + #[test] + 
fn test_chunk_op_decoding() { + let encoded_chunk_op = vec![0, 3, 1, 0, 1]; + let decoded_chunk_op = ChunkOp::decode(encoded_chunk_op.as_slice()).unwrap(); + assert_eq!(decoded_chunk_op, ChunkOp::ChunkId(vec![LEFT, RIGHT, LEFT])); + + let mut encoded_chunk_op = vec![1, 2]; + encoded_chunk_op.extend(Op::Push(Node::Hash([1; 32])).encode().unwrap()); + encoded_chunk_op.extend(Op::Push(Node::KV(vec![1], vec![2])).encode().unwrap()); + let decoded_chunk_op = ChunkOp::decode(encoded_chunk_op.as_slice()).unwrap(); + assert_eq!( + decoded_chunk_op, + ChunkOp::Chunk(vec![ + Op::Push(Node::Hash([1; 32])), + Op::Push(Node::KV(vec![1], vec![2])) + ]) + ); + } +} diff --git a/merk/src/proofs/chunk/error.rs b/merk/src/proofs/chunk/error.rs new file mode 100644 index 00000000..bd482666 --- /dev/null +++ b/merk/src/proofs/chunk/error.rs @@ -0,0 +1,79 @@ +// MIT LICENSE +// +// Copyright (c) 2021 Dash Core Group +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+#[derive(Debug, thiserror::Error)]
+/// Chunk related errors
+pub enum ChunkError {
+    /// Limit too small for first chunk, cannot make progress
+    #[error("overflow error {0}")]
+    LimitTooSmall(&'static str),
+
+    /// Chunk index out of bounds
+    #[error("chunk index out of bounds: {0}")]
+    OutOfBounds(&'static str),
+
+    /// Empty tree contains no chunks
+    #[error("chunk from empty tree: {0}")]
+    EmptyTree(&'static str),
+
+    /// Invalid traversal instruction (points to no element)
+    #[error("traversal instruction invalid {0}")]
+    BadTraversalInstruction(&'static str),
+
+    /// Expected ChunkId when parsing chunk ops
+    #[error("expected chunk id when parsing chunk op")]
+    ExpectedChunkId,
+
+    /// Expected Chunk when parsing chunk ops
+    #[error("expected chunk when parsing chunk op")]
+    ExpectedChunk,
+
+    // Restoration Errors
+    /// Chunk restoration starts from the root chunk; this leads to a set of
+    /// root hash values used to verify the other chunks.
+    /// Hence, before you can verify a child you need to have verified its
+    /// parent.
+ #[error("unexpected chunk: cannot verify chunk because verification hash is not in memory")] + UnexpectedChunk, + + /// Invalid chunk proof when verifying chunk + #[error("invalid chunk proof: {0}")] + InvalidChunkProof(&'static str), + + /// Invalid multi chunk + #[error("invalid multi chunk: {0}")] + InvalidMultiChunk(&'static str), + + #[error("called finalize too early still expecting chunks")] + RestorationNotComplete, + + /// Internal error, this should never surface + /// if it does, it means wrong assumption in code + #[error("internal error {0}")] + InternalError(&'static str), +} diff --git a/merk/src/proofs/chunk/util.rs b/merk/src/proofs/chunk/util.rs new file mode 100644 index 00000000..2f64ba8d --- /dev/null +++ b/merk/src/proofs/chunk/util.rs @@ -0,0 +1,700 @@ +// MIT LICENSE +// +// Copyright (c) 2021 Dash Core Group +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! 
Collection of state-independent algorithms needed to facilitate chunk
+//! production and restoration
+
+use std::io::Write;
+
+// TODO: figure out better nomenclature
+use crate::{proofs::chunk::binary_range::BinaryRange, Error};
+use crate::{
+    proofs::chunk::{
+        chunk::{LEFT, RIGHT},
+        error::{ChunkError, ChunkError::BadTraversalInstruction},
+    },
+    Error::InternalError,
+};
+
+/// Represents the height as a linear combination of 3 and 2
+/// of the form 3x + 2y
+/// this breaks the tree into layers of height 3 or 2
+/// the minimum chunk height is 2, so if tree height is less than 2
+/// we just return a single layer of height 2
+fn chunk_height_per_layer(height: usize) -> Vec<usize> {
+    let mut two_count = 0;
+    let mut three_count = height / 3;
+
+    if height == 0 {
+        return vec![];
+    }
+
+    // minimum chunk height is 2, if tree height is less than 2
+    // return a single layer with chunk height 2
+    if height < 2 {
+        two_count = 1;
+    } else {
+        match height % 3 {
+            0 => { /* do nothing */ }
+            1 => {
+                // reduce the three_count by 1
+                // so the remainder becomes 3 + 1
+                // which is equivalent to 2 + 2
+                three_count -= 1;
+                two_count += 2;
+            }
+            2 => {
+                // remainder is a factor of 2
+                // just increase the two_count
+                two_count += 1;
+            }
+            // this is unreachable because height is a positive number;
+            // the remainder after dividing by 3 is fixed to [0,1,2]
+            _ => unreachable!(""),
+        }
+    }
+
+    let mut layer_heights = vec![3; three_count];
+    layer_heights.extend(vec![2; two_count]);
+
+    layer_heights
+}
+
+/// Return the layer a chunk subtree belongs to
+pub fn chunk_layer(height: usize, chunk_id: usize) -> Result<usize, Error> {
+    // remaining depth tells us how deep in the tree the specified chunk is
+    let mut remaining_depth = generate_traversal_instruction(height, chunk_id)?.len() + 1;
+    let layer_heights = chunk_height_per_layer(height);
+
+    let mut layer = 1;
+
+    while remaining_depth > 1 {
+        // remaining depth will always be larger than the next layer height
+        // if it is not already 1
+ // this is because a every chunk always starts at a layer boundary + // and remaining depth points to a chunk + debug_assert!(remaining_depth > layer_heights[layer - 1]); + + remaining_depth -= layer_heights[layer - 1]; + layer += 1; + } + + Ok(layer - 1) +} + +/// Return the depth of a chunk given the height +/// and chunk id +pub fn chunk_height(height: usize, chunk_id: usize) -> Result { + let chunk_layer = chunk_layer(height, chunk_id)?; + let layer_heights = chunk_height_per_layer(height); + + Ok(layer_heights[chunk_layer]) +} + +/// Given a tree of height h, return the number of chunks needed +/// to completely represent the tree +pub fn number_of_chunks(height: usize) -> usize { + let layer_heights = chunk_height_per_layer(height); + number_of_chunks_internal(layer_heights) +} + +/// Locates the subtree represented by a chunk id and returns +/// the number of chunks under that subtree +pub fn number_of_chunks_under_chunk_id(height: usize, chunk_id: usize) -> Result { + let chunk_layer = chunk_layer(height, chunk_id)?; + let layer_heights = chunk_height_per_layer(height); + + // we only care about the layer heights after the chunk layer + // as we are getting the number of chunks under a subtree and not + // the entire tree of height h + Ok(number_of_chunks_internal( + layer_heights[chunk_layer..].to_vec(), + )) +} + +/// Given the heights of a tree per layer, return the total number of chunks in +/// that tree +fn number_of_chunks_internal(layer_heights: Vec) -> usize { + // a layer consists of 1 or more subtrees of a given height + // here we figure out number of exit nodes from a single subtree for each layer + let mut single_subtree_exits_per_layer = layer_heights + .into_iter() + .map(exit_node_count) + .collect::>(); + + // we don't care about exit nodes from the last layer + // as that points to non-existent subtrees + single_subtree_exits_per_layer.pop(); + + // now we get the total exit nodes per layer + // by multiplying the exits per subtree with 
the number of subtrees on that + // layer + let mut chunk_counts_per_layer = vec![1]; + for i in 0..single_subtree_exits_per_layer.len() { + let previous_layer_chunk_count = chunk_counts_per_layer[i]; + let current_layer_chunk_count = + previous_layer_chunk_count * single_subtree_exits_per_layer[i]; + chunk_counts_per_layer.push(current_layer_chunk_count); + } + + chunk_counts_per_layer.into_iter().sum() +} + +/// Calculates the maximum number of exit nodes for a tree of height h. +fn exit_node_count(height: usize) -> usize { + 2_usize.pow(height as u32) +} + +/// Generate instruction for traversing to a given chunk in a binary tree +pub fn generate_traversal_instruction(height: usize, chunk_id: usize) -> Result, Error> { + let mut instructions = vec![]; + + let total_chunk_count = number_of_chunks(height); + + // out of bounds + if chunk_id < 1 || chunk_id > total_chunk_count { + return Err(Error::ChunkingError(ChunkError::OutOfBounds( + "chunk id out of bounds", + ))); + } + + let mut chunk_range = BinaryRange::new(1, total_chunk_count).map_err(|_| { + Error::ChunkingError(ChunkError::InternalError( + "failed to initialize chunk range", + )) + })?; + + // total chunk count will always be odd because + // from the initial chunk (1) we have an even number of + // exit nodes, and they have even numbers of exit nodes ... 
+ // so total_chunk_count = 1 + some_even_number = odd + debug_assert!(chunk_range.odd()); + + // bisect and reduce the chunk range until we get to the desired chunk + // we keep track of every left right decision we make + while chunk_range.len() > 1 { + if chunk_range.odd() { + // checks if we last decision we made got us to the desired chunk id + let advance_result = chunk_range.advance_range_start().unwrap(); + chunk_range = advance_result.0; + if advance_result.1 == chunk_id { + return Ok(instructions); + } + } else { + // for even chunk range, we are at the decision point + // we can either go left or right + // we first check which half the desired chunk is + // then follow that path + let chunk_id_half = chunk_range + .which_half(chunk_id) + .expect("chunk id must exist in range"); + instructions.push(chunk_id_half); + chunk_range = chunk_range + .get_half(chunk_id_half) + .expect("confirmed range is not odd"); + } + } + + // chunk range len is exactly 1 + // this must be the desired chunk id + // return instructions that got us here + Ok(instructions) +} + +/// Determine the chunk id given the traversal instruction and the max height of +/// the tree +pub fn chunk_id_from_traversal_instruction( + traversal_instruction: &[bool], + height: usize, +) -> Result { + // empty traversal instruction points to the first chunk + if traversal_instruction.is_empty() { + return Ok(1); + } + + let mut chunk_count = number_of_chunks(height); + let mut current_chunk_id = 1; + + let mut layer_heights = chunk_height_per_layer(height); + let last_layer_height = layer_heights.pop().expect("confirmed not empty"); + + // traversal instructions should only point to the root node of chunks (chunk + // boundaries) the layer heights represent the height of each chunk layer + // the last chunk layer is at height = total_height - last_chunk_height + 1 + // traversal instructions require 1 less than height to address it + // e.g. 
height 1 is represented by [] - len of 0 + // height 2 is represented by [left] or [right] len of 1 + // therefore last chunk root node is address with total_height - + // last_chunk_height + if traversal_instruction.len() > height - last_layer_height { + return Err(Error::ChunkingError(BadTraversalInstruction( + "traversal instruction should not address nodes past the root of the last layer chunks", + ))); + } + + // verify that the traversal instruction points to a chunk boundary + let mut traversal_length = traversal_instruction.len(); + let mut relevant_layer_heights = vec![]; + for layer_height in layer_heights { + // the traversal_length should be a perfect sum of a subset of the layer_height + // if the traversal_length is not 0, it should be larger than or equal to the + // next layer height. + if traversal_length < layer_height { + return Err(Error::ChunkingError(BadTraversalInstruction( + "traversal instruction should point to a chunk boundary", + ))); + } + + traversal_length -= layer_height; + relevant_layer_heights.push(layer_height); + + if traversal_length == 0 { + break; + } + } + + // take layer_height instructions and determine the updated chunk id + let mut start_index = 0; + for layer_height in relevant_layer_heights { + let end_index = start_index + layer_height; + let subset_instructions = &traversal_instruction[start_index..end_index]; + + // offset multiplier determines what subchunk we are on based on the given + // instruction offset multiplier just converts the binary instruction to + // decimal, taking left as 0 and right as 0 i.e [left, left, left] = 0 + // means we are at subchunk 0 + let mut offset_multiplier = 0; + for (i, instruction) in subset_instructions.iter().enumerate() { + offset_multiplier += 2_usize.pow((subset_instructions.len() - i - 1) as u32) + * (1 - *instruction as usize); + } + + if chunk_count % 2 != 0 { + // remove the current chunk from the chunk count + chunk_count -= 1; + } + + chunk_count /= 
exit_node_count(layer_height); + + current_chunk_id = current_chunk_id + offset_multiplier * chunk_count + 1; + + start_index = end_index; + } + + Ok(current_chunk_id) +} + +/// Determine the chunk id given the traversal instruction and the max height of +/// the tree. This can recover from traversal instructions not pointing to a +/// chunk boundary, in such a case, it backtracks until it hits a chunk +/// boundary. +pub fn chunk_id_from_traversal_instruction_with_recovery( + traversal_instruction: &[bool], + height: usize, +) -> Result { + let chunk_id_result = chunk_id_from_traversal_instruction(traversal_instruction, height); + if chunk_id_result.is_err() { + return chunk_id_from_traversal_instruction_with_recovery( + &traversal_instruction[0..traversal_instruction.len() - 1], + height, + ); + } + chunk_id_result +} + +/// Generate instruction for traversing to a given chunk in a binary tree, +/// returns string representation +pub fn generate_traversal_instruction_as_string( + height: usize, + chunk_id: usize, +) -> Result { + let instruction = generate_traversal_instruction(height, chunk_id)?; + Ok(traversal_instruction_as_string(&instruction)) +} + +/// Convert traversal instruction to byte string +/// 1 represents left (true) +/// 0 represents right (false) +pub fn traversal_instruction_as_string(instruction: &[bool]) -> String { + instruction + .iter() + .map(|v| if *v { "1" } else { "0" }) + .collect() +} + +/// Converts a string that represents a traversal instruction +/// to a vec of bool, true = left and false = right +pub fn string_as_traversal_instruction(instruction_string: &str) -> Result, Error> { + instruction_string + .chars() + .map(|char| match char { + '1' => Ok(LEFT), + '0' => Ok(RIGHT), + _ => Err(Error::ChunkingError(ChunkError::BadTraversalInstruction( + "failed to parse instruction string", + ))), + }) + .collect() +} + +pub fn write_to_vec(dest: &mut W, value: &[u8]) -> Result<(), Error> { + dest.write_all(value) + .map_err(|_e| 
InternalError("failed to write to vector")) +} + +#[cfg(test)] +mod test { + + use super::*; + use crate::proofs::chunk::chunk::{LEFT, RIGHT}; + + #[test] + fn test_chunk_height_per_layer() { + let layer_heights = chunk_height_per_layer(10); + assert_eq!(layer_heights.iter().sum::(), 10); + assert_eq!(layer_heights, [3, 3, 2, 2]); + + let layer_heights = chunk_height_per_layer(45); + assert_eq!(layer_heights.iter().sum::(), 45); + assert_eq!(layer_heights, [3; 15]); + + let layer_heights = chunk_height_per_layer(2); + assert_eq!(layer_heights.iter().sum::(), 2); + assert_eq!(layer_heights, [2]); + + // height less than 2 + let layer_heights = chunk_height_per_layer(1); + assert_eq!(layer_heights.iter().sum::(), 2); + assert_eq!(layer_heights, [2]); + + let layer_heights = chunk_height_per_layer(0); + assert_eq!(layer_heights.iter().sum::(), 0); + assert_eq!(layer_heights, Vec::::new()); + } + + #[test] + fn test_exit_node_count() { + // tree with just one node has 2 exit nodes + assert_eq!(exit_node_count(1), 2); + + // tree with height 2 has 4 exit nodes + assert_eq!(exit_node_count(2), 4); + + // tree with height 6 has 64 exit nodes + assert_eq!(exit_node_count(6), 64); + } + + #[test] + fn test_number_of_chunks() { + // given a chunk of height less than 3 chunk count should be 1 + assert_eq!(number_of_chunks(1), 1); + assert_eq!(number_of_chunks(2), 1); + + // tree with height 4 should have 5 chunks + // we split the tree into 2 layers of chunk height 2 each + // first layer contains just one chunk (1), but has 4 exit nodes + // hence total chunk count = 1 + 4 = 5 + assert_eq!(number_of_chunks(4), 5); + + // tree with height 6 should have 9 chunks + // will be split into two layers of chunk height 3 = [3,3] + // first chunk takes 1, has 2^3 = 8 exit nodes + // total chunks = 1 + 8 = 9 + assert_eq!(number_of_chunks(6), 9); + + // tree with height 10 should have 341 chunks + // will be split into 5 layers = [3, 3, 2, 2] + // first layer has just 1 chunk, exit 
nodes = 2^3 = 8 + // second layer has 4 chunks, exit nodes = 2^3 * 8 = 64 + // third layer has 16 chunks, exit nodes = 2^2 * 64 = 256 + // fourth layer has 256 chunks + // total chunks = 1 + 8 + 64 + 256 = 329 chunks + assert_eq!(number_of_chunks(10), 329); + } + + #[test] + fn test_number_of_chunks_under_chunk_id() { + // tree with height less than 3 should have just 1 chunk + assert_eq!(number_of_chunks_under_chunk_id(1, 1).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(2, 1).unwrap(), 1); + + // asking for chunk out of bounds should return error + assert!(number_of_chunks_under_chunk_id(1, 3).is_err()); + + // tree with height 4 should have 5 chunks at chunk id 1 + // but 1 chunk at id 2 - 5 + assert_eq!(number_of_chunks_under_chunk_id(4, 1).unwrap(), 5); + assert_eq!(number_of_chunks_under_chunk_id(4, 2).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(4, 3).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(4, 4).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(4, 5).unwrap(), 1); + + // tree with height 10 should have 329 chunks + // layer_heights = [3, 3, 2, 2] + // chunk_id 1 = 329 + // chunk_id 2 = 41 i.e (329 - 1) / 2^3 + // chunk_id 3 = 5 i.e (41 - 1) / 2^3 + // chunk_id 4 = 1 i.e (5 - 1) / 2^2 + // chunk_id 5 = 1 on the same layer as 4 + // chunk_id 43 = 41 as chunk 43 should wrap back to the same layer as chunk_id 2 + // chunk_id 44 = mirrors chunk_id 3 + // chunk_id 45 = mirrors chunk_id 4 + // chunk_id 46 = mirrors chunk_id 5 + assert_eq!(number_of_chunks_under_chunk_id(10, 1).unwrap(), 329); + assert_eq!(number_of_chunks_under_chunk_id(10, 2).unwrap(), 41); + assert_eq!(number_of_chunks_under_chunk_id(10, 3).unwrap(), 5); + assert_eq!(number_of_chunks_under_chunk_id(10, 4).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(10, 5).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(10, 43).unwrap(), 41); + assert_eq!(number_of_chunks_under_chunk_id(10, 44).unwrap(), 5); + 
assert_eq!(number_of_chunks_under_chunk_id(10, 45).unwrap(), 1); + assert_eq!(number_of_chunks_under_chunk_id(10, 46).unwrap(), 1); + } + + #[test] + fn test_traversal_instruction_generation() { + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // / \ \ + // 4 6 9 + // height: 4 + // layer_height: 3, 3 + // 3 + // / \ + // 1 7 + // / \ / \ + // 0 2 5 8 + // ............................ + // / \ \ + // 4 6 9 + // 5 chunks + // chunk 1 entry - 3 + // chunk 2 entry - 0 + // chunk 3 entry - 2 + // chunk 4 entry - 5 + // chunk 5 entry - 8 + + // chunk 1 entry - 3 is at the top of the tree so empty instruction set + let instruction = + generate_traversal_instruction(4, 1).expect("should generate traversal instruction"); + let empty_instruction: &[bool] = &[]; + assert_eq!(instruction, empty_instruction); + + // chunk 2 entry - 0 + // go left twice from root i.e 3 left -> 1 left -> 0 + let instruction = + generate_traversal_instruction(4, 2).expect("should generate traversal instruction"); + assert_eq!(instruction, &[LEFT, LEFT]); + + // chunk 3 entry - 2 + // go left then right from root i.e 3 left -> 1 right -> 2 + let instruction = + generate_traversal_instruction(4, 3).expect("should generate traversal instruction"); + assert_eq!(instruction, &[LEFT, RIGHT]); + + // chunk 4 entry - 5 + // go right then left i.e 3 right -> 7 left -> 5 + let instruction = + generate_traversal_instruction(4, 4).expect("should generate traversal instruction"); + assert_eq!(instruction, &[RIGHT, LEFT]); + + // chunk 5 entry - 8 + // go right twice i.e 3 right -> 7 right -> 8 + let instruction = + generate_traversal_instruction(4, 5).expect("should generate traversal instruction"); + assert_eq!(instruction, &[RIGHT, RIGHT]); + + // out of bound tests + assert!(generate_traversal_instruction(4, 6).is_err()); + assert!(generate_traversal_instruction(4, 0).is_err()); + } + + #[test] + fn test_chunk_height() { + // tree of height 6 + // all chunks have the same height + // since layer height 
= [3,3] + // we have 9 chunks in a tree of this height + for i in 1..=9 { + assert_eq!(chunk_height(6, i).unwrap(), 3); + } + + // tree of height 5 + // layer_height = [3, 2] + // we have 9 chunks, just the first chunk is of height 3 + // the rest are of height 2 + assert_eq!(chunk_height(5, 1).unwrap(), 3); + for i in 2..=9 { + assert_eq!(chunk_height(5, i).unwrap(), 2); + } + + // tree of height 10 + // layer_height = [3, 3, 2, 2] + // just going to check chunk 1 - 5 + assert_eq!(chunk_height(10, 1).unwrap(), 3); + assert_eq!(chunk_height(10, 2).unwrap(), 3); + assert_eq!(chunk_height(10, 3).unwrap(), 2); + assert_eq!(chunk_height(10, 4).unwrap(), 2); + assert_eq!(chunk_height(10, 5).unwrap(), 2); + } + + #[test] + fn test_traversal_instruction_as_string() { + assert_eq!(traversal_instruction_as_string(&vec![]), ""); + assert_eq!(traversal_instruction_as_string(&vec![LEFT]), "1"); + assert_eq!(traversal_instruction_as_string(&vec![RIGHT]), "0"); + assert_eq!( + traversal_instruction_as_string(&vec![RIGHT, LEFT, LEFT, RIGHT]), + "0110" + ); + } + + #[test] + fn test_instruction_string_to_traversal_instruction() { + assert_eq!(string_as_traversal_instruction("1").unwrap(), vec![LEFT]); + assert_eq!(string_as_traversal_instruction("0").unwrap(), vec![RIGHT]); + assert_eq!( + string_as_traversal_instruction("001").unwrap(), + vec![RIGHT, RIGHT, LEFT] + ); + assert!(string_as_traversal_instruction("002").is_err()); + assert_eq!( + string_as_traversal_instruction("").unwrap(), + Vec::::new() + ); + } + + #[test] + fn test_chunk_id_from_traversal_instruction() { + // tree of height 4 + let traversal_instruction = generate_traversal_instruction(4, 1).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + 1 + ); + let traversal_instruction = generate_traversal_instruction(4, 2).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + 2 + ); + let 
traversal_instruction = generate_traversal_instruction(4, 3).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + 3 + ); + let traversal_instruction = generate_traversal_instruction(4, 4).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + 4 + ); + + // tree of height 6 + let traversal_instruction = generate_traversal_instruction(6, 1).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 1 + ); + let traversal_instruction = generate_traversal_instruction(6, 2).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 2 + ); + let traversal_instruction = generate_traversal_instruction(6, 3).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 3 + ); + let traversal_instruction = generate_traversal_instruction(6, 4).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 4 + ); + let traversal_instruction = generate_traversal_instruction(6, 5).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 5 + ); + let traversal_instruction = generate_traversal_instruction(6, 6).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 6 + ); + let traversal_instruction = generate_traversal_instruction(6, 7).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 7 + ); + let traversal_instruction = generate_traversal_instruction(6, 8).unwrap(); + assert_eq!( + chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 8 + ); + let traversal_instruction = generate_traversal_instruction(6, 9).unwrap(); + assert_eq!( + 
chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + 9 + ); + } + + #[test] + fn test_chunk_id_from_traversal_instruction_with_recovery() { + // tree of height 5 + // layer heights = [3, 2] + // first chunk boundary is at instruction len 0 e.g. [] + // second chunk boundary is at instruction len 3 e.g. [left, left, left] + // anything outside of this should return an error with regular chunk_id + // function with recovery we expect this to backtrack to the last chunk + // boundary e.g. [left] should backtrack to [] + // [left, left, right, left] should backtrack to [left, left, right] + assert!(chunk_id_from_traversal_instruction(&[LEFT], 5).is_err()); + assert_eq!( + chunk_id_from_traversal_instruction_with_recovery(&[LEFT], 5).unwrap(), + 1 + ); + assert_eq!( + chunk_id_from_traversal_instruction_with_recovery(&[LEFT, LEFT], 5).unwrap(), + 1 + ); + assert_eq!( + chunk_id_from_traversal_instruction_with_recovery(&[LEFT, LEFT, RIGHT], 5).unwrap(), + 3 + ); + assert_eq!( + chunk_id_from_traversal_instruction_with_recovery(&[LEFT, LEFT, RIGHT, LEFT], 5) + .unwrap(), + 3 + ); + assert_eq!( + chunk_id_from_traversal_instruction_with_recovery(&[LEFT; 50], 5).unwrap(), + 2 + ); + } +} diff --git a/merk/src/proofs/tree.rs b/merk/src/proofs/tree.rs index 819fd43b..b3bf9cf1 100644 --- a/merk/src/proofs/tree.rs +++ b/merk/src/proofs/tree.rs @@ -43,6 +43,12 @@ use super::{Node, Op}; use crate::tree::{combine_hash, kv_digest_to_kv_hash, kv_hash, node_hash, value_hash, NULL_HASH}; #[cfg(any(feature = "full", feature = "verify"))] use crate::{error::Error, tree::CryptoHash}; +#[cfg(feature = "full")] +use crate::{ + proofs::chunk::chunk::{LEFT, RIGHT}, + Link, + TreeFeatureType::SummedMerkNode, +}; #[cfg(any(feature = "full", feature = "verify"))] /// Contains a tree's child node and its hash. 
The hash can always be assumed to @@ -55,6 +61,36 @@ pub struct Child { pub hash: CryptoHash, } +impl Child { + #[cfg(feature = "full")] + pub fn as_link(&self) -> Link { + let (key, sum) = match &self.tree.node { + Node::KV(key, _) | Node::KVValueHash(key, ..) => (key.as_slice(), None), + Node::KVValueHashFeatureType(key, _, _, feature_type) => { + let sum_value = match feature_type { + SummedMerkNode(sum) => Some(*sum), + _ => None, + }; + (key.as_slice(), sum_value) + } + // for the connection between the trunk and leaf chunks, we don't + // have the child key so we must first write in an empty one. once + // the leaf gets verified, we can write in this key to its parent + _ => (&[] as &[u8], None), + }; + + Link::Reference { + hash: self.hash, + sum, + child_heights: ( + self.tree.child_heights.0 as u8, + self.tree.child_heights.1 as u8, + ), + key: key.to_vec(), + } + } +} + #[cfg(any(feature = "full", feature = "verify"))] /// A binary tree data structure used to represent a select subset of a tree /// when verifying Merkle proofs. @@ -68,6 +104,8 @@ pub struct Tree { pub right: Option, /// Height pub height: usize, + /// Child Heights + pub child_heights: (usize, usize), } #[cfg(any(feature = "full", feature = "verify"))] @@ -79,6 +117,7 @@ impl From for Tree { left: None, right: None, height: 1, + child_heights: (0, 0), } } } @@ -167,6 +206,42 @@ impl Tree { Ok(()) } + #[cfg(feature = "full")] + /// Does an in-order traversal over references to all the nodes in the tree, + /// calling `visit_node` for each with the current traversal path. 
+ pub fn visit_refs_track_traversal_and_parent< + F: FnMut(&Self, &mut Vec, Option<&[u8]>) -> Result<(), Error>, + >( + &self, + base_traversal_instruction: &mut Vec, + parent_key: Option<&[u8]>, + visit_node: &mut F, + ) -> Result<(), Error> { + if let Some(child) = &self.left { + base_traversal_instruction.push(LEFT); + child.tree.visit_refs_track_traversal_and_parent( + base_traversal_instruction, + Some(self.key()), + visit_node, + )?; + base_traversal_instruction.pop(); + } + + visit_node(self, base_traversal_instruction, parent_key)?; + + if let Some(child) = &self.right { + base_traversal_instruction.push(RIGHT); + child.tree.visit_refs_track_traversal_and_parent( + base_traversal_instruction, + Some(self.key()), + visit_node, + )?; + base_traversal_instruction.pop(); + } + + Ok(()) + } + /// Returns an immutable reference to the child on the given side, if any. #[cfg(any(feature = "full", feature = "verify"))] pub const fn child(&self, left: bool) -> Option<&Child> { @@ -202,6 +277,13 @@ impl Tree { self.height = self.height.max(child.height + 1); + // update child height + if left { + self.child_heights.0 = child.height; + } else { + self.child_heights.1 = child.height; + } + let hash = child.hash().unwrap_add_cost(&mut cost); let tree = Box::new(child); *self.child_mut(left) = Some(Child { tree, hash }); @@ -238,13 +320,24 @@ impl Tree { _ => panic!("Expected node to be type KV"), } } + + #[cfg(feature = "full")] + pub(crate) fn sum(&self) -> Option { + match self.node { + Node::KVValueHashFeatureType(.., feature_type) => match feature_type { + SummedMerkNode(sum) => Some(sum), + _ => None, + }, + _ => panic!("Expected node to be type KVValueHashFeatureType"), + } + } } #[cfg(feature = "full")] /// `LayerIter` iterates over the nodes in a `Tree` at a given depth. Nodes are /// visited in order. 
pub struct LayerIter<'a> { - stack: Vec<&'a Tree>, + stack: Vec<(&'a Tree, usize)>, depth: usize, } @@ -257,25 +350,9 @@ impl<'a> LayerIter<'a> { depth, }; - iter.traverse_to_start(tree, depth); + iter.stack.push((tree, 0)); iter } - - /// Builds up the stack by traversing through left children to the desired - /// depth. - fn traverse_to_start(&mut self, tree: &'a Tree, remaining_depth: usize) { - self.stack.push(tree); - - if remaining_depth == 0 { - return; - } - - if let Some(child) = tree.child(true) { - self.traverse_to_start(&child.tree, remaining_depth - 1) - } else { - panic!("Could not traverse to given layer") - } - } } #[cfg(feature = "full")] @@ -283,32 +360,20 @@ impl<'a> Iterator for LayerIter<'a> { type Item = &'a Tree; fn next(&mut self) -> Option { - let item = self.stack.pop(); - let mut popped = item; - - loop { - if self.stack.is_empty() { - return item; - } - - let parent = self.stack.last().unwrap(); - let left_child = parent.child(true).unwrap(); - let right_child = parent.child(false).unwrap(); - - if left_child.tree.as_ref() == popped.unwrap() { - self.stack.push(&right_child.tree); - - while self.stack.len() - 1 < self.depth { - let parent = self.stack.last().unwrap(); - let left_child = parent.child(true).unwrap(); - self.stack.push(&left_child.tree); + while let Some((item, item_depth)) = self.stack.pop() { + if item_depth != self.depth { + if let Some(right_child) = item.child(false) { + self.stack.push((&right_child.tree, item_depth + 1)) + } + if let Some(left_child) = item.child(true) { + self.stack.push((&left_child.tree, item_depth + 1)) } - - return item; } else { - popped = self.stack.pop(); + return Some(item); } } + + None } } @@ -471,7 +536,19 @@ where .wrap_with_cost(cost); } - Ok(stack.pop().unwrap()).wrap_with_cost(cost) + let tree = stack.pop().unwrap(); + + if tree.child_heights.0.max(tree.child_heights.1) + - tree.child_heights.0.min(tree.child_heights.1) + > 1 + { + return Err(Error::InvalidProofError( + "Expected 
proof to result in a valid avl tree".to_string(), + )) + .wrap_with_cost(cost); + } + + Ok(tree).wrap_with_cost(cost) } #[cfg(feature = "full")] @@ -555,4 +632,104 @@ mod test { } assert!(iter.next().is_none()); } + + #[test] + fn execute_non_avl_tree() { + let non_avl_tree_proof = vec![ + Op::Push(Node::KV(vec![1], vec![1])), + Op::Push(Node::KV(vec![2], vec![2])), + Op::Parent, + Op::Push(Node::KV(vec![3], vec![3])), + Op::Parent, + ]; + let execution_result = + execute(non_avl_tree_proof.into_iter().map(Ok), false, |_| Ok(())).unwrap(); + assert!(execution_result.is_err()); + } + + #[test] + fn child_to_link() { + let basic_merk_tree = vec![ + Op::Push(Node::KV(vec![1], vec![1])), + Op::Push(Node::KV(vec![2], vec![2])), + Op::Parent, + Op::Push(Node::KV(vec![3], vec![3])), + Op::Child, + ]; + let tree = execute(basic_merk_tree.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .unwrap(); + + let left_link = tree.left.as_ref().unwrap().as_link(); + let right_link = tree.right.as_ref().unwrap().as_link(); + + assert_eq!( + left_link, + Link::Reference { + hash: tree.left.as_ref().map(|node| node.hash).unwrap(), + sum: None, + child_heights: (0, 0), + key: vec![1] + } + ); + + assert_eq!( + right_link, + Link::Reference { + hash: tree.right.as_ref().map(|node| node.hash).unwrap(), + sum: None, + child_heights: (0, 0), + key: vec![3] + } + ); + + let sum_merk_tree = vec![ + Op::Push(Node::KVValueHashFeatureType( + vec![1], + vec![1], + [0; 32], + SummedMerkNode(3), + )), + Op::Push(Node::KVValueHashFeatureType( + vec![2], + vec![2], + [0; 32], + SummedMerkNode(1), + )), + Op::Parent, + Op::Push(Node::KVValueHashFeatureType( + vec![3], + vec![3], + [0; 32], + SummedMerkNode(1), + )), + Op::Child, + ]; + let tree = execute(sum_merk_tree.into_iter().map(Ok), false, |_| Ok(())) + .unwrap() + .unwrap(); + + let left_link = tree.left.as_ref().unwrap().as_link(); + let right_link = tree.right.as_ref().unwrap().as_link(); + + assert_eq!( + left_link, + Link::Reference { 
+ hash: tree.left.as_ref().map(|node| node.hash).unwrap(), + sum: Some(3), + child_heights: (0, 0), + key: vec![1] + } + ); + + assert_eq!( + right_link, + Link::Reference { + hash: tree.right.as_ref().map(|node| node.hash).unwrap(), + sum: Some(1), + child_heights: (0, 0), + key: vec![3] + } + ); + } } diff --git a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index 6abe167e..49a492e2 100644 --- a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -164,6 +164,7 @@ pub fn apply_to_memonly( }) .unwrap() .expect("commit failed"); + println!("{:?}", &tree); assert_tree_invariants(&tree); tree }) @@ -260,7 +261,15 @@ pub fn make_tree_rand( /// Create tree with initial fixed values and apply `node count` Put ops using /// sequential keys using memory only +/// starting tree node is [0; 20] pub fn make_tree_seq(node_count: u64) -> TreeNode { + make_tree_seq_with_start_key(node_count, [0; 20].to_vec()) +} + +/// Create tree with initial fixed values and apply `node count` Put ops using +/// sequential keys using memory only +/// requires a starting key vector +pub fn make_tree_seq_with_start_key(node_count: u64, start_key: Vec) -> TreeNode { let batch_size = if node_count >= 10_000 { assert_eq!(node_count % 10_000, 0); 10_000 @@ -269,7 +278,8 @@ pub fn make_tree_seq(node_count: u64) -> TreeNode { }; let value = vec![123; 60]; - let mut tree = TreeNode::new(vec![0; 20], value, None, BasicMerkNode).unwrap(); + + let mut tree = TreeNode::new(start_key, value, None, BasicMerkNode).unwrap(); let batch_count = node_count / batch_size; for i in 0..batch_count { @@ -279,7 +289,6 @@ pub fn make_tree_seq(node_count: u64) -> TreeNode { tree } - /// Shortcut to open a Merk with a provided storage and batch pub fn empty_path_merk<'db, S>( storage: &'db S, diff --git a/merk/src/tree/link.rs b/merk/src/tree/link.rs index ab26159b..fa0d1563 100644 --- a/merk/src/tree/link.rs +++ b/merk/src/tree/link.rs @@ -46,7 +46,7 @@ use crate::HASH_LENGTH_U32; #[cfg(feature = 
"full")] /// Represents a reference to a child tree node. Links may or may not contain /// the child's `Tree` instance (storing its key if not). -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub enum Link { /// Represents a child tree node which has been pruned from memory, only /// retaining a reference to it (its key). The child node can always be diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index cb732b56..401b8722 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -37,7 +37,7 @@ mod encoding; #[cfg(feature = "full")] mod fuzz_tests; #[cfg(any(feature = "full", feature = "verify"))] -mod hash; +pub mod hash; #[cfg(feature = "full")] mod iter; #[cfg(feature = "full")] @@ -102,7 +102,7 @@ use crate::{error::Error, Error::Overflow}; #[cfg(feature = "full")] /// The fields of the `Tree` type, stored on the heap. -#[derive(Clone, Encode, Decode, Debug)] +#[derive(Clone, Encode, Decode, Debug, PartialEq)] pub struct TreeNodeInner { pub(crate) left: Option, pub(crate) right: Option, @@ -141,7 +141,7 @@ impl Terminated for Box {} /// Trees' inner fields are stored on the heap so that nodes can recursively /// link to each other, and so we can detach nodes from their parents, then /// reattach without allocating or freeing heap memory. 
-#[derive(Clone)] +#[derive(Clone, PartialEq)] pub struct TreeNode { pub(crate) inner: Box, pub(crate) old_value: Option>, diff --git a/merk/src/visualize.rs b/merk/src/visualize.rs index 4b3b2fb7..0235f92d 100644 --- a/merk/src/visualize.rs +++ b/merk/src/visualize.rs @@ -87,9 +87,9 @@ impl<'a, 'db, S: StorageContext<'db>, T: Visualize, F: Fn(&[u8]) -> T + Copy> Vi impl<'a, T: Visualize, F: Fn(&[u8]) -> T + Copy> Visualize for VisualizableTree<'a, F> { fn visualize(&self, mut drawer: Drawer) -> Result> { drawer.write(b"[key: ")?; - drawer = self.tree.inner.key_as_slice().visualize(drawer)?; + drawer = self.tree.inner.kv.key_as_ref().visualize(drawer)?; drawer.write(b", value: ")?; - drawer = (self.deserialize_fn)(self.tree.inner.value_as_slice()).visualize(drawer)?; + drawer = (self.deserialize_fn)(self.tree.inner.kv.value_as_slice()).visualize(drawer)?; drawer.down(); drawer.write(b"\n")?; diff --git a/storage/src/rocksdb_storage.rs b/storage/src/rocksdb_storage.rs index 90d0cc21..14c4df5a 100644 --- a/storage/src/rocksdb_storage.rs +++ b/storage/src/rocksdb_storage.rs @@ -28,7 +28,7 @@ //! GroveDB storage layer implemented over RocksDB backend. mod storage; -mod storage_context; +pub mod storage_context; pub mod test_utils; #[cfg(test)] mod tests; diff --git a/storage/src/rocksdb_storage/storage_context.rs b/storage/src/rocksdb_storage/storage_context.rs index 7481fc13..0611d51c 100644 --- a/storage/src/rocksdb_storage/storage_context.rs +++ b/storage/src/rocksdb_storage/storage_context.rs @@ -29,7 +29,7 @@ //! Implementation of prefixed storage context. 
mod batch; -mod context_immediate; +pub mod context_immediate; mod context_no_tx; mod context_tx; mod raw_iterator; diff --git a/tutorials/Cargo.toml b/tutorials/Cargo.toml index ec220b44..409a1c64 100644 --- a/tutorials/Cargo.toml +++ b/tutorials/Cargo.toml @@ -7,9 +7,14 @@ default-run = "tutorials" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -grovedb = { git = "https://github.com/dashpay/grovedb.git" } -path = { path = "../path" } +#grovedb = { git = "https://github.com/dashpay/grovedb.git" } +grovedb = { path = "../grovedb" } +grovedb-merk = { path = "../merk" } +grovedb-storage = { path = "../storage" } +grovedb-visualize = { path = "../visualize" } +grovedb-path = { path = "../path" } rand = "0.8.5" +hex = "0.4" [workspace] diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs new file mode 100644 index 00000000..fc9c058c --- /dev/null +++ b/tutorials/src/bin/replication.rs @@ -0,0 +1,244 @@ +use std::collections::VecDeque; +use std::path::Path; +use grovedb::{operations::insert::InsertOptions, Element, GroveDb, PathQuery, Query, Transaction, replication::StateSyncInfo}; +use grovedb::reference_path::ReferencePathType; +use rand::{distributions::Alphanumeric, Rng, }; +use grovedb::element::SumValue; +use grovedb_path::{SubtreePath}; + +const MAIN_ΚΕΥ: &[u8] = b"key_main"; +const MAIN_ΚΕΥ_EMPTY: &[u8] = b"key_main_empty"; + +const KEY_INT_0: &[u8] = b"key_int_0"; +const KEY_INT_REF_0: &[u8] = b"key_int_ref_0"; +const KEY_INT_A: &[u8] = b"key_sum_0"; +const ROOT_PATH: &[&[u8]] = &[]; + +// Allow insertions to overwrite trees +// This is necessary so the tutorial can be rerun easily +const INSERT_OPTIONS: Option = Some(InsertOptions { + validate_insertion_does_not_override: false, + validate_insertion_does_not_override_tree: false, + base_root_storage_is_free: true, +}); + +fn populate_db(grovedb_path: String) -> GroveDb { + let db = 
GroveDb::open(grovedb_path).unwrap(); + + insert_empty_tree_db(&db, ROOT_PATH, MAIN_ΚΕΥ); + insert_empty_tree_db(&db, ROOT_PATH, MAIN_ΚΕΥ_EMPTY); + insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_0); + + let tx = db.start_transaction(); + let batch_size = 100; + for i in 0..=10 { + insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_0], i * batch_size, i * batch_size + batch_size - 1, &tx); + } + let _ = db.commit_transaction(tx); + + insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_REF_0); + + let tx_2 = db.start_transaction(); + insert_range_ref_double_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_REF_0], KEY_INT_0, 1, 50, &tx_2); + let _ = db.commit_transaction(tx_2); + + insert_empty_sum_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_A); + + let tx_3 = db.start_transaction(); + insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_A], 1, 100, &tx_3); + insert_sum_element_db(&db, &[MAIN_ΚΕΥ, KEY_INT_A], 101, 150, &tx_3); + let _ = db.commit_transaction(tx_3); + db +} + +fn create_empty_db(grovedb_path: String) -> GroveDb { + let db = GroveDb::open(grovedb_path).unwrap(); + db +} + +fn main() { + let path_source = generate_random_path("../tutorial-storage/", "/db_0", 24); + let db_source = populate_db(path_source.clone()); + + let checkpoint_dir = path_source + "/checkpoint"; + let path_checkpoint = Path::new(checkpoint_dir.as_str()); + + db_source.create_checkpoint(&path_checkpoint).expect("cannot create checkpoint"); + let db_checkpoint_0 = GroveDb::open(path_checkpoint).expect("cannot open groveDB from checkpoint"); + + let path_destination = generate_random_path("../tutorial-storage/", "/db_copy", 24); + let db_destination = create_empty_db(path_destination.clone()); + + println!("\n######### root_hashes:"); + let root_hash_source = db_source.root_hash(None).unwrap().unwrap(); + println!("root_hash_source: {:?}", hex::encode(root_hash_source)); + let root_hash_checkpoint_0 = db_checkpoint_0.root_hash(None).unwrap().unwrap(); + println!("root_hash_checkpoint_0: {:?}", 
hex::encode(root_hash_checkpoint_0)); + let root_hash_destination = db_destination.root_hash(None).unwrap().unwrap(); + println!("root_hash_destination: {:?}", hex::encode(root_hash_destination)); + + println!("\n######### source_subtree_metadata of db_source"); + let subtrees_metadata_source = db_source.get_subtrees_metadata(None).unwrap(); + println!("{:?}", subtrees_metadata_source); + + println!("\n######### db_checkpoint_0 -> db_destination state sync"); + let state_info = db_destination.create_state_sync_info(); + let tx = db_destination.start_transaction(); + sync_db_demo(&db_checkpoint_0, &db_destination, state_info, &tx).unwrap(); + db_destination.commit_transaction(tx).unwrap().expect("expected to commit transaction"); + + println!("\n######### verify db_destination"); + let incorrect_hashes = db_destination.verify_grovedb(None).unwrap(); + if incorrect_hashes.len() > 0 { + println!("DB verification failed!"); + } + else { + println!("DB verification success"); + } + + println!("\n######### root_hashes:"); + let root_hash_source = db_source.root_hash(None).unwrap().unwrap(); + println!("root_hash_source: {:?}", hex::encode(root_hash_source)); + let root_hash_checkpoint_0 = db_checkpoint_0.root_hash(None).unwrap().unwrap(); + println!("root_hash_checkpoint_0: {:?}", hex::encode(root_hash_checkpoint_0)); + let root_hash_destination = db_destination.root_hash(None).unwrap().unwrap(); + println!("root_hash_destination: {:?}", hex::encode(root_hash_destination)); + + let query_path = &[MAIN_ΚΕΥ, KEY_INT_0]; + let query_key = (20487u32).to_be_bytes().to_vec(); + println!("\n######## Query on db_checkpoint_0:"); + query_db(&db_checkpoint_0, query_path, query_key.clone()); + println!("\n######## Query on db_destination:"); + query_db(&db_destination, query_path, query_key.clone()); + + return; + +} + +fn insert_empty_tree_db(db: &GroveDb, path: &[&[u8]], key: &[u8]) +{ + db.insert(path, key, Element::empty_tree(), INSERT_OPTIONS, None) + .unwrap() + 
.expect("successfully inserted tree"); +} +fn insert_range_values_db(db: &GroveDb, path: &[&[u8]], min_i: u32, max_i: u32, transaction: &Transaction) +{ + for i in min_i..=max_i { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + path, + &i_vec, + Element::new_item(i_vec.to_vec()), + INSERT_OPTIONS, + Some(&transaction), + ) + .unwrap() + .expect("successfully inserted values"); + } +} + +fn insert_range_ref_double_values_db(db: &GroveDb, path: &[&[u8]], ref_key: &[u8], min_i: u32, max_i: u32, transaction: &Transaction) +{ + for i in min_i..=max_i { + let i_vec = i.to_be_bytes().to_vec(); + let value = i * 2; + let value_vec = value.to_be_bytes().to_vec(); + db.insert( + path, + &i_vec, + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + MAIN_ΚΕΥ.to_vec(), + ref_key.to_vec(), + value_vec.to_vec() + ])), + INSERT_OPTIONS, + Some(&transaction), + ) + .unwrap() + .expect("successfully inserted values"); + } +} + +fn insert_empty_sum_tree_db(db: &GroveDb, path: &[&[u8]], key: &[u8]) +{ + db.insert(path, key, Element::empty_sum_tree(), INSERT_OPTIONS, None) + .unwrap() + .expect("successfully inserted tree"); +} +fn insert_sum_element_db(db: &GroveDb, path: &[&[u8]], min_i: u32, max_i: u32, transaction: &Transaction) +{ + for i in min_i..=max_i { + //let value : u32 = i; + let value = i as u64; + //let value: u64 = 1; + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + path, + &i_vec, + Element::new_sum_item(value as SumValue), + INSERT_OPTIONS, + Some(&transaction), + ) + .unwrap() + .expect("successfully inserted values"); + } +} +fn generate_random_path(prefix: &str, suffix: &str, len: usize) -> String { + let random_string: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(len) + .map(char::from) + .collect(); + format!("{}{}{}", prefix, random_string, suffix) +} + +fn query_db(db: &GroveDb, path: &[&[u8]], key: Vec) { + let path_vec: Vec> = path.iter() + .map(|&slice| slice.to_vec()) + .collect(); + + let mut query = 
Query::new(); + query.insert_key(key); + + let path_query = PathQuery::new_unsized(path_vec, query.clone()); + + let (elements, _) = db + .query_item_value(&path_query, true, None) + .unwrap() + .expect("expected successful get_path_query"); + for e in elements.into_iter() { + println!(">> {:?}", e); + } + + let proof = db.prove_query(&path_query).unwrap().unwrap(); + // Get hash from query proof and print to terminal along with GroveDB root hash. + let (verify_hash, _) = GroveDb::verify_query(&proof, &path_query).unwrap(); + println!("verify_hash: {:?}", hex::encode(verify_hash)); + if verify_hash == db.root_hash(None).unwrap().unwrap() { + println!("Query verified"); + } else { println!("Verification FAILED"); }; +} + +fn sync_db_demo( + source_db: &GroveDb, + target_db: &GroveDb, + state_sync_info: StateSyncInfo, + target_tx: &Transaction, +) -> Result<(), grovedb::Error> { + let app_hash = source_db.root_hash(None).value.unwrap(); + let (chunk_ids, mut state_sync_info) = target_db.start_snapshot_syncing(state_sync_info, app_hash, target_tx)?; + + let mut chunk_queue : VecDeque> = VecDeque::new(); + + chunk_queue.extend(chunk_ids); + + while let Some(chunk_id) = chunk_queue.pop_front() { + let ops = source_db.fetch_chunk(chunk_id.as_slice(), None)?; + let (more_chunks, new_state_sync_info) = target_db.apply_chunk(state_sync_info, (chunk_id.as_slice(), ops), target_tx)?; + state_sync_info = new_state_sync_info; + chunk_queue.extend(more_chunks); + } + + Ok(()) +} + From cda80d5f3bc1645f7169b89f643fc9894bfe1a4c Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Wed, 8 May 2024 11:59:17 +0300 Subject: [PATCH 20/37] feat: chunk ids as vec bytes (#294) * chunk ids as a vec of bytes * more work * more work * more work * fmt --- grovedb/src/replication.rs | 56 ++++------- merk/src/merk/chunks.rs | 50 +++++----- merk/src/merk/mod.rs | 34 +++---- merk/src/merk/restore.rs | 144 ++++++++++++++++++----------- merk/src/proofs/chunk/util.rs | 141 
+++++++++++++++------------- tutorials/src/bin/proofs.rs | 2 +- tutorials/src/bin/query-complex.rs | 2 +- tutorials/src/bin/query-simple.rs | 2 +- tutorials/src/bin/replication.rs | 2 +- 9 files changed, 231 insertions(+), 202 deletions(-) diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 0484cfa1..e4f3547e 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -89,7 +89,7 @@ pub fn util_path_to_string(path: &[Vec]) -> Vec { // Splits the given global chunk id into [SUBTREE_PREFIX:CHUNK_ID] pub fn util_split_global_chunk_id( global_chunk_id: &[u8], -) -> Result<(crate::SubtreePrefix, String), Error> { +) -> Result<(crate::SubtreePrefix, Vec), Error> { let chunk_prefix_length: usize = 32; if global_chunk_id.len() < chunk_prefix_length { return Err(Error::CorruptedData( @@ -101,13 +101,7 @@ pub fn util_split_global_chunk_id( let mut array = [0u8; 32]; array.copy_from_slice(chunk_prefix); let chunk_prefix_key: crate::SubtreePrefix = array; - let str_chunk_id = String::from_utf8(chunk_id.to_vec()); - match str_chunk_id { - Ok(s) => Ok((chunk_prefix_key, s)), - Err(_) => Err(Error::CorruptedData( - "unable to convert chunk id to string".to_string(), - )), - } + Ok((chunk_prefix_key, chunk_id.to_vec())) } #[cfg(feature = "full")] @@ -244,20 +238,15 @@ impl GroveDb { let chunk_producer_res = ChunkProducer::new(&merk); match chunk_producer_res { - Ok(mut chunk_producer) => match std::str::from_utf8(chunk_id) { - Ok(chunk_id_str) => { - let chunk_res = chunk_producer.chunk(chunk_id_str); - match chunk_res { - Ok((chunk, _)) => Ok(chunk), - Err(_) => Err(Error::CorruptedData( - "Unable to create to load chunk".to_string(), - )), - } + Ok(mut chunk_producer) => { + let chunk_res = chunk_producer.chunk(chunk_id); + match chunk_res { + Ok((chunk, _)) => Ok(chunk), + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), } - Err(_) => Err(Error::CorruptedData( - "Unable to process chunk 
id".to_string(), - )), - }, + } Err(_) => Err(Error::CorruptedData( "Unable to create Chunk producer".to_string(), )), @@ -274,20 +263,15 @@ impl GroveDb { let chunk_producer_res = ChunkProducer::new(&merk); match chunk_producer_res { - Ok(mut chunk_producer) => match std::str::from_utf8(chunk_id) { - Ok(chunk_id_str) => { - let chunk_res = chunk_producer.chunk(chunk_id_str); - match chunk_res { - Ok((chunk, _)) => Ok(chunk), - Err(_) => Err(Error::CorruptedData( - "Unable to create to load chunk".to_string(), - )), - } + Ok(mut chunk_producer) => { + let chunk_res = chunk_producer.chunk(chunk_id); + match chunk_res { + Ok((chunk, _)) => Ok(chunk), + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), } - Err(_) => Err(Error::CorruptedData( - "Unable to process chunk id".to_string(), - )), - }, + } Err(_) => Err(Error::CorruptedData( "Unable to create Chunk producer".to_string(), )), @@ -380,12 +364,12 @@ impl GroveDb { } state_sync_info.pending_chunks.remove(global_chunk_id); if !chunk_data.is_empty() { - match restorer.process_chunk(chunk_id.to_string(), chunk_data) { + match restorer.process_chunk(&chunk_id, chunk_data) { Ok(next_chunk_ids) => { state_sync_info.num_processed_chunks += 1; for next_chunk_id in next_chunk_ids { let mut next_global_chunk_id = chunk_prefix.to_vec(); - next_global_chunk_id.extend(next_chunk_id.as_bytes().to_vec()); + next_global_chunk_id.extend(next_chunk_id.to_vec()); state_sync_info .pending_chunks .insert(next_global_chunk_id.clone()); diff --git a/merk/src/merk/chunks.rs b/merk/src/merk/chunks.rs index 8f840f91..f6b1b64c 100644 --- a/merk/src/merk/chunks.rs +++ b/merk/src/merk/chunks.rs @@ -38,10 +38,10 @@ use crate::{ chunk_op::ChunkOp, error::ChunkError, util::{ - chunk_height, chunk_id_from_traversal_instruction, - chunk_id_from_traversal_instruction_with_recovery, generate_traversal_instruction, - generate_traversal_instruction_as_string, number_of_chunks, - 
string_as_traversal_instruction, + chunk_height, chunk_index_from_traversal_instruction, + chunk_index_from_traversal_instruction_with_recovery, + generate_traversal_instruction, generate_traversal_instruction_as_vec_bytes, + number_of_chunks, vec_bytes_as_traversal_instruction, }, }, Node, Op, @@ -72,14 +72,14 @@ impl SubtreeChunk { #[derive(Debug)] pub struct MultiChunk { pub chunk: Vec, - pub next_index: Option, + pub next_index: Option>, pub remaining_limit: Option, } impl MultiChunk { pub fn new( chunk: Vec, - next_index: Option, + next_index: Option>, remaining_limit: Option, ) -> Self { Self { @@ -131,17 +131,17 @@ where } /// Returns the chunk at a given chunk id. - pub fn chunk(&mut self, chunk_id: &str) -> Result<(Vec, Option), Error> { - let traversal_instructions = string_as_traversal_instruction(chunk_id)?; - let chunk_index = chunk_id_from_traversal_instruction_with_recovery( + pub fn chunk(&mut self, chunk_id: &[u8]) -> Result<(Vec, Option>), Error> { + let traversal_instructions = vec_bytes_as_traversal_instruction(chunk_id)?; + let chunk_index = chunk_index_from_traversal_instruction_with_recovery( traversal_instructions.as_slice(), self.height, )?; let (chunk, next_index) = self.chunk_internal(chunk_index, traversal_instructions)?; - let index_string = next_index - .map(|index| generate_traversal_instruction_as_string(self.height, index)) + let next_chunk_id = next_index + .map(|index| generate_traversal_instruction_as_vec_bytes(self.height, index)) .transpose()?; - Ok((chunk, index_string)) + Ok((chunk, next_chunk_id)) } /// Returns the chunk at the given index @@ -186,12 +186,12 @@ where /// chunks or hit some optional limit pub fn multi_chunk_with_limit( &mut self, - chunk_id: &str, + chunk_id: &[u8], limit: Option, ) -> Result { // we want to convert the chunk id to the index - let chunk_index = string_as_traversal_instruction(chunk_id).and_then(|instruction| { - chunk_id_from_traversal_instruction(instruction.as_slice(), self.height) + let 
chunk_index = vec_bytes_as_traversal_instruction(chunk_id).and_then(|instruction| { + chunk_index_from_traversal_instruction(instruction.as_slice(), self.height) })?; self.multi_chunk_with_limit_and_index(chunk_index, limit) } @@ -267,11 +267,11 @@ where current_limit = subtree_multi_chunk.remaining_limit; } - let index_string = current_index - .map(|index| generate_traversal_instruction_as_string(self.height, index)) + let index_bytes = current_index + .map(|index| generate_traversal_instruction_as_vec_bytes(self.height, index)) .transpose()?; - Ok(MultiChunk::new(chunk, index_string, current_limit)) + Ok(MultiChunk::new(chunk, index_bytes, current_limit)) } /// Packs as many chunks as it can from a starting chunk index, into a @@ -371,7 +371,7 @@ where /// optimizing throughput compared to random access. // TODO: this is not better than random access, as we are not keeping state // that will make this more efficient, decide if this should be fixed or not - fn next_chunk(&mut self) -> Option, Option), Error>> { + fn next_chunk(&mut self) -> Option, Option>), Error>> { let max_index = number_of_chunks(self.height); if self.index > max_index { return None; @@ -383,7 +383,9 @@ where self.chunk_with_index(self.index) .and_then(|(chunk, chunk_index)| { chunk_index - .map(|index| generate_traversal_instruction_as_string(self.height, index)) + .map(|index| { + generate_traversal_instruction_as_vec_bytes(self.height, index) + }) .transpose() .map(|v| (chunk, v)) }), @@ -396,7 +398,7 @@ impl<'db, S> Iterator for ChunkProducer<'db, S> where S: StorageContext<'db>, { - type Item = Result<(Vec, Option), Error>; + type Item = Result<(Vec, Option>), Error>; fn next(&mut self) -> Option { self.next_chunk() @@ -424,7 +426,7 @@ mod test { tests::{traverse_get_kv_feature_type, traverse_get_node_hash}, LEFT, RIGHT, }, - util::traversal_instruction_as_string, + util::traversal_instruction_as_vec_bytes, }, tree::execute, Tree, @@ -1027,7 +1029,7 @@ mod test { // ensure that the 
remaining limit, next index and values given are correct // if limit is smaller than first chunk, we should get an error - let chunk_result = chunk_producer.multi_chunk_with_limit("", Some(5)); + let chunk_result = chunk_producer.multi_chunk_with_limit(vec![].as_slice(), Some(5)); assert!(matches!( chunk_result, Err(Error::ChunkingError(ChunkError::LimitTooSmall(..))) @@ -1052,7 +1054,7 @@ mod test { .expect("should generate chunk"); assert_eq!( chunk_result.next_index, - Some(traversal_instruction_as_string( + Some(traversal_instruction_as_vec_bytes( &generate_traversal_instruction(4, 4).unwrap() )) ); diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index 94b99add..0a0b805e 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -63,7 +63,7 @@ use crate::{ proofs::{ chunk::{ chunk::{LEFT, RIGHT}, - util::traversal_instruction_as_string, + util::traversal_instruction_as_vec_bytes, }, query::query_item::QueryItem, Query, @@ -556,11 +556,11 @@ where pub fn verify( &self, skip_sum_checks: bool, - ) -> (BTreeMap, BTreeMap>) { + ) -> (BTreeMap, CryptoHash>, BTreeMap, Vec>) { let tree = self.tree.take(); - let mut bad_link_map: BTreeMap = BTreeMap::new(); - let mut parent_keys: BTreeMap> = BTreeMap::new(); + let mut bad_link_map: BTreeMap, CryptoHash> = BTreeMap::new(); + let mut parent_keys: BTreeMap, Vec> = BTreeMap::new(); let mut root_traversal_instruction = vec![]; // TODO: remove clone @@ -581,8 +581,8 @@ where &self, tree: &TreeNode, traversal_instruction: &mut Vec, - bad_link_map: &mut BTreeMap, - parent_keys: &mut BTreeMap>, + bad_link_map: &mut BTreeMap, CryptoHash>, + parent_keys: &mut BTreeMap, Vec>, skip_sum_checks: bool, ) { if let Some(link) = tree.link(LEFT) { @@ -617,8 +617,8 @@ where link: &Link, parent_key: &[u8], traversal_instruction: &mut Vec, - bad_link_map: &mut BTreeMap, - parent_keys: &mut BTreeMap>, + bad_link_map: &mut BTreeMap, CryptoHash>, + parent_keys: &mut BTreeMap, Vec>, skip_sum_checks: bool, ) { let (hash, key, sum) = 
match link { @@ -639,7 +639,7 @@ where _ => todo!(), }; - let instruction_id = traversal_instruction_as_string(traversal_instruction); + let instruction_id = traversal_instruction_as_vec_bytes(traversal_instruction); let node = TreeNode::get( &self.storage, key, @@ -648,29 +648,29 @@ where .unwrap(); if node.is_err() { - bad_link_map.insert(instruction_id.clone(), hash); - parent_keys.insert(instruction_id, parent_key.to_vec()); + bad_link_map.insert(instruction_id.to_vec(), hash); + parent_keys.insert(instruction_id.to_vec(), parent_key.to_vec()); return; } let node = node.unwrap(); if node.is_none() { - bad_link_map.insert(instruction_id.clone(), hash); - parent_keys.insert(instruction_id, parent_key.to_vec()); + bad_link_map.insert(instruction_id.to_vec(), hash); + parent_keys.insert(instruction_id.to_vec(), parent_key.to_vec()); return; } let node = node.unwrap(); if node.hash().unwrap() != hash { - bad_link_map.insert(instruction_id.clone(), hash); - parent_keys.insert(instruction_id, parent_key.to_vec()); + bad_link_map.insert(instruction_id.to_vec(), hash); + parent_keys.insert(instruction_id.to_vec(), parent_key.to_vec()); return; } // Need to skip this when restoring a sum tree if !skip_sum_checks && node.sum().unwrap() != sum { - bad_link_map.insert(instruction_id.clone(), hash); - parent_keys.insert(instruction_id, parent_key.to_vec()); + bad_link_map.insert(instruction_id.to_vec(), hash); + parent_keys.insert(instruction_id.to_vec(), parent_key.to_vec()); return; } diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index e2439f5c..9e26b1af 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -41,7 +41,7 @@ use crate::{ chunk::{LEFT, RIGHT}, chunk_op::ChunkOp, error::{ChunkError, ChunkError::InternalError}, - util::{string_as_traversal_instruction, traversal_instruction_as_string}, + util::{traversal_instruction_as_vec_bytes, vec_bytes_as_traversal_instruction}, }, tree::{execute, Child, Tree as ProofTree}, Node, Op, @@ 
-57,10 +57,10 @@ use crate::{ /// already. pub struct Restorer { merk: Merk, - chunk_id_to_root_hash: BTreeMap, + chunk_id_to_root_hash: BTreeMap, CryptoHash>, parent_key_value_hash: Option, // this is used to keep track of parents whose links need to be rewritten - parent_keys: BTreeMap>, + parent_keys: BTreeMap, Vec>, } impl<'db, S: StorageContext<'db>> Restorer { @@ -72,7 +72,7 @@ impl<'db, S: StorageContext<'db>> Restorer { parent_key_value_hash: Option, ) -> Self { let mut chunk_id_to_root_hash = BTreeMap::new(); - chunk_id_to_root_hash.insert(traversal_instruction_as_string(&[]), expected_root_hash); + chunk_id_to_root_hash.insert(traversal_instruction_as_vec_bytes(&[]), expected_root_hash); Self { merk, chunk_id_to_root_hash, @@ -81,17 +81,16 @@ impl<'db, S: StorageContext<'db>> Restorer { } } - // TODO: consider converting chunk id to a vec /// Processes a chunk at some chunk id, returns the chunks id's of chunks /// that can be requested pub fn process_chunk( &mut self, - chunk_id: String, + chunk_id: &[u8], chunk: Vec, - ) -> Result, Error> { + ) -> Result>, Error> { let expected_root_hash = self .chunk_id_to_root_hash - .get(&chunk_id) + .get(chunk_id) .ok_or(Error::ChunkRestoringError(ChunkError::UnexpectedChunk))?; let mut parent_key_value_hash: Option = None; @@ -100,14 +99,14 @@ impl<'db, S: StorageContext<'db>> Restorer { } let chunk_tree = Self::verify_chunk(chunk, expected_root_hash, &parent_key_value_hash)?; - let mut root_traversal_instruction = string_as_traversal_instruction(&chunk_id)?; + let mut root_traversal_instruction = vec_bytes_as_traversal_instruction(chunk_id)?; if root_traversal_instruction.is_empty() { let _ = self.merk.set_base_root_key(Some(chunk_tree.key().to_vec())); } else { // every non root chunk has some associated parent with an placeholder link // here we update the placeholder link to represent the true data - self.rewrite_parent_link(&chunk_id, &root_traversal_instruction, &chunk_tree)?; + 
self.rewrite_parent_link(chunk_id, &root_traversal_instruction, &chunk_tree)?; } // next up, we need to write the chunk and build the map again @@ -115,7 +114,7 @@ impl<'db, S: StorageContext<'db>> Restorer { if chunk_write_result.is_ok() { // if we were able to successfully write the chunk, we can remove // the chunk expected root hash from our chunk id map - self.chunk_id_to_root_hash.remove(&chunk_id); + self.chunk_id_to_root_hash.remove(chunk_id); } chunk_write_result @@ -123,10 +122,13 @@ impl<'db, S: StorageContext<'db>> Restorer { /// Process multi chunks (space optimized chunk proofs that can contain /// multiple singular chunks) - pub fn process_multi_chunk(&mut self, multi_chunk: Vec) -> Result, Error> { + pub fn process_multi_chunk( + &mut self, + multi_chunk: Vec, + ) -> Result>, Error> { let mut expect_chunk_id = true; let mut chunk_ids = vec![]; - let mut current_chunk_id: String = "".to_string(); + let mut current_chunk_id = vec![]; for chunk_op in multi_chunk { if (matches!(chunk_op, ChunkOp::ChunkId(..)) && !expect_chunk_id) @@ -138,11 +140,11 @@ impl<'db, S: StorageContext<'db>> Restorer { } match chunk_op { ChunkOp::ChunkId(instructions) => { - current_chunk_id = traversal_instruction_as_string(&instructions); + current_chunk_id = traversal_instruction_as_vec_bytes(&instructions); } ChunkOp::Chunk(chunk) => { // TODO: remove clone - let next_chunk_ids = self.process_chunk(current_chunk_id.clone(), chunk)?; + let next_chunk_ids = self.process_chunk(¤t_chunk_id, chunk)?; chunk_ids.extend(next_chunk_ids); } } @@ -210,7 +212,7 @@ impl<'db, S: StorageContext<'db>> Restorer { &mut self, chunk_tree: ProofTree, traversal_instruction: &mut Vec, - ) -> Result, Error> { + ) -> Result>, Error> { // this contains all the elements we want to write to storage let mut batch = self.merk.storage.new_batch(); let mut new_chunk_ids = Vec::new(); @@ -242,9 +244,10 @@ impl<'db, S: StorageContext<'db>> Restorer { Node::Hash(hash) => { // the node hash points to the 
root of another chunk // we get the chunk id and add the hash to restorer state - let chunk_id = traversal_instruction_as_string(node_traversal_instruction); - new_chunk_ids.push(chunk_id.clone()); - self.chunk_id_to_root_hash.insert(chunk_id.clone(), *hash); + let chunk_id = + traversal_instruction_as_vec_bytes(node_traversal_instruction); + new_chunk_ids.push(chunk_id.to_vec()); + self.chunk_id_to_root_hash.insert(chunk_id.to_vec(), *hash); // TODO: handle unwrap self.parent_keys .insert(chunk_id, parent_key.unwrap().to_owned()); @@ -276,7 +279,7 @@ impl<'db, S: StorageContext<'db>> Restorer { /// we need to update the parent link to reflect the correct data. fn rewrite_parent_link( &mut self, - chunk_id: &str, + chunk_id: &[u8], traversal_instruction: &[bool], chunk_tree: &ProofTree, ) -> Result<(), Error> { @@ -663,7 +666,7 @@ mod tests { // initial restorer state should contain just the root hash of the source merk assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); assert_eq!( - restorer.chunk_id_to_root_hash.get(""), + restorer.chunk_id_to_root_hash.get(vec![].as_slice()), Some(merk.root_hash().unwrap()).as_ref() ); @@ -671,7 +674,10 @@ mod tests { let (chunk, _) = chunk_producer.chunk_with_index(1).unwrap(); // apply first chunk let new_chunk_ids = restorer - .process_chunk(traversal_instruction_as_string(&vec![]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(vec![].as_slice()), + chunk, + ) .expect("should process chunk successfully"); assert_eq!(new_chunk_ids.len(), 4); @@ -680,22 +686,22 @@ mod tests { assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); // assert all the chunk hash values assert_eq!( - restorer.chunk_id_to_root_hash.get("11"), + restorer.chunk_id_to_root_hash.get(vec![1, 1].as_slice()), Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[LEFT, LEFT])).unwrap()) .as_ref() ); assert_eq!( - restorer.chunk_id_to_root_hash.get("10"), + restorer.chunk_id_to_root_hash.get(vec![1, 0].as_slice()), 
Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[LEFT, RIGHT])).unwrap()) .as_ref() ); assert_eq!( - restorer.chunk_id_to_root_hash.get("01"), + restorer.chunk_id_to_root_hash.get(vec![0, 1].as_slice()), Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT])).unwrap()) .as_ref() ); assert_eq!( - restorer.chunk_id_to_root_hash.get("00"), + restorer.chunk_id_to_root_hash.get(vec![0, 0].as_slice()), Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[RIGHT, RIGHT])).unwrap()) .as_ref() ); @@ -704,18 +710,26 @@ mod tests { let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk(traversal_instruction_as_string(&vec![LEFT, LEFT]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&vec![LEFT, LEFT]), + chunk, + ) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element assert_eq!(restorer.chunk_id_to_root_hash.len(), 3); - assert_eq!(restorer.chunk_id_to_root_hash.get("11"), None); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![1, 1].as_slice()), + None + ); // let's try to apply the second chunk again, should not work let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap(); // apply second chunk - let chunk_process_result = - restorer.process_chunk(traversal_instruction_as_string(&vec![LEFT, LEFT]), chunk); + let chunk_process_result = restorer.process_chunk( + &traversal_instruction_as_vec_bytes(&vec![LEFT, LEFT]), + chunk, + ); assert!(chunk_process_result.is_err()); assert!(matches!( chunk_process_result, @@ -725,8 +739,10 @@ mod tests { // next let's get a random but expected chunk and work with that e.g. 
chunk 4 // but let's apply it to the wrong place let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap(); - let chunk_process_result = - restorer.process_chunk(traversal_instruction_as_string(&vec![LEFT, RIGHT]), chunk); + let chunk_process_result = restorer.process_chunk( + &traversal_instruction_as_vec_bytes(&vec![LEFT, RIGHT]), + chunk, + ); assert!(chunk_process_result.is_err()); assert!(matches!( chunk_process_result, @@ -739,34 +755,52 @@ mod tests { let (chunk, _) = chunk_producer.chunk_with_index(5).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk(traversal_instruction_as_string(&vec![RIGHT, RIGHT]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&vec![RIGHT, RIGHT]), + chunk, + ) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element assert_eq!(restorer.chunk_id_to_root_hash.len(), 2); - assert_eq!(restorer.chunk_id_to_root_hash.get("00"), None); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![0, 0].as_slice()), + None + ); // correctly apply chunk 3 let (chunk, _) = chunk_producer.chunk_with_index(3).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk(traversal_instruction_as_string(&vec![LEFT, RIGHT]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&vec![LEFT, RIGHT]), + chunk, + ) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); - assert_eq!(restorer.chunk_id_to_root_hash.get("10"), None); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![1, 0].as_slice()), + None + ); // correctly apply chunk 4 let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk(traversal_instruction_as_string(&vec![RIGHT, LEFT]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&vec![RIGHT, LEFT]), + chunk, + ) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map 
should have 1 less element assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); - assert_eq!(restorer.chunk_id_to_root_hash.get("01"), None); + assert_eq!( + restorer.chunk_id_to_root_hash.get(vec![0, 1].as_slice()), + None + ); // finalize merk let restored_merk = restorer.finalize().expect("should finalized successfully"); @@ -861,13 +895,11 @@ mod tests { let mut restorer = Restorer::new(restoration_merk, source_merk.root_hash().unwrap(), None); // perform chunk production and processing - let mut chunk_id_opt = Some("".to_string()); + let mut chunk_id_opt = Some(vec![]); while let Some(chunk_id) = chunk_id_opt { - let (chunk, next_chunk_id) = chunk_producer - .chunk(chunk_id.as_str()) - .expect("should get chunk"); + let (chunk, next_chunk_id) = chunk_producer.chunk(&chunk_id).expect("should get chunk"); restorer - .process_chunk(chunk_id.to_string(), chunk) + .process_chunk(&chunk_id, chunk) .expect("should process chunk successfully"); chunk_id_opt = next_chunk_id; } @@ -931,13 +963,13 @@ mod tests { assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); assert_eq!( - restorer.chunk_id_to_root_hash.get(""), + restorer.chunk_id_to_root_hash.get(vec![].as_slice()), Some(merk.root_hash().unwrap()).as_ref() ); // generate multi chunk from root with no limit let chunk = chunk_producer - .multi_chunk_with_limit("", None) + .multi_chunk_with_limit(vec![].as_slice(), None) .expect("should generate multichunk"); assert_eq!(chunk.chunk.len(), 2); @@ -996,14 +1028,14 @@ mod tests { assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); assert_eq!( - restorer.chunk_id_to_root_hash.get(""), + restorer.chunk_id_to_root_hash.get(vec![].as_slice()), Some(merk.root_hash().unwrap()).as_ref() ); // first restore the first chunk let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1).unwrap(); let new_chunk_ids = restorer - .process_chunk(traversal_instruction_as_string(&vec![]), chunk) + .process_chunk(&traversal_instruction_as_vec_bytes(&vec![]), chunk) .expect("should 
process chunk"); assert_eq!(new_chunk_ids.len(), 4); assert_eq!(next_chunk_index, Some(2)); @@ -1068,12 +1100,12 @@ mod tests { // build multi chunk with with limit of 325 let multi_chunk = chunk_producer - .multi_chunk_with_limit("", Some(600)) + .multi_chunk_with_limit(vec![].as_slice(), Some(600)) .unwrap(); // should only contain the first chunk assert_eq!(multi_chunk.chunk.len(), 2); // should point to chunk 2 - assert_eq!(multi_chunk.next_index, Some("11".to_string())); + assert_eq!(multi_chunk.next_index, Some(vec![1, 1])); let next_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); assert_eq!(next_ids.len(), 4); assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); @@ -1083,10 +1115,10 @@ mod tests { // with limit just above 642 should get 2 chunks (2 and 3) // disjoint, so multi chunk len should be 4 let multi_chunk = chunk_producer - .multi_chunk_with_limit(multi_chunk.next_index.unwrap().as_str(), Some(645)) + .multi_chunk_with_limit(multi_chunk.next_index.unwrap().as_slice(), Some(645)) .unwrap(); assert_eq!(multi_chunk.chunk.len(), 4); - assert_eq!(multi_chunk.next_index, Some("01".to_string())); + assert_eq!(multi_chunk.next_index, Some(vec![0u8, 1u8])); let next_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); // chunks 2 and 3 are leaf chunks assert_eq!(next_ids.len(), 0); @@ -1095,7 +1127,7 @@ mod tests { // get the last 2 chunks let multi_chunk = chunk_producer - .multi_chunk_with_limit(multi_chunk.next_index.unwrap().as_str(), Some(645)) + .multi_chunk_with_limit(multi_chunk.next_index.unwrap().as_slice(), Some(645)) .unwrap(); assert_eq!(multi_chunk.chunk.len(), 4); assert_eq!(multi_chunk.next_index, None); @@ -1155,10 +1187,10 @@ mod tests { let mut restorer = Restorer::new(restoration_merk, source_merk.root_hash().unwrap(), None); // perform chunk production and processing - let mut chunk_id_opt = Some("".to_string()); + let mut chunk_id_opt = Some(vec![]); while let Some(chunk_id) = chunk_id_opt { let multi_chunk = 
chunk_producer - .multi_chunk_with_limit(chunk_id.as_str(), limit) + .multi_chunk_with_limit(&chunk_id, limit) .expect("should get chunk"); restorer .process_multi_chunk(multi_chunk.chunk) @@ -1234,14 +1266,14 @@ mod tests { assert_eq!(restorer.chunk_id_to_root_hash.len(), 1); assert_eq!( - restorer.chunk_id_to_root_hash.get(""), + restorer.chunk_id_to_root_hash.get(vec![].as_slice()), Some(merk.root_hash().unwrap()).as_ref() ); // first restore the first chunk let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1).unwrap(); let new_chunk_ids = restorer - .process_chunk(traversal_instruction_as_string(&vec![]), chunk) + .process_chunk(&traversal_instruction_as_vec_bytes(&vec![]), chunk) .expect("should process chunk"); assert_eq!(new_chunk_ids.len(), 4); assert_eq!(next_chunk_index, Some(2)); diff --git a/merk/src/proofs/chunk/util.rs b/merk/src/proofs/chunk/util.rs index 2f64ba8d..39c513b7 100644 --- a/merk/src/proofs/chunk/util.rs +++ b/merk/src/proofs/chunk/util.rs @@ -170,14 +170,17 @@ fn exit_node_count(height: usize) -> usize { 2_usize.pow(height as u32) } -/// Generate instruction for traversing to a given chunk in a binary tree -pub fn generate_traversal_instruction(height: usize, chunk_id: usize) -> Result, Error> { +/// Generate instruction for traversing to a given chunk index in a binary tree +pub fn generate_traversal_instruction( + height: usize, + chunk_index: usize, +) -> Result, Error> { let mut instructions = vec![]; let total_chunk_count = number_of_chunks(height); // out of bounds - if chunk_id < 1 || chunk_id > total_chunk_count { + if chunk_index < 1 || chunk_index > total_chunk_count { return Err(Error::ChunkingError(ChunkError::OutOfBounds( "chunk id out of bounds", ))); @@ -202,7 +205,7 @@ pub fn generate_traversal_instruction(height: usize, chunk_id: usize) -> Result< // checks if we last decision we made got us to the desired chunk id let advance_result = chunk_range.advance_range_start().unwrap(); chunk_range = 
advance_result.0; - if advance_result.1 == chunk_id { + if advance_result.1 == chunk_index { return Ok(instructions); } } else { @@ -211,7 +214,7 @@ pub fn generate_traversal_instruction(height: usize, chunk_id: usize) -> Result< // we first check which half the desired chunk is // then follow that path let chunk_id_half = chunk_range - .which_half(chunk_id) + .which_half(chunk_index) .expect("chunk id must exist in range"); instructions.push(chunk_id_half); chunk_range = chunk_range @@ -226,9 +229,9 @@ pub fn generate_traversal_instruction(height: usize, chunk_id: usize) -> Result< Ok(instructions) } -/// Determine the chunk id given the traversal instruction and the max height of -/// the tree -pub fn chunk_id_from_traversal_instruction( +/// Determine the chunk index given the traversal instruction and the max height +/// of the tree +pub fn chunk_index_from_traversal_instruction( traversal_instruction: &[bool], height: usize, ) -> Result { @@ -238,7 +241,7 @@ pub fn chunk_id_from_traversal_instruction( } let mut chunk_count = number_of_chunks(height); - let mut current_chunk_id = 1; + let mut current_chunk_index = 1; let mut layer_heights = chunk_height_per_layer(height); let last_layer_height = layer_heights.pop().expect("confirmed not empty"); @@ -301,62 +304,64 @@ pub fn chunk_id_from_traversal_instruction( chunk_count /= exit_node_count(layer_height); - current_chunk_id = current_chunk_id + offset_multiplier * chunk_count + 1; + current_chunk_index = current_chunk_index + offset_multiplier * chunk_count + 1; start_index = end_index; } - Ok(current_chunk_id) + Ok(current_chunk_index) } -/// Determine the chunk id given the traversal instruction and the max height of -/// the tree. This can recover from traversal instructions not pointing to a +/// Determine the chunk index given the traversal instruction and the max height +/// of the tree. 
This can recover from traversal instructions not pointing to a /// chunk boundary, in such a case, it backtracks until it hits a chunk /// boundary. -pub fn chunk_id_from_traversal_instruction_with_recovery( +pub fn chunk_index_from_traversal_instruction_with_recovery( traversal_instruction: &[bool], height: usize, ) -> Result { - let chunk_id_result = chunk_id_from_traversal_instruction(traversal_instruction, height); - if chunk_id_result.is_err() { - return chunk_id_from_traversal_instruction_with_recovery( + let chunk_index_result = chunk_index_from_traversal_instruction(traversal_instruction, height); + if chunk_index_result.is_err() { + return chunk_index_from_traversal_instruction_with_recovery( &traversal_instruction[0..traversal_instruction.len() - 1], height, ); } - chunk_id_result + chunk_index_result } -/// Generate instruction for traversing to a given chunk in a binary tree, -/// returns string representation -pub fn generate_traversal_instruction_as_string( +/// Generate instruction for traversing to a given chunk index in a binary tree, +/// returns vec bytes representation +pub fn generate_traversal_instruction_as_vec_bytes( height: usize, - chunk_id: usize, -) -> Result { - let instruction = generate_traversal_instruction(height, chunk_id)?; - Ok(traversal_instruction_as_string(&instruction)) + chunk_index: usize, +) -> Result, Error> { + let instruction = generate_traversal_instruction(height, chunk_index)?; + Ok(traversal_instruction_as_vec_bytes(&instruction)) } -/// Convert traversal instruction to byte string +/// Convert traversal instruction to bytes vec /// 1 represents left (true) /// 0 represents right (false) -pub fn traversal_instruction_as_string(instruction: &[bool]) -> String { +pub fn traversal_instruction_as_vec_bytes(instruction: &[bool]) -> Vec { instruction .iter() - .map(|v| if *v { "1" } else { "0" }) + .map(|v| if *v { 1u8 } else { 0u8 }) .collect() } -/// Converts a string that represents a traversal instruction +/// 
Converts a vec bytes that represents a traversal instruction /// to a vec of bool, true = left and false = right -pub fn string_as_traversal_instruction(instruction_string: &str) -> Result, Error> { - instruction_string - .chars() - .map(|char| match char { - '1' => Ok(LEFT), - '0' => Ok(RIGHT), +pub fn vec_bytes_as_traversal_instruction( + instruction_vec_bytes: &[u8], +) -> Result, Error> { + instruction_vec_bytes + .iter() + .map(|byte| match byte { + 1u8 => Ok(LEFT), + 0u8 => Ok(RIGHT), _ => Err(Error::ChunkingError(ChunkError::BadTraversalInstruction( - "failed to parse instruction string", + "failed to parse instruction vec bytes", ))), }) .collect() @@ -568,26 +573,32 @@ mod test { #[test] fn test_traversal_instruction_as_string() { - assert_eq!(traversal_instruction_as_string(&vec![]), ""); - assert_eq!(traversal_instruction_as_string(&vec![LEFT]), "1"); - assert_eq!(traversal_instruction_as_string(&vec![RIGHT]), "0"); + assert_eq!(traversal_instruction_as_vec_bytes(&vec![]), vec![]); + assert_eq!(traversal_instruction_as_vec_bytes(&vec![LEFT]), vec![1u8]); + assert_eq!(traversal_instruction_as_vec_bytes(&vec![RIGHT]), vec![0u8]); assert_eq!( - traversal_instruction_as_string(&vec![RIGHT, LEFT, LEFT, RIGHT]), - "0110" + traversal_instruction_as_vec_bytes(&vec![RIGHT, LEFT, LEFT, RIGHT]), + vec![0u8, 1u8, 1u8, 0u8] ); } #[test] fn test_instruction_string_to_traversal_instruction() { - assert_eq!(string_as_traversal_instruction("1").unwrap(), vec![LEFT]); - assert_eq!(string_as_traversal_instruction("0").unwrap(), vec![RIGHT]); assert_eq!( - string_as_traversal_instruction("001").unwrap(), + vec_bytes_as_traversal_instruction(&vec![1u8]).unwrap(), + vec![LEFT] + ); + assert_eq!( + vec_bytes_as_traversal_instruction(&vec![0u8]).unwrap(), + vec![RIGHT] + ); + assert_eq!( + vec_bytes_as_traversal_instruction(&vec![0u8, 0u8, 1u8]).unwrap(), vec![RIGHT, RIGHT, LEFT] ); - assert!(string_as_traversal_instruction("002").is_err()); + 
assert!(vec_bytes_as_traversal_instruction(&vec![0u8, 0u8, 2u8]).is_err()); assert_eq!( - string_as_traversal_instruction("").unwrap(), + vec_bytes_as_traversal_instruction(&vec![]).unwrap(), Vec::::new() ); } @@ -597,69 +608,69 @@ mod test { // tree of height 4 let traversal_instruction = generate_traversal_instruction(4, 1).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), 1 ); let traversal_instruction = generate_traversal_instruction(4, 2).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), 2 ); let traversal_instruction = generate_traversal_instruction(4, 3).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), 3 ); let traversal_instruction = generate_traversal_instruction(4, 4).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 4).unwrap(), 4 ); // tree of height 6 let traversal_instruction = generate_traversal_instruction(6, 1).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), 1 ); let traversal_instruction = generate_traversal_instruction(6, 2).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), 2 ); let traversal_instruction = generate_traversal_instruction(6, 3).unwrap(); assert_eq!( - 
chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), 3 ); let traversal_instruction = generate_traversal_instruction(6, 4).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), 4 ); let traversal_instruction = generate_traversal_instruction(6, 5).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), 5 ); let traversal_instruction = generate_traversal_instruction(6, 6).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), 6 ); let traversal_instruction = generate_traversal_instruction(6, 7).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), 7 ); let traversal_instruction = generate_traversal_instruction(6, 8).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), 8 ); let traversal_instruction = generate_traversal_instruction(6, 9).unwrap(); assert_eq!( - chunk_id_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), + chunk_index_from_traversal_instruction(traversal_instruction.as_slice(), 6).unwrap(), 9 ); } @@ -674,26 +685,26 @@ mod test { // function with recovery we expect this to backtrack to the last chunk // boundary e.g. 
[left] should backtrack to [] // [left, left, right, left] should backtrack to [left, left, right] - assert!(chunk_id_from_traversal_instruction(&[LEFT], 5).is_err()); + assert!(chunk_index_from_traversal_instruction(&[LEFT], 5).is_err()); assert_eq!( - chunk_id_from_traversal_instruction_with_recovery(&[LEFT], 5).unwrap(), + chunk_index_from_traversal_instruction_with_recovery(&[LEFT], 5).unwrap(), 1 ); assert_eq!( - chunk_id_from_traversal_instruction_with_recovery(&[LEFT, LEFT], 5).unwrap(), + chunk_index_from_traversal_instruction_with_recovery(&[LEFT, LEFT], 5).unwrap(), 1 ); assert_eq!( - chunk_id_from_traversal_instruction_with_recovery(&[LEFT, LEFT, RIGHT], 5).unwrap(), + chunk_index_from_traversal_instruction_with_recovery(&[LEFT, LEFT, RIGHT], 5).unwrap(), 3 ); assert_eq!( - chunk_id_from_traversal_instruction_with_recovery(&[LEFT, LEFT, RIGHT, LEFT], 5) + chunk_index_from_traversal_instruction_with_recovery(&[LEFT, LEFT, RIGHT, LEFT], 5) .unwrap(), 3 ); assert_eq!( - chunk_id_from_traversal_instruction_with_recovery(&[LEFT; 50], 5).unwrap(), + chunk_index_from_traversal_instruction_with_recovery(&[LEFT; 50], 5).unwrap(), 2 ); } diff --git a/tutorials/src/bin/proofs.rs b/tutorials/src/bin/proofs.rs index e62fb17b..173b700d 100644 --- a/tutorials/src/bin/proofs.rs +++ b/tutorials/src/bin/proofs.rs @@ -28,7 +28,7 @@ fn main() { let path_query = PathQuery::new_unsized(path, query.clone()); // Execute the query and collect the result items in "elements". let (_elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, false, true, None) .unwrap() .expect("expected successful get_path_query"); diff --git a/tutorials/src/bin/query-complex.rs b/tutorials/src/bin/query-complex.rs index a101bb37..131faa92 100644 --- a/tutorials/src/bin/query-complex.rs +++ b/tutorials/src/bin/query-complex.rs @@ -66,7 +66,7 @@ fn main() { // Execute the path query and collect the result items in "elements". 
let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, false, true, None) .unwrap() .expect("expected successful get_path_query"); diff --git a/tutorials/src/bin/query-simple.rs b/tutorials/src/bin/query-simple.rs index 05ac6264..6bc7a2fd 100644 --- a/tutorials/src/bin/query-simple.rs +++ b/tutorials/src/bin/query-simple.rs @@ -36,7 +36,7 @@ fn main() { // Execute the query and collect the result items in "elements". let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, false, true,None) .unwrap() .expect("expected successful get_path_query"); diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs index fc9c058c..e285861a 100644 --- a/tutorials/src/bin/replication.rs +++ b/tutorials/src/bin/replication.rs @@ -203,7 +203,7 @@ fn query_db(db: &GroveDb, path: &[&[u8]], key: Vec) { let path_query = PathQuery::new_unsized(path_vec, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, None) + .query_item_value(&path_query, true, false,true, None) .unwrap() .expect("expected successful get_path_query"); for e in elements.into_iter() { From 60037b68d86ba2af8182ab3f83236c5d08251edc Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Wed, 8 May 2024 12:00:59 +0300 Subject: [PATCH 21/37] feat: added version in state sync (#293) * feat: added versioning in state sync * clippy fixes --- grovedb/src/replication.rs | 42 ++++++++++++++++++++++++++++++-- tutorials/src/bin/replication.rs | 7 +++--- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index e4f3547e..f018053e 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -1,7 +1,6 @@ use std::{ collections::{BTreeMap, BTreeSet}, fmt, - str::Utf8Error, }; use grovedb_merk::{ @@ -19,6 +18,8 @@ use crate::{replication, Error, GroveDb, Transaction, TransactionArg}; pub(crate) type 
SubtreePrefix = [u8; blake3::OUT_LEN]; +pub const CURRENT_STATE_SYNC_VERSION: u16 = 1; + // Struct governing state sync pub struct StateSyncInfo<'db> { // Current Chunk restorer @@ -32,6 +33,8 @@ pub struct StateSyncInfo<'db> { pub pending_chunks: BTreeSet>, // Number of processed chunks in current prefix (Path digest) pub num_processed_chunks: usize, + // Version of state sync protocol, + pub version: u16, } // Struct containing information about current subtrees found in GroveDB @@ -115,6 +118,7 @@ impl GroveDb { current_prefix: None, pending_chunks, num_processed_chunks: 0, + version: CURRENT_STATE_SYNC_VERSION, } } @@ -204,7 +208,15 @@ impl GroveDb { &self, global_chunk_id: &[u8], tx: TransactionArg, + version: u16, ) -> Result, Error> { + // For now, only CURRENT_STATE_SYNC_VERSION is supported + if version != CURRENT_STATE_SYNC_VERSION { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + let chunk_prefix_length: usize = 32; if global_chunk_id.len() < chunk_prefix_length { return Err(Error::CorruptedData( @@ -254,7 +266,7 @@ impl GroveDb { } Some(t) => { let merk = self - .open_transactional_merk_at_path(path.into(), &t, None) + .open_transactional_merk_at_path(path.into(), t, None) .value?; if merk.is_empty_tree().unwrap() { @@ -295,7 +307,20 @@ impl GroveDb { mut state_sync_info: StateSyncInfo<'db>, app_hash: CryptoHash, tx: &'db Transaction, + version: u16, ) -> Result<(Vec>, StateSyncInfo), Error> { + // For now, only CURRENT_STATE_SYNC_VERSION is supported + if version != CURRENT_STATE_SYNC_VERSION { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + if version != state_sync_info.version { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + let mut res = vec![]; match ( @@ -343,7 +368,20 @@ impl GroveDb { mut state_sync_info: StateSyncInfo<'db>, chunk: (&[u8], Vec), tx: &'db Transaction, + version: 
u16, ) -> Result<(Vec>, StateSyncInfo), Error> { + // For now, only CURRENT_STATE_SYNC_VERSION is supported + if version != CURRENT_STATE_SYNC_VERSION { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + if version != state_sync_info.version { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + let mut res = vec![]; let (global_chunk_id, chunk_data) = chunk; diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs index e285861a..5911c8e7 100644 --- a/tutorials/src/bin/replication.rs +++ b/tutorials/src/bin/replication.rs @@ -4,6 +4,7 @@ use grovedb::{operations::insert::InsertOptions, Element, GroveDb, PathQuery, Qu use grovedb::reference_path::ReferencePathType; use rand::{distributions::Alphanumeric, Rng, }; use grovedb::element::SumValue; +use grovedb::replication::CURRENT_STATE_SYNC_VERSION; use grovedb_path::{SubtreePath}; const MAIN_ΚΕΥ: &[u8] = b"key_main"; @@ -226,15 +227,15 @@ fn sync_db_demo( target_tx: &Transaction, ) -> Result<(), grovedb::Error> { let app_hash = source_db.root_hash(None).value.unwrap(); - let (chunk_ids, mut state_sync_info) = target_db.start_snapshot_syncing(state_sync_info, app_hash, target_tx)?; + let (chunk_ids, mut state_sync_info) = target_db.start_snapshot_syncing(state_sync_info, app_hash, target_tx, CURRENT_STATE_SYNC_VERSION)?; let mut chunk_queue : VecDeque> = VecDeque::new(); chunk_queue.extend(chunk_ids); while let Some(chunk_id) = chunk_queue.pop_front() { - let ops = source_db.fetch_chunk(chunk_id.as_slice(), None)?; - let (more_chunks, new_state_sync_info) = target_db.apply_chunk(state_sync_info, (chunk_id.as_slice(), ops), target_tx)?; + let ops = source_db.fetch_chunk(chunk_id.as_slice(), None, CURRENT_STATE_SYNC_VERSION)?; + let (more_chunks, new_state_sync_info) = target_db.apply_chunk(state_sync_info, (chunk_id.as_slice(), ops), target_tx, CURRENT_STATE_SYNC_VERSION)?; state_sync_info = 
new_state_sync_info; chunk_queue.extend(more_chunks); } From 5b67055cb508531977ec0b6591e418b85141596b Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Thu, 9 May 2024 12:05:10 +0300 Subject: [PATCH 22/37] feat: sync multiple subtrees (#295) * multi subtrees sync * adjustements * fmt * clippy warnings * fmt --- grovedb/src/replication.rs | 316 +++++++++++++++++++------------ tutorials/src/bin/replication.rs | 36 +++- 2 files changed, 220 insertions(+), 132 deletions(-) diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index f018053e..6cb9e3fb 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -20,21 +20,25 @@ pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; pub const CURRENT_STATE_SYNC_VERSION: u16 = 1; -// Struct governing state sync -pub struct StateSyncInfo<'db> { +struct SubtreeStateSyncInfo<'db> { // Current Chunk restorer - pub restorer: Option>>, - // Set of processed prefixes (Path digests) - pub processed_prefixes: BTreeSet, - // Current processed prefix (Path digest) - pub current_prefix: Option, + restorer: Option>>, // Set of global chunk ids requested to be fetched and pending for processing. For the // description of global chunk id check fetch_chunk(). 
- pub pending_chunks: BTreeSet>, + pending_chunks: BTreeSet>, // Number of processed chunks in current prefix (Path digest) - pub num_processed_chunks: usize, + num_processed_chunks: usize, +} + +// Struct governing state sync +pub struct MultiStateSyncInfo<'db> { + // Map of current processing subtrees + // SubtreePrefix (Path digest) -> SubtreeStateSyncInfo + current_prefixes: BTreeMap>, + // Set of processed prefixes (Path digests) + processed_prefixes: BTreeSet, // Version of state sync protocol, - pub version: u16, + version: u16, } // Struct containing information about current subtrees found in GroveDB @@ -66,7 +70,7 @@ impl fmt::Debug for SubtreesMetadata { let metadata_path_str = util_path_to_string(metadata_path); writeln!( f, - " prefix:{:?} -> path:{:?}\n", + " prefix:{:?} -> path:{:?}", hex::encode(prefix), metadata_path_str ); @@ -109,15 +113,21 @@ pub fn util_split_global_chunk_id( #[cfg(feature = "full")] impl GroveDb { - pub fn create_state_sync_info(&self) -> StateSyncInfo { + fn create_subtree_state_sync_info(&self) -> SubtreeStateSyncInfo { let pending_chunks = BTreeSet::new(); - let processed_prefixes = BTreeSet::new(); - StateSyncInfo { + SubtreeStateSyncInfo { restorer: None, - processed_prefixes, - current_prefix: None, pending_chunks, num_processed_chunks: 0, + } + } + + pub fn create_multi_state_sync_info(&self) -> MultiStateSyncInfo { + let processed_prefixes = BTreeSet::new(); + let current_prefixes = BTreeMap::default(); + MultiStateSyncInfo { + current_prefixes, + processed_prefixes, version: CURRENT_STATE_SYNC_VERSION, } } @@ -304,11 +314,11 @@ impl GroveDb { // the StateSyncInfo transferring ownership back to the caller) pub fn start_snapshot_syncing<'db>( &'db self, - mut state_sync_info: StateSyncInfo<'db>, + mut state_sync_info: MultiStateSyncInfo<'db>, app_hash: CryptoHash, tx: &'db Transaction, version: u16, - ) -> Result<(Vec>, StateSyncInfo), Error> { + ) -> Result<(Vec>, MultiStateSyncInfo), Error> { // For now, only 
CURRENT_STATE_SYNC_VERSION is supported if version != CURRENT_STATE_SYNC_VERSION { return Err(Error::CorruptedData( @@ -323,34 +333,32 @@ impl GroveDb { let mut res = vec![]; - match ( - &mut state_sync_info.restorer, - &state_sync_info.current_prefix, - ) { - (None, None) => { - if state_sync_info.pending_chunks.is_empty() - && state_sync_info.processed_prefixes.is_empty() - { - let root_prefix = [0u8; 32]; - if let Ok(merk) = self.open_merk_for_replication(SubtreePath::empty(), tx) { - let restorer = Restorer::new(merk, app_hash, None); - state_sync_info.restorer = Some(restorer); - state_sync_info.current_prefix = Some(root_prefix); - state_sync_info.pending_chunks.insert(root_prefix.to_vec()); - - res.push(root_prefix.to_vec()); - } else { - return Err(Error::InternalError("Unable to open merk for replication")); - } - } else { - return Err(Error::InternalError("Invalid internal state sync info")); - } - } - _ => { - return Err(Error::InternalError( - "GroveDB has already started a snapshot syncing", - )); - } + if !state_sync_info.current_prefixes.is_empty() + || !state_sync_info.processed_prefixes.is_empty() + { + return Err(Error::InternalError( + "GroveDB has already started a snapshot syncing", + )); + } + + println!( + " starting:{:?}...", + replication::util_path_to_string(&[]) + ); + + let mut root_prefix_state_sync_info = self.create_subtree_state_sync_info(); + let root_prefix = [0u8; 32]; + if let Ok(merk) = self.open_merk_for_replication(SubtreePath::empty(), tx) { + let restorer = Restorer::new(merk, app_hash, None); + root_prefix_state_sync_info.restorer = Some(restorer); + root_prefix_state_sync_info.pending_chunks.insert(vec![]); + state_sync_info + .current_prefixes + .insert(root_prefix, root_prefix_state_sync_info); + + res.push(root_prefix.to_vec()); + } else { + return Err(Error::InternalError("Unable to open merk for replication")); } Ok((res, state_sync_info)) @@ -358,18 +366,18 @@ impl GroveDb { // Apply a chunk (should be called by ABCI 
when ApplySnapshotChunk method is // called) Params: - // state_sync_info: Consumed StateSyncInfo + // state_sync_info: Consumed MultiStateSyncInfo // chunk: (Global chunk id, Chunk proof operators) // tx: Transaction for the state sync // Returns the next set of global chunk ids that can be fetched from sources (+ - // the StateSyncInfo transferring ownership back to the caller) + // the MultiStateSyncInfo transferring ownership back to the caller) pub fn apply_chunk<'db>( &'db self, - mut state_sync_info: StateSyncInfo<'db>, + mut state_sync_info: MultiStateSyncInfo<'db>, chunk: (&[u8], Vec), tx: &'db Transaction, version: u16, - ) -> Result<(Vec>, StateSyncInfo), Error> { + ) -> Result<(Vec>, MultiStateSyncInfo), Error> { // For now, only CURRENT_STATE_SYNC_VERSION is supported if version != CURRENT_STATE_SYNC_VERSION { return Err(Error::CorruptedData( @@ -382,36 +390,109 @@ impl GroveDb { )); } - let mut res = vec![]; + let mut next_chunk_ids = vec![]; let (global_chunk_id, chunk_data) = chunk; let (chunk_prefix, chunk_id) = replication::util_split_global_chunk_id(global_chunk_id)?; - match ( - &mut state_sync_info.restorer, - &state_sync_info.current_prefix, - ) { - (Some(restorer), Some(ref current_prefix)) => { - if *current_prefix != chunk_prefix { - return Err(Error::InternalError("Invalid incoming prefix")); + if state_sync_info.current_prefixes.is_empty() { + return Err(Error::InternalError("GroveDB is not in syncing mode")); + } + if let Some(subtree_state_sync) = state_sync_info.current_prefixes.remove(&chunk_prefix) { + if let Ok((res, mut new_subtree_state_sync)) = + self.apply_inner_chunk(subtree_state_sync, &chunk_id, chunk_data) + { + if !res.is_empty() { + for local_chunk_id in res.iter() { + let mut next_global_chunk_id = chunk_prefix.to_vec(); + next_global_chunk_id.extend(local_chunk_id.to_vec()); + next_chunk_ids.push(next_global_chunk_id); + } + + // re-insert subtree_state_sync in state_sync_info + state_sync_info + .current_prefixes + 
.insert(chunk_prefix, new_subtree_state_sync); + Ok((next_chunk_ids, state_sync_info)) + } else { + if !new_subtree_state_sync.pending_chunks.is_empty() { + // re-insert subtree_state_sync in state_sync_info + state_sync_info + .current_prefixes + .insert(chunk_prefix, new_subtree_state_sync); + return Ok((vec![], state_sync_info)); + } + + // Subtree is finished. We can save it. + match new_subtree_state_sync.restorer.take() { + None => Err(Error::InternalError("Unable to finalize subtree")), + Some(restorer) => { + if (new_subtree_state_sync.num_processed_chunks > 0) + && (restorer.finalize().is_err()) + { + return Err(Error::InternalError("Unable to finalize Merk")); + } + state_sync_info.processed_prefixes.insert(chunk_prefix); + + // Subtree was successfully save. Time to discover new subtrees that + // need to be processed + let subtrees_metadata = self.get_subtrees_metadata(Some(tx))?; + if let Some(value) = subtrees_metadata.data.get(&chunk_prefix) { + println!( + " path:{:?} done (num_processed_chunks:{:?})", + replication::util_path_to_string(&value.0), + new_subtree_state_sync.num_processed_chunks + ); + } + + if let Ok((res, new_state_sync_info)) = + self.discover_subtrees(state_sync_info, subtrees_metadata, tx) + { + next_chunk_ids.extend(res); + Ok((next_chunk_ids, new_state_sync_info)) + } else { + Err(Error::InternalError("Unable to discover Subtrees")) + } + } + } } - if !state_sync_info.pending_chunks.contains(global_chunk_id) { + } else { + Err(Error::InternalError("Unable to process incoming chunk")) + } + } else { + Err(Error::InternalError("Invalid incoming prefix")) + } + } + + // Apply a chunk using the given SubtreeStateSyncInfo + // state_sync_info: Consumed SubtreeStateSyncInfo + // chunk_id: Local chunk id + // chunk_data: Chunk proof operators + // Returns the next set of global chunk ids that can be fetched from sources (+ + // the SubtreeStateSyncInfo transferring ownership back to the caller) + fn apply_inner_chunk<'db>( + &'db self, 
+ mut state_sync_info: SubtreeStateSyncInfo<'db>, + chunk_id: &[u8], + chunk_data: Vec, + ) -> Result<(Vec>, SubtreeStateSyncInfo), Error> { + let mut res = vec![]; + + match &mut state_sync_info.restorer { + Some(restorer) => { + if !state_sync_info.pending_chunks.contains(chunk_id) { return Err(Error::InternalError( "Incoming global_chunk_id not expected", )); } - state_sync_info.pending_chunks.remove(global_chunk_id); + state_sync_info.pending_chunks.remove(chunk_id); if !chunk_data.is_empty() { - match restorer.process_chunk(&chunk_id, chunk_data) { + match restorer.process_chunk(chunk_id, chunk_data) { Ok(next_chunk_ids) => { state_sync_info.num_processed_chunks += 1; for next_chunk_id in next_chunk_ids { - let mut next_global_chunk_id = chunk_prefix.to_vec(); - next_global_chunk_id.extend(next_chunk_id.to_vec()); - state_sync_info - .pending_chunks - .insert(next_global_chunk_id.clone()); - res.push(next_global_chunk_id); + state_sync_info.pending_chunks.insert(next_chunk_id.clone()); + res.push(next_chunk_id); } } _ => { @@ -421,68 +502,57 @@ impl GroveDb { } } _ => { - return Err(Error::InternalError("GroveDB is not in syncing mode")); + return Err(Error::InternalError("Invalid internal state (restorer")); } } - if res.is_empty() { - if !state_sync_info.pending_chunks.is_empty() { - return Ok((res, state_sync_info)); - } - match ( - state_sync_info.restorer.take(), - state_sync_info.current_prefix.take(), - ) { - (Some(restorer), Some(current_prefix)) => { - if (state_sync_info.num_processed_chunks > 0) && (restorer.finalize().is_err()) - { - return Err(Error::InternalError("Unable to finalize merk")); - } - state_sync_info.processed_prefixes.insert(current_prefix); - - let subtrees_metadata = self.get_subtrees_metadata(Some(tx))?; - if let Some(value) = subtrees_metadata.data.get(¤t_prefix) { - println!( - " path:{:?} done", - replication::util_path_to_string(&value.0) - ); - } + Ok((res, state_sync_info)) + } - for (prefix, prefix_metadata) in 
&subtrees_metadata.data { - if !state_sync_info.processed_prefixes.contains(prefix) { - let (current_path, s_actual_value_hash, s_elem_value_hash) = - &prefix_metadata; + // Prepares SubtreeStateSyncInfos for the freshly discovered subtrees in + // subtrees_metadata and returns the root global chunk ids for all of those + // new subtrees. state_sync_info: Consumed MultiStateSyncInfo + // subtrees_metadata: Metadata about discovered subtrees + // chunk_data: Chunk proof operators + // Returns the next set of global chunk ids that can be fetched from sources (+ + // the MultiStateSyncInfo transferring ownership back to the caller) + fn discover_subtrees<'db>( + &'db self, + mut state_sync_info: MultiStateSyncInfo<'db>, + subtrees_metadata: SubtreesMetadata, + tx: &'db Transaction, + ) -> Result<(Vec>, MultiStateSyncInfo), Error> { + let mut res = vec![]; - let subtree_path: Vec<&[u8]> = - current_path.iter().map(|vec| vec.as_slice()).collect(); - let path: &[&[u8]] = &subtree_path; + for (prefix, prefix_metadata) in &subtrees_metadata.data { + if !state_sync_info.processed_prefixes.contains(prefix) + && !state_sync_info.current_prefixes.contains_key(prefix) + { + let (current_path, s_actual_value_hash, s_elem_value_hash) = &prefix_metadata; - if let Ok(merk) = self.open_merk_for_replication(path.into(), tx) { - let restorer = Restorer::new( - merk, - *s_elem_value_hash, - Some(*s_actual_value_hash), - ); - state_sync_info.restorer = Some(restorer); - state_sync_info.current_prefix = Some(*prefix); - state_sync_info.num_processed_chunks = 0; - - let root_chunk_prefix = prefix.to_vec(); - state_sync_info - .pending_chunks - .insert(root_chunk_prefix.clone()); - res.push(root_chunk_prefix); - } else { - return Err(Error::InternalError( - "Unable to open merk for replication", - )); - } - break; - } - } - } - _ => { - return Err(Error::InternalError("Unable to finalize tree")); + let subtree_path: Vec<&[u8]> = + current_path.iter().map(|vec| vec.as_slice()).collect(); + 
let path: &[&[u8]] = &subtree_path; + println!( + " path:{:?} starting...", + replication::util_path_to_string(&prefix_metadata.0) + ); + + let mut subtree_state_sync_info = self.create_subtree_state_sync_info(); + if let Ok(merk) = self.open_merk_for_replication(path.into(), tx) { + let restorer = + Restorer::new(merk, *s_elem_value_hash, Some(*s_actual_value_hash)); + subtree_state_sync_info.restorer = Some(restorer); + subtree_state_sync_info.pending_chunks.insert(vec![]); + + state_sync_info + .current_prefixes + .insert(*prefix, subtree_state_sync_info); + + let root_chunk_prefix = prefix.to_vec(); + res.push(root_chunk_prefix.to_vec()); + } else { + return Err(Error::InternalError("Unable to open Merk for replication")); } } } diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs index 5911c8e7..9f2ba25e 100644 --- a/tutorials/src/bin/replication.rs +++ b/tutorials/src/bin/replication.rs @@ -1,16 +1,18 @@ use std::collections::VecDeque; use std::path::Path; -use grovedb::{operations::insert::InsertOptions, Element, GroveDb, PathQuery, Query, Transaction, replication::StateSyncInfo}; +use grovedb::{operations::insert::InsertOptions, Element, GroveDb, PathQuery, Query, Transaction}; use grovedb::reference_path::ReferencePathType; use rand::{distributions::Alphanumeric, Rng, }; use grovedb::element::SumValue; use grovedb::replication::CURRENT_STATE_SYNC_VERSION; -use grovedb_path::{SubtreePath}; +use grovedb::replication::MultiStateSyncInfo; const MAIN_ΚΕΥ: &[u8] = b"key_main"; const MAIN_ΚΕΥ_EMPTY: &[u8] = b"key_main_empty"; const KEY_INT_0: &[u8] = b"key_int_0"; +const KEY_INT_1: &[u8] = b"key_int_1"; +const KEY_INT_2: &[u8] = b"key_int_2"; const KEY_INT_REF_0: &[u8] = b"key_int_ref_0"; const KEY_INT_A: &[u8] = b"key_sum_0"; const ROOT_PATH: &[&[u8]] = &[]; @@ -29,14 +31,30 @@ fn populate_db(grovedb_path: String) -> GroveDb { insert_empty_tree_db(&db, ROOT_PATH, MAIN_ΚΕΥ); insert_empty_tree_db(&db, ROOT_PATH, MAIN_ΚΕΥ_EMPTY); 
insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_0); + insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_1); + insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_2); let tx = db.start_transaction(); - let batch_size = 100; - for i in 0..=10 { + let batch_size = 50; + for i in 0..=5 { insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_0], i * batch_size, i * batch_size + batch_size - 1, &tx); } let _ = db.commit_transaction(tx); + let tx = db.start_transaction(); + let batch_size = 50; + for i in 0..=5 { + insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_1], i * batch_size, i * batch_size + batch_size - 1, &tx); + } + let _ = db.commit_transaction(tx); + + let tx = db.start_transaction(); + let batch_size = 50; + for i in 0..=5 { + insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_2], i * batch_size, i * batch_size + batch_size - 1, &tx); + } + let _ = db.commit_transaction(tx); + insert_empty_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_REF_0); let tx_2 = db.start_transaction(); @@ -46,8 +64,8 @@ fn populate_db(grovedb_path: String) -> GroveDb { insert_empty_sum_tree_db(&db, &[MAIN_ΚΕΥ], KEY_INT_A); let tx_3 = db.start_transaction(); - insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_A], 1, 100, &tx_3); - insert_sum_element_db(&db, &[MAIN_ΚΕΥ, KEY_INT_A], 101, 150, &tx_3); + insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_A], 1, 500, &tx_3); + insert_sum_element_db(&db, &[MAIN_ΚΕΥ, KEY_INT_A], 501, 550, &tx_3); let _ = db.commit_transaction(tx_3); db } @@ -83,7 +101,7 @@ fn main() { println!("{:?}", subtrees_metadata_source); println!("\n######### db_checkpoint_0 -> db_destination state sync"); - let state_info = db_destination.create_state_sync_info(); + let state_info = db_destination.create_multi_state_sync_info(); let tx = db_destination.start_transaction(); sync_db_demo(&db_checkpoint_0, &db_destination, state_info, &tx).unwrap(); db_destination.commit_transaction(tx).unwrap().expect("expected to commit transaction"); @@ -204,7 +222,7 @@ fn query_db(db: &GroveDb, path: &[&[u8]], key: 
Vec) { let path_query = PathQuery::new_unsized(path_vec, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, false,true, None) + .query_item_value(&path_query, true, false, true, None) .unwrap() .expect("expected successful get_path_query"); for e in elements.into_iter() { @@ -223,7 +241,7 @@ fn query_db(db: &GroveDb, path: &[&[u8]], key: Vec) { fn sync_db_demo( source_db: &GroveDb, target_db: &GroveDb, - state_sync_info: StateSyncInfo, + state_sync_info: MultiStateSyncInfo, target_tx: &Transaction, ) -> Result<(), grovedb::Error> { let app_hash = source_db.root_hash(None).value.unwrap(); From 65a75120fbc600fee8b2450833f69d7806364ee4 Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Mon, 13 May 2024 18:21:57 +0300 Subject: [PATCH 23/37] feat: State sync APIs should use encoded Ops (#297) * state sync APIs use encoded ops * fmt * clippy * fmt * refactor --- grovedb/src/replication.rs | 88 +++++++++++++++++++++++++++++-------- merk/src/proofs/encoding.rs | 19 ++++++++ 2 files changed, 88 insertions(+), 19 deletions(-) diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 6cb9e3fb..c42d6b7c 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -4,8 +4,9 @@ use std::{ }; use grovedb_merk::{ + ed::Encode, merk::restore::Restorer, - proofs::Op, + proofs::{Decoder, Op}, tree::{hash::CryptoHash, kv::ValueDefinedCostType, value_hash}, ChunkProducer, }; @@ -111,6 +112,32 @@ pub fn util_split_global_chunk_id( Ok((chunk_prefix_key, chunk_id.to_vec())) } +pub fn util_encode_vec_ops(chunk: Vec) -> Result, Error> { + let mut res = vec![]; + for op in chunk { + op.encode_into(&mut res) + .map_err(|e| Error::CorruptedData(format!("unable to encode chunk: {}", e)))?; + } + Ok(res) +} + +pub fn util_decode_vec_ops(chunk: Vec) -> Result, Error> { + let decoder = Decoder::new(&chunk); + let mut res = vec![]; + for op in decoder { + match op { + Ok(op) => res.push(op), + Err(e) => { + return 
Err(Error::CorruptedData(format!( + "unable to decode chunk: {}", + e + ))); + } + } + } + Ok(res) +} + #[cfg(feature = "full")] impl GroveDb { fn create_subtree_state_sync_info(&self) -> SubtreeStateSyncInfo { @@ -213,13 +240,13 @@ impl GroveDb { // "0" for right. TODO: Compact CHUNK_ID into bitset for size optimization // as a subtree can be big hence traversal instructions for the deepest chunks // tx: Transaction. Function returns the data by opening merks at given tx. - // Returns the Chunk proof operators for the requested chunk + // Returns the Chunk proof operators for the requested chunk encoded in bytes pub fn fetch_chunk( &self, global_chunk_id: &[u8], tx: TransactionArg, version: u16, - ) -> Result, Error> { + ) -> Result, Error> { // For now, only CURRENT_STATE_SYNC_VERSION is supported if version != CURRENT_STATE_SYNC_VERSION { return Err(Error::CorruptedData( @@ -263,7 +290,12 @@ impl GroveDb { Ok(mut chunk_producer) => { let chunk_res = chunk_producer.chunk(chunk_id); match chunk_res { - Ok((chunk, _)) => Ok(chunk), + Ok((chunk, _)) => match util_encode_vec_ops(chunk) { + Ok(op_bytes) => Ok(op_bytes), + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), + }, Err(_) => Err(Error::CorruptedData( "Unable to create to load chunk".to_string(), )), @@ -288,7 +320,12 @@ impl GroveDb { Ok(mut chunk_producer) => { let chunk_res = chunk_producer.chunk(chunk_id); match chunk_res { - Ok((chunk, _)) => Ok(chunk), + Ok((chunk, _)) => match util_encode_vec_ops(chunk) { + Ok(op_bytes) => Ok(op_bytes), + Err(_) => Err(Error::CorruptedData( + "Unable to create to load chunk".to_string(), + )), + }, Err(_) => Err(Error::CorruptedData( "Unable to create to load chunk".to_string(), )), @@ -367,14 +404,14 @@ impl GroveDb { // Apply a chunk (should be called by ABCI when ApplySnapshotChunk method is // called) Params: // state_sync_info: Consumed MultiStateSyncInfo - // chunk: (Global chunk id, Chunk proof operators) + // chunk: 
(Global chunk id, Chunk proof operators encoded in bytes) // tx: Transaction for the state sync // Returns the next set of global chunk ids that can be fetched from sources (+ // the MultiStateSyncInfo transferring ownership back to the caller) pub fn apply_chunk<'db>( &'db self, mut state_sync_info: MultiStateSyncInfo<'db>, - chunk: (&[u8], Vec), + chunk: (&[u8], Vec), tx: &'db Transaction, version: u16, ) -> Result<(Vec>, MultiStateSyncInfo), Error> { @@ -467,14 +504,14 @@ impl GroveDb { // Apply a chunk using the given SubtreeStateSyncInfo // state_sync_info: Consumed SubtreeStateSyncInfo // chunk_id: Local chunk id - // chunk_data: Chunk proof operators + // chunk_data: Chunk proof operators encoded in bytes // Returns the next set of global chunk ids that can be fetched from sources (+ // the SubtreeStateSyncInfo transferring ownership back to the caller) fn apply_inner_chunk<'db>( &'db self, mut state_sync_info: SubtreeStateSyncInfo<'db>, chunk_id: &[u8], - chunk_data: Vec, + chunk_data: Vec, ) -> Result<(Vec>, SubtreeStateSyncInfo), Error> { let mut res = vec![]; @@ -487,18 +524,31 @@ impl GroveDb { } state_sync_info.pending_chunks.remove(chunk_id); if !chunk_data.is_empty() { - match restorer.process_chunk(chunk_id, chunk_data) { - Ok(next_chunk_ids) => { - state_sync_info.num_processed_chunks += 1; - for next_chunk_id in next_chunk_ids { - state_sync_info.pending_chunks.insert(next_chunk_id.clone()); - res.push(next_chunk_id); - } + match util_decode_vec_ops(chunk_data) { + Ok(ops) => { + match restorer.process_chunk(chunk_id, ops) { + Ok(next_chunk_ids) => { + state_sync_info.num_processed_chunks += 1; + for next_chunk_id in next_chunk_ids { + state_sync_info + .pending_chunks + .insert(next_chunk_id.clone()); + res.push(next_chunk_id); + } + } + _ => { + return Err(Error::InternalError( + "Unable to process incoming chunk", + )); + } + }; } - _ => { - return Err(Error::InternalError("Unable to process incoming chunk")); + Err(_) => { + return 
Err(Error::CorruptedData( + "Unable to decode incoming chunk".to_string(), + )); } - }; + } } } _ => { diff --git a/merk/src/proofs/encoding.rs b/merk/src/proofs/encoding.rs index 6b5f95b0..b0e31484 100644 --- a/merk/src/proofs/encoding.rs +++ b/merk/src/proofs/encoding.rs @@ -464,6 +464,7 @@ impl<'a> Iterator for Decoder<'a> { mod test { use super::super::{Node, Op}; use crate::{ + proofs::Decoder, tree::HASH_LENGTH, TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; @@ -994,6 +995,24 @@ mod test { assert_eq!(op, Op::Child); } + #[test] + fn decode_multiple_child() { + let bytes = [0x11, 0x11, 0x11, 0x10]; + let mut decoder = Decoder { + bytes: &bytes, + offset: 0, + }; + + let mut vecop = vec![]; + for op in decoder { + match op { + Ok(op) => vecop.push(op), + Err(e) => eprintln!("Error decoding: {:?}", e), + } + } + assert_eq!(vecop, vec![Op::Child, Op::Child, Op::Child, Op::Parent]); + } + #[test] fn decode_parent_inverted() { let bytes = [0x12]; From 016795ea3d958dafa8f5fe9c89fdd9f7fc5402ca Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Thu, 16 May 2024 09:58:49 +0300 Subject: [PATCH 24/37] refactor: correct constructors for `SubtreeStateSyncInfo` and `MultiStateSyncInfo` (#298) * correct constructors * default impl * default impl * refactoring * fix --- grovedb/src/replication.rs | 41 ++++++++++++++++---------------- tutorials/src/bin/replication.rs | 2 +- 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index c42d6b7c..11b0cb6c 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -21,6 +21,7 @@ pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; pub const CURRENT_STATE_SYNC_VERSION: u16 = 1; +#[derive(Default)] struct SubtreeStateSyncInfo<'db> { // Current Chunk restorer restorer: Option>>, @@ -31,6 +32,13 @@ struct SubtreeStateSyncInfo<'db> { num_processed_chunks: usize, } +impl<'a> SubtreeStateSyncInfo<'a> { + // Function to create an instance of 
SubtreeStateSyncInfo with default values + pub fn new() -> Self { + Self::default() + } +} + // Struct governing state sync pub struct MultiStateSyncInfo<'db> { // Map of current processing subtrees @@ -42,6 +50,16 @@ pub struct MultiStateSyncInfo<'db> { version: u16, } +impl<'db> Default for MultiStateSyncInfo<'db> { + fn default() -> Self { + Self { + current_prefixes: BTreeMap::new(), + processed_prefixes: BTreeSet::new(), + version: CURRENT_STATE_SYNC_VERSION, + } + } +} + // Struct containing information about current subtrees found in GroveDB pub struct SubtreesMetadata { // Map of Prefix (Path digest) -> (Actual path, Parent Subtree actual_value_hash, Parent @@ -140,25 +158,6 @@ pub fn util_decode_vec_ops(chunk: Vec) -> Result, Error> { #[cfg(feature = "full")] impl GroveDb { - fn create_subtree_state_sync_info(&self) -> SubtreeStateSyncInfo { - let pending_chunks = BTreeSet::new(); - SubtreeStateSyncInfo { - restorer: None, - pending_chunks, - num_processed_chunks: 0, - } - } - - pub fn create_multi_state_sync_info(&self) -> MultiStateSyncInfo { - let processed_prefixes = BTreeSet::new(); - let current_prefixes = BTreeMap::default(); - MultiStateSyncInfo { - current_prefixes, - processed_prefixes, - version: CURRENT_STATE_SYNC_VERSION, - } - } - // Returns the discovered subtrees found recursively along with their associated // metadata Params: // tx: Transaction. Function returns the data by opening merks at given tx. 
@@ -383,7 +382,7 @@ impl GroveDb { replication::util_path_to_string(&[]) ); - let mut root_prefix_state_sync_info = self.create_subtree_state_sync_info(); + let mut root_prefix_state_sync_info = SubtreeStateSyncInfo::default(); let root_prefix = [0u8; 32]; if let Ok(merk) = self.open_merk_for_replication(SubtreePath::empty(), tx) { let restorer = Restorer::new(merk, app_hash, None); @@ -588,7 +587,7 @@ impl GroveDb { replication::util_path_to_string(&prefix_metadata.0) ); - let mut subtree_state_sync_info = self.create_subtree_state_sync_info(); + let mut subtree_state_sync_info = SubtreeStateSyncInfo::default(); if let Ok(merk) = self.open_merk_for_replication(path.into(), tx) { let restorer = Restorer::new(merk, *s_elem_value_hash, Some(*s_actual_value_hash)); diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs index 9f2ba25e..f3e09532 100644 --- a/tutorials/src/bin/replication.rs +++ b/tutorials/src/bin/replication.rs @@ -101,7 +101,7 @@ fn main() { println!("{:?}", subtrees_metadata_source); println!("\n######### db_checkpoint_0 -> db_destination state sync"); - let state_info = db_destination.create_multi_state_sync_info(); + let state_info = MultiStateSyncInfo::default(); let tx = db_destination.start_transaction(); sync_db_demo(&db_checkpoint_0, &db_destination, state_info, &tx).unwrap(); db_destination.commit_transaction(tx).unwrap().expect("expected to commit transaction"); From aabfd3dfa06c636ea0fb42744a01af1397594e09 Mon Sep 17 00:00:00 2001 From: fominok Date: Mon, 20 May 2024 14:35:14 +0200 Subject: [PATCH 25/37] feat: GroveDB visualizer (#299) add `grovedbg` feature and a visualizer HTTP server under it --- .github/workflows/grovedb.yml | 12 ++ .gitmodules | 3 + Cargo.toml | 5 +- README.md | 18 +++ grovedb/Cargo.toml | 19 ++- grovedb/build.rs | 31 +++++ grovedb/grovedbg | 1 + grovedb/src/debugger.rs | 170 +++++++++++++++++++++++++++ grovedb/src/lib.rs | 18 ++- grovedb/src/operations/proof/util.rs | 14 +-- 
grovedbg-types/Cargo.toml | 7 ++ grovedbg-types/src/lib.rs | 60 ++++++++++ merk/Cargo.toml | 1 + merk/src/debugger.rs | 44 +++++++ merk/src/lib.rs | 6 +- merk/src/merk/get.rs | 2 +- path/src/lib.rs | 1 + path/src/subtree_path.rs | 44 +++++-- path/src/subtree_path_builder.rs | 50 ++++++-- path/src/subtree_path_iter.rs | 14 +++ path/src/util/compact_bytes.rs | 1 - 21 files changed, 480 insertions(+), 41 deletions(-) create mode 100644 .gitmodules create mode 100644 grovedb/build.rs create mode 160000 grovedb/grovedbg create mode 100644 grovedb/src/debugger.rs create mode 100644 grovedbg-types/Cargo.toml create mode 100644 grovedbg-types/src/lib.rs create mode 100644 merk/src/debugger.rs diff --git a/.github/workflows/grovedb.yml b/.github/workflows/grovedb.yml index 118beafb..5ad86e74 100644 --- a/.github/workflows/grovedb.yml +++ b/.github/workflows/grovedb.yml @@ -19,17 +19,23 @@ jobs: access_token: ${{ github.token }} - uses: actions/checkout@v2 + with: + submodules: recursive - name: Setup Rust uses: actions-rs/toolchain@v1 with: toolchain: stable + target: wasm32-unknown-unknown - name: Enable Rust cache uses: Swatinem/rust-cache@v2 with: cache-on-failure: "false" + - name: Setup Trunk + uses: jetli/trunk-action@v0.5.0 + - run: cargo test --workspace --all-features @@ -44,6 +50,8 @@ jobs: - name: Check out repo uses: actions/checkout@v2 + with: + submodules: recursive - name: Setup Rust uses: actions-rs/toolchain@v1 @@ -51,12 +59,16 @@ jobs: toolchain: stable default: true components: clippy + target: wasm32-unknown-unknown - name: Enable Rust cache uses: Swatinem/rust-cache@v2 with: cache-on-failure: "false" + - name: Setup Trunk + uses: jetli/trunk-action@v0.5.0 + - uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..2b7c58dd --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "grovedb/grovedbg"] + path = grovedb/grovedbg + url = 
https://github.com/dashpay/grovedbg diff --git a/Cargo.toml b/Cargo.toml index 6ebd27d7..6f5b5260 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,6 @@ [workspace] +resolver = "2" +exclude = ["grovedb/grovedbg"] members = [ "costs", "grovedb", @@ -6,5 +8,6 @@ members = [ "node-grove", "storage", "visualize", - "path" + "path", + "grovedbg-types", ] diff --git a/README.md b/README.md index 5094ed8b..4d66f7f9 100644 --- a/README.md +++ b/README.md @@ -232,6 +232,24 @@ From here we can build: ```cargo build``` +## grovedbg + +There is a work in progress implementation of a debugger layer for GroveDB. To use this library with +these capabilities enabled one needs to set a dependency with `grovedbg` feature. + +At build time this requires two environment dependencies: +1. `wasm32-unknown-unknown` Rust toolchain; +2. [trunk](https://trunkrs.dev/) utility. + +Then, to launch visualizer tool to observe the database structure inside of your browser on a port, +let's say 10000, the following snippet should do: + +```rust + let db = Arc::new(GroveDb::open("db").unwrap()); + db.start_visualzier(10000); +``` + +Just remember to use Arc because the HTTP server might outlast the GroveDB instance. 
## Performance diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index c932b138..96f2364e 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -10,7 +10,6 @@ repository = "https://github.com/dashpay/grovedb" readme = "../README.md" documentation = "https://docs.rs/grovedb" - [dependencies] grovedb-merk = { version = "1.0.0-rc.2", path = "../merk", optional = true, default-features = false } thiserror = { version = "1.0.59", optional = true } @@ -26,8 +25,13 @@ nohash-hasher = { version = "0.2.0", optional = true } indexmap = { version = "2.2.6", optional = true } intmap = { version = "2.0.0", optional = true } grovedb-path = { version = "1.0.0-rc.2", path = "../path" } +grovedbg-types = { path = "../grovedbg-types", optional = true } +tokio = { version = "1.37.0", features = ["rt-multi-thread", "net"], optional = true } +axum = { version = "0.7.5", features = ["macros"], optional = true } +tower-http = { version = "0.5.2", features = ["fs"], optional = true } blake3 = "1.4.0" bitvec = "1" +zip-extensions = { version ="0.6.2", optional = true } [dev-dependencies] rand = "0.8.5" @@ -65,3 +69,16 @@ verify = [ "integer-encoding", ] estimated_costs = ["full"] +grovedbg = [ + "grovedbg-types", + "tokio", + "full", + "grovedb-merk/grovedbg", + "axum", + "tower-http", + "zip-extensions", + "tempfile" +] + +[build-dependencies] +zip-extensions = "0.6.2" diff --git a/grovedb/build.rs b/grovedb/build.rs new file mode 100644 index 00000000..6186d0b1 --- /dev/null +++ b/grovedb/build.rs @@ -0,0 +1,31 @@ +#[cfg(feature = "grovedbg")] +fn main() { + use std::{ + env, + path::PathBuf, + process::{Command, ExitStatus}, + }; + + let out_dir = PathBuf::from(&env::var_os("OUT_DIR").unwrap()); + + if !Command::new("trunk") + .arg("build") + .arg("--release") + .arg("--dist") + .arg(&out_dir) + .arg("grovedbg/index.html") + .status() + .as_ref() + .map(ExitStatus::success) + .unwrap_or(false) + { + panic!("Error running `trunk build --release`"); + } + + let zip_file = 
out_dir.join("grovedbg.zip"); + zip_extensions::write::zip_create_from_directory(&zip_file, &out_dir) + .expect("can't create a grovedbg zip archive"); +} + +#[cfg(not(feature = "grovedbg"))] +fn main() {} diff --git a/grovedb/grovedbg b/grovedb/grovedbg new file mode 160000 index 00000000..ac48aa20 --- /dev/null +++ b/grovedb/grovedbg @@ -0,0 +1 @@ +Subproject commit ac48aa20c3e696708e592545b201dee731716ef3 diff --git a/grovedb/src/debugger.rs b/grovedb/src/debugger.rs new file mode 100644 index 00000000..d087ae1c --- /dev/null +++ b/grovedb/src/debugger.rs @@ -0,0 +1,170 @@ +//! GroveDB debugging support module. + +use std::{fs, net::Ipv4Addr, sync::Weak}; + +use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::post, Json, Router}; +use grovedb_merk::debugger::NodeDbg; +use grovedb_path::SubtreePath; +use grovedbg_types::{NodeFetchRequest, NodeUpdate, Path}; +use tokio::sync::mpsc::{self, Sender}; +use tower_http::services::ServeDir; + +use crate::{reference_path::ReferencePathType, GroveDb}; + +const GROVEDBG_ZIP: [u8; include_bytes!(concat!(env!("OUT_DIR"), "/grovedbg.zip")).len()] = + *include_bytes!(concat!(env!("OUT_DIR"), "/grovedbg.zip")); + +pub(super) fn start_visualizer(grovedb: Weak, port: u16) { + std::thread::spawn(move || { + let grovedbg_tmp = + tempfile::tempdir().expect("cannot create tempdir for grovedbg contents"); + let grovedbg_zip = grovedbg_tmp.path().join("grovedbg.zip"); + let grovedbg_www = grovedbg_tmp.path().join("grovedbg_www"); + + fs::write(&grovedbg_zip, GROVEDBG_ZIP).expect("cannot crate grovedbg.zip"); + zip_extensions::read::zip_extract(&grovedbg_zip, &grovedbg_www) + .expect("cannot extract grovedbg contents"); + + let (shutdown_send, mut shutdown_receive) = mpsc::channel::<()>(1); + let app = Router::new() + .route("/fetch_node", post(fetch_node)) + .route("/fetch_root_node", post(fetch_root_node)) + .fallback_service(ServeDir::new(grovedbg_www)) + .with_state((shutdown_send, grovedb)); + + 
tokio::runtime::Runtime::new() + .unwrap() + .block_on(async move { + let listener = tokio::net::TcpListener::bind((Ipv4Addr::LOCALHOST, port)) + .await + .expect("can't bind visualizer port"); + axum::serve(listener, app) + .with_graceful_shutdown(async move { + shutdown_receive.recv().await; + }) + .await + .unwrap() + }); + }); +} + +enum AppError { + Closed, + Any(String), +} + +impl IntoResponse for AppError { + fn into_response(self) -> axum::response::Response { + match self { + AppError::Closed => { + (StatusCode::SERVICE_UNAVAILABLE, "GroveDB is closed").into_response() + } + AppError::Any(e) => (StatusCode::INTERNAL_SERVER_ERROR, e).into_response(), + } + } +} + +impl From for AppError { + fn from(err: E) -> Self { + Self::Any(err.to_string()) + } +} + +async fn fetch_node( + State((shutdown, grovedb)): State<(Sender<()>, Weak)>, + Json(NodeFetchRequest { path, key }): Json, +) -> Result>, AppError> { + let Some(db) = grovedb.upgrade() else { + shutdown.send(()).await.ok(); + return Err(AppError::Closed); + }; + + let merk = db + .open_non_transactional_merk_at_path(path.as_slice().into(), None) + .unwrap()?; + let node = merk.get_node_dbg(&key)?; + + if let Some(node) = node { + let node_update: NodeUpdate = node_to_update(path, node)?; + Ok(Json(Some(node_update))) + } else { + Ok(None.into()) + } +} + +async fn fetch_root_node( + State((shutdown, grovedb)): State<(Sender<()>, Weak)>, +) -> Result>, AppError> { + let Some(db) = grovedb.upgrade() else { + shutdown.send(()).await.ok(); + return Err(AppError::Closed); + }; + + let merk = db + .open_non_transactional_merk_at_path(SubtreePath::empty(), None) + .unwrap()?; + + let node = merk.get_root_node_dbg()?; + + if let Some(node) = node { + let node_update: NodeUpdate = node_to_update(Vec::new(), node)?; + Ok(Json(Some(node_update))) + } else { + Ok(None.into()) + } +} + +fn node_to_update( + path: Path, + NodeDbg { + key, + value, + left_child, + right_child, + }: NodeDbg, +) -> Result { + let 
grovedb_element = crate::Element::deserialize(&value)?; + + let element = match grovedb_element { + crate::Element::Item(value, ..) => grovedbg_types::Element::Item { value }, + crate::Element::Tree(root_key, ..) => grovedbg_types::Element::Subtree { root_key }, + crate::Element::Reference(ReferencePathType::AbsolutePathReference(path), ..) => { + grovedbg_types::Element::AbsolutePathReference { path } + } + crate::Element::Reference( + ReferencePathType::UpstreamRootHeightReference(n_keep, path_append), + .., + ) => grovedbg_types::Element::UpstreamRootHeightReference { + n_keep: n_keep.into(), + path_append, + }, + crate::Element::Reference( + ReferencePathType::UpstreamFromElementHeightReference(n_remove, path_append), + .., + ) => grovedbg_types::Element::UpstreamFromElementHeightReference { + n_remove: n_remove.into(), + path_append, + }, + crate::Element::Reference(ReferencePathType::CousinReference(swap_parent), ..) => { + grovedbg_types::Element::CousinReference { swap_parent } + } + crate::Element::Reference(ReferencePathType::RemovedCousinReference(swap_parent), ..) => { + grovedbg_types::Element::RemovedCousinReference { swap_parent } + } + crate::Element::Reference(ReferencePathType::SiblingReference(sibling_key), ..) => { + grovedbg_types::Element::SiblingReference { sibling_key } + } + crate::Element::SumItem(value, _) => grovedbg_types::Element::SumItem { value }, + crate::Element::SumTree(root_key, sum, _) => { + grovedbg_types::Element::Sumtree { root_key, sum } + } + }; + + Ok(NodeUpdate { + path, + key, + element, + left_child, + right_child, + }) +} diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index fd11f10d..69a59045 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -140,11 +140,10 @@ //! [Architectural Decision Records](https://github.com/dashpay/grovedb/tree/master/adr) or //! 
[Tutorial](https://www.grovedb.org/tutorials.html) -#[cfg(feature = "full")] -extern crate core; - #[cfg(feature = "full")] pub mod batch; +#[cfg(feature = "grovedbg")] +pub mod debugger; #[cfg(any(feature = "full", feature = "verify"))] pub mod element; #[cfg(any(feature = "full", feature = "verify"))] @@ -165,13 +164,18 @@ pub mod replication; mod tests; #[cfg(feature = "full")] mod util; +#[cfg(any(feature = "full", feature = "verify"))] mod versioning; #[cfg(feature = "full")] mod visualize; +#[cfg(feature = "grovedbg")] +use std::sync::Arc; #[cfg(feature = "full")] use std::{collections::HashMap, option::Option::None, path::Path}; +#[cfg(feature = "grovedbg")] +use debugger::start_visualizer; #[cfg(any(feature = "full", feature = "verify"))] pub use element::Element; #[cfg(feature = "full")] @@ -222,6 +226,7 @@ use crate::element::helpers::raw_decode; pub use crate::error::Error; #[cfg(feature = "full")] use crate::util::{root_merk_optional_tx, storage_context_optional_tx}; +#[cfg(any(feature = "full", feature = "verify"))] use crate::Error::MerkError; #[cfg(feature = "full")] @@ -250,6 +255,13 @@ impl GroveDb { Ok(GroveDb { db }) } + #[cfg(feature = "grovedbg")] + // Start visualizer server for the GroveDB instance + pub fn start_visualzier(self: &Arc, port: u16) { + let weak = Arc::downgrade(self); + start_visualizer(weak, port); + } + /// Uses raw iter to delete GroveDB key values pairs from rocksdb pub fn wipe(&self) -> Result<(), Error> { self.db.wipe()?; diff --git a/grovedb/src/operations/proof/util.rs b/grovedb/src/operations/proof/util.rs index 0ea9ad26..05e868a3 100644 --- a/grovedb/src/operations/proof/util.rs +++ b/grovedb/src/operations/proof/util.rs @@ -253,7 +253,7 @@ impl<'a> ProofReader<'a> { let mut data_type = [0; 1]; self.read_into_slice(&mut data_type)?; - if data_type != [ProofTokenType::PathInfo.into()] { + if data_type != [Into::::into(ProofTokenType::PathInfo)] { return Err(Error::InvalidProof("wrong data_type, expected path_info")); } 
@@ -395,12 +395,12 @@ mod tests { #[test] fn test_proof_token_type_encoding() { - assert_eq!(0x01_u8, ProofTokenType::Merk.into()); - assert_eq!(0x02_u8, ProofTokenType::SizedMerk.into()); - assert_eq!(0x04_u8, ProofTokenType::EmptyTree.into()); - assert_eq!(0x05_u8, ProofTokenType::AbsentPath.into()); - assert_eq!(0x06_u8, ProofTokenType::PathInfo.into()); - assert_eq!(0x10_u8, ProofTokenType::Invalid.into()); + assert_eq!(0x01_u8, Into::::into(ProofTokenType::Merk)); + assert_eq!(0x02_u8, Into::::into(ProofTokenType::SizedMerk)); + assert_eq!(0x04_u8, Into::::into(ProofTokenType::EmptyTree)); + assert_eq!(0x05_u8, Into::::into(ProofTokenType::AbsentPath)); + assert_eq!(0x06_u8, Into::::into(ProofTokenType::PathInfo)); + assert_eq!(0x10_u8, Into::::into(ProofTokenType::Invalid)); } #[test] diff --git a/grovedbg-types/Cargo.toml b/grovedbg-types/Cargo.toml new file mode 100644 index 00000000..300eb5c7 --- /dev/null +++ b/grovedbg-types/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "grovedbg-types" +version = "0.1.0" +edition = "2021" + +[dependencies] +serde = { version = "1.0.201", features = ["derive"] } diff --git a/grovedbg-types/src/lib.rs b/grovedbg-types/src/lib.rs new file mode 100644 index 00000000..ff7a4127 --- /dev/null +++ b/grovedbg-types/src/lib.rs @@ -0,0 +1,60 @@ +use serde::{Deserialize, Serialize}; + +pub type Key = Vec; +pub type Path = Vec; +pub type PathSegment = Vec; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct NodeFetchRequest { + pub path: Path, + pub key: Key, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct RootFetchRequest; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct NodeUpdate { + pub left_child: Option, + pub right_child: Option, + pub path: Path, + pub key: Key, + pub element: Element, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum Element { + Subtree { + root_key: Option, + }, + Sumtree { + root_key: 
Option, + sum: i64, + }, + Item { + value: Vec, + }, + SumItem { + value: i64, + }, + AbsolutePathReference { + path: Path, + }, + UpstreamRootHeightReference { + n_keep: u32, + path_append: Vec, + }, + UpstreamFromElementHeightReference { + n_remove: u32, + path_append: Vec, + }, + CousinReference { + swap_parent: PathSegment, + }, + RemovedCousinReference { + swap_parent: Vec, + }, + SiblingReference { + sibling_key: Key, + }, +} diff --git a/merk/Cargo.toml b/merk/Cargo.toml index e381f5ed..d7864897 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -70,6 +70,7 @@ verify = [ "ed", "blake3" ] +grovedbg = ["full"] [dev-dependencies] tempfile = "3.10.1" diff --git a/merk/src/debugger.rs b/merk/src/debugger.rs new file mode 100644 index 00000000..fc710c0a --- /dev/null +++ b/merk/src/debugger.rs @@ -0,0 +1,44 @@ +//! Merk API enhancements for GroveDbg support + +use grovedb_costs::CostsExt; +use grovedb_storage::StorageContext; + +use crate::{tree::kv::ValueDefinedCostType, Error, Merk}; + +impl<'a, S: StorageContext<'a>> Merk { + pub fn get_node_dbg(&self, key: &[u8]) -> Result, Error> { + self.get_node_direct_fn( + key, + |tree| { + NodeDbg { + key: tree.inner.key_as_slice().to_owned(), + value: tree.inner.value_as_slice().to_owned(), + left_child: tree.link(true).map(|link| link.key().to_owned()), + right_child: tree.link(false).map(|link| link.key().to_owned()), + } + .wrap_with_cost(Default::default()) + }, + None:: Option>, // I wish I knew why + ) + .unwrap() + } + + pub fn get_root_node_dbg(&self) -> Result, Error> { + Ok(self.use_tree(|tree_opt| { + tree_opt.map(|tree| NodeDbg { + key: tree.inner.key_as_slice().to_owned(), + value: tree.inner.value_as_slice().to_owned(), + left_child: tree.link(true).map(|link| link.key().to_owned()), + right_child: tree.link(false).map(|link| link.key().to_owned()), + }) + })) + } +} + +#[derive(Debug)] +pub struct NodeDbg { + pub key: Vec, + pub value: Vec, + pub left_child: Option>, + pub right_child: Option>, +} diff 
--git a/merk/src/lib.rs b/merk/src/lib.rs index 18255b27..356bd5b8 100644 --- a/merk/src/lib.rs +++ b/merk/src/lib.rs @@ -30,13 +30,13 @@ // #![deny(missing_docs)] -#[cfg(feature = "full")] -extern crate core; - /// The top-level store API. #[cfg(feature = "full")] pub mod merk; +#[cfg(feature = "grovedbg")] +pub mod debugger; + #[cfg(feature = "full")] pub use crate::merk::{chunks::ChunkProducer, options::MerkOptions, restore::Restorer}; diff --git a/merk/src/merk/get.rs b/merk/src/merk/get.rs index f0c25b42..4f953d9f 100644 --- a/merk/src/merk/get.rs +++ b/merk/src/merk/get.rs @@ -234,7 +234,7 @@ where } /// Generic way to get a node's field - fn get_node_direct_fn( + pub(crate) fn get_node_direct_fn( &self, key: &[u8], f: F, diff --git a/path/src/lib.rs b/path/src/lib.rs index 0691874e..548dc410 100644 --- a/path/src/lib.rs +++ b/path/src/lib.rs @@ -62,6 +62,7 @@ mod tests { let subtree_path_builder = subtree_path_ref.derive_owned(); assert_eq!(calculate_hash(&path), calculate_hash(&subtree_path_ref)); assert_eq!(calculate_hash(&path), calculate_hash(&subtree_path_builder)); + assert_eq!(path.len(), reference.len()); } #[test] diff --git a/path/src/subtree_path.rs b/path/src/subtree_path.rs index 1752c7ff..437f911a 100644 --- a/path/src/subtree_path.rs +++ b/path/src/subtree_path.rs @@ -146,6 +146,26 @@ impl SubtreePath<'static, [u8; 0]> { } } +impl SubtreePath<'_, B> { + /// Returns the length of the subtree path. + pub fn len(&self) -> usize { + match &self.ref_variant { + SubtreePathInner::Slice(s) => s.len(), + SubtreePathInner::SubtreePath(path) => path.len(), + SubtreePathInner::SubtreePathIter(path_iter) => path_iter.len(), + } + } + + /// Returns whether the path is empty (the root tree). 
+ pub fn is_empty(&self) -> bool { + match &self.ref_variant { + SubtreePathInner::Slice(s) => s.is_empty(), + SubtreePathInner::SubtreePath(path) => path.is_empty(), + SubtreePathInner::SubtreePathIter(path_iter) => path_iter.is_empty(), + } + } +} + impl<'b, B: AsRef<[u8]>> SubtreePath<'b, B> { /// Get a derived path that will reuse this [Self] as it's base path and /// capable of owning data. @@ -241,17 +261,17 @@ mod tests { let parent = builder.derive_parent().unwrap().0; let as_vec = parent.to_vec(); - assert_eq!( - as_vec, - vec![ - b"one".to_vec(), - b"two".to_vec(), - b"three".to_vec(), - b"four".to_vec(), - b"five".to_vec(), - b"six".to_vec(), - b"seven".to_vec(), - ], - ); + let reference_vec = vec![ + b"one".to_vec(), + b"two".to_vec(), + b"three".to_vec(), + b"four".to_vec(), + b"five".to_vec(), + b"six".to_vec(), + b"seven".to_vec(), + ]; + + assert_eq!(as_vec, reference_vec); + assert_eq!(parent.len(), reference_vec.len()); } } diff --git a/path/src/subtree_path_builder.rs b/path/src/subtree_path_builder.rs index c3e868e6..4ef25f0a 100644 --- a/path/src/subtree_path_builder.rs +++ b/path/src/subtree_path_builder.rs @@ -107,6 +107,20 @@ pub(crate) enum SubtreePathRelative<'r> { Multi(CompactBytes), } +impl SubtreePathRelative<'_> { + pub fn len(&self) -> usize { + match self { + SubtreePathRelative::Empty => 0, + SubtreePathRelative::Single(_) => 1, + SubtreePathRelative::Multi(cb) => cb.len(), + } + } + + pub fn is_empty(&self) -> bool { + matches!(self, SubtreePathRelative::Empty) + } +} + impl Hash for SubtreePathRelative<'_> { fn hash(&self, state: &mut H) { match self { @@ -135,6 +149,18 @@ impl Default for SubtreePathBuilder<'static, [u8; 0]> { } } +impl SubtreePathBuilder<'_, B> { + /// Returns the length of the subtree path. + pub fn len(&self) -> usize { + self.base.len() + self.relative.len() + } + + /// Returns whether the path is empty (the root tree). 
+ pub fn is_empty(&self) -> bool { + self.base.is_empty() && self.relative.is_empty() + } +} + impl<'b, B: AsRef<[u8]>> SubtreePathBuilder<'b, B> { /// Get a derived path that will use another subtree path (or reuse the base /// slice) as it's base, then could be edited in place. @@ -266,17 +292,17 @@ mod tests { builder.push_segment(b"seven"); let as_vec = builder.to_vec(); - assert_eq!( - as_vec, - vec![ - b"one".to_vec(), - b"two".to_vec(), - b"three".to_vec(), - b"four".to_vec(), - b"five".to_vec(), - b"six".to_vec(), - b"seven".to_vec(), - ], - ); + let reference_vec = vec![ + b"one".to_vec(), + b"two".to_vec(), + b"three".to_vec(), + b"four".to_vec(), + b"five".to_vec(), + b"six".to_vec(), + b"seven".to_vec(), + ]; + + assert_eq!(as_vec, reference_vec); + assert_eq!(builder.len(), reference_vec.len()); } } diff --git a/path/src/subtree_path_iter.rs b/path/src/subtree_path_iter.rs index 78f3e12d..2ca65866 100644 --- a/path/src/subtree_path_iter.rs +++ b/path/src/subtree_path_iter.rs @@ -52,6 +52,10 @@ impl<'b, B> Clone for SubtreePathIter<'b, B> { } impl<'b, B> SubtreePathIter<'b, B> { + pub(crate) fn len(&self) -> usize { + self.current_iter.len() + self.next_subtree_path.map(|p| p.len()).unwrap_or_default() + } + pub(crate) fn new(iter: I) -> Self where I: Into>, @@ -133,6 +137,16 @@ pub(crate) enum CurrentSubtreePathIter<'b, B> { OwnedBytes(CompactBytesIter<'b>), } +impl CurrentSubtreePathIter<'_, B> { + pub fn len(&self) -> usize { + match self { + CurrentSubtreePathIter::Single(_) => 1, + CurrentSubtreePathIter::Slice(s) => s.len(), + CurrentSubtreePathIter::OwnedBytes(cb) => cb.len(), + } + } +} + impl<'b, B> Clone for CurrentSubtreePathIter<'b, B> { fn clone(&self) -> Self { match self { diff --git a/path/src/util/compact_bytes.rs b/path/src/util/compact_bytes.rs index d14e2644..1e4362cb 100644 --- a/path/src/util/compact_bytes.rs +++ b/path/src/util/compact_bytes.rs @@ -61,7 +61,6 @@ impl CompactBytes { } } - #[cfg(test)] pub fn len(&self) -> usize { 
self.n_segments } From 36f7468b557cf243a7f9ba4a7a26d1f3c4b1a41b Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Mon, 3 Jun 2024 20:47:37 +0300 Subject: [PATCH 26/37] refactor: root `chunk_id` should be equal to `app_hash` (#301) * adjustements * fmt * clippy * clippy * fmt * suggestions --- grovedb/src/replication.rs | 62 ++++++++++++++------------------ tutorials/src/bin/replication.rs | 7 ++-- 2 files changed, 31 insertions(+), 38 deletions(-) diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 11b0cb6c..5f7db1f3 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -32,13 +32,6 @@ struct SubtreeStateSyncInfo<'db> { num_processed_chunks: usize, } -impl<'a> SubtreeStateSyncInfo<'a> { - // Function to create an instance of SubtreeStateSyncInfo with default values - pub fn new() -> Self { - Self::default() - } -} - // Struct governing state sync pub struct MultiStateSyncInfo<'db> { // Map of current processing subtrees @@ -46,6 +39,8 @@ pub struct MultiStateSyncInfo<'db> { current_prefixes: BTreeMap>, // Set of processed prefixes (Path digests) processed_prefixes: BTreeSet, + // Root app_hash + app_hash: [u8; 32], // Version of state sync protocol, version: u16, } @@ -55,6 +50,7 @@ impl<'db> Default for MultiStateSyncInfo<'db> { Self { current_prefixes: BTreeMap::new(), processed_prefixes: BTreeSet::new(), + app_hash: [0; 32], version: CURRENT_STATE_SYNC_VERSION, } } @@ -115,6 +111,7 @@ pub fn util_path_to_string(path: &[Vec]) -> Vec { // Splits the given global chunk id into [SUBTREE_PREFIX:CHUNK_ID] pub fn util_split_global_chunk_id( global_chunk_id: &[u8], + app_hash: &[u8], ) -> Result<(crate::SubtreePrefix, Vec), Error> { let chunk_prefix_length: usize = 32; if global_chunk_id.len() < chunk_prefix_length { @@ -123,6 +120,12 @@ pub fn util_split_global_chunk_id( )); } + if global_chunk_id == app_hash { + let array_of_zeros: [u8; 32] = [0; 32]; + let root_chunk_prefix_key: crate::SubtreePrefix = 
array_of_zeros; + return Ok((root_chunk_prefix_key, vec![])); + } + let (chunk_prefix, chunk_id) = global_chunk_id.split_at(chunk_prefix_length); let mut array = [0u8; 32]; array.copy_from_slice(chunk_prefix); @@ -253,22 +256,13 @@ impl GroveDb { )); } - let chunk_prefix_length: usize = 32; - if global_chunk_id.len() < chunk_prefix_length { - return Err(Error::CorruptedData( - "expected global chunk id of at least 32 length".to_string(), - )); - } - - let (chunk_prefix, chunk_id) = global_chunk_id.split_at(chunk_prefix_length); - - let mut array = [0u8; 32]; - array.copy_from_slice(chunk_prefix); - let chunk_prefix_key: crate::SubtreePrefix = array; + let root_app_hash = self.root_hash(tx).value?; + let (chunk_prefix, chunk_id) = + replication::util_split_global_chunk_id(global_chunk_id, &root_app_hash)?; let subtrees_metadata = self.get_subtrees_metadata(tx)?; - match subtrees_metadata.data.get(&chunk_prefix_key) { + match subtrees_metadata.data.get(&chunk_prefix) { Some(path_data) => { let subtree = &path_data.0; let subtree_path: Vec<&[u8]> = subtree.iter().map(|vec| vec.as_slice()).collect(); @@ -287,7 +281,7 @@ impl GroveDb { let chunk_producer_res = ChunkProducer::new(&merk); match chunk_producer_res { Ok(mut chunk_producer) => { - let chunk_res = chunk_producer.chunk(chunk_id); + let chunk_res = chunk_producer.chunk(&chunk_id); match chunk_res { Ok((chunk, _)) => match util_encode_vec_ops(chunk) { Ok(op_bytes) => Ok(op_bytes), @@ -317,7 +311,7 @@ impl GroveDb { let chunk_producer_res = ChunkProducer::new(&merk); match chunk_producer_res { Ok(mut chunk_producer) => { - let chunk_res = chunk_producer.chunk(chunk_id); + let chunk_res = chunk_producer.chunk(&chunk_id); match chunk_res { Ok((chunk, _)) => match util_encode_vec_ops(chunk) { Ok(op_bytes) => Ok(op_bytes), @@ -346,15 +340,14 @@ impl GroveDb { // state_sync_info: Consumed StateSyncInfo // app_hash: Snapshot's AppHash // tx: Transaction for the state sync - // Returns the first set of global chunk ids 
that can be fetched from sources (+ - // the StateSyncInfo transferring ownership back to the caller) + // Returns the StateSyncInfo transferring ownership back to the caller) pub fn start_snapshot_syncing<'db>( &'db self, mut state_sync_info: MultiStateSyncInfo<'db>, app_hash: CryptoHash, tx: &'db Transaction, version: u16, - ) -> Result<(Vec>, MultiStateSyncInfo), Error> { + ) -> Result { // For now, only CURRENT_STATE_SYNC_VERSION is supported if version != CURRENT_STATE_SYNC_VERSION { return Err(Error::CorruptedData( @@ -367,8 +360,6 @@ impl GroveDb { )); } - let mut res = vec![]; - if !state_sync_info.current_prefixes.is_empty() || !state_sync_info.processed_prefixes.is_empty() { @@ -391,26 +382,27 @@ impl GroveDb { state_sync_info .current_prefixes .insert(root_prefix, root_prefix_state_sync_info); - - res.push(root_prefix.to_vec()); + state_sync_info.app_hash = app_hash; } else { return Err(Error::InternalError("Unable to open merk for replication")); } - Ok((res, state_sync_info)) + Ok(state_sync_info) } // Apply a chunk (should be called by ABCI when ApplySnapshotChunk method is // called) Params: // state_sync_info: Consumed MultiStateSyncInfo - // chunk: (Global chunk id, Chunk proof operators encoded in bytes) + // global_chunk_id: Global chunk id + // chunk: Chunk proof operators encoded in bytes // tx: Transaction for the state sync // Returns the next set of global chunk ids that can be fetched from sources (+ // the MultiStateSyncInfo transferring ownership back to the caller) pub fn apply_chunk<'db>( &'db self, mut state_sync_info: MultiStateSyncInfo<'db>, - chunk: (&[u8], Vec), + global_chunk_id: &[u8], + chunk: Vec, tx: &'db Transaction, version: u16, ) -> Result<(Vec>, MultiStateSyncInfo), Error> { @@ -428,15 +420,15 @@ impl GroveDb { let mut next_chunk_ids = vec![]; - let (global_chunk_id, chunk_data) = chunk; - let (chunk_prefix, chunk_id) = replication::util_split_global_chunk_id(global_chunk_id)?; + let (chunk_prefix, chunk_id) = + 
replication::util_split_global_chunk_id(global_chunk_id, &state_sync_info.app_hash)?; if state_sync_info.current_prefixes.is_empty() { return Err(Error::InternalError("GroveDB is not in syncing mode")); } if let Some(subtree_state_sync) = state_sync_info.current_prefixes.remove(&chunk_prefix) { if let Ok((res, mut new_subtree_state_sync)) = - self.apply_inner_chunk(subtree_state_sync, &chunk_id, chunk_data) + self.apply_inner_chunk(subtree_state_sync, &chunk_id, chunk) { if !res.is_empty() { for local_chunk_id in res.iter() { diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs index f3e09532..bfdc1782 100644 --- a/tutorials/src/bin/replication.rs +++ b/tutorials/src/bin/replication.rs @@ -245,15 +245,16 @@ fn sync_db_demo( target_tx: &Transaction, ) -> Result<(), grovedb::Error> { let app_hash = source_db.root_hash(None).value.unwrap(); - let (chunk_ids, mut state_sync_info) = target_db.start_snapshot_syncing(state_sync_info, app_hash, target_tx, CURRENT_STATE_SYNC_VERSION)?; + let mut state_sync_info = target_db.start_snapshot_syncing(state_sync_info, app_hash, target_tx, CURRENT_STATE_SYNC_VERSION)?; let mut chunk_queue : VecDeque> = VecDeque::new(); - chunk_queue.extend(chunk_ids); + // The very first chunk to fetch is always identified by the root app_hash + chunk_queue.push_back(app_hash.to_vec()); while let Some(chunk_id) = chunk_queue.pop_front() { let ops = source_db.fetch_chunk(chunk_id.as_slice(), None, CURRENT_STATE_SYNC_VERSION)?; - let (more_chunks, new_state_sync_info) = target_db.apply_chunk(state_sync_info, (chunk_id.as_slice(), ops), target_tx, CURRENT_STATE_SYNC_VERSION)?; + let (more_chunks, new_state_sync_info) = target_db.apply_chunk(state_sync_info, chunk_id.as_slice(), ops, target_tx, CURRENT_STATE_SYNC_VERSION)?; state_sync_info = new_state_sync_info; chunk_queue.extend(more_chunks); } From abe89b6186a2077a972c969a1fd7a07202eb23c6 Mon Sep 17 00:00:00 2001 From: fominok Date: Mon, 17 Jun 2024 11:17:50 +0200 
Subject: [PATCH 27/37] bump visualizer (#302) 1. Each path segment can be displayed with individual settings shared across every usage 2. Bytes (path segments, keys, values) now have more sensible default for their display option 3. VarInt display variant 4. Jump to subtree functionality 5. Error logging 6. Drive profile --- grovedb/grovedbg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grovedb/grovedbg b/grovedb/grovedbg index ac48aa20..33c40cf0 160000 --- a/grovedb/grovedbg +++ b/grovedb/grovedbg @@ -1 +1 @@ -Subproject commit ac48aa20c3e696708e592545b201dee731716ef3 +Subproject commit 33c40cf0117ab3cf446da00000658e7118c5e648 From 43ebe832fd1bc3419d69b898cda1745b2c59f9ee Mon Sep 17 00:00:00 2001 From: fominok Date: Mon, 17 Jun 2024 14:43:42 +0200 Subject: [PATCH 28/37] typo (#304) --- grovedb/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 69a59045..e0bcdb5c 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -257,7 +257,7 @@ impl GroveDb { #[cfg(feature = "grovedbg")] // Start visualizer server for the GroveDB instance - pub fn start_visualzier(self: &Arc, port: u16) { + pub fn start_visualizer(self: &Arc, port: u16) { let weak = Arc::downgrade(self); start_visualizer(weak, port); } From a90cea1193f50c97752961ea8702cf999d137266 Mon Sep 17 00:00:00 2001 From: fominok Date: Wed, 19 Jun 2024 15:48:12 +0200 Subject: [PATCH 29/37] fix visualizer files placed on stack (#305) --- grovedb/src/debugger.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grovedb/src/debugger.rs b/grovedb/src/debugger.rs index d087ae1c..e4e79209 100644 --- a/grovedb/src/debugger.rs +++ b/grovedb/src/debugger.rs @@ -21,7 +21,7 @@ pub(super) fn start_visualizer(grovedb: Weak, port: u16) { let grovedbg_zip = grovedbg_tmp.path().join("grovedbg.zip"); let grovedbg_www = grovedbg_tmp.path().join("grovedbg_www"); - fs::write(&grovedbg_zip, GROVEDBG_ZIP).expect("cannot crate 
grovedbg.zip"); + fs::write(&grovedbg_zip, &GROVEDBG_ZIP).expect("cannot crate grovedbg.zip"); zip_extensions::read::zip_extract(&grovedbg_zip, &grovedbg_www) .expect("cannot extract grovedbg contents"); From f0d08583236462e2c656a89d5dc7b66dae2bbe7c Mon Sep 17 00:00:00 2001 From: fominok Date: Thu, 20 Jun 2024 09:11:40 +0200 Subject: [PATCH 30/37] receive full address for visualizer (#306) --- grovedb/src/debugger.rs | 12 +++++++++--- grovedb/src/lib.rs | 9 +++++++-- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/grovedb/src/debugger.rs b/grovedb/src/debugger.rs index e4e79209..f987060c 100644 --- a/grovedb/src/debugger.rs +++ b/grovedb/src/debugger.rs @@ -6,7 +6,10 @@ use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::po use grovedb_merk::debugger::NodeDbg; use grovedb_path::SubtreePath; use grovedbg_types::{NodeFetchRequest, NodeUpdate, Path}; -use tokio::sync::mpsc::{self, Sender}; +use tokio::{ + net::ToSocketAddrs, + sync::mpsc::{self, Sender}, +}; use tower_http::services::ServeDir; use crate::{reference_path::ReferencePathType, GroveDb}; @@ -14,7 +17,10 @@ use crate::{reference_path::ReferencePathType, GroveDb}; const GROVEDBG_ZIP: [u8; include_bytes!(concat!(env!("OUT_DIR"), "/grovedbg.zip")).len()] = *include_bytes!(concat!(env!("OUT_DIR"), "/grovedbg.zip")); -pub(super) fn start_visualizer(grovedb: Weak, port: u16) { +pub(super) fn start_visualizer(grovedb: Weak, addr: A) +where + A: ToSocketAddrs + Send + 'static, +{ std::thread::spawn(move || { let grovedbg_tmp = tempfile::tempdir().expect("cannot create tempdir for grovedbg contents"); @@ -35,7 +41,7 @@ pub(super) fn start_visualizer(grovedb: Weak, port: u16) { tokio::runtime::Runtime::new() .unwrap() .block_on(async move { - let listener = tokio::net::TcpListener::bind((Ipv4Addr::LOCALHOST, port)) + let listener = tokio::net::TcpListener::bind(addr) .await .expect("can't bind visualizer port"); axum::serve(listener, app) diff --git a/grovedb/src/lib.rs 
b/grovedb/src/lib.rs index e0bcdb5c..5cf4fe84 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -219,6 +219,8 @@ use grovedb_storage::{Storage, StorageContext}; use grovedb_visualize::DebugByteVectors; #[cfg(any(feature = "full", feature = "verify"))] pub use query::{PathQuery, SizedQuery}; +#[cfg(feature = "grovedbg")] +use tokio::net::ToSocketAddrs; #[cfg(feature = "full")] use crate::element::helpers::raw_decode; @@ -257,9 +259,12 @@ impl GroveDb { #[cfg(feature = "grovedbg")] // Start visualizer server for the GroveDB instance - pub fn start_visualizer(self: &Arc, port: u16) { + pub fn start_visualizer(self: &Arc, addr: A) + where + A: ToSocketAddrs + Send + 'static, + { let weak = Arc::downgrade(self); - start_visualizer(weak, port); + start_visualizer(weak, addr); } /// Uses raw iter to delete GroveDB key values pairs from rocksdb From a4326b7ec36a639e94eaac397663166846c83f52 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Sun, 30 Jun 2024 20:19:26 +0300 Subject: [PATCH 31/37] feat: various helper functions and fixes for platform contested resources PR (#307) * feat: added new method to query item value or sum * feat: added new method to query item value or sum * feat: added new method to query item value or sum * feat: added new method to query item value or sum * feat: extra helpers * feat: allow to refer to sum trees * changed lifetimes * changed lifetimes * more fixes * fix * more fixes * sync session manages its transaction * more fixes * fmt * small helper * additional helpers * added new reference type * some fixes * some fixes * some fixes * added convenience method * added a new function * made a struct copyable * added a little more information for an error * added a few more helper methods * added a few more helper methods * fmt * better internal error fix * fix * fixed import * fmt * small fixes * some clippy fixes * query sum tree * qfix * better error messages for proof errors * added debug * make hex required * a few improvements 
* a few improvements * a few improvements * a few improvements --------- Co-authored-by: Odysseas Gabrielides Co-authored-by: Evgeny Fomin --- costs/src/lib.rs | 5 + grovedb/Cargo.toml | 3 +- .../estimated_costs/average_case_costs.rs | 2 +- .../batch/estimated_costs/worst_case_costs.rs | 4 +- grovedb/src/debugger.rs | 10 + grovedb/src/element/get.rs | 8 +- grovedb/src/element/helpers.rs | 48 ++++- grovedb/src/element/mod.rs | 14 +- grovedb/src/element/query.rs | 4 +- grovedb/src/error.rs | 2 +- grovedb/src/lib.rs | 7 +- grovedb/src/operations.rs | 3 + grovedb/src/operations/get/mod.rs | 2 + grovedb/src/operations/get/query.rs | 106 +++++++++- grovedb/src/operations/proof/util.rs | 52 ++++- grovedb/src/operations/proof/verify.rs | 143 +++++++++----- grovedb/src/query_result_type.rs | 81 ++++++++ grovedb/src/reference_path.rs | 85 +++++++- grovedb/src/util.rs | 183 ++++++++++++++++++ grovedb/src/visualize.rs | 15 ++ grovedbg-types/src/lib.rs | 4 + .../src/estimated_costs/average_case_costs.rs | 8 +- merk/src/merk/mod.rs | 11 ++ merk/src/proofs/encoding.rs | 2 +- merk/src/proofs/query/mod.rs | 2 +- 25 files changed, 711 insertions(+), 93 deletions(-) diff --git a/costs/src/lib.rs b/costs/src/lib.rs index 83d29f64..a6867028 100644 --- a/costs/src/lib.rs +++ b/costs/src/lib.rs @@ -93,6 +93,11 @@ pub struct OperationCost { } impl OperationCost { + /// Is Nothing + pub fn is_nothing(&self) -> bool { + self == &Self::default() + } + /// Helper function to build default `OperationCost` with different /// `seek_count`. 
pub fn with_seek_count(seek_count: u16) -> Self { diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index 96f2364e..992cb19e 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -17,7 +17,7 @@ tempfile = { version = "3.10.1", optional = true } bincode = { version = "2.0.0-rc.3" } grovedb-storage = { version = "1.0.0-rc.2", path = "../storage", optional = true } grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize", optional = true } -hex = { version = "0.4.3", optional = true } +hex = { version = "0.4.3"} itertools = { version = "0.12.1", optional = true } integer-encoding = { version = "4.0.0", optional = true } grovedb-costs = { version = "1.0.0-rc.2", path = "../costs", optional = true } @@ -51,7 +51,6 @@ full = [ "tempfile", "grovedb-storage/rocksdb_storage", "visualize", - "hex", "itertools", "integer-encoding", "grovedb-costs", diff --git a/grovedb/src/batch/estimated_costs/average_case_costs.rs b/grovedb/src/batch/estimated_costs/average_case_costs.rs index 0a8d573d..7f4521a7 100644 --- a/grovedb/src/batch/estimated_costs/average_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/average_case_costs.rs @@ -274,7 +274,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { let base_path = KeyInfoPath(vec![]); if let Some(estimated_layer_info) = self.paths.get(&base_path) { // Then we have to get the tree - if self.cached_merks.get(&base_path).is_none() { + if !self.cached_merks.contains_key(&base_path) { GroveDb::add_average_case_get_merk_at_path::( &mut cost, &base_path, diff --git a/grovedb/src/batch/estimated_costs/worst_case_costs.rs b/grovedb/src/batch/estimated_costs/worst_case_costs.rs index 5bb59dfa..f45bbff7 100644 --- a/grovedb/src/batch/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/worst_case_costs.rs @@ -214,7 +214,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { ); // Then we have to get the tree - if self.cached_merks.get(path).is_none() { + if !self.cached_merks.contains(path) { 
GroveDb::add_worst_case_get_merk_at_path::(&mut cost, path, false); self.cached_merks.insert(path.clone()); } @@ -239,7 +239,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { let base_path = KeyInfoPath(vec![]); if let Some(_estimated_layer_info) = self.paths.get(&base_path) { // Then we have to get the tree - if self.cached_merks.get(&base_path).is_none() { + if !self.cached_merks.contains(&base_path) { GroveDb::add_worst_case_get_merk_at_path::( &mut cost, &base_path, false, ); diff --git a/grovedb/src/debugger.rs b/grovedb/src/debugger.rs index f987060c..23acf447 100644 --- a/grovedb/src/debugger.rs +++ b/grovedb/src/debugger.rs @@ -144,6 +144,16 @@ fn node_to_update( n_keep: n_keep.into(), path_append, }, + crate::Element::Reference( + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + n_keep, + path_append, + ), + .., + ) => grovedbg_types::Element::UpstreamRootHeightWithParentPathAdditionReference { + n_keep: n_keep.into(), + path_append, + }, crate::Element::Reference( ReferencePathType::UpstreamFromElementHeightReference(n_remove, path_append), .., diff --git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index b6f75b10..957618d0 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -59,8 +59,12 @@ impl Element { let value = result?; value.ok_or_else(|| { Error::PathKeyNotFound(format!( - "key not found in Merk for get: {}", - hex::encode(key) + "get: key \"{}\" not found in Merk that has a root key [{}] and is of type {}", + hex::encode(key), + merk.root_key() + .map(hex::encode) + .unwrap_or("None".to_string()), + merk.merk_type )) }) }) diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index 91d57fe3..e7cb9df1 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -43,12 +43,13 @@ use grovedb_merk::{ #[cfg(feature = "full")] use integer_encoding::VarInt; +#[cfg(any(feature = "full", feature = "verify"))] +use 
crate::reference_path::{path_from_reference_path_type, ReferencePathType}; #[cfg(any(feature = "full", feature = "verify"))] use crate::{element::SUM_ITEM_COST_SIZE, Element, Error}; #[cfg(feature = "full")] use crate::{ element::{SUM_TREE_COST_SIZE, TREE_COST_SIZE}, - reference_path::{path_from_reference_path_type, ReferencePathType}, ElementFlags, }; @@ -64,8 +65,7 @@ impl Element { } #[cfg(any(feature = "full", feature = "verify"))] - /// Decoded the integer value in the SumItem element type, returns 0 for - /// everything else + /// Decoded the integer value in the SumItem element type pub fn as_sum_item_value(&self) -> Result { match self { Element::SumItem(value, _) => Ok(*value), @@ -73,6 +73,33 @@ impl Element { } } + #[cfg(any(feature = "full", feature = "verify"))] + /// Decoded the integer value in the SumItem element type + pub fn into_sum_item_value(self) -> Result { + match self { + Element::SumItem(value, _) => Ok(value), + _ => Err(Error::WrongElementType("expected a sum item")), + } + } + + #[cfg(any(feature = "full", feature = "verify"))] + /// Decoded the integer value in the SumTree element type + pub fn as_sum_tree_value(&self) -> Result { + match self { + Element::SumTree(_, value, _) => Ok(*value), + _ => Err(Error::WrongElementType("expected a sum tree")), + } + } + + #[cfg(any(feature = "full", feature = "verify"))] + /// Decoded the integer value in the SumTree element type + pub fn into_sum_tree_value(self) -> Result { + match self { + Element::SumTree(_, value, _) => Ok(value), + _ => Err(Error::WrongElementType("expected a sum tree")), + } + } + #[cfg(any(feature = "full", feature = "verify"))] /// Gives the item value in the Item element type pub fn as_item_bytes(&self) -> Result<&[u8], Error> { @@ -91,6 +118,15 @@ impl Element { } } + #[cfg(any(feature = "full", feature = "verify"))] + /// Gives the reference path type in the Reference element type + pub fn into_reference_path_type(self) -> Result { + match self { + 
Element::Reference(value, ..) => Ok(value), + _ => Err(Error::WrongElementType("expected a reference")), + } + } + #[cfg(any(feature = "full", feature = "verify"))] /// Check if the element is a sum tree pub fn is_sum_tree(&self) -> bool { @@ -103,6 +139,12 @@ impl Element { matches!(self, Element::SumTree(..) | Element::Tree(..)) } + #[cfg(any(feature = "full", feature = "verify"))] + /// Check if the element is a reference + pub fn is_reference(&self) -> bool { + matches!(self, Element::Reference(..)) + } + #[cfg(any(feature = "full", feature = "verify"))] /// Check if the element is an item pub fn is_item(&self) -> bool { diff --git a/grovedb/src/element/mod.rs b/grovedb/src/element/mod.rs index c71bb52f..4c29c400 100644 --- a/grovedb/src/element/mod.rs +++ b/grovedb/src/element/mod.rs @@ -101,7 +101,7 @@ pub enum Element { Item(Vec, Option), /// A reference to an object by its path Reference(ReferencePathType, MaxReferenceHop, Option), - /// A subtree, contains the a prefixed key representing the root of the + /// A subtree, contains the prefixed key representing the root of the /// subtree. Tree(Option>, Option), /// Signed integer value that can be totaled in a sum tree @@ -111,6 +111,18 @@ pub enum Element { SumTree(Option>, SumValue, Option), } +impl Element { + pub fn type_str(&self) -> &str { + match self { + Element::Item(..) => "item", + Element::Reference(..) => "reference", + Element::Tree(..) => "tree", + Element::SumItem(..) => "sum item", + Element::SumTree(..) 
=> "sum tree", + } + } +} + #[cfg(any(feature = "full", feature = "visualize"))] impl fmt::Debug for Element { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index eba5ae1f..c992ba26 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -53,7 +53,7 @@ use crate::{ QueryPathKeyElementTrioResultType, }, }, - util::{merk_optional_tx, storage_context_optional_tx}, + util::{merk_optional_tx, merk_optional_tx_internal_error, storage_context_optional_tx}, Error, PathQuery, TransactionArg, }; #[cfg(any(feature = "full", feature = "verify"))] @@ -563,7 +563,7 @@ impl Element { if !item.is_range() { // this is a query on a key if let QueryItem::Key(key) = item { - let element_res = merk_optional_tx!( + let element_res = merk_optional_tx_internal_error!( &mut cost, storage, subtree_path, diff --git a/grovedb/src/error.rs b/grovedb/src/error.rs index d7f476af..956b5343 100644 --- a/grovedb/src/error.rs +++ b/grovedb/src/error.rs @@ -19,7 +19,7 @@ pub enum Error { InternalError(&'static str), #[error("invalid proof: {0}")] /// Invalid proof - InvalidProof(&'static str), + InvalidProof(String), #[error("invalid input: {0}")] /// Invalid input InvalidInput(&'static str), diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 5cf4fe84..9a0068eb 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -164,7 +164,6 @@ pub mod replication; mod tests; #[cfg(feature = "full")] mod util; -#[cfg(any(feature = "full", feature = "verify"))] mod versioning; #[cfg(feature = "full")] mod visualize; @@ -336,11 +335,11 @@ impl GroveDb { /// Opens a Merk at given path for with direct write access. Intended for /// replication purposes. 
- fn open_merk_for_replication<'db, 'b, B>( + fn open_merk_for_replication<'tx, 'db: 'tx, 'b, B>( &'db self, path: SubtreePath<'b, B>, - tx: &'db Transaction, - ) -> Result>, Error> + tx: &'tx Transaction<'db>, + ) -> Result>, Error> where B: AsRef<[u8]> + 'b, { diff --git a/grovedb/src/operations.rs b/grovedb/src/operations.rs index af637f42..9864b0bc 100644 --- a/grovedb/src/operations.rs +++ b/grovedb/src/operations.rs @@ -40,3 +40,6 @@ pub mod insert; pub(crate) mod is_empty_tree; #[cfg(any(feature = "full", feature = "verify"))] pub mod proof; + +#[cfg(feature = "full")] +pub use get::{QueryItemOrSumReturnType, MAX_REFERENCE_HOPS}; diff --git a/grovedb/src/operations/get/mod.rs b/grovedb/src/operations/get/mod.rs index 69512567..12700106 100644 --- a/grovedb/src/operations/get/mod.rs +++ b/grovedb/src/operations/get/mod.rs @@ -32,6 +32,8 @@ mod average_case; #[cfg(feature = "full")] mod query; +#[cfg(feature = "full")] +pub use query::QueryItemOrSumReturnType; #[cfg(feature = "estimated_costs")] mod worst_case; diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index 29a581d9..7e29b233 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -36,6 +36,8 @@ use grovedb_costs::{ #[cfg(feature = "full")] use integer_encoding::VarInt; +#[cfg(feature = "full")] +use crate::element::SumValue; use crate::{element::QueryOptions, query_result_type::PathKeyOptionalElementTrio}; #[cfg(feature = "full")] use crate::{ @@ -44,6 +46,16 @@ use crate::{ Element, Error, GroveDb, PathQuery, TransactionArg, }; +#[cfg(feature = "full")] +#[derive(Debug, Eq, PartialEq, Clone)] +/// A return type for query_item_value_or_sum +pub enum QueryItemOrSumReturnType { + /// an Item in serialized form + ItemData(Vec), + /// A sum item or a sum tree value + SumValue(SumValue), +} + #[cfg(feature = "full")] impl GroveDb { /// Encoded query for multiple path queries @@ -190,10 +202,8 @@ where { )), } } - Element::Item(..) 
| Element::SumItem(..) => Ok(element), - Element::Tree(..) | Element::SumTree(..) => Err(Error::InvalidQuery( - "path_queries can only refer to items and references", - )), + Element::Item(..) | Element::SumItem(..) | Element::SumTree(..) => Ok(element), + Element::Tree(..) => Err(Error::InvalidQuery("path_queries can not refer to trees")), } } @@ -309,6 +319,94 @@ where { Ok((results, skipped)).wrap_with_cost(cost) } + /// Queries the backing store and returns element items by their value, + /// Sum Items are returned + pub fn query_item_value_or_sum( + &self, + path_query: &PathQuery, + allow_cache: bool, + decrease_limit_on_range_with_no_sub_elements: bool, + error_if_intermediate_path_tree_not_present: bool, + transaction: TransactionArg, + ) -> CostResult<(Vec, u16), Error> { + let mut cost = OperationCost::default(); + + let (elements, skipped) = cost_return_on_error!( + &mut cost, + self.query_raw( + path_query, + allow_cache, + decrease_limit_on_range_with_no_sub_elements, + error_if_intermediate_path_tree_not_present, + QueryResultType::QueryElementResultType, + transaction + ) + ); + + let results_wrapped = elements + .into_iterator() + .map(|result_item| match result_item { + QueryResultElement::ElementResultItem(element) => { + match element { + Element::Reference(reference_path, ..) => { + match reference_path { + ReferencePathType::AbsolutePathReference(absolute_path) => { + // While `map` on iterator is lazy, we should accumulate costs + // even if `collect` will + // end in `Err`, so we'll use + // external costs accumulator instead of + // returning costs from `map` call. 
+ let maybe_item = self + .follow_reference( + absolute_path.as_slice().into(), + allow_cache, + transaction, + ) + .unwrap_add_cost(&mut cost)?; + + match maybe_item { + Element::Item(item, _) => { + Ok(QueryItemOrSumReturnType::ItemData(item)) + } + Element::SumItem(sum_value, _) => { + Ok(QueryItemOrSumReturnType::SumValue(sum_value)) + } + Element::SumTree(_, sum_value, _) => { + Ok(QueryItemOrSumReturnType::SumValue(sum_value)) + } + _ => Err(Error::InvalidQuery( + "the reference must result in an item", + )), + } + } + _ => Err(Error::CorruptedCodeExecution( + "reference after query must have absolute paths", + )), + } + } + Element::Item(item, _) => Ok(QueryItemOrSumReturnType::ItemData(item)), + Element::SumItem(sum_value, _) => { + Ok(QueryItemOrSumReturnType::SumValue(sum_value)) + } + Element::SumTree(_, sum_value, _) => { + Ok(QueryItemOrSumReturnType::SumValue(sum_value)) + } + Element::Tree(..) => Err(Error::InvalidQuery( + "path_queries can only refer to items, sum items, references and sum \ + trees", + )), + } + } + _ => Err(Error::CorruptedCodeExecution( + "query returned incorrect result type", + )), + }) + .collect::, Error>>(); + + let results = cost_return_on_error_no_add!(&cost, results_wrapped); + Ok((results, skipped)).wrap_with_cost(cost) + } + /// Retrieves only SumItem elements that match a path query pub fn query_sums( &self, diff --git a/grovedb/src/operations/proof/util.rs b/grovedb/src/operations/proof/util.rs index 05e868a3..82e8c585 100644 --- a/grovedb/src/operations/proof/util.rs +++ b/grovedb/src/operations/proof/util.rs @@ -26,6 +26,7 @@ // IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::fmt; #[cfg(any(feature = "full", feature = "verify"))] use std::io::Read; #[cfg(feature = "full")] @@ -38,9 +39,9 @@ use grovedb_merk::{ #[cfg(any(feature = "full", feature = "verify"))] use integer_encoding::{VarInt, VarIntReader}; -use crate::operations::proof::verify::ProvedKeyValues; #[cfg(any(feature = "full", feature = "verify"))] use crate::Error; +use crate::{operations::proof::verify::ProvedKeyValues, reference_path::ReferencePathType}; #[cfg(any(feature = "full", feature = "verify"))] pub const EMPTY_TREE_HASH: [u8; 32] = [0; 32]; @@ -60,6 +61,21 @@ pub enum ProofTokenType { Invalid, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for ProofTokenType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let variant_str = match self { + ProofTokenType::Merk => "Merk", + ProofTokenType::SizedMerk => "SizedMerk", + ProofTokenType::EmptyTree => "EmptyTree", + ProofTokenType::AbsentPath => "AbsentPath", + ProofTokenType::PathInfo => "PathInfo", + ProofTokenType::Invalid => "Invalid", + }; + write!(f, "{}", variant_str) + } +} + #[cfg(any(feature = "full", feature = "verify"))] impl From for u8 { fn from(proof_token_type: ProofTokenType) -> Self { @@ -88,6 +104,20 @@ impl From for ProofTokenType { } } +#[cfg(any(feature = "full", feature = "verify"))] +impl ProofTokenType { + pub fn u8_to_display(val: u8) -> String { + match val { + 0x01 => "merk".to_string(), + 0x02 => "sized merk".to_string(), + 0x04 => "empty tree".to_string(), + 0x05 => "absent path".to_string(), + 0x06 => "path info".to_string(), + v => format!("invalid proof token {}", v), + } + } +} + #[cfg(any(feature = "full", feature = "verify"))] #[derive(Debug)] // TODO: possibility for a proof writer?? 
@@ -151,7 +181,7 @@ impl<'a> ProofReader<'a> { fn read_length_data(&mut self) -> Result { self.proof_data .read_varint() - .map_err(|_| Error::InvalidProof("expected length data")) + .map_err(|_| Error::InvalidProof("expected length data".to_string())) } /// Read proof with optional type @@ -175,7 +205,7 @@ impl<'a> ProofReader<'a> { proof_token_type, proof, Some(key.ok_or(Error::InvalidProof( - "key must exist for verbose merk proofs", + "key must exist for verbose merk proofs".to_string(), ))?), )) } @@ -207,8 +237,11 @@ impl<'a> ProofReader<'a> { self.read_into_slice(&mut data_type)?; if let Some(expected_data_type) = expected_data_type_option { - if data_type != [expected_data_type] { - return Err(Error::InvalidProof("wrong data_type")); + if data_type[0] != expected_data_type { + return Err(Error::InvalidProof(format!( + "wrong data_type, expected {}, got {}", + expected_data_type, data_type[0] + ))); } } @@ -242,7 +275,9 @@ impl<'a> ProofReader<'a> { (proof, key) } else { - return Err(Error::InvalidProof("expected merk or sized merk proof")); + return Err(Error::InvalidProof( + "expected merk or sized merk proof".to_string(), + )); }; Ok((proof_token_type, proof, key)) @@ -254,7 +289,10 @@ impl<'a> ProofReader<'a> { self.read_into_slice(&mut data_type)?; if data_type != [Into::::into(ProofTokenType::PathInfo)] { - return Err(Error::InvalidProof("wrong data_type, expected path_info")); + return Err(Error::InvalidProof(format!( + "wrong data_type, expected path_info, got {}", + ProofTokenType::u8_to_display(data_type[0]) + ))); } let mut path = vec![]; diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index baea8735..7a347c15 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -223,9 +223,11 @@ impl GroveDb { )?; let (new_root_hash, new_elements) = Self::verify_subset_query(proof, &new_path_query)?; if new_root_hash != last_root_hash { - return Err(Error::InvalidProof( - 
"root hash for different path queries do no match", - )); + return Err(Error::InvalidProof(format!( + "root hash for different path queries do no match, first is {}, this one is {}", + hex::encode(last_root_hash), + hex::encode(new_root_hash) + ))); } results.push(new_elements); } @@ -276,7 +278,8 @@ impl ProofVerifier { } else if original_path.len() > path_slices.len() { // TODO: can we relax this constraint return Err(Error::InvalidProof( - "original path query path must not be greater than the subset path len", + "original path query path must not be greater than the subset path len" + .to_string(), )); } else { let original_path_in_new_path = original_path @@ -285,7 +288,7 @@ impl ProofVerifier { if !original_path_in_new_path { return Err(Error::InvalidProof( - "the original path should be a subset of the subset path", + "the original path should be a subset of the subset path".to_string(), )); } else { // We construct a new path query @@ -373,7 +376,7 @@ impl ProofVerifier { last_root_hash = proof_root_hash; let children = children.ok_or(Error::InvalidProof( - "MERK_PROOF always returns a result set", + "MERK_PROOF always returns a result set".to_string(), ))?; for proved_path_key_value in children { @@ -474,7 +477,8 @@ impl ProofVerifier { // which is invalid as there exists a subquery value return Err(Error::InvalidProof( "expected unsized proof for subquery path as subquery \ - value exists", + value exists" + .to_string(), )); } let subquery_path_result_set = @@ -517,9 +521,11 @@ impl ProofVerifier { .to_owned(); if combined_child_hash != expected_combined_child_hash { - return Err(Error::InvalidProof( - "child hash doesn't match the expected hash", - )); + return Err(Error::InvalidProof(format!( + "child hash {} doesn't match the expected hash {}", + hex::encode(combined_child_hash), + hex::encode(expected_combined_child_hash) + ))); } } _ => { @@ -552,10 +558,13 @@ impl ProofVerifier { ProofTokenType::EmptyTree => { last_root_hash = EMPTY_TREE_HASH; } - 
_ => { + t => { // execute_subquery_proof only expects proofs for merk trees // root proof is handled separately - return Err(Error::InvalidProof("wrong proof type")); + return Err(Error::InvalidProof(format!( + "wrong proof type, expected sized merk, merk or empty tree but got {}", + t + ))); } } Ok(last_root_hash) @@ -576,13 +585,14 @@ impl ProofVerifier { *expected_child_hash = subquery_path_result_set[0].proof; *current_value_bytes = subquery_path_result_set[0].value.to_owned(); } - _ => { + e => { // the means that the subquery path pointed to a non tree // element, this is not valid as you cannot apply the // the subquery value to non tree items - return Err(Error::InvalidProof( - "subquery path cannot point to non tree element", - )); + return Err(Error::InvalidProof(format!( + "subquery path cannot point to non tree element, got {}", + e.type_str() + ))); } } Ok(()) @@ -607,9 +617,10 @@ impl ProofVerifier { proof_reader.read_next_proof(current_path.last().unwrap_or(&Default::default()))?; // intermediate proofs are all going to be unsized merk proofs if proof_token_type != ProofTokenType::Merk { - return Err(Error::InvalidProof( - "expected MERK proof type for intermediate subquery path keys", - )); + return Err(Error::InvalidProof(format!( + "expected MERK proof type for intermediate subquery path keys, got {}", + proof_token_type + ))); } match proof_token_type { ProofTokenType::Merk => { @@ -645,9 +656,11 @@ impl ProofVerifier { .to_owned(); if combined_child_hash != *expected_root_hash { - return Err(Error::InvalidProof( - "child hash doesn't match the expected hash", - )); + return Err(Error::InvalidProof(format!( + "child hash {} doesn't match the expected hash {}", + hex::encode(combined_child_hash), + hex::encode(expected_root_hash) + ))); } // after confirming they are linked use the latest hash values for subsequent @@ -658,10 +671,11 @@ impl ProofVerifier { &result_set.expect("confirmed is some"), )?; } - _ => { - return Err(Error::InvalidProof( 
- "expected merk of sized merk proof type for subquery path", - )); + t => { + return Err(Error::InvalidProof(format!( + "expected merk of sized merk proof type for subquery path, got {}", + t + ))); } } } @@ -669,9 +683,10 @@ impl ProofVerifier { let (proof_token_type, subkey_proof) = proof_reader.read_next_proof(current_path.last().unwrap_or(&Default::default()))?; if proof_token_type != expected_proof_token_type { - return Err(Error::InvalidProof( - "unexpected proof type for subquery path", - )); + return Err(Error::InvalidProof(format!( + "unexpected proof type for subquery path, expected {}, got {}", + expected_proof_token_type, proof_token_type + ))); } match proof_token_type { @@ -691,9 +706,10 @@ impl ProofVerifier { Ok((verification_result.0, verification_result.1, false)) } - _ => Err(Error::InvalidProof( - "expected merk or sized merk proof type for subquery path", - )), + t => Err(Error::InvalidProof(format!( + "expected merk or sized merk proof type for subquery path, got {}", + t + ))), } } @@ -720,14 +736,18 @@ impl ProofVerifier { if Some(combined_hash) != expected_child_hash { return Err(Error::InvalidProof( "proof invalid: could not verify empty subtree while generating absent \ - path proof", + path proof" + .to_string(), )); } else { last_result_set = vec![]; break; } } else if proof_token_type != ProofTokenType::Merk { - return Err(Error::InvalidProof("expected a merk proof for absent path")); + return Err(Error::InvalidProof(format!( + "expected a merk proof for absent path, got {}", + proof_token_type + ))); } let mut child_query = Query::new(); @@ -743,18 +763,22 @@ impl ProofVerifier { Vec::new(), )?; - if expected_child_hash.is_none() { - root_key_hash = Some(proof_result.0); - } else { + if let Some(expected_child_hash) = expected_child_hash { let combined_hash = combine_hash( value_hash_fn(last_result_set[0].value.as_slice()).value(), &proof_result.0, ) .value() .to_owned(); - if Some(combined_hash) != expected_child_hash { - return 
Err(Error::InvalidProof("proof invalid: invalid parent")); + if combined_hash != expected_child_hash { + return Err(Error::InvalidProof(format!( + "proof invalid: invalid parent, expected {}, got {}", + hex::encode(expected_child_hash), + hex::encode(combined_hash) + ))); } + } else { + root_key_hash = Some(proof_result.0); } last_result_set = proof_result @@ -768,9 +792,10 @@ impl ProofVerifier { let elem = Element::deserialize(last_result_set[0].value.as_slice())?; let child_hash = match elem { Element::Tree(..) | Element::SumTree(..) => Ok(Some(last_result_set[0].proof)), - _ => Err(Error::InvalidProof( - "intermediate proofs should be for trees", - )), + e => Err(Error::InvalidProof(format!( + "intermediate proofs should be for trees, got {}", + e.type_str() + ))), }?; expected_child_hash = child_hash; } @@ -779,10 +804,14 @@ impl ProofVerifier { if let Some(hash) = root_key_hash { Ok(hash) } else { - Err(Error::InvalidProof("proof invalid: no non root tree found")) + Err(Error::InvalidProof( + "proof invalid: no non root tree found".to_string(), + )) } } else { - Err(Error::InvalidProof("proof invalid: path not absent")) + Err(Error::InvalidProof( + "proof invalid: path not absent".to_string(), + )) } } @@ -802,7 +831,10 @@ impl ProofVerifier { let (proof_token_type, parent_merk_proof) = proof_reader.read_next_proof(path_slice.last().unwrap_or(&Default::default()))?; if proof_token_type != ProofTokenType::Merk { - return Err(Error::InvalidProof("wrong data_type expected merk proof")); + return Err(Error::InvalidProof(format!( + "wrong data_type expected Merk Proof, got {}", + proof_token_type + ))); } let mut parent_query = Query::new(); @@ -821,15 +853,18 @@ impl ProofVerifier { .1 .expect("MERK_PROOF always returns a result set"); if result_set.is_empty() || &result_set[0].key != key { - return Err(Error::InvalidProof("proof invalid: invalid parent")); + return Err(Error::InvalidProof( + "proof invalid: invalid parent".to_string(), + )); } let elem = 
Element::deserialize(result_set[0].value.as_slice())?; let child_hash = match elem { Element::Tree(..) | Element::SumTree(..) => Ok(result_set[0].proof), - _ => Err(Error::InvalidProof( - "intermediate proofs should be for trees", - )), + t => Err(Error::InvalidProof(format!( + "intermediate proofs should be for trees, got {}", + t.type_str() + ))), }?; let combined_root_hash = combine_hash( @@ -839,9 +874,11 @@ impl ProofVerifier { .value() .to_owned(); if child_hash != combined_root_hash { - return Err(Error::InvalidProof( - "Bad path: tree hash does not have expected hash", - )); + return Err(Error::InvalidProof(format!( + "Bad path: tree hash does not have expected hash, got {}, expected {}", + hex::encode(child_hash), + hex::encode(combined_root_hash) + ))); } *expected_root_hash = proof_result.0; @@ -876,7 +913,7 @@ impl ProofVerifier { .unwrap() .map_err(|e| { eprintln!("{e}"); - Error::InvalidProof("invalid proof verification parameters") + Error::InvalidProof("invalid proof verification parameters".to_string()) })?; // convert the result set to proved_path_key_values diff --git a/grovedb/src/query_result_type.rs b/grovedb/src/query_result_type.rs index 37de6c0d..289ffb26 100644 --- a/grovedb/src/query_result_type.rs +++ b/grovedb/src/query_result_type.rs @@ -49,6 +49,7 @@ pub enum QueryResultType { } /// Query result elements +#[derive(Debug, Clone)] pub struct QueryResultElements { /// Elements pub elements: Vec, @@ -187,6 +188,85 @@ impl QueryResultElements { }) .collect() } + + /// To last path to keys btree map + /// This is useful if for example the element is a sum item and isn't + /// important Used in Platform Drive for getting voters for multiple + /// contenders + pub fn to_last_path_to_keys_btree_map(self) -> BTreeMap> { + let mut map: BTreeMap, Vec> = BTreeMap::new(); + + for result_item in self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((mut path, key, _)) = + result_item + { + if let Some(last) = 
path.pop() { + map.entry(last).or_insert_with(Vec::new).push(key); + } + } + } + + map + } + + /// To last path to key, elements btree map + pub fn to_last_path_to_key_elements_btree_map(self) -> BTreeMap> { + let mut map: BTreeMap, BTreeMap> = BTreeMap::new(); + + for result_item in self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((mut path, key, element)) = + result_item + { + if let Some(last) = path.pop() { + map.entry(last) + .or_insert_with(BTreeMap::new) + .insert(key, element); + } + } + } + + map + } + + /// To last path to elements btree map + /// This is useful if the key is not import + pub fn to_last_path_to_elements_btree_map(self) -> BTreeMap> { + let mut map: BTreeMap, Vec> = BTreeMap::new(); + + for result_item in self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((mut path, _, element)) = + result_item + { + if let Some(last) = path.pop() { + map.entry(last).or_insert_with(Vec::new).push(element); + } + } + } + + map + } + + /// To last path to keys btree map + /// This is useful if for example the element is a sum item and isn't + /// important Used in Platform Drive for getting voters for multiple + /// contenders + pub fn to_previous_of_last_path_to_keys_btree_map(self) -> BTreeMap> { + let mut map: BTreeMap, Vec> = BTreeMap::new(); + + for result_item in self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((mut path, key, _)) = + result_item + { + if let Some(_) = path.pop() { + if let Some(last) = path.pop() { + map.entry(last).or_insert_with(Vec::new).push(key); + } + } + } + } + + map + } } impl Default for QueryResultElements { @@ -196,6 +276,7 @@ impl Default for QueryResultElements { } /// Query result element +#[derive(Debug, Clone)] pub enum QueryResultElement { /// Element result item ElementResultItem(Element), diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index 52f07eb8..38c3f147 100644 --- 
a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -28,7 +28,7 @@ //! Space efficient methods for referencing other elements in GroveDB -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] use std::fmt; use bincode::{Decode, Encode}; @@ -37,7 +37,7 @@ use grovedb_visualize::visualize_to_vec; #[cfg(feature = "full")] use integer_encoding::VarInt; -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] use crate::Error; #[cfg(any(feature = "full", feature = "verify"))] @@ -54,6 +54,16 @@ pub enum ReferencePathType { /// path [p, q] result = [a, b, p, q] UpstreamRootHeightReference(u8, Vec>), + /// This is very similar to the UpstreamRootHeightReference, however + /// it appends to the absolute path when resolving the parent of the + /// reference. If the reference is stored at 15/9/80/7 then 80 will be + /// appended to what we are referring to. For example if we have the + /// reference at [a, b, c, d, e, f] (e is the parent path here) and we + /// have in the UpstreamRootHeightWithParentPathAdditionReference the + /// height set to 2 and the addon path set to [x, y], we would get as a + /// result [a, b, x, y, e] + UpstreamRootHeightWithParentPathAdditionReference(u8, Vec>), + /// This discards the last n elements from the current path and appends a /// new path to the subpath. If current path is [a, b, c, d] and we /// discard the last element, subpath = [a, b, c] we can then append @@ -76,7 +86,31 @@ pub enum ReferencePathType { SiblingReference(Vec), } -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] +impl ReferencePathType { + /// Given the reference path type and the current qualified path (path+key), + /// this computes the absolute path of the item the reference is pointing + /// to. 
+ pub fn absolute_path_using_current_qualified_path>( + self, + current_qualified_path: &[B], + ) -> Result>, Error> { + path_from_reference_qualified_path_type(self, current_qualified_path) + } + + /// Given the reference path type, the current path and the terminal key, + /// this computes the absolute path of the item the reference is + /// pointing to. + pub fn absolute_path>( + self, + current_path: &[B], + current_key: Option<&[u8]>, + ) -> Result>, Error> { + path_from_reference_path_type(self, current_path, current_key) + } +} + +#[cfg(any(feature = "full", feature = "visualize"))] impl fmt::Debug for ReferencePathType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut v = Vec::new(); @@ -86,7 +120,7 @@ impl fmt::Debug for ReferencePathType { } } -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] /// Given the reference path type and the current qualified path (path+key), /// this computes the absolute path of the item the reference is pointing to. pub fn path_from_reference_qualified_path_type>( @@ -103,7 +137,7 @@ pub fn path_from_reference_qualified_path_type>( } } -#[cfg(feature = "full")] +#[cfg(any(feature = "full", feature = "verify"))] /// Given the reference path type, the current path and the terminal key, this /// computes the absolute path of the item the reference is pointing to. 
pub fn path_from_reference_path_type>( @@ -130,6 +164,25 @@ pub fn path_from_reference_path_type>( subpath_as_vec.append(&mut path); Ok(subpath_as_vec) } + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + no_of_elements_to_keep, + mut path, + ) => { + if usize::from(no_of_elements_to_keep) > current_path.len() || current_path.len() == 0 { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + } + let last = current_path.last().unwrap().as_ref().to_vec(); + let current_path_iter = current_path.iter(); + let mut subpath_as_vec = current_path_iter + .take(no_of_elements_to_keep as usize) + .map(|x| x.as_ref().to_vec()) + .collect::>(); + subpath_as_vec.append(&mut path); + subpath_as_vec.push(last); + Ok(subpath_as_vec) + } // Discard the last n elements from current path, append new path to subpath ReferencePathType::UpstreamFromElementHeightReference( @@ -224,6 +277,7 @@ impl ReferencePathType { .sum::() } ReferencePathType::UpstreamRootHeightReference(_, path) + | ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference(_, path) | ReferencePathType::UpstreamFromElementHeightReference(_, path) => { 1 + 1 + path @@ -266,6 +320,27 @@ mod tests { ); } + #[test] + fn test_upstream_root_height_with_parent_addition_reference() { + let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; + // selects the first 2 elements from the stored path and appends the new path. 
+ let ref1 = ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + 2, + vec![b"c".to_vec(), b"d".to_vec()], + ); + let final_path = path_from_reference_path_type(ref1, &stored_path, None).unwrap(); + assert_eq!( + final_path, + vec![ + b"a".to_vec(), + b"b".to_vec(), + b"c".to_vec(), + b"d".to_vec(), + b"m".to_vec() + ] + ); + } + #[test] fn test_upstream_from_element_height_reference() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; diff --git a/grovedb/src/util.rs b/grovedb/src/util.rs index cbedc9af..d05f2396 100644 --- a/grovedb/src/util.rs +++ b/grovedb/src/util.rs @@ -149,6 +149,117 @@ macro_rules! storage_context_with_parent_optional_tx { }; } +/// Macro to execute same piece of code on different storage contexts +/// (transactional or not) using path argument. +macro_rules! storage_context_with_parent_optional_tx_internal_error { + ( + &mut $cost:ident, + $db:expr, + $path:expr, + $batch:expr, + $transaction:ident, + $storage:ident, + $root_key:ident, + $is_sum_tree:ident, + { $($body:tt)* } + ) => { + { + use ::grovedb_storage::Storage; + if let Some(tx) = $transaction { + let $storage = $db + .get_transactional_storage_context($path.clone(), $batch, tx) + .unwrap_add_cost(&mut $cost); + if let Some((parent_path, parent_key)) = $path.derive_parent() { + let parent_storage = $db + .get_transactional_storage_context(parent_path, $batch, tx) + .unwrap_add_cost(&mut $cost); + let result = Element::get_from_storage(&parent_storage, parent_key) + .map_err(|e| { + Error::PathParentLayerNotFound( + format!( + "could not get key for parent of subtree optional on tx: {}", + e + ) + ) + }).unwrap_add_cost(&mut $cost); + match result { + Ok(element) => { + match element { + Element::Tree(root_key, _) => { + let $root_key = root_key; + let $is_sum_tree = false; + $($body)* + } + Element::SumTree(root_key, ..) 
=> { + let $root_key = root_key; + let $is_sum_tree = true; + $($body)* + } + _ => { + return Err(Error::CorruptedData( + "parent is not a tree" + .to_owned(), + )).wrap_with_cost($cost); + } + } + }, + Err(e) => Err(e), + } + } else { + return Err(Error::CorruptedData( + "path is empty".to_owned(), + )).wrap_with_cost($cost); + } + } else { + let $storage = $db + .get_storage_context($path.clone(), $batch).unwrap_add_cost(&mut $cost); + if let Some((parent_path, parent_key)) = $path.derive_parent() { + let parent_storage = $db.get_storage_context( + parent_path, + $batch + ).unwrap_add_cost(&mut $cost); + let result = Element::get_from_storage(&parent_storage, parent_key) + .map_err(|e| { + Error::PathParentLayerNotFound( + format!( + "could not get key for parent of subtree optional no tx: {}", + e + ) + ) + }).unwrap_add_cost(&mut $cost); + match result { + Ok(element) => { + match element { + Element::Tree(root_key, _) => { + let $root_key = root_key; + let $is_sum_tree = false; + $($body)* + } + Element::SumTree(root_key, ..) => { + let $root_key = root_key; + let $is_sum_tree = true; + $($body)* + } + _ => { + return Err(Error::CorruptedData( + "parent is not a tree" + .to_owned(), + )).wrap_with_cost($cost); + } + } + }, + Err(e) => Err(e), + } + } else { + return Err(Error::CorruptedData( + "path is empty".to_owned(), + )).wrap_with_cost($cost); + } + } + } + }; +} + /// Macro to execute same piece of code on different storage contexts with /// empty prefix. macro_rules! meta_storage_context_optional_tx { @@ -245,6 +356,76 @@ macro_rules! merk_optional_tx { }; } +/// Macro to execute same piece of code on Merk with varying storage +/// contexts. +macro_rules! 
merk_optional_tx_internal_error { + ( + &mut $cost:ident, + $db:expr, + $path:expr, + $batch:expr, + $transaction:ident, + $subtree:ident, + { $($body:tt)* } + ) => { + if $path.is_root() { + use crate::util::storage_context_optional_tx; + storage_context_optional_tx!( + $db, + ::grovedb_path::SubtreePath::empty(), + $batch, + $transaction, + storage, + { + let $subtree = cost_return_on_error!( + &mut $cost, + ::grovedb_merk::Merk::open_base( + storage.unwrap_add_cost(&mut $cost), + false, + Some(&Element::value_defined_cost_for_serialized_value) + ).map(|merk_res| + merk_res + .map_err(|_| crate::Error::CorruptedData( + "cannot open a subtree".to_owned() + )) + ) + ); + $($body)* + }) + } else { + use crate::util::storage_context_with_parent_optional_tx_internal_error; + storage_context_with_parent_optional_tx_internal_error!( + &mut $cost, + $db, + $path, + $batch, + $transaction, + storage, + root_key, + is_sum_tree, + { + #[allow(unused_mut)] + let mut $subtree = cost_return_on_error!( + &mut $cost, + ::grovedb_merk::Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + ).map(|merk_res| + merk_res + .map_err(|_| crate::Error::CorruptedData( + "cannot open a subtree".to_owned() + )) + ) + ); + $($body)* + } + ) + } + }; +} + /// Macro to execute same piece of code on Merk with varying storage /// contexts. macro_rules! merk_optional_tx_path_not_empty { @@ -331,8 +512,10 @@ macro_rules! 
root_merk_optional_tx { } pub(crate) use merk_optional_tx; +pub(crate) use merk_optional_tx_internal_error; pub(crate) use merk_optional_tx_path_not_empty; pub(crate) use meta_storage_context_optional_tx; pub(crate) use root_merk_optional_tx; pub(crate) use storage_context_optional_tx; pub(crate) use storage_context_with_parent_optional_tx; +pub(crate) use storage_context_with_parent_optional_tx_internal_error; diff --git a/grovedb/src/visualize.rs b/grovedb/src/visualize.rs index 6f1f1c0d..9eb1c00b 100644 --- a/grovedb/src/visualize.rs +++ b/grovedb/src/visualize.rs @@ -130,6 +130,21 @@ impl Visualize for ReferencePathType { .as_bytes(), )?; } + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + height, + end_path, + ) => { + drawer.write(b"upstream root height with parent path addition reference: ")?; + drawer.write(format!("[height: {height}").as_bytes())?; + drawer.write( + end_path + .iter() + .map(hex::encode) + .collect::>() + .join("/") + .as_bytes(), + )?; + } ReferencePathType::UpstreamFromElementHeightReference(up, end_path) => { drawer.write(b"upstream from element reference: ")?; drawer.write(format!("[up: {up}").as_bytes())?; diff --git a/grovedbg-types/src/lib.rs b/grovedbg-types/src/lib.rs index ff7a4127..dacc4255 100644 --- a/grovedbg-types/src/lib.rs +++ b/grovedbg-types/src/lib.rs @@ -44,6 +44,10 @@ pub enum Element { n_keep: u32, path_append: Vec, }, + UpstreamRootHeightWithParentPathAdditionReference { + n_keep: u32, + path_append: Vec, + }, UpstreamFromElementHeightReference { n_remove: u32, path_append: Vec, diff --git a/merk/src/estimated_costs/average_case_costs.rs b/merk/src/estimated_costs/average_case_costs.rs index b92222ac..1453d708 100644 --- a/merk/src/estimated_costs/average_case_costs.rs +++ b/merk/src/estimated_costs/average_case_costs.rs @@ -55,7 +55,7 @@ pub type AverageFlagsSize = u32; pub type Weight = u8; #[cfg(feature = "full")] -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, 
Eq, Debug)] /// Estimated number of sum trees #[derive(Default)] pub enum EstimatedSumTrees { @@ -91,7 +91,7 @@ impl EstimatedSumTrees { } #[cfg(feature = "full")] -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Estimated layer sizes pub enum EstimatedLayerSizes { /// All subtrees @@ -259,7 +259,7 @@ pub type EstimatedLevelNumber = u32; pub type EstimatedToBeEmpty = bool; #[cfg(feature = "full")] -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Information on an estimated layer pub struct EstimatedLayerInformation { /// Is sum tree? @@ -274,7 +274,7 @@ pub struct EstimatedLayerInformation { impl EstimatedLayerInformation {} #[cfg(feature = "full")] -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Estimated elements and level number of a layer pub enum EstimatedLayerCount { /// Potentially at max elements diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index 0a0b805e..28ce3f43 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -220,6 +220,17 @@ pub enum MerkType { LayeredMerk, } +impl fmt::Display for MerkType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let description = match self { + MerkType::StandaloneMerk => "StandaloneMerk", + MerkType::BaseMerk => "BaseMerk", + MerkType::LayeredMerk => "LayeredMerk", + }; + write!(f, "{}", description) + } +} + impl MerkType { /// Returns bool pub(crate) fn requires_root_storage_update(&self) -> bool { diff --git a/merk/src/proofs/encoding.rs b/merk/src/proofs/encoding.rs index b0e31484..d0395fe7 100644 --- a/merk/src/proofs/encoding.rs +++ b/merk/src/proofs/encoding.rs @@ -998,7 +998,7 @@ mod test { #[test] fn decode_multiple_child() { let bytes = [0x11, 0x11, 0x11, 0x10]; - let mut decoder = Decoder { + let decoder = Decoder { bytes: &bytes, offset: 0, }; diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index 9d485564..29296efc 100644 
--- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -44,7 +44,7 @@ mod verify; #[cfg(any(feature = "full", feature = "verify"))] use std::cmp::Ordering; -use std::collections::HashSet; +use std::{collections::HashSet, ops::RangeFull}; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_costs::{cost_return_on_error, CostContext, CostResult, CostsExt, OperationCost}; From d9814a2e4449156ed5d89badc2cde4b5cbb755b4 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Sun, 7 Jul 2024 02:55:46 +0700 Subject: [PATCH 32/37] fix: query item range inclusive right to left bounds (#310) --- merk/src/proofs/query/query_item/mod.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/merk/src/proofs/query/query_item/mod.rs b/merk/src/proofs/query/query_item/mod.rs index e950df81..63f3cc0a 100644 --- a/merk/src/proofs/query/query_item/mod.rs +++ b/merk/src/proofs/query/query_item/mod.rs @@ -295,11 +295,13 @@ impl QueryItem { iter.seek(end).flat_map(|_| iter.prev()) } } - QueryItem::RangeInclusive(range_inclusive) => iter.seek(if left_to_right { - range_inclusive.start() - } else { - range_inclusive.end() - }), + QueryItem::RangeInclusive(range_inclusive) => { + if left_to_right { + iter.seek(range_inclusive.start()) + } else { + iter.seek_for_prev(range_inclusive.end()) + } + } QueryItem::RangeFull(..) 
=> { if left_to_right { iter.seek_to_first() From 1b9593278b053b59b54c077218d07daf7598f58d Mon Sep 17 00:00:00 2001 From: fominok Date: Mon, 8 Jul 2024 18:23:12 +0200 Subject: [PATCH 33/37] better trunk panic (#311) --- grovedb/build.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/grovedb/build.rs b/grovedb/build.rs index 6186d0b1..45197586 100644 --- a/grovedb/build.rs +++ b/grovedb/build.rs @@ -3,23 +3,28 @@ fn main() { use std::{ env, path::PathBuf, - process::{Command, ExitStatus}, + process::{Command, ExitStatus, Output}, }; let out_dir = PathBuf::from(&env::var_os("OUT_DIR").unwrap()); - if !Command::new("trunk") + let Output { + status, + stdout, + stderr, + } = Command::new("trunk") .arg("build") .arg("--release") .arg("--dist") .arg(&out_dir) .arg("grovedbg/index.html") - .status() - .as_ref() - .map(ExitStatus::success) - .unwrap_or(false) - { - panic!("Error running `trunk build --release`"); + .output() + .expect("cannot start trunk process"); + + if !status.success() { + let stdout_msg = String::from_utf8_lossy(&stdout); + let stderr_msg = String::from_utf8_lossy(&stderr); + panic!("Error running `trunk build --release`\n{stdout_msg}\n{stderr_msg}"); } let zip_file = out_dir.join("grovedbg.zip"); From 9cecef910c453fd1dd00eaa8ba2251b15a193bcd Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Tue, 9 Jul 2024 21:11:47 +0700 Subject: [PATCH 34/37] feat!: proof system v1 (from v0.1) (#309) * potential limit fix * removed offset from proofs * new proof system * much more work on new proof system * more work on new proof system * more work on proofs * much more work on proofs * more work on proofs * more work * more fixes * fmt * more work * fixed tests for merk proofs * more work * more work * more work * more work * all tests passing * trial * more work * complex conditional queries * cleanup * clippy fixes * fmt * small fix * fmt * reactivated test * cleaned up merk * cleaned up merk * small fix * cleaned up 
verification imports * clippy fixes * clippy fixes * added documentation --- grovedb/Cargo.toml | 5 +- grovedb/src/batch/mod.rs | 6 +- grovedb/src/element/helpers.rs | 26 +- grovedb/src/element/mod.rs | 92 +- grovedb/src/element/query.rs | 163 +- grovedb/src/error.rs | 25 +- grovedb/src/lib.rs | 19 +- grovedb/src/operations.rs | 45 - grovedb/src/operations/auxiliary.rs | 2 +- grovedb/src/operations/delete/mod.rs | 8 +- grovedb/src/operations/get/mod.rs | 2 +- grovedb/src/operations/get/query.rs | 16 +- grovedb/src/operations/insert/mod.rs | 4 +- grovedb/src/operations/mod.rs | 18 + grovedb/src/operations/proof.rs | 36 - grovedb/src/operations/proof/generate.rs | 1062 ++--- grovedb/src/operations/proof/mod.rs | 165 + grovedb/src/operations/proof/util.rs | 615 +-- grovedb/src/operations/proof/verify.rs | 1211 ++---- grovedb/src/query/mod.rs | 853 +++- grovedb/src/query_result_type.rs | 266 +- grovedb/src/reference_path.rs | 57 +- grovedb/src/replication.rs | 40 +- grovedb/src/tests/common.rs | 30 +- grovedb/src/tests/mod.rs | 4986 ++++++++++++---------- grovedb/src/tests/query_tests.rs | 4584 ++++++++++---------- grovedb/src/tests/sum_tree_tests.rs | 32 +- grovedb/src/versioning.rs | 63 - merk/Cargo.toml | 7 +- merk/benches/merk.rs | 2 +- merk/src/lib.rs | 6 +- merk/src/merk/chunks.rs | 28 - merk/src/merk/mod.rs | 8 + merk/src/merk/prove.rs | 71 +- merk/src/merk/restore.rs | 36 +- merk/src/proofs/chunk.rs | 28 - merk/src/proofs/chunk/util.rs | 18 +- merk/src/proofs/encoding.rs | 28 - merk/src/proofs/mod.rs | 77 +- merk/src/proofs/query/map.rs | 92 +- merk/src/proofs/query/mod.rs | 3000 ++++--------- merk/src/proofs/query/query_item/mod.rs | 50 +- merk/src/proofs/query/verify.rs | 644 +-- merk/src/proofs/tree.rs | 28 - merk/src/test_utils/mod.rs | 1 - merk/src/tree/commit.rs | 28 - merk/src/tree/encoding.rs | 28 - merk/src/tree/fuzz_tests.rs | 28 - merk/src/tree/hash.rs | 28 - merk/src/tree/iter.rs | 28 - merk/src/tree/kv.rs | 28 - merk/src/tree/link.rs | 28 - 
merk/src/tree/mod.rs | 28 - merk/src/tree/ops.rs | 28 - merk/src/tree/tree_feature_type.rs | 28 - merk/src/tree/walk/fetch.rs | 28 - merk/src/tree/walk/mod.rs | 28 - merk/src/tree/walk/ref_walker.rs | 28 - tutorials/src/bin/proofs.rs | 2 +- tutorials/src/bin/replication.rs | 2 +- 60 files changed, 8935 insertions(+), 9988 deletions(-) delete mode 100644 grovedb/src/operations.rs create mode 100644 grovedb/src/operations/mod.rs delete mode 100644 grovedb/src/operations/proof.rs create mode 100644 grovedb/src/operations/proof/mod.rs delete mode 100644 grovedb/src/versioning.rs diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index 992cb19e..28ba9995 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -19,10 +19,11 @@ grovedb-storage = { version = "1.0.0-rc.2", path = "../storage", optional = true grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize", optional = true } hex = { version = "0.4.3"} itertools = { version = "0.12.1", optional = true } +derive_more = { version = "0.99.18" } integer-encoding = { version = "4.0.0", optional = true } grovedb-costs = { version = "1.0.0-rc.2", path = "../costs", optional = true } nohash-hasher = { version = "0.2.0", optional = true } -indexmap = { version = "2.2.6", optional = true } +indexmap = { version = "2.2.6"} intmap = { version = "2.0.0", optional = true } grovedb-path = { version = "1.0.0-rc.2", path = "../path" } grovedbg-types = { path = "../grovedbg-types", optional = true } @@ -45,6 +46,7 @@ harness = false [features] default = ["full"] +proof_debug = ["grovedb-merk/proof_debug"] full = [ "grovedb-merk/full", "thiserror", @@ -55,7 +57,6 @@ full = [ "integer-encoding", "grovedb-costs", "nohash-hasher", - "indexmap", "intmap" ] visualize = [ diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index 8674672c..474a304e 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -1000,7 +1000,7 @@ where let mut merk = cost_return_on_error!(&mut cost, merk_wrapped); 
merk.set_base_root_key(root_key) .add_cost(cost) - .map_err(|_| Error::InternalError("unable to set base root key")) + .map_err(|_| Error::InternalError("unable to set base root key".to_string())) } fn execute_ops_on_path( @@ -1804,7 +1804,7 @@ impl GroveDb { .add_cost(cost) } else { Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree", + "cannot open a subtree as parent exists but is not a tree".to_string(), )) .wrap_with_cost(OperationCost::default()) } @@ -3493,7 +3493,7 @@ mod tests { reference_key_query.insert_key(b"key1".to_vec()); let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], reference_key_query); let proof = db - .prove_query(&path_query) + .prove_query(&path_query, None) .unwrap() .expect("should generate proof"); let verification_result = GroveDb::verify_query_raw(&proof, &path_query); diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index e7cb9df1..59cc2563 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -43,15 +43,17 @@ use grovedb_merk::{ #[cfg(feature = "full")] use integer_encoding::VarInt; +#[cfg(feature = "full")] +use crate::reference_path::path_from_reference_path_type; #[cfg(any(feature = "full", feature = "verify"))] -use crate::reference_path::{path_from_reference_path_type, ReferencePathType}; -#[cfg(any(feature = "full", feature = "verify"))] -use crate::{element::SUM_ITEM_COST_SIZE, Element, Error}; +use crate::reference_path::ReferencePathType; #[cfg(feature = "full")] use crate::{ - element::{SUM_TREE_COST_SIZE, TREE_COST_SIZE}, + element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}, ElementFlags, }; +#[cfg(any(feature = "full", feature = "verify"))] +use crate::{Element, Error}; impl Element { #[cfg(any(feature = "full", feature = "verify"))] @@ -133,9 +135,15 @@ impl Element { matches!(self, Element::SumTree(..)) } + #[cfg(any(feature = "full", feature = "verify"))] + /// Check if the element is a tree but 
not a sum tree + pub fn is_basic_tree(&self) -> bool { + matches!(self, Element::Tree(..)) + } + #[cfg(any(feature = "full", feature = "verify"))] /// Check if the element is a tree - pub fn is_tree(&self) -> bool { + pub fn is_any_tree(&self) -> bool { matches!(self, Element::SumTree(..) | Element::Tree(..)) } @@ -147,10 +155,16 @@ impl Element { #[cfg(any(feature = "full", feature = "verify"))] /// Check if the element is an item - pub fn is_item(&self) -> bool { + pub fn is_any_item(&self) -> bool { matches!(self, Element::Item(..) | Element::SumItem(..)) } + #[cfg(any(feature = "full", feature = "verify"))] + /// Check if the element is an item + pub fn is_basic_item(&self) -> bool { + matches!(self, Element::Item(..)) + } + #[cfg(any(feature = "full", feature = "verify"))] /// Check if the element is a sum item pub fn is_sum_item(&self) -> bool { diff --git a/grovedb/src/element/mod.rs b/grovedb/src/element/mod.rs index 4c29c400..a6add9e6 100644 --- a/grovedb/src/element/mod.rs +++ b/grovedb/src/element/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Module for subtrees handling. //! Subtrees handling is isolated so basically this module is about adapting //! Merk API to GroveDB needs. @@ -48,8 +20,8 @@ mod query; pub use query::QueryOptions; #[cfg(any(feature = "full", feature = "verify"))] mod serialize; -#[cfg(feature = "full")] -use core::fmt; +#[cfg(any(feature = "full", feature = "verify"))] +use std::fmt; use bincode::{Decode, Encode}; #[cfg(any(feature = "full", feature = "verify"))] @@ -59,6 +31,7 @@ use grovedb_merk::estimated_costs::{LAYER_COST_SIZE, SUM_LAYER_COST_SIZE}; #[cfg(feature = "full")] use grovedb_visualize::visualize_to_vec; +use crate::operations::proof::util::hex_to_ascii; #[cfg(any(feature = "full", feature = "verify"))] use crate::reference_path::ReferencePathType; @@ -111,6 +84,65 @@ pub enum Element { SumTree(Option>, SumValue, Option), } +impl fmt::Display for Element { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Element::Item(data, flags) => { + write!( + f, + "Item({}{})", + hex_to_ascii(data), + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + Element::Reference(path, max_hop, flags) => { + write!( + f, + "Reference({}, max_hop: {}{})", + path, + max_hop.map_or("None".to_string(), |h| h.to_string()), + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + Element::Tree(root_key, flags) => { + write!( + f, + "Tree({}{})", + root_key.as_ref().map_or("None".to_string(), hex::encode), + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + Element::SumItem(sum_value, flags) => { + write!( + f, + "SumItem({}{}", + sum_value, + flags + .as_ref() + .map_or(String::new(), 
|f| format!(", flags: {:?}", f)) + ) + } + Element::SumTree(root_key, sum_value, flags) => { + write!( + f, + "SumTree({}, {}{}", + root_key.as_ref().map_or("None".to_string(), hex::encode), + sum_value, + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + } + } +} + impl Element { pub fn type_str(&self) -> &str { match self { diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index c992ba26..48d9e34d 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -29,6 +29,8 @@ //! Query //! Implements functions in Element for querying +use std::fmt; + #[cfg(feature = "full")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostContext, CostResult, CostsExt, @@ -36,13 +38,17 @@ use grovedb_costs::{ }; #[cfg(feature = "full")] use grovedb_merk::proofs::query::query_item::QueryItem; +#[cfg(feature = "full")] +use grovedb_merk::proofs::query::SubqueryBranch; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_merk::proofs::Query; +#[cfg(feature = "full")] use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::{rocksdb_storage::RocksDbStorage, RawIterator, StorageContext}; -use crate::query_result_type::Path; +#[cfg(feature = "full")] +use crate::operations::proof::util::hex_to_ascii; #[cfg(feature = "full")] use crate::{ element::helpers::raw_decode, @@ -57,7 +63,7 @@ use crate::{ Error, PathQuery, TransactionArg, }; #[cfg(any(feature = "full", feature = "verify"))] -use crate::{Element, SizedQuery}; +use crate::{query_result_type::Path, Element, SizedQuery}; #[cfg(any(feature = "full", feature = "verify"))] #[derive(Copy, Clone, Debug)] @@ -74,6 +80,26 @@ pub struct QueryOptions { pub error_if_intermediate_path_tree_not_present: bool, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for QueryOptions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "QueryOptions {{")?; + writeln!(f, " 
allow_get_raw: {}", self.allow_get_raw)?; + writeln!(f, " allow_cache: {}", self.allow_cache)?; + writeln!( + f, + " decrease_limit_on_range_with_no_sub_elements: {}", + self.decrease_limit_on_range_with_no_sub_elements + )?; + writeln!( + f, + " error_if_intermediate_path_tree_not_present: {}", + self.error_if_intermediate_path_tree_not_present + )?; + write!(f, "}}") + } +} + #[cfg(any(feature = "full", feature = "verify"))] impl Default for QueryOptions { fn default() -> Self { @@ -107,6 +133,124 @@ where pub offset: &'a mut Option, } +#[cfg(feature = "full")] +fn format_query(query: &Query, indent: usize) -> String { + let indent_str = " ".repeat(indent); + let mut output = format!("{}Query {{\n", indent_str); + + output += &format!("{} items: [\n", indent_str); + for item in &query.items { + output += &format!("{} {},\n", indent_str, item); + } + output += &format!("{} ],\n", indent_str); + + output += &format!( + "{} default_subquery_branch: {}\n", + indent_str, + format_subquery_branch(&query.default_subquery_branch, indent + 2) + ); + + if let Some(ref branches) = query.conditional_subquery_branches { + output += &format!("{} conditional_subquery_branches: {{\n", indent_str); + for (item, branch) in branches { + output += &format!( + "{} {}: {},\n", + indent_str, + item, + format_subquery_branch(branch, indent + 4) + ); + } + output += &format!("{} }},\n", indent_str); + } + + output += &format!("{} left_to_right: {}\n", indent_str, query.left_to_right); + output += &format!("{}}}", indent_str); + + output +} + +#[cfg(feature = "full")] +fn format_subquery_branch(branch: &SubqueryBranch, indent: usize) -> String { + let indent_str = " ".repeat(indent); + let mut output = "SubqueryBranch {{\n".to_string(); + + if let Some(ref path) = branch.subquery_path { + output += &format!("{} subquery_path: {:?},\n", indent_str, path); + } + + if let Some(ref subquery) = branch.subquery { + output += &format!( + "{} subquery: {},\n", + indent_str, + 
format_query(subquery, indent + 2) + ); + } + + output += &format!("{}}}", " ".repeat(indent)); + + output +} + +#[cfg(feature = "full")] +impl<'db, 'ctx, 'a> fmt::Display for PathQueryPushArgs<'db, 'ctx, 'a> +where + 'db: 'ctx, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "PathQueryPushArgs {{")?; + writeln!( + f, + " key: {}", + self.key.map_or("None".to_string(), hex_to_ascii) + )?; + writeln!(f, " element: {}", self.element)?; + writeln!( + f, + " path: [{}]", + self.path + .iter() + .map(|p| hex_to_ascii(p)) + .collect::>() + .join(", ") + )?; + writeln!( + f, + " subquery_path: {}", + self.subquery_path + .as_ref() + .map_or("None".to_string(), |p| format!( + "[{}]", + p.iter() + .map(|e| hex_to_ascii(e.as_slice())) + .collect::>() + .join(", ") + )) + )?; + writeln!( + f, + " subquery: {}", + self.subquery + .as_ref() + .map_or("None".to_string(), |q| format!("\n{}", format_query(q, 4))) + )?; + writeln!(f, " left_to_right: {}", self.left_to_right)?; + writeln!(f, " query_options: {}", self.query_options)?; + writeln!(f, " result_type: {}", self.result_type)?; + writeln!( + f, + " results: [{}]", + self.results + .iter() + .map(|r| format!("{}", r)) + .collect::>() + .join(", ") + )?; + writeln!(f, " limit: {:?}", self.limit)?; + writeln!(f, " offset: {:?}", self.offset)?; + write!(f, "}}") + } +} + impl Element { #[cfg(feature = "full")] /// Returns a vector of result elements based on given query @@ -285,6 +429,8 @@ impl Element { #[cfg(feature = "full")] /// Push arguments to path query fn path_query_push(args: PathQueryPushArgs) -> CostResult<(), Error> { + // println!("path_query_push {} \n", args); + let mut cost = OperationCost::default(); let PathQueryPushArgs { @@ -308,7 +454,7 @@ impl Element { decrease_limit_on_range_with_no_sub_elements, .. 
} = query_options; - if element.is_tree() { + if element.is_any_tree() { let mut path_vec = path.to_vec(); let key = cost_return_on_error_no_add!( &cost, @@ -623,7 +769,7 @@ impl Element { } } else { Err(Error::InternalError( - "QueryItem must be a Key if not a range", + "QueryItem must be a Key if not a range".to_string(), )) } } else { @@ -698,6 +844,7 @@ impl Element { #[cfg(feature = "full")] fn basic_push(args: PathQueryPushArgs) -> Result<(), Error> { + // println!("basic_push {}", args); let PathQueryPushArgs { path, key, @@ -717,14 +864,18 @@ impl Element { results.push(QueryResultElement::ElementResultItem(element)); } QueryResultType::QueryKeyElementPairResultType => { - let key = key.ok_or(Error::CorruptedPath("basic push must have a key"))?; + let key = key.ok_or(Error::CorruptedPath( + "basic push must have a key".to_string(), + ))?; results.push(QueryResultElement::KeyElementPairResultItem(( Vec::from(key), element, ))); } QueryResultType::QueryPathKeyElementTrioResultType => { - let key = key.ok_or(Error::CorruptedPath("basic push must have a key"))?; + let key = key.ok_or(Error::CorruptedPath( + "basic push must have a key".to_string(), + ))?; let path = path.iter().map(|a| a.to_vec()).collect(); results.push(QueryResultElement::PathKeyElementTrioResultItem(( path, diff --git a/grovedb/src/error.rs b/grovedb/src/error.rs index 956b5343..c430c5ae 100644 --- a/grovedb/src/error.rs +++ b/grovedb/src/error.rs @@ -1,9 +1,14 @@ //! 
GroveDB Errors +use std::convert::Infallible; + /// GroveDB Errors #[cfg(any(feature = "full", feature = "verify"))] #[derive(Debug, thiserror::Error)] pub enum Error { + #[error("infallible")] + /// This error can not happen, used for generics + Infallible, // Input data errors #[error("cyclic reference path")] /// Cyclic reference @@ -16,7 +21,7 @@ pub enum Error { MissingReference(String), #[error("internal error: {0}")] /// Internal error - InternalError(&'static str), + InternalError(String), #[error("invalid proof: {0}")] /// Invalid proof InvalidProof(String), @@ -62,7 +67,7 @@ pub enum Error { /// The corrupted path represents a consistency error in internal groveDB /// logic #[error("corrupted path: {0}")] - CorruptedPath(&'static str), + CorruptedPath(String), // Query errors #[error("invalid query: {0}")] @@ -85,6 +90,10 @@ pub enum Error { /// Corrupted data CorruptedData(String), + #[error("data storage error: {0}")] + /// Corrupted storage + CorruptedStorage(String), + #[error("invalid code execution error: {0}")] /// Invalid code execution InvalidCodeExecution(&'static str), @@ -139,3 +148,15 @@ pub enum Error { /// Merk error MerkError(grovedb_merk::error::Error), } + +impl From for Error { + fn from(_value: Infallible) -> Self { + Self::Infallible + } +} + +impl From for Error { + fn from(value: grovedb_merk::Error) -> Self { + Error::MerkError(value) + } +} diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 9a0068eb..206ace71 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -164,7 +164,6 @@ pub mod replication; mod tests; #[cfg(feature = "full")] mod util; -mod versioning; #[cfg(feature = "full")] mod visualize; @@ -202,6 +201,7 @@ use grovedb_merk::{ tree::{combine_hash, value_hash}, BatchEntry, CryptoHash, KVIterator, Merk, }; +#[cfg(feature = "full")] use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::PrefixedRocksDbImmediateStorageContext; @@ -227,7 +227,7 @@ use 
crate::element::helpers::raw_decode; pub use crate::error::Error; #[cfg(feature = "full")] use crate::util::{root_merk_optional_tx, storage_context_optional_tx}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(feature = "full")] use crate::Error::MerkError; #[cfg(feature = "full")] @@ -239,6 +239,7 @@ pub struct GroveDb { db: RocksDbStorage, } +#[cfg(feature = "full")] pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; /// Transaction @@ -318,7 +319,7 @@ impl GroveDb { .add_cost(cost) } else { Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree", + "cannot open a subtree as parent exists but is not a tree".to_string(), )) .wrap_with_cost(cost) } @@ -378,7 +379,7 @@ impl GroveDb { .unwrap() } else { Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree", + "cannot open a subtree as parent exists but is not a tree".to_string(), )) } } else { @@ -438,7 +439,7 @@ impl GroveDb { .add_cost(cost) } else { Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree", + "cannot open a subtree as parent exists but is not a tree".to_string(), )) .wrap_with_cost(cost) } @@ -894,7 +895,7 @@ impl GroveDb { while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { let element = raw_decode(&element_value)?; - if element.is_tree() { + if element.is_any_tree() { let (kv_value, element_value_hash) = merk .get_value_and_value_hash( &key, @@ -924,7 +925,7 @@ impl GroveDb { ); } issues.extend(self.verify_merk_and_submerks(inner_merk, &new_path_ref, batch)?); - } else if element.is_item() { + } else if element.is_any_item() { let (kv_value, element_value_hash) = merk .get_value_and_value_hash( &key, @@ -964,7 +965,7 @@ impl GroveDb { while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { let element = raw_decode(&element_value)?; - if element.is_tree() { + if element.is_any_tree() { let (kv_value, element_value_hash) = merk 
.get_value_and_value_hash( &key, @@ -999,7 +1000,7 @@ impl GroveDb { batch, transaction, )?); - } else if element.is_item() { + } else if element.is_any_item() { let (kv_value, element_value_hash) = merk .get_value_and_value_hash( &key, diff --git a/grovedb/src/operations.rs b/grovedb/src/operations.rs deleted file mode 100644 index 9864b0bc..00000000 --- a/grovedb/src/operations.rs +++ /dev/null @@ -1,45 +0,0 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! 
Operations for the manipulation of GroveDB state - -#[cfg(feature = "full")] -pub(crate) mod auxiliary; -#[cfg(feature = "full")] -pub mod delete; -#[cfg(feature = "full")] -pub(crate) mod get; -#[cfg(feature = "full")] -pub mod insert; -#[cfg(feature = "full")] -pub(crate) mod is_empty_tree; -#[cfg(any(feature = "full", feature = "verify"))] -pub mod proof; - -#[cfg(feature = "full")] -pub use get::{QueryItemOrSumReturnType, MAX_REFERENCE_HOPS}; diff --git a/grovedb/src/operations/auxiliary.rs b/grovedb/src/operations/auxiliary.rs index 1b6b884d..6f9fd576 100644 --- a/grovedb/src/operations/auxiliary.rs +++ b/grovedb/src/operations/auxiliary.rs @@ -155,7 +155,7 @@ impl GroveDb { while let Some((key, value)) = cost_return_on_error!(&mut cost, raw_iter.next_element()) { - if value.is_tree() { + if value.is_any_tree() { let mut sub_path = q.clone(); sub_path.push(key.to_vec()); queue.push(sub_path.clone()); diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index 84d14652..d13fdd61 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -221,7 +221,7 @@ impl GroveDb { element_iterator.next_kv().unwrap_add_cost(&mut cost) { let element = raw_decode(&element_value).unwrap(); - if element.is_tree() { + if element.is_any_tree() { if options.allow_deleting_subtrees { cost_return_on_error!( &mut cost, @@ -284,7 +284,7 @@ impl GroveDb { { let element = raw_decode(&element_value).unwrap(); if options.allow_deleting_subtrees { - if element.is_tree() { + if element.is_any_tree() { cost_return_on_error!( &mut cost, self.delete( @@ -636,7 +636,7 @@ impl GroveDb { self.open_transactional_merk_at_path(path.clone(), transaction, Some(batch)) ); let uses_sum_tree = subtree_to_delete_from.is_sum_tree; - if element.is_tree() { + if element.is_any_tree() { let subtree_merk_path = path.derive_owned_with_child(key); let subtree_merk_path_ref = SubtreePath::from(&subtree_merk_path); @@ -800,7 +800,7 @@ impl 
GroveDb { self.open_non_transactional_merk_at_path(path.clone(), Some(batch)) ); let uses_sum_tree = subtree_to_delete_from.is_sum_tree; - if element.is_tree() { + if element.is_any_tree() { let subtree_merk_path = path.derive_owned_with_child(key); let subtree_of_tree_we_are_deleting = cost_return_on_error!( &mut cost, diff --git a/grovedb/src/operations/get/mod.rs b/grovedb/src/operations/get/mod.rs index 12700106..4cc9f949 100644 --- a/grovedb/src/operations/get/mod.rs +++ b/grovedb/src/operations/get/mod.rs @@ -143,7 +143,7 @@ impl GroveDb { }) ) } else { - return Err(Error::CorruptedPath("empty path")).wrap_with_cost(cost); + return Err(Error::CorruptedPath("empty path".to_string())).wrap_with_cost(cost); } visited.insert(current_path.clone()); match current_element { diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index 7e29b233..6ba914ef 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -38,7 +38,10 @@ use integer_encoding::VarInt; #[cfg(feature = "full")] use crate::element::SumValue; -use crate::{element::QueryOptions, query_result_type::PathKeyOptionalElementTrio}; +use crate::{ + element::QueryOptions, operations::proof::ProveOptions, + query_result_type::PathKeyOptionalElementTrio, +}; #[cfg(feature = "full")] use crate::{ query_result_type::{QueryResultElement, QueryResultElements, QueryResultType}, @@ -152,7 +155,7 @@ where { pub fn get_proved_path_query( &self, path_query: &PathQuery, - is_verbose: bool, + prove_options: Option, transaction: TransactionArg, ) -> CostResult, Error> { if transaction.is_some() { @@ -160,10 +163,8 @@ where { "transactions are not currently supported".to_string(), )) .wrap_with_cost(Default::default()) - } else if is_verbose { - self.prove_verbose(path_query) } else { - self.prove_query(path_query) + self.prove_query(path_query, prove_options) } } @@ -191,7 +192,7 @@ where { ) .unwrap_add_cost(cost)?; - if maybe_item.is_item() { + if 
maybe_item.is_any_item() { Ok(maybe_item) } else { Err(Error::InvalidQuery("the reference must result in an item")) @@ -1273,8 +1274,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - let raw_result = db - .query_raw_keys_optional(&path_query, true, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None) .unwrap() .expect_err( "query with subquery should error if error_if_intermediate_path_tree_not_present \ diff --git a/grovedb/src/operations/insert/mod.rs b/grovedb/src/operations/insert/mod.rs index 513e2098..5670a939 100644 --- a/grovedb/src/operations/insert/mod.rs +++ b/grovedb/src/operations/insert/mod.rs @@ -239,7 +239,7 @@ impl GroveDb { Error::CorruptedData(String::from("unable to deserialize element")) }) ); - if element.is_tree() { + if element.is_any_tree() { return Err(Error::OverrideNotAllowed( "insertion not allowed to override tree", )) @@ -378,7 +378,7 @@ impl GroveDb { Error::CorruptedData(String::from("unable to deserialize element")) }) ); - if element.is_tree() { + if element.is_any_tree() { return Err(Error::OverrideNotAllowed( "insertion not allowed to override tree", )) diff --git a/grovedb/src/operations/mod.rs b/grovedb/src/operations/mod.rs new file mode 100644 index 00000000..ba9b8599 --- /dev/null +++ b/grovedb/src/operations/mod.rs @@ -0,0 +1,18 @@ +//! 
Operations for the manipulation of GroveDB state + +#[cfg(feature = "full")] +pub(crate) mod auxiliary; +#[cfg(feature = "full")] +pub mod delete; +#[cfg(feature = "full")] +pub(crate) mod get; +#[cfg(feature = "full")] +pub mod insert; +#[cfg(feature = "full")] +pub(crate) mod is_empty_tree; + +#[cfg(any(feature = "full", feature = "verify"))] +pub mod proof; + +#[cfg(feature = "full")] +pub use get::{QueryItemOrSumReturnType, MAX_REFERENCE_HOPS}; diff --git a/grovedb/src/operations/proof.rs b/grovedb/src/operations/proof.rs deleted file mode 100644 index 1734c6c6..00000000 --- a/grovedb/src/operations/proof.rs +++ /dev/null @@ -1,36 +0,0 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! 
Proof operations - -#[cfg(feature = "full")] -mod generate; -#[cfg(any(feature = "full", feature = "verify"))] -pub mod util; -#[cfg(any(feature = "full", feature = "verify"))] -pub mod verify; diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index fad64c84..fe5866e8 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -1,84 +1,42 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Generate proof operations -// TODO: entire file is due for a refactor, need some kind of path generator -// that supports multiple implementations for verbose and non-verbose -// generation +use std::collections::BTreeMap; use grovedb_costs::{ cost_return_on_error, cost_return_on_error_default, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; use grovedb_merk::{ - proofs::{encode_into, Node, Op}, + proofs::{encode_into, query::QueryItem, Node, Op}, tree::value_hash, - KVIterator, Merk, ProofWithoutEncodingResult, + Merk, ProofWithoutEncodingResult, }; -use grovedb_path::SubtreePath; use grovedb_storage::StorageContext; +#[cfg(feature = "proof_debug")] +use crate::query_result_type::QueryResultType; use crate::{ - element::helpers::raw_decode, - operations::proof::util::{ - increase_limit_and_offset_by, reduce_limit_and_offset_by, write_slice_of_slice_to_slice, - write_slice_to_vec, write_to_vec, ProofTokenType, EMPTY_TREE_HASH, + operations::proof::{ + util::hex_to_ascii, GroveDBProof, GroveDBProofV0, LayerProof, ProveOptions, }, reference_path::path_from_reference_path_type, - versioning::{prepend_version_to_bytes, PROOF_VERSION}, - Element, Error, GroveDb, PathQuery, Query, + Element, Error, GroveDb, PathQuery, }; -type LimitOffset = (Option, Option); - impl GroveDb { /// Prove one or more path queries. - /// If we more than one path query, we merge into a single path query before - /// proving. - pub fn prove_query_many(&self, query: Vec<&PathQuery>) -> CostResult, Error> { - if query.len() > 1 { - let query = cost_return_on_error_default!(PathQuery::merge(query)); - self.prove_query(&query) - } else { - self.prove_query(query[0]) - } - } - - /// Prove one or more path queries verbose. - /// If we more than one path query, we merge into a single path query before - /// proving verbose. 
- pub fn prove_verbose_many(&self, query: Vec<&PathQuery>) -> CostResult, Error> { + /// If we have more than one path query, we merge into a single path query + /// before proving. + pub fn prove_query_many( + &self, + query: Vec<&PathQuery>, + prove_options: Option, + ) -> CostResult, Error> { if query.len() > 1 { let query = cost_return_on_error_default!(PathQuery::merge(query)); - self.prove_verbose(&query) + self.prove_query(&query, prove_options) } else { - self.prove_verbose(query[0]) + self.prove_query(query[0], prove_options) } } @@ -86,551 +44,185 @@ impl GroveDb { /// doesn't allow for subset verification /// Proofs generated with this can only be verified by the path query used /// to generate them. - pub fn prove_query(&self, query: &PathQuery) -> CostResult, Error> { - self.prove_internal(query, false) - } - - /// Generate a verbose proof for a given path query - /// Any path query that is a subset of the original proof generating path - /// query can be used to verify this (subset verification) - pub fn prove_verbose(&self, query: &PathQuery) -> CostResult, Error> { - // TODO: we need to solve the localized limit and offset problem. - // when using a path query that has a limit and offset value, - // to get the expected behaviour, you need to know exactly - // how the proving internals work and how your state looks. 
- self.prove_internal(query, true) + pub fn prove_query( + &self, + query: &PathQuery, + prove_options: Option, + ) -> CostResult, Error> { + self.prove_internal_serialized(query, prove_options) } - /// Generates a verbose or non verbose proof based on a bool - fn prove_internal(&self, query: &PathQuery, is_verbose: bool) -> CostResult, Error> { + /// Generates a proof and serializes it + fn prove_internal_serialized( + &self, + path_query: &PathQuery, + prove_options: Option, + ) -> CostResult, Error> { let mut cost = OperationCost::default(); - - let mut proof_result = - cost_return_on_error_default!(prepend_version_to_bytes(vec![], PROOF_VERSION)); - - let mut limit: Option = query.query.limit; - let mut offset: Option = query.query.offset; - - let path_slices = query.path.iter().map(|x| x.as_slice()).collect::>(); - - let subtree_exists = self - .check_subtree_exists_path_not_found(path_slices.as_slice().into(), None) - .unwrap_add_cost(&mut cost); - - // if the subtree at the given path doesn't exists, prove that this path - // doesn't point to a valid subtree - match subtree_exists { - Ok(_) => { - // subtree exists - // do nothing - } - Err(_) => { - cost_return_on_error!( - &mut cost, - self.generate_and_store_absent_path_proof( - &path_slices, - &mut proof_result, - is_verbose - ) - ); - // return the absence proof no need to continue proof generation - return Ok(proof_result).wrap_with_cost(cost); - } - } - - // if the subtree exists and the proof type is verbose we need to insert - // the path information to the proof - if is_verbose { - cost_return_on_error!( - &mut cost, - Self::generate_and_store_path_proof(path_slices.clone(), &mut proof_result) - ); + let proof = + cost_return_on_error!(&mut cost, self.prove_internal(path_query, prove_options)); + #[cfg(feature = "proof_debug")] + { + println!("constructed proof is {}", proof); } - - cost_return_on_error!( - &mut cost, - self.prove_subqueries( - &mut proof_result, - path_slices.clone(), - query, - 
&mut limit, - &mut offset, - true, - is_verbose - ) - ); - cost_return_on_error!( - &mut cost, - self.prove_path(&mut proof_result, path_slices, is_verbose) + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + let encoded_proof = cost_return_on_error_no_add!( + &cost, + bincode::encode_to_vec(proof, config) + .map_err(|e| Error::CorruptedData(format!("unable to encode proof {}", e))) ); - - Ok(proof_result).wrap_with_cost(cost) + Ok(encoded_proof).wrap_with_cost(cost) } - /// Perform a pre-order traversal of the tree based on the provided - /// subqueries - fn prove_subqueries( + /// Generates a proof + fn prove_internal( &self, - proofs: &mut Vec, - path: Vec<&[u8]>, - query: &PathQuery, - current_limit: &mut Option, - current_offset: &mut Option, - is_first_call: bool, - is_verbose: bool, - ) -> CostResult<(), Error> { + path_query: &PathQuery, + prove_options: Option, + ) -> CostResult { let mut cost = OperationCost::default(); - let mut to_add_to_result_set: u16 = 0; - let subtree = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(path.as_slice().into(), None) - ); - if subtree.root_hash().unwrap_add_cost(&mut cost) == EMPTY_TREE_HASH { - cost_return_on_error_no_add!( - &cost, - write_to_vec(proofs, &[ProofTokenType::EmptyTree.into()]) - ); - return Ok(()).wrap_with_cost(cost); - } + let prove_options = prove_options.unwrap_or_default(); - let reached_limit = query.query.limit.is_some() && query.query.limit.unwrap() == 0; - if reached_limit { - if is_first_call { - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &path.as_slice().into(), - &subtree, - &query.query.query, - (*current_limit, *current_offset), - ProofTokenType::SizedMerk, - proofs, - is_verbose, - path.iter().last().unwrap_or(&(&[][..])) - ) - ); - } - return Ok(()).wrap_with_cost(cost); + if path_query.query.offset.is_some() && path_query.query.offset != Some(0) { + return Err(Error::InvalidQuery( + "proved 
path queries can not have offsets", + )) + .wrap_with_cost(cost); } - let mut is_leaf_tree = true; - - let mut offset_inc = 0; - let mut limit_inc = 0; - - let mut kv_iterator = KVIterator::new(subtree.storage.raw_iter(), &query.query.query) - .unwrap_add_cost(&mut cost); - - while let Some((key, value_bytes)) = kv_iterator.next_kv().unwrap_add_cost(&mut cost) { - let mut encountered_absence = false; - - let element = cost_return_on_error_no_add!(&cost, raw_decode(&value_bytes)); - match element { - Element::Tree(root_key, _) | Element::SumTree(root_key, ..) => { - let (mut subquery_path, subquery_value) = - Element::subquery_paths_and_value_for_sized_query(&query.query, &key); - - if subquery_value.is_none() && subquery_path.is_none() { - // this element should be added to the result set - // hence we have to update the limit and offset value - let reduced_offset = - reduce_limit_and_offset_by(current_limit, current_offset, 1); - if reduced_offset { - offset_inc += 1; - } else { - limit_inc += 1; - } - continue; - } - - if root_key.is_none() { - continue; - } - - // if the element is a non empty tree then current tree is not a leaf tree - if is_leaf_tree { - is_leaf_tree = false; - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &path.as_slice().into(), - &subtree, - &query.query.query, - (None, None), - ProofTokenType::Merk, - proofs, - is_verbose, - path.iter().last().unwrap_or(&Default::default()) - ) - ); - } - - let mut new_path = path.clone(); - new_path.push(key.as_ref()); - - let mut query = subquery_value; - - if query.is_some() { - if let Some(subquery_path) = &subquery_path { - for subkey in subquery_path.iter() { - let inner_subtree = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - new_path.as_slice().into(), - None, - ) - ); - - let mut key_as_query = Query::new(); - key_as_query.insert_key(subkey.clone()); - - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - 
&new_path.as_slice().into(), - &inner_subtree, - &key_as_query, - (None, None), - ProofTokenType::Merk, - proofs, - is_verbose, - new_path.iter().last().unwrap_or(&Default::default()) - ) - ); - - new_path.push(subkey); - - if self - .check_subtree_exists_path_not_found( - new_path.as_slice().into(), - None, - ) - .unwrap_add_cost(&mut cost) - .is_err() - { - encountered_absence = true; - break; - } - } - - if encountered_absence { - continue; - } - } - } else if let Some(subquery_path) = &mut subquery_path { - if subquery_path.is_empty() { - // nothing to do on this path, since subquery path is empty - // and there is no consecutive subquery value - continue; - } - - let last_key = subquery_path.remove(subquery_path.len() - 1); - - for subkey in subquery_path.iter() { - let inner_subtree = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - new_path.as_slice().into(), - None - ) - ); - - let mut key_as_query = Query::new(); - key_as_query.insert_key(subkey.clone()); - - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &new_path.as_slice().into(), - &inner_subtree, - &key_as_query, - (None, None), - ProofTokenType::Merk, - proofs, - is_verbose, - new_path.iter().last().unwrap_or(&Default::default()) - ) - ); - - new_path.push(subkey); - - // check if the new path points to a valid subtree - // if it does not, we should stop proof generation on this path - if self - .check_subtree_exists_path_not_found( - new_path.as_slice().into(), - None, - ) - .unwrap_add_cost(&mut cost) - .is_err() - { - encountered_absence = true; - break; - } - } - - if encountered_absence { - continue; - } - - let mut key_as_query = Query::new(); - key_as_query.insert_key(last_key); - query = Some(key_as_query); - } else { - return Err(Error::CorruptedCodeExecution("subquery_path must exist")) - .wrap_with_cost(cost); - } - - let new_path_owned = new_path.iter().map(|a| a.to_vec()).collect(); - - let new_path_query = 
PathQuery::new_unsized(new_path_owned, query.unwrap()); - - if self - .check_subtree_exists_path_not_found(new_path.as_slice().into(), None) - .unwrap_add_cost(&mut cost) - .is_err() - { - continue; - } - - cost_return_on_error!( - &mut cost, - self.prove_subqueries( - proofs, - new_path, - &new_path_query, - current_limit, - current_offset, - false, - is_verbose, - ) - ); - - if *current_limit == Some(0) { - break; - } - } - _ => { - to_add_to_result_set += 1; - } - } + if path_query.query.limit == Some(0) { + return Err(Error::InvalidQuery( + "proved path queries can not be for limit 0", + )) + .wrap_with_cost(cost); } - if is_leaf_tree { - // if no useful subtree, then we care about the result set of this subtree. - // apply the sized query - increase_limit_and_offset_by(current_limit, current_offset, limit_inc, offset_inc); - let limit_offset = cost_return_on_error!( + #[cfg(feature = "proof_debug")] + { + // we want to query raw because we want the references to not be resolved at + // this point + + let values = cost_return_on_error!( &mut cost, - self.generate_and_store_merk_proof( - &path.as_slice().into(), - &subtree, - &query.query.query, - (*current_limit, *current_offset), - ProofTokenType::SizedMerk, - proofs, - is_verbose, - path.iter().last().unwrap_or(&Default::default()) + self.query_raw( + path_query, + false, + prove_options.decrease_limit_on_empty_sub_query_result, + false, + QueryResultType::QueryPathKeyElementTrioResultType, + None ) - ); - - // update limit and offset values - *current_limit = limit_offset.0; - *current_offset = limit_offset.1; - } else { - reduce_limit_and_offset_by(current_limit, current_offset, to_add_to_result_set); - } + ) + .0; - Ok(()).wrap_with_cost(cost) - } + println!("values are {}", values); - /// Given a path, construct and append a set of proofs that shows there is - /// a valid path from the root of the db to that point. 
- fn prove_path( - &self, - proof_result: &mut Vec, - path_slices: Vec<&[u8]>, - is_verbose: bool, - ) -> CostResult<(), Error> { - let mut cost = OperationCost::default(); - - // generate proof to show that the path leads up to the root - let mut split_path = path_slices.split_last(); - while let Some((key, path_slice)) = split_path { - let subtree = cost_return_on_error!( + let precomputed_result_map = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path_slice.into(), None) - ); - let mut query = Query::new(); - query.insert_key(key.to_vec()); - - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - &path_slice.into(), - &subtree, - &query, - (None, None), - ProofTokenType::Merk, - proof_result, - is_verbose, - path_slice.iter().last().unwrap_or(&Default::default()) + self.query_raw( + path_query, + false, + prove_options.decrease_limit_on_empty_sub_query_result, + false, + QueryResultType::QueryPathKeyElementTrioResultType, + None ) - ); - split_path = path_slice.split_last(); - } - Ok(()).wrap_with_cost(cost) - } + ) + .0 + .to_btree_map_level_results(); - /// Generates query proof given a subtree and appends the result to a proof - /// list - fn generate_and_store_merk_proof<'a, S, B>( - &self, - path: &SubtreePath, - subtree: &'a Merk, - query: &Query, - limit_offset: LimitOffset, - proof_token_type: ProofTokenType, - proofs: &mut Vec, - is_verbose: bool, - key: &[u8], - ) -> CostResult<(Option, Option), Error> - where - S: StorageContext<'a> + 'a, - B: AsRef<[u8]>, - { - if proof_token_type != ProofTokenType::Merk && proof_token_type != ProofTokenType::SizedMerk - { - return Err(Error::InvalidInput( - "expect proof type for merk proof generation to be sized or merk proof type", - )) - .wrap_with_cost(Default::default()); + println!("precomputed results are {}", precomputed_result_map); } - let mut cost = OperationCost::default(); - - // if the subtree is empty, return the EmptyTree proof op - if 
subtree.root_hash().unwrap() == EMPTY_TREE_HASH { - cost_return_on_error_no_add!( - &cost, - write_to_vec(proofs, &[ProofTokenType::EmptyTree.into()]) - ); - return Ok(limit_offset).wrap_with_cost(cost); - } + let mut limit = path_query.query.limit; - let mut proof_result = cost_return_on_error_no_add!( - &cost, - subtree - .prove_without_encoding(query.clone(), limit_offset.0, limit_offset.1) - .unwrap() - .map_err(|_e| Error::InternalError("failed to generate proof")) + let root_layer = cost_return_on_error!( + &mut cost, + self.prove_subqueries(vec![], path_query, &mut limit, &prove_options) ); - cost_return_on_error!(&mut cost, self.post_process_proof(path, &mut proof_result)); - - let mut proof_bytes = Vec::with_capacity(128); - encode_into(proof_result.proof.iter(), &mut proof_bytes); - - cost_return_on_error_no_add!(&cost, write_to_vec(proofs, &[proof_token_type.into()])); - - // if is verbose, write the key - if is_verbose { - cost_return_on_error_no_add!(&cost, write_slice_to_vec(proofs, key)); + Ok(GroveDBProofV0 { + root_layer, + prove_options, } - - // write the merk proof - cost_return_on_error_no_add!(&cost, write_slice_to_vec(proofs, &proof_bytes)); - - Ok((proof_result.limit, proof_result.offset)).wrap_with_cost(cost) - } - - /// Serializes a path and add it to the proof vector - fn generate_and_store_path_proof( - path: Vec<&[u8]>, - proofs: &mut Vec, - ) -> CostResult<(), Error> { - let cost = OperationCost::default(); - - cost_return_on_error_no_add!( - &cost, - write_to_vec(proofs, &[ProofTokenType::PathInfo.into()]) - ); - - cost_return_on_error_no_add!(&cost, write_slice_of_slice_to_slice(proofs, &path)); - - Ok(()).wrap_with_cost(cost) + .into()) + .wrap_with_cost(cost) } - fn generate_and_store_absent_path_proof( + /// Perform a pre-order traversal of the tree based on the provided + /// subqueries + fn prove_subqueries( &self, - path_slices: &[&[u8]], - proof_result: &mut Vec, - is_verbose: bool, - ) -> CostResult<(), Error> { + path: 
Vec<&[u8]>, + path_query: &PathQuery, + overall_limit: &mut Option, + prove_options: &ProveOptions, + ) -> CostResult { let mut cost = OperationCost::default(); - cost_return_on_error_no_add!( + let query = cost_return_on_error_no_add!( &cost, - write_to_vec(proof_result, &[ProofTokenType::AbsentPath.into()]) + path_query + .query_items_at_path(path.as_slice()) + .ok_or(Error::CorruptedPath(format!( + "prove subqueries: path {} should be part of path_query {}", + path.iter() + .map(|a| hex_to_ascii(a)) + .collect::>() + .join("/"), + path_query + ))) ); - let mut current_path: Vec<&[u8]> = vec![]; - let mut split_path = path_slices.split_first(); - while let Some((key, path_slice)) = split_path { - let subtree = self - .open_non_transactional_merk_at_path(current_path.as_slice().into(), None) - .unwrap_add_cost(&mut cost); + let subtree = cost_return_on_error!( + &mut cost, + self.open_non_transactional_merk_at_path(path.as_slice().into(), None) + ); - let Ok(subtree) = subtree else { - break; - }; + let limit = if path.len() < path_query.path.len() { + // There is no need for a limit because we are only asking for a single item + None + } else { + *overall_limit + }; - let has_item = Element::get(&subtree, key, true).unwrap_add_cost(&mut cost); + let mut merk_proof = cost_return_on_error!( + &mut cost, + self.generate_merk_proof(&subtree, &query.items, query.left_to_right, limit) + ); - let mut next_key_query = Query::new(); - next_key_query.insert_key(key.to_vec()); - cost_return_on_error!( - &mut cost, - self.generate_and_store_merk_proof( - ¤t_path.as_slice().into(), - &subtree, - &next_key_query, - (None, None), - ProofTokenType::Merk, - proof_result, - is_verbose, - current_path.iter().last().unwrap_or(&(&[][..])) - ) + #[cfg(feature = "proof_debug")] + { + println!( + "generated merk proof at level path level [{}], limit is {:?}, {}", + path.iter() + .map(|a| hex_to_ascii(a)) + .collect::>() + .join("/"), + overall_limit, + if query.left_to_right { + "left 
to right" + } else { + "right to left" + } ); - - current_path.push(key); - - if has_item.is_err() || path_slice.is_empty() { - // reached last key - break; - } - - split_path = path_slice.split_first(); } - Ok(()).wrap_with_cost(cost) - } + let mut lower_layers = BTreeMap::new(); - /// Converts Items to Node::KV from Node::KVValueHash - /// Converts References to Node::KVRefValueHash and sets the value to the - /// referenced element - fn post_process_proof>( - &self, - path: &SubtreePath, - proof_result: &mut ProofWithoutEncodingResult, - ) -> CostResult<(), Error> { - let mut cost = OperationCost::default(); + let mut has_a_result_at_level = false; + let mut done_with_results = false; - for op in proof_result.proof.iter_mut() { + for op in merk_proof.proof.iter_mut() { + done_with_results |= overall_limit == &Some(0); match op { Op::Push(node) | Op::PushInverted(node) => match node { - Node::KV(key, value) | Node::KVValueHash(key, value, ..) => { + Node::KV(key, value) | Node::KVValueHash(key, value, ..) 
+ if !done_with_results => + { let elem = Element::deserialize(value); match elem { Ok(Element::Reference(reference_path, ..)) => { @@ -665,11 +257,82 @@ impl GroveDb { key.to_owned(), serialized_referenced_elem.expect("confirmed ok above"), value_hash(value).unwrap_add_cost(&mut cost), - ) + ); + if let Some(limit) = overall_limit.as_mut() { + *limit -= 1; + } + has_a_result_at_level |= true; + } + Ok(Element::Item(..)) if !done_with_results => { + #[cfg(feature = "proof_debug")] + { + println!("found {}", hex_to_ascii(key)); + } + *node = Node::KV(key.to_owned(), value.to_owned()); + if let Some(limit) = overall_limit.as_mut() { + *limit -= 1; + } + has_a_result_at_level |= true; + } + Ok(Element::Tree(Some(_), _)) | Ok(Element::SumTree(Some(_), ..)) + if !done_with_results + && query.has_subquery_or_matching_in_path_on_key(key) => + { + #[cfg(feature = "proof_debug")] + { + println!( + "found tree {}, query is {}", + hex_to_ascii(key), + query + ); + } + // We only want to check in sub nodes for the proof if the tree has + // elements + let mut lower_path = path.clone(); + lower_path.push(key.as_slice()); + + let previous_limit = *overall_limit; + + let layer_proof = cost_return_on_error!( + &mut cost, + self.prove_subqueries( + lower_path, + path_query, + overall_limit, + prove_options, + ) + ); + + if previous_limit != *overall_limit { + // a lower layer updated the limit, don't subtract 1 at this + // level + has_a_result_at_level |= true; + } + lower_layers.insert(key.clone(), layer_proof); } - Ok(Element::Item(..)) => { - *node = Node::KV(key.to_owned(), value.to_owned()) + + Ok(Element::Tree(..)) | Ok(Element::SumTree(..)) + if !done_with_results => + { + #[cfg(feature = "proof_debug")] + { + println!( + "found tree {}, no subquery query is {:?}", + hex_to_ascii(key), + query + ); + } + if let Some(limit) = overall_limit.as_mut() { + *limit -= 1; + } + has_a_result_at_level |= true; } + // todo: transform the unused trees into a Hash or KVHash to make 
proof + // smaller Ok(Element::Tree(..)) if + // done_with_results => { *node = + // Node::Hash() // we are done with the + // results, we can modify the proof to alter + // } _ => continue, } } @@ -678,236 +341,61 @@ impl GroveDb { _ => continue, } } - Ok(()).wrap_with_cost(cost) - } -} - -#[cfg(test)] -mod tests { - use grovedb_merk::{execute_proof, proofs::Query}; - use grovedb_storage::StorageBatch; - - use crate::{ - operations::proof::util::{ProofReader, ProofTokenType}, - tests::{common::EMPTY_PATH, make_deep_tree, TEST_LEAF}, - GroveDb, - }; - - #[test] - fn test_path_info_encoding_and_decoding() { - let path = vec![b"a".as_slice(), b"b".as_slice(), b"c".as_slice()]; - let mut proof_vector = vec![]; - GroveDb::generate_and_store_path_proof(path.clone(), &mut proof_vector) - .unwrap() - .unwrap(); - - let mut proof_reader = ProofReader::new(proof_vector.as_slice()); - let decoded_path = proof_reader.read_path_info().unwrap(); - - assert_eq!(path, decoded_path); - } - - #[test] - fn test_reading_of_verbose_proofs() { - let db = make_deep_tree(); - let path = vec![TEST_LEAF, b"innertree"]; - let mut query = Query::new(); - query.insert_all(); + if !has_a_result_at_level + && !done_with_results + && prove_options.decrease_limit_on_empty_sub_query_result + { + #[cfg(feature = "proof_debug")] + { + println!( + "no results at level {}", + path.iter() + .map(|a| hex_to_ascii(a)) + .collect::>() + .join("/") + ); + } + if let Some(limit) = overall_limit.as_mut() { + *limit -= 1; + } + } - let batch = StorageBatch::new(); + let mut serialized_merk_proof = Vec::with_capacity(1024); + encode_into(merk_proof.proof.iter(), &mut serialized_merk_proof); - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"innertree"].as_ref().into(), - Some(&batch), - ) - .unwrap() - .unwrap(); - let expected_root_hash = merk.root_hash().unwrap(); - - let mut proof = vec![]; - db.generate_and_store_merk_proof( - &path.as_slice().into(), - &merk, - &query, - (None, None), 
- ProofTokenType::Merk, - &mut proof, - true, - b"innertree", - ) - .unwrap() - .unwrap(); - assert_ne!(proof.len(), 0); - - let mut proof_reader = ProofReader::new(&proof); - let (proof_token_type, proof, key) = proof_reader.read_verbose_proof().unwrap(); - - assert_eq!(proof_token_type, ProofTokenType::Merk); - assert_eq!(key, Some(b"innertree".to_vec())); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) - .unwrap() - .unwrap(); - assert_eq!(root_hash, expected_root_hash); - assert_eq!(result_set.result_set.len(), 3); - - // what is the key is empty?? - let merk = db - .open_non_transactional_merk_at_path(EMPTY_PATH, Some(&batch)) - .unwrap() - .unwrap(); - let expected_root_hash = merk.root_hash().unwrap(); - - let mut proof = vec![]; - db.generate_and_store_merk_proof( - &EMPTY_PATH, - &merk, - &query, - (None, None), - ProofTokenType::Merk, - &mut proof, - true, - &[], - ) - .unwrap() - .unwrap(); - assert_ne!(proof.len(), 0); - - let mut proof_reader = ProofReader::new(&proof); - let (proof_token_type, proof, key) = proof_reader.read_verbose_proof().unwrap(); - - assert_eq!(proof_token_type, ProofTokenType::Merk); - assert_eq!(key, Some(vec![])); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) - .unwrap() - .unwrap(); - assert_eq!(root_hash, expected_root_hash); - assert_eq!(result_set.result_set.len(), 3); + Ok(LayerProof { + merk_proof: serialized_merk_proof, + lower_layers, + }) + .wrap_with_cost(cost) } - #[test] - fn test_reading_verbose_proof_at_key() { - // going to generate an array of multiple proofs with different keys - let db = make_deep_tree(); - let mut proofs = vec![]; - - let mut query = Query::new(); - query.insert_all(); - - // insert all under inner tree - let path = vec![TEST_LEAF, b"innertree"]; - - let batch = StorageBatch::new(); - - let merk = db - .open_non_transactional_merk_at_path(path.as_slice().into(), Some(&batch)) - .unwrap() - .unwrap(); - let 
inner_tree_root_hash = merk.root_hash().unwrap(); - db.generate_and_store_merk_proof( - &path.as_slice().into(), - &merk, - &query, - (None, None), - ProofTokenType::Merk, - &mut proofs, - true, - path.iter().last().unwrap_or(&(&[][..])), - ) - .unwrap() - .unwrap(); - - // insert all under innertree4 - let path = vec![TEST_LEAF, b"innertree4"]; - let merk = db - .open_non_transactional_merk_at_path(path.as_slice().into(), Some(&batch)) - .unwrap() - .unwrap(); - let inner_tree_4_root_hash = merk.root_hash().unwrap(); - db.generate_and_store_merk_proof( - &path.as_slice().into(), - &merk, - &query, - (None, None), - ProofTokenType::Merk, - &mut proofs, - true, - path.iter().last().unwrap_or(&(&[][..])), - ) - .unwrap() - .unwrap(); - - // insert all for deeper_1 - let path: Vec<&[u8]> = vec![b"deep_leaf", b"deep_node_1", b"deeper_1"]; - let merk = db - .open_non_transactional_merk_at_path(path.as_slice().into(), Some(&batch)) - .unwrap() - .unwrap(); - let deeper_1_root_hash = merk.root_hash().unwrap(); - db.generate_and_store_merk_proof( - &path.as_slice().into(), - &merk, - &query, - (None, None), - ProofTokenType::Merk, - &mut proofs, - true, - path.iter().last().unwrap_or(&(&[][..])), - ) - .unwrap() - .unwrap(); - - // read the proof at innertree - let contextual_proof = proofs.clone(); - let mut proof_reader = ProofReader::new(&contextual_proof); - let (proof_token_type, proof) = proof_reader - .read_verbose_proof_at_key(b"innertree") - .unwrap(); - - assert_eq!(proof_token_type, ProofTokenType::Merk); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) - .unwrap() - .unwrap(); - assert_eq!(root_hash, inner_tree_root_hash); - assert_eq!(result_set.result_set.len(), 3); - - // read the proof at innertree4 - let contextual_proof = proofs.clone(); - let mut proof_reader = ProofReader::new(&contextual_proof); - let (proof_token_type, proof) = proof_reader - .read_verbose_proof_at_key(b"innertree4") - .unwrap(); - - 
assert_eq!(proof_token_type, ProofTokenType::Merk); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) - .unwrap() - .unwrap(); - assert_eq!(root_hash, inner_tree_4_root_hash); - assert_eq!(result_set.result_set.len(), 2); - - // read the proof at deeper_1 - let contextual_proof = proofs.clone(); - let mut proof_reader = ProofReader::new(&contextual_proof); - let (proof_token_type, proof) = - proof_reader.read_verbose_proof_at_key(b"deeper_1").unwrap(); - - assert_eq!(proof_token_type, ProofTokenType::Merk); - - let (root_hash, result_set) = execute_proof(&proof, &query, None, None, true) - .unwrap() - .unwrap(); - assert_eq!(root_hash, deeper_1_root_hash); - assert_eq!(result_set.result_set.len(), 3); - - // read the proof at an invalid key - let contextual_proof = proofs.clone(); - let mut proof_reader = ProofReader::new(&contextual_proof); - let reading_result = proof_reader.read_verbose_proof_at_key(b"unknown_key"); - assert!(reading_result.is_err()) + /// Generates query proof given a subtree and appends the result to a proof + /// list + fn generate_merk_proof<'a, S>( + &self, + subtree: &'a Merk, + query_items: &[QueryItem], + left_to_right: bool, + limit: Option, + ) -> CostResult + where + S: StorageContext<'a> + 'a, + { + subtree + .prove_unchecked_query_items(query_items, limit, left_to_right) + .map_ok(|(proof, limit)| ProofWithoutEncodingResult::new(proof, limit)) + .map_err(|e| { + Error::InternalError(format!( + "failed to generate proof for query_items [{}] error is : {}", + query_items + .iter() + .map(|e| e.to_string()) + .collect::>() + .join(", "), + e + )) + }) } } diff --git a/grovedb/src/operations/proof/mod.rs b/grovedb/src/operations/proof/mod.rs new file mode 100644 index 00000000..88243d59 --- /dev/null +++ b/grovedb/src/operations/proof/mod.rs @@ -0,0 +1,165 @@ +//! 
Proof operations + +#[cfg(feature = "full")] +mod generate; +pub mod util; +mod verify; + +use std::{collections::BTreeMap, fmt}; + +use bincode::{Decode, Encode}; +use derive_more::From; +use grovedb_merk::proofs::{query::Key, Decoder, Node, Op}; + +use crate::operations::proof::util::{element_hex_to_ascii, hex_to_ascii}; + +#[derive(Debug, Clone, Copy, Encode, Decode)] +pub struct ProveOptions { + /// This tells the proof system to decrease the available limit of the query + /// by 1 in the case of empty subtrees. Generally this should be set to + /// true. The case where this could be set to false is if there is a + /// known structure where we know that there are only a few empty + /// subtrees. + /// + /// !!! Warning !!! Be very careful: + /// If this is set to `false` then you must be sure that the sub queries do + /// not match many trees, Otherwise you could crash the system as the + /// proof system goes through millions of subtrees and eventually runs + /// out of memory + pub decrease_limit_on_empty_sub_query_result: bool, +} + +impl fmt::Display for ProveOptions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "ProveOptions {{ decrease_limit_on_empty_sub_query_result: {} }}", + self.decrease_limit_on_empty_sub_query_result + ) + } +} + +impl Default for ProveOptions { + fn default() -> Self { + ProveOptions { + decrease_limit_on_empty_sub_query_result: true, + } + } +} + +#[derive(Encode, Decode)] +pub struct LayerProof { + pub merk_proof: Vec, + pub lower_layers: BTreeMap, +} + +#[derive(Encode, Decode, From)] +pub enum GroveDBProof { + V0(GroveDBProofV0), +} + +#[derive(Encode, Decode)] +pub struct GroveDBProofV0 { + pub root_layer: LayerProof, + pub prove_options: ProveOptions, +} + +impl fmt::Display for LayerProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "LayerProof {{")?; + writeln!(f, " merk_proof: {}", decode_merk_proof(&self.merk_proof))?; + if !self.lower_layers.is_empty() { + 
writeln!(f, " lower_layers: {{")?; + for (key, layer_proof) in &self.lower_layers { + writeln!(f, " {} => {{", hex_to_ascii(key))?; + for line in format!("{}", layer_proof).lines() { + writeln!(f, " {}", line)?; + } + writeln!(f, " }}")?; + } + writeln!(f, " }}")?; + } + write!(f, "}}") + } +} + +impl fmt::Display for GroveDBProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + GroveDBProof::V0(proof) => write!(f, "{}", proof), + } + } +} + +impl fmt::Display for GroveDBProofV0 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "GroveDBProofV0 {{")?; + for line in format!("{}", self.root_layer).lines() { + writeln!(f, " {}", line)?; + } + write!(f, "}}") + } +} + +fn decode_merk_proof(proof: &[u8]) -> String { + let mut result = String::new(); + let ops = Decoder::new(proof); + + for (i, op) in ops.enumerate() { + match op { + Ok(op) => { + result.push_str(&format!("\n {}: {}", i, op_to_string(&op))); + } + Err(e) => { + result.push_str(&format!("\n {}: Error decoding op: {}", i, e)); + } + } + } + + result +} + +fn op_to_string(op: &Op) -> String { + match op { + Op::Push(node) => format!("Push({})", node_to_string(node)), + Op::PushInverted(node) => format!("PushInverted({})", node_to_string(node)), + Op::Parent => "Parent".to_string(), + Op::Child => "Child".to_string(), + Op::ParentInverted => "ParentInverted".to_string(), + Op::ChildInverted => "ChildInverted".to_string(), + } +} + +fn node_to_string(node: &Node) -> String { + match node { + Node::Hash(hash) => format!("Hash(HASH[{}])", hex::encode(hash)), + Node::KVHash(kv_hash) => format!("KVHash(HASH[{}])", hex::encode(kv_hash)), + Node::KV(key, value) => { + format!("KV({}, {})", hex_to_ascii(key), element_hex_to_ascii(value)) + } + Node::KVValueHash(key, value, value_hash) => format!( + "KVValueHash({}, {}, HASH[{}])", + hex_to_ascii(key), + element_hex_to_ascii(value), + hex::encode(value_hash) + ), + Node::KVDigest(key, value_hash) => format!( + 
"KVDigest({}, HASH[{}])", + hex_to_ascii(key), + hex::encode(value_hash) + ), + Node::KVRefValueHash(key, value, value_hash) => format!( + "KVRefValueHash({}, {}, HASH[{}])", + hex_to_ascii(key), + element_hex_to_ascii(value), + hex::encode(value_hash) + ), + Node::KVValueHashFeatureType(key, value, value_hash, feature_type) => format!( + "KVValueHashFeatureType({}, {}, HASH[{}], {:?})", + hex_to_ascii(key), + element_hex_to_ascii(value), + hex::encode(value_hash), + feature_type + ), + } +} diff --git a/grovedb/src/operations/proof/util.rs b/grovedb/src/operations/proof/util.rs index 82e8c585..50480c13 100644 --- a/grovedb/src/operations/proof/util.rs +++ b/grovedb/src/operations/proof/util.rs @@ -1,412 +1,165 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- use std::fmt; -#[cfg(any(feature = "full", feature = "verify"))] -use std::io::Read; -#[cfg(feature = "full")] -use std::io::Write; use grovedb_merk::{ - proofs::query::{Key, Path, ProvedKeyValue}, - CryptoHash, + proofs::query::{Key, Path, ProvedKeyOptionalValue, ProvedKeyValue}, + CryptoHash, Error, }; -#[cfg(any(feature = "full", feature = "verify"))] -use integer_encoding::{VarInt, VarIntReader}; -#[cfg(any(feature = "full", feature = "verify"))] -use crate::Error; -use crate::{operations::proof::verify::ProvedKeyValues, reference_path::ReferencePathType}; +use crate::Element; #[cfg(any(feature = "full", feature = "verify"))] -pub const EMPTY_TREE_HASH: [u8; 32] = [0; 32]; - -pub type ProofTokenInfo = (ProofTokenType, Vec, Option>); +pub type ProvedKeyValues = Vec; #[cfg(any(feature = "full", feature = "verify"))] -#[derive(Debug, PartialEq, Eq)] -/// Proof type -// TODO: there might be a better name for this -pub enum ProofTokenType { - Merk, - SizedMerk, - EmptyTree, - AbsentPath, - PathInfo, - Invalid, -} +pub type ProvedKeyOptionalValues = Vec; #[cfg(any(feature = "full", feature = "verify"))] -impl fmt::Display for ProofTokenType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let variant_str = match self { - ProofTokenType::Merk => "Merk", - ProofTokenType::SizedMerk => "SizedMerk", - ProofTokenType::EmptyTree => "EmptyTree", - ProofTokenType::AbsentPath => "AbsentPath", - ProofTokenType::PathInfo => "PathInfo", - ProofTokenType::Invalid => "Invalid", - }; - write!(f, "{}", variant_str) - } -} +pub type ProvedPathKeyValues = Vec; #[cfg(any(feature = "full", feature = "verify"))] -impl From for u8 { - fn from(proof_token_type: ProofTokenType) -> Self { - match proof_token_type { - ProofTokenType::Merk => 0x01, - ProofTokenType::SizedMerk => 0x02, - ProofTokenType::EmptyTree => 0x04, - ProofTokenType::AbsentPath => 0x05, - ProofTokenType::PathInfo => 0x06, - ProofTokenType::Invalid => 0x10, - } - } -} +pub type ProvedPathKeyOptionalValues 
= Vec; +/// Proved path-key-value #[cfg(any(feature = "full", feature = "verify"))] -impl From for ProofTokenType { - fn from(val: u8) -> Self { - match val { - 0x01 => ProofTokenType::Merk, - 0x02 => ProofTokenType::SizedMerk, - 0x04 => ProofTokenType::EmptyTree, - 0x05 => ProofTokenType::AbsentPath, - 0x06 => ProofTokenType::PathInfo, - _ => ProofTokenType::Invalid, - } - } +#[derive(Debug, PartialEq, Eq)] +pub struct ProvedPathKeyOptionalValue { + /// Path + pub path: Path, + /// Key + pub key: Key, + /// Value + pub value: Option>, + /// Proof + pub proof: CryptoHash, } #[cfg(any(feature = "full", feature = "verify"))] -impl ProofTokenType { - pub fn u8_to_display(val: u8) -> String { - match val { - 0x01 => "merk".to_string(), - 0x02 => "sized merk".to_string(), - 0x04 => "empty tree".to_string(), - 0x05 => "absent path".to_string(), - 0x06 => "path info".to_string(), - v => format!("invalid proof token {}", v), - } +impl fmt::Display for ProvedPathKeyOptionalValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "ProvedPathKeyValue {{")?; + writeln!( + f, + " path: [{}],", + self.path + .iter() + .map(|p| hex_to_ascii(p)) + .collect::>() + .join(", ") + )?; + writeln!(f, " key: {},", hex_to_ascii(&self.key))?; + writeln!( + f, + " value: {},", + optional_element_hex_to_ascii(self.value.as_ref()) + )?; + writeln!(f, " proof: {}", hex::encode(self.proof))?; + write!(f, "}}") } } +/// Proved path-key-value #[cfg(any(feature = "full", feature = "verify"))] -#[derive(Debug)] -// TODO: possibility for a proof writer?? 
-/// Proof reader -pub struct ProofReader<'a> { - proof_data: &'a [u8], - is_verbose: bool, +#[derive(Debug, PartialEq, Eq)] +pub struct ProvedPathKeyValue { + /// Path + pub path: Path, + /// Key + pub key: Key, + /// Value + pub value: Vec, + /// Proof + pub proof: CryptoHash, } #[cfg(any(feature = "full", feature = "verify"))] -impl<'a> ProofReader<'a> { - /// New proof reader - pub fn new(proof_data: &'a [u8]) -> Self { - Self { - proof_data, - is_verbose: false, - } - } - - /// New proof reader with verbose_status - pub fn new_with_verbose_status(proof_data: &'a [u8], is_verbose: bool) -> Self { - Self { - proof_data, - is_verbose, - } - } - - /// For non verbose proof read the immediate next proof, for verbose proof - /// read the first proof that matches a given key - pub fn read_next_proof(&mut self, key: &[u8]) -> Result<(ProofTokenType, Vec), Error> { - if self.is_verbose { - self.read_verbose_proof_at_key(key) - } else { - let (proof_token_type, proof, _) = self.read_proof_with_optional_type(None)?; - Ok((proof_token_type, proof)) - } - } - - /// Read the next proof, return the proof type - pub fn read_proof(&mut self) -> Result { - if self.is_verbose { - self.read_verbose_proof_with_optional_type(None) - } else { - self.read_proof_with_optional_type(None) - } - } - - /// Read verbose proof - pub fn read_verbose_proof(&mut self) -> Result { - self.read_verbose_proof_with_optional_type(None) - } - - /// Reads data from proof into slice of specific size - fn read_into_slice(&mut self, buf: &mut [u8]) -> Result { - self.proof_data - .read(buf) - .map_err(|_| Error::CorruptedData(String::from("failed to read proof data"))) - } - - /// Read varint encoded length information from proof data - fn read_length_data(&mut self) -> Result { - self.proof_data - .read_varint() - .map_err(|_| Error::InvalidProof("expected length data".to_string())) - } - - /// Read proof with optional type - pub fn read_proof_with_optional_type( - &mut self, - 
expected_data_type_option: Option, - ) -> Result { - let (proof_token_type, proof, _) = - self.read_proof_internal_with_optional_type(expected_data_type_option, false)?; - Ok((proof_token_type, proof, None)) +impl fmt::Display for ProvedPathKeyValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "ProvedPathKeyValue {{")?; + writeln!( + f, + " path: [{}],", + self.path + .iter() + .map(|p| hex_to_ascii(p)) + .collect::>() + .join(", ") + )?; + writeln!(f, " key: {},", hex_to_ascii(&self.key))?; + writeln!(f, " value: {},", element_hex_to_ascii(self.value.as_ref()))?; + writeln!(f, " proof: {}", hex::encode(self.proof))?; + write!(f, "}}") } +} - /// Read verbose proof with optional type - pub fn read_verbose_proof_with_optional_type( - &mut self, - expected_data_type_option: Option, - ) -> Result { - let (proof_token_type, proof, key) = - self.read_proof_internal_with_optional_type(expected_data_type_option, true)?; - Ok(( - proof_token_type, +impl From for ProvedPathKeyOptionalValue { + fn from(value: ProvedPathKeyValue) -> Self { + let ProvedPathKeyValue { + path, + key, + value, proof, - Some(key.ok_or(Error::InvalidProof( - "key must exist for verbose merk proofs".to_string(), - ))?), - )) - } + } = value; - /// Read verbose proof at key - /// Returns an error if it can't find a proof for that key - pub fn read_verbose_proof_at_key( - &mut self, - expected_key: &[u8], - ) -> Result<(ProofTokenType, Vec), Error> { - let (proof_token_type, proof, _) = loop { - let (proof_token_type, proof, key) = self.read_verbose_proof()?; - let key = key.expect("read_verbose_proof enforces that this exists"); - if key.as_slice() == expected_key { - break (proof_token_type, proof, key); - } - }; - - Ok((proof_token_type, proof)) - } - - /// Read proof with optional type - pub fn read_proof_internal_with_optional_type( - &mut self, - expected_data_type_option: Option, - is_verbose: bool, - ) -> Result { - let mut data_type = [0; 1]; - 
self.read_into_slice(&mut data_type)?; - - if let Some(expected_data_type) = expected_data_type_option { - if data_type[0] != expected_data_type { - return Err(Error::InvalidProof(format!( - "wrong data_type, expected {}, got {}", - expected_data_type, data_type[0] - ))); - } - } - - let proof_token_type: ProofTokenType = data_type[0].into(); - - if proof_token_type == ProofTokenType::EmptyTree - || proof_token_type == ProofTokenType::AbsentPath - { - return Ok((proof_token_type, vec![], None)); - } - - let (proof, key) = if proof_token_type == ProofTokenType::Merk - || proof_token_type == ProofTokenType::SizedMerk - { - // if verbose we need to read the key first - let key = if is_verbose { - let key_length = self.read_length_data()?; - - let mut key = vec![0; key_length]; - self.read_into_slice(&mut key)?; - - Some(key) - } else { - None - }; - - let proof_length = self.read_length_data()?; - - let mut proof = vec![0; proof_length]; - self.read_into_slice(&mut proof)?; - - (proof, key) - } else { - return Err(Error::InvalidProof( - "expected merk or sized merk proof".to_string(), - )); - }; - - Ok((proof_token_type, proof, key)) - } - - /// Reads path information from the proof vector - pub fn read_path_info(&mut self) -> Result>, Error> { - let mut data_type = [0; 1]; - self.read_into_slice(&mut data_type)?; - - if data_type != [Into::::into(ProofTokenType::PathInfo)] { - return Err(Error::InvalidProof(format!( - "wrong data_type, expected path_info, got {}", - ProofTokenType::u8_to_display(data_type[0]) - ))); - } - - let mut path = vec![]; - let path_slice_len = self.read_length_data()?; - - for _ in 0..path_slice_len { - let path_len = self.read_length_data()?; - let mut path_value = vec![0; path_len]; - self.read_into_slice(&mut path_value)?; - path.push(path_value); + ProvedPathKeyOptionalValue { + path, + key, + value: Some(value), + proof, } - - Ok(path) } } -#[cfg(feature = "full")] -/// Write to vec -// TODO: this can error out handle the error -pub fn 
write_to_vec(dest: &mut W, value: &[u8]) -> Result<(), Error> { - dest.write_all(value) - .map_err(|_e| Error::InternalError("failed to write to vector")) -} +impl TryFrom for ProvedPathKeyValue { + type Error = Error; -#[cfg(feature = "full")] -/// Write a slice to the vector, first write the length of the slice -pub fn write_slice_to_vec(dest: &mut W, value: &[u8]) -> Result<(), Error> { - write_to_vec(dest, value.len().encode_var_vec().as_slice())?; - write_to_vec(dest, value)?; - Ok(()) -} - -#[cfg(feature = "full")] -/// Write a slice of a slice to a flat vector:w -pub fn write_slice_of_slice_to_slice(dest: &mut W, value: &[&[u8]]) -> Result<(), Error> { - // write the number of slices we are about to write - write_to_vec(dest, value.len().encode_var_vec().as_slice())?; - for inner_slice in value { - write_slice_to_vec(dest, inner_slice)?; + fn try_from(value: ProvedPathKeyOptionalValue) -> Result { + let ProvedPathKeyOptionalValue { + path, + key, + value, + proof, + } = value; + let value = value.ok_or(Error::InvalidProofError(format!( + "expected {}", + hex_to_ascii(&key) + )))?; + Ok(ProvedPathKeyValue { + path, + key, + value, + proof, + }) } - Ok(()) } -#[cfg(any(feature = "full", feature = "verify"))] -pub fn reduce_limit_and_offset_by( - limit: &mut Option, - offset: &mut Option, - n: u16, -) -> bool { - let mut skip_limit = false; - let mut n = n; - - if let Some(offset_value) = *offset { - if offset_value > 0 { - if offset_value >= n { - *offset = Some(offset_value - n); - n = 0; - } else { - *offset = Some(0); - n -= offset_value; - } - skip_limit = true; - } - } - - if let Some(limit_value) = *limit { - if !skip_limit && limit_value > 0 { - if limit_value >= n { - *limit = Some(limit_value - n); - } else { - *limit = Some(0); - } +impl ProvedPathKeyValue { + // TODO: make path a reference + /// Consumes the ProvedKeyValue and returns a ProvedPathKeyValue given a + /// Path + pub fn from_proved_key_value(path: Path, proved_key_value: ProvedKeyValue) 
-> Self { + Self { + path, + key: proved_key_value.key, + value: proved_key_value.value, + proof: proved_key_value.proof, } } - skip_limit -} - -pub fn increase_limit_and_offset_by( - limit: &mut Option, - offset: &mut Option, - limit_inc: u16, - offset_inc: u16, -) { - if let Some(offset_value) = *offset { - *offset = Some(offset_value + offset_inc); - } - if let Some(limit_value) = *limit { - *limit = Some(limit_value + limit_inc); + /// Transforms multiple ProvedKeyValues to their equivalent + /// ProvedPathKeyValue given a Path + pub fn from_proved_key_values(path: Path, proved_key_values: ProvedKeyValues) -> Vec { + proved_key_values + .into_iter() + .map(|pkv| Self::from_proved_key_value(path.clone(), pkv)) + .collect() } } -/// Proved path-key-values -pub type ProvedPathKeyValues = Vec; - -/// Proved path-key-value -#[cfg(any(feature = "full", feature = "verify"))] -#[derive(Debug, PartialEq, Eq)] -pub struct ProvedPathKeyValue { - /// Path - pub path: Path, - /// Key - pub key: Key, - /// Value - pub value: Vec, - /// Proof - pub proof: CryptoHash, -} - -impl ProvedPathKeyValue { +impl ProvedPathKeyOptionalValue { // TODO: make path a reference /// Consumes the ProvedKeyValue and returns a ProvedPathKeyValue given a /// Path - pub fn from_proved_key_value(path: Path, proved_key_value: ProvedKeyValue) -> Self { + pub fn from_proved_key_value(path: Path, proved_key_value: ProvedKeyOptionalValue) -> Self { Self { path, key: proved_key_value.key, @@ -417,7 +170,10 @@ impl ProvedPathKeyValue { /// Transforms multiple ProvedKeyValues to their equivalent /// ProvedPathKeyValue given a Path - pub fn from_proved_key_values(path: Path, proved_key_values: ProvedKeyValues) -> Vec { + pub fn from_proved_key_values( + path: Path, + proved_key_values: ProvedKeyOptionalValues, + ) -> Vec { proved_key_values .into_iter() .map(|pkv| Self::from_proved_key_value(path.clone(), pkv)) @@ -427,46 +183,26 @@ impl ProvedPathKeyValue { #[cfg(test)] mod tests { - use 
grovedb_merk::proofs::query::ProvedKeyValue; - - use crate::operations::proof::util::{ProofTokenType, ProvedPathKeyValue}; + use grovedb_merk::proofs::query::ProvedKeyOptionalValue; - #[test] - fn test_proof_token_type_encoding() { - assert_eq!(0x01_u8, Into::::into(ProofTokenType::Merk)); - assert_eq!(0x02_u8, Into::::into(ProofTokenType::SizedMerk)); - assert_eq!(0x04_u8, Into::::into(ProofTokenType::EmptyTree)); - assert_eq!(0x05_u8, Into::::into(ProofTokenType::AbsentPath)); - assert_eq!(0x06_u8, Into::::into(ProofTokenType::PathInfo)); - assert_eq!(0x10_u8, Into::::into(ProofTokenType::Invalid)); - } - - #[test] - fn test_proof_token_type_decoding() { - assert_eq!(ProofTokenType::Merk, 0x01_u8.into()); - assert_eq!(ProofTokenType::SizedMerk, 0x02_u8.into()); - assert_eq!(ProofTokenType::EmptyTree, 0x04_u8.into()); - assert_eq!(ProofTokenType::AbsentPath, 0x05_u8.into()); - assert_eq!(ProofTokenType::PathInfo, 0x06_u8.into()); - assert_eq!(ProofTokenType::Invalid, 0x10_u8.into()); - } + use crate::operations::proof::util::ProvedPathKeyOptionalValue; #[test] fn test_proved_path_from_single_proved_key_value() { let path = vec![b"1".to_vec(), b"2".to_vec()]; - let proved_key_value = ProvedKeyValue { + let proved_key_value = ProvedKeyOptionalValue { key: b"a".to_vec(), - value: vec![5, 6], + value: Some(vec![5, 6]), proof: [0; 32], }; let proved_path_key_value = - ProvedPathKeyValue::from_proved_key_value(path.clone(), proved_key_value); + ProvedPathKeyOptionalValue::from_proved_key_value(path.clone(), proved_key_value); assert_eq!( proved_path_key_value, - ProvedPathKeyValue { + ProvedPathKeyOptionalValue { path, key: b"a".to_vec(), - value: vec![5, 6], + value: Some(vec![5, 6]), proof: [0; 32] } ); @@ -475,51 +211,116 @@ mod tests { #[test] fn test_many_proved_path_from_many_proved_key_value() { let path = vec![b"1".to_vec(), b"2".to_vec()]; - let proved_key_value_a = ProvedKeyValue { + let proved_key_value_a = ProvedKeyOptionalValue { key: b"a".to_vec(), - 
value: vec![5, 6], + value: Some(vec![5, 6]), proof: [0; 32], }; - let proved_key_value_b = ProvedKeyValue { + let proved_key_value_b = ProvedKeyOptionalValue { key: b"b".to_vec(), - value: vec![5, 7], + value: Some(vec![5, 7]), proof: [1; 32], }; - let proved_key_value_c = ProvedKeyValue { + let proved_key_value_c = ProvedKeyOptionalValue { key: b"c".to_vec(), - value: vec![6, 7], + value: Some(vec![6, 7]), proof: [2; 32], }; - let proved_key_values = vec![proved_key_value_a, proved_key_value_b, proved_key_value_c]; + let proved_key_value_d = ProvedKeyOptionalValue { + key: b"d".to_vec(), + value: None, + proof: [2; 32], + }; + let proved_key_values = vec![ + proved_key_value_a, + proved_key_value_b, + proved_key_value_c, + proved_key_value_d, + ]; let proved_path_key_values = - ProvedPathKeyValue::from_proved_key_values(path.clone(), proved_key_values); - assert_eq!(proved_path_key_values.len(), 3); + ProvedPathKeyOptionalValue::from_proved_key_values(path.clone(), proved_key_values); + assert_eq!(proved_path_key_values.len(), 4); assert_eq!( proved_path_key_values[0], - ProvedPathKeyValue { + ProvedPathKeyOptionalValue { path: path.clone(), key: b"a".to_vec(), - value: vec![5, 6], + value: Some(vec![5, 6]), proof: [0; 32] } ); assert_eq!( proved_path_key_values[1], - ProvedPathKeyValue { + ProvedPathKeyOptionalValue { path: path.clone(), key: b"b".to_vec(), - value: vec![5, 7], + value: Some(vec![5, 7]), proof: [1; 32] } ); assert_eq!( proved_path_key_values[2], - ProvedPathKeyValue { - path, + ProvedPathKeyOptionalValue { + path: path.clone(), key: b"c".to_vec(), - value: vec![6, 7], + value: Some(vec![6, 7]), + proof: [2; 32] + } + ); + + assert_eq!( + proved_path_key_values[3], + ProvedPathKeyOptionalValue { + path, + key: b"d".to_vec(), + value: None, proof: [2; 32] } ); } } + +pub fn hex_to_ascii(hex_value: &[u8]) -> String { + // Define the set of allowed characters + const ALLOWED_CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ + abcdefghijklmnopqrstuvwxyz\ 
+ 0123456789_-/\\[]@"; + + // Check if all characters in hex_value are allowed + if hex_value.iter().all(|&c| ALLOWED_CHARS.contains(&c)) { + // Try to convert to UTF-8 + String::from_utf8(hex_value.to_vec()) + .unwrap_or_else(|_| format!("0x{}", hex::encode(hex_value))) + } else { + // Hex encode and prepend "0x" + format!("0x{}", hex::encode(hex_value)) + } +} + +pub fn path_hex_to_ascii(path: &Path) -> String { + path.iter() + .map(|e| hex_to_ascii(e.as_slice())) + .collect::>() + .join("/") +} + +pub fn path_as_slices_hex_to_ascii(path: &[&[u8]]) -> String { + path.into_iter() + .map(|e| hex_to_ascii(e)) + .collect::>() + .join("/") +} +pub fn optional_element_hex_to_ascii(hex_value: Option<&Vec>) -> String { + match hex_value { + None => "None".to_string(), + Some(hex_value) => Element::deserialize(hex_value) + .map(|e| e.to_string()) + .unwrap_or_else(|_| hex::encode(hex_value)), + } +} + +pub fn element_hex_to_ascii(hex_value: &[u8]) -> String { + Element::deserialize(hex_value) + .map(|e| e.to_string()) + .unwrap_or_else(|_| hex::encode(hex_value)) +} diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index 7a347c15..4e0375e3 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -1,200 +1,408 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Verify proof operations - -use std::{borrow::Cow, collections::BTreeMap}; - -use grovedb_merk::proofs::query::PathKey; -#[cfg(any(feature = "full", feature = "verify"))] -pub use grovedb_merk::proofs::query::{Path, ProvedKeyValue}; -#[cfg(any(feature = "full", feature = "verify"))] +use std::collections::{BTreeMap, BTreeSet}; + use grovedb_merk::{ - proofs::Query, - tree::{combine_hash, value_hash as value_hash_fn}, + proofs::{ + query::{PathKey, VerifyOptions}, + Query, + }, + tree::{combine_hash, value_hash}, CryptoHash, }; -use crate::{ - operations::proof::util::{ - reduce_limit_and_offset_by, ProvedPathKeyValue, ProvedPathKeyValues, - }, - query_result_type::PathKeyOptionalElementTrio, - versioning::read_and_consume_proof_version, - SizedQuery, +#[cfg(feature = "proof_debug")] +use crate::operations::proof::util::{ + hex_to_ascii, path_as_slices_hex_to_ascii, path_hex_to_ascii, }; -#[cfg(any(feature = "full", feature = "verify"))] use crate::{ - operations::proof::util::{ - ProofReader, ProofTokenType, ProofTokenType::AbsentPath, EMPTY_TREE_HASH, + operations::proof::{ + util::{ProvedPathKeyOptionalValue, ProvedPathKeyValues}, + GroveDBProof, GroveDBProofV0, LayerProof, ProveOptions, }, + query_result_type::PathKeyOptionalElementTrio, Element, Error, GroveDb, PathQuery, }; -#[cfg(any(feature = "full", feature = "verify"))] -pub type ProvedKeyValues = Vec; - -#[cfg(any(feature = "full", feature = "verify"))] -type EncounteredAbsence = bool; - -#[cfg(any(feature = 
"full", feature = "verify"))] impl GroveDb { - /// Verify proof given a path query - /// Returns the root hash + deserialized elements - pub fn verify_query( + pub fn verify_query_with_options( proof: &[u8], query: &PathQuery, - ) -> Result<([u8; 32], Vec), Error> { - let (root_hash, proved_path_key_values) = Self::verify_query_raw(proof, query)?; - let path_key_optional_elements = proved_path_key_values - .into_iter() - .map(|pkv| pkv.try_into()) - .collect::, Error>>()?; - Ok((root_hash, path_key_optional_elements)) + options: VerifyOptions, + ) -> Result<(CryptoHash, Vec), Error> { + if options.absence_proofs_for_non_existing_searched_keys { + // must have a limit + query.query.limit.ok_or(Error::NotSupported( + "limits must be set in verify_query_with_absence_proof".to_string(), + ))?; + } + + // must have no offset + if query.query.offset.is_some() { + return Err(Error::NotSupported( + "offsets in path queries are not supported for proofs".to_string(), + )); + } + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + let grovedb_proof: GroveDBProof = bincode::decode_from_slice(proof, config) + .map_err(|e| Error::CorruptedData(format!("unable to decode proof: {}", e)))? + .0; + + let (root_hash, result) = Self::verify_proof_internal(&grovedb_proof, query, options)?; + + Ok((root_hash, result)) } - /// Verify proof for a given path query returns serialized elements pub fn verify_query_raw( proof: &[u8], query: &PathQuery, - ) -> Result<([u8; 32], ProvedPathKeyValues), Error> { - let mut verifier = ProofVerifier::new(query); - let hash = verifier.execute_proof(proof, query, false)?; - - Ok((hash, verifier.result_set)) + ) -> Result<(CryptoHash, ProvedPathKeyValues), Error> { + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + let grovedb_proof: GroveDBProof = bincode::decode_from_slice(proof, config) + .map_err(|e| Error::CorruptedData(format!("unable to decode proof: {}", e)))? 
+ .0; + + let (root_hash, result) = Self::verify_proof_raw_internal( + &grovedb_proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: false, + verify_proof_succinctness: false, + include_empty_trees_in_result: true, + }, + )?; + + Ok((root_hash, result)) } - /// Verify proof given multiple path queries. - /// If we have more than one path query we merge before performing - /// verification. - pub fn verify_query_many( - proof: &[u8], - query: Vec<&PathQuery>, - ) -> Result<([u8; 32], ProvedPathKeyValues), Error> { - if query.len() > 1 { - let query = PathQuery::merge(query)?; - GroveDb::verify_query_raw(proof, &query) - } else { - GroveDb::verify_query_raw(proof, query[0]) + fn verify_proof_internal( + proof: &GroveDBProof, + query: &PathQuery, + options: VerifyOptions, + ) -> Result<(CryptoHash, Vec), Error> { + match proof { + GroveDBProof::V0(proof_v0) => Self::verify_proof_internal_v0(proof_v0, query, options), } } - /// Given a verbose proof, we can verify it with a subset path query. - /// Returning the root hash and the deserialized result set. 
- pub fn verify_subset_query( - proof: &[u8], + fn verify_proof_internal_v0( + proof: &GroveDBProofV0, query: &PathQuery, - ) -> Result<([u8; 32], Vec), Error> { - let (root_hash, proved_path_key_values) = Self::verify_subset_query_raw(proof, query)?; - let path_key_optional_elements = proved_path_key_values - .into_iter() - .map(|pkv| pkv.try_into()) - .collect::, Error>>()?; - Ok((root_hash, path_key_optional_elements)) - } + options: VerifyOptions, + ) -> Result<(CryptoHash, Vec), Error> { + let mut result = Vec::new(); + let mut limit = query.query.limit; + let root_hash = Self::verify_layer_proof( + &proof.root_layer, + &proof.prove_options, + query, + &mut limit, + &[], + &mut result, + &options, + )?; + + if options.absence_proofs_for_non_existing_searched_keys { + // must have a limit + let max_results = query.query.limit.ok_or(Error::NotSupported( + "limits must be set in verify_query_with_absence_proof".to_string(), + ))? as usize; + + let terminal_keys = query.terminal_keys(max_results)?; + + // convert the result set to a btree map + let mut result_set_as_map: BTreeMap> = result + .into_iter() + .map(|(path, key, element)| ((path, key), element)) + .collect(); + #[cfg(feature = "proof_debug")] + { + println!( + "terminal keys are [{}] \n result set is [{}]", + terminal_keys + .iter() + .map(|(path, key)| format!( + "path: {} key: {}", + path_hex_to_ascii(path), + hex_to_ascii(key) + )) + .collect::>() + .join(", "), + result_set_as_map + .iter() + .map(|((path, key), e)| { + let element_string = if let Some(e) = e { + e.to_string() + } else { + "None".to_string() + }; + format!( + "path: {} key: {} element: {}", + path_hex_to_ascii(path), + hex_to_ascii(key), + element_string, + ) + }) + .collect::>() + .join(", ") + ); + } - /// Given a verbose proof, we can verify it with a subset path query. - /// Returning the root hash and the serialized result set. 
- pub fn verify_subset_query_raw( - proof: &[u8], - query: &PathQuery, - ) -> Result<([u8; 32], ProvedPathKeyValues), Error> { - let mut verifier = ProofVerifier::new(query); - let hash = verifier.execute_proof(proof, query, true)?; - Ok((hash, verifier.result_set)) + result = terminal_keys + .into_iter() + .map(|terminal_key| { + let element = result_set_as_map.remove(&terminal_key).flatten(); + (terminal_key.0, terminal_key.1, element) + }) + .collect(); + } + + Ok((root_hash, result)) } - /// Verify non subset query return the absence proof - /// Returns all possible keys within the Path Query with an optional Element - /// Value Element is set to None if absent - pub fn verify_query_with_absence_proof( - proof: &[u8], + fn verify_proof_raw_internal( + proof: &GroveDBProof, query: &PathQuery, - ) -> Result<([u8; 32], Vec), Error> { - Self::verify_with_absence_proof(proof, query, Self::verify_query) + options: VerifyOptions, + ) -> Result<(CryptoHash, ProvedPathKeyValues), Error> { + match proof { + GroveDBProof::V0(proof_v0) => { + Self::verify_proof_raw_internal_v0(proof_v0, query, options) + } + } } - /// Verify subset query return the absence proof - /// Returns all possible keys within the Path Query with an optional Element - /// Value Element is set to None if absent - pub fn verify_subset_query_with_absence_proof( - proof: &[u8], + fn verify_proof_raw_internal_v0( + proof: &GroveDBProofV0, query: &PathQuery, - ) -> Result<([u8; 32], Vec), Error> { - Self::verify_with_absence_proof(proof, query, Self::verify_subset_query) + options: VerifyOptions, + ) -> Result<(CryptoHash, ProvedPathKeyValues), Error> { + let mut result = Vec::new(); + let mut limit = query.query.limit; + let root_hash = Self::verify_layer_proof( + &proof.root_layer, + &proof.prove_options, + query, + &mut limit, + &[], + &mut result, + &options, + )?; + Ok((root_hash, result)) } - /// Verifies the proof and returns both elements in the result set and the - /// elements in query but not 
in state. - /// Note: This only works for certain path queries. - // TODO: We should not care about terminal keys, as theoretically they can be - // infinite we should perform the absence check solely on the proof and the - // given key, this is a temporary solution - fn verify_with_absence_proof( - proof: &[u8], + fn verify_layer_proof( + layer_proof: &LayerProof, + prove_options: &ProveOptions, query: &PathQuery, - verification_fn: T, - ) -> Result<([u8; 32], Vec), Error> + limit_left: &mut Option, + current_path: &[&[u8]], + result: &mut Vec, + options: &VerifyOptions, + ) -> Result where - T: Fn(&[u8], &PathQuery) -> Result<([u8; 32], Vec), Error>, + T: TryFrom, + Error: From<>::Error>, { - // must have a limit - let max_results = query.query.limit.ok_or(Error::NotSupported( - "limits must be set in verify_query_with_absence_proof".to_string(), - ))? as usize; + let internal_query = + query + .query_items_at_path(current_path) + .ok_or(Error::CorruptedPath(format!( + "verify raw: path {} should be part of path_query {}", + current_path + .iter() + .map(hex::encode) + .collect::>() + .join("/"), + query + )))?; + + let level_query = Query { + items: internal_query.items.to_vec(), + left_to_right: internal_query.left_to_right, + ..Default::default() + }; - // must have no offset - if query.query.offset.is_some() { - return Err(Error::NotSupported( - "offsets are not supported for verify_query_with_absence_proof".to_string(), - )); + let (root_hash, merk_result) = level_query + .execute_proof( + &layer_proof.merk_proof, + *limit_left, + internal_query.left_to_right, + ) + .unwrap() + .map_err(|e| { + eprintln!("{e}"); + Error::InvalidProof(format!("invalid proof verification parameters: {}", e)) + })?; + #[cfg(feature = "proof_debug")] + { + println!( + "current path {} \n merk result is {}", + path_as_slices_hex_to_ascii(current_path), + merk_result + ); } - let terminal_keys = query.terminal_keys(max_results)?; + let mut verified_keys = BTreeSet::new(); - // 
need to actually verify the query - let (root_hash, result_set) = verification_fn(proof, query)?; + if merk_result.result_set.is_empty() { + if prove_options.decrease_limit_on_empty_sub_query_result { + limit_left.as_mut().map(|limit| *limit -= 1); + } + } else { + for proved_key_value in merk_result.result_set { + let mut path = current_path.to_vec(); + let key = &proved_key_value.key; + let hash = &proved_key_value.proof; + if let Some(value_bytes) = &proved_key_value.value { + let element = Element::deserialize(value_bytes)?; + + verified_keys.insert(key.clone()); + + if let Some(lower_layer) = layer_proof.lower_layers.get(key) { + #[cfg(feature = "proof_debug")] + { + println!("lower layer had key {}", hex_to_ascii(key)); + } + match element { + Element::Tree(Some(_), _) | Element::SumTree(Some(_), ..) => { + path.push(key); + let lower_hash = Self::verify_layer_proof( + lower_layer, + prove_options, + query, + limit_left, + &path, + result, + options, + )?; + let combined_root_hash = + combine_hash(value_hash(value_bytes).value(), &lower_hash) + .value() + .to_owned(); + if hash != &combined_root_hash { + return Err(Error::InvalidProof(format!( + "Mismatch in lower layer hash, expected {}, got {}", + hex::encode(hash), + hex::encode(combined_root_hash) + ))); + } + if limit_left == &Some(0) { + break; + } + } + Element::Tree(None, _) + | Element::SumTree(None, ..) + | Element::SumItem(..) + | Element::Item(..) + | Element::Reference(..) 
=> { + return Err(Error::InvalidProof( + "Proof has lower layer for a non Tree".into(), + )); + } + } + } else if element.is_any_item() + || !internal_query.has_subquery_or_matching_in_path_on_key(key) + && (options.include_empty_trees_in_result + || !matches!(element, Element::Tree(None, _))) + { + let path_key_optional_value = + ProvedPathKeyOptionalValue::from_proved_key_value( + path.iter().map(|p| p.to_vec()).collect(), + proved_key_value, + ); + #[cfg(feature = "proof_debug")] + { + println!( + "pushing {} limit left after is {:?}", + &path_key_optional_value, limit_left + ); + } + result.push(path_key_optional_value.try_into()?); - // convert the result set to a btree map - let mut result_set_as_map: BTreeMap> = result_set - .into_iter() - .map(|(path, key, element)| ((path, key), element)) - .collect(); + limit_left.as_mut().map(|limit| *limit -= 1); + if limit_left == &Some(0) { + break; + } + } else { + #[cfg(feature = "proof_debug")] + { + println!( + "we have subquery on key {} with value {}: {}", + hex_to_ascii(key), + element, + level_query + ) + } + } + } + } + } - let result_set_with_absence: Vec = terminal_keys - .into_iter() - .map(|terminal_key| { - let element = result_set_as_map.remove(&terminal_key).flatten(); - (terminal_key.0, terminal_key.1, element) - }) - .collect(); + Ok(root_hash) + } - Ok((root_hash, result_set_with_absence)) + pub fn verify_query( + proof: &[u8], + query: &PathQuery, + ) -> Result<(CryptoHash, Vec), Error> { + Self::verify_query_with_options( + proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: false, + verify_proof_succinctness: true, + include_empty_trees_in_result: false, + }, + ) + } + + pub fn verify_subset_query( + proof: &[u8], + query: &PathQuery, + ) -> Result<(CryptoHash, Vec), Error> { + Self::verify_query_with_options( + proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: false, + verify_proof_succinctness: false, + 
include_empty_trees_in_result: false, + }, + ) + } + + pub fn verify_query_with_absence_proof( + proof: &[u8], + query: &PathQuery, + ) -> Result<(CryptoHash, Vec), Error> { + Self::verify_query_with_options( + proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: true, + verify_proof_succinctness: true, + include_empty_trees_in_result: false, + }, + ) + } + + pub fn verify_subset_query_with_absence_proof( + proof: &[u8], + query: &PathQuery, + ) -> Result<(CryptoHash, Vec), Error> { + Self::verify_query_with_options( + proof, + query, + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: true, + verify_proof_succinctness: false, + include_empty_trees_in_result: false, + }, + ) } /// Verify subset proof with a chain of path query functions. @@ -235,698 +443,3 @@ impl GroveDb { Ok((last_root_hash, results)) } } - -#[cfg(any(feature = "full", feature = "verify"))] -/// Proof verifier -struct ProofVerifier { - limit: Option, - offset: Option, - result_set: ProvedPathKeyValues, -} - -#[cfg(any(feature = "full", feature = "verify"))] -impl ProofVerifier { - /// New query - pub fn new(query: &PathQuery) -> Self { - ProofVerifier { - limit: query.query.limit, - offset: query.query.offset, - result_set: vec![], - } - } - - /// Execute proof - pub fn execute_proof( - &mut self, - proof: &[u8], - query: &PathQuery, - is_verbose: bool, - ) -> Result<[u8; 32], Error> { - let (_proof_version, proof) = read_and_consume_proof_version(proof)?; - let mut proof_reader = ProofReader::new_with_verbose_status(proof, is_verbose); - - let path_slices = query.path.iter().map(|x| x.as_slice()).collect::>(); - let mut query = Cow::Borrowed(query); - - // TODO: refactor and add better comments - // if verbose, the first thing we want to do is read the path info - if is_verbose { - let original_path = proof_reader.read_path_info()?; - - if original_path == path_slices { - // do nothing - } else if original_path.len() > path_slices.len() { - // TODO: 
can we relax this constraint - return Err(Error::InvalidProof( - "original path query path must not be greater than the subset path len" - .to_string(), - )); - } else { - let original_path_in_new_path = original_path - .iter() - .all(|key| path_slices.contains(&key.as_slice())); - - if !original_path_in_new_path { - return Err(Error::InvalidProof( - "the original path should be a subset of the subset path".to_string(), - )); - } else { - // We construct a new path query - let path_not_common = path_slices[original_path.len()..].to_vec(); - let mut path_iter = path_not_common.iter(); - - let mut new_query = Query::new(); - if path_iter.len() >= 1 { - new_query - .insert_key(path_iter.next().expect("confirmed has value").to_vec()); - } - - // need to add the first key to the query - new_query.set_subquery_path(path_iter.map(|a| a.to_vec()).collect()); - new_query.set_subquery(query.query.query.clone()); - - query = Cow::Owned(PathQuery::new( - original_path, - SizedQuery::new(new_query, query.query.limit, query.query.offset), - )); - } - } - } - - let (proof_token_type, proof, _) = proof_reader.read_proof()?; - - let root_hash = if proof_token_type == AbsentPath { - self.verify_absent_path(&mut proof_reader, path_slices)? - } else { - let path_owned = query.path.iter().map(|a| a.to_vec()).collect(); - let mut last_subtree_root_hash = self.execute_subquery_proof( - proof_token_type, - proof, - &mut proof_reader, - query.as_ref(), - path_owned, - )?; - - // validate the path elements are connected - self.verify_path_to_root( - query.as_ref(), - query.path.iter().map(|a| a.as_ref()).collect(), - &mut proof_reader, - &mut last_subtree_root_hash, - )? 
- }; - - Ok(root_hash) - } - - fn execute_subquery_proof( - &mut self, - proof_token_type: ProofTokenType, - proof: Vec, - proof_reader: &mut ProofReader, - query: &PathQuery, - path: Path, - ) -> Result<[u8; 32], Error> { - let last_root_hash: [u8; 32]; - - match proof_token_type { - ProofTokenType::SizedMerk => { - // verify proof with limit and offset values - let verification_result = self.execute_merk_proof( - ProofTokenType::SizedMerk, - &proof, - &query.query.query, - query.query.query.left_to_right, - path, - )?; - - last_root_hash = verification_result.0; - } - ProofTokenType::Merk => { - // for non leaf subtrees, we want to prove that all the queried keys - // have an accompanying proof as long as the limit is non zero - // and their child subtree is not empty - let (proof_root_hash, children) = self.execute_merk_proof( - ProofTokenType::Merk, - &proof, - &query.query.query, - query.query.query.left_to_right, - path, - )?; - - last_root_hash = proof_root_hash; - let children = children.ok_or(Error::InvalidProof( - "MERK_PROOF always returns a result set".to_string(), - ))?; - - for proved_path_key_value in children { - let ProvedPathKeyValue { - path, - key, - value: value_bytes, - proof: value_hash, - } = proved_path_key_value; - let child_element = Element::deserialize(value_bytes.as_slice())?; - match child_element { - Element::Tree(expected_root_key, _) - | Element::SumTree(expected_root_key, ..) 
=> { - let mut expected_combined_child_hash = value_hash; - let mut current_value_bytes = value_bytes; - - if self.limit == Some(0) { - // we are done verifying the subqueries - break; - } - - let (subquery_path, subquery_value) = - Element::subquery_paths_and_value_for_sized_query( - &query.query, - key.as_slice(), - ); - - if subquery_value.is_none() && subquery_path.is_none() { - // add this element to the result set - let skip_limit = reduce_limit_and_offset_by( - &mut self.limit, - &mut self.offset, - 1, - ); - - if !skip_limit { - // only insert to the result set if the offset value is not - // greater than 0 - self.result_set.push( - ProvedPathKeyValue::from_proved_key_value( - path, - ProvedKeyValue { - key, - value: current_value_bytes, - proof: value_hash, - }, - ), - ); - } - - continue; - } - - // What is the equivalent for an empty tree - if expected_root_key.is_none() { - // child node is empty, move on to next - continue; - } - - // update the path, we are about to perform a subquery call - let mut new_path = path.to_owned(); - new_path.push(key); - - if subquery_path.is_some() - && !subquery_path.as_ref().unwrap().is_empty() - { - if subquery_value.is_none() { - self.verify_subquery_path( - proof_reader, - ProofTokenType::SizedMerk, - &mut subquery_path.expect("confirmed it has a value above"), - &mut expected_combined_child_hash, - &mut current_value_bytes, - &mut new_path, - )?; - continue; - } else { - let (_, result_set_opt, encountered_absence) = self - .verify_subquery_path( - proof_reader, - ProofTokenType::Merk, - &mut subquery_path - .expect("confirmed it has a value above"), - &mut expected_combined_child_hash, - &mut current_value_bytes, - &mut new_path, - )?; - - if encountered_absence { - // we hit an absence proof while verifying the subquery path - continue; - } - - let subquery_path_result_set = result_set_opt; - if subquery_path_result_set.is_none() { - // this means a sized proof was generated for the subquery - // key - // which 
is invalid as there exists a subquery value - return Err(Error::InvalidProof( - "expected unsized proof for subquery path as subquery \ - value exists" - .to_string(), - )); - } - let subquery_path_result_set = - subquery_path_result_set.expect("confirmed exists above"); - - if subquery_path_result_set.is_empty() { - // we have a valid proof that shows the absence of the - // subquery path in the tree, hence the subquery value - // cannot be applied, move on to the next. - continue; - } - - Self::update_root_key_from_subquery_path_element( - &mut expected_combined_child_hash, - &mut current_value_bytes, - &subquery_path_result_set, - )?; - } - } - - let new_path_query = - PathQuery::new_unsized(vec![], subquery_value.unwrap()); - - let (child_proof_token_type, child_proof) = proof_reader - .read_next_proof(new_path.last().unwrap_or(&Default::default()))?; - - let child_hash = self.execute_subquery_proof( - child_proof_token_type, - child_proof, - proof_reader, - &new_path_query, - new_path, - )?; - - let combined_child_hash = combine_hash( - value_hash_fn(¤t_value_bytes).value(), - &child_hash, - ) - .value() - .to_owned(); - - if combined_child_hash != expected_combined_child_hash { - return Err(Error::InvalidProof(format!( - "child hash {} doesn't match the expected hash {}", - hex::encode(combined_child_hash), - hex::encode(expected_combined_child_hash) - ))); - } - } - _ => { - // encountered a non tree element, we can't apply a subquery to it - // add it to the result set. 
- if self.limit == Some(0) { - break; - } - - let skip_limit = - reduce_limit_and_offset_by(&mut self.limit, &mut self.offset, 1); - - if !skip_limit { - // only insert to the result set if the offset value is not greater - // than 0 - self.result_set - .push(ProvedPathKeyValue::from_proved_key_value( - path, - ProvedKeyValue { - key, - value: value_bytes, - proof: value_hash, - }, - )); - } - } - } - } - } - ProofTokenType::EmptyTree => { - last_root_hash = EMPTY_TREE_HASH; - } - t => { - // execute_subquery_proof only expects proofs for merk trees - // root proof is handled separately - return Err(Error::InvalidProof(format!( - "wrong proof type, expected sized merk, merk or empty tree but got {}", - t - ))); - } - } - Ok(last_root_hash) - } - - /// Deserialize subkey_element and update expected root hash and element - /// value - fn update_root_key_from_subquery_path_element( - expected_child_hash: &mut CryptoHash, - current_value_bytes: &mut Vec, - subquery_path_result_set: &[ProvedPathKeyValue], - ) -> Result<(), Error> { - let elem_value = &subquery_path_result_set[0].value; - let subquery_path_element = Element::deserialize(elem_value) - .map_err(|_| Error::CorruptedData("failed to deserialize element".to_string()))?; - match subquery_path_element { - Element::Tree(..) | Element::SumTree(..) 
=> { - *expected_child_hash = subquery_path_result_set[0].proof; - *current_value_bytes = subquery_path_result_set[0].value.to_owned(); - } - e => { - // the means that the subquery path pointed to a non tree - // element, this is not valid as you cannot apply the - // the subquery value to non tree items - return Err(Error::InvalidProof(format!( - "subquery path cannot point to non tree element, got {}", - e.type_str() - ))); - } - } - Ok(()) - } - - /// Checks that a valid proof showing the existence or absence of the - /// subquery path is present - fn verify_subquery_path( - &mut self, - proof_reader: &mut ProofReader, - expected_proof_token_type: ProofTokenType, - subquery_path: &mut Path, - expected_root_hash: &mut CryptoHash, - current_value_bytes: &mut Vec, - current_path: &mut Path, - ) -> Result<(CryptoHash, Option, EncounteredAbsence), Error> { - // the subquery path contains at least one item. - let last_key = subquery_path.remove(subquery_path.len() - 1); - - for subquery_key in subquery_path.iter() { - let (proof_token_type, subkey_proof) = - proof_reader.read_next_proof(current_path.last().unwrap_or(&Default::default()))?; - // intermediate proofs are all going to be unsized merk proofs - if proof_token_type != ProofTokenType::Merk { - return Err(Error::InvalidProof(format!( - "expected MERK proof type for intermediate subquery path keys, got {}", - proof_token_type - ))); - } - match proof_token_type { - ProofTokenType::Merk => { - let mut key_as_query = Query::new(); - key_as_query.insert_key(subquery_key.to_owned()); - current_path.push(subquery_key.to_owned()); - - let (proof_root_hash, result_set) = self.execute_merk_proof( - proof_token_type, - &subkey_proof, - &key_as_query, - key_as_query.left_to_right, - current_path.to_owned(), - )?; - - // should always be some as we force the proof type to be MERK - debug_assert!(result_set.is_some(), "{}", true); - - // result_set being empty means we could not find the given key in the subtree - // 
which essentially means an absence proof - if result_set - .as_ref() - .expect("result set should always be some for merk proof type") - .is_empty() - { - return Ok((proof_root_hash, None, true)); - } - - // verify that the elements in the subquery path are linked by root hashes. - let combined_child_hash = - combine_hash(value_hash_fn(current_value_bytes).value(), &proof_root_hash) - .value() - .to_owned(); - - if combined_child_hash != *expected_root_hash { - return Err(Error::InvalidProof(format!( - "child hash {} doesn't match the expected hash {}", - hex::encode(combined_child_hash), - hex::encode(expected_root_hash) - ))); - } - - // after confirming they are linked use the latest hash values for subsequent - // checks - Self::update_root_key_from_subquery_path_element( - expected_root_hash, - current_value_bytes, - &result_set.expect("confirmed is some"), - )?; - } - t => { - return Err(Error::InvalidProof(format!( - "expected merk of sized merk proof type for subquery path, got {}", - t - ))); - } - } - } - - let (proof_token_type, subkey_proof) = - proof_reader.read_next_proof(current_path.last().unwrap_or(&Default::default()))?; - if proof_token_type != expected_proof_token_type { - return Err(Error::InvalidProof(format!( - "unexpected proof type for subquery path, expected {}, got {}", - expected_proof_token_type, proof_token_type - ))); - } - - match proof_token_type { - ProofTokenType::Merk | ProofTokenType::SizedMerk => { - let mut key_as_query = Query::new(); - key_as_query.insert_key(last_key.to_owned()); - - let verification_result = self.execute_merk_proof( - proof_token_type, - &subkey_proof, - &key_as_query, - key_as_query.left_to_right, - current_path.to_owned(), - )?; - - current_path.push(last_key); - - Ok((verification_result.0, verification_result.1, false)) - } - t => Err(Error::InvalidProof(format!( - "expected merk or sized merk proof type for subquery path, got {}", - t - ))), - } - } - - fn verify_absent_path( - &mut self, - 
proof_reader: &mut ProofReader, - path_slices: Vec<&[u8]>, - ) -> Result<[u8; 32], Error> { - let mut root_key_hash = None; - let mut expected_child_hash = None; - let mut last_result_set: ProvedPathKeyValues = vec![]; - - for key in path_slices { - let (proof_token_type, merk_proof, _) = proof_reader.read_proof()?; - if proof_token_type == ProofTokenType::EmptyTree { - // when we encounter the empty tree op, we need to ensure - // that the expected tree hash is the combination of the - // Element_value_hash and the empty root hash [0; 32] - let combined_hash = combine_hash( - value_hash_fn(last_result_set[0].value.as_slice()).value(), - &[0; 32], - ) - .unwrap(); - if Some(combined_hash) != expected_child_hash { - return Err(Error::InvalidProof( - "proof invalid: could not verify empty subtree while generating absent \ - path proof" - .to_string(), - )); - } else { - last_result_set = vec![]; - break; - } - } else if proof_token_type != ProofTokenType::Merk { - return Err(Error::InvalidProof(format!( - "expected a merk proof for absent path, got {}", - proof_token_type - ))); - } - - let mut child_query = Query::new(); - child_query.insert_key(key.to_vec()); - - // TODO: don't pass empty vec - let proof_result = self.execute_merk_proof( - ProofTokenType::Merk, - &merk_proof, - &child_query, - true, - // cannot return a result set - Vec::new(), - )?; - - if let Some(expected_child_hash) = expected_child_hash { - let combined_hash = combine_hash( - value_hash_fn(last_result_set[0].value.as_slice()).value(), - &proof_result.0, - ) - .value() - .to_owned(); - if combined_hash != expected_child_hash { - return Err(Error::InvalidProof(format!( - "proof invalid: invalid parent, expected {}, got {}", - hex::encode(expected_child_hash), - hex::encode(combined_hash) - ))); - } - } else { - root_key_hash = Some(proof_result.0); - } - - last_result_set = proof_result - .1 - .expect("MERK_PROOF always returns a result set"); - if last_result_set.is_empty() { - // if result set 
is empty then we have reached the absence point, break - break; - } - - let elem = Element::deserialize(last_result_set[0].value.as_slice())?; - let child_hash = match elem { - Element::Tree(..) | Element::SumTree(..) => Ok(Some(last_result_set[0].proof)), - e => Err(Error::InvalidProof(format!( - "intermediate proofs should be for trees, got {}", - e.type_str() - ))), - }?; - expected_child_hash = child_hash; - } - - if last_result_set.is_empty() { - if let Some(hash) = root_key_hash { - Ok(hash) - } else { - Err(Error::InvalidProof( - "proof invalid: no non root tree found".to_string(), - )) - } - } else { - Err(Error::InvalidProof( - "proof invalid: path not absent".to_string(), - )) - } - } - - /// Verifies that the correct proof was provided to confirm the path in - /// query - fn verify_path_to_root( - &mut self, - query: &PathQuery, - path_slices: Vec<&[u8]>, - proof_reader: &mut ProofReader, - expected_root_hash: &mut [u8; 32], - ) -> Result<[u8; 32], Error> { - let mut split_path = path_slices.split_last(); - while let Some((key, path_slice)) = split_path { - // for every subtree, there should be a corresponding proof for the parent - // which should prove that this subtree is a child of the parent tree - let (proof_token_type, parent_merk_proof) = - proof_reader.read_next_proof(path_slice.last().unwrap_or(&Default::default()))?; - if proof_token_type != ProofTokenType::Merk { - return Err(Error::InvalidProof(format!( - "wrong data_type expected Merk Proof, got {}", - proof_token_type - ))); - } - - let mut parent_query = Query::new(); - parent_query.insert_key(key.to_vec()); - - let proof_result = self.execute_merk_proof( - ProofTokenType::Merk, - &parent_merk_proof, - &parent_query, - query.query.query.left_to_right, - // TODO: don't pass empty vec - Vec::new(), - )?; - - let result_set = proof_result - .1 - .expect("MERK_PROOF always returns a result set"); - if result_set.is_empty() || &result_set[0].key != key { - return Err(Error::InvalidProof( - 
"proof invalid: invalid parent".to_string(), - )); - } - - let elem = Element::deserialize(result_set[0].value.as_slice())?; - let child_hash = match elem { - Element::Tree(..) | Element::SumTree(..) => Ok(result_set[0].proof), - t => Err(Error::InvalidProof(format!( - "intermediate proofs should be for trees, got {}", - t.type_str() - ))), - }?; - - let combined_root_hash = combine_hash( - value_hash_fn(&result_set[0].value).value(), - expected_root_hash, - ) - .value() - .to_owned(); - if child_hash != combined_root_hash { - return Err(Error::InvalidProof(format!( - "Bad path: tree hash does not have expected hash, got {}, expected {}", - hex::encode(child_hash), - hex::encode(combined_root_hash) - ))); - } - - *expected_root_hash = proof_result.0; - - split_path = path_slice.split_last(); - } - - Ok(*expected_root_hash) - } - - /// Execute a merk proof, update the state when a sized proof is - /// encountered i.e. update the limit, offset and result set values - fn execute_merk_proof( - &mut self, - proof_token_type: ProofTokenType, - proof: &[u8], - query: &Query, - left_to_right: bool, - path: Path, - ) -> Result<(CryptoHash, Option), Error> { - let is_sized_proof = proof_token_type == ProofTokenType::SizedMerk; - let mut limit = None; - let mut offset = None; - - if is_sized_proof { - limit = self.limit; - offset = self.offset; - } - - let (hash, result) = - grovedb_merk::execute_proof(proof, query, limit, offset, left_to_right) - .unwrap() - .map_err(|e| { - eprintln!("{e}"); - Error::InvalidProof("invalid proof verification parameters".to_string()) - })?; - - // convert the result set to proved_path_key_values - let proved_path_key_values = - ProvedPathKeyValue::from_proved_key_values(path, result.result_set); - - if is_sized_proof { - self.limit = result.limit; - self.offset = result.offset; - self.result_set.extend(proved_path_key_values); - Ok((hash, None)) - } else { - Ok((hash, Some(proved_path_key_values))) - } - } -} diff --git 
a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index db75144d..7b4fe42a 100644 --- a/grovedb/src/query/mod.rs +++ b/grovedb/src/query/mod.rs @@ -1,41 +1,19 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Queries -use std::cmp::Ordering; +use std::{ + borrow::{Cow, Cow::Borrowed}, + cmp::Ordering, + fmt, +}; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_merk::proofs::query::query_item::QueryItem; -use grovedb_merk::proofs::query::SubqueryBranch; +use grovedb_merk::proofs::query::{Key, SubqueryBranch}; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_merk::proofs::Query; +use indexmap::IndexMap; +use crate::operations::proof::util::hex_to_ascii; #[cfg(any(feature = "full", feature = "verify"))] use crate::query_result_type::PathKey; #[cfg(any(feature = "full", feature = "verify"))] @@ -55,6 +33,20 @@ pub struct PathQuery { pub query: SizedQuery, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for PathQuery { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "PathQuery {{ path: [")?; + for (i, path_element) in self.path.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{}", hex_to_ascii(path_element))?; + } + write!(f, "], query: {} }}", self.query) + } +} + #[cfg(any(feature = "full", feature = "verify"))] #[derive(Debug, Clone)] /// Holds a query to apply to a tree and an optional limit/offset value. 
@@ -68,6 +60,20 @@ pub struct SizedQuery { pub offset: Option, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for SizedQuery { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SizedQuery {{ query: {}", self.query)?; + if let Some(limit) = self.limit { + write!(f, ", limit: {}", limit)?; + } + if let Some(offset) = self.offset { + write!(f, ", offset: {}", offset)?; + } + write!(f, " }}") + } +} + #[cfg(any(feature = "full", feature = "verify"))] impl SizedQuery { /// New sized query @@ -269,19 +275,289 @@ impl PathQuery { } } } + + pub fn query_items_at_path(&self, path: &[&[u8]]) -> Option { + fn recursive_query_items<'b>( + query: &'b Query, + path: &[&[u8]], + ) -> Option> { + if path.is_empty() { + return Some(SinglePathSubquery::from_query(query)); + } + + let key = path[0]; + let path_after_top_removed = &path[1..]; + + if let Some(conditional_branches) = &query.conditional_subquery_branches { + for (query_item, subquery_branch) in conditional_branches { + if query_item.contains(key) { + if let Some(subquery_path) = &subquery_branch.subquery_path { + if path_after_top_removed.len() <= subquery_path.len() { + if path_after_top_removed + .iter() + .zip(subquery_path) + .all(|(a, b)| *a == b.as_slice()) + { + return if path_after_top_removed.len() == subquery_path.len() { + subquery_branch.subquery.as_ref().map(|subquery| { + SinglePathSubquery::from_query(subquery) + }) + } else { + let last_path_item = path.len() == subquery_path.len(); + let has_subquery = subquery_branch.subquery.is_some(); + Some(SinglePathSubquery::from_key_when_in_path( + &subquery_path[path_after_top_removed.len()], + last_path_item, + has_subquery, + )) + }; + } + } else if path_after_top_removed + .iter() + .take(subquery_path.len()) + .zip(subquery_path) + .all(|(a, b)| *a == b.as_slice()) + { + if let Some(subquery) = &subquery_branch.subquery { + return recursive_query_items( + subquery, + 
&path_after_top_removed[subquery_path.len()..], + ); + } + } + } else if let Some(subquery) = &subquery_branch.subquery { + return recursive_query_items(subquery, path_after_top_removed); + } + + return None; + } + } + } + + if let Some(subquery_path) = &query.default_subquery_branch.subquery_path { + if path_after_top_removed.len() <= subquery_path.len() { + if path_after_top_removed + .iter() + .zip(subquery_path) + .all(|(a, b)| *a == b.as_slice()) + { + // The paths are equal for example if we had a sub path of + // path : 1 / 2 + // subquery : All items + + // If we are asking what is the subquery when we are at 1 / 2 + // we should get + return if path_after_top_removed.len() == subquery_path.len() { + query + .default_subquery_branch + .subquery + .as_ref() + .map(|subquery| SinglePathSubquery::from_query(subquery)) + } else { + let last_path_item = path.len() == subquery_path.len(); + let has_subquery = query.default_subquery_branch.subquery.is_some(); + Some(SinglePathSubquery::from_key_when_in_path( + &subquery_path[path_after_top_removed.len()], + last_path_item, + has_subquery, + )) + }; + } + } else if path_after_top_removed + .iter() + .take(subquery_path.len()) + .zip(subquery_path) + .all(|(a, b)| *a == b.as_slice()) + { + if let Some(subquery) = &query.default_subquery_branch.subquery { + return recursive_query_items( + subquery, + &path_after_top_removed[subquery_path.len()..], + ); + } + } + } else if let Some(subquery) = &query.default_subquery_branch.subquery { + return recursive_query_items(subquery, path_after_top_removed); + } + + None + } + + let self_path_len = self.path.len(); + let given_path_len = path.len(); + + match given_path_len.cmp(&self_path_len) { + Ordering::Less => { + if path.iter().zip(&self.path).all(|(a, b)| *a == b.as_slice()) { + Some(SinglePathSubquery::from_key_when_in_path( + &self.path[given_path_len], + false, + true, + )) + } else { + None + } + } + Ordering::Equal => { + if path.iter().zip(&self.path).all(|(a, b)| 
*a == b.as_slice()) { + Some(SinglePathSubquery::from_path_query(self)) + } else { + None + } + } + Ordering::Greater => { + if !self.path.iter().zip(path).all(|(a, b)| a.as_slice() == *b) { + return None; + } + recursive_query_items(&self.query.query, &path[self_path_len..]) + } + } + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +#[derive(Debug, Clone, PartialEq)] +pub enum HasSubquery<'a> { + NoSubquery, + Always, + Conditionally(Cow<'a, IndexMap>), +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl<'a> fmt::Display for HasSubquery<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + HasSubquery::NoSubquery => write!(f, "NoSubquery"), + HasSubquery::Always => write!(f, "Always"), + HasSubquery::Conditionally(map) => { + writeln!(f, "Conditionally {{")?; + for (query_item, subquery_branch) in map.iter() { + writeln!(f, " {query_item}: {subquery_branch},")?; + } + write!(f, "}}") + } + } + } +} + +impl<'a> HasSubquery<'a> { + /// Checks to see if we have a subquery on a specific key + pub fn has_subquery_on_key(&self, key: &[u8]) -> bool { + match self { + HasSubquery::NoSubquery => false, + HasSubquery::Conditionally(conditionally) => conditionally + .keys() + .any(|query_item| query_item.contains(key)), + HasSubquery::Always => true, + } + } +} + +/// This represents a query where the items might be borrowed, it is used to get +/// subquery information +#[cfg(any(feature = "full", feature = "verify"))] +#[derive(Debug, Clone, PartialEq)] +pub struct SinglePathSubquery<'a> { + /// Items + pub items: Cow<'a, Vec>, + /// Default subquery branch + pub has_subquery: HasSubquery<'a>, + /// Left to right? 
+ pub left_to_right: bool, + /// In the path of the path_query, or in a subquery path + pub in_path: Option>, +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl<'a> fmt::Display for SinglePathSubquery<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "InternalCowItemsQuery {{")?; + writeln!(f, " items: [")?; + for item in self.items.iter() { + writeln!(f, " {item},")?; + } + writeln!(f, " ]")?; + writeln!(f, " has_subquery: {}", self.has_subquery)?; + writeln!(f, " left_to_right: {}", self.left_to_right)?; + match &self.in_path { + Some(path) => writeln!(f, " in_path: Some({})", hex_to_ascii(path)), + None => writeln!(f, " in_path: None"), + }?; + write!(f, "}}") + } +} + +impl<'a> SinglePathSubquery<'a> { + /// Checks to see if we have a subquery on a specific key + pub fn has_subquery_or_matching_in_path_on_key(&self, key: &[u8]) -> bool { + if self.has_subquery.has_subquery_on_key(key) { + true + } else if let Some(path) = self.in_path.as_ref() { + path.as_slice() == key + } else { + false + } + } + + pub fn from_key_when_in_path( + key: &'a Vec, + subquery_is_last_path_item: bool, + subquery_has_inner_subquery: bool, + ) -> SinglePathSubquery<'a> { + // in this case there should be no in_path, because we are trying to get this + // level of items and nothing underneath + let in_path = if subquery_is_last_path_item && !subquery_has_inner_subquery { + None + } else { + Some(Borrowed(key)) + }; + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(key.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path, + } + } + + pub fn from_path_query(path_query: &PathQuery) -> SinglePathSubquery { + Self::from_query(&path_query.query.query) + } + + pub fn from_query(query: &Query) -> SinglePathSubquery { + let has_subquery = if query.default_subquery_branch.subquery.is_some() + || query.default_subquery_branch.subquery_path.is_some() + { + HasSubquery::Always + } else if let Some(conditional) 
= query.conditional_subquery_branches.as_ref() { + HasSubquery::Conditionally(Cow::Borrowed(conditional)) + } else { + HasSubquery::NoSubquery + }; + SinglePathSubquery { + items: Cow::Borrowed(&query.items), + has_subquery, + left_to_right: query.left_to_right, + in_path: None, + } + } } #[cfg(feature = "full")] #[cfg(test)] mod tests { - use std::ops::RangeFull; + use std::{borrow::Cow, ops::RangeFull}; - use grovedb_merk::proofs::{query::query_item::QueryItem, Query}; + use grovedb_merk::proofs::{ + query::{query_item::QueryItem, SubqueryBranch}, + Query, + }; + use indexmap::IndexMap; use crate::{ + query::{HasSubquery, SinglePathSubquery}, query_result_type::QueryResultType, tests::{common::compare_result_tuples, make_deep_tree, TEST_LEAF}, - Element, GroveDb, PathQuery, + Element, GroveDb, PathQuery, SizedQuery, }; #[test] @@ -294,7 +570,7 @@ mod tests { let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) .expect("should execute proof"); assert_eq!(result_set_one.len(), 1); @@ -304,7 +580,7 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) .expect("should execute proof"); assert_eq!(result_set_two.len(), 1); @@ -312,7 +588,10 @@ mod tests { let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) .expect("should merge path queries"); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db + 
.prove_query(&merged_path_query, None) + .unwrap() + .unwrap(); let (_, result_set_tree) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) .expect("should execute proof"); assert_eq!(result_set_tree.len(), 2); @@ -330,7 +609,7 @@ mod tests { let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) .expect("should execute proof"); assert_eq!(result_set_one.len(), 1); @@ -340,7 +619,7 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) .expect("should execute proof"); assert_eq!(result_set_two.len(), 1); @@ -350,7 +629,10 @@ mod tests { assert_eq!(merged_path_query.path, vec![TEST_LEAF.to_vec()]); assert_eq!(merged_path_query.query.query.items.len(), 2); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db + .prove_query(&merged_path_query, None) + .unwrap() + .unwrap(); let (_, result_set_merged) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) .expect("should execute proof"); @@ -374,7 +656,7 @@ mod tests { query_one.clone(), ); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) .expect("should execute proof"); assert_eq!(result_set_one.len(), 3); @@ -391,7 +673,7 @@ mod tests { query_two.clone(), ); - let proof = 
temp_db.prove_query(&path_query_two).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) .expect("should execute proof"); assert_eq!(result_set_two.len(), 2); @@ -408,7 +690,10 @@ mod tests { query_three.clone(), ); - let proof = temp_db.prove_query(&path_query_three).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query_three, None) + .unwrap() + .unwrap(); let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_three) .expect("should execute proof"); assert_eq!(result_set_two.len(), 2); @@ -544,7 +829,10 @@ mod tests { .expect("expected to get results"); assert_eq!(result_set_merged.len(), 7); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db + .prove_query(&merged_path_query, None) + .unwrap() + .unwrap(); let (_, proved_result_set_merged) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) .expect("should execute proof"); @@ -589,7 +877,7 @@ mod tests { query_one, ); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) .expect("should execute proof"); assert_eq!(result_set_one.len(), 6); @@ -606,7 +894,7 @@ mod tests { query_two, ); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) .expect("should execute proof"); assert_eq!(result_set_two.len(), 2); @@ -615,7 +903,10 @@ mod tests { .expect("expect to merge path queries"); assert_eq!(merged_path_query.path, vec![b"deep_leaf".to_vec()]); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db 
+ .prove_query(&merged_path_query, None) + .unwrap() + .unwrap(); let (_, result_set_merged) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) .expect("should execute proof"); @@ -655,7 +946,7 @@ mod tests { let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) .expect("should execute proof"); assert_eq!(result_set.len(), 1); @@ -665,7 +956,7 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) .expect("should execute proof"); assert_eq!(result_set.len(), 1); @@ -677,7 +968,10 @@ mod tests { query_three, ); - let proof = temp_db.prove_query(&path_query_three).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query_three, None) + .unwrap() + .unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_three) .expect("should execute proof"); assert_eq!(result_set.len(), 2); @@ -686,7 +980,10 @@ mod tests { PathQuery::merge(vec![&path_query_one, &path_query_two, &path_query_three]) .expect("should merge three queries"); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db + .prove_query(&merged_path_query, None) + .unwrap() + .unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) .expect("should execute proof"); assert_eq!(result_set.len(), 4); @@ -705,7 +1002,7 @@ mod tests { let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), 
b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) .expect("should execute proof"); assert_eq!(result_set.len(), 1); @@ -715,7 +1012,7 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) .expect("should execute proof"); assert_eq!(result_set.len(), 1); @@ -723,7 +1020,10 @@ mod tests { let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) .expect("should merge three queries"); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db + .prove_query(&merged_path_query, None) + .unwrap() + .unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) .expect("should execute proof"); assert_eq!(result_set.len(), 2); @@ -738,7 +1038,7 @@ mod tests { query_one, ); - let proof = temp_db.prove_query(&path_query_one).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) .expect("should execute proof"); assert_eq!(result_set.len(), 2); @@ -755,7 +1055,7 @@ mod tests { query_one, ); - let proof = temp_db.prove_query(&path_query_two).unwrap().unwrap(); + let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) .expect("should execute proof"); assert_eq!(result_set.len(), 3); @@ -838,9 +1138,450 @@ mod tests { .expect("expected to get results"); 
assert_eq!(result_set_merged.len(), 4); - let proof = temp_db.prove_query(&merged_path_query).unwrap().unwrap(); + let proof = temp_db + .prove_query(&merged_path_query, None) + .unwrap() + .unwrap(); let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) .expect("should execute proof"); assert_eq!(result_set.len(), 4); } + + #[test] + fn test_path_query_items_with_subquery_and_inner_subquery_path() { + // Constructing the keys and paths + let root_path_key_1 = b"root_path_key_1".to_vec(); + let root_path_key_2 = b"root_path_key_2".to_vec(); + let root_item_key = b"root_item_key".to_vec(); + let subquery_path_key_1 = b"subquery_path_key_1".to_vec(); + let subquery_path_key_2 = b"subquery_path_key_2".to_vec(); + let subquery_item_key = b"subquery_item_key".to_vec(); + let inner_subquery_path_key = b"inner_subquery_path_key".to_vec(); + + // Constructing the subquery + let subquery = Query { + items: vec![QueryItem::Key(subquery_item_key.clone())], + default_subquery_branch: SubqueryBranch { + subquery_path: Some(vec![inner_subquery_path_key.clone()]), + subquery: None, + }, + left_to_right: true, + conditional_subquery_branches: None, + }; + + // Constructing the PathQuery + let path_query = PathQuery { + path: vec![root_path_key_1.clone(), root_path_key_2.clone()], + query: SizedQuery { + query: Query { + items: vec![QueryItem::Key(root_item_key.clone())], + default_subquery_branch: SubqueryBranch { + subquery_path: Some(vec![ + subquery_path_key_1.clone(), + subquery_path_key_2.clone(), + ]), + subquery: Some(Box::new(subquery)), + }, + left_to_right: true, + conditional_subquery_branches: None, + }, + limit: Some(2), + offset: None, + }, + }; + + { + let path = vec![root_path_key_1.as_slice()]; + let first = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + first, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(root_path_key_2.clone())]), + has_subquery: 
HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&root_path_key_2)), + } + ); + } + + { + let path = vec![root_path_key_1.as_slice(), root_path_key_2.as_slice()]; + + let second = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + second, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(root_item_key.clone())]), + has_subquery: HasSubquery::Always, /* This is correct because there's a + * subquery for one item */ + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![ + root_path_key_1.as_slice(), + root_path_key_2.as_slice(), + root_item_key.as_slice(), + ]; + + let third = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + third, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(subquery_path_key_1.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&subquery_path_key_1)) + } + ); + } + + { + let path = vec![ + root_path_key_1.as_slice(), + root_path_key_2.as_slice(), + root_item_key.as_slice(), + subquery_path_key_1.as_slice(), + ]; + + let fourth = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + fourth, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(subquery_path_key_2.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&subquery_path_key_2)) + } + ); + } + + { + let path = vec![ + root_path_key_1.as_slice(), + root_path_key_2.as_slice(), + root_item_key.as_slice(), + subquery_path_key_1.as_slice(), + subquery_path_key_2.as_slice(), + ]; + + let fifth = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + fifth, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(subquery_item_key.clone())]), + has_subquery: HasSubquery::Always, /* This means that we should be able to + * add items 
underneath */ + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![ + root_path_key_1.as_slice(), + root_path_key_2.as_slice(), + root_item_key.as_slice(), + subquery_path_key_1.as_slice(), + subquery_path_key_2.as_slice(), + subquery_item_key.as_slice(), + ]; + + let sixth = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + sixth, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(inner_subquery_path_key.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: None, + } + ); + } + } + + #[test] + fn test_path_query_items_with_subquery_path() { + // Constructing the keys and paths + let root_path_key = b"higher".to_vec(); + let dash_key = b"dash".to_vec(); + let quantum_key = b"quantum".to_vec(); + + // Constructing the PathQuery + let path_query = PathQuery { + path: vec![root_path_key.clone()], + query: SizedQuery { + query: Query { + items: vec![QueryItem::RangeFull(RangeFull)], + default_subquery_branch: SubqueryBranch { + subquery_path: Some(vec![quantum_key.clone()]), + subquery: None, + }, + left_to_right: true, + conditional_subquery_branches: None, + }, + limit: Some(100), + offset: None, + }, + }; + + // Validating the PathQuery structure + { + let path = vec![root_path_key.as_slice()]; + let first = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + first, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::RangeFull(RangeFull)]), + has_subquery: HasSubquery::Always, + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![root_path_key.as_slice(), dash_key.as_slice()]; + + let second = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + second, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(quantum_key.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: None, // There should be no path 
because we are at the end of the path + } + ); + } + } + + #[test] + fn test_conditional_subquery_refusing_elements() { + let empty_vec: Vec = vec![]; + let zero_vec: Vec = vec![0]; + + let mut conditional_subquery_branches = IndexMap::new(); + conditional_subquery_branches.insert( + QueryItem::Key(b"".to_vec()), + SubqueryBranch { + subquery_path: Some(vec![zero_vec.clone()]), + subquery: Some(Query::new().into()), + }, + ); + + let path_query = PathQuery { + path: vec![TEST_LEAF.to_vec()], + query: SizedQuery { + query: Query { + items: vec![QueryItem::RangeFull(RangeFull)], + default_subquery_branch: SubqueryBranch { + subquery_path: Some(vec![zero_vec.clone()]), + subquery: None, + }, + left_to_right: true, + conditional_subquery_branches: Some(conditional_subquery_branches), + }, + limit: Some(100), + offset: None, + }, + }; + + { + let path = vec![TEST_LEAF, empty_vec.as_slice()]; + + let second = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + second, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(zero_vec.clone())]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&zero_vec)), + } + ); + } + } + + #[test] + fn test_complex_path_query_with_conditional_subqueries() { + let identity_id = + hex::decode("8b8948a6801501bbe0431e3d994dcf71cf5a2a0939fe51b0e600076199aba4fb") + .unwrap(); + + let key_20 = vec![20u8]; + + let key_80 = vec![80u8]; + + let inner_conditional_subquery_branches = IndexMap::from([( + QueryItem::Key(vec![80]), + SubqueryBranch { + subquery_path: None, + subquery: Some(Box::new(Query { + items: vec![QueryItem::RangeFull(RangeFull)], + default_subquery_branch: SubqueryBranch { + subquery_path: None, + subquery: None, + }, + left_to_right: true, + conditional_subquery_branches: None, + })), + }, + )]); + + let conditional_subquery_branches = IndexMap::from([ + ( + QueryItem::Key(vec![]), + SubqueryBranch { + subquery_path: None, + 
subquery: Some(Box::new(Query { + items: vec![QueryItem::Key(identity_id.to_vec())], + default_subquery_branch: SubqueryBranch { + subquery_path: None, + subquery: None, + }, + left_to_right: true, + conditional_subquery_branches: None, + })), + }, + ), + ( + QueryItem::Key(vec![20]), + SubqueryBranch { + subquery_path: Some(vec![identity_id.to_vec()]), + subquery: Some(Box::new(Query { + items: vec![QueryItem::Key(vec![80]), QueryItem::Key(vec![0xc0])], + default_subquery_branch: SubqueryBranch { + subquery_path: None, + subquery: None, + }, + conditional_subquery_branches: Some( + inner_conditional_subquery_branches.clone(), + ), + left_to_right: true, + })), + }, + ), + ]); + + let path_query = PathQuery { + path: vec![], + query: SizedQuery { + query: Query { + items: vec![QueryItem::Key(vec![20]), QueryItem::Key(vec![96])], + default_subquery_branch: SubqueryBranch { + subquery_path: None, + subquery: None, + }, + conditional_subquery_branches: Some(conditional_subquery_branches.clone()), + left_to_right: true, + }, + limit: Some(100), + offset: None, + }, + }; + + { + let path = vec![]; + let first = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + first, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(vec![20]), QueryItem::Key(vec![96]),]), + has_subquery: HasSubquery::Conditionally(Cow::Borrowed( + &conditional_subquery_branches + )), + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![key_20.as_slice()]; + let query = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + query, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(identity_id.clone()),]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: Some(Cow::Borrowed(&identity_id)), + } + ); + } + + { + let path = vec![key_20.as_slice(), identity_id.as_slice()]; + let query = path_query + .query_items_at_path(&path) + .expect("expected query 
items"); + + assert_eq!( + query, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::Key(vec![80]), QueryItem::Key(vec![0xc0]),]), + has_subquery: HasSubquery::Conditionally(Cow::Borrowed( + &inner_conditional_subquery_branches + )), + left_to_right: true, + in_path: None, + } + ); + } + + { + let path = vec![key_20.as_slice(), identity_id.as_slice(), key_80.as_slice()]; + let query = path_query + .query_items_at_path(&path) + .expect("expected query items"); + + assert_eq!( + query, + SinglePathSubquery { + items: Cow::Owned(vec![QueryItem::RangeFull(RangeFull)]), + has_subquery: HasSubquery::NoSubquery, + left_to_right: true, + in_path: None, + } + ); + } + } } diff --git a/grovedb/src/query_result_type.rs b/grovedb/src/query_result_type.rs index 289ffb26..6bf8bd5b 100644 --- a/grovedb/src/query_result_type.rs +++ b/grovedb/src/query_result_type.rs @@ -1,41 +1,19 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Determines the query result form use std::{ collections::{BTreeMap, HashMap}, + fmt, vec::IntoIter, }; pub use grovedb_merk::proofs::query::{Key, Path, PathKey}; -use crate::{operations::proof::util::ProvedPathKeyValue, Element, Error}; +use crate::{ + operations::proof::util::{ + hex_to_ascii, path_hex_to_ascii, ProvedPathKeyOptionalValue, ProvedPathKeyValue, + }, + Element, Error, +}; #[derive(Copy, Clone)] /// Query result type @@ -48,13 +26,116 @@ pub enum QueryResultType { QueryPathKeyElementTrioResultType, } +impl fmt::Display for QueryResultType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + QueryResultType::QueryElementResultType => write!(f, "QueryElementResultType"), + QueryResultType::QueryKeyElementPairResultType => { + write!(f, "QueryKeyElementPairResultType") + } + QueryResultType::QueryPathKeyElementTrioResultType => { + write!(f, "QueryPathKeyElementTrioResultType") + } + } + } +} + /// Query result elements -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct QueryResultElements { /// Elements pub elements: Vec, } +impl fmt::Display for QueryResultElements { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "QueryResultElements {{")?; + for (index, element) in self.elements.iter().enumerate() { + writeln!(f, " {}: {}", index, element)?; + } + write!(f, "}}") + } +} + +#[derive(Debug, Clone)] +pub enum BTreeMapLevelResultOrItem { + BTreeMapLevelResult(BTreeMapLevelResult), + ResultItem(Element), +} + +/// BTreeMap level result +#[derive(Debug, Clone)] +pub struct BTreeMapLevelResult { + pub key_values: BTreeMap, +} + +impl fmt::Display for BTreeMapLevelResultOrItem { + fn fmt(&self, f: 
&mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BTreeMapLevelResultOrItem::BTreeMapLevelResult(result) => { + write!(f, "{}", result) + } + BTreeMapLevelResultOrItem::ResultItem(element) => { + write!(f, "{}", element) + } + } + } +} + +impl fmt::Display for BTreeMapLevelResult { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "BTreeMapLevelResult {{")?; + self.fmt_inner(f, 1)?; + write!(f, "}}") + } +} + +impl BTreeMapLevelResult { + fn fmt_inner(&self, f: &mut fmt::Formatter<'_>, indent: usize) -> fmt::Result { + for (key, value) in &self.key_values { + write!(f, "{:indent$}", "", indent = indent * 2)?; + write!(f, "{}: ", hex_to_ascii(key))?; + match value { + BTreeMapLevelResultOrItem::BTreeMapLevelResult(result) => { + writeln!(f, "BTreeMapLevelResult {{")?; + result.fmt_inner(f, indent + 1)?; + write!(f, "{:indent$}}}", "", indent = indent * 2)?; + } + BTreeMapLevelResultOrItem::ResultItem(element) => { + write!(f, "{}", element)?; + } + } + writeln!(f)?; + } + Ok(()) + } +} + +impl BTreeMapLevelResult { + pub fn len_of_values_at_path(&self, path: &[&[u8]]) -> u16 { + let mut current = self; + + // Traverse the path + for segment in path { + match current.key_values.get(*segment) { + Some(BTreeMapLevelResultOrItem::BTreeMapLevelResult(next_level)) => { + current = next_level; + } + Some(BTreeMapLevelResultOrItem::ResultItem(_)) => { + // We've reached a ResultItem before the end of the path + return 0; + } + None => { + // Path not found + return 0; + } + } + } + + current.key_values.len() as u16 + } +} + impl QueryResultElements { /// New pub fn new() -> Self { @@ -62,7 +143,7 @@ impl QueryResultElements { } /// From elements - pub(crate) fn from_elements(elements: Vec) -> Self { + pub fn from_elements(elements: Vec) -> Self { QueryResultElements { elements } } @@ -209,6 +290,21 @@ impl QueryResultElements { map } + /// To path to key, elements btree map + pub fn to_path_to_key_elements_btree_map(self) -> BTreeMap> { + 
let mut map: BTreeMap> = BTreeMap::new(); + + for result_item in self.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((path, key, element)) = + result_item + { + map.entry(path).or_default().insert(key, element); + } + } + + map + } + /// To last path to key, elements btree map pub fn to_last_path_to_key_elements_btree_map(self) -> BTreeMap> { let mut map: BTreeMap, BTreeMap> = BTreeMap::new(); @@ -218,9 +314,7 @@ impl QueryResultElements { result_item { if let Some(last) = path.pop() { - map.entry(last) - .or_insert_with(BTreeMap::new) - .insert(key, element); + map.entry(last).or_default().insert(key, element); } } } @@ -246,6 +340,60 @@ impl QueryResultElements { map } + /// To last path to elements btree map + /// This is useful if the key is not import + pub fn to_btree_map_level_results(self) -> BTreeMapLevelResult { + fn insert_recursive( + current_level: &mut BTreeMapLevelResult, + mut path: std::vec::IntoIter>, + key: Vec, + element: Element, + ) { + if let Some(segment) = path.next() { + let next_level = current_level.key_values.entry(segment).or_insert_with(|| { + BTreeMapLevelResultOrItem::BTreeMapLevelResult(BTreeMapLevelResult { + key_values: BTreeMap::new(), + }) + }); + + match next_level { + BTreeMapLevelResultOrItem::BTreeMapLevelResult(inner) => { + insert_recursive(inner, path, key, element); + } + BTreeMapLevelResultOrItem::ResultItem(_) => { + // This shouldn't happen in a well-formed structure, but we'll handle it + // anyway + *next_level = + BTreeMapLevelResultOrItem::BTreeMapLevelResult(BTreeMapLevelResult { + key_values: BTreeMap::new(), + }); + if let BTreeMapLevelResultOrItem::BTreeMapLevelResult(inner) = next_level { + insert_recursive(inner, path, key, element); + } + } + } + } else { + current_level + .key_values + .insert(key, BTreeMapLevelResultOrItem::ResultItem(element)); + } + } + + let mut root = BTreeMapLevelResult { + key_values: BTreeMap::new(), + }; + + for result_item in self.elements { + 
if let QueryResultElement::PathKeyElementTrioResultItem((path, key, element)) = + result_item + { + insert_recursive(&mut root, path.into_iter(), key, element); + } + } + + root + } + /// To last path to keys btree map /// This is useful if for example the element is a sum item and isn't /// important Used in Platform Drive for getting voters for multiple @@ -257,9 +405,9 @@ impl QueryResultElements { if let QueryResultElement::PathKeyElementTrioResultItem((mut path, key, _)) = result_item { - if let Some(_) = path.pop() { + if path.pop().is_some() { if let Some(last) = path.pop() { - map.entry(last).or_insert_with(Vec::new).push(key); + map.entry(last).or_default().push(key); } } } @@ -276,7 +424,7 @@ impl Default for QueryResultElements { } /// Query result element -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq, PartialEq)] pub enum QueryResultElement { /// Element result item ElementResultItem(Element), @@ -286,6 +434,33 @@ pub enum QueryResultElement { PathKeyElementTrioResultItem(PathKeyElementTrio), } +impl fmt::Display for QueryResultElement { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + QueryResultElement::ElementResultItem(element) => { + write!(f, "ElementResultItem({})", element) + } + QueryResultElement::KeyElementPairResultItem((key, element)) => { + write!( + f, + "KeyElementPairResultItem(key: {}, element: {})", + hex_to_ascii(key), + element + ) + } + QueryResultElement::PathKeyElementTrioResultItem((path, key, element)) => { + write!( + f, + "PathKeyElementTrioResultItem(path: {}, key: {}, element: {})", + path_hex_to_ascii(path), + hex_to_ascii(key), + element + ) + } + } + } +} + #[cfg(feature = "full")] impl QueryResultElement { /// Map element @@ -341,6 +516,23 @@ impl TryFrom for PathKeyOptionalElementTrio { } } +#[cfg(any(feature = "full", feature = "verify"))] +impl TryFrom for PathKeyOptionalElementTrio { + type Error = Error; + + fn try_from(proved_path_key_value: ProvedPathKeyOptionalValue) -> Result 
{ + let element = proved_path_key_value + .value + .map(|e| Element::deserialize(e.as_slice())) + .transpose()?; + Ok(( + proved_path_key_value.path, + proved_path_key_value.key, + element, + )) + } +} + #[cfg(feature = "full")] #[cfg(test)] mod tests { diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index 38c3f147..09fc1684 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -86,6 +86,57 @@ pub enum ReferencePathType { SiblingReference(Vec), } +// Helper function to display paths +fn display_path(path: &[Vec]) -> String { + path.iter() + .map(hex::encode) + .collect::>() + .join("/") +} + +impl fmt::Display for ReferencePathType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ReferencePathType::AbsolutePathReference(path) => { + write!(f, "AbsolutePathReference({})", display_path(path)) + } + ReferencePathType::UpstreamRootHeightReference(height, path) => { + write!( + f, + "UpstreamRootHeightReference({}, {})", + height, + display_path(path) + ) + } + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference(height, path) => { + write!( + f, + "UpstreamRootHeightWithParentPathAdditionReference({}, {})", + height, + display_path(path) + ) + } + ReferencePathType::UpstreamFromElementHeightReference(height, path) => { + write!( + f, + "UpstreamFromElementHeightReference({}, {})", + height, + display_path(path) + ) + } + ReferencePathType::CousinReference(key) => { + write!(f, "CousinReference({})", hex::encode(key)) + } + ReferencePathType::RemovedCousinReference(path) => { + write!(f, "RemovedCousinReference({})", display_path(path)) + } + ReferencePathType::SiblingReference(key) => { + write!(f, "SiblingReference({})", hex::encode(key)) + } + } + } +} + #[cfg(any(feature = "full", feature = "verify"))] impl ReferencePathType { /// Given the reference path type and the current qualified path (path+key), @@ -129,7 +180,7 @@ pub fn 
path_from_reference_qualified_path_type>( ) -> Result>, Error> { match current_qualified_path.split_last() { None => Err(Error::CorruptedPath( - "qualified path should always have an element", + "qualified path should always have an element".to_string(), )), Some((key, path)) => { path_from_reference_path_type(reference_path_type, path, Some(key.as_ref())) @@ -168,7 +219,7 @@ pub fn path_from_reference_path_type>( no_of_elements_to_keep, mut path, ) => { - if usize::from(no_of_elements_to_keep) > current_path.len() || current_path.len() == 0 { + if usize::from(no_of_elements_to_keep) > current_path.len() || current_path.is_empty() { return Err(Error::InvalidInput( "reference stored path cannot satisfy reference constraints", )); @@ -478,7 +529,7 @@ mod tests { ); let proof = db - .prove_query(&path_query) + .prove_query(&path_query, None) .unwrap() .expect("should generate proof"); let (hash, result) = diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 5f7db1f3..b6533868 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -364,7 +364,7 @@ impl GroveDb { || !state_sync_info.processed_prefixes.is_empty() { return Err(Error::InternalError( - "GroveDB has already started a snapshot syncing", + "GroveDB has already started a snapshot syncing".to_string(), )); } @@ -384,7 +384,9 @@ impl GroveDb { .insert(root_prefix, root_prefix_state_sync_info); state_sync_info.app_hash = app_hash; } else { - return Err(Error::InternalError("Unable to open merk for replication")); + return Err(Error::InternalError( + "Unable to open merk for replication".to_string(), + )); } Ok(state_sync_info) @@ -424,7 +426,9 @@ impl GroveDb { replication::util_split_global_chunk_id(global_chunk_id, &state_sync_info.app_hash)?; if state_sync_info.current_prefixes.is_empty() { - return Err(Error::InternalError("GroveDB is not in syncing mode")); + return Err(Error::InternalError( + "GroveDB is not in syncing mode".to_string(), + )); } if let 
Some(subtree_state_sync) = state_sync_info.current_prefixes.remove(&chunk_prefix) { if let Ok((res, mut new_subtree_state_sync)) = @@ -453,12 +457,16 @@ impl GroveDb { // Subtree is finished. We can save it. match new_subtree_state_sync.restorer.take() { - None => Err(Error::InternalError("Unable to finalize subtree")), + None => Err(Error::InternalError( + "Unable to finalize subtree".to_string(), + )), Some(restorer) => { if (new_subtree_state_sync.num_processed_chunks > 0) && (restorer.finalize().is_err()) { - return Err(Error::InternalError("Unable to finalize Merk")); + return Err(Error::InternalError( + "Unable to finalize Merk".to_string(), + )); } state_sync_info.processed_prefixes.insert(chunk_prefix); @@ -479,16 +487,20 @@ impl GroveDb { next_chunk_ids.extend(res); Ok((next_chunk_ids, new_state_sync_info)) } else { - Err(Error::InternalError("Unable to discover Subtrees")) + Err(Error::InternalError( + "Unable to discover Subtrees".to_string(), + )) } } } } } else { - Err(Error::InternalError("Unable to process incoming chunk")) + Err(Error::InternalError( + "Unable to process incoming chunk".to_string(), + )) } } else { - Err(Error::InternalError("Invalid incoming prefix")) + Err(Error::InternalError("Invalid incoming prefix".to_string())) } } @@ -510,7 +522,7 @@ impl GroveDb { Some(restorer) => { if !state_sync_info.pending_chunks.contains(chunk_id) { return Err(Error::InternalError( - "Incoming global_chunk_id not expected", + "Incoming global_chunk_id not expected".to_string(), )); } state_sync_info.pending_chunks.remove(chunk_id); @@ -529,7 +541,7 @@ impl GroveDb { } _ => { return Err(Error::InternalError( - "Unable to process incoming chunk", + "Unable to process incoming chunk".to_string(), )); } }; @@ -543,7 +555,9 @@ impl GroveDb { } } _ => { - return Err(Error::InternalError("Invalid internal state (restorer")); + return Err(Error::InternalError( + "Invalid internal state (restorer".to_string(), + )); } } @@ -593,7 +607,9 @@ impl GroveDb { let 
root_chunk_prefix = prefix.to_vec(); res.push(root_chunk_prefix.to_vec()); } else { - return Err(Error::InternalError("Unable to open Merk for replication")); + return Err(Error::InternalError( + "Unable to open Merk for replication".to_string(), + )); } } } diff --git a/grovedb/src/tests/common.rs b/grovedb/src/tests/common.rs index 10f05b80..2fe8dfde 100644 --- a/grovedb/src/tests/common.rs +++ b/grovedb/src/tests/common.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Common tests use grovedb_path::SubtreePath; @@ -48,7 +20,7 @@ fn deserialize_and_extract_item_bytes(raw_bytes: &[u8]) -> Result, Error let elem = Element::deserialize(raw_bytes)?; match elem { Element::Item(item, _) => Ok(item), - _ => Err(Error::CorruptedPath("expected only item type")), + _ => Err(Error::CorruptedPath("expected only item type".to_string())), } } diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index 95e0d2b1..43a9c34d 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Tests pub mod common; @@ -47,8 +19,9 @@ use tempfile::TempDir; use self::common::EMPTY_PATH; use super::*; use crate::{ - query_result_type::QueryResultType::QueryKeyElementPairResultType, - reference_path::ReferencePathType, tests::common::compare_result_tuples, + query_result_type::{QueryResultType, QueryResultType::QueryKeyElementPairResultType}, + reference_path::ReferencePathType, + tests::common::compare_result_tuples, }; pub const TEST_LEAF: &[u8] = b"test_leaf"; @@ -157,6 +130,10 @@ pub fn make_deep_tree() -> TempGroveDb { // deeper_4 // k10,v10 // k11,v11 + // deeper_5 + // k12,v12 + // k13,v13 + // k14,v14 // Insert elements into grovedb instance let temp_db = make_test_grovedb(); @@ -339,6 +316,16 @@ pub fn make_deep_tree() -> TempGroveDb { ) .unwrap() .expect("successful subtree insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_2"].as_ref(), + b"deeper_5", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); // Insert level 3 nodes temp_db .insert( @@ -452,739 +439,511 @@ pub fn make_deep_tree() -> TempGroveDb { .unwrap() .expect("successful subtree insert"); temp_db -} - -#[test] -fn test_init() { - let tmp_dir = TempDir::new().unwrap(); - GroveDb::open(tmp_dir).expect("empty tree is ok"); -} - -#[test] -fn test_element_with_flags() { - let db = make_test_grovedb(); - - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"elem1", - Element::new_item(b"flagless".to_vec()), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"elem2", - Element::new_item_with_flags(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"elem3", - 
Element::new_tree_with_flags(None, Some([1].to_vec())), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - db.insert( - [TEST_LEAF, b"key1", b"elem3"].as_ref(), - b"elem4", - Element::new_reference_with_flags( - ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"key1".to_vec(), - b"elem2".to_vec(), - ]), - Some([9].to_vec()), - ), - None, - None, - ) - .unwrap() - .expect("should insert subtree successfully"); - - let element_without_flag = db - .get([TEST_LEAF, b"key1"].as_ref(), b"elem1", None) - .unwrap() - .expect("should get successfully"); - let element_with_flag = db - .get([TEST_LEAF, b"key1"].as_ref(), b"elem2", None) - .unwrap() - .expect("should get successfully"); - let tree_element_with_flag = db - .get([TEST_LEAF, b"key1"].as_ref(), b"elem3", None) - .unwrap() - .expect("should get successfully"); - let flagged_ref_follow = db - .get([TEST_LEAF, b"key1", b"elem3"].as_ref(), b"elem4", None) - .unwrap() - .expect("should get successfully"); - - let mut query = Query::new(); - query.insert_key(b"elem4".to_vec()); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"elem3".to_vec()], - SizedQuery::new(query, None, None), - ); - let (flagged_ref_no_follow, _) = db - .query_raw( - &path_query, - true, - true, - true, - QueryKeyElementPairResultType, - None, - ) - .unwrap() - .expect("should get successfully"); - - assert_eq!( - element_without_flag, - Element::Item(b"flagless".to_vec(), None) - ); - assert_eq!( - element_with_flag, - Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) - ); - assert_eq!(tree_element_with_flag.get_flags(), &Some([1].to_vec())); - assert_eq!( - flagged_ref_follow, - Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) - ); - assert_eq!( - flagged_ref_no_follow.to_key_elements()[0], - ( - b"elem4".to_vec(), - Element::Reference( - ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"key1".to_vec(), 
- b"elem2".to_vec() - ]), - None, - Some([9].to_vec()) - ) + .insert( + [DEEP_LEAF, b"deep_node_2", b"deeper_5"].as_ref(), + b"key12", + Element::new_item(b"value12".to_vec()), + None, + None, ) - ); - - // Test proofs with flags - let mut query = Query::new(); - query.insert_all(); - - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"key1".to_vec()], - SizedQuery::new(query, None, None), - ); - let proof = db - .prove_query(&path_query) - .unwrap() - .expect("should successfully create proof"); - let (root_hash, result_set) = - GroveDb::verify_query_raw(&proof, &path_query).expect("should verify proof"); - assert_eq!(root_hash, db.grove_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - assert_eq!( - Element::deserialize(&result_set[0].value).expect("should deserialize element"), - Element::Item(b"flagless".to_vec(), None) - ); - assert_eq!( - Element::deserialize(&result_set[1].value).expect("should deserialize element"), - Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) - ); - assert_eq!( - Element::deserialize(&result_set[2].value) - .expect("should deserialize element") - .get_flags(), - &Some([1].to_vec()) - ); -} - -#[test] -fn test_cannot_update_populated_tree_item() { - // This test shows that you cannot update a tree item - // in a way that disconnects it's root hash from that of - // the merk it points to. 
- let db = make_deep_tree(); - - let old_element = db - .get([TEST_LEAF].as_ref(), b"innertree", None) - .unwrap() - .expect("should fetch item"); - - let new_element = Element::empty_tree(); - db.insert( - [TEST_LEAF].as_ref(), - b"innertree", - new_element.clone(), - None, - None, - ) - .unwrap() - .expect_err("should not override tree"); - - let current_element = db - .get([TEST_LEAF].as_ref(), b"innertree", None) .unwrap() - .expect("should fetch item"); - - assert_eq!(current_element, old_element); - assert_ne!(current_element, new_element); -} - -#[test] -fn test_changes_propagated() { - let db = make_test_grovedb(); - let old_hash = db.root_hash(None).unwrap().unwrap(); - let element = Element::new_item(b"ayy".to_vec()); - - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); - - db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - - assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("successful get"), - element - ); - assert_ne!(old_hash, db.root_hash(None).unwrap().unwrap()); -} - -// TODO: Add solid test cases to this - -#[test] -fn test_references() { - let db = make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"merk_1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"merk_1"].as_ref(), - b"key1", - Element::new_item(b"value1".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"merk_1"].as_ref(), - b"key2", - Element::new_item(b"value2".to_vec()), - None, - None, - ) - .unwrap() - 
.expect("successful subtree insert"); - - db.insert( - [TEST_LEAF].as_ref(), - b"merk_2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // db.insert([TEST_LEAF, b"merk_2"].as_ref(), b"key2", - // Element::new_item(b"value2".to_vec()), None).expect("successful subtree - // insert"); - db.insert( - [TEST_LEAF, b"merk_2"].as_ref(), - b"key1", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"merk_1".to_vec(), - b"key1".to_vec(), - ])), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"merk_2"].as_ref(), - b"key2", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"merk_1".to_vec(), - b"key2".to_vec(), - ])), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - assert!(db - .get([TEST_LEAF].as_ref(), b"merk_1", None) + .expect("successful subtree insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_2", b"deeper_5"].as_ref(), + b"key13", + Element::new_item(b"value13".to_vec()), + None, + None, + ) .unwrap() - .is_ok()); - assert!(db - .get([TEST_LEAF].as_ref(), b"merk_2", None) + .expect("successful subtree insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_2", b"deeper_5"].as_ref(), + b"key14", + Element::new_item(b"value14".to_vec()), + None, + None, + ) .unwrap() - .is_ok()); + .expect("successful subtree insert"); + temp_db } -#[test] -fn test_follow_references() { - let db = make_test_grovedb(); - let element = Element::new_item(b"ayy".to_vec()); - - // Insert an item to refer to - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"key3", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - - // Insert a reference - db.insert( - [TEST_LEAF].as_ref(), - 
b"reference_key", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - ])), - None, - None, - ) - .unwrap() - .expect("successful reference insert"); +pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { + // Tree Structure + // root + // deep_leaf + // deep_node_1 + // "" -> "empty" + // a -> "storage" + // c + // 1 (sum tree) + // [0;32], 1 + // [1;32], 1 + // d + // 0,v1 + // 1 (sum tree) + // [0;32], 4 + // [1;32], 1 + // e + // 0,v4 + // 1 (sum tree) + // [0;32], 1 + // [1;32], 4 + // f + // 0,v1 + // 1 (sum tree) + // [0;32], 1 + // [1;32], 4 + // g + // 0,v4 + // 1 (sum tree) + // [3;32], 4 + // [5;32], 4 + // h -> "h" + // .. -> .. + // z -> "z" - assert_eq!( - db.get([TEST_LEAF].as_ref(), b"reference_key", None) - .unwrap() - .expect("successful get"), - element - ); -} + let temp_db = make_test_grovedb(); -#[test] -fn test_reference_must_point_to_item() { - let db = make_test_grovedb(); + // Add deep_leaf to root + temp_db + .insert(EMPTY_PATH, DEEP_LEAF, Element::empty_tree(), None, None) + .unwrap() + .expect("successful root tree leaf insert"); - let result = db + // Add deep_node_1 to deep_leaf + temp_db .insert( - [TEST_LEAF].as_ref(), - b"reference_key_1", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"reference_key_2".to_vec(), - ])), + [DEEP_LEAF].as_ref(), + b"deep_node_1", + Element::empty_tree(), None, None, ) - .unwrap(); - - assert!(matches!(result, Err(Error::MissingReference(_)))); -} - -#[test] -fn test_too_many_indirections() { - use crate::operations::get::MAX_REFERENCE_HOPS; - let db = make_test_grovedb(); - - let keygen = |idx| format!("key{}", idx).bytes().collect::>(); - - db.insert( - [TEST_LEAF].as_ref(), - b"key0", - Element::new_item(b"oops".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful item insert"); + .unwrap() + .expect("successful subtree insert"); - for i in 
1..=(MAX_REFERENCE_HOPS) { - db.insert( - [TEST_LEAF].as_ref(), - &keygen(i), - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - keygen(i - 1), - ])), + // Add a -> "storage" to deep_node_1 + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1"].as_ref(), + b"", + Element::new_item("empty".as_bytes().to_vec()), None, None, ) .unwrap() - .expect("successful reference insert"); - } - - // Add one more reference - db.insert( - [TEST_LEAF].as_ref(), - &keygen(MAX_REFERENCE_HOPS + 1), - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - keygen(MAX_REFERENCE_HOPS), - ])), - None, - None, - ) - .unwrap() - .expect("expected insert"); - - let result = db - .get([TEST_LEAF].as_ref(), &keygen(MAX_REFERENCE_HOPS + 1), None) - .unwrap(); - - assert!(matches!(result, Err(Error::ReferenceLimit))); -} + .expect("successful item insert"); -#[test] -fn test_reference_value_affects_state() { - let db_one = make_test_grovedb(); - db_one + // Add a -> "storage" to deep_node_1 + temp_db .insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::new_item(vec![0]), + [DEEP_LEAF, b"deep_node_1"].as_ref(), + b"a", + Element::new_item("storage".as_bytes().to_vec()), None, None, ) .unwrap() - .expect("should insert item"); - db_one + .expect("successful item insert"); + + // Add c, d, e, f, g to deep_node_1 + for key in [b"c", b"d", b"e", b"f", b"g"].iter() { + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1"].as_ref(), + key.as_slice(), + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + } + + // Add sum tree to c + temp_db .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"ref", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"key1".to_vec(), - ])), + [DEEP_LEAF, b"deep_node_1", b"c"].as_ref(), + b"1", + Element::new_sum_tree(None), None, None, ) .unwrap() - .expect("should insert item"); + .expect("successful sum tree 
insert"); - let db_two = make_test_grovedb(); - db_two + // Add items to sum tree in c + temp_db .insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::new_item(vec![0]), + [DEEP_LEAF, b"deep_node_1", b"c", b"1"].as_ref(), + &[0; 32], + Element::SumItem(1, None), None, None, ) .unwrap() - .expect("should insert item"); - db_two + .expect("successful sum item insert"); + temp_db .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"ref", - Element::new_reference(ReferencePathType::UpstreamRootHeightReference( - 0, - vec![TEST_LEAF.to_vec(), b"key1".to_vec()], - )), + [DEEP_LEAF, b"deep_node_1", b"c", b"1"].as_ref(), + &[1; 32], + Element::SumItem(1, None), None, None, ) .unwrap() - .expect("should insert item"); + .expect("successful sum item insert"); - assert_ne!( - db_one - .root_hash(None) + // Add items to 4, 5, 6, 7 + for (key, value) in [(b"d", b"v1"), (b"e", b"v4"), (b"f", b"v1"), (b"g", b"v4")].iter() { + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", key.as_slice()].as_ref(), + b"0", + Element::new_item(value.to_vec()), + None, + None, + ) .unwrap() - .expect("should return root hash"), - db_two - .root_hash(None) + .expect("successful item insert"); + + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", key.as_slice()].as_ref(), + b"1", + Element::new_sum_tree(None), + None, + None, + ) .unwrap() - .expect("should return toor hash") - ); -} + .expect("successful sum tree insert"); + } -#[test] -fn test_tree_structure_is_persistent() { - let tmp_dir = TempDir::new().unwrap(); - let element = Element::new_item(b"ayy".to_vec()); - // Create a scoped GroveDB - let prev_root_hash = { - let mut db = GroveDb::open(tmp_dir.path()).unwrap(); - add_test_leaves(&mut db); + // Add items to sum trees in d, e, f + for key in [b"d", b"e", b"f"].iter() { + let (value1, value2) = if *key == b"d" { (4, 1) } else { (1, 4) }; - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful 
subtree 1 insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", key.as_slice(), b"1"].as_ref(), + &[0; 32], + Element::SumItem(value1, None), + None, + None, + ) + .unwrap() + .expect("successful sum item insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", key.as_slice(), b"1"].as_ref(), + &[1; 32], + Element::SumItem(value2, None), + None, + None, + ) + .unwrap() + .expect("successful sum item insert"); + } + + // Add items to sum tree in 7 + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", b"g", b"1"].as_ref(), + &[3; 32], + Element::SumItem(4, None), None, None, ) .unwrap() - .expect("successful subtree 2 insert"); - // Insert an element into subtree - db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element.clone(), + .expect("successful sum item insert"); + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1", b"g", b"1"].as_ref(), + &[5; 32], + Element::SumItem(4, None), None, None, ) .unwrap() - .expect("successful value insert"); - assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("successful get 1"), - element - ); - db.root_hash(None).unwrap().unwrap() - }; - // Open a persisted GroveDB - let db = GroveDb::open(tmp_dir).unwrap(); - assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("successful get 2"), - element - ); - assert!(db - .get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key4", None) - .unwrap() - .is_err()); - assert_eq!(prev_root_hash, db.root_hash(None).unwrap().unwrap()); -} - -#[test] -fn test_root_tree_leaves_are_noted() { - let db = make_test_grovedb(); - db.check_subtree_exists_path_not_found([TEST_LEAF].as_ref().into(), None) - .unwrap() - .expect("should exist"); - db.check_subtree_exists_path_not_found([ANOTHER_TEST_LEAF].as_ref().into(), None) - .unwrap() - .expect("should exist"); -} + .expect("successful sum item insert"); -#[test] 
-fn test_proof_for_invalid_path_root_key() { - let db = make_test_grovedb(); + // Add entries for all letters from "h" to "z" + for letter in b'h'..=b'z' { + temp_db + .insert( + [DEEP_LEAF, b"deep_node_1"].as_ref(), + &[letter], + Element::new_item(vec![letter]), + None, + None, + ) + .unwrap() + .expect(&format!("successful item insert for {}", letter as char)); + } - let query = Query::new(); - let path_query = PathQuery::new_unsized(vec![b"invalid_path_key".to_vec()], query); + temp_db +} - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); +mod tests { + use super::*; - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} + #[test] + fn test_init() { + let tmp_dir = TempDir::new().unwrap(); + GroveDb::open(tmp_dir).expect("empty tree is ok"); + } -#[test] -fn test_proof_for_invalid_path() { - let db = make_deep_tree(); + #[test] + fn test_element_with_flags() { + let db = make_test_grovedb(); - let query = Query::new(); - let path_query = - PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"invalid_key".to_vec()], query); + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"elem1", + Element::new_item(b"flagless".to_vec()), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"elem2", + Element::new_item_with_flags(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"elem3", + Element::new_tree_with_flags(None, Some([1].to_vec())), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF, 
b"key1", b"elem3"].as_ref(), + b"elem4", + Element::new_reference_with_flags( + ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"elem2".to_vec(), + ]), + Some([9].to_vec()), + ), + None, + None, + ) + .unwrap() + .expect("should insert subtree successfully"); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + let element_without_flag = db + .get([TEST_LEAF, b"key1"].as_ref(), b"elem1", None) + .unwrap() + .expect("should get successfully"); + let element_with_flag = db + .get([TEST_LEAF, b"key1"].as_ref(), b"elem2", None) + .unwrap() + .expect("should get successfully"); + let tree_element_with_flag = db + .get([TEST_LEAF, b"key1"].as_ref(), b"elem3", None) + .unwrap() + .expect("should get successfully"); + let flagged_ref_follow = db + .get([TEST_LEAF, b"key1", b"elem3"].as_ref(), b"elem4", None) + .unwrap() + .expect("should get successfully"); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); + let mut query = Query::new(); + query.insert_key(b"elem4".to_vec()); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"elem3".to_vec()], + SizedQuery::new(query, None, None), + ); + let (flagged_ref_no_follow, _) = db + .query_raw( + &path_query, + true, + true, + true, + QueryKeyElementPairResultType, + None, + ) + .unwrap() + .expect("should get successfully"); - let query = Query::new(); - let path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"invalid_key".to_vec(), - ], - query, - ); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); - - let query = 
Query::new(); - let path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec(), - b"invalid_key".to_vec(), - ], - query, - ); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); - - let query = Query::new(); - let path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"early_invalid_key".to_vec(), - b"deeper_1".to_vec(), - b"invalid_key".to_vec(), - ], - query, - ); + assert_eq!( + element_without_flag, + Element::Item(b"flagless".to_vec(), None) + ); + assert_eq!( + element_with_flag, + Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) + ); + assert_eq!(tree_element_with_flag.get_flags(), &Some([1].to_vec())); + assert_eq!( + flagged_ref_follow, + Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) + ); + assert_eq!( + flagged_ref_no_follow.to_key_elements()[0], + ( + b"elem4".to_vec(), + Element::Reference( + ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"elem2".to_vec() + ]), + None, + Some([9].to_vec()) + ) + ) + ); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + // Test proofs with flags + let mut query = Query::new(); + query.insert_all(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + SizedQuery::new(query, None, None), + ); + let proof = db + .prove_query(&path_query, None) + .unwrap() + .expect("should successfully create proof"); + let (root_hash, result_set) = + GroveDb::verify_query_raw(&proof, 
&path_query).expect("should verify proof"); + assert_eq!(root_hash, db.grove_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + assert_eq!( + Element::deserialize(&result_set[0].value).expect("should deserialize element"), + Element::Item(b"flagless".to_vec(), None) + ); + assert_eq!( + Element::deserialize(&result_set[1].value).expect("should deserialize element"), + Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) + ); + assert_eq!( + Element::deserialize(&result_set[2].value) + .expect("should deserialize element") + .get_flags(), + &Some([1].to_vec()) + ); + } -#[test] -fn test_proof_for_non_existent_data() { - let temp_db = make_test_grovedb(); + #[test] + fn test_cannot_update_populated_tree_item() { + // This test shows that you cannot update a tree item + // in a way that disconnects it's root hash from that of + // the merk it points to. + let db = make_deep_tree(); - let mut query = Query::new(); - query.insert_key(b"key1".to_vec()); + let old_element = db + .get([TEST_LEAF].as_ref(), b"innertree", None) + .unwrap() + .expect("should fetch item"); - // path to empty subtree - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + let new_element = Element::empty_tree(); + db.insert( + [TEST_LEAF].as_ref(), + b"innertree", + new_element.clone(), + None, + None, + ) + .unwrap() + .expect_err("should not override tree"); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + let current_element = db + .get([TEST_LEAF].as_ref(), b"innertree", None) + .unwrap() + .expect("should fetch item"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} + assert_eq!(current_element, old_element); + assert_ne!(current_element, new_element); + } -#[test] -fn test_path_query_proofs_without_subquery_with_reference() { - // Tree 
Structure - // root - // test_leaf - // innertree - // k1,v1 - // k2,v2 - // k3,v3 - // another_test_leaf - // innertree2 - // k3,v3 - // k4, reference to k1 in innertree - // k5, reference to k4 in innertree3 - // innertree3 - // k4,v4 + #[test] + fn test_changes_propagated() { + let db = make_test_grovedb(); + let old_hash = db.root_hash(None).unwrap().unwrap(); + let element = Element::new_item(b"ayy".to_vec()); - // Insert elements into grovedb instance - let temp_db = make_test_grovedb(); - // Insert level 1 nodes - temp_db - .insert( + // Insert some nested subtrees + db.insert( [TEST_LEAF].as_ref(), - b"innertree", + b"key1", Element::empty_tree(), None, None, ) .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"innertree2", + .expect("successful subtree 1 insert"); + + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", Element::empty_tree(), None, None, ) .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"innertree3", + .expect("successful subtree 2 insert"); + + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element.clone(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + + assert_eq!( + db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) + .unwrap() + .expect("successful get"), + element + ); + assert_ne!(old_hash, db.root_hash(None).unwrap().unwrap()); + } + + // TODO: Add solid test cases to this + + #[test] + fn test_references() { + let db = make_test_grovedb(); + db.insert( + [TEST_LEAF].as_ref(), + b"merk_1", Element::empty_tree(), None, None, ) .unwrap() .expect("successful subtree insert"); - // Insert level 2 nodes - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), + db.insert( + [TEST_LEAF, b"merk_1"].as_ref(), b"key1", Element::new_item(b"value1".to_vec()), None, @@ -1192,9 +951,8 @@ fn test_path_query_proofs_without_subquery_with_reference() { ) .unwrap() 
.expect("successful subtree insert"); - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), + db.insert( + [TEST_LEAF, b"merk_1"].as_ref(), b"key2", Element::new_item(b"value2".to_vec()), None, @@ -1202,1767 +960,2419 @@ fn test_path_query_proofs_without_subquery_with_reference() { ) .unwrap() .expect("successful subtree insert"); - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), - b"key3", - Element::new_item(b"value3".to_vec()), + + db.insert( + [TEST_LEAF].as_ref(), + b"merk_2", + Element::empty_tree(), None, None, ) .unwrap() .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), - b"key3", - Element::new_item(b"value3".to_vec()), + // db.insert([TEST_LEAF, b"merk_2"].as_ref(), b"key2", + // Element::new_item(b"value2".to_vec()), None).expect("successful subtree + // insert"); + db.insert( + [TEST_LEAF, b"merk_2"].as_ref(), + b"key1", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"merk_1".to_vec(), + b"key1".to_vec(), + ])), None, None, ) .unwrap() .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), - b"key4", + db.insert( + [TEST_LEAF, b"merk_2"].as_ref(), + b"key2", Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ TEST_LEAF.to_vec(), - b"innertree".to_vec(), - b"key1".to_vec(), + b"merk_1".to_vec(), + b"key2".to_vec(), ])), None, None, ) .unwrap() .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree3"].as_ref(), - b"key4", - Element::new_item(b"value4".to_vec()), + assert!(db + .get([TEST_LEAF].as_ref(), b"merk_1", None) + .unwrap() + .is_ok()); + assert!(db + .get([TEST_LEAF].as_ref(), b"merk_2", None) + .unwrap() + .is_ok()); + } + + #[test] + fn test_follow_references() { + let db = make_test_grovedb(); + let element = Element::new_item(b"ayy".to_vec()); + + // Insert an item to refer to + db.insert( + [TEST_LEAF].as_ref(), + 
b"key2", + Element::empty_tree(), None, None, ) .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), - b"key5", + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"key3", + element.clone(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + + // Insert a reference + db.insert( + [TEST_LEAF].as_ref(), + b"reference_key", Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - ANOTHER_TEST_LEAF.to_vec(), - b"innertree3".to_vec(), - b"key4".to_vec(), + TEST_LEAF.to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), ])), None, None, ) .unwrap() - .expect("successful subtree insert"); - - // Single key query - let mut query = Query::new(); - query.insert_range_from(b"key4".to_vec()..); - - let path_query = PathQuery::new_unsized( - vec![ANOTHER_TEST_LEAF.to_vec(), b"innertree2".to_vec()], - query, - ); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - assert_eq!( - hex::encode(&proof), - "010285010198ebd6dc7e1c82951c41fcfa6487711cac6a399ebb01bb979cb\ - e4a51e0b2f08d06046b6579340009000676616c75653100bf2f052b01c2b\ - b83ff3a40504d42b5b9141c582a3e0c98679189b33a24478a6f1006046b6\ - 579350009000676616c75653400f084ffdbc429a89c9b6620e7224d73c2e\ - e505eb7e6fb5eb574e1a8dc8b0d0884110158040a696e6e6572747265653\ - 200080201046b657934008ba21f835b2ff60f16b7fccfbda107bec3da0c4\ - 709357d40de223d769547ec21013a090155ea7d14038c7062d94930798f8\ - 85a19d6ebff8a87489a1debf665604711015e02cfb7d035b8f4a3631be46\ - c597510a16770c15c74331b3dc8dcb577a206e49675040a746573745f6c6\ - 5616632000e02010a696e6e657274726565320049870f2813c0c3c5c105a\ - 988c0ef1372178245152fa9a43b209a6b6d95589bdc11" - ); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = 
Element::new_item(b"value1".to_vec()).serialize().unwrap(); - let r2 = Element::new_item(b"value4".to_vec()).serialize().unwrap(); - - compare_result_tuples( - result_set, - vec![(b"key4".to_vec(), r1), (b"key5".to_vec(), r2)], - ); -} + .expect("successful reference insert"); -#[test] -fn test_path_query_proofs_without_subquery() { - // Tree Structure - // root - // test_leaf - // innertree - // k1,v1 - // k2,v2 - // k3,v3 - // another_test_leaf - // innertree2 - // k3,v3 - // innertree3 - // k4,v4 + assert_eq!( + db.get([TEST_LEAF].as_ref(), b"reference_key", None) + .unwrap() + .expect("successful get"), + element + ); + } - // Insert elements into grovedb instance - let temp_db = make_test_grovedb(); - // Insert level 1 nodes - temp_db - .insert( + #[test] + fn test_reference_must_point_to_item() { + let db = make_test_grovedb(); + + let result = db + .insert( + [TEST_LEAF].as_ref(), + b"reference_key_1", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"reference_key_2".to_vec(), + ])), + None, + None, + ) + .unwrap(); + + assert!(matches!(result, Err(Error::MissingReference(_)))); + } + + #[test] + fn test_too_many_indirections() { + use crate::operations::get::MAX_REFERENCE_HOPS; + let db = make_test_grovedb(); + + let keygen = |idx| format!("key{}", idx).bytes().collect::>(); + + db.insert( [TEST_LEAF].as_ref(), - b"innertree", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"innertree2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"innertree3", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert level 2 nodes - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), - b"key1", - Element::new_item(b"value1".to_vec()), + b"key0", + 
Element::new_item(b"oops".to_vec()), None, None, ) .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), - b"key2", - Element::new_item(b"value2".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [TEST_LEAF, b"innertree"].as_ref(), - b"key3", - Element::new_item(b"value3".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), - b"key3", - Element::new_item(b"value3".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - temp_db - .insert( - [ANOTHER_TEST_LEAF, b"innertree3"].as_ref(), - b"key4", - Element::new_item(b"value4".to_vec()), + .expect("successful item insert"); + + for i in 1..=(MAX_REFERENCE_HOPS) { + db.insert( + [TEST_LEAF].as_ref(), + &keygen(i), + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + keygen(i - 1), + ])), + None, + None, + ) + .unwrap() + .expect("successful reference insert"); + } + + // Add one more reference + db.insert( + [TEST_LEAF].as_ref(), + &keygen(MAX_REFERENCE_HOPS + 1), + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + keygen(MAX_REFERENCE_HOPS), + ])), None, None, ) .unwrap() - .expect("successful subtree insert"); - - // Single key query - let mut query = Query::new(); - query.insert_key(b"key1".to_vec()); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - assert_eq!( - hex::encode(proof.as_slice()), - "01025503046b6579310009000676616c7565310002018655e18e4555b0b65\ - bbcec64c749db6b9ad84231969fb4fbe769a3093d10f2100198ebd6dc7e1\ - c82951c41fcfa6487711cac6a399ebb01bb979cbe4a51e0b2f08d1101350\ - 409696e6e65727472656500080201046b657932004910536da659a3dbdbc\ - 
f68c4a6630e72de4ba20cfc60b08b3dd45b4225a599b6015c04097465737\ - 45f6c656166000d020109696e6e65727472656500fafa16d06e8d8696dae\ - 443731ae2a4eae521e4a9a79c331c8a7e22e34c0f1a6e01b55f830550604\ - 719833d54ce2bf139aff4bb699fa4111b9741633554318792c511" - ); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value1".to_vec()).serialize().unwrap(); - compare_result_tuples(result_set, vec![(b"key1".to_vec(), r1)]); - - // Range query + limit - let mut query = Query::new(); - query.insert_range_after(b"key1".to_vec()..); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - SizedQuery::new(query, Some(1), None), - ); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value2".to_vec()).serialize().unwrap(); - compare_result_tuples(result_set, vec![(b"key2".to_vec(), r1)]); - - // Range query + offset + limit - let mut query = Query::new(); - query.insert_range_after(b"key1".to_vec()..); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - SizedQuery::new(query, Some(1), Some(1)), - ); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value3".to_vec()).serialize().unwrap(); - compare_result_tuples(result_set, vec![(b"key3".to_vec(), r1)]); - - // Range query + direction + limit - let mut query = Query::new_with_direction(false); - query.insert_all(); - let path_query = PathQuery::new( - 
vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - SizedQuery::new(query, Some(2), None), - ); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value3".to_vec()).serialize().unwrap(); - let r2 = Element::new_item(b"value2".to_vec()).serialize().unwrap(); - compare_result_tuples( - result_set, - vec![(b"key3".to_vec(), r1), (b"key2".to_vec(), r2)], - ); -} - -#[test] -fn test_path_query_proofs_with_default_subquery() { - let temp_db = make_deep_tree(); - - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - - let keys = [ - b"key1".to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - b"key4".to_vec(), - b"key5".to_vec(), - ]; - let values = [ - b"value1".to_vec(), - b"value2".to_vec(), - b"value3".to_vec(), - b"value4".to_vec(), - b"value5".to_vec(), - ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - let mut query = Query::new(); - query.insert_range_after(b"innertree".to_vec()..); - - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, 
result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 2); - - let keys = [b"key4".to_vec(), b"key5".to_vec()]; - let values = [b"value4".to_vec(), b"value5".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // range subquery - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_range_after_to_inclusive(b"key1".to_vec()..=b"key4".to_vec()); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect( - "should - execute proof", - ); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key2".to_vec(), b"key3".to_vec(), b"key4".to_vec()]; - let values = [b"value2".to_vec(), b"value3".to_vec(), b"value4".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // deep tree test - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - let mut sub_subquery = Query::new(); - sub_subquery.insert_all(); - - subq.set_subquery(sub_subquery); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), 
&path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 11); - - let keys = [ - b"key1".to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - b"key4".to_vec(), - b"key5".to_vec(), - b"key6".to_vec(), - b"key7".to_vec(), - b"key8".to_vec(), - b"key9".to_vec(), - b"key10".to_vec(), - b"key11".to_vec(), - ]; - let values = [ - b"value1".to_vec(), - b"value2".to_vec(), - b"value3".to_vec(), - b"value4".to_vec(), - b"value5".to_vec(), - b"value6".to_vec(), - b"value7".to_vec(), - b"value8".to_vec(), - b"value9".to_vec(), - b"value10".to_vec(), - b"value11".to_vec(), - ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} - -#[test] -fn test_path_query_proofs_with_subquery_path() { - let temp_db = make_deep_tree(); - - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - query.set_subquery_key(b"deeper_1".to_vec()); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; - let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // test subquery path with valid n > 1 valid translation - let mut query = Query::new(); - 
query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - query.set_subquery_path(vec![b"deep_node_1".to_vec(), b"deeper_1".to_vec()]); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; - let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // test subquery path with empty subquery path - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - query.set_subquery_path(vec![]); - query.set_subquery(subq); - - let path_query = - PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"deep_node_1".to_vec()], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 6); - - let keys = [ - b"key1".to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - b"key4".to_vec(), - b"key5".to_vec(), - b"key6".to_vec(), - ]; - let values = [ - b"value1".to_vec(), - b"value2".to_vec(), - b"value3".to_vec(), - b"value4".to_vec(), - b"value5".to_vec(), - b"value6".to_vec(), - ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - 
compare_result_tuples(result_set, expected_result_set); - - // test subquery path with an invalid translation - // should generate a valid absence proof with an empty result set - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new(); - subq.insert_all(); - - query.set_subquery_path(vec![ - b"deep_node_1".to_vec(), - b"deeper_10".to_vec(), - b"another_invalid_key".to_vec(), - ]); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} + .expect("expected insert"); -#[test] -fn test_path_query_proofs_with_key_and_subquery() { - let temp_db = make_deep_tree(); - - let mut query = Query::new(); - query.insert_key(b"deep_node_1".to_vec()); - - let mut subq = Query::new(); - subq.insert_all(); + let result = db + .get([TEST_LEAF].as_ref(), &keygen(MAX_REFERENCE_HOPS + 1), None) + .unwrap(); - query.set_subquery_key(b"deeper_1".to_vec()); - query.set_subquery(subq); + assert!(matches!(result, Err(Error::ReferenceLimit))); + } - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + #[test] + fn test_reference_value_affects_state() { + let db_one = make_test_grovedb(); + db_one + .insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::new_item(vec![0]), + None, + None, + ) + .unwrap() + .expect("should insert item"); + db_one + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"ref", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + ])), + None, + None, + ) + .unwrap() + .expect("should insert item"); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), 
&path_query).expect("should execute proof"); + let db_two = make_test_grovedb(); + db_two + .insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::new_item(vec![0]), + None, + None, + ) + .unwrap() + .expect("should insert item"); + db_two + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"ref", + Element::new_reference(ReferencePathType::UpstreamRootHeightReference( + 0, + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + )), + None, + None, + ) + .unwrap() + .expect("should insert item"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); + assert_ne!( + db_one + .root_hash(None) + .unwrap() + .expect("should return root hash"), + db_two + .root_hash(None) + .unwrap() + .expect("should return toor hash") + ); + } - let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; - let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} + #[test] + fn test_tree_structure_is_persistent() { + let tmp_dir = TempDir::new().unwrap(); + let element = Element::new_item(b"ayy".to_vec()); + // Create a scoped GroveDB + let prev_root_hash = { + let mut db = GroveDb::open(tmp_dir.path()).unwrap(); + add_test_leaves(&mut db); + + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element.clone(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + assert_eq!( + db.get([TEST_LEAF, b"key1", 
b"key2"].as_ref(), b"key3", None) + .unwrap() + .expect("successful get 1"), + element + ); + db.root_hash(None).unwrap().unwrap() + }; + // Open a persisted GroveDB + let db = GroveDb::open(tmp_dir).unwrap(); + assert_eq!( + db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) + .unwrap() + .expect("successful get 2"), + element + ); + assert!(db + .get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key4", None) + .unwrap() + .is_err()); + assert_eq!(prev_root_hash, db.root_hash(None).unwrap().unwrap()); + } -#[test] -fn test_path_query_proofs_with_conditional_subquery() { - let temp_db = make_deep_tree(); + #[test] + fn test_root_tree_leaves_are_noted() { + let db = make_test_grovedb(); + db.check_subtree_exists_path_not_found([TEST_LEAF].as_ref().into(), None) + .unwrap() + .expect("should exist"); + db.check_subtree_exists_path_not_found([ANOTHER_TEST_LEAF].as_ref().into(), None) + .unwrap() + .expect("should exist"); + } - let mut query = Query::new(); - query.insert_all(); + #[test] + fn test_proof_for_invalid_path_root_key() { + let db = make_test_grovedb(); - let mut subquery = Query::new(); - subquery.insert_all(); + let query = Query::new(); + let path_query = PathQuery::new_unsized(vec![b"invalid_path_key".to_vec()], query); - let mut final_subquery = Query::new(); - final_subquery.insert_all(); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - subquery.add_conditional_subquery( - QueryItem::Key(b"deeper_4".to_vec()), - None, - Some(final_subquery), - ); - - query.set_subquery(subquery); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - - let 
keys = [ - b"deeper_1".to_vec(), - b"deeper_2".to_vec(), - b"deeper_3".to_vec(), - b"key10".to_vec(), - b"key11".to_vec(), - ]; - assert_eq!(result_set.len(), keys.len()); - - // TODO: Is this defined behaviour - for (index, key) in keys.iter().enumerate() { - assert_eq!(&result_set[index].key, key); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); } - // Default + Conditional subquery - let mut query = Query::new(); - query.insert_all(); - - let mut subquery = Query::new(); - subquery.insert_all(); - - let mut final_conditional_subquery = Query::new(); - final_conditional_subquery.insert_all(); - - let mut final_default_subquery = Query::new(); - final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + #[test] + fn test_proof_for_invalid_path() { + let db = make_deep_tree(); + + let query = Query::new(); + let path_query = + PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"invalid_key".to_vec()], query); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); + + let query = Query::new(); + let path_query = PathQuery::new_unsized( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"invalid_key".to_vec(), + ], + query, + ); - subquery.add_conditional_subquery( - QueryItem::Key(b"deeper_4".to_vec()), - None, - Some(final_conditional_subquery), - ); - subquery.set_subquery(final_default_subquery); - - query.set_subquery(subquery); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - 
assert_eq!(result_set.len(), 6); - - let keys = [ - b"key3".to_vec(), - b"key4".to_vec(), - b"key5".to_vec(), - b"key6".to_vec(), - b"key10".to_vec(), - b"key11".to_vec(), - ]; - let values = [ - b"value3".to_vec(), - b"value4".to_vec(), - b"value5".to_vec(), - b"value6".to_vec(), - b"value10".to_vec(), - b"value11".to_vec(), - ]; - let elements = values - .map(|x| Element::new_item(x).serialize().unwrap()) - .to_vec(); - // compare_result_sets(&elements, &result_set); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); + + let query = Query::new(); + let path_query = PathQuery::new_unsized( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_1".to_vec(), + b"invalid_key".to_vec(), + ], + query, + ); -#[test] -fn test_path_query_proofs_with_sized_query() { - let temp_db = make_deep_tree(); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); + + let query = Query::new(); + let path_query = PathQuery::new_unsized( + vec![ + b"deep_leaf".to_vec(), + b"early_invalid_key".to_vec(), + b"deeper_1".to_vec(), + b"invalid_key".to_vec(), + ], + query, + ); - let mut query = Query::new(); - query.insert_all(); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - let mut subquery = Query::new(); - subquery.insert_all(); + 
assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); + } - let mut final_conditional_subquery = Query::new(); - final_conditional_subquery.insert_all(); + #[test] + fn test_proof_for_non_existent_data() { + let temp_db = make_test_grovedb(); - let mut final_default_subquery = Query::new(); - final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + let mut query = Query::new(); + query.insert_key(b"key1".to_vec()); - subquery.add_conditional_subquery( - QueryItem::Key(b"deeper_4".to_vec()), - None, - Some(final_conditional_subquery), - ); - subquery.set_subquery(final_default_subquery); - - query.set_subquery(subquery); - - let path_query = PathQuery::new( - vec![DEEP_LEAF.to_vec()], - SizedQuery::new(query, Some(3), Some(1)), - ); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key4".to_vec(), b"key5".to_vec(), b"key6".to_vec()]; - let values = [b"value4".to_vec(), b"value5".to_vec(), b"value6".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} + // path to empty subtree + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); -#[test] -fn test_path_query_proofs_with_direction() { - let temp_db = make_deep_tree(); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - let mut query = Query::new_with_direction(false); - query.insert_all(); + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + 
assert_eq!(result_set.len(), 0); + } - let mut subquery = Query::new_with_direction(false); - subquery.insert_all(); + #[test] + fn test_path_query_proofs_without_subquery_with_reference() { + // Tree Structure + // root + // test_leaf + // innertree + // k1,v1 + // k2,v2 + // k3,v3 + // another_test_leaf + // innertree2 + // k3,v3 + // k4, reference to k1 in innertree + // k5, reference to k4 in innertree3 + // innertree3 + // k4,v4 + + // Insert elements into grovedb instance + let temp_db = make_test_grovedb(); + // Insert level 1 nodes + temp_db + .insert( + [TEST_LEAF].as_ref(), + b"innertree", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"innertree2", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"innertree3", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + // Insert level 2 nodes + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key1", + Element::new_item(b"value1".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key2", + Element::new_item(b"value2".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key3", + Element::new_item(b"value3".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), + b"key3", + Element::new_item(b"value3".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), + b"key4", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + 
b"innertree".to_vec(), + b"key1".to_vec(), + ])), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree3"].as_ref(), + b"key4", + Element::new_item(b"value4".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), + b"key5", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + ANOTHER_TEST_LEAF.to_vec(), + b"innertree3".to_vec(), + b"key4".to_vec(), + ])), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); - let mut final_conditional_subquery = Query::new_with_direction(false); - final_conditional_subquery.insert_all(); + // Single key query + let mut query = Query::new(); + query.insert_range_from(b"key4".to_vec()..); - let mut final_default_subquery = Query::new_with_direction(false); - final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + let path_query = PathQuery::new_unsized( + vec![ANOTHER_TEST_LEAF.to_vec(), b"innertree2".to_vec()], + query, + ); - subquery.add_conditional_subquery( - QueryItem::Key(b"deeper_4".to_vec()), - None, - Some(final_conditional_subquery), - ); - subquery.set_subquery(final_default_subquery); - - query.set_subquery(subquery); - - let path_query = PathQuery::new( - vec![DEEP_LEAF.to_vec()], - SizedQuery::new(query, Some(3), Some(1)), - ); - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - let keys = [b"key10".to_vec(), b"key6".to_vec(), b"key5".to_vec()]; - let values = [b"value10".to_vec(), b"value6".to_vec(), b"value5".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = 
keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); - - // combined directions - let mut query = Query::new(); - query.insert_all(); - - let mut subq = Query::new_with_direction(false); - subq.insert_all(); - - let mut sub_subquery = Query::new(); - sub_subquery.insert_all(); - - subq.set_subquery(sub_subquery); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - - let proof = temp_db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 11); - - let keys = [ - b"key4".to_vec(), - b"key5".to_vec(), - b"key6".to_vec(), - b"key1".to_vec(), - b"key2".to_vec(), - b"key3".to_vec(), - b"key10".to_vec(), - b"key11".to_vec(), - b"key7".to_vec(), - b"key8".to_vec(), - b"key9".to_vec(), - ]; - let values = [ - b"value4".to_vec(), - b"value5".to_vec(), - b"value6".to_vec(), - b"value1".to_vec(), - b"value2".to_vec(), - b"value3".to_vec(), - b"value10".to_vec(), - b"value11".to_vec(), - b"value7".to_vec(), - b"value8".to_vec(), - b"value9".to_vec(), - ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); - let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); - compare_result_tuples(result_set, expected_result_set); -} + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + assert_eq!( + hex::encode(&proof), + "005e02cfb7d035b8f4a3631be46c597510a16770c15c74331b3dc8dcb577a206e49675040a746\ + 573745f6c65616632000e02010a696e6e657274726565320049870f2813c0c3c5c105a988c0ef1\ + 372178245152fa9a43b209a6b6d95589bdc11010a746573745f6c6561663258040a696e6e65727\ + 47265653200080201046b657934008ba21f835b2ff60f16b7fccfbda107bec3da0c4709357d40d\ + 
e223d769547ec21013a090155ea7d14038c7062d94930798f885a19d6ebff8a87489a1debf6656\ + 04711010a696e6e65727472656532850198ebd6dc7e1c82951c41fcfa6487711cac6a399ebb01b\ + b979cbe4a51e0b2f08d06046b6579340009000676616c75653100bf2f052b01c2bb83ff3a40504\ + d42b5b9141c582a3e0c98679189b33a24478a6f1006046b6579350009000676616c75653400f08\ + 4ffdbc429a89c9b6620e7224d73c2ee505eb7e6fb5eb574e1a8dc8b0d0884110001" + ); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); -#[test] -fn test_checkpoint() { - let db = make_test_grovedb(); - let element1 = Element::new_item(b"ayy".to_vec()); + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + let r1 = Element::new_item(b"value1".to_vec()).serialize().unwrap(); + let r2 = Element::new_item(b"value4".to_vec()).serialize().unwrap(); - db.insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert a subtree 1 into GroveDB"); - db.insert( - [b"key1".as_ref()].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert a subtree 2 into GroveDB"); - db.insert( - [b"key1".as_ref(), b"key2".as_ref()].as_ref(), - b"key3", - element1.clone(), - None, - None, - ) - .unwrap() - .expect("cannot insert an item into GroveDB"); + compare_result_tuples( + result_set, + vec![(b"key4".to_vec(), r1), (b"key5".to_vec(), r2)], + ); + } - assert_eq!( - db.get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) + #[test] + fn test_path_query_proofs_without_subquery() { + // Tree Structure + // root + // test_leaf + // innertree + // k1,v1 + // k2,v2 + // k3,v3 + // another_test_leaf + // innertree2 + // k3,v3 + // innertree3 + // k4,v4 + + // Insert elements into grovedb instance + let temp_db = make_test_grovedb(); + // Insert level 1 nodes + temp_db + .insert( + [TEST_LEAF].as_ref(), + b"innertree", + Element::empty_tree(), + None, + None, + ) .unwrap() - .expect("cannot get from grovedb"), - 
element1 - ); - - let tempdir_parent = TempDir::new().expect("cannot open tempdir"); - let checkpoint_tempdir = tempdir_parent.path().join("checkpoint"); - db.create_checkpoint(&checkpoint_tempdir) - .expect("cannot create checkpoint"); - - let checkpoint_db = - GroveDb::open(checkpoint_tempdir).expect("cannot open grovedb from checkpoint"); - - assert_eq!( - db.get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"innertree2", + Element::empty_tree(), + None, + None, + ) .unwrap() - .expect("cannot get from grovedb"), - element1 - ); - assert_eq!( - checkpoint_db - .get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"innertree3", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + // Insert level 2 nodes + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key1", + Element::new_item(b"value1".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key2", + Element::new_item(b"value2".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key3", + Element::new_item(b"value3".to_vec()), + None, + None, + ) .unwrap() - .expect("cannot get from checkpoint"), - element1 - ); + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree2"].as_ref(), + b"key3", + Element::new_item(b"value3".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + temp_db + .insert( + [ANOTHER_TEST_LEAF, b"innertree3"].as_ref(), + b"key4", + Element::new_item(b"value4".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); - let element2 = 
Element::new_item(b"ayy2".to_vec()); - let element3 = Element::new_item(b"ayy3".to_vec()); + // Single key query + let mut query = Query::new(); + query.insert_key(b"key1".to_vec()); - checkpoint_db - .insert( - [b"key1".as_ref()].as_ref(), - b"key4", - element2.clone(), - None, - None, - ) - .unwrap() - .expect("cannot insert into checkpoint"); + let path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - db.insert( - [b"key1".as_ref()].as_ref(), - b"key4", - element3.clone(), - None, - None, - ) - .unwrap() - .expect("cannot insert into GroveDB"); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + assert_eq!( + hex::encode(proof.as_slice()), + "005c0409746573745f6c656166000d020109696e6e65727472656500fafa16d06e8d8696dae443731\ + ae2a4eae521e4a9a79c331c8a7e22e34c0f1a6e01b55f830550604719833d54ce2bf139aff4bb699fa\ + 4111b9741633554318792c5110109746573745f6c656166350409696e6e65727472656500080201046\ + b657932004910536da659a3dbdbcf68c4a6630e72de4ba20cfc60b08b3dd45b4225a599b60109696e6\ + e6572747265655503046b6579310009000676616c7565310002018655e18e4555b0b65bbcec64c749d\ + b6b9ad84231969fb4fbe769a3093d10f2100198ebd6dc7e1c82951c41fcfa6487711cac6a399ebb01b\ + b979cbe4a51e0b2f08d110001" + ); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + let r1 = Element::new_item(b"value1".to_vec()).serialize().unwrap(); + compare_result_tuples(result_set, vec![(b"key1".to_vec(), r1)]); + + // Range query + limit + let mut query = Query::new(); + query.insert_range_after(b"key1".to_vec()..); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + SizedQuery::new(query, Some(1), None), + ); - assert_eq!( - checkpoint_db - .get([b"key1".as_ref()].as_ref(), b"key4", None) - .unwrap() - .expect("cannot get from checkpoint"), - element2, - ); + let proof = 
temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!( - db.get([b"key1".as_ref()].as_ref(), b"key4", None) - .unwrap() - .expect("cannot get from GroveDB"), - element3 - ); + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + let r1 = Element::new_item(b"value2".to_vec()).serialize().unwrap(); + compare_result_tuples(result_set, vec![(b"key2".to_vec(), r1)]); - checkpoint_db - .insert( - [b"key1".as_ref()].as_ref(), - b"key5", - element3.clone(), - None, - None, - ) - .unwrap() - .expect("cannot insert into checkpoint"); + // Range query + direction + limit + let mut query = Query::new_with_direction(false); + query.insert_all(); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + SizedQuery::new(query, Some(2), None), + ); - db.insert([b"key1".as_ref()].as_ref(), b"key6", element3, None, None) - .unwrap() - .expect("cannot insert into GroveDB"); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert!(matches!( - checkpoint_db - .get([b"key1".as_ref()].as_ref(), b"key6", None) - .unwrap(), - Err(Error::PathKeyNotFound(_)) - )); - - assert!(matches!( - db.get([b"key1".as_ref()].as_ref(), b"key5", None).unwrap(), - Err(Error::PathKeyNotFound(_)) - )); -} + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + let r1 = Element::new_item(b"value3".to_vec()).serialize().unwrap(); + let r2 = Element::new_item(b"value2".to_vec()).serialize().unwrap(); + compare_result_tuples( + result_set, + vec![(b"key3".to_vec(), r1), (b"key2".to_vec(), r2)], + ); + } -#[test] -fn test_is_empty_tree() { - let db = make_test_grovedb(); + #[test] + fn test_path_query_proofs_with_default_subquery() { + let temp_db = make_deep_tree(); - // Create an empty tree 
with no elements - db.insert( - [TEST_LEAF].as_ref(), - b"innertree", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .unwrap(); + let mut query = Query::new(); + query.insert_all(); - assert!(db - .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None) - .unwrap() - .expect("path is valid tree")); + let mut subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); - // add an element to the tree to make it non empty - db.insert( - [TEST_LEAF, b"innertree"].as_ref(), - b"key1", - Element::new_item(b"hello".to_vec()), - None, - None, - ) - .unwrap() - .unwrap(); - assert!(!db - .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None) - .unwrap() - .expect("path is valid tree")); -} + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); -#[test] -fn transaction_should_be_aborted_when_rollback_is_called() { - let item_key = b"key3"; + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - let db = make_test_grovedb(); - let transaction = db.start_transaction(); + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); - let element1 = Element::new_item(b"ayy".to_vec()); + let keys = [ + b"key1".to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), + b"key4".to_vec(), + b"key5".to_vec(), + ]; + let values = [ + b"value1".to_vec(), + b"value2".to_vec(), + b"value3".to_vec(), + b"value4".to_vec(), + b"value5".to_vec(), + ]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + + let mut query = Query::new(); + query.insert_range_after(b"innertree".to_vec()..); + + let mut subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); + + let path_query = 
PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 2); + + let keys = [b"key4".to_vec(), b"key5".to_vec()]; + let values = [b"value4".to_vec(), b"value5".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + + // range subquery + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new(); + subq.insert_range_after_to_inclusive(b"key1".to_vec()..=b"key4".to_vec()); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect( + "should + execute proof", + ); - let result = db - .insert( - [TEST_LEAF].as_ref(), - item_key, - element1, - None, - Some(&transaction), - ) - .unwrap(); + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); - assert!(matches!(result, Ok(()))); + let keys = [b"key2".to_vec(), b"key3".to_vec(), b"key4".to_vec()]; + let values = [b"value2".to_vec(), b"value3".to_vec(), b"value4".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); - db.rollback_transaction(&transaction).unwrap(); + // deep tree test + let mut query = Query::new(); + query.insert_all(); - let result = db - .get([TEST_LEAF].as_ref(), item_key, Some(&transaction)) - 
.unwrap(); - assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); -} + let mut subq = Query::new(); + subq.insert_all(); -#[test] -fn transaction_should_be_aborted() { - let db = make_test_grovedb(); - let transaction = db.start_transaction(); + let mut sub_subquery = Query::new(); + sub_subquery.insert_all(); - let item_key = b"key3"; - let element = Element::new_item(b"ayy".to_vec()); + subq.set_subquery(sub_subquery); + query.set_subquery(subq); - db.insert( - [TEST_LEAF].as_ref(), - item_key, - element, - None, - Some(&transaction), - ) - .unwrap() - .unwrap(); + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - drop(transaction); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - // Transactional data shouldn't be committed to the main database - let result = db.get([TEST_LEAF].as_ref(), item_key, None).unwrap(); - assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); -} + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 14); -#[test] -fn test_subtree_pairs_iterator() { - let db = make_test_grovedb(); - let element = Element::new_item(b"ayy".to_vec()); - let element2 = Element::new_item(b"lmao".to_vec()); + let keys = [ + b"key1".to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), + b"key4".to_vec(), + b"key5".to_vec(), + b"key6".to_vec(), + b"key7".to_vec(), + b"key8".to_vec(), + b"key9".to_vec(), + b"key10".to_vec(), + b"key11".to_vec(), + b"key12".to_vec(), + b"key13".to_vec(), + b"key14".to_vec(), + ]; + let values = [ + b"value1".to_vec(), + b"value2".to_vec(), + b"value3".to_vec(), + b"value4".to_vec(), + b"value5".to_vec(), + b"value6".to_vec(), + b"value7".to_vec(), + b"value8".to_vec(), + b"value9".to_vec(), + b"value10".to_vec(), + b"value11".to_vec(), + b"value12".to_vec(), + b"value13".to_vec(), + b"value14".to_vec(), + ]; + let 
elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + } - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"subtree1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - db.insert( - [TEST_LEAF, b"subtree1"].as_ref(), - b"subtree11", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); - // Insert an element into subtree - db.insert( - [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), - b"key1", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - assert_eq!( - db.get( - [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), - b"key1", - None - ) - .unwrap() - .expect("successful get 1"), - element - ); - db.insert( - [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), - b"key0", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"subtree1"].as_ref(), - b"subtree12", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 3 insert"); - db.insert( - [TEST_LEAF, b"subtree1"].as_ref(), - b"key1", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"subtree1"].as_ref(), - b"key2", - element2.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - - // Iterate over subtree1 to see if keys of other subtrees messed up - // let mut iter = db - // .elements_iterator([TEST_LEAF, b"subtree1"].as_ref(), None) - // .expect("cannot create iterator"); - let storage_context = db - .grove_db - .db - .get_storage_context([TEST_LEAF, b"subtree1"].as_ref().into(), None) - .unwrap(); - let mut iter = Element::iterator(storage_context.raw_iter()).unwrap(); - assert_eq!( - 
iter.next_element().unwrap().unwrap(), - Some((b"key1".to_vec(), element)) - ); - assert_eq!( - iter.next_element().unwrap().unwrap(), - Some((b"key2".to_vec(), element2)) - ); - let subtree_element = iter.next_element().unwrap().unwrap().unwrap(); - assert_eq!(subtree_element.0, b"subtree11".to_vec()); - assert!(matches!(subtree_element.1, Element::Tree(..))); - let subtree_element = iter.next_element().unwrap().unwrap().unwrap(); - assert_eq!(subtree_element.0, b"subtree12".to_vec()); - assert!(matches!(subtree_element.1, Element::Tree(..))); - assert!(matches!(iter.next_element().unwrap(), Ok(None))); -} + #[test] + fn test_path_query_proofs_with_subquery_path() { + let temp_db = make_deep_tree(); -#[test] -fn test_find_subtrees() { - let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); - // Insert an element into subtree - db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element, - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key4", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 3 insert"); - let subtrees = db - .find_subtrees(&[TEST_LEAF].as_ref().into(), None) - .unwrap() - .expect("cannot get subtrees"); - assert_eq!( - vec![ - vec![TEST_LEAF], - vec![TEST_LEAF, b"key1"], - vec![TEST_LEAF, b"key4"], - vec![TEST_LEAF, b"key1", b"key2"], - ], - subtrees - ); -} + let mut query = Query::new(); + query.insert_all(); -#[test] -fn test_root_subtree_has_root_key() { - let db = make_test_grovedb(); - let storage = db.db.get_storage_context(EMPTY_PATH, None).unwrap(); - let root_merk = 
Merk::open_base( - storage, - false, - Some(&Element::value_defined_cost_for_serialized_value), - ) - .unwrap() - .expect("expected to get root merk"); - let (_, root_key, _) = root_merk - .root_hash_key_and_sum() - .unwrap() - .expect("expected to get root hash, key and sum"); - assert!(root_key.is_some()) -} + let mut subq = Query::new(); + subq.insert_all(); + + query.set_subquery_key(b"deeper_1".to_vec()); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + + let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; + let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + + // test subquery path with valid n > 1 valid translation + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new(); + subq.insert_all(); + + query.set_subquery_path(vec![b"deep_node_1".to_vec(), b"deeper_1".to_vec()]); + query.set_subquery(subq); -#[test] -fn test_get_subtree() { - let db = make_test_grovedb(); - let element = Element::new_item(b"ayy".to_vec()); + let path_query = PathQuery::new_unsized(vec![], query); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); - // Returns error is subtree is not valid - { - let subtree = 
db.get([TEST_LEAF].as_ref(), b"invalid_tree", None).unwrap(); - assert!(subtree.is_err()); + let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; + let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); - // Doesn't return an error for subtree that exists but empty - let subtree = db.get(EMPTY_PATH, TEST_LEAF, None).unwrap(); - assert!(subtree.is_ok()); + // test subquery path with empty subquery path + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new(); + subq.insert_all(); + + query.set_subquery_path(vec![]); + query.set_subquery(subq); + + let path_query = + PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"deep_node_1".to_vec()], query); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 6); + + let keys = [ + b"key1".to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), + b"key4".to_vec(), + b"key5".to_vec(), + b"key6".to_vec(), + ]; + let values = [ + b"value1".to_vec(), + b"value2".to_vec(), + b"value3".to_vec(), + b"value4".to_vec(), + b"value5".to_vec(), + b"value6".to_vec(), + ]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + + // test subquery path with an invalid translation + // should generate a valid absence proof with an empty result set + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new(); + subq.insert_all(); + + 
query.set_subquery_path(vec![ + b"deep_node_1".to_vec(), + b"deeper_10".to_vec(), + b"another_invalid_key".to_vec(), + ]); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![], query); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 0); } - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - - let key1_tree = db - .get(EMPTY_PATH, TEST_LEAF, None) - .unwrap() - .expect("expected to get a root tree"); - - assert!( - matches!(key1_tree, Element::Tree(Some(_), _)), - "{}", - format!( - "expected tree with root key, got {:?}", - if let Element::Tree(tree, ..) = key1_tree { - format!("{:?}", tree) - } else { - "not a tree".to_string() - } - ) - ); + #[test] + fn test_path_query_proofs_with_key_and_subquery() { + let temp_db = make_deep_tree(); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); + let mut query = Query::new(); + query.insert_key(b"deep_node_1".to_vec()); - // Insert an element into subtree - db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key4", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 3 insert"); + let mut subq = Query::new(); + subq.insert_all(); - // Retrieve subtree instance - // Check if it returns the same instance that was inserted - { - let subtree_storage = db - .grove_db - .db - .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) - .unwrap(); - let 
subtree = Merk::open_layered_with_root_key( - subtree_storage, - Some(b"key3".to_vec()), - false, - Some(&Element::value_defined_cost_for_serialized_value), - ) - .unwrap() - .expect("cannot open merk"); - let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); - assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + query.set_subquery_key(b"deeper_1".to_vec()); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + + let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; + let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); } - // Insert a new tree with transaction - let transaction = db.start_transaction(); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"innertree", - Element::empty_tree(), - None, - Some(&transaction), - ) - .unwrap() - .expect("successful subtree insert"); + #[test] + fn test_path_query_proofs_with_conditional_subquery() { + let temp_db = make_deep_tree(); - db.insert( - [TEST_LEAF, b"key1", b"innertree"].as_ref(), - b"key4", - element, - None, - Some(&transaction), - ) - .unwrap() - .expect("successful value insert"); + let mut query = Query::new(); + query.insert_all(); + + let mut subquery = Query::new(); + subquery.insert_all(); - // Retrieve subtree instance with transaction - let subtree_storage = db - .grove_db - .db - .get_transactional_storage_context( - [TEST_LEAF, b"key1", b"innertree"].as_ref().into(), + let 
mut final_subquery = Query::new(); + final_subquery.insert_all(); + + subquery.add_conditional_subquery( + QueryItem::Key(b"deeper_4".to_vec()), None, - &transaction, - ) - .unwrap(); - let subtree = Merk::open_layered_with_root_key( - subtree_storage, - Some(b"key4".to_vec()), - false, - Some(&Element::value_defined_cost_for_serialized_value), - ) - .unwrap() - .expect("cannot open merk"); - let result_element = Element::get(&subtree, b"key4", true).unwrap().unwrap(); - assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); - - // Should be able to retrieve instances created before transaction - let subtree_storage = db - .grove_db - .db - .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) - .unwrap(); - let subtree = Merk::open_layered_with_root_key( - subtree_storage, - Some(b"key3".to_vec()), - false, - Some(&Element::value_defined_cost_for_serialized_value), - ) - .unwrap() - .expect("cannot open merk"); - let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); - assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); -} + Some(final_subquery), + ); -#[test] -fn test_get_full_query() { - let db = make_test_grovedb(); + query.set_subquery(subquery); - // Insert a couple of subtrees first - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert some elements into subtree - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key3", - Element::new_item(b"ayya".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key4", - Element::new_item(b"ayyb".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key5", - 
Element::new_item(b"ayyc".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"key6", - Element::new_item(b"ayyd".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful value insert"); - - // Test_Leaf - // ___________________________ - // / \ - // key1 key2 - // ___________________________ - // | | - // key4 key6 - // / \ - // key3 key5 - // - - let path1 = vec![TEST_LEAF.to_vec(), b"key1".to_vec()]; - let path2 = vec![TEST_LEAF.to_vec(), b"key2".to_vec()]; - let mut query1 = Query::new(); - let mut query2 = Query::new(); - query1.insert_range_inclusive(b"key3".to_vec()..=b"key4".to_vec()); - query2.insert_key(b"key6".to_vec()); - - let path_query1 = PathQuery::new_unsized(path1, query1); - // should get back key3, key4 - let path_query2 = PathQuery::new_unsized(path2, query2); - // should get back key6 - - assert_eq!( - db.query_many_raw( - &[&path_query1, &path_query2], - true, - true, - true, - QueryKeyElementPairResultType, - None + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + + let keys = [ + b"deeper_1".to_vec(), + b"deeper_2".to_vec(), + b"deeper_3".to_vec(), + b"key10".to_vec(), + b"key11".to_vec(), + b"deeper_5".to_vec(), + ]; + assert_eq!(result_set.len(), keys.len()); + + // TODO: Is this defined behaviour + for (index, key) in keys.iter().enumerate() { + assert_eq!(&result_set[index].key, key); + } + + // Default + Conditional subquery + let mut query = Query::new(); + query.insert_all(); + + let mut subquery = Query::new(); + subquery.insert_all(); + + let mut final_conditional_subquery = Query::new(); + final_conditional_subquery.insert_all(); + + let mut final_default_subquery = 
Query::new(); + final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + + subquery.add_conditional_subquery( + QueryItem::Key(b"deeper_4".to_vec()), + None, + Some(final_conditional_subquery), + ); + subquery.set_subquery(final_default_subquery); + + query.set_subquery(subquery); + + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 6); + + let keys = [ + b"key3".to_vec(), + b"key4".to_vec(), + b"key5".to_vec(), + b"key6".to_vec(), + b"key10".to_vec(), + b"key11".to_vec(), + ]; + let values = [ + b"value3".to_vec(), + b"value4".to_vec(), + b"value5".to_vec(), + b"value6".to_vec(), + b"value10".to_vec(), + b"value11".to_vec(), + ]; + let elements = values + .map(|x| Element::new_item(x).serialize().unwrap()) + .to_vec(); + // compare_result_sets(&elements, &result_set); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + } + + #[test] + fn test_path_query_proofs_with_sized_query() { + let temp_db = make_deep_tree(); + + let mut query = Query::new(); + query.insert_all(); + + let mut subquery = Query::new(); + subquery.insert_all(); + + let mut final_conditional_subquery = Query::new(); + final_conditional_subquery.insert_all(); + + let mut final_default_subquery = Query::new(); + final_default_subquery.insert_range_inclusive(b"key4".to_vec()..=b"key6".to_vec()); + + subquery.add_conditional_subquery( + QueryItem::Key(b"deeper_4".to_vec()), + None, + Some(final_conditional_subquery), + ); + subquery.set_subquery(final_default_subquery); + + query.set_subquery(subquery); + + let path_query = PathQuery::new( + vec![DEEP_LEAF.to_vec()], + 
SizedQuery::new(query, Some(5), None), /* we need to add a bigger limit because of + * empty proved sub trees */ + ); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + + let keys = [b"key4".to_vec(), b"key5".to_vec(), b"key6".to_vec()]; + let values = [b"value4".to_vec(), b"value5".to_vec(), b"value6".to_vec()]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + } + + #[test] + fn test_path_query_proof_with_range_subquery_and_limit() { + let db = make_deep_tree(); + + // Create a path query with a range query, subquery, and limit + let mut main_query = Query::new(); + main_query.insert_range_after(b"deeper_3".to_vec()..); + + let mut subquery = Query::new(); + subquery.insert_all(); + + main_query.set_subquery(subquery); + + let path_query = PathQuery::new( + vec![DEEP_LEAF.to_vec(), b"deep_node_2".to_vec()], + SizedQuery::new(main_query.clone(), Some(3), None), + ); + + // Generate proof + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + + // Verify proof + let verification_result = GroveDb::verify_query_raw(&proof, &path_query); + + match verification_result { + Ok((hash, result_set)) => { + // Check if the hash matches the root hash + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + // Check if we got the correct number of results + assert_eq!(result_set.len(), 3, "Expected 3 results due to limit"); + } + Err(e) => { + panic!("Proof verification failed: {:?}", e); + } + } + + // Now test without a limit to compare + let path_query_no_limit = PathQuery::new( + vec![DEEP_LEAF.to_vec(), b"deep_node_2".to_vec()], + 
SizedQuery::new(main_query.clone(), None, None), + ); + + let proof_no_limit = db.prove_query(&path_query_no_limit, None).unwrap().unwrap(); + let verification_result_no_limit = + GroveDb::verify_query_raw(&proof_no_limit, &path_query_no_limit); + + match verification_result_no_limit { + Ok((hash, result_set)) => { + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 5, "Expected 5 results without limit"); + } + Err(e) => { + panic!("Proof verification failed (no limit): {:?}", e); + } + } + } + + #[test] + fn test_path_query_proof_with_range_subquery_and_limit_with_sum_trees() { + let db = make_deep_tree_with_sum_trees(); + + // Create a path query with a range query, subquery, and limit + let mut main_query = Query::new(); + main_query.insert_key(b"a".to_vec()); + main_query.insert_range_after(b"b".to_vec()..); + + let mut subquery = Query::new(); + subquery.insert_all(); + + main_query.set_subquery(subquery); + + main_query.add_conditional_subquery(QueryItem::Key(b"a".to_vec()), None, None); + + let path_query = PathQuery::new( + vec![DEEP_LEAF.to_vec(), b"deep_node_1".to_vec()], + SizedQuery::new(main_query.clone(), Some(3), None), + ); + + let non_proved_result_elements = db + .query( + &path_query, + false, + false, + false, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + ) + .unwrap() + .expect("expected query to execute") + .0; + + assert_eq!( + non_proved_result_elements.len(), + 3, + "Expected 3 results due to limit" + ); + + let key_elements = non_proved_result_elements.to_key_elements(); + + assert_eq!( + key_elements, + vec![ + (vec![97], Element::new_item("storage".as_bytes().to_vec())), + (vec![49], Element::SumTree(Some(vec![0; 32]), 2, None)), + (vec![48], Element::new_item("v1".as_bytes().to_vec())) + ] + ); + + // Generate proof + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + + // Verify proof + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, 
&path_query).expect("proof verification failed"); + + // Check if the hash matches the root hash + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + // Check if we got the correct number of results + assert_eq!(result_set.len(), 3, "Expected 3 results due to limit"); + + // Now test without a limit to compare + let path_query_no_limit = PathQuery::new( + vec![DEEP_LEAF.to_vec(), b"deep_node_1".to_vec()], + SizedQuery::new(main_query.clone(), None, None), + ); + + let proof_no_limit = db.prove_query(&path_query_no_limit, None).unwrap().unwrap(); + let verification_result_no_limit = + GroveDb::verify_query_raw(&proof_no_limit, &path_query_no_limit); + + match verification_result_no_limit { + Ok((hash, result_set)) => { + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 29, "Expected 29 results without limit"); + } + Err(e) => { + panic!("Proof verification failed (no limit): {:?}", e); + } + } + } + + #[test] + fn test_path_query_proofs_with_direction() { + let temp_db = make_deep_tree(); + + // root + // deep_leaf + // deep_node_1 + // deeper_1 + // k1,v1 + // k2,v2 + // k3,v3 + // deeper_2 + // k4,v4 + // k5,v5 + // k6,v6 + // deep_node_2 + // deeper_3 + // k7,v7 + // k8,v8 + // k9,v9 + // deeper_4 + // k10,v10 + // k11,v11 + // deeper_5 + // k12,v12 + // k13,v13 + // k14,v14 + + let mut query = Query::new_with_direction(false); + query.insert_all(); + + let mut subquery = Query::new_with_direction(false); + subquery.insert_all(); + + let mut final_conditional_subquery = Query::new_with_direction(false); + final_conditional_subquery.insert_all(); + + let mut final_default_subquery = Query::new_with_direction(false); + final_default_subquery.insert_range_inclusive(b"key3".to_vec()..=b"key6".to_vec()); + + subquery.add_conditional_subquery( + QueryItem::Key(b"deeper_4".to_vec()), + None, + Some(final_conditional_subquery), + ); + subquery.set_subquery(final_default_subquery); + + query.set_subquery(subquery); + + let 
path_query = PathQuery::new( + vec![DEEP_LEAF.to_vec()], + SizedQuery::new(query, Some(6), None), /* we need 6 because of intermediate empty + * trees in proofs */ + ); + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + + let keys = [ + b"key11".to_vec(), + b"key10".to_vec(), + b"key6".to_vec(), + b"key5".to_vec(), + ]; + let values = [ + b"value11".to_vec(), + b"value10".to_vec(), + b"value6".to_vec(), + b"value5".to_vec(), + ]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + + // combined directions + let mut query = Query::new(); + query.insert_all(); + + let mut subq = Query::new_with_direction(false); + subq.insert_all(); + + let mut sub_subquery = Query::new(); + sub_subquery.insert_all(); + + subq.set_subquery(sub_subquery); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); + + let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + + assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 14); + + let keys = [ + b"key4".to_vec(), + b"key5".to_vec(), + b"key6".to_vec(), + b"key1".to_vec(), + b"key2".to_vec(), + b"key3".to_vec(), + b"key12".to_vec(), + b"key13".to_vec(), + b"key14".to_vec(), + b"key10".to_vec(), + b"key11".to_vec(), + b"key7".to_vec(), + b"key8".to_vec(), + b"key9".to_vec(), + ]; + let values = [ + b"value4".to_vec(), + b"value5".to_vec(), + b"value6".to_vec(), + b"value1".to_vec(), + b"value2".to_vec(), + 
b"value3".to_vec(), + b"value12".to_vec(), + b"value13".to_vec(), + b"value14".to_vec(), + b"value10".to_vec(), + b"value11".to_vec(), + b"value7".to_vec(), + b"value8".to_vec(), + b"value9".to_vec(), + ]; + let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); + compare_result_tuples(result_set, expected_result_set); + } + + #[test] + fn test_checkpoint() { + let db = make_test_grovedb(); + let element1 = Element::new_item(b"ayy".to_vec()); + + db.insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) + .unwrap() + .expect("cannot insert a subtree 1 into GroveDB"); + db.insert( + [b"key1".as_ref()].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, ) .unwrap() - .expect("expected successful get_query") - .to_key_elements(), - vec![ - (b"key3".to_vec(), Element::new_item(b"ayya".to_vec())), - (b"key4".to_vec(), Element::new_item(b"ayyb".to_vec())), - (b"key6".to_vec(), Element::new_item(b"ayyd".to_vec())), - ] - ); -} + .expect("cannot insert a subtree 2 into GroveDB"); + db.insert( + [b"key1".as_ref(), b"key2".as_ref()].as_ref(), + b"key3", + element1.clone(), + None, + None, + ) + .unwrap() + .expect("cannot insert an item into GroveDB"); + + assert_eq!( + db.get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) + .unwrap() + .expect("cannot get from grovedb"), + element1 + ); + + let tempdir_parent = TempDir::new().expect("cannot open tempdir"); + let checkpoint_tempdir = tempdir_parent.path().join("checkpoint"); + db.create_checkpoint(&checkpoint_tempdir) + .expect("cannot create checkpoint"); + + let checkpoint_db = + GroveDb::open(checkpoint_tempdir).expect("cannot open grovedb from checkpoint"); + + assert_eq!( + db.get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) + .unwrap() + .expect("cannot get from grovedb"), + element1 + ); + assert_eq!( + checkpoint_db + .get([b"key1".as_ref(), 
b"key2".as_ref()].as_ref(), b"key3", None) + .unwrap() + .expect("cannot get from checkpoint"), + element1 + ); + + let element2 = Element::new_item(b"ayy2".to_vec()); + let element3 = Element::new_item(b"ayy3".to_vec()); + + checkpoint_db + .insert( + [b"key1".as_ref()].as_ref(), + b"key4", + element2.clone(), + None, + None, + ) + .unwrap() + .expect("cannot insert into checkpoint"); + + db.insert( + [b"key1".as_ref()].as_ref(), + b"key4", + element3.clone(), + None, + None, + ) + .unwrap() + .expect("cannot insert into GroveDB"); + + assert_eq!( + checkpoint_db + .get([b"key1".as_ref()].as_ref(), b"key4", None) + .unwrap() + .expect("cannot get from checkpoint"), + element2, + ); + + assert_eq!( + db.get([b"key1".as_ref()].as_ref(), b"key4", None) + .unwrap() + .expect("cannot get from GroveDB"), + element3 + ); + + checkpoint_db + .insert( + [b"key1".as_ref()].as_ref(), + b"key5", + element3.clone(), + None, + None, + ) + .unwrap() + .expect("cannot insert into checkpoint"); + + db.insert([b"key1".as_ref()].as_ref(), b"key6", element3, None, None) + .unwrap() + .expect("cannot insert into GroveDB"); + + assert!(matches!( + checkpoint_db + .get([b"key1".as_ref()].as_ref(), b"key6", None) + .unwrap(), + Err(Error::PathKeyNotFound(_)) + )); + + assert!(matches!( + db.get([b"key1".as_ref()].as_ref(), b"key5", None).unwrap(), + Err(Error::PathKeyNotFound(_)) + )); + } + + #[test] + fn test_is_empty_tree() { + let db = make_test_grovedb(); + + // Create an empty tree with no elements + db.insert( + [TEST_LEAF].as_ref(), + b"innertree", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .unwrap(); + + assert!(db + .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None) + .unwrap() + .expect("path is valid tree")); + + // add an element to the tree to make it non empty + db.insert( + [TEST_LEAF, b"innertree"].as_ref(), + b"key1", + Element::new_item(b"hello".to_vec()), + None, + None, + ) + .unwrap() + .unwrap(); + assert!(!db + .is_empty_tree([TEST_LEAF, 
b"innertree"].as_ref(), None) + .unwrap() + .expect("path is valid tree")); + } + + #[test] + fn transaction_should_be_aborted_when_rollback_is_called() { + let item_key = b"key3"; + + let db = make_test_grovedb(); + let transaction = db.start_transaction(); + + let element1 = Element::new_item(b"ayy".to_vec()); + + let result = db + .insert( + [TEST_LEAF].as_ref(), + item_key, + element1, + None, + Some(&transaction), + ) + .unwrap(); + + assert!(matches!(result, Ok(()))); + + db.rollback_transaction(&transaction).unwrap(); + + let result = db + .get([TEST_LEAF].as_ref(), item_key, Some(&transaction)) + .unwrap(); + assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); + } + + #[test] + fn transaction_should_be_aborted() { + let db = make_test_grovedb(); + let transaction = db.start_transaction(); + + let item_key = b"key3"; + let element = Element::new_item(b"ayy".to_vec()); + + db.insert( + [TEST_LEAF].as_ref(), + item_key, + element, + None, + Some(&transaction), + ) + .unwrap() + .unwrap(); + + drop(transaction); + + // Transactional data shouldn't be committed to the main database + let result = db.get([TEST_LEAF].as_ref(), item_key, None).unwrap(); + assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); + } + + #[test] + fn test_subtree_pairs_iterator() { + let db = make_test_grovedb(); + let element = Element::new_item(b"ayy".to_vec()); + let element2 = Element::new_item(b"lmao".to_vec()); + + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"subtree1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"subtree1"].as_ref(), + b"subtree11", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), + b"key1", + element.clone(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + assert_eq!( + 
db.get( + [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), + b"key1", + None + ) + .unwrap() + .expect("successful get 1"), + element + ); + db.insert( + [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), + b"key0", + element.clone(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"subtree1"].as_ref(), + b"subtree12", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 3 insert"); + db.insert( + [TEST_LEAF, b"subtree1"].as_ref(), + b"key1", + element.clone(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"subtree1"].as_ref(), + b"key2", + element2.clone(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + + // Iterate over subtree1 to see if keys of other subtrees messed up + // let mut iter = db + // .elements_iterator([TEST_LEAF, b"subtree1"].as_ref(), None) + // .expect("cannot create iterator"); + let storage_context = db + .grove_db + .db + .get_storage_context([TEST_LEAF, b"subtree1"].as_ref().into(), None) + .unwrap(); + let mut iter = Element::iterator(storage_context.raw_iter()).unwrap(); + assert_eq!( + iter.next_element().unwrap().unwrap(), + Some((b"key1".to_vec(), element)) + ); + assert_eq!( + iter.next_element().unwrap().unwrap(), + Some((b"key2".to_vec(), element2)) + ); + let subtree_element = iter.next_element().unwrap().unwrap().unwrap(); + assert_eq!(subtree_element.0, b"subtree11".to_vec()); + assert!(matches!(subtree_element.1, Element::Tree(..))); + let subtree_element = iter.next_element().unwrap().unwrap().unwrap(); + assert_eq!(subtree_element.0, b"subtree12".to_vec()); + assert!(matches!(subtree_element.1, Element::Tree(..))); + assert!(matches!(iter.next_element().unwrap(), Ok(None))); + } + + #[test] + fn test_find_subtrees() { + let element = Element::new_item(b"ayy".to_vec()); + let db = make_test_grovedb(); + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + 
b"key1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element, + None, + None, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key4", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 3 insert"); + let subtrees = db + .find_subtrees(&[TEST_LEAF].as_ref().into(), None) + .unwrap() + .expect("cannot get subtrees"); + assert_eq!( + vec![ + vec![TEST_LEAF], + vec![TEST_LEAF, b"key1"], + vec![TEST_LEAF, b"key4"], + vec![TEST_LEAF, b"key1", b"key2"], + ], + subtrees + ); + } + + #[test] + fn test_root_subtree_has_root_key() { + let db = make_test_grovedb(); + let storage = db.db.get_storage_context(EMPTY_PATH, None).unwrap(); + let root_merk = Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .expect("expected to get root merk"); + let (_, root_key, _) = root_merk + .root_hash_key_and_sum() + .unwrap() + .expect("expected to get root hash, key and sum"); + assert!(root_key.is_some()) + } + + #[test] + fn test_get_subtree() { + let db = make_test_grovedb(); + let element = Element::new_item(b"ayy".to_vec()); + + // Returns error is subtree is not valid + { + let subtree = db.get([TEST_LEAF].as_ref(), b"invalid_tree", None).unwrap(); + assert!(subtree.is_err()); + + // Doesn't return an error for subtree that exists but empty + let subtree = db.get(EMPTY_PATH, TEST_LEAF, None).unwrap(); + assert!(subtree.is_ok()); + } + + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 1 insert"); + + let key1_tree = db 
+ .get(EMPTY_PATH, TEST_LEAF, None) + .unwrap() + .expect("expected to get a root tree"); + + assert!( + matches!(key1_tree, Element::Tree(Some(_), _)), + "{}", + format!( + "expected tree with root key, got {:?}", + if let Element::Tree(tree, ..) = key1_tree { + format!("{:?}", tree) + } else { + "not a tree".to_string() + } + ) + ); + + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 2 insert"); + + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element.clone(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key4", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 3 insert"); + + // Retrieve subtree instance + // Check if it returns the same instance that was inserted + { + let subtree_storage = db + .grove_db + .db + .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) + .unwrap(); + let subtree = Merk::open_layered_with_root_key( + subtree_storage, + Some(b"key3".to_vec()), + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .expect("cannot open merk"); + let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); + assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + } + // Insert a new tree with transaction + let transaction = db.start_transaction(); + + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"innertree", + Element::empty_tree(), + None, + Some(&transaction), + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"key1", b"innertree"].as_ref(), + b"key4", + element, + None, + Some(&transaction), + ) + .unwrap() + .expect("successful value insert"); + + // Retrieve subtree instance with transaction + let subtree_storage = db + .grove_db + .db + .get_transactional_storage_context( + 
[TEST_LEAF, b"key1", b"innertree"].as_ref().into(), + None, + &transaction, + ) + .unwrap(); + let subtree = Merk::open_layered_with_root_key( + subtree_storage, + Some(b"key4".to_vec()), + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .expect("cannot open merk"); + let result_element = Element::get(&subtree, b"key4", true).unwrap().unwrap(); + assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + + // Should be able to retrieve instances created before transaction + let subtree_storage = db + .grove_db + .db + .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) + .unwrap(); + let subtree = Merk::open_layered_with_root_key( + subtree_storage, + Some(b"key3".to_vec()), + false, + Some(&Element::value_defined_cost_for_serialized_value), + ) + .unwrap() + .expect("cannot open merk"); + let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); + assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + } -#[test] -fn test_aux_uses_separate_cf() { - let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); - // Insert some nested subtrees - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 1 insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree 2 insert"); - // Insert an element into subtree - db.insert( - [TEST_LEAF, b"key1", b"key2"].as_ref(), - b"key3", - element.clone(), - None, - None, - ) - .unwrap() - .expect("successful value insert"); + #[test] + fn test_get_full_query() { + let db = make_test_grovedb(); - db.put_aux(b"key1", b"a", None, None) + // Insert a couple of subtrees first + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), 
+ b"key2", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + // Insert some elements into subtree + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key3", + Element::new_item(b"ayya".to_vec()), + None, + None, + ) .unwrap() - .expect("cannot put aux"); - db.put_aux(b"key2", b"b", None, None) + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key4", + Element::new_item(b"ayyb".to_vec()), + None, + None, + ) .unwrap() - .expect("cannot put aux"); - db.put_aux(b"key3", b"c", None, None) + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key5", + Element::new_item(b"ayyc".to_vec()), + None, + None, + ) .unwrap() - .expect("cannot put aux"); - db.delete_aux(b"key3", None, None) + .expect("successful value insert"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"key6", + Element::new_item(b"ayyd".to_vec()), + None, + None, + ) .unwrap() - .expect("cannot delete from aux"); + .expect("successful value insert"); - assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) + // Test_Leaf + // ___________________________ + // / \ + // key1 key2 + // ___________________________ + // | | + // key4 key6 + // / \ + // key3 key5 + // + + let path1 = vec![TEST_LEAF.to_vec(), b"key1".to_vec()]; + let path2 = vec![TEST_LEAF.to_vec(), b"key2".to_vec()]; + let mut query1 = Query::new(); + let mut query2 = Query::new(); + query1.insert_range_inclusive(b"key3".to_vec()..=b"key4".to_vec()); + query2.insert_key(b"key6".to_vec()); + + let path_query1 = PathQuery::new_unsized(path1, query1); + // should get back key3, key4 + let path_query2 = PathQuery::new_unsized(path2, query2); + // should get back key6 + + assert_eq!( + db.query_many_raw( + &[&path_query1, &path_query2], + true, + true, + true, + QueryKeyElementPairResultType, + None + ) .unwrap() - .expect("cannot get element"), - element - ); - assert_eq!( - db.get_aux(b"key1", None) + 
.expect("expected successful get_query") + .to_key_elements(), + vec![ + (b"key3".to_vec(), Element::new_item(b"ayya".to_vec())), + (b"key4".to_vec(), Element::new_item(b"ayyb".to_vec())), + (b"key6".to_vec(), Element::new_item(b"ayyd".to_vec())), + ] + ); + } + + #[test] + fn test_aux_uses_separate_cf() { + let element = Element::new_item(b"ayy".to_vec()); + let db = make_test_grovedb(); + // Insert some nested subtrees + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 1 insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + element.clone(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + + db.put_aux(b"key1", b"a", None, None) .unwrap() - .expect("cannot get from aux"), - Some(b"a".to_vec()) - ); - assert_eq!( - db.get_aux(b"key2", None) + .expect("cannot put aux"); + db.put_aux(b"key2", b"b", None, None) .unwrap() - .expect("cannot get from aux"), - Some(b"b".to_vec()) - ); - assert_eq!( - db.get_aux(b"key3", None) + .expect("cannot put aux"); + db.put_aux(b"key3", b"c", None, None) .unwrap() - .expect("cannot get from aux"), - None - ); - assert_eq!( - db.get_aux(b"key4", None) + .expect("cannot put aux"); + db.delete_aux(b"key3", None, None) .unwrap() - .expect("cannot get from aux"), - None - ); -} + .expect("cannot delete from aux"); -#[test] -fn test_aux_with_transaction() { - let element = Element::new_item(b"ayy".to_vec()); - let aux_value = b"ayylmao".to_vec(); - let key = b"key".to_vec(); - let db = make_test_grovedb(); - let transaction = db.start_transaction(); + assert_eq!( + db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) + .unwrap() + .expect("cannot get element"), + element + ); + assert_eq!( + db.get_aux(b"key1", 
None) + .unwrap() + .expect("cannot get from aux"), + Some(b"a".to_vec()) + ); + assert_eq!( + db.get_aux(b"key2", None) + .unwrap() + .expect("cannot get from aux"), + Some(b"b".to_vec()) + ); + assert_eq!( + db.get_aux(b"key3", None) + .unwrap() + .expect("cannot get from aux"), + None + ); + assert_eq!( + db.get_aux(b"key4", None) + .unwrap() + .expect("cannot get from aux"), + None + ); + } - // Insert a regular data with aux data in the same transaction - db.insert( - [TEST_LEAF].as_ref(), - &key, - element, - None, - Some(&transaction), - ) - .unwrap() - .expect("unable to insert"); - db.put_aux(&key, &aux_value, None, Some(&transaction)) - .unwrap() - .expect("unable to insert aux value"); - assert_eq!( - db.get_aux(&key, Some(&transaction)) - .unwrap() - .expect("unable to get aux value"), - Some(aux_value.clone()) - ); - // Cannot reach the data outside of transaction - assert_eq!( - db.get_aux(&key, None) - .unwrap() - .expect("unable to get aux value"), - None - ); - // And should be able to get data when committed - db.commit_transaction(transaction) - .unwrap() - .expect("unable to commit transaction"); - assert_eq!( - db.get_aux(&key, None) - .unwrap() - .expect("unable to get committed aux value"), - Some(aux_value) - ); -} + #[test] + fn test_aux_with_transaction() { + let element = Element::new_item(b"ayy".to_vec()); + let aux_value = b"ayylmao".to_vec(); + let key = b"key".to_vec(); + let db = make_test_grovedb(); + let transaction = db.start_transaction(); -#[test] -fn test_root_hash() { - let db = make_test_grovedb(); - // Check hashes are different if tree is edited - let old_root_hash = db.root_hash(None).unwrap(); - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::new_item(b"ayy".to_vec()), - None, - None, - ) - .unwrap() - .expect("unable to insert an item"); - assert_ne!(old_root_hash.unwrap(), db.root_hash(None).unwrap().unwrap()); + // Insert a regular data with aux data in the same transaction + db.insert( + [TEST_LEAF].as_ref(), 
+ &key, + element, + None, + Some(&transaction), + ) + .unwrap() + .expect("unable to insert"); + db.put_aux(&key, &aux_value, None, Some(&transaction)) + .unwrap() + .expect("unable to insert aux value"); + assert_eq!( + db.get_aux(&key, Some(&transaction)) + .unwrap() + .expect("unable to get aux value"), + Some(aux_value.clone()) + ); + // Cannot reach the data outside of transaction + assert_eq!( + db.get_aux(&key, None) + .unwrap() + .expect("unable to get aux value"), + None + ); + // And should be able to get data when committed + db.commit_transaction(transaction) + .unwrap() + .expect("unable to commit transaction"); + assert_eq!( + db.get_aux(&key, None) + .unwrap() + .expect("unable to get committed aux value"), + Some(aux_value) + ); + } - // Check isolation - let transaction = db.start_transaction(); + #[test] + fn test_root_hash() { + let db = make_test_grovedb(); + // Check hashes are different if tree is edited + let old_root_hash = db.root_hash(None).unwrap(); + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::new_item(b"ayy".to_vec()), + None, + None, + ) + .unwrap() + .expect("unable to insert an item"); + assert_ne!(old_root_hash.unwrap(), db.root_hash(None).unwrap().unwrap()); - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::new_item(b"ayy".to_vec()), - None, - Some(&transaction), - ) - .unwrap() - .expect("unable to insert an item"); - let root_hash_outside = db.root_hash(None).unwrap().unwrap(); - assert_ne!( - db.root_hash(Some(&transaction)).unwrap().unwrap(), - root_hash_outside - ); - - assert_eq!(db.root_hash(None).unwrap().unwrap(), root_hash_outside); - db.commit_transaction(transaction).unwrap().unwrap(); - assert_ne!(db.root_hash(None).unwrap().unwrap(), root_hash_outside); -} + // Check isolation + let transaction = db.start_transaction(); -#[test] -fn test_get_non_existing_root_leaf() { - let db = make_test_grovedb(); - assert!(db.get(EMPTY_PATH, b"ayy", None).unwrap().is_err()); -} + db.insert( + 
[TEST_LEAF].as_ref(), + b"key2", + Element::new_item(b"ayy".to_vec()), + None, + Some(&transaction), + ) + .unwrap() + .expect("unable to insert an item"); + let root_hash_outside = db.root_hash(None).unwrap().unwrap(); + assert_ne!( + db.root_hash(Some(&transaction)).unwrap().unwrap(), + root_hash_outside + ); -#[test] -fn test_check_subtree_exists_function() { - let db = make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"key_scalar", - Element::new_item(b"ayy".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert item"); - db.insert( - [TEST_LEAF].as_ref(), - b"key_subtree", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("cannot insert item"); + assert_eq!(db.root_hash(None).unwrap().unwrap(), root_hash_outside); + db.commit_transaction(transaction).unwrap().unwrap(); + assert_ne!(db.root_hash(None).unwrap().unwrap(), root_hash_outside); + } - // Empty tree path means root always exist - assert!(db - .check_subtree_exists_invalid_path(EMPTY_PATH, None) - .unwrap() - .is_ok()); + #[test] + fn test_get_non_existing_root_leaf() { + let db = make_test_grovedb(); + assert!(db.get(EMPTY_PATH, b"ayy", None).unwrap().is_err()); + } - // TEST_LEAF should be a tree - assert!(db - .check_subtree_exists_invalid_path([TEST_LEAF].as_ref().into(), None) + #[test] + fn test_check_subtree_exists_function() { + let db = make_test_grovedb(); + db.insert( + [TEST_LEAF].as_ref(), + b"key_scalar", + Element::new_item(b"ayy".to_vec()), + None, + None, + ) .unwrap() - .is_ok()); - - // TEST_LEAF.key_subtree should be a tree - assert!(db - .check_subtree_exists_invalid_path([TEST_LEAF, b"key_subtree"].as_ref().into(), None) + .expect("cannot insert item"); + db.insert( + [TEST_LEAF].as_ref(), + b"key_subtree", + Element::empty_tree(), + None, + None, + ) .unwrap() - .is_ok()); + .expect("cannot insert item"); - // TEST_LEAF.key_scalar should NOT be a tree - assert!(matches!( - db.check_subtree_exists_invalid_path([TEST_LEAF, 
b"key_scalar"].as_ref().into(), None) - .unwrap(), - Err(Error::InvalidPath(_)) - )); -} + // Empty tree path means root always exist + assert!(db + .check_subtree_exists_invalid_path(EMPTY_PATH, None) + .unwrap() + .is_ok()); -#[test] -fn test_tree_value_exists_method_no_tx() { - let db = make_test_grovedb(); - // Test keys in non-root tree - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::new_item(b"ayy".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert item"); - assert!(db - .has_raw([TEST_LEAF].as_ref(), b"key", None) - .unwrap() - .unwrap()); - assert!(!db - .has_raw([TEST_LEAF].as_ref(), b"badkey", None) - .unwrap() - .unwrap()); + // TEST_LEAF should be a tree + assert!(db + .check_subtree_exists_invalid_path([TEST_LEAF].as_ref().into(), None) + .unwrap() + .is_ok()); + + // TEST_LEAF.key_subtree should be a tree + assert!(db + .check_subtree_exists_invalid_path([TEST_LEAF, b"key_subtree"].as_ref().into(), None) + .unwrap() + .is_ok()); + + // TEST_LEAF.key_scalar should NOT be a tree + assert!(matches!( + db.check_subtree_exists_invalid_path([TEST_LEAF, b"key_scalar"].as_ref().into(), None) + .unwrap(), + Err(Error::InvalidPath(_)) + )); + } - // Test keys for a root tree - db.insert(EMPTY_PATH, b"leaf", Element::empty_tree(), None, None) + #[test] + fn test_tree_value_exists_method_no_tx() { + let db = make_test_grovedb(); + // Test keys in non-root tree + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::new_item(b"ayy".to_vec()), + None, + None, + ) .unwrap() .expect("cannot insert item"); + assert!(db + .has_raw([TEST_LEAF].as_ref(), b"key", None) + .unwrap() + .unwrap()); + assert!(!db + .has_raw([TEST_LEAF].as_ref(), b"badkey", None) + .unwrap() + .unwrap()); - assert!(db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); - assert!(db.has_raw(EMPTY_PATH, TEST_LEAF, None).unwrap().unwrap()); - assert!(!db.has_raw(EMPTY_PATH, b"badleaf", None).unwrap().unwrap()); -} + // Test keys for a root tree + 
db.insert(EMPTY_PATH, b"leaf", Element::empty_tree(), None, None) + .unwrap() + .expect("cannot insert item"); -#[test] -fn test_tree_value_exists_method_tx() { - let db = make_test_grovedb(); - let tx = db.start_transaction(); - // Test keys in non-root tree - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::new_item(b"ayy".to_vec()), - None, - Some(&tx), - ) - .unwrap() - .expect("cannot insert item"); - assert!(db - .has_raw([TEST_LEAF].as_ref(), b"key", Some(&tx)) - .unwrap() - .unwrap()); - assert!(!db - .has_raw([TEST_LEAF].as_ref(), b"key", None) - .unwrap() - .unwrap()); + assert!(db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); + assert!(db.has_raw(EMPTY_PATH, TEST_LEAF, None).unwrap().unwrap()); + assert!(!db.has_raw(EMPTY_PATH, b"badleaf", None).unwrap().unwrap()); + } - // Test keys for a root tree - db.insert(EMPTY_PATH, b"leaf", Element::empty_tree(), None, Some(&tx)) + #[test] + fn test_tree_value_exists_method_tx() { + let db = make_test_grovedb(); + let tx = db.start_transaction(); + // Test keys in non-root tree + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::new_item(b"ayy".to_vec()), + None, + Some(&tx), + ) .unwrap() .expect("cannot insert item"); - assert!(db.has_raw(EMPTY_PATH, b"leaf", Some(&tx)).unwrap().unwrap()); - assert!(!db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); + assert!(db + .has_raw([TEST_LEAF].as_ref(), b"key", Some(&tx)) + .unwrap() + .unwrap()); + assert!(!db + .has_raw([TEST_LEAF].as_ref(), b"key", None) + .unwrap() + .unwrap()); - db.commit_transaction(tx) - .unwrap() - .expect("cannot commit transaction"); - assert!(db - .has_raw([TEST_LEAF].as_ref(), b"key", None) + // Test keys for a root tree + db.insert(EMPTY_PATH, b"leaf", Element::empty_tree(), None, Some(&tx)) + .unwrap() + .expect("cannot insert item"); + assert!(db.has_raw(EMPTY_PATH, b"leaf", Some(&tx)).unwrap().unwrap()); + assert!(!db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); + + db.commit_transaction(tx) + 
.unwrap() + .expect("cannot commit transaction"); + assert!(db + .has_raw([TEST_LEAF].as_ref(), b"key", None) + .unwrap() + .unwrap()); + assert!(db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); + } + + #[test] + fn test_storage_wipe() { + let db = make_test_grovedb(); + let _path = db._tmp_dir.path(); + + // Test keys in non-root tree + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::new_item(b"ayy".to_vec()), + None, + None, + ) .unwrap() - .unwrap()); - assert!(db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); -} + .expect("cannot insert item"); -#[test] -fn test_storage_wipe() { - let db = make_test_grovedb(); - let _path = db._tmp_dir.path(); + // retrieve key before wipe + let elem = db.get(&[TEST_LEAF], b"key", None).unwrap().unwrap(); + assert_eq!(elem, Element::new_item(b"ayy".to_vec())); - // Test keys in non-root tree - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::new_item(b"ayy".to_vec()), - None, - None, - ) - .unwrap() - .expect("cannot insert item"); - - // retrieve key before wipe - let elem = db.get(&[TEST_LEAF], b"key", None).unwrap().unwrap(); - assert_eq!(elem, Element::new_item(b"ayy".to_vec())); - - // wipe the database - db.grove_db.wipe().unwrap(); - - // retrieve key after wipe - let elem_result = db.get(&[TEST_LEAF], b"key", None).unwrap(); - assert!(elem_result.is_err()); - assert!(matches!( - elem_result, - Err(Error::PathParentLayerNotFound(..)) - )); + // wipe the database + db.grove_db.wipe().unwrap(); + + // retrieve key after wipe + let elem_result = db.get(&[TEST_LEAF], b"key", None).unwrap(); + assert!(elem_result.is_err()); + assert!(matches!( + elem_result, + Err(Error::PathParentLayerNotFound(..)) + )); + } } diff --git a/grovedb/src/tests/query_tests.rs b/grovedb/src/tests/query_tests.rs index 579b2e42..e7325a6a 100644 --- a/grovedb/src/tests/query_tests.rs +++ b/grovedb/src/tests/query_tests.rs @@ -1,130 +1,168 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission 
is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! 
Query tests - -use grovedb_merk::proofs::{query::QueryItem, Query}; -use rand::Rng; -use tempfile::TempDir; - -use crate::{ - batch::GroveDbOp, - query_result_type::{PathKeyOptionalElementTrio, QueryResultType}, - reference_path::ReferencePathType, - tests::{ - common::compare_result_sets, make_deep_tree, make_test_grovedb, TempGroveDb, - ANOTHER_TEST_LEAF, TEST_LEAF, - }, - Element, GroveDb, PathQuery, SizedQuery, -}; - -fn populate_tree_for_non_unique_range_subquery(db: &TempGroveDb) { - // Insert a couple of subtrees first - for i in 1985u32..2000 { - let i_vec = i.to_be_bytes().to_vec(); - db.insert( - [TEST_LEAF].as_ref(), - &i_vec, - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert element 0 - // Insert some elements into subtree - db.insert( - [TEST_LEAF, i_vec.as_slice()].as_ref(), - b"\0", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); +mod tests { + //! Query tests + + use grovedb_merk::proofs::{query::QueryItem, Query}; + use rand::Rng; + use tempfile::TempDir; + + use crate::{ + batch::GroveDbOp, + query_result_type::{ + PathKeyOptionalElementTrio, QueryResultElement::PathKeyElementTrioResultItem, + QueryResultElements, QueryResultType, + }, + reference_path::ReferencePathType, + tests::{ + common::compare_result_sets, make_deep_tree, make_test_grovedb, TempGroveDb, + ANOTHER_TEST_LEAF, TEST_LEAF, + }, + Element, GroveDb, PathQuery, SizedQuery, + }; - for j in 100u32..150 { - let mut j_vec = i_vec.clone(); - j_vec.append(&mut j.to_be_bytes().to_vec()); + fn populate_tree_for_non_unique_range_subquery(db: &TempGroveDb) { + // Insert a couple of subtrees first + for i in 1985u32..2000 { + let i_vec = i.to_be_bytes().to_vec(); db.insert( - [TEST_LEAF, i_vec.as_slice(), b"\0"].as_ref(), - &j_vec.clone(), - Element::new_item(j_vec), + [TEST_LEAF].as_ref(), + &i_vec, + Element::empty_tree(), None, None, ) .unwrap() - .expect("successful value insert"); 
+ .expect("successful subtree insert"); + // Insert element 0 + // Insert some elements into subtree + db.insert( + [TEST_LEAF, i_vec.as_slice()].as_ref(), + b"\0", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + + for j in 100u32..150 { + let mut j_vec = i_vec.clone(); + j_vec.append(&mut j.to_be_bytes().to_vec()); + db.insert( + [TEST_LEAF, i_vec.as_slice(), b"\0"].as_ref(), + &j_vec.clone(), + Element::new_item(j_vec), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + } } } -} -fn populate_tree_for_non_unique_double_range_subquery(db: &TempGroveDb) { - // Insert a couple of subtrees first - for i in 0u32..10 { - let i_vec = i.to_be_bytes().to_vec(); + fn populate_tree_for_non_unique_double_range_subquery(db: &TempGroveDb) { + // Insert a couple of subtrees first + for i in 0u32..10 { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF].as_ref(), + &i_vec, + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + // Insert element 0 + // Insert some elements into subtree + db.insert( + [TEST_LEAF, i_vec.as_slice()].as_ref(), + b"a", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + + for j in 25u32..50 { + let j_vec = j.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF, i_vec.as_slice(), b"a"].as_ref(), + &j_vec, + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + + // Insert element 0 + // Insert some elements into subtree + db.insert( + [TEST_LEAF, i_vec.as_slice(), b"a", j_vec.as_slice()].as_ref(), + b"\0", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + + for k in 100u32..110 { + let k_vec = k.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF, i_vec.as_slice(), b"a", &j_vec, b"\0"].as_ref(), + &k_vec.clone(), + Element::new_item(k_vec), + None, + None, + ) + .unwrap() + .expect("successful 
value insert"); + } + } + } + } + + fn populate_tree_by_reference_for_non_unique_range_subquery(db: &TempGroveDb) { + // This subtree will be holding values db.insert( [TEST_LEAF].as_ref(), - &i_vec, + b"\0", Element::empty_tree(), None, None, ) .unwrap() .expect("successful subtree insert"); - // Insert element 0 - // Insert some elements into subtree + + // This subtree will be holding references db.insert( - [TEST_LEAF, i_vec.as_slice()].as_ref(), - b"a", + [TEST_LEAF].as_ref(), + b"1", Element::empty_tree(), None, None, ) .unwrap() .expect("successful subtree insert"); - - for j in 25u32..50 { - let j_vec = j.to_be_bytes().to_vec(); + // Insert a couple of subtrees first + for i in 1985u32..2000 { + let i_vec = i.to_be_bytes().to_vec(); db.insert( - [TEST_LEAF, i_vec.as_slice(), b"a"].as_ref(), - &j_vec, + [TEST_LEAF, b"1"].as_ref(), + &i_vec, Element::empty_tree(), None, None, ) .unwrap() - .expect("successful value insert"); - + .expect("successful subtree insert"); // Insert element 0 // Insert some elements into subtree db.insert( - [TEST_LEAF, i_vec.as_slice(), b"a", j_vec.as_slice()].as_ref(), + [TEST_LEAF, b"1", i_vec.as_slice()].as_ref(), b"\0", Element::empty_tree(), None, @@ -133,12 +171,30 @@ fn populate_tree_for_non_unique_double_range_subquery(db: &TempGroveDb) { .unwrap() .expect("successful subtree insert"); - for k in 100u32..110 { - let k_vec = k.to_be_bytes().to_vec(); + for j in 100u32..150 { + let random_key = rand::thread_rng().gen::<[u8; 32]>(); + let mut j_vec = i_vec.clone(); + j_vec.append(&mut j.to_be_bytes().to_vec()); + + // We should insert every item to the tree holding items + db.insert( + [TEST_LEAF, b"\0"].as_ref(), + &random_key, + Element::new_item(j_vec.clone()), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + db.insert( - [TEST_LEAF, i_vec.as_slice(), b"a", &j_vec, b"\0"].as_ref(), - &k_vec.clone(), - Element::new_item(k_vec), + [TEST_LEAF, b"1", i_vec.clone().as_slice(), b"\0"].as_ref(), + 
&random_key, + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"\0".to_vec(), + random_key.to_vec(), + ])), None, None, ) @@ -147,47 +203,49 @@ fn populate_tree_for_non_unique_double_range_subquery(db: &TempGroveDb) { } } } -} -fn populate_tree_by_reference_for_non_unique_range_subquery(db: &TempGroveDb) { - // This subtree will be holding values - db.insert( - [TEST_LEAF].as_ref(), - b"\0", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - // This subtree will be holding references - db.insert( - [TEST_LEAF].as_ref(), - b"1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert a couple of subtrees first - for i in 1985u32..2000 { - let i_vec = i.to_be_bytes().to_vec(); + fn populate_tree_for_unique_range_subquery(db: &TempGroveDb) { + // Insert a couple of subtrees first + for i in 1985u32..2000 { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF].as_ref(), + &i_vec, + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, &i_vec.clone()].as_ref(), + b"\0", + Element::new_item(i_vec), + None, + None, + ) + .unwrap() + .expect("successful value insert"); + } + } + + fn populate_tree_by_reference_for_unique_range_subquery(db: &TempGroveDb) { + // This subtree will be holding values db.insert( - [TEST_LEAF, b"1"].as_ref(), - &i_vec, + [TEST_LEAF].as_ref(), + b"\0", Element::empty_tree(), None, None, ) .unwrap() .expect("successful subtree insert"); - // Insert element 0 - // Insert some elements into subtree + + // This subtree will be holding references db.insert( - [TEST_LEAF, b"1", i_vec.as_slice()].as_ref(), - b"\0", + [TEST_LEAF].as_ref(), + b"1", Element::empty_tree(), None, None, @@ -195,29 +253,37 @@ fn populate_tree_by_reference_for_non_unique_range_subquery(db: &TempGroveDb) { .unwrap() .expect("successful subtree 
insert"); - for j in 100u32..150 { - let random_key = rand::thread_rng().gen::<[u8; 32]>(); - let mut j_vec = i_vec.clone(); - j_vec.append(&mut j.to_be_bytes().to_vec()); + for i in 1985u32..2000 { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF, b"1"].as_ref(), + &i_vec, + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); // We should insert every item to the tree holding items db.insert( [TEST_LEAF, b"\0"].as_ref(), - &random_key, - Element::new_item(j_vec.clone()), + &i_vec, + Element::new_item(i_vec.clone()), None, None, ) .unwrap() .expect("successful value insert"); + // We should insert a reference to the item db.insert( - [TEST_LEAF, b"1", i_vec.clone().as_slice(), b"\0"].as_ref(), - &random_key, + [TEST_LEAF, b"1", i_vec.clone().as_slice()].as_ref(), + b"\0", Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ TEST_LEAF.to_vec(), b"\0".to_vec(), - random_key.to_vec(), + i_vec.clone(), ])), None, None, @@ -226,2461 +292,2519 @@ fn populate_tree_by_reference_for_non_unique_range_subquery(db: &TempGroveDb) { .expect("successful value insert"); } } -} -fn populate_tree_for_unique_range_subquery(db: &TempGroveDb) { - // Insert a couple of subtrees first - for i in 1985u32..2000 { - let i_vec = i.to_be_bytes().to_vec(); + fn populate_tree_for_unique_range_subquery_with_non_unique_null_values(db: &mut TempGroveDb) { + populate_tree_for_unique_range_subquery(db); + db.insert([TEST_LEAF].as_ref(), &[], Element::empty_tree(), None, None) + .unwrap() + .expect("successful subtree insert"); db.insert( - [TEST_LEAF].as_ref(), - &i_vec, + [TEST_LEAF, &[]].as_ref(), + b"\0", Element::empty_tree(), None, None, ) .unwrap() .expect("successful subtree insert"); + // Insert a couple of subtrees first + for i in 100u32..200 { + let i_vec = i.to_be_bytes().to_vec(); + db.insert( + [TEST_LEAF, &[], b"\0"].as_ref(), + &i_vec, + Element::new_item(i_vec.clone()), + None, + None, + ) + .unwrap() + 
.expect("successful value insert"); + } + } + fn populate_tree_for_uneven_keys(db: &TempGroveDb) { db.insert( - [TEST_LEAF, &i_vec.clone()].as_ref(), - b"\0", - Element::new_item(i_vec), + [TEST_LEAF].as_ref(), + "b".as_ref(), + Element::new_item(1u8.to_be_bytes().to_vec()), None, None, ) .unwrap() - .expect("successful value insert"); - } -} + .expect("successful subtree insert"); -fn populate_tree_by_reference_for_unique_range_subquery(db: &TempGroveDb) { - // This subtree will be holding values - db.insert( - [TEST_LEAF].as_ref(), - b"\0", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - // This subtree will be holding references - db.insert( - [TEST_LEAF].as_ref(), - b"1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - for i in 1985u32..2000 { - let i_vec = i.to_be_bytes().to_vec(); db.insert( - [TEST_LEAF, b"1"].as_ref(), - &i_vec, - Element::empty_tree(), + [TEST_LEAF].as_ref(), + "ab".as_ref(), + Element::new_item(2u8.to_be_bytes().to_vec()), None, None, ) .unwrap() .expect("successful subtree insert"); - // We should insert every item to the tree holding items db.insert( - [TEST_LEAF, b"\0"].as_ref(), - &i_vec, - Element::new_item(i_vec.clone()), + [TEST_LEAF].as_ref(), + "x".as_ref(), + Element::new_item(3u8.to_be_bytes().to_vec()), None, None, ) .unwrap() - .expect("successful value insert"); + .expect("successful subtree insert"); - // We should insert a reference to the item db.insert( - [TEST_LEAF, b"1", i_vec.clone().as_slice()].as_ref(), - b"\0", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"\0".to_vec(), - i_vec.clone(), - ])), + [TEST_LEAF].as_ref(), + &[3; 32], + Element::new_item(4u8.to_be_bytes().to_vec()), None, None, ) - .unwrap() - .expect("successful value insert"); - } -} - -fn populate_tree_for_unique_range_subquery_with_non_unique_null_values(db: &mut TempGroveDb) { - 
populate_tree_for_unique_range_subquery(db); - db.insert([TEST_LEAF].as_ref(), &[], Element::empty_tree(), None, None) .unwrap() .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, &[]].as_ref(), - b"\0", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - // Insert a couple of subtrees first - for i in 100u32..200 { - let i_vec = i.to_be_bytes().to_vec(); + db.insert( - [TEST_LEAF, &[], b"\0"].as_ref(), - &i_vec, - Element::new_item(i_vec.clone()), + [TEST_LEAF].as_ref(), + "k".as_ref(), + Element::new_item(5u8.to_be_bytes().to_vec()), None, None, ) .unwrap() - .expect("successful value insert"); + .expect("successful subtree insert"); } -} -#[test] -fn test_get_range_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + #[test] + fn test_get_correct_order() { + let db = make_test_grovedb(); + populate_tree_for_uneven_keys(&db); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); + let path = vec![TEST_LEAF.to_vec()]; + let query = Query::new_range_full(); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let path_query = PathQuery::new_unsized(path, query.clone()); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path_query = PathQuery::new_unsized(path, query.clone()); + assert_eq!(elements, vec![vec![4], vec![2], vec![1], vec![5], vec![3]]); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_query_with_non_unique_subquery() { + let db = make_test_grovedb(); + 
populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 200); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); - let mut first_value = 1988_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let mut last_value = 1991_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 200); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_query_with_unique_subquery() { - let mut db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&mut db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 200); - let subquery_key: Vec = b"\0".to_vec(); + let mut first_value = 1988_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); + let mut last_value = 1991_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let 
path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 200); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_query_with_unique_subquery() { + let mut db = make_test_grovedb(); + populate_tree_for_unique_range_subquery(&mut db); - assert_eq!(elements.len(), 4); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); - let first_value = 1988_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); - let last_value = 1991_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_query_with_unique_subquery_on_references() { - let db = make_test_grovedb(); - populate_tree_by_reference_for_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; - let mut query = Query::new(); - query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 4); - let 
subquery_key: Vec = b"\0".to_vec(); + let first_value = 1988_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); + let last_value = 1991_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_query_with_unique_subquery_on_references() { + let db = make_test_grovedb(); + populate_tree_by_reference_for_unique_range_subquery(&db); - assert_eq!(elements.len(), 4); + let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; + let mut query = Query::new(); + query.insert_range(1988_u32.to_be_bytes().to_vec()..1992_u32.to_be_bytes().to_vec()); - let first_value = 1988_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); - let last_value = 1991_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_query_with_unique_subquery_with_non_unique_null_values() { - let mut db = make_test_grovedb(); - populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db); + let 
(elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_all(); + assert_eq!(elements.len(), 4); - let subquery_key: Vec = b"\0".to_vec(); + let first_value = 1988_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); + let last_value = 1991_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let mut subquery = Query::new(); - subquery.insert_all(); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + compare_result_sets(&elements, &result_set); + } - query.add_conditional_subquery( - QueryItem::Key(b"".to_vec()), - Some(vec![b"\0".to_vec()]), - Some(subquery), - ); + #[test] + fn test_get_range_query_with_unique_subquery_with_non_unique_null_values() { + let mut db = make_test_grovedb(); + populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_all(); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let subquery_key: Vec = b"\0".to_vec(); - assert_eq!(elements.len(), 115); + query.set_subquery_key(subquery_key); - let first_value = 100_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let mut subquery = Query::new(); + subquery.insert_all(); - let last_value = 1999_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + query.add_conditional_subquery( + QueryItem::Key(b"".to_vec()), + Some(vec![b"\0".to_vec()]), + 
Some(subquery), + ); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 115); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_query_with_unique_subquery_ignore_non_unique_null_values() { - let mut db = make_test_grovedb(); - populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_all(); + assert_eq!(elements.len(), 115); - let subquery_key: Vec = b"\0".to_vec(); + let first_value = 100_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); + let last_value = 1999_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let subquery = Query::new(); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 115); + compare_result_sets(&elements, &result_set); + } - query.add_conditional_subquery( - QueryItem::Key(b"".to_vec()), - Some(vec![b"\0".to_vec()]), - Some(subquery), - ); + #[test] + fn test_get_range_query_with_unique_subquery_ignore_non_unique_null_values() { + let mut db = make_test_grovedb(); + populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db); - let path_query = PathQuery::new_unsized(path, query.clone()); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_all(); - let (elements, _) = db - 
.query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let subquery_key: Vec = b"\0".to_vec(); - assert_eq!(elements.len(), 15); + query.set_subquery_key(subquery_key); - let first_value = 1985_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let subquery = Query::new(); - let last_value = 1999_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + // This conditional subquery expresses that we do not want to get values in "" + // tree + query.add_conditional_subquery( + QueryItem::Key(b"".to_vec()), + Some(vec![b"\0".to_vec()]), // We want to go into 0 but we don't want to get anything + Some(subquery), + ); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 15); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_inclusive_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_inclusive(1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 15); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let first_value = 1985_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let last_value = 1999_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = 
PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 15); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_inclusive_query_with_non_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 400); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_inclusive( + 1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec(), + ); - let mut first_value = 1988_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let mut last_value = 1995_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 400); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_inclusive_query_with_non_unique_subquery_on_references() { - let db = make_test_grovedb(); - populate_tree_by_reference_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + 
.expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; - let mut query = Query::new(); - query.insert_range_inclusive(1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 400); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let mut first_value = 1988_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let mut last_value = 1995_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 400); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); - - assert_eq!(elements.len(), 400); - - let mut first_value = 1988_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - // using contains as the elements get stored at random key locations - // hence impossible to predict the final location - // but must exist - assert!(elements.contains(&first_value)); - - let mut last_value = 1995_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert!(elements.contains(&last_value)); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - 
assert_eq!(result_set.len(), 400); - compare_result_sets(&elements, &result_set); -} + #[test] + fn test_get_range_inclusive_query_with_non_unique_subquery_on_references() { + let db = make_test_grovedb(); + populate_tree_by_reference_for_non_unique_range_subquery(&db); -#[test] -fn test_get_range_inclusive_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; + let mut query = Query::new(); + query.insert_range_inclusive( + 1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec(), + ); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_inclusive(1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec()); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let subquery_key: Vec = b"\0".to_vec(); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - query.set_subquery_key(subquery_key); + let path_query = PathQuery::new_unsized(path, query.clone()); - let path_query = PathQuery::new_unsized(path, query.clone()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!(elements.len(), 400); + + let mut first_value = 1988_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + // using contains as the elements get stored at random key locations + // hence impossible to predict the final location + // but must exist + assert!(elements.contains(&first_value)); + + let mut last_value = 1995_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert!(elements.contains(&last_value)); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, 
db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 400); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_inclusive_query_with_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_unique_range_subquery(&db); - assert_eq!(elements.len(), 8); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_inclusive( + 1988_u32.to_be_bytes().to_vec()..=1995_u32.to_be_bytes().to_vec(), + ); - let first_value = 1988_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); - let last_value = 1995_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 8); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_from_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_from(1995_u32.to_be_bytes().to_vec()..); + assert_eq!(elements.len(), 8); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let first_value = 1988_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let 
last_value = 1995_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 8); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_from_query_with_non_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 250); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_from(1995_u32.to_be_bytes().to_vec()..); - let mut first_value = 1995_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let mut last_value = 1999_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 250); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_from_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, 
None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_from(1995_u32.to_be_bytes().to_vec()..); + assert_eq!(elements.len(), 250); - let subquery_key: Vec = b"\0".to_vec(); + let mut first_value = 1995_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); + let mut last_value = 1999_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 250); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_from_query_with_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_unique_range_subquery(&db); - assert_eq!(elements.len(), 5); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_from(1995_u32.to_be_bytes().to_vec()..); - let first_value = 1995_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); - let last_value = 1999_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - 
compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_to_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_to(..1995_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 5); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let first_value = 1995_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let last_value = 1999_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_to_query_with_non_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 500); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_to(..1995_u32.to_be_bytes().to_vec()); - let mut first_value = 1985_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + 
subquery.insert_all(); - let mut last_value = 1994_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 500); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_to_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_to(..1995_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 500); - let subquery_key: Vec = b"\0".to_vec(); + let mut first_value = 1985_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); + let mut last_value = 1994_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 500); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn 
test_get_range_to_query_with_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_unique_range_subquery(&db); - assert_eq!(elements.len(), 10); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_to(..1995_u32.to_be_bytes().to_vec()); - let first_value = 1985_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); - let last_value = 1994_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 10); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_to_inclusive_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_to_inclusive(..=1995_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 10); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let first_value = 1985_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let last_value = 1994_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); 
+ assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 10); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_to_inclusive_query_with_non_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 550); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_to_inclusive(..=1995_u32.to_be_bytes().to_vec()); - let mut first_value = 1985_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let mut last_value = 1995_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 550); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new_with_direction(false); - query.insert_range_to_inclusive(..=5000_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 
550); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new_with_direction(false); - subquery.insert_all(); + let mut first_value = 1985_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let mut last_value = 1995_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 550); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 750); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new_with_direction(false); + query.insert_range_to_inclusive(..=5000_u32.to_be_bytes().to_vec()); - let mut first_value = 1999_u32.to_be_bytes().to_vec(); - first_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new_with_direction(false); + subquery.insert_all(); - let mut last_value = 1985_u32.to_be_bytes().to_vec(); - last_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let 
(hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 750); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_to_inclusive_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_to_inclusive(..=1995_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 750); - let subquery_key: Vec = b"\0".to_vec(); + let mut first_value = 1999_u32.to_be_bytes().to_vec(); + first_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); + let mut last_value = 1985_u32.to_be_bytes().to_vec(); + last_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 750); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_to_inclusive_query_with_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_unique_range_subquery(&db); - assert_eq!(elements.len(), 11); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + 
query.insert_range_to_inclusive(..=1995_u32.to_be_bytes().to_vec()); - let first_value = 1985_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); - let last_value = 1995_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 11); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_after_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_after(1995_u32.to_be_bytes().to_vec()..); + assert_eq!(elements.len(), 11); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let first_value = 1985_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let last_value = 1995_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 11); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - 
.expect("expected successful get_path_query"); + #[test] + fn test_get_range_after_query_with_non_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 200); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_after(1995_u32.to_be_bytes().to_vec()..); - let mut first_value = 1996_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let mut last_value = 1999_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 200); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_after_to_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_after_to(1995_u32.to_be_bytes().to_vec()..1997_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 200); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let mut first_value = 1996_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - 
query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let mut last_value = 1999_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 200); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_after_to_query_with_non_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 50); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_after_to( + 1995_u32.to_be_bytes().to_vec()..1997_u32.to_be_bytes().to_vec(), + ); - let mut first_value = 1996_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let mut last_value = 1996_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 50); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn 
test_get_range_after_to_inclusive_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_after_to_inclusive( - 1995_u32.to_be_bytes().to_vec()..=1997_u32.to_be_bytes().to_vec(), - ); + assert_eq!(elements.len(), 50); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let mut first_value = 1996_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let mut last_value = 1996_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 50); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_after_to_inclusive_query_with_non_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 100); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_after_to_inclusive( + 1995_u32.to_be_bytes().to_vec()..=1997_u32.to_be_bytes().to_vec(), + ); - let mut first_value = 1996_u32.to_be_bytes().to_vec(); - first_value.append(&mut 
100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let mut last_value = 1997_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 100); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_after_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new_with_direction(false); - query.insert_range_after_to_inclusive( - 1995_u32.to_be_bytes().to_vec()..=5000_u32.to_be_bytes().to_vec(), - ); + assert_eq!(elements.len(), 100); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new_with_direction(false); - subquery.insert_all(); + let mut first_value = 1996_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key); - query.set_subquery(subquery); + let mut last_value = 1997_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let path_query = PathQuery::new_unsized(path, query.clone()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let 
(hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 100); + compare_result_sets(&elements, &result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_after_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 200); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new_with_direction(false); + query.insert_range_after_to_inclusive( + 1995_u32.to_be_bytes().to_vec()..=5000_u32.to_be_bytes().to_vec(), + ); - let mut first_value = 1999_u32.to_be_bytes().to_vec(); - first_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new_with_direction(false); + subquery.insert_all(); - let mut last_value = 1996_u32.to_be_bytes().to_vec(); - last_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 200); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_inclusive_query_with_double_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_double_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = 
vec![TEST_LEAF.to_vec()]; - let mut query = Query::new(); - query.insert_range_inclusive((3u32).to_be_bytes().to_vec()..=(4u32).to_be_bytes().to_vec()); + assert_eq!(elements.len(), 200); - query.set_subquery_key(b"a".to_vec()); + let mut first_value = 1999_u32.to_be_bytes().to_vec(); + first_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let mut subquery = Query::new(); - subquery - .insert_range_inclusive((29u32).to_be_bytes().to_vec()..=(31u32).to_be_bytes().to_vec()); + let mut last_value = 1996_u32.to_be_bytes().to_vec(); + last_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - subquery.set_subquery_key(b"\0".to_vec()); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 200); + compare_result_sets(&elements, &result_set); + } - let mut subsubquery = Query::new(); - subsubquery.insert_all(); + #[test] + fn test_get_range_inclusive_query_with_double_non_unique_subquery() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_double_range_subquery(&db); - subquery.set_subquery(subsubquery); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new(); + query.insert_range_inclusive((3u32).to_be_bytes().to_vec()..=(4u32).to_be_bytes().to_vec()); - query.set_subquery(subquery); + query.set_subquery_key(b"a".to_vec()); - let path_query = PathQuery::new_unsized(path, query.clone()); + let mut subquery = Query::new(); + subquery.insert_range_inclusive( + (29u32).to_be_bytes().to_vec()..=(31u32).to_be_bytes().to_vec(), + ); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + subquery.set_subquery_key(b"\0".to_vec()); - assert_eq!(elements.len(), 60); + let mut 
subsubquery = Query::new(); + subsubquery.insert_all(); - let first_value = 100_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + subquery.set_subquery(subsubquery); - let last_value = 109_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 60); - compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new_unsized(path, query.clone()); -#[test] -fn test_get_range_query_with_limit_and_offset() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let path = vec![TEST_LEAF.to_vec()]; - let mut query = Query::new_with_direction(true); - query.insert_range(1990_u32.to_be_bytes().to_vec()..1995_u32.to_be_bytes().to_vec()); + assert_eq!(elements.len(), 60); - let subquery_key: Vec = b"\0".to_vec(); - let mut subquery = Query::new(); - subquery.insert_all(); + let first_value = 100_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + let last_value = 109_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); - // Baseline query: no offset or limit + left to right - let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 60); + compare_result_sets(&elements, 
&result_set); + } - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + #[test] + fn test_get_range_query_with_limit_and_offset() { + let db = make_test_grovedb(); + populate_tree_for_non_unique_range_subquery(&db); - assert_eq!(elements.len(), 250); + let path = vec![TEST_LEAF.to_vec()]; + let mut query = Query::new_with_direction(true); + query.insert_range(1990_u32.to_be_bytes().to_vec()..1995_u32.to_be_bytes().to_vec()); - let mut first_value = 1990_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let subquery_key: Vec = b"\0".to_vec(); + let mut subquery = Query::new(); + subquery.insert_all(); - let mut last_value = 1994_u32.to_be_bytes().to_vec(); - last_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 250); - compare_result_sets(&elements, &result_set); + // Baseline query: no offset or limit + left to right + let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); - subquery.left_to_right = false; + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + assert_eq!(elements.len(), 250); - query.left_to_right = false; + let mut first_value = 1990_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - // Baseline query: no offset or limit 
+ right to left - let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); + let mut last_value = 1994_u32.to_be_bytes().to_vec(); + last_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 250); + compare_result_sets(&elements, &result_set); - assert_eq!(elements.len(), 250); + subquery.left_to_right = false; - let mut first_value = 1994_u32.to_be_bytes().to_vec(); - first_value.append(&mut 149_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - let mut last_value = 1990_u32.to_be_bytes().to_vec(); - last_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.left_to_right = false; - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 250); - compare_result_sets(&elements, &result_set); + // Baseline query: no offset or limit + right to left + let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); - subquery.left_to_right = true; + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + assert_eq!(elements.len(), 250); - query.left_to_right = true; + let 
mut first_value = 1994_u32.to_be_bytes().to_vec(); + first_value.append(&mut 149_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - // Limit the result to just 55 elements - let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(55), None)); + let mut last_value = 1990_u32.to_be_bytes().to_vec(); + last_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 250); + compare_result_sets(&elements, &result_set); - assert_eq!(elements.len(), 55); + subquery.left_to_right = true; - let mut first_value = 1990_u32.to_be_bytes().to_vec(); - first_value.append(&mut 100_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - // Second tree 5 element [100, 101, 102, 103, 104] - let mut last_value = 1991_u32.to_be_bytes().to_vec(); - last_value.append(&mut 104_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.left_to_right = true; - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 55); - compare_result_sets(&elements, &result_set); + // Limit the result to just 55 elements + let path_query = + PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(55), None)); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + let (elements, _) = db 
+ .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - // Limit the result set to 60 elements but skip the first 14 elements - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(60), Some(14)), - ); + assert_eq!(elements.len(), 55); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); - - assert_eq!(elements.len(), 60); - - // Skips the first 14 elements, starts from the 15th - // i.e skips [100 - 113] starts from 114 - let mut first_value = 1990_u32.to_be_bytes().to_vec(); - first_value.append(&mut 114_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); - - // Continues for 60 iterations - // Takes 36 elements from the first tree (50 - 14) - // takes the remaining 24 from the second three (60 - 36) - let mut last_value = 1991_u32.to_be_bytes().to_vec(); - last_value.append(&mut 123_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 60); - compare_result_sets(&elements, &result_set); - - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); - - query.left_to_right = false; - - // Limit the result set to 60 element but skip first 10 elements (this time - // right to left) - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(60), Some(10)), - ); - - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + let mut first_value = 1990_u32.to_be_bytes().to_vec(); + first_value.append(&mut 100_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], 
first_value); - assert_eq!(elements.len(), 60); + // Second tree 5 element [100, 101, 102, 103, 104] + let mut last_value = 1991_u32.to_be_bytes().to_vec(); + last_value.append(&mut 104_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - // Skips the first 10 elements from the back - // last tree and starts from the 11th before the end - // Underlying subquery is ascending - let mut first_value = 1994_u32.to_be_bytes().to_vec(); - first_value.append(&mut 110_u32.to_be_bytes().to_vec()); - assert_eq!(elements[0], first_value); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 55); + compare_result_sets(&elements, &result_set); - let mut last_value = 1993_u32.to_be_bytes().to_vec(); - last_value.append(&mut 119_u32.to_be_bytes().to_vec()); - assert_eq!(elements[elements.len() - 1], last_value); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 60); - compare_result_sets(&elements, &result_set); + // Limit the result set to 60 elements but skip the first 14 elements + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(60), Some(14)), + ); - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery.clone()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - query.left_to_right = true; + assert_eq!(elements.len(), 60); - // Offset bigger than elements in range - let path_query = PathQuery::new( - path.clone(), - 
SizedQuery::new(query.clone(), None, Some(5000)), - ); + // Skips the first 14 elements, starts from the 15th + // i.e skips [100 - 113] starts from 114 + let mut first_value = 1990_u32.to_be_bytes().to_vec(); + first_value.append(&mut 114_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + // Continues for 60 iterations + // Takes 36 elements from the first tree (50 - 14) + // takes the remaining 24 from the second three (60 - 36) + let mut last_value = 1991_u32.to_be_bytes().to_vec(); + last_value.append(&mut 123_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - assert_eq!(elements.len(), 0); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); + query.left_to_right = false; - query.set_subquery_key(subquery_key.clone()); - query.set_subquery(subquery); + // Limit the result set to 60 element but skip first 10 elements (this time + // right to left) + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(60), Some(10)), + ); - // Limit bigger than elements in range - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(5000), None), - ); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + assert_eq!(elements.len(), 60); - assert_eq!(elements.len(), 250); + // Skips the first 10 elements from the back + 
// last tree and starts from the 11th before the end + // Underlying subquery is ascending + let mut first_value = 1994_u32.to_be_bytes().to_vec(); + first_value.append(&mut 110_u32.to_be_bytes().to_vec()); + assert_eq!(elements[0], first_value); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 250); + let mut last_value = 1993_u32.to_be_bytes().to_vec(); + last_value.append(&mut 119_u32.to_be_bytes().to_vec()); + assert_eq!(elements[elements.len() - 1], last_value); - // Test on unique subtree build - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery.clone()); - let mut query = Query::new_with_direction(true); - query.insert_range(1990_u32.to_be_bytes().to_vec()..2000_u32.to_be_bytes().to_vec()); + query.left_to_right = true; - query.set_subquery_key(subquery_key); + // Offset bigger than elements in range + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), None, Some(5000)), + ); - let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(5), Some(2))); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("expected successful get_path_query"); + assert_eq!(elements.len(), 0); - assert_eq!(elements.len(), 5); + query.set_subquery_key(subquery_key.clone()); + query.set_subquery(subquery); - let first_value = 1992_u32.to_be_bytes().to_vec(); - assert_eq!(elements[0], first_value); + // Limit bigger than elements in range + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(5000), None), + ); - let last_value 
= 1996_u32.to_be_bytes().to_vec(); - assert_eq!(elements[elements.len() - 1], last_value); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - compare_result_sets(&elements, &result_set); -} + assert_eq!(elements.len(), 250); -#[test] -fn test_correct_child_root_hash_propagation_for_parent_in_same_batch() { - let tmp_dir = TempDir::new().unwrap(); - let db = GroveDb::open(tmp_dir.path()).unwrap(); - let tree_name_slice: &[u8] = &[ - 2, 17, 40, 46, 227, 17, 179, 211, 98, 50, 130, 107, 246, 26, 147, 45, 234, 189, 245, 77, - 252, 86, 99, 107, 197, 226, 188, 54, 239, 64, 17, 37, - ]; - - let batch = vec![GroveDbOp::insert_op(vec![], vec![1], Element::empty_tree())]; - db.apply_batch(batch, None, None) - .unwrap() - .expect("should apply batch"); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 250); - let batch = vec![ - GroveDbOp::insert_op( - vec![vec![1]], - tree_name_slice.to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![vec![1], tree_name_slice.to_vec()], - b"\0".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![vec![1], tree_name_slice.to_vec()], - vec![1], - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![vec![1], tree_name_slice.to_vec(), vec![1]], - b"person".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![ - vec![1], - tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - ], - b"\0".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![ - vec![1], 
- tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - ], - b"firstName".to_vec(), - Element::empty_tree(), - ), - ]; - db.apply_batch(batch, None, None) - .unwrap() - .expect("should apply batch"); + // Test on unique subtree build + let db = make_test_grovedb(); + populate_tree_for_unique_range_subquery(&db); - let batch = vec![ - GroveDbOp::insert_op( - vec![ - vec![1], + let mut query = Query::new_with_direction(true); + query.insert_range(1990_u32.to_be_bytes().to_vec()..2000_u32.to_be_bytes().to_vec()); + + query.set_subquery_key(subquery_key); + + let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(5), Some(2))); + + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!(elements.len(), 5); + + let first_value = 1992_u32.to_be_bytes().to_vec(); + assert_eq!(elements[0], first_value); + + let last_value = 1996_u32.to_be_bytes().to_vec(); + assert_eq!(elements[elements.len() - 1], last_value); + } + + #[test] + fn test_correct_child_root_hash_propagation_for_parent_in_same_batch() { + let tmp_dir = TempDir::new().unwrap(); + let db = GroveDb::open(tmp_dir.path()).unwrap(); + let tree_name_slice: &[u8] = &[ + 2, 17, 40, 46, 227, 17, 179, 211, 98, 50, 130, 107, 246, 26, 147, 45, 234, 189, 245, + 77, 252, 86, 99, 107, 197, 226, 188, 54, 239, 64, 17, 37, + ]; + + let batch = vec![GroveDbOp::insert_op(vec![], vec![1], Element::empty_tree())]; + db.apply_batch(batch, None, None) + .unwrap() + .expect("should apply batch"); + + let batch = vec![ + GroveDbOp::insert_op( + vec![vec![1]], tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![vec![1], tree_name_slice.to_vec()], b"\0".to_vec(), - ], - b"person_id_1".to_vec(), - Element::new_item(vec![50]), - ), - GroveDbOp::insert_op( - vec![ - vec![1], - tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - 
b"firstName".to_vec(), - ], - b"cammi".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![ - vec![1], - tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - b"firstName".to_vec(), - b"cammi".to_vec(), - ], - b"\0".to_vec(), - Element::empty_tree(), - ), - GroveDbOp::insert_op( - vec![ - vec![1], - tree_name_slice.to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![vec![1], tree_name_slice.to_vec()], vec![1], + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![vec![1], tree_name_slice.to_vec(), vec![1]], b"person".to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + ], + b"\0".to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + ], b"firstName".to_vec(), + Element::empty_tree(), + ), + ]; + db.apply_batch(batch, None, None) + .unwrap() + .expect("should apply batch"); + + let batch = vec![ + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"\0".to_vec(), + ], + b"person_id_1".to_vec(), + Element::new_item(vec![50]), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"firstName".to_vec(), + ], b"cammi".to_vec(), + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"firstName".to_vec(), + b"cammi".to_vec(), + ], b"\0".to_vec(), - ], - b"person_ref_id".to_vec(), - Element::new_reference(ReferencePathType::UpstreamRootHeightReference( - 4, - vec![b"\0".to_vec(), b"person_id_1".to_vec()], - )), - ), - ]; - db.apply_batch(batch, None, None) - .unwrap() - .expect("should apply batch"); - - let path = vec![ - vec![1], - tree_name_slice.to_vec(), - vec![1], - b"person".to_vec(), - b"firstName".to_vec(), - ]; - let mut query = Query::new(); 
- query.insert_all(); - query.set_subquery_key(b"\0".to_vec()); - let mut subquery = Query::new(); - subquery.insert_all(); - query.set_subquery(subquery); - let path_query = PathQuery::new( - path, - SizedQuery { - query: query.clone(), - limit: Some(100), - offset: Some(0), - }, - ); + Element::empty_tree(), + ), + GroveDbOp::insert_op( + vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"firstName".to_vec(), + b"cammi".to_vec(), + b"\0".to_vec(), + ], + b"person_ref_id".to_vec(), + Element::new_reference(ReferencePathType::UpstreamRootHeightReference( + 4, + vec![b"\0".to_vec(), b"person_id_1".to_vec()], + )), + ), + ]; + db.apply_batch(batch, None, None) + .unwrap() + .expect("should apply batch"); - let proof = db - .prove_query(&path_query) - .unwrap() - .expect("expected successful proving"); - let (hash, _result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); -} + let path = vec![ + vec![1], + tree_name_slice.to_vec(), + vec![1], + b"person".to_vec(), + b"firstName".to_vec(), + ]; + let mut query = Query::new(); + query.insert_all(); + query.set_subquery_key(b"\0".to_vec()); + let mut subquery = Query::new(); + subquery.insert_all(); + query.set_subquery(subquery); + let path_query = PathQuery::new( + path, + SizedQuery { + query: query.clone(), + limit: Some(100), + offset: Some(0), + }, + ); -#[test] -fn test_mixed_level_proofs() { - let db = make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::new_item(vec![1]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key3", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key4", - 
Element::new_reference(ReferencePathType::SiblingReference(b"key2".to_vec())), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k1", - Element::new_item(vec![2]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k2", - Element::new_item(vec![3]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k3", - Element::new_item(vec![4]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - - let mut query = Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - query.set_subquery(subquery); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path.clone(), query.clone()); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("successful get_path_query"); + let proof = db + .prove_query(&path_query, None) + .unwrap() + .expect("expected successful proving"); + let (hash, _result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + } - assert_eq!(elements.len(), 5); - assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); + #[test] + fn test_mixed_level_proofs() { + let db = make_test_grovedb(); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - compare_result_sets(&elements, &result_set); + // TEST_LEAF + // / | | \ + // key1 key2 : [1] key3 key4 : (Ref -> Key2) + // / | \ + // k1 k2 k3 + // / / / + // 2 3 4 - // Test mixed element proofs with limit and offset - let path_query = PathQuery::new_unsized(path.clone(), query.clone()); - let (elements, _) 
= db - .query_item_value(&path_query, true, true, true, None) + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, + ) .unwrap() - .expect("successful get_path_query"); - - assert_eq!(elements.len(), 5); - assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - compare_result_sets(&elements, &result_set); - - // TODO: Fix noticed bug when limit and offset are both set to Some(0) + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key2", + Element::new_item(vec![1]), + None, + None, + ) + .unwrap() + .expect("successful item insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key3", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key4", + Element::new_reference(ReferencePathType::SiblingReference(b"key2".to_vec())), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); - let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(1), None)); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k1", + Element::new_item(vec![2]), + None, + None, + ) .unwrap() - .expect("successful get_path_query"); - - assert_eq!(elements.len(), 1); - assert_eq!(elements, vec![vec![2]]); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - compare_result_sets(&elements, &result_set); - - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(3), 
Some(0)), - ); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k2", + Element::new_item(vec![3]), + None, + None, + ) .unwrap() - .expect("successful get_path_query"); - - assert_eq!(elements.len(), 3); - assert_eq!(elements, vec![vec![2], vec![3], vec![4]]); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - compare_result_sets(&elements, &result_set); - - let path_query = PathQuery::new( - path.clone(), - SizedQuery::new(query.clone(), Some(4), Some(0)), - ); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k3", + Element::new_item(vec![4]), + None, + None, + ) .unwrap() - .expect("successful get_path_query"); + .expect("successful item insert"); - assert_eq!(elements.len(), 4); - assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1]]); + let mut query = Query::new(); + query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + query.set_subquery(subquery); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - compare_result_sets(&elements, &result_set); + let path = vec![TEST_LEAF.to_vec()]; - let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(10), Some(4))); - let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) - .unwrap() - .expect("successful get_path_query"); + let path_query = PathQuery::new_unsized(path.clone(), query.clone()); + let (elements, _) = db + 
.query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("successful get_path_query"); + + assert_eq!(elements.len(), 5); + assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + // println!( + // "{}", + // result_set + // .iter() + // .map(|a| a.to_string()) + // .collect::>() + // .join(" | ") + // ); + assert_eq!(result_set.len(), 5); + compare_result_sets(&elements, &result_set); + + // Test mixed element proofs with limit and offset + let path_query = PathQuery::new_unsized(path.clone(), query.clone()); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("successful get_path_query"); - assert_eq!(elements.len(), 1); - assert_eq!(elements, vec![vec![1]]); + assert_eq!(elements.len(), 5); + assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - compare_result_sets(&elements, &result_set); -} + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + compare_result_sets(&elements, &result_set); -#[test] -fn test_mixed_level_proofs_with_tree() { - let db = make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"key1", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::empty_tree(), - None, - None, - ) - .unwrap() - 
.expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"key3", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k1", - Element::new_item(vec![2]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k2", - Element::new_item(vec![3]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key1"].as_ref(), - b"k3", - Element::new_item(vec![4]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"k1", - Element::new_item(vec![5]), - None, - None, - ) - .unwrap() - .expect("successful item insert"); - - let mut query = Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - query.add_conditional_subquery(QueryItem::Key(b"key1".to_vec()), None, Some(subquery)); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path.clone(), query.clone()); - - let (elements, _) = db - .query_raw( - &path_query, - true, - true, - true, - QueryResultType::QueryPathKeyElementTrioResultType, - None, - ) - .unwrap() - .expect("expected successful get_path_query"); + // TODO: Fix noticed bug when limit and offset are both set to Some(0) - assert_eq!(elements.len(), 5); + let path_query = + PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(1), None)); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("successful get_path_query"); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); + assert_eq!(elements.len(), 1); + assert_eq!(elements, vec![vec![2]]); - // TODO: verify that 
the result set is exactly the same - // compare_result_sets(&elements, &result_set); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 1); + compare_result_sets(&elements, &result_set); - let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(1), None)); + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(3), Some(0)), + ); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("successful get_path_query"); - let (elements, _) = db - .query_raw( - &path_query, - true, - true, - true, - QueryResultType::QueryPathKeyElementTrioResultType, - None, - ) - .unwrap() - .expect("expected successful get_path_query"); + assert_eq!(elements.len(), 3); + assert_eq!(elements, vec![vec![2], vec![3], vec![4]]); - assert_eq!(elements.len(), 1); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + compare_result_sets(&elements, &result_set); - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - // TODO: verify that the result set is exactly the same - // compare_result_sets(&elements, &result_set); -} + let path_query = PathQuery::new( + path.clone(), + SizedQuery::new(query.clone(), Some(4), Some(0)), + ); + let (elements, _) = db + .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("successful get_path_query"); -#[test] -fn test_mixed_level_proofs_with_subquery_paths() { - let db = 
make_test_grovedb(); - db.insert( - [TEST_LEAF].as_ref(), - b"a", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"b", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF].as_ref(), - b"c", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"a"].as_ref(), - b"d", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"a"].as_ref(), - b"e", - Element::new_item(vec![2]), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"a"].as_ref(), - b"f", - Element::new_item(vec![3]), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"a", b"d"].as_ref(), - b"d", - Element::new_item(vec![6]), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"b"].as_ref(), - b"g", - Element::new_item(vec![4]), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"b"].as_ref(), - b"d", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - db.insert( - [TEST_LEAF, b"b", b"d"].as_ref(), - b"i", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"b", b"d"].as_ref(), - b"j", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - db.insert( - [TEST_LEAF, b"b", b"d"].as_ref(), - b"k", - Element::empty_tree(), - None, - None, - ) - .unwrap() - .expect("successful subtree insert"); - - // if you don't have an item at the subquery path translation, you shouldn't be - // added to the result set. 
- let mut query = Query::new(); - query.insert_all(); - query.set_subquery_path(vec![b"d".to_vec()]); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path, query.clone()); - - // TODO: proofs seems to be more expressive than query_raw now - // let (elements, _) = db - // .query_raw( - // &path_query, - // true, - // QueryResultType::QueryPathKeyElementTrioResultType, - // None, - // ) - // .unwrap() - // .expect("expected successful get_path_query"); - // - // assert_eq!(elements.len(), 2); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 2); - - // apply path translation then query - let mut query = Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - query.set_subquery_path(vec![b"d".to_vec()]); - query.set_subquery(subquery); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path, query.clone()); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - - // apply empty path translation - let mut query = Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - query.set_subquery_path(vec![]); - query.set_subquery(subquery); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path, query.clone()); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - - // use conditionals to return from more than 2 depth - let mut query = 
Query::new(); - query.insert_all(); - let mut subquery = Query::new(); - subquery.insert_all(); - let mut deeper_subquery = Query::new(); - deeper_subquery.insert_all(); - subquery.add_conditional_subquery(QueryItem::Key(b"d".to_vec()), None, Some(deeper_subquery)); - query.add_conditional_subquery(QueryItem::Key(b"a".to_vec()), None, Some(subquery.clone())); - query.add_conditional_subquery(QueryItem::Key(b"b".to_vec()), None, Some(subquery.clone())); - - let path = vec![TEST_LEAF.to_vec()]; - - let path_query = PathQuery::new_unsized(path, query.clone()); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 8); -} + assert_eq!(elements.len(), 4); + assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1]]); -#[test] -fn test_proof_with_limit_zero() { - let db = make_deep_tree(); - let mut query = Query::new(); - query.insert_all(); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec()], - SizedQuery::new(query, Some(0), Some(0)), - ); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 0); -} + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + compare_result_sets(&elements, &result_set); -#[test] -fn test_result_set_path_after_verification() { - let db = make_deep_tree(); - let mut query = Query::new(); - query.insert_all(); - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, 
result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - // assert the result set path - assert_eq!( - result_set[0].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[1].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[2].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - - assert_eq!(result_set[0].key, b"key1".to_vec()); - assert_eq!(result_set[1].key, b"key2".to_vec()); - assert_eq!(result_set[2].key, b"key3".to_vec()); - - // Test path tracking with subquery - let mut query = Query::new(); - query.insert_all(); - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 5); - - assert_eq!( - result_set[0].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[1].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[2].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[3].path, - vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()] - ); - assert_eq!( - result_set[4].path, - vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()] - ); - - // Test path tracking with subquery path - // perform a query, do a translation, perform another query - let mut query = Query::new(); - query.insert_key(b"deep_leaf".to_vec()); - query.set_subquery_path(vec![b"deep_node_1".to_vec(), b"deeper_1".to_vec()]); - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - let path_query = PathQuery::new_unsized(vec![], query); - - let proof = 
db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - assert_eq!( - result_set[0].path, - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec() - ] - ); - assert_eq!( - result_set[1].path, - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec() - ] - ); - assert_eq!( - result_set[2].path, - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec() - ] - ); - - assert_eq!(result_set[0].key, b"key1".to_vec()); - assert_eq!(result_set[1].key, b"key2".to_vec()); - assert_eq!(result_set[2].key, b"key3".to_vec()); - - // Test path tracking for mixed level result set - let mut query = Query::new(); - query.insert_all(); - let mut subq = Query::new(); - subq.insert_all(); - query.add_conditional_subquery(QueryItem::Key(b"innertree".to_vec()), None, Some(subq)); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - - assert_eq!( - result_set[0].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[1].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[2].path, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!(result_set[3].path, vec![TEST_LEAF.to_vec()]); - - assert_eq!(result_set[0].key, b"key1".to_vec()); - assert_eq!(result_set[1].key, b"key2".to_vec()); - assert_eq!(result_set[2].key, b"key3".to_vec()); - assert_eq!(result_set[3].key, b"innertree4".to_vec()); -} + let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(10), Some(4))); + let (elements, _) = db 
+ .query_item_value(&path_query, true, true, true, None) + .unwrap() + .expect("successful get_path_query"); -#[test] -fn test_verification_with_path_key_optional_element_trio() { - let db = make_deep_tree(); - let mut query = Query::new(); - query.insert_all(); - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 3); - - assert_eq!( - result_set[0], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key1".to_vec(), - Some(Element::new_item(b"value1".to_vec())) + assert_eq!(elements.len(), 1); + assert_eq!(elements, vec![vec![1]]); + } + + #[test] + fn test_mixed_level_proofs_with_tree() { + let db = make_test_grovedb(); + db.insert( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + None, ) - ); - assert_eq!( - result_set[1], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key2".to_vec(), - Some(Element::new_item(b"value2".to_vec())) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key2", + Element::empty_tree(), + None, + None, ) - ); - assert_eq!( - result_set[2], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key3".to_vec(), - Some(Element::new_item(b"value3".to_vec())) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key3", + Element::empty_tree(), + None, + None, ) - ); -} - -#[test] -fn test_absence_proof() { - let db = make_deep_tree(); - - // simple case, request for items k2..=k5 under inner tree - // we pass them as keys as terminal keys does not handle ranges with start or - // end len greater than 1 k2, k3 should be Some, k4, k5 should be None, k1, - // k6.. 
should not be in map - let mut query = Query::new(); - query.insert_key(b"key2".to_vec()); - query.insert_key(b"key3".to_vec()); - query.insert_key(b"key4".to_vec()); - query.insert_key(b"key5".to_vec()); - let path_query = PathQuery::new( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - SizedQuery::new(query, Some(4), None), - ); - - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_with_absence_proof(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 4); - - assert_eq!( - result_set[0].0, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[1].0, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[2].0, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - assert_eq!( - result_set[3].0, - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] - ); - - assert_eq!(result_set[0].1, b"key2".to_vec()); - assert_eq!(result_set[1].1, b"key3".to_vec()); - assert_eq!(result_set[2].1, b"key4".to_vec()); - assert_eq!(result_set[3].1, b"key5".to_vec()); - - assert_eq!(result_set[0].2, Some(Element::new_item(b"value2".to_vec()))); - assert_eq!(result_set[1].2, Some(Element::new_item(b"value3".to_vec()))); - assert_eq!(result_set[2].2, None); - assert_eq!(result_set[3].2, None); -} + .unwrap() + .expect("successful subtree insert"); -#[test] -fn test_subset_proof_verification() { - let db = make_deep_tree(); - - // original path query - let mut query = Query::new(); - query.insert_all(); - let mut subq = Query::new(); - subq.insert_all(); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - // first we prove non-verbose - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - 
assert_eq!(result_set.len(), 5); - assert_eq!( - result_set[0], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key1".to_vec(), - Some(Element::new_item(b"value1".to_vec())) - ) - ); - assert_eq!( - result_set[1], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key2".to_vec(), - Some(Element::new_item(b"value2".to_vec())) - ) - ); - assert_eq!( - result_set[2], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key3".to_vec(), - Some(Element::new_item(b"value3".to_vec())) + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k1", + Element::new_item(vec![2]), + None, + None, ) - ); - assert_eq!( - result_set[3], - ( - vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], - b"key4".to_vec(), - Some(Element::new_item(b"value4".to_vec())) + .unwrap() + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k2", + Element::new_item(vec![3]), + None, + None, ) - ); - assert_eq!( - result_set[4], - ( - vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], - b"key5".to_vec(), - Some(Element::new_item(b"value5".to_vec())) + .unwrap() + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key1"].as_ref(), + b"k3", + Element::new_item(vec![4]), + None, + None, ) - ); - - // prove verbose - let verbose_proof = db.prove_verbose(&path_query).unwrap().unwrap(); - assert!(verbose_proof.len() > proof.len()); - - // subset path query - let mut query = Query::new(); - query.insert_key(b"innertree".to_vec()); - let mut subq = Query::new(); - subq.insert_key(b"key1".to_vec()); - query.set_subquery(subq); - let subset_path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - let (hash, result_set) = - GroveDb::verify_subset_query(&verbose_proof, &subset_path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - assert_eq!( - result_set[0], - ( - vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], - b"key1".to_vec(), - 
Some(Element::new_item(b"value1".to_vec())) + .unwrap() + .expect("successful item insert"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"k1", + Element::new_item(vec![5]), + None, + None, ) - ); -} + .unwrap() + .expect("successful item insert"); -#[test] -fn test_chained_path_query_verification() { - let db = make_deep_tree(); - - let mut query = Query::new(); - query.insert_all(); - let mut subq = Query::new(); - subq.insert_all(); - let mut subsubq = Query::new(); - subsubq.insert_all(); - - subq.set_subquery(subsubq); - query.set_subquery(subq); - - let path_query = PathQuery::new_unsized(vec![b"deep_leaf".to_vec()], query); - - // first prove non verbose - let proof = db.prove_query(&path_query).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 11); - - // prove verbose - let verbose_proof = db.prove_verbose(&path_query).unwrap().unwrap(); - assert!(verbose_proof.len() > proof.len()); - - // init deeper_1 path query - let mut query = Query::new(); - query.insert_all(); - - let deeper_1_path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec(), - ], - query, - ); - - // define the path query generators - let mut chained_path_queries = vec![]; - chained_path_queries.push(|_elements: Vec| { let mut query = Query::new(); query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + query.add_conditional_subquery(QueryItem::Key(b"key1".to_vec()), None, Some(subquery)); - let deeper_2_path_query = PathQuery::new_unsized( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_2".to_vec(), - ], - query, - ); - Some(deeper_2_path_query) - }); - - // verify the path query chain - let (root_hash, results) = GroveDb::verify_query_with_chained_path_queries( - &verbose_proof, - &deeper_1_path_query, - chained_path_queries, - ) - 
.unwrap(); - assert_eq!(root_hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(results.len(), 2); - assert_eq!(results[0].len(), 3); - assert_eq!( - results[0][0], - ( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec() - ], - b"key1".to_vec(), - Some(Element::new_item(b"value1".to_vec())) - ) - ); - assert_eq!( - results[0][1], - ( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec() - ], - b"key2".to_vec(), - Some(Element::new_item(b"value2".to_vec())) - ) - ); - assert_eq!( - results[0][2], - ( - vec![ - b"deep_leaf".to_vec(), - b"deep_node_1".to_vec(), - b"deeper_1".to_vec() - ], - b"key3".to_vec(), - Some(Element::new_item(b"value3".to_vec())) + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path.clone(), query.clone()); + + let (elements, _) = db + .query_raw( + &path_query, + true, + true, + true, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + ) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!(elements.len(), 5); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + + // println!( + // "{}", + // result_set + // .iter() + // .map(|a| a.to_string()) + // .collect::>() + // .join(", ") + // ); + assert_eq!(result_set.len(), 5); + + // TODO: verify that the result set is exactly the same + // compare_result_sets(&elements, &result_set); + + let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(1), None)); + + let (elements, _) = db + .query_raw( + &path_query, + true, + true, + true, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + ) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!(elements.len(), 1); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = 
GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 1); + // TODO: verify that the result set is exactly the same + // compare_result_sets(&elements, &result_set); + } + + #[test] + fn test_mixed_level_proofs_with_subquery_paths() { + let db = make_test_grovedb(); + + // TEST_LEAF + // / | \ + // a b c + // / | \ / \ + // d e:2 f:3 g:4 d + // / / | \ + // d:6 i j k + // + + db.insert( + [TEST_LEAF].as_ref(), + b"a", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"b", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"c", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"a"].as_ref(), + b"d", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF, b"a"].as_ref(), + b"e", + Element::new_item(vec![2]), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF, b"a"].as_ref(), + b"f", + Element::new_item(vec![3]), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"a", b"d"].as_ref(), + b"d", + Element::new_item(vec![6]), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"b"].as_ref(), + b"g", + Element::new_item(vec![4]), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF, b"b"].as_ref(), + b"d", + Element::empty_tree(), + None, + None, ) - ); + .unwrap() + .expect("successful subtree insert"); + + db.insert( + [TEST_LEAF, b"b", b"d"].as_ref(), + b"i", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + 
[TEST_LEAF, b"b", b"d"].as_ref(), + b"j", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + db.insert( + [TEST_LEAF, b"b", b"d"].as_ref(), + b"k", + Element::empty_tree(), + None, + None, + ) + .unwrap() + .expect("successful subtree insert"); + // // if you don't have an item at the subquery path translation, you shouldn't + // be // added to the result set. + let mut query = Query::new(); + query.insert_all(); + query.set_subquery_path(vec![b"d".to_vec()]); + + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path, query.clone()); + + let (elements, _) = db + .query_raw( + &path_query, + false, + true, + false, + QueryResultType::QueryPathKeyElementTrioResultType, + None, + ) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!( + elements, + QueryResultElements::from_elements(vec![ + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"a".to_vec()], + b"d".to_vec(), + Element::Tree(Some(b"d".to_vec()), None) + )), + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"b".to_vec()], + b"d".to_vec(), + Element::Tree(Some(b"j".to_vec()), None) + )) + ]) + ); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + // println!( + // "{}", + // result_set + // .iter() + // .map(|a| a.to_string()) + // .collect::>() + // .join("| ") + // ); + assert_eq!(result_set.len(), 2); + + // apply path translation then query + let mut query = Query::new(); + query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + query.set_subquery_path(vec![b"d".to_vec()]); + query.set_subquery(subquery); + + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path, query.clone()); + + let (elements, _) = db + .query_raw( + &path_query, + false, + true, + false, + 
QueryResultType::QueryPathKeyElementTrioResultType, + None, + ) + .unwrap() + .expect("expected successful get_path_query"); + + assert_eq!( + elements, + QueryResultElements::from_elements(vec![ + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"a".to_vec(), b"d".to_vec()], + b"d".to_vec(), + Element::Item(vec![6], None) + )), + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"b".to_vec(), b"d".to_vec()], + b"i".to_vec(), + Element::Tree(None, None) + )), + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"b".to_vec(), b"d".to_vec()], + b"j".to_vec(), + Element::Tree(None, None) + )), + PathKeyElementTrioResultItem(( + vec![b"test_leaf".to_vec(), b"b".to_vec(), b"d".to_vec()], + b"k".to_vec(), + Element::Tree(None, None) + )) + ]) + ); - assert_eq!(results[1].len(), 3); - assert_eq!( - results[1][0], - ( + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + + // apply empty path translation + let mut query = Query::new(); + query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + query.set_subquery_path(vec![]); + query.set_subquery(subquery); + + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path, query.clone()); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + + // use conditionals to return from more than 2 depth + let mut query = Query::new(); + query.insert_all(); + let mut subquery = Query::new(); + subquery.insert_all(); + let mut deeper_subquery = Query::new(); + deeper_subquery.insert_all(); + subquery.add_conditional_subquery( + QueryItem::Key(b"d".to_vec()), + None, + 
Some(deeper_subquery), + ); + query.add_conditional_subquery(QueryItem::Key(b"a".to_vec()), None, Some(subquery.clone())); + query.add_conditional_subquery(QueryItem::Key(b"b".to_vec()), None, Some(subquery.clone())); + + let path = vec![TEST_LEAF.to_vec()]; + + let path_query = PathQuery::new_unsized(path, query.clone()); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 8); + } + + #[test] + fn test_proof_with_limit_zero() { + let db = make_deep_tree(); + let mut query = Query::new(); + query.insert_all(); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec()], + SizedQuery::new(query, Some(0), Some(0)), + ); + + db.prove_query(&path_query, None) + .unwrap() + .expect_err("expected error when trying to prove with limit 0"); + } + + #[test] + fn test_result_set_path_after_verification() { + let db = make_deep_tree(); + let mut query = Query::new(); + query.insert_all(); + let path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + + // assert the result set path + assert_eq!( + result_set[0].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[1].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[2].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + + assert_eq!(result_set[0].key, b"key1".to_vec()); + assert_eq!(result_set[1].key, b"key2".to_vec()); + assert_eq!(result_set[2].key, b"key3".to_vec()); + + // Test path tracking with subquery + let mut query = Query::new(); + query.insert_all(); + let mut 
subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + + assert_eq!( + result_set[0].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[1].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[2].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[3].path, + vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()] + ); + assert_eq!( + result_set[4].path, + vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()] + ); + + // Test path tracking with subquery path + // perform a query, do a translation, perform another query + let mut query = Query::new(); + query.insert_key(b"deep_leaf".to_vec()); + query.set_subquery_path(vec![b"deep_node_1".to_vec(), b"deeper_1".to_vec()]); + let mut subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); + let path_query = PathQuery::new_unsized(vec![], query); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + + assert_eq!( + result_set[0].path, vec![ b"deep_leaf".to_vec(), b"deep_node_1".to_vec(), - b"deeper_2".to_vec() - ], - b"key4".to_vec(), - Some(Element::new_item(b"value4".to_vec())) - ) - ); - assert_eq!( - results[1][1], - ( + b"deeper_1".to_vec() + ] + ); + assert_eq!( + result_set[1].path, vec![ b"deep_leaf".to_vec(), b"deep_node_1".to_vec(), - b"deeper_2".to_vec() - ], - b"key5".to_vec(), - Some(Element::new_item(b"value5".to_vec())) - ) - ); - assert_eq!( - results[1][2], - ( + 
b"deeper_1".to_vec() + ] + ); + assert_eq!( + result_set[2].path, vec![ b"deep_leaf".to_vec(), b"deep_node_1".to_vec(), - b"deeper_2".to_vec() + b"deeper_1".to_vec() + ] + ); + + assert_eq!(result_set[0].key, b"key1".to_vec()); + assert_eq!(result_set[1].key, b"key2".to_vec()); + assert_eq!(result_set[2].key, b"key3".to_vec()); + + // Test path tracking for mixed level result set + let mut query = Query::new(); + query.insert_all(); + let mut subq = Query::new(); + subq.insert_all(); + query.add_conditional_subquery(QueryItem::Key(b"innertree".to_vec()), None, Some(subq)); + + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + + assert_eq!( + result_set[0].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[1].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[2].path, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!(result_set[3].path, vec![TEST_LEAF.to_vec()]); + + assert_eq!(result_set[0].key, b"key1".to_vec()); + assert_eq!(result_set[1].key, b"key2".to_vec()); + assert_eq!(result_set[2].key, b"key3".to_vec()); + assert_eq!(result_set[3].key, b"innertree4".to_vec()); + } + + #[test] + fn test_verification_with_path_key_optional_element_trio() { + let db = make_deep_tree(); + let mut query = Query::new(); + query.insert_all(); + let path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 3); + + assert_eq!( + result_set[0], + ( + 
vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key1".to_vec(), + Some(Element::new_item(b"value1".to_vec())) + ) + ); + assert_eq!( + result_set[1], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key2".to_vec(), + Some(Element::new_item(b"value2".to_vec())) + ) + ); + assert_eq!( + result_set[2], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key3".to_vec(), + Some(Element::new_item(b"value3".to_vec())) + ) + ); + } + + #[test] + fn test_absence_proof() { + let db = make_deep_tree(); + + // simple case, request for items k2..=k5 under inner tree + // we pass them as keys as terminal keys does not handle ranges with start or + // end len greater than 1 k2, k3 should be Some, k4, k5 should be None, k1, + // k6.. should not be in map + let mut query = Query::new(); + query.insert_key(b"key2".to_vec()); + query.insert_key(b"key3".to_vec()); + query.insert_key(b"key4".to_vec()); + query.insert_key(b"key5".to_vec()); + let path_query = PathQuery::new( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + SizedQuery::new(query, Some(4), None), + ); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = + GroveDb::verify_query_with_absence_proof(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 4); + + assert_eq!( + result_set[0].0, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[1].0, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[2].0, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + assert_eq!( + result_set[3].0, + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()] + ); + + assert_eq!(result_set[0].1, b"key2".to_vec()); + assert_eq!(result_set[1].1, b"key3".to_vec()); + assert_eq!(result_set[2].1, b"key4".to_vec()); + assert_eq!(result_set[3].1, b"key5".to_vec()); + + assert_eq!(result_set[0].2, Some(Element::new_item(b"value2".to_vec()))); + 
assert_eq!(result_set[1].2, Some(Element::new_item(b"value3".to_vec()))); + assert_eq!(result_set[2].2, None); + assert_eq!(result_set[3].2, None); + } + + #[test] + fn test_subset_proof_verification() { + let db = make_deep_tree(); + + // original path query + let mut query = Query::new(); + query.insert_all(); + let mut subq = Query::new(); + subq.insert_all(); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 5); + assert_eq!( + result_set[0], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key1".to_vec(), + Some(Element::new_item(b"value1".to_vec())) + ) + ); + assert_eq!( + result_set[1], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key2".to_vec(), + Some(Element::new_item(b"value2".to_vec())) + ) + ); + assert_eq!( + result_set[2], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key3".to_vec(), + Some(Element::new_item(b"value3".to_vec())) + ) + ); + assert_eq!( + result_set[3], + ( + vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], + b"key4".to_vec(), + Some(Element::new_item(b"value4".to_vec())) + ) + ); + assert_eq!( + result_set[4], + ( + vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], + b"key5".to_vec(), + Some(Element::new_item(b"value5".to_vec())) + ) + ); + + // subset path query + let mut query = Query::new(); + query.insert_key(b"innertree".to_vec()); + let mut subq = Query::new(); + subq.insert_key(b"key1".to_vec()); + query.set_subquery(subq); + let subset_path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + let (hash, result_set) = GroveDb::verify_subset_query(&proof, &subset_path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 1); + 
assert_eq!( + result_set[0], + ( + vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], + b"key1".to_vec(), + Some(Element::new_item(b"value1".to_vec())) + ) + ); + } + #[test] + fn test_chained_path_query_verification() { + let db = make_deep_tree(); + + let mut query = Query::new(); + query.insert_all(); + let mut subq = Query::new(); + subq.insert_all(); + let mut subsubq = Query::new(); + subsubq.insert_all(); + + subq.set_subquery(subsubq); + query.set_subquery(subq); + + let path_query = PathQuery::new_unsized(vec![b"deep_leaf".to_vec()], query); + + // first prove non verbose + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 14); + + // init deeper_1 path query + let mut query = Query::new(); + query.insert_all(); + + let deeper_1_path_query = PathQuery::new_unsized( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_1".to_vec(), ], - b"key6".to_vec(), - Some(Element::new_item(b"value6".to_vec())) + query, + ); + + // define the path query generators + let mut chained_path_queries = vec![]; + chained_path_queries.push(|_elements: Vec| { + let mut query = Query::new(); + query.insert_all(); + + let deeper_2_path_query = PathQuery::new_unsized( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_2".to_vec(), + ], + query, + ); + Some(deeper_2_path_query) + }); + + // verify the path query chain + let (root_hash, results) = GroveDb::verify_query_with_chained_path_queries( + &proof, + &deeper_1_path_query, + chained_path_queries, ) - ); -} + .unwrap(); + assert_eq!(root_hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(results.len(), 2); + assert_eq!(results[0].len(), 3); + assert_eq!( + results[0][0], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_1".to_vec() + ], + b"key1".to_vec(), + 
Some(Element::new_item(b"value1".to_vec())) + ) + ); + assert_eq!( + results[0][1], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_1".to_vec() + ], + b"key2".to_vec(), + Some(Element::new_item(b"value2".to_vec())) + ) + ); + assert_eq!( + results[0][2], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_1".to_vec() + ], + b"key3".to_vec(), + Some(Element::new_item(b"value3".to_vec())) + ) + ); -#[test] -fn test_query_b_depends_on_query_a() { - // we have two trees - // one with a mapping of id to name - // another with a mapping of name to age - // we want to get the age of every one after a certain id ordered by name - let db = make_test_grovedb(); - - // TEST_LEAF contains the id to name mapping - db.insert( - [TEST_LEAF].as_ref(), - &[1], - Element::new_item(b"d".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [TEST_LEAF].as_ref(), - &[2], - Element::new_item(b"b".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [TEST_LEAF].as_ref(), - &[3], - Element::new_item(b"c".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [TEST_LEAF].as_ref(), - &[4], - Element::new_item(b"a".to_vec()), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - - // ANOTHER_TEST_LEAF contains the name to age mapping - db.insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"a", - Element::new_item(vec![10]), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"b", - Element::new_item(vec![30]), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"c", - Element::new_item(vec![12]), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - db.insert( - [ANOTHER_TEST_LEAF].as_ref(), - b"d", 
- Element::new_item(vec![46]), - None, - None, - ) - .unwrap() - .expect("successful root tree leaf insert"); - - // Query: return the age of everyone greater than id 2 ordered by name - // id 2 - b - // so we want to return the age for c and d = 12, 46 respectively - // the proof generator knows that id 2 = b, but the verifier doesn't - // hence we need to generate two proofs - // prove that 2 - b then prove age after b - // the verifier has to use the result of the first proof 2 - b - // to generate the path query for the verification of the second proof - - // query name associated with id 2 - let mut query = Query::new(); - query.insert_key(vec![2]); - let mut path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - - // first we show that this returns the correct output - let proof = db.prove_query(&path_query_one).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query_one).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 1); - assert_eq!(result_set[0].2, Some(Element::new_item(b"b".to_vec()))); - - // next query should return the age for elements above b - let mut query = Query::new(); - query.insert_range_after(b"b".to_vec()..); - let path_query_two = PathQuery::new_unsized(vec![ANOTHER_TEST_LEAF.to_vec()], query); - - // show that we get the correct output - let proof = db.prove_query(&path_query_two).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query_two).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); - assert_eq!(result_set.len(), 2); - assert_eq!(result_set[0].2, Some(Element::new_item(vec![12]))); - assert_eq!(result_set[1].2, Some(Element::new_item(vec![46]))); - - // now we merge the path queries - let mut merged_path_queries = PathQuery::merge(vec![&path_query_one, &path_query_two]).unwrap(); - merged_path_queries.query.limit = Some(3); - let proof = 
db.prove_verbose(&merged_path_queries).unwrap().unwrap(); - - // verifier only has access to the statement age > 2 - // need to first get the name associated with 2 from the proof - // then use that to construct the next path query - let mut chained_path_queries = vec![]; - chained_path_queries.push(|prev_elements: Vec| { - let mut query = Query::new(); - let name_element = prev_elements[0].2.as_ref().unwrap(); - if let Element::Item(name, ..) = name_element { - query.insert_range_after(name.to_owned()..); - Some(PathQuery::new( - vec![ANOTHER_TEST_LEAF.to_vec()], - SizedQuery::new(query, Some(2), None), - )) - } else { - None - } - }); - - // add limit to path query one - path_query_one.query.limit = Some(1); - - let (_, result_set) = GroveDb::verify_query_with_chained_path_queries( - proof.as_slice(), - &path_query_one, - chained_path_queries, - ) - .unwrap(); - assert_eq!(result_set.len(), 2); - assert_eq!(result_set[0].len(), 1); - assert_eq!(result_set[1].len(), 2); - - let age_result = result_set[1].clone(); - assert_eq!(age_result[0].2, Some(Element::new_item(vec![12]))); - assert_eq!(age_result[1].2, Some(Element::new_item(vec![46]))); -} + assert_eq!(results[1].len(), 3); + assert_eq!( + results[1][0], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_2".to_vec() + ], + b"key4".to_vec(), + Some(Element::new_item(b"value4".to_vec())) + ) + ); + assert_eq!( + results[1][1], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_2".to_vec() + ], + b"key5".to_vec(), + Some(Element::new_item(b"value5".to_vec())) + ) + ); + assert_eq!( + results[1][2], + ( + vec![ + b"deep_leaf".to_vec(), + b"deep_node_1".to_vec(), + b"deeper_2".to_vec() + ], + b"key6".to_vec(), + Some(Element::new_item(b"value6".to_vec())) + ) + ); + } -#[test] -fn test_prove_absent_path_with_intermediate_emtpy_tree() { - // root - // test_leaf (empty) - let grovedb = make_test_grovedb(); + #[test] + fn test_query_b_depends_on_query_a() { + // we 
have two trees + // one with a mapping of id to name + // another with a mapping of name to age + // we want to get the age of every one after a certain id ordered by name + let db = make_test_grovedb(); - // prove the absence of key "book" in ["test_leaf", "invalid"] - let mut query = Query::new(); - query.insert_key(b"book".to_vec()); - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"invalid".to_vec()], query); + // TEST_LEAF contains the id to name mapping + db.insert( + [TEST_LEAF].as_ref(), + &[1], + Element::new_item(b"d".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[2], + Element::new_item(b"b".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[3], + Element::new_item(b"c".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[4], + Element::new_item(b"a".to_vec()), + None, + None, + ) + .unwrap() + .expect("successful root tree leaf insert"); - let proof = grovedb - .prove_query(&path_query) + // ANOTHER_TEST_LEAF contains the name to age mapping + db.insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"a", + Element::new_item(vec![10]), + None, + None, + ) + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"b", + Element::new_item(vec![30]), + None, + None, + ) + .unwrap() + .expect("successful root tree leaf insert"); + db.insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"c", + Element::new_item(vec![12]), + None, + None, + ) .unwrap() - .expect("should generate proofs"); + .expect("successful root tree leaf insert"); + db.insert( + [ANOTHER_TEST_LEAF].as_ref(), + b"d", + Element::new_item(vec![46]), + None, + None, + ) + .unwrap() + .expect("successful root tree leaf insert"); + + // Query: return the age of everyone greater than id 2 ordered by name + // id 
2 - b + // so we want to return the age for c and d = 12, 46 respectively + // the proof generator knows that id 2 = b, but the verifier doesn't + // hence we need to generate two proofs + // prove that 2 - b then prove age after b + // the verifier has to use the result of the first proof 2 - b + // to generate the path query for the verification of the second proof + + // query name associated with id 2 + let mut query = Query::new(); + query.insert_key(vec![2]); + let mut path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); + + // first we show that this returns the correct output + let proof = db.prove_query(&path_query_one, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query_one).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 1); + assert_eq!(result_set[0].2, Some(Element::new_item(b"b".to_vec()))); + + // next query should return the age for elements above b + let mut query = Query::new(); + query.insert_range_after(b"b".to_vec()..); + let path_query_two = PathQuery::new_unsized(vec![ANOTHER_TEST_LEAF.to_vec()], query); + + // show that we get the correct output + let proof = db.prove_query(&path_query_two, None).unwrap().unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query_two).unwrap(); + assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(result_set.len(), 2); + assert_eq!(result_set[0].2, Some(Element::new_item(vec![12]))); + assert_eq!(result_set[1].2, Some(Element::new_item(vec![46]))); + + // now we merge the path queries + let mut merged_path_queries = + PathQuery::merge(vec![&path_query_one, &path_query_two]).unwrap(); + merged_path_queries.query.limit = Some(3); + let proof = db.prove_query(&merged_path_queries, None).unwrap().unwrap(); + + // verifier only has access to the statement age > 2 + // need to first get the name associated with 2 from the proof + // then use that to construct the 
next path query + let mut chained_path_queries = vec![]; + chained_path_queries.push(|prev_elements: Vec| { + let mut query = Query::new(); + let name_element = prev_elements[0].2.as_ref().unwrap(); + if let Element::Item(name, ..) = name_element { + query.insert_range_after(name.to_owned()..); + Some(PathQuery::new( + vec![ANOTHER_TEST_LEAF.to_vec()], + SizedQuery::new(query, Some(2), None), + )) + } else { + None + } + }); + + // add limit to path query one + path_query_one.query.limit = Some(1); - let (root_hash, result_set) = - GroveDb::verify_query(proof.as_slice(), &path_query).expect("should verify proof"); - assert_eq!(result_set.len(), 0); - assert_eq!(root_hash, grovedb.root_hash(None).unwrap().unwrap()); + let (_, result_set) = GroveDb::verify_query_with_chained_path_queries( + proof.as_slice(), + &path_query_one, + chained_path_queries, + ) + .unwrap(); + assert_eq!(result_set.len(), 2); + assert_eq!(result_set[0].len(), 1); + assert_eq!(result_set[1].len(), 2); + + let age_result = result_set[1].clone(); + assert_eq!(age_result[0].2, Some(Element::new_item(vec![12]))); + assert_eq!(age_result[1].2, Some(Element::new_item(vec![46]))); + } + + #[test] + fn test_prove_absent_path_with_intermediate_emtpy_tree() { + // root + // test_leaf (empty) + let grovedb = make_test_grovedb(); + + // prove the absence of key "book" in ["test_leaf", "invalid"] + let mut query = Query::new(); + query.insert_key(b"book".to_vec()); + let path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"invalid".to_vec()], query); + + let proof = grovedb + .prove_query(&path_query, None) + .unwrap() + .expect("should generate proofs"); + + let (root_hash, result_set) = + GroveDb::verify_query(proof.as_slice(), &path_query).expect("should verify proof"); + assert_eq!(result_set.len(), 0); + assert_eq!(root_hash, grovedb.root_hash(None).unwrap().unwrap()); + } } diff --git a/grovedb/src/tests/sum_tree_tests.rs b/grovedb/src/tests/sum_tree_tests.rs index 6c4a7589..8f28932f 
100644 --- a/grovedb/src/tests/sum_tree_tests.rs +++ b/grovedb/src/tests/sum_tree_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Sum tree tests use grovedb_merk::{ @@ -103,7 +75,7 @@ fn test_sum_tree_behaves_like_regular_tree() { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"key".to_vec()], query); let proof = db - .prove_query(&path_query) + .prove_query(&path_query, None) .unwrap() .expect("should generate proof"); let (root_hash, result_set) = @@ -169,7 +141,7 @@ fn test_sum_item_behaves_like_regular_item() { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"sumkey".to_vec()], query); let proof = db - .prove_query(&path_query) + .prove_query(&path_query, None) .unwrap() .expect("should generate proof"); let (root_hash, result_set) = diff --git a/grovedb/src/versioning.rs b/grovedb/src/versioning.rs deleted file mode 100644 index 5a724afc..00000000 --- a/grovedb/src/versioning.rs +++ /dev/null @@ -1,63 +0,0 @@ -use std::io::Cursor; - -use integer_encoding::{VarInt, VarIntReader}; - -use crate::Error; - -pub(crate) const PROOF_VERSION: u32 = 1; - -/// Reads a version number from the given byte slice using variable-length -/// encoding. Returns a Result containing the parsed u32 version number, or an -/// Error if the data is corrupted and could not be read. -pub fn read_proof_version(mut bytes: &[u8]) -> Result { - bytes - .read_varint() - .map_err(|_| Error::CorruptedData("could not read version info".to_string())) -} - -/// Reads a version number from the given byte slice using variable-length -/// encoding, and returns the version number as well as a slice of the remaining -/// bytes. 
-pub fn read_and_consume_proof_version(bytes: &[u8]) -> Result<(u32, &[u8]), Error> { - let mut cursor = Cursor::new(bytes); - let version_number = cursor - .read_varint() - .map_err(|_| Error::CorruptedData("sdfs".to_string()))?; - let version_length: usize = cursor.position() as usize; - Ok((version_number, &bytes[version_length..])) -} - -/// Encodes the given version number as variable-length bytes and adds it to the -/// beginning of the given Vec, returning the modified vector. -pub(crate) fn prepend_version_to_bytes(mut bytes: Vec, version: u32) -> Result, Error> { - let version_bytes = version.encode_var_vec(); - bytes.splice(..0, version_bytes); - Ok(bytes) -} - -#[cfg(test)] -mod tests { - - use crate::versioning::{ - prepend_version_to_bytes, read_and_consume_proof_version, read_proof_version, - }; - - #[test] - fn read_correct_version() { - let data = vec![1, 2, 3]; - let version = 500_u32; - - // prepend the version information to the data vector - let new_data = prepend_version_to_bytes(data, version).unwrap(); - assert_eq!(new_data, [244, 3, 1, 2, 3]); - - // show that read_version doesn't consume - assert_eq!(read_proof_version(new_data.as_slice()).unwrap(), 500); - assert_eq!(new_data, [244, 3, 1, 2, 3]); - - // show that we consume the version number and return the remaining vector - let (version_number, data_vec) = read_and_consume_proof_version(&new_data).unwrap(); - assert_eq!(version_number, 500_u32); - assert_eq!(data_vec, [1, 2, 3]); - } -} diff --git a/merk/Cargo.toml b/merk/Cargo.toml index d7864897..2d1c65be 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -19,15 +19,12 @@ indexmap = "2.2.6" grovedb-costs = { version = "1.0.0-rc.2", path = "../costs" } grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize" } grovedb-path = { version = "1.0.0-rc.2", path = "../path" } +hex = { version = "0.4.3" } [dependencies.time] version = "0.3.34" optional = true -[dependencies.hex] -version = "0.4.3" -optional = true - 
[dependencies.colored] version = "2.1.0" optional = true @@ -55,9 +52,9 @@ optional = true [features] default = ["full"] +proof_debug = [] full = ["rand", "time", - "hex", "colored", "num_cpus", "byteorder", diff --git a/merk/benches/merk.rs b/merk/benches/merk.rs index b0f9cca4..ff0fbaef 100644 --- a/merk/benches/merk.rs +++ b/merk/benches/merk.rs @@ -408,7 +408,7 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { b.iter_with_large_drop(|| { let keys = prove_keys_per_batch[i % n_batches].clone(); - merk.prove_unchecked(keys, None, None, true) + merk.prove_unchecked(keys, None, true) .unwrap() .expect("prove failed"); i += 1; diff --git a/merk/src/lib.rs b/merk/src/lib.rs index 356bd5b8..d746a885 100644 --- a/merk/src/lib.rs +++ b/merk/src/lib.rs @@ -70,12 +70,8 @@ mod visualize; #[cfg(feature = "full")] pub use ed; -#[cfg(feature = "full")] -pub use error::Error; #[cfg(any(feature = "full", feature = "verify"))] -pub use proofs::query::execute_proof; -#[cfg(any(feature = "full", feature = "verify"))] -pub use proofs::query::verify_query; +pub use error::Error; #[cfg(feature = "full")] pub use tree::{ BatchEntry, Link, MerkBatch, Op, PanicSource, HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, diff --git a/merk/src/merk/chunks.rs b/merk/src/merk/chunks.rs index f6b1b64c..ef94571e 100644 --- a/merk/src/merk/chunks.rs +++ b/merk/src/merk/chunks.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all 
copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - use std::collections::VecDeque; use ed::Encode; diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index 28ce3f43..e9bab4f7 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -286,6 +286,14 @@ where }) } + /// Returns if the merk has a root tree set + pub fn has_root_key(&self) -> bool { + let tree = self.tree.take(); + let res = tree.is_some(); + self.tree.set(tree); + res + } + /// Returns the total sum value in the Merk tree pub fn sum(&self) -> Result, Error> { self.use_tree(|tree| match tree { diff --git a/merk/src/merk/prove.rs b/merk/src/merk/prove.rs index 7f295534..99227c13 100644 --- a/merk/src/merk/prove.rs +++ b/merk/src/merk/prove.rs @@ -29,14 +29,13 @@ where &self, query: Query, limit: Option, - offset: Option, ) -> CostResult { let left_to_right = query.left_to_right; - self.prove_unchecked(query, limit, offset, left_to_right) - .map_ok(|(proof, limit, offset)| { + self.prove_unchecked(query, limit, left_to_right) + .map_ok(|(proof, limit)| { let mut bytes = Vec::with_capacity(128); encode_into(proof.iter(), &mut bytes); - ProofConstructionResult::new(bytes, limit, offset) + ProofConstructionResult::new(bytes, limit) }) } @@ -55,11 +54,10 @@ where &self, query: Query, limit: Option, - offset: Option, ) -> CostResult { let left_to_right = query.left_to_right; - self.prove_unchecked(query, limit, offset, left_to_right) - .map_ok(|(proof, limit, offset)| 
ProofWithoutEncodingResult::new(proof, limit, offset)) + self.prove_unchecked(query, limit, left_to_right) + .map_ok(|(proof, limit)| ProofWithoutEncodingResult::new(proof, limit)) } /// Creates a Merkle proof for the list of queried keys. For each key in @@ -78,7 +76,6 @@ where &self, query: I, limit: Option, - offset: Option, left_to_right: bool, ) -> CostResult where @@ -95,14 +92,46 @@ where .wrap_with_cost(Default::default()) .flat_map_ok(|tree| { let mut ref_walker = RefWalker::new(tree, self.source()); - ref_walker.create_proof(query_vec.as_slice(), limit, offset, left_to_right) + ref_walker.create_proof(query_vec.as_slice(), limit, left_to_right) }) - .map_ok(|(proof, _, limit, offset, ..)| (proof, limit, offset)) + .map_ok(|(proof, _, limit, ..)| (proof, limit)) + }) + } + + /// Creates a Merkle proof for the list of queried keys. For each key in + /// the query, if the key is found in the store then the value will be + /// proven to be in the tree. For each key in the query that does not + /// exist in the tree, its absence will be proven by including + /// boundary keys. + /// The proof returned is in an encoded format which can be verified with + /// `merk::verify`. + /// + /// This is unsafe because the keys in `query` must be sorted and unique - + /// if they are not, there will be undefined behavior. For a safe version + /// of this method which checks to ensure the batch is sorted and + /// unique, see `prove`. 
+ pub fn prove_unchecked_query_items( + &self, + query_items: &[QueryItem], + limit: Option, + left_to_right: bool, + ) -> CostResult { + self.use_tree_mut(|maybe_tree| { + maybe_tree + .ok_or(Error::CorruptedCodeExecution( + "Cannot create proof for empty tree", + )) + .wrap_with_cost(Default::default()) + .flat_map_ok(|tree| { + let mut ref_walker = RefWalker::new(tree, self.source()); + ref_walker.create_proof(query_items, limit, left_to_right) + }) + .map_ok(|(proof, _, limit, ..)| (proof, limit)) }) } } -type Proof = (LinkedList, Option, Option); +type Proof = (LinkedList, Option); /// Proof construction result pub struct ProofConstructionResult { @@ -110,18 +139,12 @@ pub struct ProofConstructionResult { pub proof: Vec, /// Limit pub limit: Option, - /// Offset - pub offset: Option, } impl ProofConstructionResult { /// New ProofConstructionResult - pub fn new(proof: Vec, limit: Option, offset: Option) -> Self { - Self { - proof, - limit, - offset, - } + pub fn new(proof: Vec, limit: Option) -> Self { + Self { proof, limit } } } @@ -131,17 +154,11 @@ pub struct ProofWithoutEncodingResult { pub proof: LinkedList, /// Limit pub limit: Option, - /// Offset - pub offset: Option, } impl ProofWithoutEncodingResult { /// New ProofWithoutEncodingResult - pub fn new(proof: LinkedList, limit: Option, offset: Option) -> Self { - Self { - proof, - limit, - offset, - } + pub fn new(proof: LinkedList, limit: Option) -> Self { + Self { proof, limit } } } diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index 9e26b1af..c5ce1286 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -710,10 +710,7 @@ mod tests { let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk( - &traversal_instruction_as_vec_bytes(&vec![LEFT, LEFT]), - chunk, - ) + .process_chunk(&traversal_instruction_as_vec_bytes(&[LEFT, LEFT]), chunk) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map 
should have 1 less element @@ -726,10 +723,8 @@ mod tests { // let's try to apply the second chunk again, should not work let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap(); // apply second chunk - let chunk_process_result = restorer.process_chunk( - &traversal_instruction_as_vec_bytes(&vec![LEFT, LEFT]), - chunk, - ); + let chunk_process_result = + restorer.process_chunk(&traversal_instruction_as_vec_bytes(&[LEFT, LEFT]), chunk); assert!(chunk_process_result.is_err()); assert!(matches!( chunk_process_result, @@ -739,10 +734,8 @@ mod tests { // next let's get a random but expected chunk and work with that e.g. chunk 4 // but let's apply it to the wrong place let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap(); - let chunk_process_result = restorer.process_chunk( - &traversal_instruction_as_vec_bytes(&vec![LEFT, RIGHT]), - chunk, - ); + let chunk_process_result = + restorer.process_chunk(&traversal_instruction_as_vec_bytes(&[LEFT, RIGHT]), chunk); assert!(chunk_process_result.is_err()); assert!(matches!( chunk_process_result, @@ -755,10 +748,7 @@ mod tests { let (chunk, _) = chunk_producer.chunk_with_index(5).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk( - &traversal_instruction_as_vec_bytes(&vec![RIGHT, RIGHT]), - chunk, - ) + .process_chunk(&traversal_instruction_as_vec_bytes(&[RIGHT, RIGHT]), chunk) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element @@ -772,10 +762,7 @@ mod tests { let (chunk, _) = chunk_producer.chunk_with_index(3).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk( - &traversal_instruction_as_vec_bytes(&vec![LEFT, RIGHT]), - chunk, - ) + .process_chunk(&traversal_instruction_as_vec_bytes(&[LEFT, RIGHT]), chunk) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element @@ -789,10 +776,7 @@ mod tests { let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap(); // apply second chunk let new_chunk_ids = 
restorer - .process_chunk( - &traversal_instruction_as_vec_bytes(&vec![RIGHT, LEFT]), - chunk, - ) + .process_chunk(&traversal_instruction_as_vec_bytes(&[RIGHT, LEFT]), chunk) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element @@ -1035,7 +1019,7 @@ mod tests { // first restore the first chunk let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1).unwrap(); let new_chunk_ids = restorer - .process_chunk(&traversal_instruction_as_vec_bytes(&vec![]), chunk) + .process_chunk(&traversal_instruction_as_vec_bytes(&[]), chunk) .expect("should process chunk"); assert_eq!(new_chunk_ids.len(), 4); assert_eq!(next_chunk_index, Some(2)); @@ -1273,7 +1257,7 @@ mod tests { // first restore the first chunk let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1).unwrap(); let new_chunk_ids = restorer - .process_chunk(&traversal_instruction_as_vec_bytes(&vec![]), chunk) + .process_chunk(&traversal_instruction_as_vec_bytes(&[]), chunk) .expect("should process chunk"); assert_eq!(new_chunk_ids.len(), 4); assert_eq!(next_chunk_index, Some(2)); diff --git a/merk/src/proofs/chunk.rs b/merk/src/proofs/chunk.rs index 22334688..063a3575 100644 --- a/merk/src/proofs/chunk.rs +++ b/merk/src/proofs/chunk.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Chunk proofs mod binary_range; diff --git a/merk/src/proofs/chunk/util.rs b/merk/src/proofs/chunk/util.rs index 39c513b7..fab2024a 100644 --- a/merk/src/proofs/chunk/util.rs +++ b/merk/src/proofs/chunk/util.rs @@ -573,11 +573,11 @@ mod test { #[test] fn test_traversal_instruction_as_string() { - assert_eq!(traversal_instruction_as_vec_bytes(&vec![]), vec![]); - assert_eq!(traversal_instruction_as_vec_bytes(&vec![LEFT]), vec![1u8]); - assert_eq!(traversal_instruction_as_vec_bytes(&vec![RIGHT]), vec![0u8]); + assert_eq!(traversal_instruction_as_vec_bytes(&[]), vec![]); + assert_eq!(traversal_instruction_as_vec_bytes(&[LEFT]), vec![1u8]); + assert_eq!(traversal_instruction_as_vec_bytes(&[RIGHT]), vec![0u8]); assert_eq!( - traversal_instruction_as_vec_bytes(&vec![RIGHT, LEFT, LEFT, RIGHT]), + traversal_instruction_as_vec_bytes(&[RIGHT, LEFT, LEFT, RIGHT]), vec![0u8, 1u8, 1u8, 0u8] ); } @@ -585,20 +585,20 @@ mod test { #[test] fn test_instruction_string_to_traversal_instruction() { assert_eq!( - vec_bytes_as_traversal_instruction(&vec![1u8]).unwrap(), + vec_bytes_as_traversal_instruction(&[1u8]).unwrap(), vec![LEFT] ); assert_eq!( - vec_bytes_as_traversal_instruction(&vec![0u8]).unwrap(), + vec_bytes_as_traversal_instruction(&[0u8]).unwrap(), vec![RIGHT] ); assert_eq!( - vec_bytes_as_traversal_instruction(&vec![0u8, 0u8, 1u8]).unwrap(), + vec_bytes_as_traversal_instruction(&[0u8, 0u8, 1u8]).unwrap(), vec![RIGHT, RIGHT, LEFT] ); - 
assert!(vec_bytes_as_traversal_instruction(&vec![0u8, 0u8, 2u8]).is_err()); + assert!(vec_bytes_as_traversal_instruction(&[0u8, 0u8, 2u8]).is_err()); assert_eq!( - vec_bytes_as_traversal_instruction(&vec![]).unwrap(), + vec_bytes_as_traversal_instruction(&[]).unwrap(), Vec::::new() ); } diff --git a/merk/src/proofs/encoding.rs b/merk/src/proofs/encoding.rs index d0395fe7..eb1c055b 100644 --- a/merk/src/proofs/encoding.rs +++ b/merk/src/proofs/encoding.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Proofs encoding #[cfg(any(feature = "full", feature = "verify"))] diff --git a/merk/src/proofs/mod.rs b/merk/src/proofs/mod.rs index 1bedeec5..45f4b2e9 100644 --- a/merk/src/proofs/mod.rs +++ b/merk/src/proofs/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk proofs #[cfg(feature = "full")] @@ -104,9 +76,58 @@ pub enum Node { KVValueHash(Vec, Vec, CryptoHash), /// Represents, the key, value, value_hash and feature_type of a tree node + /// Used by Sum trees KVValueHashFeatureType(Vec, Vec, CryptoHash, TreeFeatureType), /// Represents the key, value of some referenced node and value_hash of /// current tree node KVRefValueHash(Vec, Vec, CryptoHash), } + +use std::fmt; + +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for Node { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let node_string = match self { + Node::Hash(hash) => format!("Hash(HASH[{}])", hex::encode(hash)), + Node::KVHash(kv_hash) => format!("KVHash(HASH[{}])", hex::encode(kv_hash)), + Node::KV(key, value) => { + format!("KV({}, {})", hex_to_ascii(key), hex_to_ascii(value)) + } + Node::KVValueHash(key, value, value_hash) => format!( + "KVValueHash({}, {}, HASH[{}])", + hex_to_ascii(key), + hex_to_ascii(value), + hex::encode(value_hash) + ), + Node::KVDigest(key, value_hash) => format!( + "KVDigest({}, HASH[{}])", + hex_to_ascii(key), + hex::encode(value_hash) + ), + Node::KVRefValueHash(key, value, value_hash) => format!( + "KVRefValueHash({}, {}, HASH[{}])", + hex_to_ascii(key), + hex_to_ascii(value), + hex::encode(value_hash) + ), + Node::KVValueHashFeatureType(key, value, value_hash, feature_type) => format!( + "KVValueHashFeatureType({}, {}, HASH[{}], {:?})", + hex_to_ascii(key), + hex_to_ascii(value), + hex::encode(value_hash), + feature_type + ), + }; + write!(f, "{}", node_string) + } +} + +fn hex_to_ascii(hex_value: &[u8]) -> String { + if hex_value.len() == 1 && hex_value[0] < b"0"[0] { + hex::encode(hex_value) + } else { + String::from_utf8(hex_value.to_vec()).unwrap_or_else(|_| hex::encode(hex_value)) + } +} diff --git a/merk/src/proofs/query/map.rs b/merk/src/proofs/query/map.rs index 9eb716ed..757403a2 100644 --- a/merk/src/proofs/query/map.rs +++ b/merk/src/proofs/query/map.rs @@ -1,31 +1,3 @@ -// MIT 
LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Query #![allow(unstable_name_collisions)] @@ -264,6 +236,38 @@ impl<'a> Iterator for Range<'a> { } } +#[cfg(feature = "full")] +/// `BTreeMapExtras` provides extra functionality to work with `BTreeMap` that +/// either missed or unstable +/// NOTE: We can easily remove this when the following feature will be rolled +/// out into stable rust: https://github.com/rust-lang/rust/issues/62924 +trait BTreeMapExtras { + type K; + type V; + + /// Returns `None` if `BTreeMap` is empty otherwise the first key-value pair + /// in the map. The key in this pair is the minimum key in the map. + fn first_key_value(&self) -> Option<(&Self::K, &Self::V)>; + + /// Returns `None` if `BTreeMap` is empty otherwise the last key-value pair + /// in the map. The key in this pair is the maximum key in the map. 
+ fn last_key_value(&self) -> Option<(&Self::K, &Self::V)>; +} + +#[cfg(feature = "full")] +impl BTreeMapExtras for BTreeMap { + type K = KK; + type V = VV; + + fn first_key_value(&self) -> Option<(&Self::K, &Self::V)> { + self.iter().next() + } + + fn last_key_value(&self) -> Option<(&Self::K, &Self::V)> { + self.iter().next_back() + } +} + #[cfg(feature = "full")] #[cfg(test)] mod tests { @@ -396,35 +400,3 @@ mod tests { assert_eq!(range.next().unwrap().unwrap(), (&[1][..], &[1][..])); } } - -#[cfg(feature = "full")] -/// `BTreeMapExtras` provides extra functionality to work with `BTreeMap` that -/// either missed or unstable -/// NOTE: We can easily remove this when the following feature will be rolled -/// out into stable rust: https://github.com/rust-lang/rust/issues/62924 -trait BTreeMapExtras { - type K; - type V; - - /// Returns `None` if `BTreeMap` is empty otherwise the first key-value pair - /// in the map. The key in this pair is the minimum key in the map. - fn first_key_value(&self) -> Option<(&Self::K, &Self::V)>; - - /// Returns `None` if `BTreeMap` is empty otherwise the last key-value pair - /// in the map. The key in this pair is the maximum key in the map. 
- fn last_key_value(&self) -> Option<(&Self::K, &Self::V)>; -} - -#[cfg(feature = "full")] -impl BTreeMapExtras for BTreeMap { - type K = KK; - type V = VV; - - fn first_key_value(&self) -> Option<(&Self::K, &Self::V)> { - self.iter().next() - } - - fn last_key_value(&self) -> Option<(&Self::K, &Self::V)> { - self.iter().next_back() - } -} diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index 29296efc..107a1ec8 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Query proofs #[cfg(feature = "full")] @@ -42,11 +14,11 @@ pub mod query_item; #[cfg(any(feature = "full", feature = "verify"))] mod verify; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(feature = "full")] use std::cmp::Ordering; -use std::{collections::HashSet, ops::RangeFull}; +use std::{collections::HashSet, fmt, ops::RangeFull}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(feature = "full")] use grovedb_costs::{cost_return_on_error, CostContext, CostResult, CostsExt, OperationCost}; #[cfg(any(feature = "full", feature = "verify"))] use indexmap::IndexMap; @@ -56,17 +28,20 @@ pub use map::*; pub use query_item::intersect::QueryItemIntersectionResult; #[cfg(any(feature = "full", feature = "verify"))] pub use query_item::QueryItem; +#[cfg(feature = "full")] +use verify::ProofAbsenceLimit; #[cfg(any(feature = "full", feature = "verify"))] -use verify::ProofAbsenceLimitOffset; +pub use verify::VerifyOptions; #[cfg(any(feature = "full", feature = "verify"))] -pub use verify::{execute_proof, verify_query, ProofVerificationResult, ProvedKeyValue}; +pub use verify::{ProofVerificationResult, ProvedKeyOptionalValue, ProvedKeyValue}; #[cfg(feature = "full")] use {super::Op, std::collections::LinkedList}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(feature = "full")] use super::Node; #[cfg(any(feature = "full", feature = "verify"))] use crate::error::Error; +use crate::proofs::hex_to_ascii; #[cfg(feature = "full")] use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] @@ -109,6 +84,57 @@ pub struct Query { pub left_to_right: bool, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for SubqueryBranch { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SubqueryBranch {{ ")?; + if let Some(path) = &self.subquery_path { + write!(f, "subquery_path: [")?; + for (i, path_part) in path.iter().enumerate() { + if i > 0 { + write!(f, ", ")? 
+ } + write!(f, "{}", hex_to_ascii(path_part))?; + } + write!(f, "], ")?; + } else { + write!(f, "subquery_path: None ")?; + } + if let Some(subquery) = &self.subquery { + write!(f, "subquery: {} ", subquery)?; + } else { + write!(f, "subquery: None ")?; + } + write!(f, "}}") + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Query {{")?; + writeln!(f, " items: [")?; + for item in &self.items { + writeln!(f, " {},", item)?; + } + writeln!(f, " ],")?; + writeln!( + f, + " default_subquery_branch: {},", + self.default_subquery_branch + )?; + if let Some(conditional_branches) = &self.conditional_subquery_branches { + writeln!(f, " conditional_subquery_branches: {{")?; + for (item, branch) in conditional_branches { + writeln!(f, " {}: {},", item, branch)?; + } + writeln!(f, " }},")?; + } + writeln!(f, " left_to_right: {},", self.left_to_right)?; + write!(f, "}}") + } +} + #[cfg(any(feature = "full", feature = "verify"))] impl Query { /// Creates a new query which contains no items. @@ -116,6 +142,15 @@ impl Query { Self::new_with_direction(true) } + /// Creates a new query which contains all items. + pub fn new_range_full() -> Self { + Self { + items: vec![QueryItem::RangeFull(RangeFull)], + left_to_right: true, + ..Self::default() + } + } + /// Creates a new query which contains only one key. 
pub fn new_single_key(key: Vec) -> Self { Self { @@ -155,6 +190,37 @@ impl Query { } } + pub fn has_subquery_on_key(&self, key: &[u8], in_path: bool) -> bool { + if in_path || self.default_subquery_branch.subquery.is_some() { + return true; + } + if let Some(conditional_subquery_branches) = self.conditional_subquery_branches.as_ref() { + for (query_item, subquery) in conditional_subquery_branches { + if query_item.contains(key) { + return subquery.subquery.is_some(); + } + } + } + false + } + + pub fn has_subquery_or_subquery_path_on_key(&self, key: &[u8], in_path: bool) -> bool { + if in_path + || self.default_subquery_branch.subquery.is_some() + || self.default_subquery_branch.subquery_path.is_some() + { + return true; + } + if let Some(conditional_subquery_branches) = self.conditional_subquery_branches.as_ref() { + for query_item in conditional_subquery_branches.keys() { + if query_item.contains(key) { + return true; + } + } + } + false + } + /// Pushes terminal key paths and keys to `result`, no more than /// `max_results`. Returns the number of terminal keys added. /// @@ -520,10 +586,9 @@ where &mut self, query: &[QueryItem], limit: Option, - offset: Option, left_to_right: bool, - ) -> CostResult { - self.create_proof(query, limit, offset, left_to_right) + ) -> CostResult { + self.create_proof(query, limit, left_to_right) } /// Generates a proof for the list of queried keys. 
Returns a tuple @@ -537,9 +602,8 @@ where &mut self, query: &[QueryItem], limit: Option, - offset: Option, left_to_right: bool, - ) -> CostResult { + ) -> CostResult { let mut cost = OperationCost::default(); // TODO: don't copy into vec, support comparing QI to byte slice @@ -557,8 +621,6 @@ where let current_node_in_query: bool; let mut node_on_non_inclusive_bounds = false; - // becomes true if the offset exists and is non zero - let mut skip_current_node = false; let (mut left_items, mut right_items) = match search { Ok(index) => { @@ -602,90 +664,65 @@ where } }; - if offset.is_none() || offset == Some(0) { - // when the limit hits zero, the rest of the query batch should be cleared - // so empty the left, right query batch, and set the current node to not found - if let Some(current_limit) = limit { - if current_limit == 0 { - left_items = &[]; - search = Err(Default::default()); - right_items = &[]; - } + // when the limit hits zero, the rest of the query batch should be cleared + // so empty the left, right query batch, and set the current node to not found + if let Some(current_limit) = limit { + if current_limit == 0 { + left_items = &[]; + search = Err(Default::default()); + right_items = &[]; } } let proof_direction = left_to_right; // signifies what direction the DFS should go - let (mut proof, left_absence, mut new_limit, mut new_offset) = if left_to_right { + let (mut proof, left_absence, mut new_limit) = if left_to_right { cost_return_on_error!( &mut cost, - self.create_child_proof(proof_direction, left_items, limit, offset, left_to_right) + self.create_child_proof(proof_direction, left_items, limit, left_to_right) ) } else { cost_return_on_error!( &mut cost, - self.create_child_proof(proof_direction, right_items, limit, offset, left_to_right) + self.create_child_proof(proof_direction, right_items, limit, left_to_right) ) }; - if let Some(current_offset) = new_offset { - if current_offset > 0 && current_node_in_query && !node_on_non_inclusive_bounds 
{ - // reserve offset slot for current node before generating proof for right - // subtree - new_offset = Some(current_offset - 1); - skip_current_node = true; - } - } - - if !skip_current_node && (new_offset.is_none() || new_offset == Some(0)) { - if let Some(current_limit) = new_limit { - // if after generating proof for the left subtree, the limit becomes 0 - // clear the current node and clear the right batch - if current_limit == 0 { + if let Some(current_limit) = new_limit { + // if after generating proof for the left subtree, the limit becomes 0 + // clear the current node and clear the right batch + if current_limit == 0 { + if left_to_right { + right_items = &[]; + } else { + left_items = &[]; + } + search = Err(Default::default()); + } else if current_node_in_query && !node_on_non_inclusive_bounds { + // if limit is not zero, reserve a limit slot for the current node + // before generating proof for the right subtree + new_limit = Some(current_limit - 1); + // if after limit slot reservation, limit becomes 0, right query + // should be cleared + if current_limit - 1 == 0 { if left_to_right { right_items = &[]; } else { left_items = &[]; } - search = Err(Default::default()); - } else if current_node_in_query && !node_on_non_inclusive_bounds { - // if limit is not zero, reserve a limit slot for the current node - // before generating proof for the right subtree - new_limit = Some(current_limit - 1); - // if after limit slot reservation, limit becomes 0, right query - // should be cleared - if current_limit - 1 == 0 { - if left_to_right { - right_items = &[]; - } else { - left_items = &[]; - } - } } } } let proof_direction = !proof_direction; // search the opposite path on second pass - let (mut right_proof, right_absence, new_limit, new_offset) = if left_to_right { + let (mut right_proof, right_absence, new_limit) = if left_to_right { cost_return_on_error!( &mut cost, - self.create_child_proof( - proof_direction, - right_items, - new_limit, - new_offset, - 
left_to_right, - ) + self.create_child_proof(proof_direction, right_items, new_limit, left_to_right,) ) } else { cost_return_on_error!( &mut cost, - self.create_child_proof( - proof_direction, - left_items, - new_limit, - new_offset, - left_to_right, - ) + self.create_child_proof(proof_direction, left_items, new_limit, left_to_right,) ) }; @@ -693,7 +730,7 @@ where proof.push_back(match search { Ok(_) => { - if node_on_non_inclusive_bounds || skip_current_node { + if node_on_non_inclusive_bounds { if left_to_right { Op::Push(self.to_kvdigest_node()) } else { @@ -737,13 +774,7 @@ where } } - Ok(( - proof, - (left_absence.0, right_absence.1), - new_limit, - new_offset, - )) - .wrap_with_cost(cost) + Ok((proof, (left_absence.0, right_absence.1), new_limit)).wrap_with_cost(cost) } /// Similar to `create_proof`. Recurses into the child on the given side and @@ -754,16 +785,15 @@ where left: bool, query: &[QueryItem], limit: Option, - offset: Option, left_to_right: bool, - ) -> CostResult { + ) -> CostResult { if !query.is_empty() { self.walk(left, None::<&fn(&[u8]) -> Option>) .flat_map_ok(|child_opt| { if let Some(mut child) = child_opt { - child.create_proof(query, limit, offset, left_to_right) + child.create_proof(query, limit, left_to_right) } else { - Ok((LinkedList::new(), (true, true), limit, offset)) + Ok((LinkedList::new(), (true, true), limit)) .wrap_with_cost(Default::default()) } }) @@ -774,10 +804,9 @@ where } else { Op::PushInverted(link.to_hash_node()) }); - Ok((proof, (false, false), limit, offset)).wrap_with_cost(Default::default()) + Ok((proof, (false, false), limit)).wrap_with_cost(Default::default()) } else { - Ok((LinkedList::new(), (false, false), limit, offset)) - .wrap_with_cost(Default::default()) + Ok((LinkedList::new(), (false, false), limit)).wrap_with_cost(Default::default()) } } } @@ -787,32 +816,40 @@ where #[cfg(test)] mod test { + macro_rules! 
compare_result_tuples_not_optional { + ($result_set:expr, $expected_result_set:expr) => { + assert_eq!( + $expected_result_set.len(), + $result_set.len(), + "Result set lengths do not match" + ); + for i in 0..$expected_result_set.len() { + assert_eq!( + $expected_result_set[i].0, $result_set[i].key, + "Key mismatch at index {}", + i + ); + assert_eq!( + &$expected_result_set[i].1, + $result_set[i].value.as_ref().expect("expected value"), + "Value mismatch at index {}", + i + ); + } + }; + } + use super::{ super::{encoding::encode_into, *}, *, }; use crate::{ - proofs::query::{ - query_item::QueryItem::RangeAfter, - verify, - verify::{verify_query, ProvedKeyValue}, - }, + proofs::query::{query_item::QueryItem::RangeAfter, verify}, test_utils::make_tree_seq, tree::{NoopCommit, PanicSource, RefWalker, TreeNode}, TreeFeatureType::BasicMerkNode, }; - fn compare_result_tuples( - result_set: Vec, - expected_result_set: Vec<(Vec, Vec)>, - ) { - assert_eq!(expected_result_set.len(), result_set.len()); - for i in 0..expected_result_set.len() { - assert_eq!(expected_result_set[i].0, result_set[i].key); - assert_eq!(expected_result_set[i].1, result_set[i].value); - } - } - fn make_3_node_tree() -> TreeNode { let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode) .unwrap() @@ -875,7 +912,6 @@ mod test { .collect::>() .as_slice(), None, - None, true, ) .unwrap() @@ -893,7 +929,8 @@ mod test { query.insert_key(key.clone()); } - let result = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let result = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .expect("verify failed"); @@ -905,7 +942,10 @@ mod test { } for (key, expected_value) in keys.iter().zip(expected_result.iter()) { - assert_eq!(values.get(key), expected_value.as_ref()); + assert_eq!( + values.get(key).and_then(|a| a.as_ref()), + expected_value.as_ref() + ); } } @@ -1105,7 +1145,7 @@ mod test { let mut walker = RefWalker::new(&mut tree, PanicSource 
{}); let (proof, absence, ..) = walker - .create_full_proof(vec![].as_slice(), None, None, true) + .create_full_proof(vec![].as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -1138,16 +1178,10 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); - let res = verify_query( - bytes.as_slice(), - &Query::new(), - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); + let res = Query::new() + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); assert!(res.result_set.is_empty()); } @@ -1156,9 +1190,9 @@ mod test { let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![5])]; + let query_items = vec![QueryItem::Key(vec![5])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -1196,20 +1230,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![5], vec![5])]); } #[test] @@ -1217,9 +1245,9 @@ mod test { let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![3])]; + let query_items = vec![QueryItem::Key(vec![3])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -1257,20 +1285,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![3], vec![3])]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![3], vec![3])]); } #[test] @@ -1278,9 +1300,9 @@ mod test { let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![3]), QueryItem::Key(vec![7])]; + let query_items = vec![QueryItem::Key(vec![3]), QueryItem::Key(vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -1322,20 +1344,17 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![3], vec![3]), (vec![7], vec![7])]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![3], vec![3]), (vec![7], vec![7])] + ); } #[test] @@ -1343,13 +1362,13 @@ mod test { let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![ + let query_items = vec![ QueryItem::Key(vec![3]), QueryItem::Key(vec![5]), QueryItem::Key(vec![7]), ]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -1395,22 +1414,16 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![3], vec![3]), (vec![5], vec![5]), (vec![7], vec![7])], + vec![(vec![3], vec![3]), (vec![5], vec![5]), (vec![7], vec![7])] ); } @@ -1419,9 +1432,9 @@ mod test { let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![8])]; + let query_items = vec![QueryItem::Key(vec![8])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -1458,20 +1471,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, Vec::<(Vec, Vec)>::new()); } #[test] @@ -1479,9 +1486,9 @@ mod test { let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Key(vec![6])]; + let query_items = vec![QueryItem::Key(vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -1521,20 +1528,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, Vec::<(Vec, Vec)>::new()); } #[test] @@ -1614,14 +1615,14 @@ mod test { let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![ + let query_items = vec![ QueryItem::Key(vec![1]), QueryItem::Key(vec![2]), QueryItem::Key(vec![3]), QueryItem::Key(vec![4]), ]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -1715,27 +1716,21 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![1], vec![1]), (vec![2], vec![2]), (vec![3], vec![3]), (vec![4], vec![4]), - ], + ] ); } @@ -1787,11 +1782,11 @@ mod test { let mut tree = make_tree_seq(10); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Range( + let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -1868,162 +1863,50 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::Range( - vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( - res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])], - ); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::Range( - vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::Range( - vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(198)); // right to left test let mut tree = make_tree_seq(10); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Range( + let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { + let mut query = Query::new_with_direction(false); + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], + ] ); } @@ -2032,11 +1915,11 @@ mod test { let mut tree = make_tree_seq(10); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeInclusive( + let query_items = vec![QueryItem::RangeInclusive( vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -2114,300 +1997,143 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); - // skip 1 element + // right_to_left proof let mut tree = make_tree_seq(10); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeInclusive( + let query_items = vec![QueryItem::RangeInclusive( vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])], + vec![ + (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), + (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), + (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), + ] ); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); + } - // skip 2 elements - let mut tree = make_tree_seq(10); + #[test] + fn range_from_proof() { + let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeInclusive( - vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; + let (proof, absence, ..) 
= walker + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); + let mut iter = proof.iter(); + assert_eq!( + iter.next(), + Some(&Op::Push(Node::Hash([ + 85, 217, 56, 226, 204, 53, 103, 145, 201, 33, 178, 80, 207, 194, 104, 128, 199, + 145, 156, 208, 152, 255, 209, 24, 140, 222, 204, 193, 211, 26, 118, 58 + ]))) + ); + assert_eq!( + iter.next(), + Some(&Op::Push(Node::KVValueHash( + vec![5], + vec![5], + [ + 116, 30, 0, 135, 25, 118, 86, 14, 12, 107, 215, 214, 133, 122, 48, 45, 180, 21, + 158, 223, 88, 148, 181, 149, 189, 65, 121, 19, 81, 118, 11, 106 + ] + ))) + ); + assert_eq!(iter.next(), Some(&Op::Parent)); + assert_eq!( + iter.next(), + Some(&Op::Push(Node::KVValueHash( + vec![7], + vec![7], + [ + 63, 193, 78, 215, 236, 222, 32, 58, 144, 66, 94, 225, 145, 233, 219, 89, 102, + 51, 109, 115, 127, 3, 152, 236, 147, 183, 100, 81, 123, 109, 244, 0 + ] + ))) + ); + assert_eq!( + iter.next(), + Some(&Op::Push(Node::KVValueHash( + vec![8], + vec![8], + [ + 205, 24, 196, 78, 21, 130, 132, 58, 44, 29, 21, 175, 68, 254, 158, 189, 49, + 158, 250, 151, 137, 22, 160, 107, 216, 238, 129, 230, 199, 251, 197, 51 + ] + ))) + ); + assert_eq!(iter.next(), Some(&Op::Parent)); + assert_eq!(iter.next(), Some(&Op::Child)); + assert!(iter.next().is_none()); + assert_eq!(absence, (false, true)); + let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60])], - ); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all 
elements - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeInclusive( - vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(197)); - - // right_to_left proof - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeInclusive( - vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, false) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - - compare_result_tuples( - res.result_set, - vec![ - (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], - ); - - let mut tree = make_tree_seq(10); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeInclusive( - vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], - )]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, Some(2), false) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - None, - Some(2), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - - compare_result_tuples( - res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60])], - ); - assert_eq!(res.limit, None); - assert_eq!(res.offset, Some(0)); - } - - #[test] - fn range_from_proof() { - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, true) - .unwrap() - .expect("create_proof errored"); - - let mut iter = proof.iter(); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::Hash([ - 85, 217, 56, 226, 204, 53, 103, 145, 201, 33, 178, 80, 207, 194, 104, 128, 199, - 145, 156, 208, 152, 255, 209, 24, 140, 222, 204, 193, 211, 26, 118, 58 - ]))) - ); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVValueHash( - vec![5], - vec![5], - [ - 116, 30, 0, 135, 25, 118, 86, 14, 12, 107, 215, 214, 133, 122, 48, 45, 180, 21, - 158, 223, 88, 148, 181, 149, 189, 65, 121, 19, 81, 118, 11, 106 - ] - ))) - ); - assert_eq!(iter.next(), Some(&Op::Parent)); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVValueHash( - vec![7], - vec![7], - [ - 63, 193, 78, 215, 236, 222, 32, 58, 144, 66, 94, 225, 145, 233, 219, 89, 102, - 51, 109, 115, 127, 3, 152, 236, 147, 183, 100, 81, 123, 109, 244, 0 - ] - ))) - ); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVValueHash( - vec![8], - vec![8], - [ - 205, 24, 196, 78, 21, 130, 132, 58, 44, 29, 21, 175, 68, 254, 158, 189, 49, - 158, 250, 151, 137, 22, 160, 107, 216, 238, 129, 230, 199, 251, 197, 51 - ] - 
))) - ); - assert_eq!(iter.next(), Some(&Op::Parent)); - assert_eq!(iter.next(), Some(&Op::Child)); - assert!(iter.next().is_none()); - assert_eq!(absence, (false, true)); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( - res.result_set, - vec![(vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8])], + vec![(vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8])] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_full_proof(query_items.as_slice(), Some(1), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::Key(vec![5])]; + let equivalent_query_items = vec![QueryItem::Key(vec![5])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -2417,40 +2143,33 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![5], vec![5])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_full_proof(query_items.as_slice(), Some(2), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![ + let equivalent_query_items = vec![ QueryItem::Key(vec![5]), QueryItem::Key(vec![6]), QueryItem::Key(vec![7]), ]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -2460,36 +2179,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5]), (vec![7], vec![7])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![5], vec![5]), (vec![7], vec![7])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_full_proof(query_items.as_slice(), Some(100), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; + let equivalent_query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -2499,123 +2214,26 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8])], + vec![(vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8])] ); assert_eq!(res.limit, Some(97)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![7], vec![7])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![8], vec![8])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(197)); // right_to_left test let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); @@ -2624,54 +2242,17 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5])], + vec![(vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5])] ); - - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![5]..)]; - let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), Some(1), false) - .unwrap() - .expect("create_proof errored"); - - assert_eq!(absence, (true, false)); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - Some(1), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![7], vec![7]), (vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); } #[test] @@ -2679,9 +2260,9 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -2758,44 +2339,37 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), (vec![3], vec![3]), (vec![4], vec![4]), (vec![5], vec![5]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_full_proof(query_items.as_slice(), Some(1), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![2])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![2])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -2805,36 +2379,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![2], vec![2])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_full_proof(query_items.as_slice(), Some(2), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![3])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![3])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -2844,167 +2411,66 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2]), (vec![3], vec![3])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![2], vec![2]), (vec![3], vec![3])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_full_proof(query_items.as_slice(), Some(100), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let equivalent_query_items = vec![QueryItem::RangeTo(..vec![6])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) - .unwrap() - .expect("create_proof errored"); - - assert_eq!(proof, equivalent_proof); - assert_eq!(absence, equivalent_absence); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( - res.result_set, - vec![ - (vec![2], vec![2]), - (vec![3], vec![3]), - (vec![4], vec![4]), - (vec![5], vec![5]), - ], - ); - assert_eq!(res.limit, Some(96)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![3], vec![3])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); + assert_eq!(proof, equivalent_proof); + assert_eq!(absence, equivalent_absence); let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(196)); + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![ + (vec![2], vec![2]), + (vec![3], vec![3]), + (vec![4], vec![4]), + (vec![5], vec![5]), + ] + ); + assert_eq!(res.limit, Some(96)); // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); @@ -3013,35 +2479,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![5], vec![5]), (vec![4], vec![4]), (vec![3], vec![3]), (vec![2], vec![2]), - ], + ] ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeTo(..vec![6])]; + let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(2), None, false) + .create_full_proof(query_items.as_slice(), Some(2), false) .unwrap() .expect("create_proof errored"); @@ -3050,22 +2510,18 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5]), (vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![5], vec![5]), (vec![4], vec![4])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); } #[test] @@ -3073,9 +2529,9 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3152,44 +2608,37 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), (vec![3], vec![3]), (vec![4], vec![4]), (vec![5], vec![5]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_full_proof(query_items.as_slice(), Some(1), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![2])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![2])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3199,36 +2648,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![2], vec![2])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_full_proof(query_items.as_slice(), Some(2), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![3])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![3])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3238,36 +2680,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2]), (vec![3], vec![3])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![2], vec![2]), (vec![3], vec![3])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_full_proof(query_items.as_slice(), Some(100), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3277,128 +2715,31 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), (vec![3], vec![3]), (vec![4], vec![4]), (vec![5], vec![5]), - ], + ] ); assert_eq!(res.limit, Some(96)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![3], vec![3])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(196)); // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); @@ -3407,35 +2748,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![5], vec![5]), (vec![4], vec![4]), (vec![3], vec![3]), (vec![2], vec![2]), - ], + ] ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeToInclusive(..=vec![6])]; + let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), false) + .create_full_proof(query_items.as_slice(), Some(1), false) .unwrap() .expect("create_proof errored"); @@ -3444,22 +2779,15 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![5], vec![5])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); } #[test] @@ -3467,9 +2795,9 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![RangeAfter(vec![3]..)]; + let query_items = vec![RangeAfter(vec![3]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3546,44 +2874,37 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_full_proof(query_items.as_slice(), Some(1), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3593,36 +2914,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![4], vec![4])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_full_proof(query_items.as_slice(), Some(2), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3632,36 +2946,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_full_proof(query_items.as_slice(), Some(100), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; + let equivalent_query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3671,128 +2981,31 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8]), - ], + ] ); assert_eq!(res.limit, Some(96)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![7], vec![7])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfter(vec![3]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(196)); // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![RangeAfter(vec![3]..)]; + let query_items = vec![RangeAfter(vec![3]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); @@ -3801,35 +3014,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5]), (vec![4], vec![4]), - ], + ] ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![RangeAfter(vec![3]..)]; + let query_items = vec![RangeAfter(vec![3]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(3), None, false) + .create_full_proof(query_items.as_slice(), Some(3), false) .unwrap() .expect("create_proof errored"); @@ -3838,25 +3045,18 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(3), - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(3), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5])], + vec![(vec![8], vec![8]), (vec![7], vec![7]), (vec![5], vec![5])] ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); } #[test] @@ -3864,9 +3064,9 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3938,36 +3138,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_full_proof(query_items.as_slice(), Some(1), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -3977,36 +3173,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![4], vec![4])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_full_proof(query_items.as_slice(), Some(2), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4016,36 +3205,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_full_proof(query_items.as_slice(), Some(100), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let equivalent_query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4055,120 +3240,26 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); - assert_eq!(res.limit, Some(98)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(198)); + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); + assert_eq!(res.limit, Some(98)); // right_to_left let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); @@ -4177,27 +3268,24 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5]), (vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![5], vec![5]), (vec![4], vec![4])] + ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; + let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(300), Some(1), false) + .create_full_proof(query_items.as_slice(), Some(300), false) .unwrap() .expect("create_proof errored"); @@ -4206,22 +3294,18 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(300), - Some(1), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); - assert_eq!(res.limit, Some(299)); - assert_eq!(res.offset, Some(0)); + let res = query + .verify_proof(bytes.as_slice(), Some(300), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![5], vec![5]), (vec![4], vec![4])] + ); + assert_eq!(res.limit, Some(298)); } #[test] @@ -4229,9 +3313,9 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4302,39 +3386,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7])], + vec![(vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7])] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_full_proof(query_items.as_slice(), Some(1), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4344,36 +3421,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![4], vec![4])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_full_proof(query_items.as_slice(), Some(2), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4383,36 +3453,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![4], vec![4]), (vec![5], vec![5])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_full_proof(query_items.as_slice(), Some(100), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4422,149 +3488,45 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7])], + vec![(vec![4], vec![4]), (vec![5], vec![5]), (vec![7], vec![7])] ); assert_eq!(res.limit, Some(97)); - assert_eq!(res.offset, None); - // skip 1 element + // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(1), true) + let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; + let (proof, absence, ..) 
= walker + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) - .unwrap() - .expect("create_proof errored"); + assert_eq!(absence, (false, false)); let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![7], vec![7])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(197)); - - // right_to_left proof - // let mut tree = make_6_node_tree(); - // let mut walker = RefWalker::new(&mut tree, PanicSource {}); - // - // let queryitems = - // vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; - // let (proof, absence, ..) = walker - // .create_full_proof(queryitems.as_slice(), None, None, false) - // .unwrap() - // .expect("create_proof errored"); - // - // assert_eq!(absence, (false, false)); - // - // let mut bytes = vec![]; - // encode_into(proof.iter(), &mut bytes); - // let mut query = Query::new(); - // for item in queryitems { - // query.insert_item(item); - // } - // let res = verify_query( - // bytes.as_slice(), - // &query, - // None, - // None, - // false, - // tree.hash().unwrap(), - // ) - // .unwrap() - // .unwrap(); - // compare_result_tuples( - // res.result_set, - // vec![(vec![7], vec![7]), (vec![5], vec![5]), (vec![4], vec![4])], - // ); + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![7], vec![7]), (vec![5], vec![5]), (vec![4], vec![4])] + ); } #[test] @@ -4572,9 +3534,9 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4657,20 +3619,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), @@ -4679,24 +3635,23 @@ mod test { (vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8]), - ], + ] ); assert_eq!(res.limit, None); - assert_eq!(res.offset, None); // Limit result set to 1 item let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + .create_full_proof(query_items.as_slice(), Some(1), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![2])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![2])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4706,36 +3661,29 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2])]); + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![2], vec![2])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 2 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(2), None, true) + .create_full_proof(query_items.as_slice(), Some(2), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeToInclusive(..=vec![3])]; + let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![3])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4745,36 +3693,32 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2]), (vec![3], vec![3])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![2], vec![2]), (vec![3], vec![3])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, None); // Limit result set to 100 items let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(100), None, true) + .create_full_proof(query_items.as_slice(), Some(100), true) .unwrap() .expect("create_proof errored"); - let equivalent_queryitems = vec![QueryItem::RangeFull(..)]; + let equivalent_query_items = vec![QueryItem::RangeFull(..)]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_queryitems.as_slice(), None, None, true) + .create_full_proof(equivalent_query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -4784,20 +3728,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(100), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), Some(100), true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![2], vec![2]), @@ -4806,111 +3744,17 @@ mod test { (vec![5], vec![5]), (vec![7], vec![7]), (vec![8], vec![8]), - ], + ] ); assert_eq!(res.limit, Some(94)); - assert_eq!(res.offset, None); - - // skip 1 element - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFull(..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(3), Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(3), - Some(1), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( - res.result_set, - vec![(vec![3], vec![3]), (vec![4], vec![4]), (vec![5], vec![5])], - ); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip 2 elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFull(..)]; - let (proof, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(2), Some(2), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4]), (vec![5], vec![5])]); - assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); - - // skip all elements - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFull(..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(200), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(200), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![]); - assert_eq!(res.limit, Some(1)); - assert_eq!(res.offset, Some(194)); // right_to_left proof let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); @@ -4919,20 +3763,14 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![8], vec![8]), @@ -4941,15 +3779,15 @@ mod test { (vec![4], vec![4]), (vec![3], vec![3]), (vec![2], vec![2]), - ], + ] ); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFull(..)]; + let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), Some(2), Some(2), false) + .create_full_proof(query_items.as_slice(), Some(2), false) .unwrap() .expect("create_proof errored"); @@ -4958,22 +3796,18 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(2), - Some(2), - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![5], vec![5]), (vec![4], vec![4])]); + let res = query + .verify_proof(bytes.as_slice(), Some(2), false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( + res.result_set, + vec![(vec![8], vec![8]), (vec![7], vec![7])] + ); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); } #[test] @@ -4981,15 +3815,14 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![2]..)]; - let (proof, _, limit, offset) = walker - .create_full_proof(queryitems.as_slice(), Some(1), None, true) + let query_items = vec![QueryItem::RangeFrom(vec![2]..)]; + let (proof, _, limit) = walker + .create_full_proof(query_items.as_slice(), Some(1), true) .unwrap() .expect("create_proof errored"); // TODO: Add this test for other range types assert_eq!(limit, Some(0)); - assert_eq!(offset, None); let mut iter = proof.iter(); assert_eq!( @@ -5040,106 +3873,15 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![2], vec![2])]); - assert_eq!(res.limit, Some(0)); 
- assert_eq!(res.offset, None); - } - - #[test] - fn proof_with_offset() { - let mut tree = make_6_node_tree(); - let mut walker = RefWalker::new(&mut tree, PanicSource {}); - - let queryitems = vec![QueryItem::RangeFrom(vec![2]..)]; - let (proof, ..) = walker - .create_full_proof(queryitems.as_slice(), Some(1), Some(2), true) + let res = query + .verify_proof(bytes.as_slice(), Some(1), true, tree.hash().unwrap()) .unwrap() - .expect("create_proof errored"); - - let mut iter = proof.iter(); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVDigest( - vec![2], - [ - 183, 215, 112, 4, 15, 120, 14, 157, 239, 246, 188, 3, 138, 190, 166, 110, 16, - 139, 136, 208, 152, 209, 109, 36, 205, 116, 134, 235, 103, 16, 96, 178 - ] - ))) - ); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVDigest( - vec![3], - [ - 210, 173, 26, 11, 185, 253, 244, 69, 11, 216, 113, 81, 192, 139, 153, 104, 205, - 4, 107, 218, 102, 84, 170, 189, 186, 36, 48, 176, 169, 129, 231, 144 - ] - ))) - ); - assert_eq!(iter.next(), Some(&Op::Parent)); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVValueHash( - vec![4], - vec![4], - [ - 198, 129, 51, 156, 134, 199, 7, 21, 172, 89, 146, 71, 4, 16, 82, 205, 89, 51, - 227, 215, 139, 195, 237, 202, 159, 191, 209, 172, 156, 38, 239, 192 - ] - ))) - ); - assert_eq!(iter.next(), Some(&Op::Child)); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::KVHash([ - 61, 233, 169, 61, 231, 15, 78, 53, 219, 99, 131, 45, 44, 165, 68, 87, 7, 52, 238, - 68, 142, 211, 110, 161, 111, 220, 108, 11, 17, 31, 88, 197 - ]))) - ); - assert_eq!(iter.next(), Some(&Op::Parent)); - assert_eq!( - iter.next(), - Some(&Op::Push(Node::Hash([ - 133, 188, 175, 131, 60, 89, 221, 135, 133, 53, 205, 110, 58, 56, 128, 58, 1, 227, - 75, 122, 83, 20, 125, 44, 149, 44, 62, 130, 252, 134, 105, 200 - ]))) - ); - assert_eq!(iter.next(), Some(&Op::Child)); - assert!(iter.next().is_none()); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - 
for item in queryitems { - query.insert_item(item); - } - let res = verify_query( - bytes.as_slice(), - &query, - Some(1), - Some(2), - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples(res.result_set, vec![(vec![4], vec![4])]); + .unwrap(); + compare_result_tuples_not_optional!(res.result_set, vec![(vec![2], vec![2])]); assert_eq!(res.limit, Some(0)); - assert_eq!(res.offset, Some(0)); } #[test] @@ -5147,9 +3889,9 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::RangeFrom(vec![3]..)]; + let query_items = vec![QueryItem::RangeFrom(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(queryitems.as_slice(), None, None, false) + .create_full_proof(query_items.as_slice(), None, false) .unwrap() .expect("create_proof errored"); @@ -5227,21 +3969,15 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); - let mut query = Query::new(); - for item in queryitems { + let mut query = Query::new_with_direction(false); + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - false, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, false, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![8], vec![8]), @@ -5249,7 +3985,7 @@ mod test { (vec![5], vec![5]), (vec![4], vec![4]), (vec![3], vec![3]), - ], + ] ); } @@ -5258,11 +3994,11 @@ mod test { let mut tree = make_tree_seq(10); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![QueryItem::Range( + let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 6, 5], )]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -5339,25 +4075,19 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), - ], + ] ); } @@ -5366,12 +4096,12 @@ mod test { let mut tree = make_tree_seq(10); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let queryitems = vec![ + let query_items = vec![ // 7 is not inclusive QueryItem::Range(vec![0, 0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7]), ]; let (proof, absence, ..) 
= walker - .create_full_proof(queryitems.as_slice(), None, None, true) + .create_full_proof(query_items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -5447,22 +4177,16 @@ mod test { let mut bytes = vec![]; encode_into(proof.iter(), &mut bytes); let mut query = Query::new(); - for item in queryitems { + for item in query_items { query.insert_item(item); } - let res = verify_query( - bytes.as_slice(), - &query, - None, - None, - true, - tree.hash().unwrap(), - ) - .unwrap() - .unwrap(); - compare_result_tuples( + let res = query + .verify_proof(bytes.as_slice(), None, true, tree.hash().unwrap()) + .unwrap() + .unwrap(); + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])], + vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])] ); } @@ -5477,7 +4201,7 @@ mod test { query.insert_all(); let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), None, None, true) + .create_full_proof(query.items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -5488,14 +4212,15 @@ mod test { let mut query = Query::new(); query.insert_key(vec![0, 0, 0, 0, 0, 0, 0, 6]); - let res = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 1); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, - vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])], + vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60])] ); // 1..10 prove (2..=5, 7..10) subset (3..=4, 7..=8) @@ -5503,7 +4228,7 @@ mod test { query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 2]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); query.insert_range(vec![0, 0, 0, 0, 0, 0, 0, 7]..vec![0, 0, 0, 0, 0, 0, 0, 10]); let (proof, ..) 
= walker - .create_full_proof(query.items.as_slice(), None, None, true) + .create_full_proof(query.items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -5513,19 +4238,20 @@ mod test { let mut query = Query::new(); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 3]..=vec![0, 0, 0, 0, 0, 0, 0, 4]); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 7]..=vec![0, 0, 0, 0, 0, 0, 0, 8]); - let res = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 4); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 3], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 8], vec![123; 60]), - ], + ] ); // 1..10 prove (2..=5, 6..10) subset (4..=8) @@ -5533,7 +4259,7 @@ mod test { query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 2]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); query.insert_range(vec![0, 0, 0, 0, 0, 0, 0, 6]..vec![0, 0, 0, 0, 0, 0, 0, 10]); let (proof, ..) 
= walker - .create_full_proof(query.items.as_slice(), None, None, true) + .create_full_proof(query.items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -5542,12 +4268,13 @@ mod test { let mut query = Query::new(); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 4]..=vec![0, 0, 0, 0, 0, 0, 0, 8]); - let res = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 5); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), @@ -5555,7 +4282,7 @@ mod test { (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 8], vec![123; 60]), - ], + ] ); // 1..10 prove (1..=3, 2..=5) subset (1..=5) @@ -5563,7 +4290,7 @@ mod test { query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 1]..=vec![0, 0, 0, 0, 0, 0, 0, 3]); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 2]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); let (proof, ..) 
= walker - .create_full_proof(query.items.as_slice(), None, None, true) + .create_full_proof(query.items.as_slice(), None, true) .unwrap() .expect("create_proof errored"); @@ -5572,12 +4299,13 @@ mod test { let mut query = Query::new(); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 1]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); - let res = verify_query(bytes.as_slice(), &query, None, None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), None, true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 5); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 1], vec![123; 60]), @@ -5585,14 +4313,14 @@ mod test { (vec![0, 0, 0, 0, 0, 0, 0, 3], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], + ] ); // 1..10 prove full (..) limit to 5, subset (1..=5) let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), Some(5), None, true) + .create_full_proof(query.items.as_slice(), Some(5), true) .unwrap() .expect("create_proof errored"); @@ -5601,12 +4329,13 @@ mod test { let mut query = Query::new(); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 1]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); - let res = verify_query(bytes.as_slice(), &query, Some(5), None, true, expected_hash) + let res = query + .verify_proof(bytes.as_slice(), Some(5), true, expected_hash) .unwrap() .unwrap(); assert_eq!(res.result_set.len(), 5); - compare_result_tuples( + compare_result_tuples_not_optional!( res.result_set, vec![ (vec![0, 0, 0, 0, 0, 0, 0, 1], vec![123; 60]), @@ -5614,35 +4343,7 @@ mod test { (vec![0, 0, 0, 0, 0, 0, 0, 3], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], - ); - - // 1..10 prove full (..) 
limit to 5, subset (1..=5) - let mut query = Query::new(); - query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); - let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), None, Some(1), true) - .unwrap() - .expect("create_proof errored"); - - let mut bytes = vec![]; - encode_into(proof.iter(), &mut bytes); - - let mut query = Query::new(); - query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 1]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); - let res = verify_query(bytes.as_slice(), &query, None, Some(1), true, expected_hash) - .unwrap() - .unwrap(); - - assert_eq!(res.result_set.len(), 4); - compare_result_tuples( - res.result_set, - vec![ - (vec![0, 0, 0, 0, 0, 0, 0, 2], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 3], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 4], vec![123; 60]), - (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]), - ], + ] ); } @@ -5665,7 +4366,7 @@ mod test { let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); let (proof, ..) 
= walker - .create_full_proof(query.items.as_slice(), Some(3), None, true) + .create_full_proof(query.items.as_slice(), Some(3), true) .unwrap() .expect("create_proof errored"); @@ -5675,38 +4376,35 @@ mod test { // Try to query 4 let mut query = Query::new(); query.insert_key(vec![0, 0, 0, 0, 0, 0, 0, 4]); - assert!( - verify_query(bytes.as_slice(), &query, Some(3), None, true, expected_hash) - .unwrap() - .is_err() - ); + assert!(query + .verify_proof(bytes.as_slice(), Some(3), true, expected_hash) + .unwrap() + .is_err()); // if limit offset parameters are different from generation then proof // verification returns an error Try superset proof with increased limit let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); - assert!( - verify_query(bytes.as_slice(), &query, Some(4), None, true, expected_hash) - .unwrap() - .is_err() - ); + assert!(query + .verify_proof(bytes.as_slice(), Some(4), true, expected_hash) + .unwrap() + .is_err()); // Try superset proof with less limit let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); - assert!( - verify_query(bytes.as_slice(), &query, Some(2), None, true, expected_hash) - .unwrap() - .is_err() - ); + assert!(query + .verify_proof(bytes.as_slice(), Some(2), true, expected_hash) + .unwrap() + .is_err()); } #[test] fn query_from_vec() { - let queryitems = vec![QueryItem::Range( + let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; - let query = Query::from(queryitems); + let query = Query::from(query_items); let mut expected = Vec::new(); expected.push(QueryItem::Range( @@ -5722,7 +4420,7 @@ mod test { vec![0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )); let query_vec: Vec = query.into(); - let expected = vec![QueryItem::Range( + let expected = [QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; assert_eq!( @@ -5737,8 +4435,8 @@ mod test { #[test] fn 
query_item_from_vec_u8() { - let queryitems: Vec = vec![42]; - let query = QueryItem::from(queryitems); + let query_items: Vec = vec![42]; + let query = QueryItem::from(query_items); let expected = QueryItem::Key(vec![42]); assert_eq!(query, expected); @@ -5755,7 +4453,7 @@ mod test { let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, ..) = walker - .create_full_proof(vec![QueryItem::Key(vec![5])].as_slice(), None, None, true) + .create_full_proof(vec![QueryItem::Key(vec![5])].as_slice(), None, true) .unwrap() .expect("failed to create proof"); let mut bytes = vec![]; @@ -5780,7 +4478,7 @@ mod test { let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, ..) = walker - .create_full_proof(vec![QueryItem::Key(vec![5])].as_slice(), None, None, true) + .create_full_proof(vec![QueryItem::Key(vec![5])].as_slice(), None, true) .unwrap() .expect("failed to create proof"); let mut bytes = vec![]; @@ -5806,7 +4504,6 @@ mod test { .collect::>() .as_slice(), None, - None, true, ) .unwrap() @@ -5819,7 +4516,8 @@ mod test { query.insert_key(key.clone()); } - let _result = verify_query(bytes.as_slice(), &query, None, None, true, [42; 32]) + let _result = query + .verify_proof(bytes.as_slice(), None, true, [42; 32]) .unwrap() .expect("verify failed"); } diff --git a/merk/src/proofs/query/query_item/mod.rs b/merk/src/proofs/query/query_item/mod.rs index 63f3cc0a..7c81a27e 100644 --- a/merk/src/proofs/query/query_item/mod.rs +++ b/merk/src/proofs/query/query_item/mod.rs @@ -5,17 +5,19 @@ mod merge; use std::{ cmp, cmp::Ordering, + fmt, hash::Hash, ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}, }; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(feature = "full")] use grovedb_costs::{CostContext, CostsExt, OperationCost}; #[cfg(feature = "full")] use grovedb_storage::RawIterator; #[cfg(any(feature = "full", feature = "verify"))] use crate::error::Error; +use crate::proofs::hex_to_ascii; #[cfg(any(feature = 
"full", feature = "verify"))] /// A `QueryItem` represents a key or range of keys to be included in a proof. @@ -33,6 +35,50 @@ pub enum QueryItem { RangeAfterToInclusive(RangeInclusive>), } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for QueryItem { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + QueryItem::Key(key) => write!(f, "Key({})", hex_to_ascii(key)), + QueryItem::Range(range) => write!( + f, + "Range({} .. {})", + hex_to_ascii(&range.start), + hex_to_ascii(&range.end) + ), + QueryItem::RangeInclusive(range) => write!( + f, + "RangeInclusive({} ..= {})", + hex_to_ascii(range.start()), + hex_to_ascii(range.end()) + ), + QueryItem::RangeFull(_) => write!(f, "RangeFull"), + QueryItem::RangeFrom(range) => { + write!(f, "RangeFrom({} ..)", hex_to_ascii(&range.start)) + } + QueryItem::RangeTo(range) => write!(f, "RangeTo(.. {})", hex_to_ascii(&range.end)), + QueryItem::RangeToInclusive(range) => { + write!(f, "RangeToInclusive(..= {})", hex_to_ascii(&range.end)) + } + QueryItem::RangeAfter(range) => { + write!(f, "RangeAfter({} <..)", hex_to_ascii(&range.start)) + } + QueryItem::RangeAfterTo(range) => write!( + f, + "RangeAfterTo({} <.. 
{})", + hex_to_ascii(&range.start), + hex_to_ascii(&range.end) + ), + QueryItem::RangeAfterToInclusive(range) => write!( + f, + "RangeAfterToInclusive({} <..= {})", + hex_to_ascii(range.start()), + hex_to_ascii(range.end()) + ), + } + } +} + #[cfg(any(feature = "full", feature = "verify"))] impl Hash for QueryItem { fn hash(&self, state: &mut H) { @@ -381,7 +427,7 @@ impl QueryItem { } #[cfg(any(feature = "full", feature = "verify"))] - fn compare(a: &[u8], b: &[u8]) -> cmp::Ordering { + pub fn compare(a: &[u8], b: &[u8]) -> cmp::Ordering { for (ai, bi) in a.iter().zip(b.iter()) { match ai.cmp(bi) { Ordering::Equal => continue, diff --git a/merk/src/proofs/query/verify.rs b/merk/src/proofs/query/verify.rs index 39ff471a..e1d56511 100644 --- a/merk/src/proofs/query/verify.rs +++ b/merk/src/proofs/query/verify.rs @@ -1,18 +1,23 @@ +#[cfg(feature = "full")] use std::collections::LinkedList; +use std::fmt; use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; #[cfg(feature = "full")] -use crate::proofs::query::{Map, MapBuilder}; +use crate::proofs::{ + query::{Map, MapBuilder}, + Op, +}; use crate::{ error::Error, - proofs::{tree::execute, Decoder, Node, Op, Query}, + proofs::{hex_to_ascii, tree::execute, Decoder, Node, Query}, tree::value_hash, CryptoHash as MerkHash, CryptoHash, }; -#[cfg(any(feature = "full", feature = "verify"))] -pub type ProofAbsenceLimitOffset = (LinkedList, (bool, bool), Option, Option); +#[cfg(feature = "full")] +pub type ProofAbsenceLimit = (LinkedList, (bool, bool), Option); #[cfg(feature = "full")] /// Verify proof against expected hash @@ -37,266 +42,406 @@ pub fn verify(bytes: &[u8], expected_hash: MerkHash) -> CostResult { }) } -#[cfg(any(feature = "full", feature = "verify"))] -/// Verifies the encoded proof with the given query -/// -/// Every key in `keys` is checked to either have a key/value pair in the proof, -/// or to have its absence in the tree proven. 
-/// -/// Returns `Err` if the proof is invalid, or a list of proven values associated -/// with `keys`. For example, if `keys` contains keys `A` and `B`, the returned -/// list will contain 2 elements, the value of `A` and the value of `B`. Keys -/// proven to be absent in the tree will have an entry of `None`, keys that have -/// a proven value will have an entry of `Some(value)`. -pub fn execute_proof( - bytes: &[u8], - query: &Query, - limit: Option, - offset: Option, - left_to_right: bool, -) -> CostResult<(MerkHash, ProofVerificationResult), Error> { - let mut cost = OperationCost::default(); - - let mut output = Vec::with_capacity(query.len()); - let mut last_push = None; - let mut query = query.directional_iter(left_to_right).peekable(); - let mut in_range = false; - let mut current_limit = limit; - let mut current_offset = offset; +#[derive(Copy, Clone, Debug)] +pub struct VerifyOptions { + /// When set to true, this will give back absence proofs for any query items + /// that are keys. This means QueryItem::Key(), and not the ranges. + pub absence_proofs_for_non_existing_searched_keys: bool, + /// Verifies that we have all the data. Todo: verify that this works + /// properly + pub verify_proof_succinctness: bool, + /// Should return empty trees in the result? 
+ pub include_empty_trees_in_result: bool, +} - let ops = Decoder::new(bytes); +impl Default for VerifyOptions { + fn default() -> Self { + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: true, + verify_proof_succinctness: true, + include_empty_trees_in_result: false, + } + } +} - let root_wrapped = execute(ops, true, |node| { - let mut execute_node = |key: &Vec, - value: Option<&Vec>, - value_hash: CryptoHash| - -> Result<_, Error> { - while let Some(item) = query.peek() { - // get next item in query - let query_item = *item; - let (lower_bound, start_non_inclusive) = query_item.lower_bound(); - let (upper_bound, end_inclusive) = query_item.upper_bound(); - - // terminate if we encounter a node before the current query item. - // this means a node less than the current query item for left to right. - // and a node greater than the current query item for right to left. - let terminate = if left_to_right { - // if the query item is lower unbounded, then a node cannot be less than it. - // checks that the lower bound of the query item not greater than the key - // if they are equal make sure the start is inclusive - !query_item.lower_unbounded() - && ((lower_bound.expect("confirmed not unbounded") > key.as_slice()) - || (start_non_inclusive - && lower_bound.expect("confirmed not unbounded") == key.as_slice())) +impl Query { + #[cfg(any(feature = "full", feature = "verify"))] + /// Verifies the encoded proof with the given query + /// + /// Every key in `keys` is checked to either have a key/value pair in the + /// proof, or to have its absence in the tree proven. + /// + /// Returns `Err` if the proof is invalid, or a list of proven values + /// associated with `keys`. For example, if `keys` contains keys `A` and + /// `B`, the returned list will contain 2 elements, the value of `A` and + /// the value of `B`. 
Keys proven to be absent in the tree will have an + /// entry of `None`, keys that have a proven value will have an entry of + /// `Some(value)`. + pub fn execute_proof( + &self, + bytes: &[u8], + limit: Option, + left_to_right: bool, + ) -> CostResult<(MerkHash, ProofVerificationResult), Error> { + #[cfg(feature = "proof_debug")] + { + println!( + "executing proof with limit {:?} going {} using query {}", + limit, + if left_to_right { + "left to right" } else { - !query_item.upper_unbounded() - && ((upper_bound.expect("confirmed not unbounded") < key.as_slice()) - || (!end_inclusive - && upper_bound.expect("confirmed not unbounded") == key.as_slice())) - }; - if terminate { - break; - } + "right to left" + }, + self + ); + } + let mut cost = OperationCost::default(); - if !in_range { - // this is the first data we have encountered for this query item - if left_to_right { - // ensure lower bound of query item is proven - match last_push { - // lower bound is proven - we have an exact match - // ignoring the case when the lower bound is unbounded - // as it's not possible the get an exact key match for - // an unbounded value - _ if Some(key.as_slice()) == query_item.lower_bound().0 => {} - - // lower bound is proven - this is the leftmost node - // in the tree - None => {} - - // lower bound is proven - the preceding tree node - // is lower than the bound - Some(Node::KV(..)) => {} - Some(Node::KVDigest(..)) => {} - Some(Node::KVRefValueHash(..)) => {} - Some(Node::KVValueHash(..)) => {} - - // cannot verify lower bound - we have an abridged - // tree so we cannot tell what the preceding key was - Some(_) => { - return Err(Error::InvalidProofError( - "Cannot verify lower bound of queried range".to_string(), - )); - } - } + let mut output = Vec::with_capacity(self.len()); + let mut last_push = None; + let mut query = self.directional_iter(left_to_right).peekable(); + let mut in_range = false; + let original_limit = limit; + let mut current_limit = limit; + + let ops 
= Decoder::new(bytes); + + let root_wrapped = execute(ops, true, |node| { + let mut execute_node = |key: &Vec, + value: Option<&Vec>, + value_hash: CryptoHash| + -> Result<_, Error> { + while let Some(item) = query.peek() { + // get next item in query + let query_item = *item; + let (lower_bound, start_non_inclusive) = query_item.lower_bound(); + let (upper_bound, end_inclusive) = query_item.upper_bound(); + + // terminate if we encounter a node before the current query item. + // this means a node less than the current query item for left to right. + // and a node greater than the current query item for right to left. + let terminate = if left_to_right { + // if the query item is lower unbounded, then a node cannot be less than it. + // checks that the lower bound of the query item not greater than the key + // if they are equal make sure the start is inclusive + !query_item.lower_unbounded() + && ((lower_bound.expect("confirmed not unbounded") > key.as_slice()) + || (start_non_inclusive + && lower_bound.expect("confirmed not unbounded") + == key.as_slice())) } else { - // ensure upper bound of query item is proven - match last_push { - // upper bound is proven - we have an exact match - // ignoring the case when the upper bound is unbounded - // as it's not possible the get an exact key match for - // an unbounded value - _ if Some(key.as_slice()) == query_item.upper_bound().0 => {} - - // lower bound is proven - this is the rightmost node - // in the tree - None => {} - - // upper bound is proven - the preceding tree node - // is greater than the bound - Some(Node::KV(..)) => {} - Some(Node::KVDigest(..)) => {} - Some(Node::KVRefValueHash(..)) => {} - Some(Node::KVValueHash(..)) => {} - - // cannot verify upper bound - we have an abridged - // tree so we cannot tell what the previous key was - Some(_) => { - return Err(Error::InvalidProofError( - "Cannot verify upper bound of queried range".to_string(), - )); + !query_item.upper_unbounded() + && 
((upper_bound.expect("confirmed not unbounded") < key.as_slice()) + || (!end_inclusive + && upper_bound.expect("confirmed not unbounded") + == key.as_slice())) + }; + if terminate { + break; + } + + if !in_range { + // this is the first data we have encountered for this query item + if left_to_right { + // ensure lower bound of query item is proven + match last_push { + // lower bound is proven - we have an exact match + // ignoring the case when the lower bound is unbounded + // as it's not possible the get an exact key match for + // an unbounded value + _ if Some(key.as_slice()) == query_item.lower_bound().0 => {} + + // lower bound is proven - this is the leftmost node + // in the tree + None => {} + + // lower bound is proven - the preceding tree node + // is lower than the bound + Some(Node::KV(..)) => {} + Some(Node::KVDigest(..)) => {} + Some(Node::KVRefValueHash(..)) => {} + Some(Node::KVValueHash(..)) => {} + + // cannot verify lower bound - we have an abridged + // tree so we cannot tell what the preceding key was + Some(_) => { + return Err(Error::InvalidProofError( + "Cannot verify lower bound of queried range".to_string(), + )); + } + } + } else { + // ensure upper bound of query item is proven + match last_push { + // upper bound is proven - we have an exact match + // ignoring the case when the upper bound is unbounded + // as it's not possible the get an exact key match for + // an unbounded value + _ if Some(key.as_slice()) == query_item.upper_bound().0 => {} + + // lower bound is proven - this is the rightmost node + // in the tree + None => {} + + // upper bound is proven - the preceding tree node + // is greater than the bound + Some(Node::KV(..)) => {} + Some(Node::KVDigest(..)) => {} + Some(Node::KVRefValueHash(..)) => {} + Some(Node::KVValueHash(..)) => {} + + // cannot verify upper bound - we have an abridged + // tree so we cannot tell what the previous key was + Some(_) => { + return Err(Error::InvalidProofError( + "Cannot verify upper 
bound of queried range".to_string(), + )); + } } } } - } - if left_to_right { - if query_item.upper_bound().0.is_some() - && Some(key.as_slice()) >= query_item.upper_bound().0 + if left_to_right { + if query_item.upper_bound().0.is_some() + && Some(key.as_slice()) >= query_item.upper_bound().0 + { + // at or past upper bound of range (or this was an exact + // match on a single-key queryitem), advance to next query + // item + query.next(); + in_range = false; + } else { + // have not reached upper bound, we expect more values + // to be proven in the range (and all pushes should be + // unabridged until we reach end of range) + in_range = true; + } + } else if query_item.lower_bound().0.is_some() + && Some(key.as_slice()) <= query_item.lower_bound().0 { - // at or past upper bound of range (or this was an exact + // at or before lower bound of range (or this was an exact // match on a single-key queryitem), advance to next query // item query.next(); in_range = false; } else { - // have not reached upper bound, we expect more values + // have not reached lower bound, we expect more values // to be proven in the range (and all pushes should be // unabridged until we reach end of range) in_range = true; } - } else if query_item.lower_bound().0.is_some() - && Some(key.as_slice()) <= query_item.lower_bound().0 - { - // at or before lower bound of range (or this was an exact - // match on a single-key queryitem), advance to next query - // item - query.next(); - in_range = false; - } else { - // have not reached lower bound, we expect more values - // to be proven in the range (and all pushes should be - // unabridged until we reach end of range) - in_range = true; - } - // this push matches the queried item - if query_item.contains(key) { - // if there are still offset slots, and node is of type kvdigest - // reduce the offset counter - // also, verify that a kv node was not pushed before offset is exhausted - if let Some(offset) = current_offset { - if offset > 0 && 
value.is_none() { - current_offset = Some(offset - 1); + // this push matches the queried item + if query_item.contains(key) { + if let Some(val) = value { + if let Some(limit) = current_limit { + if limit == 0 { + return Err(Error::InvalidProofError(format!( + "Proof returns more data than limit {:?}", + original_limit + ))); + } else { + current_limit = Some(limit - 1); + if current_limit == Some(0) { + in_range = false; + } + } + } + #[cfg(feature = "proof_debug")] + { + println!( + "pushing {}", + ProvedKeyOptionalValue { + key: key.clone(), + value: Some(val.clone()), + proof: value_hash, + } + ); + } + // add data to output + output.push(ProvedKeyOptionalValue { + key: key.clone(), + value: Some(val.clone()), + proof: value_hash, + }); + + // continue to next push break; - } else if offset > 0 && value.is_some() { - // inserting a kv node before exhausting offset + } else { return Err(Error::InvalidProofError( - "Proof returns data before offset is exhausted".to_string(), + "Proof is missing data for query".to_string(), )); } } + {} + // continue to next queried item + } + Ok(()) + }; - // offset is equal to zero or none - if let Some(val) = value { - if let Some(limit) = current_limit { - if limit == 0 { - return Err(Error::InvalidProofError( - "Proof returns more data than limit".to_string(), - )); - } else { - current_limit = Some(limit - 1); - if current_limit == Some(0) { - in_range = false; - } - } - } - // add data to output - output.push(ProvedKeyValue { - key: key.clone(), - value: val.clone(), - proof: value_hash, - }); - - // continue to next push - break; - } else { - return Err(Error::InvalidProofError( - "Proof is missing data for query".to_string(), - )); + match node { + Node::KV(key, value) => { + #[cfg(feature = "proof_debug")] + { + println!("Processing KV node"); + } + execute_node(key, Some(value), value_hash(value).unwrap())?; + } + Node::KVValueHash(key, value, value_hash) => { + #[cfg(feature = "proof_debug")] + { + 
println!("Processing KVValueHash node"); + } + execute_node(key, Some(value), *value_hash)?; + } + Node::KVDigest(key, value_hash) => { + #[cfg(feature = "proof_debug")] + { + println!("Processing KVDigest node"); + } + execute_node(key, None, *value_hash)?; + } + Node::KVRefValueHash(key, value, value_hash) => { + #[cfg(feature = "proof_debug")] + { + println!("Processing KVRefValueHash node"); + } + execute_node(key, Some(value), *value_hash)?; + } + Node::Hash(_) | Node::KVHash(_) | Node::KVValueHashFeatureType(..) => { + if in_range { + return Err(Error::InvalidProofError(format!( + "Proof is missing data for query range. Encountered unexpected node \ + type: {}", + node + ))); } } - {} - // continue to next queried item } - Ok(()) - }; - if let Node::KV(key, value) = node { - execute_node(key, Some(value), value_hash(value).unwrap())?; - } else if let Node::KVValueHash(key, value, value_hash) = node { - execute_node(key, Some(value), *value_hash)?; - } else if let Node::KVDigest(key, value_hash) = node { - execute_node(key, None, *value_hash)?; - } else if let Node::KVRefValueHash(key, value, value_hash) = node { - execute_node(key, Some(value), *value_hash)?; - } else if in_range { - // we encountered a queried range but the proof was abridged (saw a - // non-KV push), we are missing some part of the range - return Err(Error::InvalidProofError( - "Proof is missing data for query for range".to_string(), - )); - } + last_push = Some(node.clone()); - last_push = Some(node.clone()); + Ok(()) + }); - Ok(()) - }); + let root = cost_return_on_error!(&mut cost, root_wrapped); - let root = cost_return_on_error!(&mut cost, root_wrapped); + // we have remaining query items, check absence proof against right edge of + // tree + if query.peek().is_some() { + if current_limit == Some(0) { + } else { + match last_push { + // last node in tree was less than queried item + Some(Node::KV(..)) => {} + Some(Node::KVDigest(..)) => {} + Some(Node::KVRefValueHash(..)) => {} + 
Some(Node::KVValueHash(..)) => {} - // we have remaining query items, check absence proof against right edge of - // tree - if query.peek().is_some() { - if current_limit == Some(0) { - } else { - match last_push { - // last node in tree was less than queried item - Some(Node::KV(..)) => {} - Some(Node::KVDigest(..)) => {} - Some(Node::KVRefValueHash(..)) => {} - Some(Node::KVValueHash(..)) => {} - - // proof contains abridged data so we cannot verify absence of - // remaining query items - _ => { - return Err(Error::InvalidProofError( - "Proof is missing data for query".to_string(), - )) - .wrap_with_cost(cost) + // proof contains abridged data so we cannot verify absence of + // remaining query items + _ => { + return Err(Error::InvalidProofError( + "Proof is missing data for query".to_string(), + )) + .wrap_with_cost(cost) + } } } } + + Ok(( + root.hash().unwrap_add_cost(&mut cost), + ProofVerificationResult { + result_set: output, + limit: current_limit, + }, + )) + .wrap_with_cost(cost) } - Ok(( - root.hash().unwrap_add_cost(&mut cost), - ProofVerificationResult { - result_set: output, - limit: current_limit, - offset: current_offset, - }, - )) - .wrap_with_cost(cost) + #[cfg(any(feature = "full", feature = "verify"))] + /// Verifies the encoded proof with the given query and expected hash + pub fn verify_proof( + &self, + bytes: &[u8], + limit: Option, + left_to_right: bool, + expected_hash: MerkHash, + ) -> CostResult { + self.execute_proof(bytes, limit, left_to_right) + .map_ok(|(root_hash, verification_result)| { + if root_hash == expected_hash { + Ok(verification_result) + } else { + Err(Error::InvalidProofError(format!( + "Proof did not match expected hash\n\tExpected: \ + {expected_hash:?}\n\tActual: {root_hash:?}" + ))) + } + }) + .flatten() + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +#[derive(PartialEq, Eq, Debug)] +/// Proved key-value +pub struct ProvedKeyOptionalValue { + /// Key + pub key: Vec, + /// Value + pub value: Option>, + 
/// Proof + pub proof: CryptoHash, +} + +impl From for ProvedKeyOptionalValue { + fn from(value: ProvedKeyValue) -> Self { + let ProvedKeyValue { key, value, proof } = value; + + ProvedKeyOptionalValue { + key, + value: Some(value), + proof, + } + } +} + +impl TryFrom for ProvedKeyValue { + type Error = Error; + + fn try_from(value: ProvedKeyOptionalValue) -> Result { + let ProvedKeyOptionalValue { key, value, proof } = value; + let value = value.ok_or(Error::InvalidProofError(format!( + "expected {}", + hex_to_ascii(&key) + )))?; + Ok(ProvedKeyValue { key, value, proof }) + } +} + +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for ProvedKeyOptionalValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let key_string = if self.key.len() == 1 && self.key[0] < b"0"[0] { + hex::encode(&self.key) + } else { + String::from_utf8(self.key.clone()).unwrap_or_else(|_| hex::encode(&self.key)) + }; + write!( + f, + "ProvedKeyOptionalValue {{ key: {}, value: {}, proof: {} }}", + key_string, + if let Some(value) = &self.value { + hex::encode(value) + } else { + "None".to_string() + }, + hex::encode(self.proof) + ) + } } #[cfg(any(feature = "full", feature = "verify"))] @@ -311,38 +456,39 @@ pub struct ProvedKeyValue { pub proof: CryptoHash, } +#[cfg(any(feature = "full", feature = "verify"))] +impl fmt::Display for ProvedKeyValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "ProvedKeyValue {{ key: {}, value: {}, proof: {} }}", + String::from_utf8(self.key.clone()).unwrap_or_else(|_| hex::encode(&self.key)), + hex::encode(&self.value), + hex::encode(self.proof) + ) + } +} + #[cfg(any(feature = "full", feature = "verify"))] #[derive(PartialEq, Eq, Debug)] /// Proof verification result pub struct ProofVerificationResult { /// Result set - pub result_set: Vec, + pub result_set: Vec, /// Limit pub limit: Option, - /// Offset - pub offset: Option, } #[cfg(any(feature = "full", feature = "verify"))] -/// 
Verifies the encoded proof with the given query and expected hash -pub fn verify_query( - bytes: &[u8], - query: &Query, - limit: Option, - offset: Option, - left_to_right: bool, - expected_hash: MerkHash, -) -> CostResult { - execute_proof(bytes, query, limit, offset, left_to_right) - .map_ok(|(root_hash, verification_result)| { - if root_hash == expected_hash { - Ok(verification_result) - } else { - Err(Error::InvalidProofError(format!( - "Proof did not match expected hash\n\tExpected: {expected_hash:?}\n\tActual: \ - {root_hash:?}" - ))) - } - }) - .flatten() +impl fmt::Display for ProofVerificationResult { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "ProofVerificationResult {{")?; + writeln!(f, " result_set: [")?; + for (index, proved_key_value) in self.result_set.iter().enumerate() { + writeln!(f, " {}: {},", index, proved_key_value)?; + } + writeln!(f, " ],")?; + writeln!(f, " limit: {:?}", self.limit)?; + write!(f, "}}") + } } diff --git a/merk/src/proofs/tree.rs b/merk/src/proofs/tree.rs index b3bf9cf1..16655a6d 100644 --- a/merk/src/proofs/tree.rs +++ b/merk/src/proofs/tree.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Tree proofs #[cfg(feature = "full")] diff --git a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index 49a492e2..d5d76673 100644 --- a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -164,7 +164,6 @@ pub fn apply_to_memonly( }) .unwrap() .expect("commit failed"); - println!("{:?}", &tree); assert_tree_invariants(&tree); tree }) diff --git a/merk/src/tree/commit.rs b/merk/src/tree/commit.rs index 24c1d996..31b0df5c 100644 --- a/merk/src/tree/commit.rs +++ b/merk/src/tree/commit.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Merk tree commit #[cfg(feature = "full")] diff --git a/merk/src/tree/encoding.rs b/merk/src/tree/encoding.rs index 29307246..3a97c895 100644 --- a/merk/src/tree/encoding.rs +++ b/merk/src/tree/encoding.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree encoding #[cfg(feature = "full")] diff --git a/merk/src/tree/fuzz_tests.rs b/merk/src/tree/fuzz_tests.rs index 631918ff..2f3067d1 100644 --- a/merk/src/tree/fuzz_tests.rs +++ b/merk/src/tree/fuzz_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Fuzz tests #![cfg(tests)] diff --git a/merk/src/tree/hash.rs b/merk/src/tree/hash.rs index d6d45c9f..e23566a9 100644 --- a/merk/src/tree/hash.rs +++ b/merk/src/tree/hash.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree hash #[cfg(any(feature = "full", feature = "verify"))] diff --git a/merk/src/tree/iter.rs b/merk/src/tree/iter.rs index 6ca58df7..03cca6ea 100644 --- a/merk/src/tree/iter.rs +++ b/merk/src/tree/iter.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree iterator #[cfg(feature = "full")] diff --git a/merk/src/tree/kv.rs b/merk/src/tree/kv.rs index ff020abc..b10733fc 100644 --- a/merk/src/tree/kv.rs +++ b/merk/src/tree/kv.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree key-values #[cfg(feature = "full")] diff --git a/merk/src/tree/link.rs b/merk/src/tree/link.rs index fa0d1563..f445dd11 100644 --- a/merk/src/tree/link.rs +++ b/merk/src/tree/link.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree link #[cfg(feature = "full")] diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index 401b8722..9a29dc8e 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk trees #[cfg(feature = "full")] diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index da481d0a..738a89df 100644 --- a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree ops #[cfg(feature = "full")] diff --git a/merk/src/tree/tree_feature_type.rs b/merk/src/tree/tree_feature_type.rs index c1fceed3..c47fb0d6 100644 --- a/merk/src/tree/tree_feature_type.rs +++ b/merk/src/tree/tree_feature_type.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree feature type #[cfg(any(feature = "full", feature = "verify"))] diff --git a/merk/src/tree/walk/fetch.rs b/merk/src/tree/walk/fetch.rs index 08b66d99..e99df5bd 100644 --- a/merk/src/tree/walk/fetch.rs +++ b/merk/src/tree/walk/fetch.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Walk #[cfg(feature = "full")] diff --git a/merk/src/tree/walk/mod.rs b/merk/src/tree/walk/mod.rs index e5401814..adf2a07d 100644 --- a/merk/src/tree/walk/mod.rs +++ b/merk/src/tree/walk/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Merk tree walk #[cfg(feature = "full")] diff --git a/merk/src/tree/walk/ref_walker.rs b/merk/src/tree/walk/ref_walker.rs index d9fb1bcd..17f4e6c4 100644 --- a/merk/src/tree/walk/ref_walker.rs +++ b/merk/src/tree/walk/ref_walker.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Merk reference walker #[cfg(feature = "full")] diff --git a/tutorials/src/bin/proofs.rs b/tutorials/src/bin/proofs.rs index 173b700d..d56abbda 100644 --- a/tutorials/src/bin/proofs.rs +++ b/tutorials/src/bin/proofs.rs @@ -33,7 +33,7 @@ fn main() { .expect("expected successful get_path_query"); // Generate proof. - let proof = db.prove_query(&path_query).unwrap().unwrap(); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); // Get hash from query proof and print to terminal along with GroveDB root hash. 
let (hash, _result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs index bfdc1782..6b5f0626 100644 --- a/tutorials/src/bin/replication.rs +++ b/tutorials/src/bin/replication.rs @@ -229,7 +229,7 @@ fn query_db(db: &GroveDb, path: &[&[u8]], key: Vec) { println!(">> {:?}", e); } - let proof = db.prove_query(&path_query).unwrap().unwrap(); + let proof = db.prove_query(&path_query, None).unwrap().unwrap(); // Get hash from query proof and print to terminal along with GroveDB root hash. let (verify_hash, _) = GroveDb::verify_query(&proof, &path_query).unwrap(); println!("verify_hash: {:?}", hex::encode(verify_hash)); From 57fec699d4e6fd73a45d0f43a2b845f79d4eb47e Mon Sep 17 00:00:00 2001 From: fominok Date: Tue, 9 Jul 2024 16:50:19 +0200 Subject: [PATCH 35/37] update visualizer commit (#312) --- grovedb/grovedbg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grovedb/grovedbg b/grovedb/grovedbg index 33c40cf0..e810139c 160000 --- a/grovedb/grovedbg +++ b/grovedb/grovedbg @@ -1 +1 @@ -Subproject commit 33c40cf0117ab3cf446da00000658e7118c5e648 +Subproject commit e810139cbe7fc75f351d1003254c0f04555bda00 From 4035d34edcbe3fe45067deec63ea726a1bbb2420 Mon Sep 17 00:00:00 2001 From: fominok Date: Wed, 10 Jul 2024 02:04:36 +0200 Subject: [PATCH 36/37] Grovedbg build fix (#313) * wip * wip --- grovedb/build.rs | 3 ++- grovedb/grovedbg | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/grovedb/build.rs b/grovedb/build.rs index 45197586..cbdc11ac 100644 --- a/grovedb/build.rs +++ b/grovedb/build.rs @@ -24,7 +24,8 @@ fn main() { if !status.success() { let stdout_msg = String::from_utf8_lossy(&stdout); let stderr_msg = String::from_utf8_lossy(&stderr); - panic!("Error running `trunk build --release`\n{stdout_msg}\n{stderr_msg}"); + let bindgen_version = env::var_os("TRUNK_TOOLS_WASM_BINDGEN").unwrap_or_default(); + panic!("Error running `trunk build 
--release`\nbindgen version:{bindgen_version:?}\n{stdout_msg}\n{stderr_msg}"); } let zip_file = out_dir.join("grovedbg.zip"); diff --git a/grovedb/grovedbg b/grovedb/grovedbg index e810139c..954be745 160000 --- a/grovedb/grovedbg +++ b/grovedb/grovedbg @@ -1 +1 @@ -Subproject commit e810139cbe7fc75f351d1003254c0f04555bda00 +Subproject commit 954be74510d3c3bb79a7e622e55af66aae5c6ad4 From 8ada131a1079c763e83d2b11ab1f6abbd5d82e06 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Thu, 11 Jul 2024 12:14:15 +0300 Subject: [PATCH 37/37] feat: runtime versioning (#314) * temp work * temp work * fmt * more work * more work * finish of versioning * finish of versioning * fixed debugger --- Cargo.toml | 1 + grovedb-version/Cargo.toml | 14 + grovedb-version/src/error.rs | 25 + grovedb-version/src/lib.rs | 106 ++ .../src/version/grovedb_versions.rs | 226 ++++ grovedb-version/src/version/merk_versions.rs | 2 + grovedb-version/src/version/mod.rs | 26 + grovedb-version/src/version/v1.rs | 187 +++ grovedb/Cargo.toml | 1 + grovedb/benches/insertion_benchmark.rs | 58 +- grovedb/src/batch/batch_structure.rs | 28 - .../estimated_costs/average_case_costs.rs | 154 ++- grovedb/src/batch/estimated_costs/mod.rs | 28 - .../batch/estimated_costs/worst_case_costs.rs | 138 ++- grovedb/src/batch/just_in_time_cost_tests.rs | 192 +-- grovedb/src/batch/key_info.rs | 28 - grovedb/src/batch/mod.rs | 722 +++++++---- grovedb/src/batch/mode.rs | 28 - grovedb/src/batch/multi_insert_cost_tests.rs | 85 +- grovedb/src/batch/options.rs | 30 +- .../src/batch/single_deletion_cost_tests.rs | 109 +- grovedb/src/batch/single_insert_cost_tests.rs | 169 ++- .../single_sum_item_deletion_cost_tests.rs | 62 +- .../single_sum_item_insert_cost_tests.rs | 173 ++- grovedb/src/debugger.rs | 10 +- grovedb/src/element/constructor.rs | 28 - grovedb/src/element/delete.rs | 60 +- grovedb/src/element/exists.rs | 47 +- grovedb/src/element/get.rs | 98 +- grovedb/src/element/helpers.rs | 124 +- grovedb/src/element/insert.rs | 
228 ++-- grovedb/src/element/query.rs | 378 ++++-- grovedb/src/element/serialize.rs | 118 +- grovedb/src/error.rs | 11 + .../src/estimated_costs/average_case_costs.rs | 272 ++++- .../src/estimated_costs/worst_case_costs.rs | 252 +++- grovedb/src/lib.rs | 353 ++++-- grovedb/src/operations/auxiliary.rs | 4 +- grovedb/src/operations/delete/average_case.rs | 88 +- .../src/operations/delete/delete_up_tree.rs | 75 +- grovedb/src/operations/delete/mod.rs | 498 ++++++-- grovedb/src/operations/delete/worst_case.rs | 84 +- grovedb/src/operations/get/average_case.rs | 110 +- grovedb/src/operations/get/mod.rs | 246 ++-- grovedb/src/operations/get/query.rs | 386 ++++-- grovedb/src/operations/get/worst_case.rs | 78 +- grovedb/src/operations/insert/mod.rs | 451 +++++-- grovedb/src/operations/is_empty_tree.rs | 51 +- grovedb/src/operations/proof/generate.rs | 90 +- grovedb/src/operations/proof/util.rs | 5 +- grovedb/src/operations/proof/verify.rs | 134 ++- grovedb/src/query/mod.rs | 344 ++++-- grovedb/src/query_result_type.rs | 24 +- grovedb/src/reference_path.rs | 17 +- grovedb/src/replication.rs | 86 +- grovedb/src/tests/common.rs | 3 +- grovedb/src/tests/mod.rs | 1071 +++++++++++++---- grovedb/src/tests/query_tests.rs | 833 +++++++++---- grovedb/src/tests/sum_tree_tests.rs | 242 +++- grovedb/src/tests/tree_hashes_tests.rs | 83 +- grovedb/src/util.rs | 74 +- grovedb/src/visualize.rs | 18 +- merk/Cargo.toml | 1 + merk/benches/merk.rs | 89 +- merk/benches/ops.rs | 32 +- merk/src/debugger.rs | 4 +- merk/src/error.rs | 38 +- .../src/estimated_costs/average_case_costs.rs | 31 +- merk/src/estimated_costs/mod.rs | 28 - merk/src/estimated_costs/worst_case_costs.rs | 3 +- merk/src/merk/apply.rs | 89 +- merk/src/merk/chunks.rs | 228 ++-- merk/src/merk/get.rs | 130 +- merk/src/merk/mod.rs | 191 ++- merk/src/merk/open.rs | 42 +- merk/src/merk/prove.rs | 18 +- merk/src/merk/restore.rs | 337 ++++-- merk/src/merk/source.rs | 17 +- merk/src/owner.rs | 28 - merk/src/proofs/chunk/chunk.rs | 292 
+++-- merk/src/proofs/chunk/util.rs | 30 +- merk/src/proofs/query/map.rs | 32 - merk/src/proofs/query/mod.rs | 311 +++-- merk/src/test_utils/mod.rs | 45 +- merk/src/test_utils/temp_merk.rs | 13 +- merk/src/tree/encoding.rs | 46 +- merk/src/tree/fuzz_tests.rs | 10 +- merk/src/tree/mod.rs | 9 +- merk/src/tree/ops.rs | 284 +++-- merk/src/tree/walk/fetch.rs | 6 +- merk/src/tree/walk/mod.rs | 52 +- merk/src/tree/walk/ref_walker.rs | 6 +- node-grove/Cargo.toml | 1 + node-grove/src/lib.rs | 39 +- tutorials/src/bin/delete.rs | 10 +- tutorials/src/bin/insert.rs | 6 +- tutorials/src/bin/proofs.rs | 11 +- tutorials/src/bin/query-complex.rs | 10 +- tutorials/src/bin/query-simple.rs | 5 +- tutorials/src/bin/replication.rs | 37 +- 100 files changed, 8319 insertions(+), 3908 deletions(-) create mode 100644 grovedb-version/Cargo.toml create mode 100644 grovedb-version/src/error.rs create mode 100644 grovedb-version/src/lib.rs create mode 100644 grovedb-version/src/version/grovedb_versions.rs create mode 100644 grovedb-version/src/version/merk_versions.rs create mode 100644 grovedb-version/src/version/mod.rs create mode 100644 grovedb-version/src/version/v1.rs diff --git a/Cargo.toml b/Cargo.toml index 6f5b5260..17e25e98 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,4 +10,5 @@ members = [ "visualize", "path", "grovedbg-types", + "grovedb-version" ] diff --git a/grovedb-version/Cargo.toml b/grovedb-version/Cargo.toml new file mode 100644 index 00000000..06189c44 --- /dev/null +++ b/grovedb-version/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "grovedb-version" +authors = ["Samuel Westrich "] +description = "Versioning library for Platform" +version = "1.0.0-rc.2" +edition = "2021" +license = "MIT" + +[dependencies] +thiserror = { version = "1.0.59" } +versioned-feature-core = { git = "https://github.com/dashpay/versioned-feature-core", version = "1.0.0" } + +[features] +mock-versions = [] diff --git a/grovedb-version/src/error.rs b/grovedb-version/src/error.rs new file mode 
100644 index 00000000..0d3d4c9a --- /dev/null +++ b/grovedb-version/src/error.rs @@ -0,0 +1,25 @@ +use thiserror::Error; +use versioned_feature_core::FeatureVersion; + +#[derive(Error, Debug)] +pub enum GroveVersionError { + /// Expected some specific versions + #[error("grove unknown version on {method}, received: {received}")] + UnknownVersionMismatch { + /// method + method: String, + /// the allowed versions for this method + known_versions: Vec, + /// requested core height + received: FeatureVersion, + }, + + /// Expected some specific versions + #[error("{method} not active for grove version")] + VersionNotActive { + /// method + method: String, + /// the allowed versions for this method + known_versions: Vec, + }, +} diff --git a/grovedb-version/src/lib.rs b/grovedb-version/src/lib.rs new file mode 100644 index 00000000..48b80a52 --- /dev/null +++ b/grovedb-version/src/lib.rs @@ -0,0 +1,106 @@ +use crate::version::GroveVersion; + +pub mod error; +pub mod version; + +#[macro_export] +macro_rules! check_grovedb_v0_with_cost { + ($method:expr, $version:expr) => {{ + const EXPECTED_VERSION: u16 = 0; + if $version != EXPECTED_VERSION { + return Err(GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()) + .wrap_with_cost(OperationCost::default()); + } + }}; +} + +#[macro_export] +macro_rules! check_grovedb_v0 { + ($method:expr, $version:expr) => {{ + const EXPECTED_VERSION: u16 = 0; + if $version != EXPECTED_VERSION { + return Err(GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()); + } + }}; +} + +#[macro_export] +macro_rules! 
check_merk_v0_with_cost { + ($method:expr, $version:expr) => {{ + const EXPECTED_VERSION: u16 = 0; + if $version != EXPECTED_VERSION { + return Err(GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()) + .wrap_with_cost(OperationCost::default()); + } + }}; +} + +#[macro_export] +macro_rules! check_merk_v0 { + ($method:expr, $version:expr) => {{ + const EXPECTED_VERSION: u16 = 0; + if $version != EXPECTED_VERSION { + return Err(GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()); + } + }}; +} + +pub trait TryFromVersioned: Sized { + /// The type returned in the event of a conversion error. + type Error; + + /// Performs the conversion. + fn try_from_versioned(value: T, grove_version: &GroveVersion) -> Result; +} + +pub trait TryIntoVersioned: Sized { + /// The type returned in the event of a conversion error. + type Error; + + /// Performs the conversion. 
+ fn try_into_versioned(self, grove_version: &GroveVersion) -> Result; +} + +impl TryIntoVersioned for T +where + U: TryFromVersioned, +{ + type Error = U::Error; + + #[inline] + fn try_into_versioned(self, grove_version: &GroveVersion) -> Result { + U::try_from_versioned(self, grove_version) + } +} + +impl TryFromVersioned for T +where + T: TryFrom, +{ + type Error = T::Error; + + #[inline] + fn try_from_versioned(value: U, _grove_version: &GroveVersion) -> Result { + T::try_from(value) + } +} diff --git a/grovedb-version/src/version/grovedb_versions.rs b/grovedb-version/src/version/grovedb_versions.rs new file mode 100644 index 00000000..51bbdcc6 --- /dev/null +++ b/grovedb-version/src/version/grovedb_versions.rs @@ -0,0 +1,226 @@ +use versioned_feature_core::FeatureVersion; + +#[derive(Clone, Debug, Default)] +pub struct GroveDBVersions { + pub apply_batch: GroveDBApplyBatchVersions, + pub element: GroveDBElementMethodVersions, + pub operations: GroveDBOperationsVersions, + pub path_query_methods: GroveDBPathQueryMethodVersions, + pub replication: GroveDBReplicationVersions, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBPathQueryMethodVersions { + pub terminal_keys: FeatureVersion, + pub merge: FeatureVersion, + pub query_items_at_path: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBApplyBatchVersions { + pub apply_batch_structure: FeatureVersion, + pub apply_body: FeatureVersion, + pub continue_partial_apply_body: FeatureVersion, + pub apply_operations_without_batching: FeatureVersion, + pub apply_batch: FeatureVersion, + pub apply_partial_batch: FeatureVersion, + pub open_batch_transactional_merk_at_path: FeatureVersion, + pub open_batch_merk_at_path: FeatureVersion, + pub apply_batch_with_element_flags_update: FeatureVersion, + pub apply_partial_batch_with_element_flags_update: FeatureVersion, + pub estimated_case_operations_for_batch: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct 
GroveDBOperationsVersions { + pub get: GroveDBOperationsGetVersions, + pub insert: GroveDBOperationsInsertVersions, + pub delete: GroveDBOperationsDeleteVersions, + pub delete_up_tree: GroveDBOperationsDeleteUpTreeVersions, + pub query: GroveDBOperationsQueryVersions, + pub proof: GroveDBOperationsProofVersions, + pub average_case: GroveDBOperationsAverageCaseVersions, + pub worst_case: GroveDBOperationsWorstCaseVersions, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsGetVersions { + pub get: FeatureVersion, + pub get_caching_optional: FeatureVersion, + pub follow_reference: FeatureVersion, + pub get_raw: FeatureVersion, + pub get_raw_caching_optional: FeatureVersion, + pub get_raw_optional: FeatureVersion, + pub get_raw_optional_caching_optional: FeatureVersion, + pub has_raw: FeatureVersion, + pub check_subtree_exists_invalid_path: FeatureVersion, + pub average_case_for_has_raw: FeatureVersion, + pub average_case_for_has_raw_tree: FeatureVersion, + pub average_case_for_get_raw: FeatureVersion, + pub average_case_for_get: FeatureVersion, + pub average_case_for_get_tree: FeatureVersion, + pub worst_case_for_has_raw: FeatureVersion, + pub worst_case_for_get_raw: FeatureVersion, + pub worst_case_for_get: FeatureVersion, + pub is_empty_tree: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsProofVersions { + pub prove_query: FeatureVersion, + pub prove_query_many: FeatureVersion, + pub verify_query_with_options: FeatureVersion, + pub verify_query_raw: FeatureVersion, + pub verify_layer_proof: FeatureVersion, + pub verify_query: FeatureVersion, + pub verify_subset_query: FeatureVersion, + pub verify_query_with_absence_proof: FeatureVersion, + pub verify_subset_query_with_absence_proof: FeatureVersion, + pub verify_query_with_chained_path_queries: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsQueryVersions { + pub query_encoded_many: FeatureVersion, + pub query_many_raw: 
FeatureVersion, + pub get_proved_path_query: FeatureVersion, + pub query: FeatureVersion, + pub query_item_value: FeatureVersion, + pub query_item_value_or_sum: FeatureVersion, + pub query_sums: FeatureVersion, + pub query_raw: FeatureVersion, + pub query_keys_optional: FeatureVersion, + pub query_raw_keys_optional: FeatureVersion, + pub follow_element: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsAverageCaseVersions { + pub add_average_case_get_merk_at_path: FeatureVersion, + pub average_case_merk_replace_tree: FeatureVersion, + pub average_case_merk_insert_tree: FeatureVersion, + pub average_case_merk_delete_tree: FeatureVersion, + pub average_case_merk_insert_element: FeatureVersion, + pub average_case_merk_replace_element: FeatureVersion, + pub average_case_merk_patch_element: FeatureVersion, + pub average_case_merk_delete_element: FeatureVersion, + pub add_average_case_has_raw_cost: FeatureVersion, + pub add_average_case_has_raw_tree_cost: FeatureVersion, + pub add_average_case_get_raw_cost: FeatureVersion, + pub add_average_case_get_raw_tree_cost: FeatureVersion, + pub add_average_case_get_cost: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsWorstCaseVersions { + pub add_worst_case_get_merk_at_path: FeatureVersion, + pub worst_case_merk_replace_tree: FeatureVersion, + pub worst_case_merk_insert_tree: FeatureVersion, + pub worst_case_merk_delete_tree: FeatureVersion, + pub worst_case_merk_insert_element: FeatureVersion, + pub worst_case_merk_replace_element: FeatureVersion, + pub worst_case_merk_patch_element: FeatureVersion, + pub worst_case_merk_delete_element: FeatureVersion, + pub add_worst_case_has_raw_cost: FeatureVersion, + pub add_worst_case_get_raw_tree_cost: FeatureVersion, + pub add_worst_case_get_raw_cost: FeatureVersion, + pub add_worst_case_get_cost: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsInsertVersions { + pub insert: 
FeatureVersion, + pub insert_on_transaction: FeatureVersion, + pub insert_without_transaction: FeatureVersion, + pub add_element_on_transaction: FeatureVersion, + pub add_element_without_transaction: FeatureVersion, + pub insert_if_not_exists: FeatureVersion, + pub insert_if_changed_value: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsDeleteVersions { + pub delete: FeatureVersion, + pub clear_subtree: FeatureVersion, + pub delete_with_sectional_storage_function: FeatureVersion, + pub delete_if_empty_tree: FeatureVersion, + pub delete_if_empty_tree_with_sectional_storage_function: FeatureVersion, + pub delete_operation_for_delete_internal: FeatureVersion, + pub delete_internal_on_transaction: FeatureVersion, + pub delete_internal_without_transaction: FeatureVersion, + pub average_case_delete_operation_for_delete: FeatureVersion, + pub worst_case_delete_operation_for_delete: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsDeleteUpTreeVersions { + pub delete_up_tree_while_empty: FeatureVersion, + pub delete_up_tree_while_empty_with_sectional_storage: FeatureVersion, + pub delete_operations_for_delete_up_tree_while_empty: FeatureVersion, + pub add_delete_operations_for_delete_up_tree_while_empty: FeatureVersion, + pub average_case_delete_operations_for_delete_up_tree_while_empty: FeatureVersion, + pub worst_case_delete_operations_for_delete_up_tree_while_empty: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBOperationsApplyBatchVersions { + pub apply_batch_structure: FeatureVersion, + pub apply_body: FeatureVersion, + pub continue_partial_apply_body: FeatureVersion, + pub apply_operations_without_batching: FeatureVersion, + pub apply_batch: FeatureVersion, + pub apply_partial_batch: FeatureVersion, + pub open_batch_transactional_merk_at_path: FeatureVersion, + pub open_batch_merk_at_path: FeatureVersion, + pub apply_batch_with_element_flags_update: FeatureVersion, + pub 
apply_partial_batch_with_element_flags_update: FeatureVersion, + pub estimated_case_operations_for_batch: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBElementMethodVersions { + pub delete: FeatureVersion, + pub delete_with_sectioned_removal_bytes: FeatureVersion, + pub delete_into_batch_operations: FeatureVersion, + pub element_at_key_already_exists: FeatureVersion, + pub get: FeatureVersion, + pub get_optional: FeatureVersion, + pub get_from_storage: FeatureVersion, + pub get_optional_from_storage: FeatureVersion, + pub get_with_absolute_refs: FeatureVersion, + pub get_value_hash: FeatureVersion, + pub get_specialized_cost: FeatureVersion, + pub value_defined_cost: FeatureVersion, + pub value_defined_cost_for_serialized_value: FeatureVersion, + pub specialized_costs_for_key_value: FeatureVersion, + pub required_item_space: FeatureVersion, + pub insert: FeatureVersion, + pub insert_into_batch_operations: FeatureVersion, + pub insert_if_not_exists: FeatureVersion, + pub insert_if_not_exists_into_batch_operations: FeatureVersion, + pub insert_if_changed_value: FeatureVersion, + pub insert_if_changed_value_into_batch_operations: FeatureVersion, + pub insert_reference: FeatureVersion, + pub insert_reference_into_batch_operations: FeatureVersion, + pub insert_subtree: FeatureVersion, + pub insert_subtree_into_batch_operations: FeatureVersion, + pub get_query: FeatureVersion, + pub get_query_values: FeatureVersion, + pub get_query_apply_function: FeatureVersion, + pub get_path_query: FeatureVersion, + pub get_sized_query: FeatureVersion, + pub path_query_push: FeatureVersion, + pub query_item: FeatureVersion, + pub basic_push: FeatureVersion, + pub serialize: FeatureVersion, + pub serialized_size: FeatureVersion, + pub deserialize: FeatureVersion, +} + +#[derive(Clone, Debug, Default)] +pub struct GroveDBReplicationVersions { + pub get_subtrees_metadata: FeatureVersion, + pub fetch_chunk: FeatureVersion, + pub start_snapshot_syncing: 
FeatureVersion, + pub apply_chunk: FeatureVersion, +} diff --git a/grovedb-version/src/version/merk_versions.rs b/grovedb-version/src/version/merk_versions.rs new file mode 100644 index 00000000..fac25f91 --- /dev/null +++ b/grovedb-version/src/version/merk_versions.rs @@ -0,0 +1,2 @@ +#[derive(Clone, Debug, Default)] +pub struct MerkVersions {} diff --git a/grovedb-version/src/version/mod.rs b/grovedb-version/src/version/mod.rs new file mode 100644 index 00000000..06ac4e12 --- /dev/null +++ b/grovedb-version/src/version/mod.rs @@ -0,0 +1,26 @@ +pub mod grovedb_versions; +pub mod merk_versions; +pub mod v1; + +pub use versioned_feature_core::*; + +use crate::version::{ + grovedb_versions::GroveDBVersions, merk_versions::MerkVersions, v1::GROVE_V1, +}; + +#[derive(Clone, Debug, Default)] +pub struct GroveVersion { + pub protocol_version: u32, + pub grovedb_versions: GroveDBVersions, + pub merk_versions: MerkVersions, +} + +impl GroveVersion { + pub fn latest<'a>() -> &'a Self { + GROVE_VERSIONS + .last() + .expect("expected to have a platform version") + } +} + +pub const GROVE_VERSIONS: &[GroveVersion] = &[GROVE_V1]; diff --git a/grovedb-version/src/version/v1.rs b/grovedb-version/src/version/v1.rs new file mode 100644 index 00000000..19bf135e --- /dev/null +++ b/grovedb-version/src/version/v1.rs @@ -0,0 +1,187 @@ +use crate::version::{ + grovedb_versions::{ + GroveDBApplyBatchVersions, GroveDBElementMethodVersions, + GroveDBOperationsAverageCaseVersions, GroveDBOperationsDeleteUpTreeVersions, + GroveDBOperationsDeleteVersions, GroveDBOperationsGetVersions, + GroveDBOperationsInsertVersions, GroveDBOperationsProofVersions, + GroveDBOperationsQueryVersions, GroveDBOperationsVersions, + GroveDBOperationsWorstCaseVersions, GroveDBPathQueryMethodVersions, + GroveDBReplicationVersions, GroveDBVersions, + }, + merk_versions::MerkVersions, + GroveVersion, +}; + +pub const GROVE_V1: GroveVersion = GroveVersion { + protocol_version: 0, + grovedb_versions: GroveDBVersions { 
+ apply_batch: GroveDBApplyBatchVersions { + apply_batch_structure: 0, + apply_body: 0, + continue_partial_apply_body: 0, + apply_operations_without_batching: 0, + apply_batch: 0, + apply_partial_batch: 0, + open_batch_transactional_merk_at_path: 0, + open_batch_merk_at_path: 0, + apply_batch_with_element_flags_update: 0, + apply_partial_batch_with_element_flags_update: 0, + estimated_case_operations_for_batch: 0, + }, + element: GroveDBElementMethodVersions { + delete: 0, + delete_with_sectioned_removal_bytes: 0, + delete_into_batch_operations: 0, + element_at_key_already_exists: 0, + get: 0, + get_optional: 0, + get_from_storage: 0, + get_optional_from_storage: 0, + get_with_absolute_refs: 0, + get_value_hash: 0, + get_specialized_cost: 0, + value_defined_cost: 0, + value_defined_cost_for_serialized_value: 0, + specialized_costs_for_key_value: 0, + required_item_space: 0, + insert: 0, + insert_into_batch_operations: 0, + insert_if_not_exists: 0, + insert_if_not_exists_into_batch_operations: 0, + insert_if_changed_value: 0, + insert_if_changed_value_into_batch_operations: 0, + insert_reference: 0, + insert_reference_into_batch_operations: 0, + insert_subtree: 0, + insert_subtree_into_batch_operations: 0, + get_query: 0, + get_query_values: 0, + get_query_apply_function: 0, + get_path_query: 0, + get_sized_query: 0, + path_query_push: 0, + query_item: 0, + basic_push: 0, + serialize: 0, + serialized_size: 0, + deserialize: 0, + }, + operations: GroveDBOperationsVersions { + get: GroveDBOperationsGetVersions { + get: 0, + get_caching_optional: 0, + follow_reference: 0, + get_raw: 0, + get_raw_caching_optional: 0, + get_raw_optional: 0, + get_raw_optional_caching_optional: 0, + has_raw: 0, + check_subtree_exists_invalid_path: 0, + average_case_for_has_raw: 0, + average_case_for_has_raw_tree: 0, + average_case_for_get_raw: 0, + average_case_for_get: 0, + average_case_for_get_tree: 0, + worst_case_for_has_raw: 0, + worst_case_for_get_raw: 0, + worst_case_for_get: 0, + 
is_empty_tree: 0, + }, + insert: GroveDBOperationsInsertVersions { + insert: 0, + insert_on_transaction: 0, + insert_without_transaction: 0, + add_element_on_transaction: 0, + add_element_without_transaction: 0, + insert_if_not_exists: 0, + insert_if_changed_value: 0, + }, + delete: GroveDBOperationsDeleteVersions { + delete: 0, + clear_subtree: 0, + delete_with_sectional_storage_function: 0, + delete_if_empty_tree: 0, + delete_if_empty_tree_with_sectional_storage_function: 0, + delete_operation_for_delete_internal: 0, + delete_internal_on_transaction: 0, + delete_internal_without_transaction: 0, + average_case_delete_operation_for_delete: 0, + worst_case_delete_operation_for_delete: 0, + }, + delete_up_tree: GroveDBOperationsDeleteUpTreeVersions { + delete_up_tree_while_empty: 0, + delete_up_tree_while_empty_with_sectional_storage: 0, + delete_operations_for_delete_up_tree_while_empty: 0, + add_delete_operations_for_delete_up_tree_while_empty: 0, + average_case_delete_operations_for_delete_up_tree_while_empty: 0, + worst_case_delete_operations_for_delete_up_tree_while_empty: 0, + }, + query: GroveDBOperationsQueryVersions { + query_encoded_many: 0, + query_many_raw: 0, + get_proved_path_query: 0, + query: 0, + query_item_value: 0, + query_item_value_or_sum: 0, + query_sums: 0, + query_raw: 0, + query_keys_optional: 0, + query_raw_keys_optional: 0, + follow_element: 0, + }, + proof: GroveDBOperationsProofVersions { + prove_query: 0, + prove_query_many: 0, + verify_query_with_options: 0, + verify_query_raw: 0, + verify_layer_proof: 0, + verify_query: 0, + verify_subset_query: 0, + verify_query_with_absence_proof: 0, + verify_subset_query_with_absence_proof: 0, + verify_query_with_chained_path_queries: 0, + }, + average_case: GroveDBOperationsAverageCaseVersions { + add_average_case_get_merk_at_path: 0, + average_case_merk_replace_tree: 0, + average_case_merk_insert_tree: 0, + average_case_merk_delete_tree: 0, + average_case_merk_insert_element: 0, + 
average_case_merk_replace_element: 0, + average_case_merk_patch_element: 0, + average_case_merk_delete_element: 0, + add_average_case_has_raw_cost: 0, + add_average_case_has_raw_tree_cost: 0, + add_average_case_get_raw_cost: 0, + add_average_case_get_raw_tree_cost: 0, + add_average_case_get_cost: 0, + }, + worst_case: GroveDBOperationsWorstCaseVersions { + add_worst_case_get_merk_at_path: 0, + worst_case_merk_replace_tree: 0, + worst_case_merk_insert_tree: 0, + worst_case_merk_delete_tree: 0, + worst_case_merk_insert_element: 0, + worst_case_merk_replace_element: 0, + worst_case_merk_patch_element: 0, + worst_case_merk_delete_element: 0, + add_worst_case_has_raw_cost: 0, + add_worst_case_get_raw_tree_cost: 0, + add_worst_case_get_raw_cost: 0, + add_worst_case_get_cost: 0, + }, + }, + path_query_methods: GroveDBPathQueryMethodVersions { + terminal_keys: 0, + merge: 0, + query_items_at_path: 0, + }, + replication: GroveDBReplicationVersions { + get_subtrees_metadata: 0, + fetch_chunk: 0, + start_snapshot_syncing: 0, + apply_chunk: 0, + }, + }, + merk_versions: MerkVersions {}, +}; diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index 28ba9995..57a27479 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -33,6 +33,7 @@ tower-http = { version = "0.5.2", features = ["fs"], optional = true } blake3 = "1.4.0" bitvec = "1" zip-extensions = { version ="0.6.2", optional = true } +grovedb-version = { version = "1.0.0-rc.2", path = "../grovedb-version" } [dev-dependencies] rand = "0.8.5" diff --git a/grovedb/benches/insertion_benchmark.rs b/grovedb/benches/insertion_benchmark.rs index b073508c..051a32d1 100644 --- a/grovedb/benches/insertion_benchmark.rs +++ b/grovedb/benches/insertion_benchmark.rs @@ -50,9 +50,16 @@ pub fn insertion_benchmark_without_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); let test_leaf: &[u8] = b"leaf1"; - db.insert(EMPTY_PATH, test_leaf, Element::empty_tree(), None, None) 
- .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + test_leaf, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .unwrap(); let keys = std::iter::repeat_with(|| rand::thread_rng().gen::<[u8; 32]>()).take(N_ITEMS); c.bench_function("scalars insertion without transaction", |b| { @@ -64,6 +71,7 @@ pub fn insertion_benchmark_without_transaction(c: &mut Criterion) { Element::new_item(k.to_vec()), None, None, + grove_version, ) .unwrap() .unwrap(); @@ -79,9 +87,16 @@ pub fn insertion_benchmark_with_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); let test_leaf: &[u8] = b"leaf1"; - db.insert(EMPTY_PATH, test_leaf, Element::empty_tree(), None, None) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + test_leaf, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .unwrap(); let keys = std::iter::repeat_with(|| rand::thread_rng().gen::<[u8; 32]>()).take(N_ITEMS); c.bench_function("scalars insertion with transaction", |b| { @@ -94,6 +109,7 @@ pub fn insertion_benchmark_with_transaction(c: &mut Criterion) { Element::new_item(k.to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -113,9 +129,16 @@ pub fn root_leaf_insertion_benchmark_without_transaction(c: &mut Criterion) { c.bench_function("root leaves insertion without transaction", |b| { b.iter(|| { for k in keys.clone() { - db.insert(EMPTY_PATH, &k, Element::empty_tree(), None, None) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + &k, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .unwrap(); } }) }); @@ -132,9 +155,16 @@ pub fn root_leaf_insertion_benchmark_with_transaction(c: &mut Criterion) { b.iter(|| { let tx = db.start_transaction(); for k in keys.clone() { - db.insert(EMPTY_PATH, &k, Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + &k, + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + 
.unwrap() + .unwrap(); } db.commit_transaction(tx).unwrap().unwrap(); }) @@ -155,6 +185,7 @@ pub fn deeply_nested_insertion_benchmark_without_transaction(c: &mut Criterion) Element::empty_tree(), None, None, + grove_version, ) .unwrap() .unwrap(); @@ -172,6 +203,7 @@ pub fn deeply_nested_insertion_benchmark_without_transaction(c: &mut Criterion) Element::new_item(k.to_vec()), None, None, + grove_version, ) .unwrap() .unwrap(); @@ -194,6 +226,7 @@ pub fn deeply_nested_insertion_benchmark_with_transaction(c: &mut Criterion) { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .unwrap(); @@ -212,6 +245,7 @@ pub fn deeply_nested_insertion_benchmark_with_transaction(c: &mut Criterion) { Element::new_item(k.to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); diff --git a/grovedb/src/batch/batch_structure.rs b/grovedb/src/batch/batch_structure.rs index f07aad17..eb00f1e8 100644 --- a/grovedb/src/batch/batch_structure.rs +++ b/grovedb/src/batch/batch_structure.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Batch structure #[cfg(feature = "full")] diff --git a/grovedb/src/batch/estimated_costs/average_case_costs.rs b/grovedb/src/batch/estimated_costs/average_case_costs.rs index 7f4521a7..2f50186b 100644 --- a/grovedb/src/batch/estimated_costs/average_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/average_case_costs.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Average case costs #[cfg(feature = "full")] @@ -46,6 +18,7 @@ use grovedb_merk::{ }; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::RocksDbStorage; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use itertools::Itertools; @@ -69,6 +42,7 @@ impl Op { key: &KeyInfo, layer_element_estimates: &EstimatedLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let in_tree_using_sums = layer_element_estimates.is_sum_tree; let propagate_if_input = || { @@ -84,6 +58,7 @@ impl Op { layer_element_estimates, sum.is_some(), propagate, + grove_version, ), Op::InsertTreeWithRootHash { flags, sum, .. } => { GroveDb::average_case_merk_insert_tree( @@ -92,6 +67,7 @@ impl Op { sum.is_some(), in_tree_using_sums, propagate_if_input(), + grove_version, ) } Op::Insert { element } => GroveDb::average_case_merk_insert_element( @@ -99,6 +75,7 @@ impl Op { element, in_tree_using_sums, propagate_if_input(), + grove_version, ), Op::RefreshReference { reference_path_type, @@ -114,12 +91,14 @@ impl Op { ), in_tree_using_sums, propagate_if_input(), + grove_version, ), Op::Replace { element } => GroveDb::average_case_merk_replace_element( key, element, in_tree_using_sums, propagate_if_input(), + grove_version, ), Op::Patch { element, @@ -130,21 +109,27 @@ impl Op { *change_in_bytes, in_tree_using_sums, propagate_if_input(), + grove_version, + ), + Op::Delete => GroveDb::average_case_merk_delete_element( + key, + layer_element_estimates, + propagate, + grove_version, ), - Op::Delete => { - GroveDb::average_case_merk_delete_element(key, layer_element_estimates, propagate) - } Op::DeleteTree => GroveDb::average_case_merk_delete_tree( key, false, layer_element_estimates, propagate, + grove_version, ), Op::DeleteSumTree => GroveDb::average_case_merk_delete_tree( key, true, layer_element_estimates, propagate, + grove_version, ), } } @@ -204,6 +189,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { _batch_apply_options: 
&BatchApplyOptions, _flags_update: &mut G, _split_removal_bytes: &mut SR, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); @@ -244,11 +230,15 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { )) }) ); - GroveDb::add_average_case_get_merk_at_path::( - &mut cost, - path, - layer_should_be_empty, - layer_info.is_sum_tree, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_average_case_get_merk_at_path::( + &mut cost, + path, + layer_should_be_empty, + layer_info.is_sum_tree, + grove_version, + ) ); self.cached_merks .insert(path.clone(), layer_info.is_sum_tree); @@ -257,7 +247,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { for (key, op) in ops_at_path_by_key.into_iter() { cost_return_on_error!( &mut cost, - op.average_case_cost(&key, layer_element_estimates, false) + op.average_case_cost(&key, layer_element_estimates, false, grove_version) ); } @@ -268,20 +258,28 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { Ok(([0u8; 32], None, None)).wrap_with_cost(cost) } - fn update_base_merk_root_key(&mut self, _root_key: Option>) -> CostResult<(), Error> { + fn update_base_merk_root_key( + &mut self, + _root_key: Option>, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); cost.seek_count += 1; let base_path = KeyInfoPath(vec![]); if let Some(estimated_layer_info) = self.paths.get(&base_path) { // Then we have to get the tree if !self.cached_merks.contains_key(&base_path) { - GroveDb::add_average_case_get_merk_at_path::( - &mut cost, - &base_path, - estimated_layer_info - .estimated_layer_count - .estimated_to_be_empty(), - estimated_layer_info.is_sum_tree, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_average_case_get_merk_at_path::( + &mut cost, + &base_path, + estimated_layer_info + .estimated_layer_count + .estimated_to_be_empty(), + estimated_layer_info.is_sum_tree, + grove_version + ) ); self.cached_merks .insert(base_path, 
estimated_layer_info.is_sum_tree); @@ -306,6 +304,7 @@ mod tests { EstimatedLayerSizes::{AllItems, AllSubtrees}, EstimatedSumTrees::{NoSumTrees, SomeSumTrees}, }; + use grovedb_version::version::GroveVersion; use crate::{ batch::{ @@ -318,6 +317,7 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -343,11 +343,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( average_case_cost.eq(&cost), "average cost not eq {:?} \n to cost {:?}", @@ -385,6 +386,7 @@ mod tests { #[test] fn test_batch_root_one_tree_with_flags_insert_op_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -418,11 +420,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( average_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -450,6 +453,7 @@ mod tests { #[test] fn test_batch_root_one_item_insert_op_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -475,11 +479,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + 
let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; // because we know the object we are inserting we can know the average // case cost if it doesn't already exist assert_eq!( @@ -510,12 +515,20 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_under_element_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], @@ -540,11 +553,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; // because we know the object we are inserting we can know the average // case cost if it doesn't already exist assert_eq!(cost.storage_cost, average_case_cost.storage_cost); @@ -587,12 +601,20 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_in_sub_tree_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], @@ -626,11 +648,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) 
.cost_as_result() .expect("expected to get average case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( average_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -671,6 +694,7 @@ mod tests { #[test] fn test_batch_root_one_sum_item_replace_op_average_case_costs() { + let grove_version = GroveVersion::latest(); let ops = vec![GroveDbOp::replace_op( vec![vec![7]], hex::decode("46447a3b4c8939fd4cf8b610ba7da3d3f6b52b39ab2549bf91503b9b07814055") @@ -709,6 +733,7 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get average case costs"); @@ -734,12 +759,20 @@ mod tests { #[test] fn test_batch_average_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"keyb", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"keyb", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], @@ -773,10 +806,11 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to estimate costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; // at the moment we just check the added bytes are the same assert_eq!( average_case_cost.storage_cost.added_bytes, diff --git a/grovedb/src/batch/estimated_costs/mod.rs b/grovedb/src/batch/estimated_costs/mod.rs index f0f505bc..54fc109c 100644 --- a/grovedb/src/batch/estimated_costs/mod.rs +++ b/grovedb/src/batch/estimated_costs/mod.rs @@ -1,31 +1,3 @@ -// 
MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Estimated costs #[cfg(feature = "full")] diff --git a/grovedb/src/batch/estimated_costs/worst_case_costs.rs b/grovedb/src/batch/estimated_costs/worst_case_costs.rs index f45bbff7..1b1d42e7 100644 --- a/grovedb/src/batch/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/worst_case_costs.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Worst case costs #[cfg(feature = "full")] @@ -45,6 +17,7 @@ use grovedb_merk::estimated_costs::worst_case_costs::{ use grovedb_merk::RootHashKeyAndSum; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::RocksDbStorage; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use itertools::Itertools; @@ -66,6 +39,7 @@ impl Op { is_in_parent_sum_tree: bool, worst_case_layer_element_estimates: &WorstCaseLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let propagate_if_input = || { if propagate { @@ -81,6 +55,7 @@ impl Op { is_in_parent_sum_tree, worst_case_layer_element_estimates, propagate, + grove_version, ), Op::InsertTreeWithRootHash { flags, sum, .. } => GroveDb::worst_case_merk_insert_tree( key, @@ -88,12 +63,14 @@ impl Op { sum.is_some(), is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::Insert { element } => GroveDb::worst_case_merk_insert_element( key, element, is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::RefreshReference { reference_path_type, @@ -109,12 +86,14 @@ impl Op { ), is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::Replace { element } => GroveDb::worst_case_merk_replace_element( key, element, is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::Patch { element, @@ -124,23 +103,27 @@ impl Op { element, is_in_parent_sum_tree, propagate_if_input(), + grove_version, ), Op::Delete => GroveDb::worst_case_merk_delete_element( key, worst_case_layer_element_estimates, propagate, + grove_version, ), Op::DeleteTree => GroveDb::worst_case_merk_delete_tree( key, false, worst_case_layer_element_estimates, propagate, + grove_version, ), Op::DeleteSumTree => GroveDb::worst_case_merk_delete_tree( key, true, worst_case_layer_element_estimates, propagate, + grove_version, ), } } @@ -200,6 +183,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { _batch_apply_options: &BatchApplyOptions, _flags_update: &mut G, 
_split_removal_bytes: &mut SR, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); @@ -215,14 +199,28 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { // Then we have to get the tree if !self.cached_merks.contains(path) { - GroveDb::add_worst_case_get_merk_at_path::(&mut cost, path, false); + cost_return_on_error_no_add!( + &cost, + GroveDb::add_worst_case_get_merk_at_path::( + &mut cost, + path, + false, + grove_version, + ) + ); self.cached_merks.insert(path.clone()); } for (key, op) in ops_at_path_by_key.into_iter() { cost_return_on_error!( &mut cost, - op.worst_case_cost(&key, false, worst_case_layer_element_estimates, false) + op.worst_case_cost( + &key, + false, + worst_case_layer_element_estimates, + false, + grove_version + ) ); } @@ -233,15 +231,25 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { Ok(([0u8; 32], None, None)).wrap_with_cost(cost) } - fn update_base_merk_root_key(&mut self, _root_key: Option>) -> CostResult<(), Error> { + fn update_base_merk_root_key( + &mut self, + _root_key: Option>, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); cost.seek_count += 1; let base_path = KeyInfoPath(vec![]); if let Some(_estimated_layer_info) = self.paths.get(&base_path) { // Then we have to get the tree if !self.cached_merks.contains(&base_path) { - GroveDb::add_worst_case_get_merk_at_path::( - &mut cost, &base_path, false, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_worst_case_get_merk_at_path::( + &mut cost, + &base_path, + false, + grove_version, + ) ); self.cached_merks.insert(base_path); } @@ -261,6 +269,7 @@ mod tests { }; #[rustfmt::skip] use grovedb_merk::estimated_costs::worst_case_costs::WorstCaseLayerInformation::MaxElementsNumber; + use grovedb_version::version::GroveVersion; use crate::{ batch::{ @@ -273,6 +282,7 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_worst_case_costs() { + let grove_version = 
GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -291,11 +301,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -326,6 +337,7 @@ mod tests { #[test] fn test_batch_root_one_tree_with_flags_insert_op_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -344,11 +356,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -379,6 +392,7 @@ mod tests { #[test] fn test_batch_root_one_item_insert_op_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -397,11 +411,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -432,12 +447,20 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_under_element_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = 
db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], @@ -454,11 +477,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -489,12 +513,20 @@ mod tests { #[test] fn test_batch_root_one_tree_insert_op_in_sub_tree_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], @@ -515,11 +547,12 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ) .cost_as_result() .expect("expected to get worst case costs"); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert!( worst_case_cost.worse_or_eq_than(&cost), "not worse {:?} \n than {:?}", @@ -544,12 +577,20 @@ mod tests { #[test] fn test_batch_worst_case_costs() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"keyb", 
Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"keyb", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], @@ -566,9 +607,10 @@ mod tests { |_flags, _removed_key_bytes, _removed_value_bytes| { Ok((NoStorageRemoval, NoStorageRemoval)) }, + grove_version, ); assert!(worst_case_cost_result.value.is_ok()); - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; // at the moment we just check the added bytes are the same assert_eq!( worst_case_cost_result.cost.storage_cost.added_bytes, diff --git a/grovedb/src/batch/just_in_time_cost_tests.rs b/grovedb/src/batch/just_in_time_cost_tests.rs index 2321b467..e1fddf5c 100644 --- a/grovedb/src/batch/just_in_time_cost_tests.rs +++ b/grovedb/src/batch/just_in_time_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2023 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! This tests just in time costs //! Just in time costs modify the tree in the same batch @@ -33,6 +5,8 @@ mod tests { use std::option::Option::None; + use grovedb_version::version::GroveVersion; + use crate::{ batch::GroveDbOp, reference_path::ReferencePathType::UpstreamFromElementHeightReference, @@ -42,15 +16,30 @@ mod tests { #[test] fn test_partial_costs_with_no_new_operations_are_same_as_apply_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"documents", Element::empty_tree(), None, None) - .cost_as_result() - .expect("expected to insert successfully"); - db.insert(EMPTY_PATH, b"balances", Element::empty_tree(), None, None) - .cost_as_result() - .expect("expected to insert successfully"); + db.insert( + EMPTY_PATH, + b"documents", + Element::empty_tree(), + None, + None, + grove_version, + ) + .cost_as_result() + .expect("expected to insert successfully"); + db.insert( + EMPTY_PATH, + b"balances", + Element::empty_tree(), + None, + None, + grove_version, + ) + .cost_as_result() + .expect("expected to insert successfully"); let ops = vec![ GroveDbOp::insert_op( vec![b"documents".to_vec()], @@ -73,27 +62,38 @@ mod tests { ]; let full_cost = db - .apply_batch(ops.clone(), None, Some(&tx)) + .apply_batch(ops.clone(), None, Some(&tx), grove_version) .cost_as_result() .expect("expected to apply batch"); let apply_root_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("expected to get root hash"); - db.get([b"documents".as_slice()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + 
b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); - db.get([b"documents".as_slice()].as_ref(), b"key3", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key3", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); db.get( [b"documents".as_slice(), b"key3".as_slice()].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); @@ -101,27 +101,44 @@ mod tests { tx.rollback().expect("expected to rollback"); let cost = db - .apply_partial_batch(ops, None, |_cost, _left_over_ops| Ok(vec![]), Some(&tx)) + .apply_partial_batch( + ops, + None, + |_cost, _left_over_ops| Ok(vec![]), + Some(&tx), + grove_version, + ) .cost_as_result() .expect("expected to apply batch"); let apply_partial_root_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("expected to get root hash"); - db.get([b"documents".as_slice()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); - db.get([b"documents".as_slice()].as_ref(), b"key3", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key3", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); db.get( [b"documents".as_slice(), b"key3".as_slice()].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); @@ -133,18 +150,27 @@ mod tests { #[test] fn test_partial_costs_with_add_balance_operations() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"documents", Element::empty_tree(), None, None) - .cost_as_result() - .expect("expected to insert successfully"); + db.insert( + EMPTY_PATH, + b"documents", + 
Element::empty_tree(), + None, + None, + grove_version, + ) + .cost_as_result() + .expect("expected to insert successfully"); db.insert( EMPTY_PATH, b"balances", Element::empty_sum_tree(), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -170,27 +196,38 @@ mod tests { ]; let full_cost = db - .apply_batch(ops.clone(), None, Some(&tx)) + .apply_batch(ops.clone(), None, Some(&tx), grove_version) .cost_as_result() .expect("expected to apply batch"); let apply_root_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("expected to get root hash"); - db.get([b"documents".as_slice()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); - db.get([b"documents".as_slice()].as_ref(), b"key3", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key3", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); db.get( [b"documents".as_slice(), b"key3".as_slice()].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); @@ -218,33 +255,50 @@ mod tests { Ok(new_ops) }, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to apply batch"); let apply_partial_root_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("expected to get root hash"); - db.get([b"documents".as_slice()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); - db.get([b"documents".as_slice()].as_ref(), b"key3", Some(&tx)) - .unwrap() - .expect("cannot get element"); + db.get( + [b"documents".as_slice()].as_ref(), + b"key3", + Some(&tx), + grove_version, + 
) + .unwrap() + .expect("cannot get element"); db.get( [b"documents".as_slice(), b"key3".as_slice()].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); let balance = db - .get([b"balances".as_slice()].as_ref(), b"person", Some(&tx)) + .get( + [b"balances".as_slice()].as_ref(), + b"person", + Some(&tx), + grove_version, + ) .unwrap() .expect("cannot get element"); diff --git a/grovedb/src/batch/key_info.rs b/grovedb/src/batch/key_info.rs index a8eb50af..e7dd25b5 100644 --- a/grovedb/src/batch/key_info.rs +++ b/grovedb/src/batch/key_info.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Key info #[cfg(feature = "full")] diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index 474a304e..7f9c119e 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Apply multiple GroveDB operations atomically. 
mod batch_structure; @@ -87,6 +59,9 @@ use grovedb_storage::{ rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, Storage, StorageBatch, StorageContext, }; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use grovedb_visualize::{Drawer, Visualize}; use integer_encoding::VarInt; use itertools::Itertools; @@ -694,9 +669,14 @@ trait TreeCache { batch_apply_options: &BatchApplyOptions, flags_update: &mut G, split_removal_bytes: &mut SR, + grove_version: &GroveVersion, ) -> CostResult; - fn update_base_merk_root_key(&mut self, root_key: Option>) -> CostResult<(), Error>; + fn update_base_merk_root_key( + &mut self, + root_key: Option>, + grove_version: &GroveVersion, + ) -> CostResult<(), Error>; } impl<'db, S, F> TreeCacheMerkByPath @@ -751,6 +731,7 @@ where ops_by_qualified_paths: &'a BTreeMap>, Op>, recursions_allowed: u8, intermediate_reference_info: Option<&'a ReferencePathType>, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); let (key, reference_path) = qualified_path.split_last().unwrap(); // already checked @@ -771,7 +752,8 @@ where merk.get_value_hash( key.as_ref(), true, - Some(Element::value_defined_cost_for_serialized_value) + Some(Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) ); @@ -804,6 +786,7 @@ where path.as_slice(), ops_by_qualified_paths, recursions_allowed - 1, + grove_version, ) } else { // Here the element being referenced doesn't change in the same batch @@ -815,7 +798,8 @@ where merk.get( key.as_ref(), true, - Some(Element::value_defined_cost_for_serialized_value) + Some(Element::value_defined_cost_for_serialized_value), + grove_version ) .map_err(|e| Error::CorruptedData(e.to_string())) ); @@ -838,14 +822,15 @@ where let element = cost_return_on_error_no_add!( &cost, - Element::deserialize(referenced_element.as_slice()).map_err(|_| { + 
Element::deserialize(referenced_element.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) ); match element { Element::Item(..) | Element::SumItem(..) => { - let serialized = cost_return_on_error_no_add!(&cost, element.serialize()); + let serialized = + cost_return_on_error_no_add!(&cost, element.serialize(grove_version)); let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } @@ -858,6 +843,7 @@ where path.as_slice(), ops_by_qualified_paths, recursions_allowed - 1, + grove_version, ) } Element::Tree(..) | Element::SumTree(..) => Err(Error::InvalidBatchOperation( @@ -883,6 +869,7 @@ where qualified_path: &[Vec], ops_by_qualified_paths: &'a BTreeMap>, Op>, recursions_allowed: u8, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); if recursions_allowed == 0 { @@ -900,8 +887,10 @@ where Op::Insert { element } | Op::Replace { element } | Op::Patch { element, .. } => { match element { Element::Item(..) | Element::SumItem(..) => { - let serialized = - cost_return_on_error_no_add!(&cost, element.serialize()); + let serialized = cost_return_on_error_no_add!( + &cost, + element.serialize(grove_version) + ); let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } @@ -917,6 +906,7 @@ where path.as_slice(), ops_by_qualified_paths, recursions_allowed - 1, + grove_version, ) } Element::Tree(..) | Element::SumTree(..) 
=> { @@ -943,6 +933,7 @@ where ops_by_qualified_paths, recursions_allowed, reference_info, + grove_version, ) } Op::Delete | Op::DeleteTree | Op::DeleteSumTree => { @@ -958,6 +949,7 @@ where ops_by_qualified_paths, recursions_allowed, None, + grove_version, ) } } @@ -989,7 +981,11 @@ where Ok(()).wrap_with_cost(cost) } - fn update_base_merk_root_key(&mut self, root_key: Option>) -> CostResult<(), Error> { + fn update_base_merk_root_key( + &mut self, + root_key: Option>, + _grove_version: &GroveVersion, + ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); let base_path = vec![]; let merk_wrapped = self @@ -1011,6 +1007,7 @@ where batch_apply_options: &BatchApplyOptions, flags_update: &mut G, split_removal_bytes: &mut SR, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); // todo: fix this @@ -1058,7 +1055,8 @@ where self.follow_reference_get_value_hash( path_reference.as_slice(), ops_by_qualified_paths, - element_max_reference_hop.unwrap_or(MAX_REFERENCE_HOPS as u8) + element_max_reference_hop.unwrap_or(MAX_REFERENCE_HOPS as u8), + grove_version, ) ); @@ -1068,7 +1066,8 @@ where key_info.get_key_clone(), referenced_element_value_hash, &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version, ) ); } @@ -1086,7 +1085,8 @@ where NULL_HASH, false, &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version, ) ); } @@ -1104,7 +1104,8 @@ where &mut merk, key_info.get_key(), &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version, ) ); if !inserted { @@ -1119,7 +1120,8 @@ where element.insert_into_batch_operations( key_info.get_key(), &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version, ) ); } @@ -1143,7 +1145,8 @@ where merk.get( key_info.as_slice(), true, - Some(Element::value_defined_cost_for_serialized_value) + Some(Element::value_defined_cost_for_serialized_value), + grove_version ) .map( |result_value| 
result_value.map_err(Error::MerkError).and_then( @@ -1155,7 +1158,7 @@ where ); cost_return_on_error_no_add!( &cost, - Element::deserialize(value.as_slice()).map_err(|_| { + Element::deserialize(value.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) ) @@ -1195,7 +1198,8 @@ where self.follow_reference_get_value_hash( path_reference.as_slice(), ops_by_qualified_paths, - max_reference_hop.unwrap_or(MAX_REFERENCE_HOPS as u8) + max_reference_hop.unwrap_or(MAX_REFERENCE_HOPS as u8), + grove_version ) ); @@ -1205,7 +1209,8 @@ where key_info.get_key_clone(), referenced_element_value_hash, &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version ) ); } @@ -1217,7 +1222,8 @@ where false, is_sum_tree, /* we are in a sum tree, this might or might not be a * sum item */ - &mut batch_operations + &mut batch_operations, + grove_version ) ); } @@ -1228,7 +1234,8 @@ where key_info.get_key(), true, false, - &mut batch_operations + &mut batch_operations, + grove_version ) ); } @@ -1239,7 +1246,8 @@ where key_info.get_key(), true, true, - &mut batch_operations + &mut batch_operations, + grove_version ) ); } @@ -1256,7 +1264,8 @@ where root_key, hash, sum, - &mut batch_operations + &mut batch_operations, + grove_version ) ); } @@ -1282,7 +1291,8 @@ where hash, false, &mut batch_operations, - merk_feature_type + merk_feature_type, + grove_version ) ); } @@ -1295,17 +1305,17 @@ where &[], Some(batch_apply_options.as_merk_options()), &|key, value| { - Element::specialized_costs_for_key_value(key, value, is_sum_tree) + Element::specialized_costs_for_key_value(key, value, is_sum_tree, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), &mut |storage_costs, old_value, new_value| { // todo: change the flags without full deserialization - let old_element = Element::deserialize(old_value.as_slice()) + let old_element = 
Element::deserialize(old_value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; let maybe_old_flags = old_element.get_flags_owned(); - let mut new_element = Element::deserialize(new_value.as_slice()) + let mut new_element = Element::deserialize(new_value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; let maybe_new_flags = new_element.get_flags_mut(); match maybe_new_flags { @@ -1322,9 +1332,11 @@ where })?; if changed { let flags_len = new_flags.len() as u32; - new_value.clone_from(&new_element.serialize().map_err(|e| { - MerkError::ClientCorruptionError(e.to_string()) - })?); + new_value.clone_from( + &new_element.serialize(grove_version).map_err(|e| { + MerkError::ClientCorruptionError(e.to_string()) + })?, + ); // we need to give back the value defined cost in the case that the // new element is a tree match new_element { @@ -1357,7 +1369,7 @@ where } }, &mut |value, removed_key_bytes, removed_value_bytes| { - let mut element = Element::deserialize(value.as_slice()) + let mut element = Element::deserialize(value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; let maybe_flags = element.get_flags_mut(); match maybe_flags { @@ -1371,6 +1383,7 @@ where } } }, + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) ); @@ -1395,6 +1408,7 @@ impl GroveDb { fn apply_batch_structure, F, SR>( batch_structure: BatchStructure, batch_apply_options: Option, + grove_version: &GroveVersion, ) -> CostResult, Error> where F: FnMut(&StorageCost, Option, &mut ElementFlags) -> Result, @@ -1404,6 +1418,13 @@ impl GroveDb { u32, ) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { + check_grovedb_v0_with_cost!( + "apply_batch_structure", + grove_version + .grovedb_versions + .apply_batch + .apply_batch_structure + ); let mut cost = OperationCost::default(); let BatchStructure { mut ops_by_level_paths, @@ -1433,6 +1454,7 @@ impl 
GroveDb { &batch_apply_options, &mut flags_update, &mut split_removal_bytes, + grove_version, ) ); if batch_apply_options.base_root_storage_is_free { @@ -1440,7 +1462,7 @@ impl GroveDb { let mut update_root_cost = cost_return_on_error_no_add!( &cost, merk_tree_cache - .update_base_merk_root_key(calculated_root_key) + .update_base_merk_root_key(calculated_root_key, grove_version) .cost_as_result() ); update_root_cost.storage_cost = StorageCost::default(); @@ -1448,7 +1470,8 @@ impl GroveDb { } else { cost_return_on_error!( &mut cost, - merk_tree_cache.update_base_merk_root_key(calculated_root_key) + merk_tree_cache + .update_base_merk_root_key(calculated_root_key, grove_version) ); } } else { @@ -1461,6 +1484,7 @@ impl GroveDb { &batch_apply_options, &mut flags_update, &mut split_removal_bytes, + grove_version, ) ); @@ -1609,7 +1633,12 @@ impl GroveDb { Error, >, get_merk_fn: impl FnMut(&[Vec], bool) -> CostResult, Error>, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "apply_body", + grove_version.grovedb_versions.apply_batch.apply_body + ); let mut cost = OperationCost::default(); let batch_structure = cost_return_on_error!( &mut cost, @@ -1623,7 +1652,8 @@ impl GroveDb { } ) ); - Self::apply_batch_structure(batch_structure, batch_apply_options).add_cost(cost) + Self::apply_batch_structure(batch_structure, batch_apply_options, grove_version) + .add_cost(cost) } /// Method to propagate updated subtree root hashes up to GroveDB root @@ -1648,7 +1678,15 @@ impl GroveDb { Error, >, get_merk_fn: impl FnMut(&[Vec], bool) -> CostResult, Error>, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "continue_partial_apply_body", + grove_version + .grovedb_versions + .apply_batch + .continue_partial_apply_body + ); let mut cost = OperationCost::default(); let batch_structure = cost_return_on_error!( &mut cost, @@ -1663,7 +1701,8 @@ impl GroveDb { } ) ); - 
Self::apply_batch_structure(batch_structure, batch_apply_options).add_cost(cost) + Self::apply_batch_structure(batch_structure, batch_apply_options, grove_version) + .add_cost(cost) } /// Applies operations on GroveDB without batching @@ -1672,7 +1711,15 @@ impl GroveDb { ops: Vec, options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_operations_without_batching", + grove_version + .grovedb_versions + .apply_batch + .apply_operations_without_batching + ); let mut cost = OperationCost::default(); for op in ops.into_iter() { match op.op { @@ -1688,6 +1735,7 @@ impl GroveDb { element.to_owned(), options.clone().map(|o| o.as_insert_options()), transaction, + grove_version, ) ); } @@ -1700,7 +1748,8 @@ impl GroveDb { path_slices.as_slice(), op.key.as_slice(), options.clone().map(|o| o.as_delete_options()), - transaction + transaction, + grove_version ) ); } @@ -1716,7 +1765,12 @@ impl GroveDb { ops: Vec, batch_apply_options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_batch", + grove_version.grovedb_versions.apply_batch.apply_batch + ); self.apply_batch_with_element_flags_update( ops, batch_apply_options, @@ -1728,6 +1782,7 @@ impl GroveDb { )) }, transaction, + grove_version, ) } @@ -1741,7 +1796,15 @@ impl GroveDb { &Option, ) -> Result, Error>, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_partial_batch", + grove_version + .grovedb_versions + .apply_batch + .apply_partial_batch + ); self.apply_partial_batch_with_element_flags_update( ops, batch_apply_options, @@ -1754,6 +1817,7 @@ impl GroveDb { }, cost_based_add_on_operations, transaction, + grove_version, ) } @@ -1765,7 +1829,15 @@ impl GroveDb { path: SubtreePath, tx: &'db Transaction, new_merk: bool, + grove_version: &GroveVersion, ) -> 
CostResult>, Error> { + check_grovedb_v0_with_cost!( + "open_batch_transactional_merk_at_path", + grove_version + .grovedb_versions + .apply_batch + .open_batch_transactional_merk_at_path + ); let mut cost = OperationCost::default(); let storage = self .db @@ -1783,12 +1855,14 @@ impl GroveDb { .unwrap_add_cost(&mut cost); let element = cost_return_on_error!( &mut cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|_| { - Error::InvalidPath(format!( - "could not get key for parent of subtree for batch at path {}", - parent_path.to_vec().into_iter().map(hex::encode).join("/") - )) - }) + Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( + |_| { + Error::InvalidPath(format!( + "could not get key for parent of subtree for batch at path {}", + parent_path.to_vec().into_iter().map(hex::encode).join("/") + )) + } + ) ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { @@ -1797,6 +1871,7 @@ impl GroveDb { root_key, is_sum_tree, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| { Error::CorruptedData("cannot open a subtree with given root key".to_owned()) @@ -1816,6 +1891,7 @@ impl GroveDb { storage, false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) .add_cost(cost) @@ -1828,7 +1904,15 @@ impl GroveDb { storage_batch: &'a StorageBatch, path: SubtreePath, new_merk: bool, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "open_batch_merk_at_path", + grove_version + .grovedb_versions + .apply_batch + .open_batch_merk_at_path + ); let mut local_cost = OperationCost::default(); let storage = self .db @@ -1849,7 +1933,7 @@ impl GroveDb { .unwrap_add_cost(&mut local_cost); let element = cost_return_on_error!( &mut local_cost, - Element::get_from_storage(&parent_storage, 
last) + Element::get_from_storage(&parent_storage, last, grove_version) ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { @@ -1858,6 +1942,7 @@ impl GroveDb { root_key, is_sum_tree, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| { Error::CorruptedData("cannot open a subtree with given root key".to_owned()) @@ -1874,6 +1959,7 @@ impl GroveDb { storage, false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| Error::CorruptedData("cannot open a subtree".to_owned())) .add_cost(local_cost) @@ -1899,7 +1985,15 @@ impl GroveDb { Error, >, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_batch_with_element_flags_update", + grove_version + .grovedb_versions + .apply_batch + .apply_batch_with_element_flags_update + ); let mut cost = OperationCost::default(); if ops.is_empty() { @@ -1952,8 +2046,10 @@ impl GroveDb { path.into(), tx, new_merk, + grove_version, ) - } + }, + grove_version ) ); @@ -1973,8 +2069,14 @@ impl GroveDb { update_element_flags_function, split_removal_bytes_function, |path, new_merk| { - self.open_batch_merk_at_path(&storage_batch, path.into(), new_merk) - } + self.open_batch_merk_at_path( + &storage_batch, + path.into(), + new_merk, + grove_version, + ) + }, + grove_version ) ); @@ -2015,7 +2117,15 @@ impl GroveDb { &Option, ) -> Result, Error>, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "apply_partial_batch_with_element_flags_update", + grove_version + .grovedb_versions + .apply_batch + .apply_partial_batch_with_element_flags_update + ); let mut cost = OperationCost::default(); if ops.is_empty() { @@ -2072,8 +2182,10 @@ impl GroveDb { path.into(), tx, new_merk, + grove_version, ) - } + }, + grove_version ) ); // if we paused at the root 
height, the left over operations would be to replace @@ -2118,8 +2230,10 @@ impl GroveDb { path.into(), tx, new_merk, + grove_version, ) - } + }, + grove_version ) ); @@ -2149,8 +2263,14 @@ impl GroveDb { &mut update_element_flags_function, &mut split_removal_bytes_function, |path, new_merk| { - self.open_batch_merk_at_path(&storage_batch, path.into(), new_merk) - } + self.open_batch_merk_at_path( + &storage_batch, + path.into(), + new_merk, + grove_version, + ) + }, + grove_version ) ); @@ -2189,8 +2309,14 @@ impl GroveDb { update_element_flags_function, split_removal_bytes_function, |path, new_merk| { - self.open_batch_merk_at_path(&continue_storage_batch, path.into(), new_merk) - } + self.open_batch_merk_at_path( + &continue_storage_batch, + path.into(), + new_merk, + grove_version, + ) + }, + grove_version ) ); @@ -2235,7 +2361,15 @@ impl GroveDb { (StorageRemovedBytes, StorageRemovedBytes), Error, >, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "estimated_case_operations_for_batch", + grove_version + .grovedb_versions + .apply_batch + .estimated_case_operations_for_batch + ); let mut cost = OperationCost::default(); if ops.is_empty() { @@ -2257,7 +2391,11 @@ impl GroveDb { ); cost_return_on_error!( &mut cost, - Self::apply_batch_structure(batch_structure, batch_apply_options) + Self::apply_batch_structure( + batch_structure, + batch_apply_options, + grove_version + ) ); } @@ -2275,7 +2413,11 @@ impl GroveDb { ); cost_return_on_error!( &mut cost, - Self::apply_batch_structure(batch_structure, batch_apply_options) + Self::apply_batch_structure( + batch_structure, + batch_apply_options, + grove_version + ) ); } } @@ -2300,7 +2442,8 @@ mod tests { #[test] fn test_batch_validation_ok() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let element2 = Element::new_item(b"ayy2".to_vec()); let ops 
= vec![ @@ -2331,32 +2474,47 @@ mod tests { element2.clone(), ), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("cannot apply batch"); // visualize_stderr(&db); - db.get(EMPTY_PATH, b"key1", None) - .unwrap() - .expect("cannot get element"); - db.get([b"key1".as_ref()].as_ref(), b"key2", None) + db.get(EMPTY_PATH, b"key1", None, grove_version) .unwrap() .expect("cannot get element"); - db.get([b"key1".as_ref(), b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("cannot get element"); - db.get([b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", None) + db.get([b"key1".as_ref()].as_ref(), b"key2", None, grove_version) .unwrap() .expect("cannot get element"); + db.get( + [b"key1".as_ref(), b"key2"].as_ref(), + b"key3", + None, + grove_version, + ) + .unwrap() + .expect("cannot get element"); + db.get( + [b"key1".as_ref(), b"key2", b"key3"].as_ref(), + b"key4", + None, + grove_version, + ) + .unwrap() + .expect("cannot get element"); assert_eq!( - db.get([b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", None) - .unwrap() - .expect("cannot get element"), + db.get( + [b"key1".as_ref(), b"key2", b"key3"].as_ref(), + b"key4", + None, + grove_version + ) + .unwrap() + .expect("cannot get element"), element ); assert_eq!( - db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .expect("cannot get element"), element2 @@ -2365,7 +2523,8 @@ mod tests { #[test] fn test_batch_operation_consistency_checker() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // No two operations should be the same let ops = vec![ @@ -2373,7 +2532,7 @@ mod tests { GroveDbOp::insert_op(vec![b"a".to_vec()], b"b".to_vec(), Element::empty_tree()), ]; assert!(matches!( - db.apply_batch(ops, None, None).unwrap(), + db.apply_batch(ops, None, None, grove_version).unwrap(), 
Err(Error::InvalidBatchOperation( "batch operations fail consistency checks" )) @@ -2389,7 +2548,7 @@ mod tests { GroveDbOp::insert_op(vec![b"a".to_vec()], b"b".to_vec(), Element::empty_tree()), ]; assert!(matches!( - db.apply_batch(ops, None, None).unwrap(), + db.apply_batch(ops, None, None, grove_version).unwrap(), Err(Error::InvalidBatchOperation( "batch operations fail consistency checks" )) @@ -2405,7 +2564,7 @@ mod tests { GroveDbOp::delete_op(vec![], TEST_LEAF.to_vec()), ]; assert!(matches!( - db.apply_batch(ops, None, None).unwrap(), + db.apply_batch(ops, None, None, grove_version).unwrap(), Err(Error::InvalidBatchOperation( "batch operations fail consistency checks" )) @@ -2436,7 +2595,8 @@ mod tests { base_root_storage_is_free: true, batch_pause_height: None, }), - None + None, + grove_version ) .unwrap() .is_ok()); @@ -2444,12 +2604,20 @@ mod tests { #[test] fn test_batch_validation_ok_on_transaction() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"keyb", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"keyb", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let element = Element::new_item(b"ayy".to_vec()); let element2 = Element::new_item(b"ayy2".to_vec()); @@ -2481,32 +2649,43 @@ mod tests { element2.clone(), ), ]; - db.apply_batch(ops, None, Some(&tx)) + db.apply_batch(ops, None, Some(&tx), grove_version) .unwrap() .expect("cannot apply batch"); - db.get(EMPTY_PATH, b"keyb", None) + db.get(EMPTY_PATH, b"keyb", None, grove_version) .unwrap() .expect_err("we should not get an element"); - db.get(EMPTY_PATH, b"keyb", Some(&tx)) + db.get(EMPTY_PATH, b"keyb", Some(&tx), grove_version) .unwrap() .expect("we should get an element"); - db.get(EMPTY_PATH, b"key1", None) 
+ db.get(EMPTY_PATH, b"key1", None, grove_version) .unwrap() .expect_err("we should not get an element"); - db.get(EMPTY_PATH, b"key1", Some(&tx)) - .unwrap() - .expect("cannot get element"); - db.get([b"key1".as_ref()].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"); - db.get([b"key1".as_ref(), b"key2"].as_ref(), b"key3", Some(&tx)) + db.get(EMPTY_PATH, b"key1", Some(&tx), grove_version) .unwrap() .expect("cannot get element"); + db.get( + [b"key1".as_ref()].as_ref(), + b"key2", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); + db.get( + [b"key1".as_ref(), b"key2"].as_ref(), + b"key3", + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot get element"); db.get( [b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", Some(&tx), + grove_version, ) .unwrap() .expect("cannot get element"); @@ -2515,22 +2694,29 @@ mod tests { db.get( [b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", - Some(&tx) + Some(&tx), + grove_version ) .unwrap() .expect("cannot get element"), element ); assert_eq!( - db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", Some(&tx)) - .unwrap() - .expect("cannot get element"), + db.get( + [TEST_LEAF, b"key1"].as_ref(), + b"key2", + Some(&tx), + grove_version + ) + .unwrap() + .expect("cannot get element"), element2 ); } #[test] fn test_batch_add_other_element_in_sub_tree() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); // let's start by inserting a tree structure @@ -2586,6 +2772,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .unwrap() .expect("expected to do tree form insert"); @@ -2661,6 +2848,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .unwrap() .expect("expected to do first insert"); @@ -2736,6 +2924,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .unwrap() .expect("successful batch 
apply"); @@ -2880,37 +3069,40 @@ mod tests { #[ignore] #[test] fn test_batch_produces_same_result() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); let ops = grove_db_ops_for_contract_insert(); - db.apply_batch(ops, None, Some(&tx)) + db.apply_batch(ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); - db.root_hash(None).unwrap().expect("cannot get root hash"); + db.root_hash(None, grove_version) + .unwrap() + .expect("cannot get root hash"); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); let ops = grove_db_ops_for_contract_insert(); - db.apply_batch(ops.clone(), None, Some(&tx)) + db.apply_batch(ops.clone(), None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); let batch_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("cannot get root hash"); db.rollback_transaction(&tx).expect("expected to rollback"); - db.apply_operations_without_batching(ops, None, Some(&tx)) + db.apply_operations_without_batching(ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); let no_batch_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("cannot get root hash"); @@ -2920,44 +3112,47 @@ mod tests { #[ignore] #[test] fn test_batch_contract_with_document_produces_same_result() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); let ops = grove_db_ops_for_contract_insert(); - db.apply_batch(ops, None, Some(&tx)) + db.apply_batch(ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); - db.root_hash(None).unwrap().expect("cannot get root hash"); + db.root_hash(None, grove_version) + .unwrap() + .expect("cannot get root hash"); 
- let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); let ops = grove_db_ops_for_contract_insert(); let document_ops = grove_db_ops_for_contract_document_insert(); - db.apply_batch(ops.clone(), None, Some(&tx)) + db.apply_batch(ops.clone(), None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); - db.apply_batch(document_ops.clone(), None, Some(&tx)) + db.apply_batch(document_ops.clone(), None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); let batch_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("cannot get root hash"); db.rollback_transaction(&tx).expect("expected to rollback"); - db.apply_operations_without_batching(ops, None, Some(&tx)) + db.apply_operations_without_batching(ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); - db.apply_operations_without_batching(document_ops, None, Some(&tx)) + db.apply_operations_without_batching(document_ops, None, Some(&tx), grove_version) .unwrap() .expect("expected to apply batch"); let no_batch_hash = db - .root_hash(Some(&tx)) + .root_hash(Some(&tx), grove_version) .unwrap() .expect("cannot get root hash"); @@ -2966,7 +3161,8 @@ mod tests { #[test] fn test_batch_validation_broken_chain() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let ops = vec![ GroveDbOp::insert_op(vec![], b"key1".to_vec(), Element::empty_tree()), @@ -2981,16 +3177,20 @@ mod tests { Element::empty_tree(), ), ]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); assert!(db - .get([b"key1".as_ref()].as_ref(), b"key2", None) + .apply_batch(ops, None, None, grove_version) + .unwrap() + .is_err()); + assert!(db + .get([b"key1".as_ref()].as_ref(), b"key2", None, grove_version) .unwrap() .is_err()); } #[test] fn 
test_batch_validation_broken_chain_aborts_whole_batch() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let ops = vec![ GroveDbOp::insert_op( @@ -3015,31 +3215,43 @@ mod tests { Element::empty_tree(), ), ]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); assert!(db - .get([b"key1".as_ref()].as_ref(), b"key2", None) + .apply_batch(ops, None, None, grove_version) .unwrap() .is_err()); assert!(db - .get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + .get([b"key1".as_ref()].as_ref(), b"key2", None, grove_version) + .unwrap() + .is_err()); + assert!(db + .get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .is_err(),); } #[test] fn test_batch_validation_deletion_brokes_chain() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); - db.insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert a subtree"); + db.insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert a subtree"); db.insert( [b"key1".as_ref()].as_ref(), b"key2", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert a subtree"); @@ -3057,12 +3269,16 @@ mod tests { ), GroveDbOp::delete_op(vec![b"key1".to_vec()], b"key2".to_vec()), ]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); + assert!(db + .apply_batch(ops, None, None, grove_version) + .unwrap() + .is_err()); } #[test] fn test_batch_validation_insertion_under_deleted_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let ops = vec![ GroveDbOp::insert_op(vec![], b"key1".to_vec(), 
Element::empty_tree()), @@ -3083,17 +3299,23 @@ mod tests { ), GroveDbOp::delete_op(vec![b"key1".to_vec()], b"key2".to_vec()), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect_err("insertion of element under a deleted tree should not be allowed"); - db.get([b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", None) - .unwrap() - .expect_err("nothing should have been inserted"); + db.get( + [b"key1".as_ref(), b"key2", b"key3"].as_ref(), + b"key4", + None, + grove_version, + ) + .unwrap() + .expect_err("nothing should have been inserted"); } #[test] fn test_batch_validation_insert_into_existing_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); db.insert( @@ -3102,6 +3324,7 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("cannot insert value"); @@ -3111,6 +3334,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert value"); @@ -3121,7 +3345,10 @@ mod tests { b"key1".to_vec(), element.clone(), )]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); + assert!(db + .apply_batch(ops, None, None, grove_version) + .unwrap() + .is_err()); // Insertion into a tree is correct let ops = vec![GroveDbOp::insert_op( @@ -3129,11 +3356,11 @@ mod tests { b"key1".to_vec(), element.clone(), )]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("cannot apply batch"); assert_eq!( - db.get([TEST_LEAF, b"valid"].as_ref(), b"key1", None) + db.get([TEST_LEAF, b"valid"].as_ref(), b"key1", None, grove_version) .unwrap() .expect("cannot get element"), element @@ -3142,7 +3369,8 @@ mod tests { #[test] fn test_batch_validation_nested_subtree_overwrite() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = 
make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let element2 = Element::new_item(b"ayy2".to_vec()); db.insert( @@ -3151,6 +3379,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert a subtree"); @@ -3160,6 +3389,7 @@ mod tests { element, None, None, + grove_version, ) .unwrap() .expect("cannot insert an item"); @@ -3185,7 +3415,8 @@ mod tests { base_root_storage_is_free: true, batch_pause_height: None, }), - None + None, + grove_version ) .unwrap() .is_err()); @@ -3199,7 +3430,10 @@ mod tests { Element::empty_tree(), ), ]; - assert!(db.apply_batch(ops, None, None).unwrap().is_err()); + assert!(db + .apply_batch(ops, None, None, grove_version) + .unwrap() + .is_err()); // TEST_LEAF will be deleted so you can not insert underneath it // We are testing with the batch apply option @@ -3224,7 +3458,8 @@ mod tests { base_root_storage_is_free: true, batch_pause_height: None, }), - None + None, + grove_version ) .unwrap() .is_err()); @@ -3232,7 +3467,8 @@ mod tests { #[test] fn test_batch_validation_root_leaf_removal() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let ops = vec![ GroveDbOp::insert_op( vec![], @@ -3257,7 +3493,8 @@ mod tests { base_root_storage_is_free: true, batch_pause_height: None, }), - None + None, + grove_version ) .unwrap() .is_err()); @@ -3265,7 +3502,8 @@ mod tests { #[test] fn test_merk_data_is_deleted() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); db.insert( @@ -3274,6 +3512,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert a subtree"); @@ -3283,6 +3522,7 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("cannot insert an item"); @@ -3293,40 +3533,59 @@ mod tests { )]; 
assert_eq!( - db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .expect("cannot get item"), element ); - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("cannot apply batch"); assert!(db - .get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + .get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .is_err()); } #[test] fn test_multi_tree_insertion_deletion_with_propagation_no_tx() { - let db = make_test_grovedb(); - db.insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert root leaf"); - db.insert(EMPTY_PATH, b"key2", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert root leaf"); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert root leaf"); + db.insert( + EMPTY_PATH, + b"key2", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert root leaf"); db.insert( [ANOTHER_TEST_LEAF].as_ref(), b"key1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert root leaf"); - let hash = db.root_hash(None).unwrap().expect("cannot get root hash"); + let hash = db + .root_hash(None, grove_version) + .unwrap() + .expect("cannot get root hash"); let element = Element::new_item(b"ayy".to_vec()); let element2 = Element::new_item(b"ayy2".to_vec()); @@ -3349,43 +3608,66 @@ mod tests { GroveDbOp::insert_op(vec![TEST_LEAF.to_vec()], b"key".to_vec(), element2.clone()), GroveDbOp::delete_op(vec![ANOTHER_TEST_LEAF.to_vec()], b"key1".to_vec()), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("cannot apply batch"); assert!(db - .get([ANOTHER_TEST_LEAF].as_ref(), b"key1", 
None) + .get([ANOTHER_TEST_LEAF].as_ref(), b"key1", None, grove_version) .unwrap() .is_err()); assert_eq!( - db.get([b"key1".as_ref(), b"key2", b"key3"].as_ref(), b"key4", None) - .unwrap() - .expect("cannot get element"), + db.get( + [b"key1".as_ref(), b"key2", b"key3"].as_ref(), + b"key4", + None, + grove_version + ) + .unwrap() + .expect("cannot get element"), element ); assert_eq!( - db.get([TEST_LEAF].as_ref(), b"key", None) + db.get([TEST_LEAF].as_ref(), b"key", None, grove_version) .unwrap() .expect("cannot get element"), element2 ); assert_ne!( - db.root_hash(None).unwrap().expect("cannot get root hash"), + db.root_hash(None, grove_version) + .unwrap() + .expect("cannot get root hash"), hash ); // verify root leaves - assert!(db.get(EMPTY_PATH, TEST_LEAF, None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, ANOTHER_TEST_LEAF, None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, b"key1", None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, b"key2", None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, b"key3", None).unwrap().is_err()); + assert!(db + .get(EMPTY_PATH, TEST_LEAF, None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, ANOTHER_TEST_LEAF, None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, b"key1", None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, b"key2", None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, b"key3", None, grove_version) + .unwrap() + .is_err()); } #[test] fn test_nested_batch_insertion_corrupts_state() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let full_path = vec![ b"leaf1".to_vec(), b"sub1".to_vec(), @@ -3396,9 +3678,16 @@ mod tests { ]; let mut acc_path: Vec> = vec![]; for p in full_path.into_iter() { - db.insert(acc_path.as_slice(), &p, Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert"); + db.insert( + 
acc_path.as_slice(), + &p, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert"); acc_path.push(p); } @@ -3408,29 +3697,37 @@ mod tests { b"key".to_vec(), element.clone(), )]; - db.apply_batch(batch, None, None) + db.apply_batch(batch, None, None, grove_version) .unwrap() .expect("cannot apply batch"); let batch = vec![GroveDbOp::insert_op(acc_path, b"key".to_vec(), element)]; - db.apply_batch(batch, None, None) + db.apply_batch(batch, None, None, grove_version) .unwrap() .expect("cannot apply same batch twice"); } #[test] fn test_apply_sorted_pre_validated_batch_propagation() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let full_path = vec![b"leaf1".to_vec(), b"sub1".to_vec()]; let mut acc_path: Vec> = vec![]; for p in full_path.into_iter() { - db.insert(acc_path.as_slice(), &p, Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert"); + db.insert( + acc_path.as_slice(), + &p, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert"); acc_path.push(p); } - let root_hash = db.root_hash(None).unwrap().unwrap(); + let root_hash = db.root_hash(None, grove_version).unwrap().unwrap(); let element = Element::new_item(b"ayy".to_vec()); let batch = vec![GroveDbOp::insert_op( @@ -3438,17 +3735,21 @@ mod tests { b"key".to_vec(), element, )]; - db.apply_batch(batch, None, None) + db.apply_batch(batch, None, None, grove_version) .unwrap() .expect("cannot apply batch"); - assert_ne!(db.root_hash(None).unwrap().unwrap(), root_hash); + assert_ne!( + db.root_hash(None, grove_version).unwrap().unwrap(), + root_hash + ); } #[test] fn test_references() { + let grove_version = GroveVersion::latest(); // insert reference that points to non-existent item - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let batch = vec![GroveDbOp::insert_op( 
vec![TEST_LEAF.to_vec()], b"key1".to_vec(), @@ -3458,12 +3759,12 @@ mod tests { ])), )]; assert!(matches!( - db.apply_batch(batch, None, None).unwrap(), + db.apply_batch(batch, None, None, grove_version).unwrap(), Err(Error::MissingReference(String { .. })) )); // insert reference with item it points to in the same batch - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let elem = Element::new_item(b"ayy".to_vec()); let batch = vec![ GroveDbOp::insert_op( @@ -3480,9 +3781,12 @@ mod tests { elem.clone(), ), ]; - assert!(db.apply_batch(batch, None, None).unwrap().is_ok()); + assert!(db + .apply_batch(batch, None, None, grove_version) + .unwrap() + .is_ok()); assert_eq!( - db.get([TEST_LEAF].as_ref(), b"key1", None) + db.get([TEST_LEAF].as_ref(), b"key1", None, grove_version) .unwrap() .unwrap(), elem @@ -3493,15 +3797,15 @@ mod tests { reference_key_query.insert_key(b"key1".to_vec()); let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], reference_key_query); let proof = db - .prove_query(&path_query, None) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should generate proof"); - let verification_result = GroveDb::verify_query_raw(&proof, &path_query); + let verification_result = GroveDb::verify_query_raw(&proof, &path_query, grove_version); assert!(verification_result.is_ok()); // Hit reference limit when you specify max reference hop, lower than actual hop // count - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let elem = Element::new_item(b"ayy".to_vec()); let batch = vec![ GroveDbOp::insert_op( @@ -3529,7 +3833,7 @@ mod tests { GroveDbOp::insert_op(vec![TEST_LEAF.to_vec()], b"invalid_path".to_vec(), elem), ]; assert!(matches!( - db.apply_batch(batch, None, None).unwrap(), + db.apply_batch(batch, None, None, grove_version).unwrap(), Err(Error::ReferenceLimit) )); } diff --git a/grovedb/src/batch/mode.rs b/grovedb/src/batch/mode.rs index 76f15b6d..897d15f2 100644 --- 
a/grovedb/src/batch/mode.rs +++ b/grovedb/src/batch/mode.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Batch running mode #[cfg(feature = "estimated_costs")] diff --git a/grovedb/src/batch/multi_insert_cost_tests.rs b/grovedb/src/batch/multi_insert_cost_tests.rs index 501cc50a..ad171d6d 100644 --- a/grovedb/src/batch/multi_insert_cost_tests.rs +++ b/grovedb/src/batch/multi_insert_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Multi insert cost tests #[cfg(feature = "full")] @@ -36,6 +8,7 @@ mod tests { storage_cost::{removal::StorageRemovedBytes::NoStorageRemoval, StorageCost}, OperationCost, }; + use grovedb_version::version::GroveVersion; use crate::{ batch::GroveDbOp, @@ -46,14 +19,29 @@ mod tests { #[test] fn test_batch_two_insert_empty_tree_same_level_added_bytes_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let non_batch_cost_1 = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; let non_batch_cost_2 = db - .insert(EMPTY_PATH, b"key2", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key2", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; let non_batch_cost = non_batch_cost_1.add(non_batch_cost_2); tx.rollback().expect("expected to rollback"); @@ -61,7 +49,7 @@ mod tests { GroveDbOp::insert_op(vec![], b"key1".to_vec(), Element::empty_tree()), GroveDbOp::insert_op(vec![], b"key2".to_vec(), Element::empty_tree()), ]; - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert_eq!( non_batch_cost.storage_cost.added_bytes, cost.storage_cost.added_bytes @@ -72,11 +60,19 @@ mod tests { #[test] fn test_batch_three_inserts_elements_same_level_added_bytes_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let non_batch_cost_1 = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; let non_batch_cost_2 = db .insert( @@ -85,6 +81,7 @@ mod tests { Element::new_item_with_flags(b"pizza".to_vec(), Some([0, 1].to_vec())), None, Some(&tx), + grove_version, ) 
.cost; let non_batch_cost_3 = db @@ -94,6 +91,7 @@ mod tests { Element::new_reference(SiblingReference(b"key2".to_vec())), None, Some(&tx), + grove_version, ) .cost; let non_batch_cost = non_batch_cost_1.add(non_batch_cost_2).add(non_batch_cost_3); @@ -111,7 +109,7 @@ mod tests { Element::new_reference(SiblingReference(b"key2".to_vec())), ), ]; - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert_eq!( non_batch_cost.storage_cost.added_bytes, cost.storage_cost.added_bytes @@ -122,11 +120,19 @@ mod tests { #[test] fn test_batch_four_inserts_elements_multi_level_added_bytes_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let non_batch_cost_1 = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; let non_batch_cost_2 = db .insert( @@ -135,6 +141,7 @@ mod tests { Element::new_item_with_flags(b"pizza".to_vec(), Some([0, 1].to_vec())), None, Some(&tx), + grove_version, ) .cost; let non_batch_cost_3 = db @@ -144,6 +151,7 @@ mod tests { Element::empty_tree(), None, Some(&tx), + grove_version, ) .cost; let non_batch_cost_4 = db @@ -156,6 +164,7 @@ mod tests { )), None, Some(&tx), + grove_version, ) .cost; let non_batch_cost = non_batch_cost_1 @@ -185,7 +194,7 @@ mod tests { ), ]; let cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to apply batch"); assert_eq!( @@ -198,6 +207,7 @@ mod tests { #[test] fn test_batch_root_two_insert_tree_cost_same_level() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -205,7 +215,7 @@ mod tests { GroveDbOp::insert_op(vec![], b"key1".to_vec(), Element::empty_tree()), GroveDbOp::insert_op(vec![], 
b"key2".to_vec(), Element::empty_tree()), ]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 214 storage_written_bytes @@ -253,6 +263,7 @@ mod tests { #[test] fn test_batch_root_two_insert_tree_cost_different_level() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -264,7 +275,7 @@ mod tests { Element::empty_tree(), ), ]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 214 storage_written_bytes diff --git a/grovedb/src/batch/options.rs b/grovedb/src/batch/options.rs index b3916eb8..1f60aeb4 100644 --- a/grovedb/src/batch/options.rs +++ b/grovedb/src/batch/options.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Options #[cfg(feature = "full")] @@ -42,7 +14,7 @@ pub struct BatchApplyOptions { pub validate_insertion_does_not_override: bool, /// Validate insertion does not override tree pub validate_insertion_does_not_override_tree: bool, - /// Allow deleting non empty trees + /// Allow deleting non-empty trees pub allow_deleting_non_empty_trees: bool, /// Deleting non empty trees returns error pub deleting_non_empty_trees_returns_error: bool, diff --git a/grovedb/src/batch/single_deletion_cost_tests.rs b/grovedb/src/batch/single_deletion_cost_tests.rs index 593ac04f..fac9682f 100644 --- a/grovedb/src/batch/single_deletion_cost_tests.rs +++ b/grovedb/src/batch/single_deletion_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Tests #[cfg(feature = "full")] @@ -35,6 +7,7 @@ mod tests { Identifier, StorageRemovalPerEpochByIdentifier, StorageRemovedBytes::SectionedStorageRemoval, }; + use grovedb_version::version::GroveVersion; use intmap::IntMap; use crate::{ @@ -45,17 +18,25 @@ mod tests { #[test] fn test_batch_one_deletion_tree_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) .cost_as_result() .expect("expected to insert successfully"); let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -93,7 +74,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -101,6 +82,7 @@ mod tests { #[test] fn test_batch_one_deletion_item_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -110,6 +92,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -117,7 +100,7 @@ mod tests { let tx = 
db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -156,7 +139,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_op(vec![], b"key1".to_vec())]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -164,15 +147,23 @@ mod tests { #[test] fn test_batch_one_deletion_tree_costs_match_non_batch_without_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) .cost_as_result() .expect("expected to insert successfully"); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, None) + .delete(EMPTY_PATH, b"key1", None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -210,13 +201,20 @@ mod tests { let db = make_empty_grovedb(); let _insertion_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) .cost_as_result() .expect("expected to insert successfully"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, None) + .apply_batch(ops, None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -224,6 +222,7 @@ mod tests { #[test] fn test_batch_one_deletion_item_costs_match_non_batch_without_transaction() { + let grove_version = 
GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -233,12 +232,13 @@ mod tests { Element::new_item(b"cat".to_vec()), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, None) + .delete(EMPTY_PATH, b"key1", None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -283,13 +283,14 @@ mod tests { Element::new_item(b"cat".to_vec()), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let ops = vec![GroveDbOp::delete_op(vec![], b"key1".to_vec())]; let batch_cost = db - .apply_batch(ops, None, None) + .apply_batch(ops, None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -297,6 +298,7 @@ mod tests { #[test] fn test_batch_one_deletion_tree_with_flags_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -306,6 +308,7 @@ mod tests { Element::empty_tree_with_flags(Some(b"dog".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -313,7 +316,7 @@ mod tests { let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -356,7 +359,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -365,6 +368,7 @@ mod tests { #[test] fn 
test_batch_one_deletion_tree_with_identity_cost_flags_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -374,6 +378,7 @@ mod tests { Element::empty_tree_with_flags(Some(vec![0, 0])), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -402,6 +407,7 @@ mod tests { let value_sectioned = SectionedStorageRemoval(removed_bytes); Ok((key_sectioned, value_sectioned)) }, + grove_version, ) .cost_as_result() .expect("expected to delete successfully"); @@ -469,6 +475,7 @@ mod tests { Ok((key_sectioned, value_sectioned)) }, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to delete successfully"); @@ -481,6 +488,7 @@ mod tests { #[test] fn test_batch_one_deletion_item_with_flags_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -490,6 +498,7 @@ mod tests { Element::new_item_with_flags(b"cat".to_vec(), Some(b"apple".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -497,7 +506,7 @@ mod tests { let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -536,7 +545,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_op(vec![], b"key1".to_vec())]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -544,6 +553,7 @@ mod tests { #[test] fn test_batch_one_deletion_tree_with_flags_costs_match_non_batch_without_transaction() { + let grove_version = GroveVersion::latest(); let db = 
make_empty_grovedb(); let insertion_cost = db @@ -553,12 +563,13 @@ mod tests { Element::empty_tree_with_flags(Some(b"dog".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, None) + .delete(EMPTY_PATH, b"key1", None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -607,13 +618,14 @@ mod tests { Element::empty_tree_with_flags(Some(b"dog".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, None) + .apply_batch(ops, None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -621,6 +633,7 @@ mod tests { #[test] fn test_batch_one_deletion_item_with_flags_costs_match_non_batch_without_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -630,12 +643,13 @@ mod tests { Element::new_item_with_flags(b"cat".to_vec(), Some(b"apple".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, None) + .delete(EMPTY_PATH, b"key1", None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -680,13 +694,14 @@ mod tests { Element::new_item_with_flags(b"cat".to_vec(), Some(b"apple".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); let ops = vec![GroveDbOp::delete_op(vec![], b"key1".to_vec())]; let batch_cost = db - .apply_batch(ops, None, None) + .apply_batch(ops, None, None, grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, 
batch_cost.storage_cost); diff --git a/grovedb/src/batch/single_insert_cost_tests.rs b/grovedb/src/batch/single_insert_cost_tests.rs index 1dd2d43c..c025fb27 100644 --- a/grovedb/src/batch/single_insert_cost_tests.rs +++ b/grovedb/src/batch/single_insert_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Tests #[cfg(feature = "full")] @@ -43,6 +15,7 @@ mod tests { }, OperationCost, }; + use grovedb_version::version::GroveVersion; use integer_encoding::VarInt; use intmap::IntMap; @@ -54,11 +27,19 @@ mod tests { #[test] fn test_batch_one_insert_costs_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let non_batch_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::insert_op( @@ -66,12 +47,13 @@ mod tests { b"key1".to_vec(), Element::empty_tree(), )]; - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert_eq!(non_batch_cost.storage_cost, cost.storage_cost); } #[test] fn test_batch_root_one_insert_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -80,7 +62,7 @@ mod tests { b"key1".to_vec(), Element::empty_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 113 storage_written_bytes @@ -136,6 +118,7 @@ mod tests { #[test] fn test_batch_root_one_insert_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -144,7 +127,7 @@ mod tests { b"key1".to_vec(), Element::new_item(b"cat".to_vec()), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 214 storage_written_bytes @@ -199,6 +182,7 @@ mod 
tests { #[test] fn test_batch_root_one_insert_tree_under_parent_item_in_same_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -209,6 +193,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("successful root tree leaf insert"); @@ -220,7 +205,7 @@ mod tests { b"key1".to_vec(), Element::empty_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 115 storage_written_bytes @@ -293,19 +278,27 @@ mod tests { #[test] fn test_batch_root_one_insert_tree_under_parent_tree_in_same_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], b"key1".to_vec(), Element::empty_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 113 storage_written_bytes @@ -367,19 +360,27 @@ mod tests { #[test] fn test_batch_root_one_insert_tree_under_parent_tree_in_different_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + 
None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], b"key1".to_vec(), Element::empty_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 113 storage_written_bytes @@ -448,6 +449,7 @@ mod tests { #[test] fn test_batch_root_one_insert_cost_right_below_value_required_cost_of_2() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -456,7 +458,7 @@ mod tests { b"key1".to_vec(), Element::new_item([0u8; 59].to_vec()), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 243 storage_written_bytes @@ -510,6 +512,7 @@ mod tests { #[test] fn test_batch_root_one_insert_cost_right_above_value_required_cost_of_2() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -518,7 +521,7 @@ mod tests { b"key1".to_vec(), Element::new_item([0u8; 60].to_vec()), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 243 storage_written_bytes @@ -572,11 +575,19 @@ mod tests { #[test] fn test_batch_root_one_update_item_bigger_cost_no_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + 
Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -584,6 +595,7 @@ mod tests { Element::new_item_with_flags(b"value1".to_vec(), Some(vec![0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -604,6 +616,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; @@ -628,11 +641,19 @@ mod tests { #[test] fn test_batch_root_one_update_item_bigger_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -640,6 +661,7 @@ mod tests { Element::new_item_with_flags(b"value1".to_vec(), Some(vec![0, 0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -683,6 +705,7 @@ mod tests { )) }, Some(&tx), + grove_version, ) .cost; @@ -707,11 +730,19 @@ mod tests { #[test] fn test_batch_root_one_update_item_smaller_cost_no_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -719,6 +750,7 @@ mod tests { Element::new_item_with_flags(b"value1".to_vec(), Some(vec![0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -742,6 +774,7 @@ mod tests { )) }, Some(&tx), + grove_version, ) .cost; @@ -762,11 +795,19 @@ 
mod tests { #[test] fn test_batch_root_one_update_item_smaller_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -774,6 +815,7 @@ mod tests { Element::new_item_with_flags(b"value1".to_vec(), Some(vec![0, 0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -816,6 +858,7 @@ mod tests { Ok((NoStorageRemoval, SectionedStorageRemoval(removed_bytes))) }, Some(&tx), + grove_version, ) .cost; @@ -842,11 +885,19 @@ mod tests { #[test] fn test_batch_root_one_update_tree_bigger_flags_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -854,6 +905,7 @@ mod tests { Element::new_tree_with_flags(None, Some(vec![0, 0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -895,6 +947,7 @@ mod tests { Ok((NoStorageRemoval, BasicStorageRemoval(removed_value_bytes))) }, Some(&tx), + grove_version, ) .cost; diff --git a/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs b/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs index b049bf50..bf5637d0 100644 --- a/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs +++ b/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs @@ -1,35 +1,8 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// 
Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Tests #[cfg(feature = "full")] mod tests { + use grovedb_version::version::GroveVersion; use crate::{ batch::GroveDbOp, @@ -39,17 +12,25 @@ mod tests { #[test] fn test_batch_one_deletion_sum_tree_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_sum_tree(), None, None) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) .cost_as_result() .expect("expected to insert successfully"); let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -64,7 +45,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -72,6 +53,7 @@ mod tests { #[test] fn test_batch_one_deletion_sum_item_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); db.insert( @@ -80,6 +62,7 @@ mod tests { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("expected to insert sum tree"); @@ -91,6 +74,7 @@ mod tests { Element::new_sum_item(15), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -98,7 +82,13 @@ mod tests { let tx = db.start_transaction(); let non_batch_cost = db - .delete([b"sum_tree".as_slice()].as_ref(), b"key1", None, Some(&tx)) + .delete( + [b"sum_tree".as_slice()].as_ref(), + b"key1", + None, + Some(&tx), + grove_version, + ) .cost_as_result() .expect("expected to delete 
successfully"); @@ -116,7 +106,7 @@ mod tests { b"key1".to_vec(), )]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); @@ -124,6 +114,7 @@ mod tests { #[test] fn test_batch_one_deletion_sum_tree_with_flags_costs_match_non_batch_on_transaction() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let insertion_cost = db @@ -133,6 +124,7 @@ mod tests { Element::empty_sum_tree_with_flags(Some(b"dog".to_vec())), None, None, + grove_version, ) .cost_as_result() .expect("expected to insert successfully"); @@ -140,7 +132,7 @@ mod tests { let tx = db.start_transaction(); let non_batch_cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); @@ -156,7 +148,7 @@ mod tests { tx.rollback().expect("expected to rollback"); let ops = vec![GroveDbOp::delete_tree_op(vec![], b"key1".to_vec(), false)]; let batch_cost = db - .apply_batch(ops, None, Some(&tx)) + .apply_batch(ops, None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete successfully"); assert_eq!(non_batch_cost.storage_cost, batch_cost.storage_cost); diff --git a/grovedb/src/batch/single_sum_item_insert_cost_tests.rs b/grovedb/src/batch/single_sum_item_insert_cost_tests.rs index d1e13fea..0ba3da44 100644 --- a/grovedb/src/batch/single_sum_item_insert_cost_tests.rs +++ b/grovedb/src/batch/single_sum_item_insert_cost_tests.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, 
modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Tests #[cfg(feature = "full")] @@ -34,6 +6,7 @@ mod tests { storage_cost::{removal::StorageRemovedBytes::NoStorageRemoval, StorageCost}, OperationCost, }; + use grovedb_version::version::GroveVersion; use crate::{ batch::GroveDbOp, @@ -43,6 +16,7 @@ mod tests { #[test] fn test_batch_one_sum_item_insert_costs_match_non_batch() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -52,6 +26,7 @@ mod tests { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("expected to insert sum tree"); @@ -63,6 +38,7 @@ mod tests { Element::new_sum_item(150), None, Some(&tx), + grove_version, ) .cost; tx.rollback().expect("expected to rollback"); @@ -71,12 +47,13 @@ mod tests { b"key1".to_vec(), Element::new_sum_item(150), )]; - let cost = db.apply_batch(ops, None, Some(&tx)).cost; + let cost = db.apply_batch(ops, None, Some(&tx), grove_version).cost; assert_eq!(non_batch_cost.storage_cost, cost.storage_cost); } #[test] fn test_batch_one_insert_sum_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = 
db.start_transaction(); @@ -85,7 +62,7 @@ mod tests { b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -142,19 +119,27 @@ mod tests { #[test] fn test_batch_one_insert_sum_tree_under_parent_tree_in_same_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -220,19 +205,27 @@ mod tests { #[test] fn test_batch_one_insert_sum_tree_under_parent_sum_tree_in_same_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_sum_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![], b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); 
cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -298,19 +291,27 @@ mod tests { #[test] fn test_batch_one_insert_sum_tree_under_parent_tree_in_different_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -380,19 +381,27 @@ mod tests { #[test] fn test_batch_one_insert_sum_tree_under_parent_sum_tree_in_different_merk_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"0", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + b"0", + Element::empty_sum_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); let ops = vec![GroveDbOp::insert_op( vec![b"0".to_vec()], b"key1".to_vec(), Element::empty_sum_tree(), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 124 storage_written_bytes @@ -463,6 +472,7 @@ mod tests { #[test] fn 
test_batch_one_insert_sum_item_cost_right_below_value_required_cost_of_2() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -472,6 +482,7 @@ mod tests { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("expected to insert sum tree"); @@ -481,7 +492,7 @@ mod tests { b"key1".to_vec(), Element::new_sum_item_with_flags(15, Some([0; 42].to_vec())), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 243 storage_written_bytes @@ -536,6 +547,7 @@ mod tests { #[test] fn test_batch_one_insert_sum_item_cost_right_above_value_required_cost_of_2() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -545,6 +557,7 @@ mod tests { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("expected to insert sum tree"); @@ -554,7 +567,7 @@ mod tests { b"key1".to_vec(), Element::new_sum_item_with_flags(15, Some([0; 43].to_vec())), )]; - let cost_result = db.apply_batch(ops, None, Some(&tx)); + let cost_result = db.apply_batch(ops, None, Some(&tx), grove_version); cost_result.value.expect("expected to execute batch"); let cost = cost_result.cost; // Explanation for 243 storage_written_bytes @@ -609,11 +622,19 @@ mod tests { #[test] fn test_batch_one_update_sum_item_bigger_no_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_sum_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -621,6 +642,7 @@ mod 
tests { Element::new_sum_item_with_flags(100, None), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -641,6 +663,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; @@ -665,11 +688,19 @@ mod tests { #[test] fn test_batch_one_update_sum_item_bigger_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_sum_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -677,6 +708,7 @@ mod tests { Element::new_sum_item_with_flags(100, Some(vec![0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -697,6 +729,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; @@ -721,11 +754,19 @@ mod tests { #[test] fn test_batch_one_update_sum_item_smaller_no_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_sum_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -733,6 +774,7 @@ mod tests { Element::new_sum_item_with_flags(1000000, None), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -753,6 +795,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; @@ -777,11 +820,19 @@ mod tests { #[test] fn test_batch_one_update_sum_item_smaller_with_flags() { + let grove_version = GroveVersion::latest(); let db = 
make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_sum_tree(), None, None) - .unwrap() - .expect("expected to insert tree"); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to insert tree"); db.insert( [b"tree".as_slice()].as_ref(), @@ -789,6 +840,7 @@ mod tests { Element::new_sum_item_with_flags(10000000, Some(vec![0])), None, None, + grove_version, ) .unwrap() .expect("expected to insert item"); @@ -809,6 +861,7 @@ mod tests { Ok((NoStorageRemoval, NoStorageRemoval)) }, Some(&tx), + grove_version, ) .cost; diff --git a/grovedb/src/debugger.rs b/grovedb/src/debugger.rs index 23acf447..de76c6df 100644 --- a/grovedb/src/debugger.rs +++ b/grovedb/src/debugger.rs @@ -5,6 +5,7 @@ use std::{fs, net::Ipv4Addr, sync::Weak}; use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::post, Json, Router}; use grovedb_merk::debugger::NodeDbg; use grovedb_path::SubtreePath; +use grovedb_version::version::GroveVersion; use grovedbg_types::{NodeFetchRequest, NodeUpdate, Path}; use tokio::{ net::ToSocketAddrs, @@ -85,8 +86,9 @@ async fn fetch_node( return Err(AppError::Closed); }; + // todo: GroveVersion::latest() to actual version let merk = db - .open_non_transactional_merk_at_path(path.as_slice().into(), None) + .open_non_transactional_merk_at_path(path.as_slice().into(), None, GroveVersion::latest()) .unwrap()?; let node = merk.get_node_dbg(&key)?; @@ -106,8 +108,9 @@ async fn fetch_root_node( return Err(AppError::Closed); }; + // todo: GroveVersion::latest() to actual version let merk = db - .open_non_transactional_merk_at_path(SubtreePath::empty(), None) + .open_non_transactional_merk_at_path(SubtreePath::empty(), None, GroveVersion::latest()) .unwrap()?; let node = merk.get_root_node_dbg()?; @@ -129,7 +132,8 @@ fn node_to_update( right_child, }: NodeDbg, ) -> Result { - let grovedb_element = 
crate::Element::deserialize(&value)?; + // todo: GroveVersion::latest() to actual version + let grovedb_element = crate::Element::deserialize(&value, GroveVersion::latest())?; let element = match grovedb_element { crate::Element::Item(value, ..) => grovedbg_types::Element::Item { value }, diff --git a/grovedb/src/element/constructor.rs b/grovedb/src/element/constructor.rs index 09976e98..91143ec8 100644 --- a/grovedb/src/element/constructor.rs +++ b/grovedb/src/element/constructor.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Constructor //! 
Functions for setting an element's type diff --git a/grovedb/src/element/delete.rs b/grovedb/src/element/delete.rs index 92087bc4..9c0879a7 100644 --- a/grovedb/src/element/delete.rs +++ b/grovedb/src/element/delete.rs @@ -1,40 +1,20 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Delete //! 
Implements functions in Element for deleting +#[cfg(feature = "full")] +use grovedb_costs::OperationCost; #[cfg(feature = "full")] use grovedb_costs::{storage_cost::removal::StorageRemovedBytes, CostResult, CostsExt}; #[cfg(feature = "full")] use grovedb_merk::{BatchEntry, Error as MerkError, Merk, MerkOptions, Op}; #[cfg(feature = "full")] use grovedb_storage::StorageContext; +#[cfg(feature = "full")] +use grovedb_version::check_grovedb_v0_with_cost; +#[cfg(feature = "full")] +use grovedb_version::error::GroveVersionError; +#[cfg(feature = "full")] +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use crate::{Element, Error}; @@ -48,7 +28,9 @@ impl Element { merk_options: Option, is_layered: bool, is_sum: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!("delete", grove_version.grovedb_versions.element.delete); let op = match (is_sum, is_layered) { (true, true) => Op::DeleteLayeredMaybeSpecialized, (true, false) => Op::DeleteMaybeSpecialized, @@ -62,10 +44,11 @@ impl Element { &[], merk_options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -86,7 +69,15 @@ impl Element { (StorageRemovedBytes, StorageRemovedBytes), MerkError, >, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "delete_with_sectioned_removal_bytes", + grove_version + .grovedb_versions + .element + .delete_with_sectioned_removal_bytes + ); let op = match (is_in_sum_tree, is_layered) { (true, true) => Op::DeleteLayeredMaybeSpecialized, (true, false) => Op::DeleteMaybeSpecialized, @@ -100,12 +91,13 @@ impl Element { &[], merk_options, &|key, value| { - 
Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), &mut |_costs, _old_value, _value| Ok((false, None)), sectioned_removal, + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -117,7 +109,15 @@ impl Element { is_layered: bool, is_sum: bool, batch_operations: &mut Vec>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "delete_into_batch_operations", + grove_version + .grovedb_versions + .element + .delete_into_batch_operations + ); let op = match (is_sum, is_layered) { (true, true) => Op::DeleteLayeredMaybeSpecialized, (true, false) => Op::DeleteMaybeSpecialized, diff --git a/grovedb/src/element/exists.rs b/grovedb/src/element/exists.rs index c3bf61fa..63dcfe4b 100644 --- a/grovedb/src/element/exists.rs +++ b/grovedb/src/element/exists.rs @@ -1,56 +1,35 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Exists //! Implements in Element functions for checking if stuff exists -#[cfg(feature = "full")] -use grovedb_costs::CostResult; -#[cfg(feature = "full")] +use grovedb_costs::{CostResult, CostsExt, OperationCost}; use grovedb_merk::Merk; -#[cfg(feature = "full")] use grovedb_storage::StorageContext; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; -#[cfg(feature = "full")] use crate::{Element, Error}; impl Element { - #[cfg(feature = "full")] /// Helper function that returns whether an element at the key for the /// element already exists. pub fn element_at_key_already_exists<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( &self, merk: &mut Merk, key: K, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "element_at_key_already_exists", + grove_version + .grovedb_versions + .element + .element_at_key_already_exists + ); merk.exists( key.as_ref(), Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) } diff --git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index 957618d0..1fda91dd 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the 
Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Get //! Implements functions in Element for getting @@ -40,6 +12,9 @@ use grovedb_merk::Merk; use grovedb_merk::{ed::Decode, tree::TreeNodeInner}; #[cfg(feature = "full")] use grovedb_storage::StorageContext; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use integer_encoding::VarInt; use crate::element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}; @@ -54,8 +29,10 @@ impl Element { merk: &Merk, key: K, allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult { - Self::get_optional(merk, key.as_ref(), allow_cache).map(|result| { + check_grovedb_v0_with_cost!("get", grove_version.grovedb_versions.element.get); + Self::get_optional(merk, key.as_ref(), allow_cache, grove_version).map(|result| { let value = result?; value.ok_or_else(|| { Error::PathKeyNotFound(format!( @@ -77,7 +54,12 @@ impl Element { merk: &Merk, key: K, allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_optional", + grove_version.grovedb_versions.element.get_optional + ); let mut cost = OperationCost::default(); let value_opt = cost_return_on_error!( @@ -85,7 +67,8 @@ impl Element { merk.get( key.as_ref(), allow_cache, - 
Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version ) .map_err(|e| Error::CorruptedData(e.to_string())) ); @@ -93,7 +76,7 @@ impl Element { &cost, value_opt .map(|value| { - Self::deserialize(value.as_slice()).map_err(|_| { + Self::deserialize(value.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) }) @@ -110,8 +93,13 @@ impl Element { pub fn get_from_storage<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( storage: &S, key: K, + grove_version: &GroveVersion, ) -> CostResult { - Self::get_optional_from_storage(storage, key.as_ref()).map(|result| { + check_grovedb_v0_with_cost!( + "get_from_storage", + grove_version.grovedb_versions.element.get_from_storage + ); + Self::get_optional_from_storage(storage, key.as_ref(), grove_version).map(|result| { let value = result?; value.ok_or_else(|| { Error::PathKeyNotFound(format!( @@ -128,7 +116,15 @@ impl Element { pub fn get_optional_from_storage<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( storage: &S, key: K, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_optional_from_storage", + grove_version + .grovedb_versions + .element + .get_optional_from_storage + ); let mut cost = OperationCost::default(); let key_ref = key.as_ref(); let node_value_opt = cost_return_on_error!( @@ -153,7 +149,7 @@ impl Element { value .as_ref() .map(|value| { - Self::deserialize(value.as_slice()).map_err(|_| { + Self::deserialize(value.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) }) @@ -214,10 +210,21 @@ impl Element { path: &[&[u8]], key: K, allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "get_with_absolute_refs", + grove_version + .grovedb_versions + .element + .get_with_absolute_refs + ); let mut cost = OperationCost::default(); - let 
element = cost_return_on_error!(&mut cost, Self::get(merk, key.as_ref(), allow_cache)); + let element = cost_return_on_error!( + &mut cost, + Self::get(merk, key.as_ref(), allow_cache, grove_version) + ); let absolute_element = cost_return_on_error_no_add!( &cost, @@ -233,7 +240,12 @@ impl Element { merk: &Merk, key: K, allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_value_hash", + grove_version.grovedb_versions.element.get_value_hash + ); let mut cost = OperationCost::default(); let value_hash = cost_return_on_error!( @@ -241,7 +253,8 @@ impl Element { merk.get_value_hash( key.as_ref(), allow_cache, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version ) .map_err(|e| Error::CorruptedData(e.to_string())) ); @@ -260,6 +273,7 @@ mod tests { #[test] fn test_cache_changes_cost() { + let grove_version = GroveVersion::latest(); let storage = TempStorage::new(); let batch = StorageBatch::new(); let ctx = storage @@ -269,15 +283,16 @@ mod tests { ctx, false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .unwrap() .unwrap(); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"value".to_vec()) - .insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); @@ -293,12 +308,13 @@ mod tests { ctx, false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .unwrap() .unwrap(); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value".to_vec()), @@ -306,14 +322,14 @@ mod tests { // Warm up cache because the Merk was reopened. 
Element::new_item(b"value".to_vec()) - .insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); - let cost_with_cache = Element::get(&merk, b"another-key", true) + let cost_with_cache = Element::get(&merk, b"another-key", true, grove_version) .cost_as_result() .expect("expected to get cost"); - let cost_without_cache = Element::get(&merk, b"another-key", false) + let cost_without_cache = Element::get(&merk, b"another-key", false, grove_version) .cost_as_result() .expect("expected to get cost"); assert_ne!(cost_with_cache, cost_without_cache); diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index 59cc2563..2d2db076 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Helpers //! Implements helper functions in Element @@ -40,6 +12,7 @@ use grovedb_merk::{ TreeFeatureType, TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; #[cfg(feature = "full")] use integer_encoding::VarInt; @@ -216,55 +189,18 @@ impl Element { } } - #[cfg(feature = "full")] - /// Get the size of an element in bytes - #[deprecated] - pub fn byte_size(&self) -> u32 { - match self { - Element::Item(item, element_flag) => { - if let Some(flag) = element_flag { - flag.len() as u32 + item.len() as u32 - } else { - item.len() as u32 - } - } - Element::SumItem(item, element_flag) => { - if let Some(flag) = element_flag { - flag.len() as u32 + item.required_space() as u32 - } else { - item.required_space() as u32 - } - } - Element::Reference(path_reference, _, element_flag) => { - let path_length = path_reference.serialized_size() as u32; - - if let Some(flag) = element_flag { - flag.len() as u32 + path_length - } else { - path_length - } - } - Element::Tree(_, element_flag) => { - if let Some(flag) = element_flag { - flag.len() as u32 + 32 - } else { - 32 - } - } - Element::SumTree(_, _, element_flag) => { - if let Some(flag) = element_flag { - flag.len() as u32 + 32 + 8 - } else { - 32 + 8 - } - } - } - } - #[cfg(feature = "full")] /// Get the required item space - pub fn required_item_space(len: u32, flag_len: u32) -> u32 { - len + len.required_space() as u32 + flag_len + flag_len.required_space() as u32 + 1 + pub fn required_item_space( + len: u32, + flag_len: u32, + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "required_item_space", + 
grove_version.grovedb_versions.element.required_item_space + ); + Ok(len + len.required_space() as u32 + flag_len + flag_len.required_space() as u32 + 1) } #[cfg(feature = "full")] @@ -274,9 +210,9 @@ impl Element { path: &[&[u8]], key: Option<&[u8]>, ) -> Result { - // Convert any non absolute reference type to an absolute one + // Convert any non-absolute reference type to an absolute one // we do this here because references are aggregated first then followed later - // to follow non absolute references, we need the path they are stored at + // to follow non-absolute references, we need the path they are stored at // this information is lost during the aggregation phase. Ok(match &self { Element::Reference(reference_path_type, ..) => match reference_path_type { @@ -304,9 +240,17 @@ impl Element { key: &Vec, value: &[u8], is_sum_node: bool, + grove_version: &GroveVersion, ) -> Result { + check_grovedb_v0!( + "specialized_costs_for_key_value", + grove_version + .grovedb_versions + .element + .specialized_costs_for_key_value + ); // todo: we actually don't need to deserialize the whole element - let element = Element::deserialize(value)?; + let element = Element::deserialize(value, grove_version)?; let cost = match element { Element::Tree(_, flags) => { let flags_len = flags.map_or(0, |flags| { @@ -358,7 +302,11 @@ impl Element { #[cfg(feature = "full")] /// Get tree cost for the element - pub fn get_specialized_cost(&self) -> Result { + pub fn get_specialized_cost(&self, grove_version: &GroveVersion) -> Result { + check_grovedb_v0!( + "get_specialized_cost", + grove_version.grovedb_versions.element.get_specialized_cost + ); match self { Element::Tree(..) => Ok(TREE_COST_SIZE), Element::SumTree(..) 
=> Ok(SUM_TREE_COST_SIZE), @@ -371,8 +319,8 @@ impl Element { #[cfg(feature = "full")] /// Get the value defined cost for a serialized value - pub fn value_defined_cost(&self) -> Option { - let Some(value_cost) = self.get_specialized_cost().ok() else { + pub fn value_defined_cost(&self, grove_version: &GroveVersion) -> Option { + let Some(value_cost) = self.get_specialized_cost(grove_version).ok() else { return None; }; @@ -391,21 +339,25 @@ impl Element { #[cfg(feature = "full")] /// Get the value defined cost for a serialized value - pub fn value_defined_cost_for_serialized_value(value: &[u8]) -> Option { - let element = Element::deserialize(value).ok()?; - element.value_defined_cost() + pub fn value_defined_cost_for_serialized_value( + value: &[u8], + grove_version: &GroveVersion, + ) -> Option { + let element = Element::deserialize(value, grove_version).ok()?; + element.value_defined_cost(grove_version) } } #[cfg(feature = "full")] /// Decode from bytes -pub fn raw_decode(bytes: &[u8]) -> Result { +pub fn raw_decode(bytes: &[u8], grove_version: &GroveVersion) -> Result { let tree = TreeNode::decode_raw( bytes, vec![], Some(Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string()))?; - let element: Element = Element::deserialize(tree.value_as_slice())?; + let element: Element = Element::deserialize(tree.value_as_slice(), grove_version)?; Ok(element) } diff --git a/grovedb/src/element/insert.rs b/grovedb/src/element/insert.rs index 2ba7f92d..ce0144a2 100644 --- a/grovedb/src/element/insert.rs +++ b/grovedb/src/element/insert.rs @@ -1,49 +1,18 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, 
distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Insert //! Implements functions in Element for inserting into Merk -use grovedb_costs::cost_return_on_error_default; -#[cfg(feature = "full")] use grovedb_costs::{ - cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, + cost_return_on_error, cost_return_on_error_default, cost_return_on_error_no_add, CostResult, + CostsExt, OperationCost, }; -#[cfg(feature = "full")] use grovedb_merk::{BatchEntry, Error as MerkError, Merk, MerkOptions, Op, TreeFeatureType}; -#[cfg(feature = "full")] use grovedb_storage::StorageContext; -#[cfg(feature = "full")] +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use integer_encoding::VarInt; -use crate::Element::SumItem; -#[cfg(feature = "full")] -use crate::{Element, Error, Hash}; +use crate::{Element, Element::SumItem, Error, Hash}; impl Element { #[cfg(feature = "full")] @@ -57,8 +26,11 @@ impl Element { merk: &mut Merk, key: K, options: Option, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = cost_return_on_error_default!(self.serialize()); + check_grovedb_v0_with_cost!("insert", 
grove_version.grovedb_versions.element.insert); + + let serialized = cost_return_on_error_default!(self.serialize(grove_version)); if !merk.is_sum_tree && self.is_sum_item() { return Err(Error::InvalidInput("cannot add sum item to non sum tree")) @@ -68,7 +40,8 @@ impl Element { let merk_feature_type = cost_return_on_error_default!(self.get_feature_type(merk.is_sum_tree)); let batch_operations = if matches!(self, SumItem(..)) { - let value_cost = cost_return_on_error_default!(self.get_specialized_cost()); + let value_cost = + cost_return_on_error_default!(self.get_specialized_cost(grove_version)); let cost = value_cost + self.get_flags().as_ref().map_or(0, |flags| { @@ -89,10 +62,11 @@ impl Element { options, &|key, value| { // it is possible that a normal item was being replaced with a - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -105,14 +79,24 @@ impl Element { key: K, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_into_batch_operations + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; let entry = if matches!(self, SumItem(..)) { - let value_cost = cost_return_on_error_default!(self.get_specialized_cost()); + let value_cost = + cost_return_on_error_default!(self.get_specialized_cost(grove_version)); let cost = value_cost + self.get_flags().as_ref().map_or(0, |flags| { @@ -141,14 +125,22 @@ impl Element { merk: &mut Merk, key: &[u8], options: 
Option, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "insert_if_not_exists", + grove_version.grovedb_versions.element.insert_if_not_exists + ); + let mut cost = OperationCost::default(); - let exists = - cost_return_on_error!(&mut cost, self.element_at_key_already_exists(merk, key)); + let exists = cost_return_on_error!( + &mut cost, + self.element_at_key_already_exists(merk, key, grove_version) + ); if exists { Ok(false).wrap_with_cost(cost) } else { - cost_return_on_error!(&mut cost, self.insert(merk, key, options)); + cost_return_on_error!(&mut cost, self.insert(merk, key, options, grove_version)); Ok(true).wrap_with_cost(cost) } } @@ -166,18 +158,32 @@ impl Element { key: K, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "insert_if_not_exists_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_if_not_exists_into_batch_operations + ); + let mut cost = OperationCost::default(); let exists = cost_return_on_error!( &mut cost, - self.element_at_key_already_exists(merk, key.as_ref()) + self.element_at_key_already_exists(merk, key.as_ref(), grove_version) ); if exists { Ok(false).wrap_with_cost(cost) } else { cost_return_on_error!( &mut cost, - self.insert_into_batch_operations(key, batch_operations, feature_type) + self.insert_into_batch_operations( + key, + batch_operations, + feature_type, + grove_version + ) ); Ok(true).wrap_with_cost(cost) } @@ -196,11 +202,20 @@ impl Element { merk: &mut Merk, key: &[u8], options: Option, + grove_version: &GroveVersion, ) -> CostResult<(bool, Option), Error> { + check_grovedb_v0_with_cost!( + "insert_if_changed_value", + grove_version + .grovedb_versions + .element + .insert_if_changed_value + ); + let mut cost = OperationCost::default(); let previous_element = cost_return_on_error!( &mut cost, - Self::get_optional_from_storage(&merk.storage, key) + 
Self::get_optional_from_storage(&merk.storage, key, grove_version) ); let needs_insert = match &previous_element { None => true, @@ -209,7 +224,7 @@ impl Element { if !needs_insert { Ok((false, None)).wrap_with_cost(cost) } else { - cost_return_on_error!(&mut cost, self.insert(merk, key, options)); + cost_return_on_error!(&mut cost, self.insert(merk, key, options, grove_version)); Ok((true, previous_element)).wrap_with_cost(cost) } } @@ -229,11 +244,20 @@ impl Element { key: K, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult<(bool, Option), Error> { + check_grovedb_v0_with_cost!( + "insert_if_changed_value_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_if_changed_value_into_batch_operations + ); + let mut cost = OperationCost::default(); let previous_element = cost_return_on_error!( &mut cost, - Self::get_optional_from_storage(&merk.storage, key.as_ref()) + Self::get_optional_from_storage(&merk.storage, key.as_ref(), grove_version) ); let needs_insert = match &previous_element { None => true, @@ -244,7 +268,12 @@ impl Element { } else { cost_return_on_error!( &mut cost, - self.insert_into_batch_operations(key, batch_operations, feature_type) + self.insert_into_batch_operations( + key, + batch_operations, + feature_type, + grove_version + ) ); Ok((true, previous_element)).wrap_with_cost(cost) } @@ -262,8 +291,14 @@ impl Element { key: K, referenced_value: Hash, options: Option, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_reference", + grove_version.grovedb_versions.element.insert_reference + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; @@ -285,10 +320,11 @@ impl Element { &[], options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) + 
Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) } @@ -302,8 +338,17 @@ impl Element { referenced_value: Hash, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_reference_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_reference_into_batch_operations + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; @@ -328,8 +373,14 @@ impl Element { key: K, subtree_root_hash: Hash, options: Option, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_subtree", + grove_version.grovedb_versions.element.insert_subtree + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; @@ -338,7 +389,8 @@ impl Element { let merk_feature_type = cost_return_on_error_no_add!(&cost, self.get_feature_type(merk.is_sum_tree)); - let tree_cost = cost_return_on_error_no_add!(&cost, self.get_specialized_cost()); + let tree_cost = + cost_return_on_error_no_add!(&cost, self.get_specialized_cost(grove_version)); let cost = tree_cost + self.get_flags().as_ref().map_or(0, |flags| { @@ -355,10 +407,11 @@ impl Element { &[], options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes) + Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| 
Error::CorruptedData(e.to_string())) } @@ -372,13 +425,22 @@ impl Element { is_replace: bool, batch_operations: &mut Vec>, feature_type: TreeFeatureType, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let serialized = match self.serialize() { + check_grovedb_v0_with_cost!( + "insert_subtree_into_batch_operations", + grove_version + .grovedb_versions + .element + .insert_subtree_into_batch_operations + ); + + let serialized = match self.serialize(grove_version) { Ok(s) => s, Err(e) => return Err(e).wrap_with_cost(Default::default()), }; - let tree_cost = cost_return_on_error_default!(self.get_specialized_cost()); + let tree_cost = cost_return_on_error_default!(self.get_specialized_cost(grove_version)); let cost = tree_cost + self.get_flags().as_ref().map_or(0, |flags| { @@ -413,18 +475,19 @@ mod tests { #[test] fn test_success_insert() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"value".to_vec()) - .insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value".to_vec()), @@ -433,30 +496,31 @@ mod tests { #[test] fn test_insert_if_changed_value_does_not_insert_when_value_does_not_change() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"value".to_vec()) - 
.insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); - merk.commit(); + merk.commit(grove_version); let (inserted, previous) = Element::new_item(b"value".to_vec()) - .insert_if_changed_value(&mut merk, b"another-key", None) + .insert_if_changed_value(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); - merk.commit(); + merk.commit(grove_version); assert!(!inserted); assert_eq!(previous, None); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value".to_vec()), @@ -465,16 +529,17 @@ mod tests { #[test] fn test_insert_if_changed_value_inserts_when_value_changed() { + let grove_version = GroveVersion::latest(); let storage = TempStorage::new(); let batch = StorageBatch::new(); - let mut merk = empty_path_merk(&*storage, &batch); + let mut merk = empty_path_merk(&*storage, &batch, grove_version); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"value".to_vec()) - .insert(&mut merk, b"another-key", None) + .insert(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); @@ -484,9 +549,9 @@ mod tests { .unwrap(); let batch = StorageBatch::new(); - let mut merk = empty_path_merk(&*storage, &batch); + let mut merk = empty_path_merk(&*storage, &batch, grove_version); let (inserted, previous) = Element::new_item(b"value2".to_vec()) - .insert_if_changed_value(&mut merk, b"another-key", None) + .insert_if_changed_value(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); @@ -497,10 +562,10 @@ mod tests { .commit_multi_context_batch(batch, None) .unwrap() .unwrap(); - let merk 
= empty_path_merk_read_only(&*storage); + let merk = empty_path_merk_read_only(&*storage, grove_version); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value2".to_vec()), @@ -509,13 +574,14 @@ mod tests { #[test] fn test_insert_if_changed_value_inserts_when_no_value() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); Element::empty_tree() - .insert(&mut merk, b"mykey", None) + .insert(&mut merk, b"mykey", None, grove_version) .unwrap() .expect("expected successful insertion"); let (inserted, previous) = Element::new_item(b"value2".to_vec()) - .insert_if_changed_value(&mut merk, b"another-key", None) + .insert_if_changed_value(&mut merk, b"another-key", None, grove_version) .unwrap() .expect("expected successful insertion 2"); @@ -523,7 +589,7 @@ mod tests { assert_eq!(previous, None); assert_eq!( - Element::get(&merk, b"another-key", true) + Element::get(&merk, b"another-key", true, grove_version) .unwrap() .expect("expected successful get"), Element::new_item(b"value2".to_vec()), diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index 48d9e34d..39c0494c 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// 
shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Query //! Implements functions in Element for querying @@ -46,6 +18,9 @@ use grovedb_merk::proofs::Query; use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::{rocksdb_storage::RocksDbStorage, RawIterator, StorageContext}; +use grovedb_version::{ + check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; #[cfg(feature = "full")] use crate::operations::proof::util::hex_to_ascii; @@ -261,7 +236,13 @@ impl Element { query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "get_query", + grove_version.grovedb_versions.element.get_query + ); + let sized_query = SizedQuery::new(query.clone(), None, None); Element::get_sized_query( storage, @@ -270,6 +251,7 @@ impl Element { query_options, result_type, transaction, + grove_version, ) .map_ok(|(elements, _)| elements) } @@ -282,7 +264,13 @@ impl Element { query: &Query, query_options: QueryOptions, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_query_values", + grove_version.grovedb_versions.element.get_query_values + ); + Element::get_query( storage, merk_path, @@ -290,6 +278,7 @@ impl Element { query_options, QueryElementResultType, transaction, + grove_version, )
.flat_map_ok(|result_items| { let elements: Vec = result_items @@ -315,8 +304,17 @@ impl Element { query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, - add_element_function: fn(PathQueryPushArgs) -> CostResult<(), Error>, + add_element_function: fn(PathQueryPushArgs, &GroveVersion) -> CostResult<(), Error>, + grove_version: &GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { + check_grovedb_v0_with_cost!( + "get_query_apply_function", + grove_version + .grovedb_versions + .element + .get_query_apply_function + ); + let mut cost = OperationCost::default(); let mut results = Vec::new(); @@ -341,6 +339,7 @@ impl Element { query_options, result_type, add_element_function, + grove_version, ) ); if limit == Some(0) { @@ -363,6 +362,7 @@ impl Element { query_options, result_type, add_element_function, + grove_version, ) ); if limit == Some(0) { @@ -388,7 +388,13 @@ impl Element { query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { + check_grovedb_v0_with_cost!( + "get_path_query", + grove_version.grovedb_versions.element.get_path_query + ); + let path_slices = path_query .path .iter() @@ -402,6 +408,7 @@ impl Element { result_type, transaction, Element::path_query_push, + grove_version, ) } @@ -414,7 +421,13 @@ impl Element { query_options: QueryOptions, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { + check_grovedb_v0_with_cost!( + "get_sized_query", + grove_version.grovedb_versions.element.get_sized_query + ); + Element::get_query_apply_function( storage, path, @@ -423,12 +436,21 @@ impl Element { result_type, transaction, Element::path_query_push, + grove_version, ) } #[cfg(feature = "full")] /// Push arguments to path query - fn path_query_push(args: PathQueryPushArgs) -> CostResult<(), Error> { + fn 
path_query_push( + args: PathQueryPushArgs, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "path_query_push", + grove_version.grovedb_versions.element.path_query_push + ); + // println!("path_query_push {} \n", args); let mut cost = OperationCost::default(); @@ -480,7 +502,8 @@ impl Element { &inner_path_query, query_options, result_type, - transaction + transaction, + grove_version, ) ); @@ -514,6 +537,7 @@ impl Element { None, transaction, subtree, + grove_version, { results.push(QueryResultElement::ElementResultItem( cost_return_on_error!( @@ -523,6 +547,7 @@ impl Element { path_vec.as_slice(), subquery_path_last_key.as_slice(), allow_cache, + grove_version, ) ), )); @@ -537,6 +562,7 @@ impl Element { None, transaction, subtree, + grove_version, { results.push(QueryResultElement::KeyElementPairResultItem( ( @@ -548,6 +574,7 @@ impl Element { path_vec.as_slice(), subquery_path_last_key.as_slice(), allow_cache, + grove_version, ) ), ), @@ -563,6 +590,7 @@ impl Element { None, transaction, subtree, + grove_version, { results.push( QueryResultElement::PathKeyElementTrioResultItem(( @@ -575,6 +603,7 @@ impl Element { path_vec.as_slice(), subquery_path_last_key.as_slice(), allow_cache, + grove_version, ) ), )), @@ -599,21 +628,24 @@ impl Element { } else if allow_get_raw { cost_return_on_error_no_add!( &cost, - Element::basic_push(PathQueryPushArgs { - storage, - transaction, - key: Some(key), - element, - path, - subquery_path, - subquery, - left_to_right, - query_options, - result_type, - results, - limit, - offset, - }) + Element::basic_push( + PathQueryPushArgs { + storage, + transaction, + key: Some(key), + element, + path, + subquery_path, + subquery, + left_to_right, + query_options, + result_type, + results, + limit, + offset, + }, + grove_version + ) ); } else { return Err(Error::InvalidPath( @@ -626,21 +658,24 @@ impl Element { } else { cost_return_on_error_no_add!( &cost, - 
Element::basic_push(PathQueryPushArgs { - storage, - transaction, - key, - element, - path, - subquery_path, - subquery, - left_to_right, - query_options, - result_type, - results, - limit, - offset, - }) + Element::basic_push( + PathQueryPushArgs { + storage, + transaction, + key, + element, + path, + subquery_path, + subquery, + left_to_right, + query_options, + result_type, + results, + limit, + offset, + }, + grove_version + ) ); } Ok(()).wrap_with_cost(cost) @@ -649,7 +684,7 @@ impl Element { #[cfg(any(feature = "full", feature = "verify"))] /// Takes a sized query and a key and returns subquery key and subquery as /// tuple - pub fn subquery_paths_and_value_for_sized_query( + fn subquery_paths_and_value_for_sized_query( sized_query: &SizedQuery, key: &[u8], ) -> (Option, Option) { @@ -700,8 +735,14 @@ impl Element { offset: &mut Option, query_options: QueryOptions, result_type: QueryResultType, - add_element_function: fn(PathQueryPushArgs) -> CostResult<(), Error>, + add_element_function: fn(PathQueryPushArgs, &GroveVersion) -> CostResult<(), Error>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "query_item", + grove_version.grovedb_versions.element.query_item + ); + let mut cost = OperationCost::default(); let subtree_path: SubtreePath<_> = path.into(); @@ -716,8 +757,9 @@ impl Element { None, transaction, subtree, + grove_version, { - Element::get(&subtree, key, query_options.allow_cache) + Element::get(&subtree, key, query_options.allow_cache, grove_version) .unwrap_add_cost(&mut cost) } ); @@ -725,21 +767,24 @@ impl Element { Ok(element) => { let (subquery_path, subquery) = Self::subquery_paths_and_value_for_sized_query(sized_query, key); - match add_element_function(PathQueryPushArgs { - storage, - transaction, - key: Some(key.as_slice()), - element, - path, - subquery_path, - subquery, - left_to_right: sized_query.query.left_to_right, - query_options, - result_type, - results, - limit, - offset, - }) + 
match add_element_function( + PathQueryPushArgs { + storage, + transaction, + key: Some(key.as_slice()), + element, + path, + subquery_path, + subquery, + left_to_right: sized_query.query.left_to_right, + query_options, + result_type, + results, + limit, + offset, + }, + grove_version, + ) .unwrap_add_cost(&mut cost) { Ok(_) => Ok(()), @@ -790,7 +835,8 @@ impl Element { raw_decode( iter.value() .unwrap_add_cost(&mut cost) - .expect("if key exists then value should too") + .expect("if key exists then value should too"), + grove_version ) ); let key = iter @@ -799,21 +845,24 @@ impl Element { .expect("key should exist"); let (subquery_path, subquery) = Self::subquery_paths_and_value_for_sized_query(sized_query, key); - let result_with_cost = add_element_function(PathQueryPushArgs { - storage, - transaction, - key: Some(key), - element, - path, - subquery_path, - subquery, - left_to_right: sized_query.query.left_to_right, - query_options, - result_type, - results, - limit, - offset, - }); + let result_with_cost = add_element_function( + PathQueryPushArgs { + storage, + transaction, + key: Some(key), + element, + path, + subquery_path, + subquery, + left_to_right: sized_query.query.left_to_right, + query_options, + result_type, + results, + limit, + offset, + }, + grove_version, + ); let result = result_with_cost.unwrap_add_cost(&mut cost); match result { Ok(x) => x, @@ -843,7 +892,12 @@ impl Element { } #[cfg(feature = "full")] - fn basic_push(args: PathQueryPushArgs) -> Result<(), Error> { + fn basic_push(args: PathQueryPushArgs, grove_version: &GroveVersion) -> Result<(), Error> { + check_grovedb_v0!( + "basic_push", + grove_version.grovedb_versions.element.basic_push + ); + // println!("basic_push {}", args); let PathQueryPushArgs { path, @@ -907,6 +961,7 @@ impl Element { mod tests { use grovedb_merk::proofs::Query; use grovedb_storage::{Storage, StorageBatch}; + use grovedb_version::version::GroveVersion; use crate::{ element::{query::QueryOptions, *}, @@ -920,7 
+975,8 @@ mod tests { #[test] fn test_get_query() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -928,6 +984,7 @@ mod tests { Element::new_item(b"ayyd".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -937,6 +994,7 @@ mod tests { Element::new_item(b"ayyc".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -946,6 +1004,7 @@ mod tests { Element::new_item(b"ayya".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -955,6 +1014,7 @@ mod tests { Element::new_item(b"ayyb".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -965,9 +1025,16 @@ mod tests { query.insert_key(b"a".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, QueryOptions::default(), None) - .unwrap() - .expect("expected successful get_query"), + Element::get_query_values( + &db.db, + &[TEST_LEAF], + &query, + QueryOptions::default(), + None, + grove_version + ) + .unwrap() + .expect("expected successful get_query"), vec![ Element::new_item(b"ayya".to_vec()), Element::new_item(b"ayyc".to_vec()) @@ -979,9 +1046,16 @@ mod tests { query.insert_range(b"b".to_vec()..b"d".to_vec()); query.insert_range(b"a".to_vec()..b"c".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, QueryOptions::default(), None) - .unwrap() - .expect("expected successful get_query"), + Element::get_query_values( + &db.db, + &[TEST_LEAF], + &query, + QueryOptions::default(), + None, + grove_version + ) + .unwrap() + .expect("expected successful get_query"), vec![ Element::new_item(b"ayya".to_vec()), Element::new_item(b"ayyb".to_vec()), @@ -994,9 +1068,16 @@ mod tests { query.insert_range_inclusive(b"b".to_vec()..=b"d".to_vec()); query.insert_range(b"b".to_vec()..b"c".to_vec()); assert_eq!( - 
Element::get_query_values(&db.db, &[TEST_LEAF], &query, QueryOptions::default(), None) - .unwrap() - .expect("expected successful get_query"), + Element::get_query_values( + &db.db, + &[TEST_LEAF], + &query, + QueryOptions::default(), + None, + grove_version + ) + .unwrap() + .expect("expected successful get_query"), vec![ Element::new_item(b"ayyb".to_vec()), Element::new_item(b"ayyc".to_vec()), @@ -1010,9 +1091,16 @@ mod tests { query.insert_range(b"b".to_vec()..b"d".to_vec()); query.insert_range(b"a".to_vec()..b"c".to_vec()); assert_eq!( - Element::get_query_values(&db.db, &[TEST_LEAF], &query, QueryOptions::default(), None) - .unwrap() - .expect("expected successful get_query"), + Element::get_query_values( + &db.db, + &[TEST_LEAF], + &query, + QueryOptions::default(), + None, + grove_version + ) + .unwrap() + .expect("expected successful get_query"), vec![ Element::new_item(b"ayya".to_vec()), Element::new_item(b"ayyb".to_vec()), @@ -1023,7 +1111,8 @@ mod tests { #[test] fn test_get_query_with_path() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -1031,6 +1120,7 @@ mod tests { Element::new_item(b"ayyd".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1040,6 +1130,7 @@ mod tests { Element::new_item(b"ayyc".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1049,6 +1140,7 @@ mod tests { Element::new_item(b"ayya".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1058,6 +1150,7 @@ mod tests { Element::new_item(b"ayyb".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1073,7 +1166,8 @@ mod tests { &query, QueryOptions::default(), QueryPathKeyElementTrioResultType, - None + None, + grove_version ) .unwrap() .expect("expected successful get_query") @@ -1095,29 +1189,34 @@ mod tests { #[test] fn 
test_get_range_query() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let batch = StorageBatch::new(); let storage = &db.db; let mut merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("cannot open Merk"); // TODO implement costs Element::new_item(b"ayyd".to_vec()) - .insert(&mut merk, b"d", None) + .insert(&mut merk, b"d", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayyc".to_vec()) - .insert(&mut merk, b"c", None) + .insert(&mut merk, b"c", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayya".to_vec()) - .insert(&mut merk, b"a", None) + .insert(&mut merk, b"a", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayyb".to_vec()) - .insert(&mut merk, b"b", None) + .insert(&mut merk, b"b", None, grove_version) .unwrap() .expect("expected successful insertion"); @@ -1138,6 +1237,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1172,6 +1272,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1199,30 +1300,35 @@ mod tests { #[test] fn test_get_range_inclusive_query() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let batch = StorageBatch::new(); let storage = &db.db; let mut merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("cannot open Merk"); 
Element::new_item(b"ayyd".to_vec()) - .insert(&mut merk, b"d", None) + .insert(&mut merk, b"d", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayyc".to_vec()) - .insert(&mut merk, b"c", None) + .insert(&mut merk, b"c", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayya".to_vec()) - .insert(&mut merk, b"a", None) + .insert(&mut merk, b"a", None, grove_version) .unwrap() .expect("expected successful insertion"); Element::new_item(b"ayyb".to_vec()) - .insert(&mut merk, b"b", None) + .insert(&mut merk, b"b", None, grove_version) .unwrap() .expect("expected successful insertion"); @@ -1261,6 +1367,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"), @@ -1278,6 +1385,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"), @@ -1298,6 +1406,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"), @@ -1307,7 +1416,8 @@ mod tests { #[test] fn test_get_limit_query() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -1315,6 +1425,7 @@ mod tests { Element::new_item(b"ayyd".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1324,6 +1435,7 @@ mod tests { Element::new_item(b"ayyc".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1333,6 +1445,7 @@ mod tests { Element::new_item(b"ayya".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1342,6 +1455,7 @@ mod tests { Element::new_item(b"ayyb".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert element"); @@ -1360,6 
+1474,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1386,6 +1501,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1407,6 +1523,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1428,6 +1545,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1448,6 +1566,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1473,6 +1592,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1497,6 +1617,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1522,6 +1643,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1547,6 +1669,7 @@ mod tests { QueryOptions::default(), QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_query"); @@ -1572,7 +1695,10 @@ impl ElementsIterator { ElementsIterator { raw_iter } } - pub fn next_element(&mut self) -> CostResult, Error> { + pub fn next_element( + &mut self, + grove_version: &GroveVersion, + ) -> CostResult, Error> { let mut cost = OperationCost::default(); Ok(if self.raw_iter.valid().unwrap_add_cost(&mut cost) { @@ -1582,7 +1708,7 @@ impl ElementsIterator { .unwrap_add_cost(&mut cost) .zip(self.raw_iter.value().unwrap_add_cost(&mut cost)) { - let element = 
cost_return_on_error_no_add!(&cost, raw_decode(value)); + let element = cost_return_on_error_no_add!(&cost, raw_decode(value, grove_version)); let key_vec = key.to_vec(); self.raw_iter.next().unwrap_add_cost(&mut cost); Some((key_vec, element)) diff --git a/grovedb/src/element/serialize.rs b/grovedb/src/element/serialize.rs index ab798054..395fea8d 100644 --- a/grovedb/src/element/serialize.rs +++ b/grovedb/src/element/serialize.rs @@ -1,35 +1,8 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Serialize //! Implements serialization functions in Element use bincode::config; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; #[cfg(any(feature = "full", feature = "verify"))] use crate::{Element, Error}; @@ -37,23 +10,34 @@ use crate::{Element, Error}; impl Element { #[cfg(feature = "full")] /// Serializes self. 
Returns vector of u8s. - pub fn serialize(&self) -> Result, Error> { - let config = bincode::config::standard() - .with_big_endian() - .with_no_limit(); + pub fn serialize(&self, grove_version: &GroveVersion) -> Result, Error> { + check_grovedb_v0!( + "Element::serialize", + grove_version.grovedb_versions.element.serialize + ); + let config = config::standard().with_big_endian().with_no_limit(); bincode::encode_to_vec(self, config) .map_err(|e| Error::CorruptedData(format!("unable to serialize element {}", e))) } #[cfg(feature = "full")] /// Serializes self. Returns usize. - pub fn serialized_size(&self) -> Result { - self.serialize().map(|serialized| serialized.len()) + pub fn serialized_size(&self, grove_version: &GroveVersion) -> Result { + check_grovedb_v0!( + "Element::serialized_size", + grove_version.grovedb_versions.element.serialized_size + ); + self.serialize(grove_version) + .map(|serialized| serialized.len()) } #[cfg(any(feature = "full", feature = "verify"))] /// Deserializes given bytes and sets as self - pub fn deserialize(bytes: &[u8]) -> Result { + pub fn deserialize(bytes: &[u8], grove_version: &GroveVersion) -> Result { + check_grovedb_v0!( + "Element::deserialize", + grove_version.grovedb_versions.element.deserialize + ); let config = config::standard().with_big_endian().with_no_limit(); Ok(bincode::decode_from_slice(bytes, config) .map_err(|e| Error::CorruptedData(format!("unable to deserialize element {}", e)))? 
@@ -71,32 +55,53 @@ mod tests { #[test] fn test_serialization() { + let grove_version = GroveVersion::latest(); let empty_tree = Element::empty_tree(); - let serialized = empty_tree.serialize().expect("expected to serialize"); + let serialized = empty_tree + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 3); - assert_eq!(serialized.len(), empty_tree.serialized_size().unwrap()); + assert_eq!( + serialized.len(), + empty_tree.serialized_size(grove_version).unwrap() + ); // The tree is fixed length 32 bytes, so it's enum 2 then 32 bytes of zeroes assert_eq!(hex::encode(serialized), "020000"); let empty_tree = Element::new_tree_with_flags(None, Some(vec![5])); - let serialized = empty_tree.serialize().expect("expected to serialize"); + let serialized = empty_tree + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 5); - assert_eq!(serialized.len(), empty_tree.serialized_size().unwrap()); + assert_eq!( + serialized.len(), + empty_tree.serialized_size(grove_version).unwrap() + ); assert_eq!(hex::encode(serialized), "0200010105"); let item = Element::new_item(hex::decode("abcdef").expect("expected to decode")); - let serialized = item.serialize().expect("expected to serialize"); + let serialized = item + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 6); - assert_eq!(serialized.len(), item.serialized_size().unwrap()); + assert_eq!( + serialized.len(), + item.serialized_size(grove_version).unwrap() + ); // The item is variable length 3 bytes, so it's enum 2 then 32 bytes of zeroes assert_eq!(hex::encode(serialized), "0003abcdef00"); assert_eq!(hex::encode(5.encode_var_vec()), "0a"); let item = Element::new_sum_item(5); - let serialized = item.serialize().expect("expected to serialize"); + let serialized = item + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 3); - assert_eq!(serialized.len(), 
item.serialized_size().unwrap()); + assert_eq!( + serialized.len(), + item.serialized_size(grove_version).unwrap() + ); // The item is variable length 3 bytes, so it's enum 2 then 32 bytes of zeroes assert_eq!(hex::encode(serialized), "030a00"); @@ -104,9 +109,14 @@ mod tests { hex::decode("abcdef").expect("expected to decode"), Some(vec![1]), ); - let serialized = item.serialize().expect("expected to serialize"); + let serialized = item + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 8); - assert_eq!(serialized.len(), item.serialized_size().unwrap()); + assert_eq!( + serialized.len(), + item.serialized_size(grove_version).unwrap() + ); assert_eq!(hex::encode(serialized), "0003abcdef010101"); let reference = Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ @@ -114,9 +124,14 @@ mod tests { hex::decode("abcd").expect("expected to decode"), vec![5], ])); - let serialized = reference.serialize().expect("expected to serialize"); + let serialized = reference + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 12); - assert_eq!(serialized.len(), reference.serialized_size().unwrap()); + assert_eq!( + serialized.len(), + reference.serialized_size(grove_version).unwrap() + ); // The item is variable length 2 bytes, so it's enum 1 then 1 byte for length, // then 1 byte for 0, then 1 byte 02 for abcd, then 1 byte '1' for 05 assert_eq!(hex::encode(serialized), "010003010002abcd01050000"); @@ -129,9 +144,14 @@ mod tests { ]), Some(vec![1, 2, 3]), ); - let serialized = reference.serialize().expect("expected to serialize"); + let serialized = reference + .serialize(grove_version) + .expect("expected to serialize"); assert_eq!(serialized.len(), 16); - assert_eq!(serialized.len(), reference.serialized_size().unwrap()); + assert_eq!( + serialized.len(), + reference.serialized_size(grove_version).unwrap() + ); assert_eq!(hex::encode(serialized), 
"010003010002abcd0105000103010203"); } } diff --git a/grovedb/src/error.rs b/grovedb/src/error.rs index c430c5ae..0f6cd5d1 100644 --- a/grovedb/src/error.rs +++ b/grovedb/src/error.rs @@ -147,6 +147,11 @@ pub enum Error { #[error("merk error: {0}")] /// Merk error MerkError(grovedb_merk::error::Error), + + // Version errors + #[error(transparent)] + /// Version error + VersionError(grovedb_version::error::GroveVersionError), } impl From for Error { @@ -160,3 +165,9 @@ impl From for Error { Error::MerkError(value) } } + +impl From for Error { + fn from(value: grovedb_version::error::GroveVersionError) -> Self { + Error::VersionError(value) + } +} diff --git a/grovedb/src/estimated_costs/average_case_costs.rs b/grovedb/src/estimated_costs/average_case_costs.rs index 8d803daf..32d5a315 100644 --- a/grovedb/src/estimated_costs/average_case_costs.rs +++ b/grovedb/src/estimated_costs/average_case_costs.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Average case costs //! Implements average case cost functions in GroveDb @@ -44,6 +16,9 @@ use grovedb_merk::{ HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; +use grovedb_version::{ + check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use integer_encoding::VarInt; use crate::{ @@ -59,7 +34,17 @@ impl GroveDb { path: &KeyInfoPath, merk_should_be_empty: bool, is_sum_tree: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_get_merk_at_path", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_get_merk_at_path + ); + cost.seek_count += 1; // If the merk is not empty we load the tree if !merk_should_be_empty { @@ -76,6 +61,8 @@ impl GroveDb { } } *cost += S::get_storage_context_cost(path.as_vec()); + + Ok(()) } /// Add average case for insertion into merk @@ -84,7 +71,17 @@ impl GroveDb { estimated_layer_information: &EstimatedLayerInformation, _is_sum_tree: bool, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_replace_tree", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_replace_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( @@ -124,7 +121,17 @@ impl GroveDb { is_sum_tree: bool, in_tree_using_sums: bool, propagate_if_input: Option<&EstimatedLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_insert_tree", + grove_version + 
.grovedb_versions + .operations + .average_case + .average_case_merk_insert_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_len = flags.as_ref().map_or(0, |flags| { @@ -152,7 +159,17 @@ impl GroveDb { is_sum_tree: bool, estimated_layer_information: &EstimatedLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_delete_tree", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_delete_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( @@ -188,7 +205,17 @@ impl GroveDb { value: &Element, in_tree_using_sums: bool, propagate_for_level: Option<&EstimatedLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_insert_element", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_insert_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -208,7 +235,7 @@ impl GroveDb { _ => add_cost_case_merk_insert( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, in_tree_using_sums, ), }; @@ -228,7 +255,17 @@ impl GroveDb { value: &Element, in_tree_using_sums: bool, propagate_for_level: Option<&EstimatedLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_replace_element", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_replace_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -259,7 +296,7 @@ impl GroveDb { let sum_item_cost_size = if value.is_sum_item() 
{ SUM_ITEM_COST_SIZE } else { - cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32 + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32 }; let value_len = sum_item_cost_size + flags_len; add_cost_case_merk_replace_same_size( @@ -272,7 +309,7 @@ impl GroveDb { _ => add_cost_case_merk_replace_same_size( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, in_tree_using_sums, ), }; @@ -293,7 +330,17 @@ impl GroveDb { change_in_bytes: i32, in_tree_using_sums: bool, propagate_for_level: Option<&EstimatedLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_patch_element", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_patch_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -304,7 +351,8 @@ impl GroveDb { }); // Items need to be always the same serialized size for this to work let item_cost_size = - cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32; + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) + as u32; let value_len = item_cost_size + flags_len; add_cost_case_merk_patch( &mut cost, @@ -332,7 +380,17 @@ impl GroveDb { key: &KeyInfo, estimated_layer_information: &EstimatedLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "average_case_merk_delete_element", + grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_delete_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let value_size = cost_return_on_error_no_add!( @@ -359,7 +417,17 @@ impl GroveDb { key: &KeyInfo, estimated_element_size: u32, in_parent_tree_using_sums: bool, - ) { 
+ grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_has_raw_cost", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_has_raw_cost + ); + let value_size = TreeNode::average_case_encoded_tree_size( key.max_length() as u32, estimated_element_size, @@ -368,6 +436,7 @@ impl GroveDb { cost.seek_count += 1; cost.storage_loaded_bytes += value_size; *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } /// Adds the average case of checking to see if a tree exists @@ -378,7 +447,17 @@ impl GroveDb { estimated_flags_size: u32, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_has_raw_tree_cost", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_has_raw_tree_cost + ); + let estimated_element_size = if is_sum_tree { SUM_TREE_COST_SIZE + estimated_flags_size } else { @@ -390,7 +469,8 @@ impl GroveDb { key, estimated_element_size, in_parent_tree_using_sums, - ); + grove_version, + ) } /// Add average case to get raw cost into merk @@ -400,14 +480,25 @@ impl GroveDb { key: &KeyInfo, estimated_element_size: u32, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_get_raw_cost", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_get_raw_cost + ); + cost.seek_count += 1; add_average_case_get_merk_node( cost, key.max_length() as u32, estimated_element_size, in_parent_tree_using_sums, - ); + ) + .map_err(Error::MerkError) } /// adds the average cost of getting a tree @@ -418,7 +509,17 @@ impl GroveDb { estimated_flags_size: u32, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_get_raw_tree_cost", + grove_version + 
.grovedb_versions + .operations + .average_case + .add_average_case_get_raw_tree_cost + ); + let estimated_element_size = if is_sum_tree { SUM_TREE_COST_SIZE + estimated_flags_size } else { @@ -430,7 +531,8 @@ impl GroveDb { key.max_length() as u32, estimated_element_size, in_parent_tree_using_sums, - ); + ) + .map_err(Error::MerkError) } /// adds the average cost of getting an element knowing there can be @@ -442,7 +544,17 @@ impl GroveDb { in_parent_tree_using_sums: bool, estimated_element_size: u32, estimated_references_sizes: Vec, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_average_case_get_cost", + grove_version + .grovedb_versions + .operations + .average_case + .add_average_case_get_cost + ); + // todo: verify let value_size: u32 = TreeNode::average_case_encoded_tree_size( key.max_length() as u32, @@ -452,6 +564,7 @@ impl GroveDb { cost.seek_count += 1 + estimated_references_sizes.len() as u16; cost.storage_loaded_bytes += value_size + estimated_references_sizes.iter().sum::(); *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } } @@ -467,6 +580,7 @@ mod test { use grovedb_storage::{ rocksdb_storage::RocksDbStorage, worst_case_costs::WorstKeyLength, Storage, StorageBatch, }; + use grovedb_version::version::GroveVersion; use tempfile::TempDir; use crate::{ @@ -477,6 +591,7 @@ mod test { #[test] fn test_get_merk_node_average_case() { + let grove_version = GroveVersion::latest(); // Open a merk and insert 10 elements. 
let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) @@ -488,12 +603,13 @@ mod test { .get_storage_context(EMPTY_PATH, Some(&batch)) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); let merk_batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); @@ -507,7 +623,8 @@ mod test { let merk = Merk::open_base( storage.get_storage_context(EMPTY_PATH, None).unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -520,7 +637,8 @@ mod test { let node_result = merk.get( &8_u64.to_be_bytes(), true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ); // By tweaking the max element size, we can adapt the average case function to @@ -528,33 +646,63 @@ mod test { // (this will be the max_element_size) let mut cost = OperationCost::default(); let key = KnownKey(8_u64.to_be_bytes().to_vec()); - add_average_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false); + add_average_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false) + .expect("expected to add cost"); assert_eq!(cost, node_result.cost); } #[test] fn test_has_raw_average_case() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().unwrap(); let db = GroveDb::open(tmp_dir.path()).unwrap(); // insert empty tree to start - db.insert(EMPTY_PATH, TEST_LEAF, Element::empty_tree(), None, None) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + TEST_LEAF, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); // In this tree, we 
insert 3 items with keys [1, 2, 3] // after tree rotation, 2 will be at the top hence would have both left and // right links this will serve as our average case candidate. let elem = Element::new_item(b"value".to_vec()); - db.insert([TEST_LEAF].as_ref(), &[1], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); - db.insert([TEST_LEAF].as_ref(), &[2], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); - db.insert([TEST_LEAF].as_ref(), &[3], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[1], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[2], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[3], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); let path = KeyInfoPath::from_vec(vec![KnownKey(TEST_LEAF.to_vec())]); let key = KnownKey(vec![1]); @@ -563,11 +711,13 @@ mod test { &mut average_case_has_raw_cost, &path, &key, - elem.serialized_size().expect("expected size") as u32, + elem.serialized_size(grove_version).expect("expected size") as u32, false, - ); + GroveVersion::latest(), + ) + .expect("expected to add cost"); - let actual_cost = db.has_raw([TEST_LEAF].as_ref(), &[2], None); + let actual_cost = db.has_raw([TEST_LEAF].as_ref(), &[2], None, GroveVersion::latest()); assert_eq!(average_case_has_raw_cost, actual_cost.cost); } diff --git a/grovedb/src/estimated_costs/worst_case_costs.rs b/grovedb/src/estimated_costs/worst_case_costs.rs index 106c2bb6..2daf18b6 100644 --- a/grovedb/src/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/estimated_costs/worst_case_costs.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and 
associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Worst case costs //! 
Implements worst case cost functions in GroveDb @@ -46,6 +18,9 @@ use grovedb_merk::{ HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; +use grovedb_version::{ + check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use integer_encoding::VarInt; use crate::{ @@ -62,7 +37,17 @@ impl GroveDb { cost: &mut OperationCost, path: &KeyInfoPath, is_sum_tree: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_worst_case_get_merk_at_path", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_get_merk_at_path + ); + cost.seek_count += 2; match path.last() { None => {} @@ -75,6 +60,7 @@ impl GroveDb { } } *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } /// Add worst case for insertion into merk @@ -84,7 +70,17 @@ impl GroveDb { is_in_parent_sum_tree: bool, worst_case_layer_information: &WorstCaseLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_replace_tree", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_replace_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let tree_cost = if is_sum_tree { @@ -115,7 +111,17 @@ impl GroveDb { is_sum_tree: bool, is_in_parent_sum_tree: bool, propagate_if_input: Option<&WorstCaseLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_insert_tree", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_insert_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_len = flags.as_ref().map_or(0, |flags| { @@ -143,7 +149,17 @@ impl GroveDb { is_sum_tree: bool, worst_case_layer_information: &WorstCaseLayerInformation, propagate: bool, + grove_version: 
&GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_delete_tree", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_delete_tree + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let tree_cost = if is_sum_tree { @@ -170,7 +186,17 @@ impl GroveDb { value: &Element, in_parent_tree_using_sums: bool, propagate_for_level: Option<&WorstCaseLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_insert_element", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_insert_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -195,7 +221,7 @@ impl GroveDb { _ => add_cost_case_merk_insert( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, in_parent_tree_using_sums, ), }; @@ -215,7 +241,17 @@ impl GroveDb { value: &Element, in_parent_tree_using_sums: bool, propagate_for_level: Option<&WorstCaseLayerInformation>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_replace_element", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_replace_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -253,7 +289,7 @@ impl GroveDb { _ => add_cost_case_merk_replace( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32, + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, in_parent_tree_using_sums, ), }; @@ -274,7 +310,17 @@ impl GroveDb { change_in_bytes: i32, in_tree_using_sums: bool, propagate_for_level: Option<&WorstCaseLayerInformation>, + grove_version: &GroveVersion, ) -> 
CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_patch_element", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_patch_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { @@ -285,7 +331,8 @@ impl GroveDb { }); // Items need to be always the same serialized size for this to work let sum_item_cost_size = - cost_return_on_error_no_add!(&cost, value.serialized_size()) as u32; + cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) + as u32; let value_len = sum_item_cost_size + flags_len; add_cost_case_merk_patch( &mut cost, @@ -313,7 +360,17 @@ impl GroveDb { key: &KeyInfo, worst_case_layer_information: &WorstCaseLayerInformation, propagate: bool, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "worst_case_merk_delete_element", + grove_version + .grovedb_versions + .operations + .worst_case + .worst_case_merk_delete_element + ); + let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; add_worst_case_merk_delete(&mut cost, key_len, MERK_BIGGEST_VALUE_SIZE); @@ -333,7 +390,17 @@ impl GroveDb { key: &KeyInfo, max_element_size: u32, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_worst_case_has_raw_cost", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_has_raw_cost + ); + let value_size = TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, max_element_size, @@ -342,6 +409,7 @@ impl GroveDb { cost.seek_count += 1; cost.storage_loaded_bytes += value_size; *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } /// Add worst case cost for get raw tree into merk @@ -351,7 +419,17 @@ impl GroveDb { key: &KeyInfo, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + 
check_grovedb_v0!( + "add_worst_case_get_raw_tree_cost", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_get_raw_tree_cost + ); + cost.seek_count += 1; let tree_cost_size = if is_sum_tree { SUM_TREE_COST_SIZE @@ -363,7 +441,8 @@ impl GroveDb { key.max_length() as u32, tree_cost_size, in_parent_tree_using_sums, - ); + ) + .map_err(Error::MerkError) } /// Add worst case cost for get raw into merk @@ -373,14 +452,25 @@ impl GroveDb { key: &KeyInfo, max_element_size: u32, in_parent_tree_using_sums: bool, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_worst_case_get_raw_cost", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_get_raw_cost + ); + cost.seek_count += 1; add_worst_case_get_merk_node( cost, key.max_length() as u32, max_element_size, in_parent_tree_using_sums, - ); + ) + .map_err(Error::MerkError) } /// Add worst case cost for get into merk @@ -391,7 +481,17 @@ impl GroveDb { max_element_size: u32, in_parent_tree_using_sums: bool, max_references_sizes: Vec, - ) { + grove_version: &GroveVersion, + ) -> Result<(), Error> { + check_grovedb_v0!( + "add_worst_case_get_cost", + grove_version + .grovedb_versions + .operations + .worst_case + .add_worst_case_get_cost + ); + // todo: verify let value_size: u32 = TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, @@ -401,6 +501,7 @@ impl GroveDb { cost.seek_count += 1 + max_references_sizes.len() as u16; cost.storage_loaded_bytes += value_size + max_references_sizes.iter().sum::(); *cost += S::get_storage_context_cost(path.as_vec()); + Ok(()) } } @@ -419,6 +520,7 @@ mod test { worst_case_costs::WorstKeyLength, Storage, StorageBatch, }; + use grovedb_version::version::GroveVersion; use tempfile::TempDir; use crate::{ @@ -429,13 +531,14 @@ mod test { #[test] fn test_get_merk_node_worst_case() { + let grove_version = GroveVersion::latest(); // Open a merk and insert 10 elements. 
let storage = TempStorage::new(); let batch = StorageBatch::new(); - let mut merk = empty_path_merk(&*storage, &batch); + let mut merk = empty_path_merk(&*storage, &batch, grove_version); let merk_batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); @@ -446,7 +549,7 @@ mod test { .unwrap(); // Reopen merk: this time, only root node is loaded to memory - let merk = empty_path_merk_read_only(&*storage); + let merk = empty_path_merk_read_only(&*storage, grove_version); // To simulate worst case, we need to pick a node that: // 1. Is not in memory @@ -456,7 +559,8 @@ mod test { let node_result = merk.get( &8_u64.to_be_bytes(), true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ); // By tweaking the max element size, we can adapt the worst case function to @@ -464,33 +568,63 @@ mod test { // (this will be the max_element_size) let mut cost = OperationCost::default(); let key = KnownKey(8_u64.to_be_bytes().to_vec()); - add_worst_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false); + add_worst_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false) + .expect("no issue with version"); assert_eq!(cost, node_result.cost); } #[test] fn test_has_raw_worst_case() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().unwrap(); let db = GroveDb::open(tmp_dir.path()).unwrap(); // insert empty tree to start - db.insert(EMPTY_PATH, TEST_LEAF, Element::empty_tree(), None, None) - .unwrap() - .expect("successful root tree leaf insert"); + db.insert( + EMPTY_PATH, + TEST_LEAF, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); // In this tree, we insert 3 items with keys [1, 2, 3] // after tree rotation, 2 will be at the top hence would have both left and // right links this will serve as 
our worst case candidate. let elem = Element::new_item(b"value".to_vec()); - db.insert([TEST_LEAF].as_ref(), &[1], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); - db.insert([TEST_LEAF].as_ref(), &[2], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); - db.insert([TEST_LEAF].as_ref(), &[3], elem.clone(), None, None) - .unwrap() - .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[1], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[2], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); + db.insert( + [TEST_LEAF].as_ref(), + &[3], + elem.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("expected insert"); let path = KeyInfoPath::from_vec(vec![KnownKey(TEST_LEAF.to_vec())]); let key = KnownKey(vec![1]); @@ -499,11 +633,13 @@ mod test { &mut worst_case_has_raw_cost, &path, &key, - elem.serialized_size().expect("expected size") as u32, + elem.serialized_size(grove_version).expect("expected size") as u32, false, - ); + GroveVersion::latest(), + ) + .expect("expected to add cost"); - let actual_cost = db.has_raw([TEST_LEAF].as_ref(), &[2], None); + let actual_cost = db.has_raw([TEST_LEAF].as_ref(), &[2], None, GroveVersion::latest()); assert_eq!(worst_case_has_raw_cost, actual_cost.cost); } diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 206ace71..012785ca 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit 
persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! GroveDB is a database that enables cryptographic proofs for complex queries. //! //! # Examples @@ -48,8 +20,11 @@ //! Insert, Update, Delete and Prove elements. //! ``` //! use grovedb::{Element, GroveDb}; +//! use grovedb_version::version::GroveVersion; //! use tempfile::TempDir; //! +//! let grove_version = GroveVersion::latest(); +//! //! // Specify the path where you want to set up the GroveDB instance //! let tmp_dir = TempDir::new().unwrap(); //! let path = tmp_dir.path(); @@ -60,9 +35,16 @@ //! let root_path: &[&[u8]] = &[]; //! //! // Insert new tree to root -//! db.insert(root_path, b"tree1", Element::empty_tree(), None, None) -//! .unwrap() -//! .expect("successful tree insert"); +//! db.insert( +//! root_path, +//! b"tree1", +//! Element::empty_tree(), +//! None, +//! None, +//! grove_version, +//! ) +//! .unwrap() +//! .expect("successful tree insert"); //! //! // Insert key-value 1 into tree1 //! // key - hello, value - world @@ -72,6 +54,7 @@ //! Element::new_item(b"world".to_vec()), //! None, //! None, +//! grove_version, //! ) //! .unwrap() //! .expect("successful key1 insert"); @@ -84,19 +67,20 @@ //! Element::new_item(b"rocks".to_vec()), //! None, //! None, +//! grove_version, //! ) //! .unwrap() //! 
.expect("successful key2 insert"); //! //! // Retrieve inserted elements //! let elem = db -//! .get(&[b"tree1"], b"hello", None) +//! .get(&[b"tree1"], b"hello", None, grove_version) //! .unwrap() //! .expect("successful get"); //! assert_eq!(elem, Element::new_item(b"world".to_vec())); //! //! let elem = db -//! .get(&[b"tree1"], b"grovedb", None) +//! .get(&[b"tree1"], b"grovedb", None, grove_version) //! .unwrap() //! .expect("successful get"); //! assert_eq!(elem, Element::new_item(b"rocks".to_vec())); @@ -109,27 +93,28 @@ //! Element::new_item(b"WORLD".to_vec()), //! None, //! None, +//! grove_version, //! ) //! .unwrap() //! .expect("successful update"); //! //! // Retrieve updated element //! let elem = db -//! .get(&[b"tree1"], b"hello", None) +//! .get(&[b"tree1"], b"hello", None, grove_version) //! .unwrap() //! .expect("successful get"); //! assert_eq!(elem, Element::new_item(b"WORLD".to_vec())); //! //! // Deletion -//! db.delete(&[b"tree1"], b"hello", None, None) +//! db.delete(&[b"tree1"], b"hello", None, None, grove_version) //! .unwrap() //! .expect("successful delete"); -//! let elem_result = db.get(&[b"tree1"], b"hello", None).unwrap(); +//! let elem_result = db.get(&[b"tree1"], b"hello", None, grove_version).unwrap(); //! assert_eq!(elem_result.is_err(), true); //! //! // State Root //! // Get the GroveDB root hash -//! let root_hash = db.root_hash(None).unwrap().unwrap(); +//! let root_hash = db.root_hash(None, grove_version).unwrap().unwrap(); //! assert_eq!( //! hex::encode(root_hash), //! 
"3884be3d197ac49981e54b21ea423351fc4ccdb770aaf7cf40f5e65dc3e2e1aa" @@ -214,6 +199,7 @@ use grovedb_storage::{ }; #[cfg(feature = "full")] use grovedb_storage::{Storage, StorageContext}; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use grovedb_visualize::DebugByteVectors; #[cfg(any(feature = "full", feature = "verify"))] @@ -279,6 +265,7 @@ impl GroveDb { path: SubtreePath<'b, B>, tx: &'db Transaction, batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, ) -> CostResult>, Error> where B: AsRef<[u8]> + 'b, @@ -296,14 +283,16 @@ impl GroveDb { .unwrap_add_cost(&mut cost); let element = cost_return_on_error!( &mut cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|e| { - Error::InvalidParentLayerPath(format!( - "could not get key {} for parent {:?} of subtree: {}", - hex::encode(parent_key), - DebugByteVectors(parent_path.to_vec()), - e - )) - }) + Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( + |e| { + Error::InvalidParentLayerPath(format!( + "could not get key {} for parent {:?} of subtree: {}", + hex::encode(parent_key), + DebugByteVectors(parent_path.to_vec()), + e + )) + } + ) ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) 
= element { @@ -312,6 +301,7 @@ impl GroveDb { root_key, is_sum_tree, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| { Error::CorruptedData("cannot open a subtree with given root key".to_owned()) @@ -328,6 +318,7 @@ impl GroveDb { storage, false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) .add_cost(cost) @@ -340,6 +331,7 @@ impl GroveDb { &'db self, path: SubtreePath<'b, B>, tx: &'tx Transaction<'db>, + grove_version: &GroveVersion, ) -> Result>, Error> where B: AsRef<[u8]> + 'b, @@ -355,7 +347,7 @@ impl GroveDb { .db .get_immediate_storage_context(parent_path.clone(), tx) .unwrap_add_cost(&mut cost); - let element = Element::get_from_storage(&parent_storage, parent_key) + let element = Element::get_from_storage(&parent_storage, parent_key, grove_version) .map_err(|e| { Error::InvalidParentLayerPath(format!( "could not get key {} for parent {:?} of subtree: {}", @@ -372,6 +364,7 @@ impl GroveDb { root_key, is_sum_tree, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| { Error::CorruptedData("cannot open a subtree with given root key".to_owned()) @@ -386,7 +379,8 @@ impl GroveDb { Merk::open_base( storage, false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) .unwrap() @@ -398,6 +392,7 @@ impl GroveDb { &'db self, path: SubtreePath<'b, B>, batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, ) -> CostResult, Error> where B: AsRef<[u8]> + 'b, @@ -416,14 +411,16 @@ impl GroveDb { .unwrap_add_cost(&mut cost); let element = cost_return_on_error!( &mut cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|e| { - Error::InvalidParentLayerPath(format!( - "could not get key {} for parent {:?} of subtree: {}", - 
hex::encode(parent_key), - DebugByteVectors(parent_path.to_vec()), - e - )) - }) + Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( + |e| { + Error::InvalidParentLayerPath(format!( + "could not get key {} for parent {:?} of subtree: {}", + hex::encode(parent_key), + DebugByteVectors(parent_path.to_vec()), + e + )) + } + ) ); let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { @@ -432,6 +429,7 @@ impl GroveDb { root_key, is_sum_tree, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| { Error::CorruptedData("cannot open a subtree with given root key".to_owned()) @@ -448,6 +446,7 @@ impl GroveDb { storage, false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) .add_cost(cost) @@ -461,28 +460,52 @@ impl GroveDb { /// Returns root key of GroveDb. /// Will be `None` if GroveDb is empty. - pub fn root_key(&self, transaction: TransactionArg) -> CostResult, Error> { + pub fn root_key( + &self, + transaction: TransactionArg, + grove_version: &GroveVersion, + ) -> CostResult, Error> { let mut cost = OperationCost { ..Default::default() }; - root_merk_optional_tx!(&mut cost, self.db, None, transaction, subtree, { - let root_key = subtree.root_key().unwrap(); - Ok(root_key).wrap_with_cost(cost) - }) + root_merk_optional_tx!( + &mut cost, + self.db, + None, + transaction, + subtree, + grove_version, + { + let root_key = subtree.root_key().unwrap(); + Ok(root_key).wrap_with_cost(cost) + } + ) } /// Returns root hash of GroveDb. /// Will be `None` if GroveDb is empty. 
- pub fn root_hash(&self, transaction: TransactionArg) -> CostResult { + pub fn root_hash( + &self, + transaction: TransactionArg, + grove_version: &GroveVersion, + ) -> CostResult { let mut cost = OperationCost { ..Default::default() }; - root_merk_optional_tx!(&mut cost, self.db, None, transaction, subtree, { - let root_hash = subtree.root_hash().unwrap_add_cost(&mut cost); - Ok(root_hash).wrap_with_cost(cost) - }) + root_merk_optional_tx!( + &mut cost, + self.db, + None, + transaction, + subtree, + grove_version, + { + let root_hash = subtree.root_hash().unwrap_add_cost(&mut cost); + Ok(root_hash).wrap_with_cost(cost) + } + ) } /// Method to propagate updated subtree key changes one level up inside a @@ -493,6 +516,7 @@ impl GroveDb { mut merk_cache: HashMap, Merk>, path: &SubtreePath<'b, B>, transaction: &Transaction, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); @@ -512,7 +536,8 @@ impl GroveDb { storage_batch, parent_path.clone(), transaction, - false + false, + grove_version, ) ); let (root_hash, root_key, sum) = cost_return_on_error!( @@ -526,7 +551,8 @@ impl GroveDb { parent_key, root_key, root_hash, - sum + sum, + grove_version, ) ); child_tree = parent_tree; @@ -543,6 +569,7 @@ impl GroveDb { path: SubtreePath<'b, B>, transaction: &Transaction, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); @@ -560,7 +587,12 @@ impl GroveDb { while let Some((parent_path, parent_key)) = current_path.derive_parent() { let mut parent_tree: Merk = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(parent_path.clone(), transaction, Some(batch)) + self.open_transactional_merk_at_path( + parent_path.clone(), + transaction, + Some(batch), + grove_version + ) ); let (root_hash, root_key, sum) = cost_return_on_error!( &mut cost, @@ -573,7 +605,8 @@ impl GroveDb { parent_key, root_key, root_hash, - sum + sum, + grove_version, ) 
); child_tree = parent_tree; @@ -588,6 +621,7 @@ impl GroveDb { mut merk_cache: HashMap, Merk>, path: SubtreePath<'b, B>, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); @@ -605,7 +639,11 @@ impl GroveDb { while let Some((parent_path, parent_key)) = current_path.derive_parent() { let mut parent_tree: Merk = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(parent_path.clone(), Some(batch)) + self.open_non_transactional_merk_at_path( + parent_path.clone(), + Some(batch), + grove_version + ) ); let (root_hash, root_key, sum) = cost_return_on_error!( &mut cost, @@ -618,7 +656,8 @@ impl GroveDb { parent_key, root_key, root_hash, - sum + sum, + grove_version, ) ); child_tree = parent_tree; @@ -634,20 +673,27 @@ impl GroveDb { maybe_root_key: Option>, root_tree_hash: Hash, sum: Option, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let key_ref = key.as_ref(); - Self::get_element_from_subtree(parent_tree, key_ref).flat_map_ok(|element| { + Self::get_element_from_subtree(parent_tree, key_ref, grove_version).flat_map_ok(|element| { if let Element::Tree(_, flag) = element { let tree = Element::new_tree_with_flags(maybe_root_key, flag); - tree.insert_subtree(parent_tree, key_ref, root_tree_hash, None) + tree.insert_subtree(parent_tree, key_ref, root_tree_hash, None, grove_version) } else if let Element::SumTree(.., flag) = element { let tree = Element::new_sum_tree_with_flags_and_sum_value( maybe_root_key, sum.unwrap_or_default(), flag, ); - tree.insert_subtree(parent_tree, key.as_ref(), root_tree_hash, None) + tree.insert_subtree( + parent_tree, + key.as_ref(), + root_tree_hash, + None, + grove_version, + ) } else { Err(Error::InvalidPath( "can only propagate on tree items".to_owned(), @@ -670,60 +716,67 @@ impl GroveDb { root_tree_hash: Hash, sum: Option, batch_operations: &mut Vec>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = 
OperationCost::default(); - Self::get_element_from_subtree(parent_tree, key.as_ref()).flat_map_ok(|element| { - if let Element::Tree(_, flag) = element { - let tree = Element::new_tree_with_flags(maybe_root_key, flag); - let merk_feature_type = cost_return_on_error!( - &mut cost, - tree.get_feature_type(parent_tree.is_sum_tree) - .wrap_with_cost(OperationCost::default()) - ); - tree.insert_subtree_into_batch_operations( - key, - root_tree_hash, - true, - batch_operations, - merk_feature_type, - ) - } else if let Element::SumTree(.., flag) = element { - let tree = Element::new_sum_tree_with_flags_and_sum_value( - maybe_root_key, - sum.unwrap_or_default(), - flag, - ); - let merk_feature_type = cost_return_on_error!( - &mut cost, - tree.get_feature_type(parent_tree.is_sum_tree) - .wrap_with_cost(OperationCost::default()) - ); - tree.insert_subtree_into_batch_operations( - key, - root_tree_hash, - true, - batch_operations, - merk_feature_type, - ) - } else { - Err(Error::InvalidPath( - "can only propagate on tree items".to_owned(), - )) - .wrap_with_cost(Default::default()) - } - }) + Self::get_element_from_subtree(parent_tree, key.as_ref(), grove_version).flat_map_ok( + |element| { + if let Element::Tree(_, flag) = element { + let tree = Element::new_tree_with_flags(maybe_root_key, flag); + let merk_feature_type = cost_return_on_error!( + &mut cost, + tree.get_feature_type(parent_tree.is_sum_tree) + .wrap_with_cost(OperationCost::default()) + ); + tree.insert_subtree_into_batch_operations( + key, + root_tree_hash, + true, + batch_operations, + merk_feature_type, + grove_version, + ) + } else if let Element::SumTree(.., flag) = element { + let tree = Element::new_sum_tree_with_flags_and_sum_value( + maybe_root_key, + sum.unwrap_or_default(), + flag, + ); + let merk_feature_type = cost_return_on_error!( + &mut cost, + tree.get_feature_type(parent_tree.is_sum_tree) + .wrap_with_cost(OperationCost::default()) + ); + tree.insert_subtree_into_batch_operations( + key, + 
root_tree_hash, + true, + batch_operations, + merk_feature_type, + grove_version, + ) + } else { + Err(Error::InvalidPath( + "can only propagate on tree items".to_owned(), + )) + .wrap_with_cost(Default::default()) + } + }, + ) } /// Get element from subtree. Return CostResult. fn get_element_from_subtree<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( subtree: &Merk, key: K, + grove_version: &GroveVersion, ) -> CostResult { subtree .get( key.as_ref(), true, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| { Error::InvalidPath("can't find subtree in parent during propagation".to_owned()) @@ -745,7 +798,7 @@ impl GroveDb { }) .flatten() .map_ok(|element_bytes| { - Element::deserialize(&element_bytes).map_err(|_| { + Element::deserialize(&element_bytes, grove_version).map_err(|_| { Error::CorruptedData( "failed to deserialized parent during propagation".to_owned(), ) @@ -768,11 +821,16 @@ impl GroveDb { /// # use std::convert::TryFrom; /// # use tempfile::TempDir; /// # use grovedb_path::SubtreePath; + /// # use grovedb_version::version::GroveVersion; /// # /// # fn main() -> Result<(), Box> { /// use std::option::Option::None; + /// /// + /// /// const TEST_LEAF: &[u8] = b"test_leaf"; /// + /// let grove_version = GroveVersion::latest(); + /// /// let tmp_dir = TempDir::new().unwrap(); /// let mut db = GroveDb::open(tmp_dir.path())?; /// db.insert( @@ -781,6 +839,7 @@ impl GroveDb { /// Element::empty_tree(), /// None, /// None, + /// grove_version, /// ) /// .unwrap()?; /// @@ -793,22 +852,27 @@ impl GroveDb { /// Element::empty_tree(), /// None, /// Some(&tx), + /// grove_version, /// ) /// .unwrap()?; /// /// // This action exists only inside the transaction for now - /// let result = db.get([TEST_LEAF].as_ref(), subtree_key, None).unwrap(); + /// let result = db + /// .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) + /// .unwrap(); /// assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); /// /// // To 
access values inside the transaction, transaction needs to be passed to the `db::get` /// let result_with_transaction = db - /// .get([TEST_LEAF].as_ref(), subtree_key, Some(&tx)) + /// .get([TEST_LEAF].as_ref(), subtree_key, Some(&tx), grove_version) /// .unwrap()?; /// assert_eq!(result_with_transaction, Element::empty_tree()); /// /// // After transaction is committed, the value from it can be accessed normally. - /// db.commit_transaction(tx); - /// let result = db.get([TEST_LEAF].as_ref(), subtree_key, None).unwrap()?; + /// let _ = db.commit_transaction(tx); + /// let result = db + /// .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) + /// .unwrap()?; /// assert_eq!(result, Element::empty_tree()); /// /// # Ok(()) @@ -834,9 +898,10 @@ impl GroveDb { /// Method to visualize hash mismatch after verification pub fn visualize_verify_grovedb( &self, + grove_version: &GroveVersion, ) -> Result, Error> { Ok(self - .verify_grovedb(None)? + .verify_grovedb(None, grove_version)? 
.iter() .map(|(path, (root_hash, expected, actual))| { ( @@ -859,22 +924,29 @@ impl GroveDb { pub fn verify_grovedb( &self, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { if let Some(transaction) = transaction { let root_merk = self - .open_transactional_merk_at_path(SubtreePath::empty(), transaction, None) + .open_transactional_merk_at_path( + SubtreePath::empty(), + transaction, + None, + grove_version, + ) .unwrap()?; self.verify_merk_and_submerks_in_transaction( root_merk, &SubtreePath::empty(), None, transaction, + grove_version, ) } else { let root_merk = self - .open_non_transactional_merk_at_path(SubtreePath::empty(), None) + .open_non_transactional_merk_at_path(SubtreePath::empty(), None, grove_version) .unwrap()?; - self.verify_merk_and_submerks(root_merk, &SubtreePath::empty(), None) + self.verify_merk_and_submerks(root_merk, &SubtreePath::empty(), None, grove_version) } } @@ -885,6 +957,7 @@ impl GroveDb { merk: Merk, path: &SubtreePath, batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { let mut all_query = Query::new(); all_query.insert_all(); @@ -894,13 +967,14 @@ impl GroveDb { let mut element_iterator = KVIterator::new(merk.storage.raw_iter(), &all_query).unwrap(); while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { - let element = raw_decode(&element_value)?; + let element = raw_decode(&element_value, grove_version)?; if element.is_any_tree() { let (kv_value, element_value_hash) = merk .get_value_and_value_hash( &key, true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .map_err(MerkError)? 
@@ -911,7 +985,7 @@ impl GroveDb { let new_path_ref = SubtreePath::from(&new_path); let inner_merk = self - .open_non_transactional_merk_at_path(new_path_ref.clone(), batch) + .open_non_transactional_merk_at_path(new_path_ref.clone(), batch, grove_version) .unwrap()?; let root_hash = inner_merk.root_hash().unwrap(); @@ -924,13 +998,19 @@ impl GroveDb { (root_hash, combined_value_hash, element_value_hash), ); } - issues.extend(self.verify_merk_and_submerks(inner_merk, &new_path_ref, batch)?); + issues.extend(self.verify_merk_and_submerks( + inner_merk, + &new_path_ref, + batch, + grove_version, + )?); } else if element.is_any_item() { let (kv_value, element_value_hash) = merk .get_value_and_value_hash( &key, true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .map_err(MerkError)? @@ -955,6 +1035,7 @@ impl GroveDb { path: &SubtreePath, batch: Option<&'db StorageBatch>, transaction: &Transaction, + grove_version: &GroveVersion, ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { let mut all_query = Query::new(); all_query.insert_all(); @@ -964,13 +1045,14 @@ impl GroveDb { let mut element_iterator = KVIterator::new(merk.storage.raw_iter(), &all_query).unwrap(); while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { - let element = raw_decode(&element_value)?; + let element = raw_decode(&element_value, grove_version)?; if element.is_any_tree() { let (kv_value, element_value_hash) = merk .get_value_and_value_hash( &key, true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .map_err(MerkError)? 
@@ -981,7 +1063,12 @@ impl GroveDb { let new_path_ref = SubtreePath::from(&new_path); let inner_merk = self - .open_transactional_merk_at_path(new_path_ref.clone(), transaction, batch) + .open_transactional_merk_at_path( + new_path_ref.clone(), + transaction, + batch, + grove_version, + ) .unwrap()?; let root_hash = inner_merk.root_hash().unwrap(); @@ -999,13 +1086,15 @@ impl GroveDb { &new_path_ref, batch, transaction, + grove_version, )?); } else if element.is_any_item() { let (kv_value, element_value_hash) = merk .get_value_and_value_hash( &key, true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .map_err(MerkError)? diff --git a/grovedb/src/operations/auxiliary.rs b/grovedb/src/operations/auxiliary.rs index 6f9fd576..516796ed 100644 --- a/grovedb/src/operations/auxiliary.rs +++ b/grovedb/src/operations/auxiliary.rs @@ -37,6 +37,7 @@ use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::StorageContext; use grovedb_storage::{Storage, StorageBatch}; +use grovedb_version::version::GroveVersion; use crate::util::storage_context_optional_tx; #[cfg(feature = "full")] @@ -129,6 +130,7 @@ impl GroveDb { &self, path: &SubtreePath, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult>>, Error> { let mut cost = OperationCost::default(); @@ -153,7 +155,7 @@ impl GroveDb { let storage = storage.unwrap_add_cost(&mut cost); let mut raw_iter = Element::iterator(storage.raw_iter()).unwrap_add_cost(&mut cost); while let Some((key, value)) = - cost_return_on_error!(&mut cost, raw_iter.next_element()) + cost_return_on_error!(&mut cost, raw_iter.next_element(grove_version)) { if value.is_any_tree() { let mut sub_path = q.clone(); diff --git a/grovedb/src/operations/delete/average_case.rs b/grovedb/src/operations/delete/average_case.rs index 5b1dba7c..3ed1abd1 100644 --- a/grovedb/src/operations/delete/average_case.rs +++ b/grovedb/src/operations/delete/average_case.rs @@ 
-1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Average case delete cost use grovedb_costs::{ @@ -39,6 +11,9 @@ use grovedb_merk::{ HASH_LENGTH_U32, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use intmap::IntMap; use crate::{ @@ -58,7 +33,16 @@ impl GroveDb { stop_path_height: Option, validate: bool, estimated_layer_info: IntMap, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "average_case_delete_operations_for_delete_up_tree_while_empty", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .average_case_delete_operations_for_delete_up_tree_while_empty + ); let mut cost = OperationCost::default(); let stop_path_height = stop_path_height.unwrap_or_default(); @@ -134,14 +118,15 @@ impl GroveDb { ); let op = cost_return_on_error!( &mut cost, - Self::average_case_delete_operation_for_delete_internal::( + Self::average_case_delete_operation_for_delete::( &KeyInfoPath::from_vec(path_at_level.to_vec()), key_at_level, is_sum_tree, validate, check_if_tree, except_keys_count, - (key_len, estimated_element_size) + (key_len, estimated_element_size), + grove_version, ) ); ops.push(op); @@ -150,8 +135,8 @@ impl GroveDb { } } - /// Average case delete operation for delete internal - pub fn average_case_delete_operation_for_delete_internal<'db, S: Storage<'db>>( + /// Average case delete operation for delete + pub fn average_case_delete_operation_for_delete<'db, S: Storage<'db>>( path: &KeyInfoPath, key: &KeyInfo, parent_tree_is_sum_tree: bool, @@ -159,24 +144,41 @@ impl GroveDb { check_if_tree: bool, except_keys_count: u16, estimated_key_element_size: EstimatedKeyAndElementSize, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "average_case_delete_operation_for_delete", + grove_version + .grovedb_versions + .operations + .delete + .average_case_delete_operation_for_delete + ); let mut cost = 
OperationCost::default(); if validate { - GroveDb::add_average_case_get_merk_at_path::( - &mut cost, - path, - false, - parent_tree_is_sum_tree, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_average_case_get_merk_at_path::( + &mut cost, + path, + false, + parent_tree_is_sum_tree, + grove_version, + ) ); } if check_if_tree { - GroveDb::add_average_case_get_raw_cost::( - &mut cost, - path, - key, - estimated_key_element_size.1, - parent_tree_is_sum_tree, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_average_case_get_raw_cost::( + &mut cost, + path, + key, + estimated_key_element_size.1, + parent_tree_is_sum_tree, + grove_version, + ) ); } // in the worst case this is a tree diff --git a/grovedb/src/operations/delete/delete_up_tree.rs b/grovedb/src/operations/delete/delete_up_tree.rs index 5e0439b2..dd331b69 100644 --- a/grovedb/src/operations/delete/delete_up_tree.rs +++ b/grovedb/src/operations/delete/delete_up_tree.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Delete up tree use grovedb_costs::{ @@ -34,6 +6,9 @@ use grovedb_costs::{ CostResult, CostsExt, OperationCost, }; use grovedb_path::SubtreePath; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use crate::{ batch::GroveDbOp, operations::delete::DeleteOptions, ElementFlags, Error, GroveDb, @@ -91,11 +66,20 @@ impl GroveDb { key: &[u8], options: &DeleteUpTreeOptions, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .delete_up_tree_while_empty + ); self.delete_up_tree_while_empty_with_sectional_storage( path.into(), key, @@ -107,6 +91,7 @@ impl GroveDb { (BasicStorageRemoval(removed_value_bytes)), )) }, + grove_version, ) } @@ -126,7 +111,16 @@ impl GroveDb { (StorageRemovedBytes, StorageRemovedBytes), Error, >, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .delete_up_tree_while_empty_with_sectional_storage + ); let mut cost = OperationCost::default(); let mut batch_operations: Vec = Vec::new(); @@ -139,6 +133,7 @@ impl GroveDb { None, &mut batch_operations, transaction, + grove_version, ) ); @@ -163,6 +158,7 @@ impl GroveDb { |_, _, _| Ok(false), split_removal_bytes_function, transaction, + grove_version, ) .map_ok(|_| ops_len as u16) } @@ -176,7 +172,16 @@ impl GroveDb { is_known_to_be_subtree_with_sum: Option<(bool, bool)>, mut current_batch_operations: Vec, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, 
Error> { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .delete_operations_for_delete_up_tree_while_empty + ); self.add_delete_operations_for_delete_up_tree_while_empty( path, key, @@ -184,6 +189,7 @@ impl GroveDb { is_known_to_be_subtree_with_sum, &mut current_batch_operations, transaction, + grove_version, ) .map_ok(|ops| ops.unwrap_or_default()) } @@ -198,7 +204,16 @@ impl GroveDb { is_known_to_be_subtree_with_sum: Option<(bool, bool)>, current_batch_operations: &mut Vec, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult>, Error> { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .add_delete_operations_for_delete_up_tree_while_empty + ); let mut cost = OperationCost::default(); if let Some(stop_path_height) = options.stop_path_height { @@ -210,7 +225,7 @@ impl GroveDb { if options.validate_tree_at_path_exists { cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction) + self.check_subtree_exists_path_not_found(path.clone(), transaction, grove_version) ); } if let Some(delete_operation_this_level) = cost_return_on_error!( @@ -222,6 +237,7 @@ impl GroveDb { is_known_to_be_subtree_with_sum, current_batch_operations, transaction, + grove_version, ) ) { let mut delete_operations = vec![delete_operation_this_level.clone()]; @@ -240,6 +256,7 @@ impl GroveDb { None, // todo: maybe we can know this? 
current_batch_operations, transaction, + grove_version, ) ) { delete_operations.append(&mut delete_operations_upper_level); diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index d13fdd61..31d96b85 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Delete operations and costs #[cfg(feature = "estimated_costs")] @@ -55,6 +27,9 @@ use grovedb_storage::{ rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, Storage, StorageBatch, StorageContext, }; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; #[cfg(feature = "full")] use crate::{ @@ -70,7 +45,7 @@ use crate::{raw_decode, util::merk_optional_tx_path_not_empty}; pub struct ClearOptions { /// Check for Subtrees pub check_for_subtrees: bool, - /// Allow deleting non empty trees if we check for subtrees + /// Allow deleting non-empty trees if we check for subtrees pub allow_deleting_subtrees: bool, /// If we check for subtrees, and we don't allow deleting and there are /// some, should we error? @@ -92,7 +67,7 @@ impl Default for ClearOptions { #[derive(Clone)] /// Delete options pub struct DeleteOptions { - /// Allow deleting non empty trees + /// Allow deleting non-empty trees pub allow_deleting_non_empty_trees: bool, /// Deleting non empty trees returns error pub deleting_non_empty_trees_returns_error: bool, @@ -132,11 +107,17 @@ impl GroveDb { key: &[u8], options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "delete", + grove_version.grovedb_versions.operations.delete.delete + ); + let options = options.unwrap_or_default(); let batch = StorageBatch::new(); @@ -153,6 +134,7 @@ impl GroveDb { )) }, &batch, + grove_version, ) .map_ok(|_| ()); @@ -170,12 +152,13 @@ impl GroveDb { path: P, options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result where B: AsRef<[u8]> + 'b, P: Into>, { - self.clear_subtree_with_costs(path, options, transaction) + self.clear_subtree_with_costs(path, options, transaction, grove_version) .unwrap() } @@ -188,11 +171,21 @@ impl GroveDb { path: P, options: Option, transaction: TransactionArg, + 
grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "clear_subtree", + grove_version + .grovedb_versions + .operations + .delete + .clear_subtree + ); + let subtree_path: SubtreePath = path.into(); let mut cost = OperationCost::default(); let batch = StorageBatch::new(); @@ -205,7 +198,8 @@ impl GroveDb { self.open_transactional_merk_at_path( subtree_path.clone(), transaction, - Some(&batch) + Some(&batch), + grove_version, ) ); @@ -220,7 +214,7 @@ impl GroveDb { while let Some((key, element_value)) = element_iterator.next_kv().unwrap_add_cost(&mut cost) { - let element = raw_decode(&element_value).unwrap(); + let element = raw_decode(&element_value, grove_version).unwrap(); if element.is_any_tree() { if options.allow_deleting_subtrees { cost_return_on_error!( @@ -234,6 +228,7 @@ impl GroveDb { ..Default::default() }), Some(transaction), + grove_version, ) ); } else if options.trying_to_clear_with_subtrees_returns_error { @@ -263,12 +258,17 @@ impl GroveDb { subtree_path.clone(), transaction, &batch, + grove_version, ) ); } else { let mut merk_to_clear = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(subtree_path.clone(), Some(&batch)) + self.open_non_transactional_merk_at_path( + subtree_path.clone(), + Some(&batch), + grove_version + ) ); if options.check_for_subtrees { @@ -282,7 +282,7 @@ impl GroveDb { while let Some((key, element_value)) = element_iterator.next_kv().unwrap_add_cost(&mut cost) { - let element = raw_decode(&element_value).unwrap(); + let element = raw_decode(&element_value, grove_version).unwrap(); if options.allow_deleting_subtrees { if element.is_any_tree() { cost_return_on_error!( @@ -295,7 +295,8 @@ impl GroveDb { deleting_non_empty_trees_returns_error: false, ..Default::default() }), - None + None, + grove_version, ) ); } @@ -323,6 +324,7 @@ impl GroveDb { merk_cache, subtree_path.clone(), &batch, + grove_version, ) ); } @@ -352,7 +354,17 @@ impl 
GroveDb { (StorageRemovedBytes, StorageRemovedBytes), Error, >, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "delete_with_sectional_storage_function", + grove_version + .grovedb_versions + .operations + .delete + .delete_with_sectional_storage_function + ); + let options = options.unwrap_or_default(); let batch = StorageBatch::new(); @@ -363,7 +375,7 @@ impl GroveDb { &options, transaction, &mut |value, removed_key_bytes, removed_value_bytes| { - let mut element = Element::deserialize(value.as_slice()) + let mut element = Element::deserialize(value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; let maybe_flags = element.get_flags_mut(); match maybe_flags { @@ -371,7 +383,7 @@ impl GroveDb { BasicStorageRemoval(removed_key_bytes), BasicStorageRemoval(removed_value_bytes), )), - Some(flags) => (split_removal_bytes_function)( + Some(flags) => split_removal_bytes_function( flags, removed_key_bytes, removed_value_bytes, @@ -380,6 +392,7 @@ impl GroveDb { } }, &batch, + grove_version, ) .map_ok(|_| ()); @@ -396,11 +409,21 @@ impl GroveDb { path: P, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "delete_if_empty_tree", + grove_version + .grovedb_versions + .operations + .delete + .delete_if_empty_tree + ); + let batch = StorageBatch::new(); let collect_costs = self.delete_if_empty_tree_with_sectional_storage_function( @@ -410,10 +433,11 @@ impl GroveDb { &mut |_, removed_key_bytes, removed_value_bytes| { Ok(( BasicStorageRemoval(removed_key_bytes), - (BasicStorageRemoval(removed_value_bytes)), + BasicStorageRemoval(removed_value_bytes), )) }, &batch, + grove_version, ); collect_costs.flat_map_ok(|r| { @@ -439,7 +463,17 @@ impl GroveDb { Error, >, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + 
"delete_if_empty_tree_with_sectional_storage_function", + grove_version + .grovedb_versions + .operations + .delete + .delete_if_empty_tree_with_sectional_storage_function + ); + let options = DeleteOptions { allow_deleting_non_empty_trees: false, deleting_non_empty_trees_returns_error: false, @@ -452,7 +486,7 @@ impl GroveDb { &options, transaction, &mut |value, removed_key_bytes, removed_value_bytes| { - let mut element = Element::deserialize(value.as_slice()) + let mut element = Element::deserialize(value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; let maybe_flags = element.get_flags_mut(); match maybe_flags { @@ -460,15 +494,14 @@ impl GroveDb { BasicStorageRemoval(removed_key_bytes), BasicStorageRemoval(removed_value_bytes), )), - Some(flags) => (split_removal_bytes_function)( - flags, - removed_key_bytes, - removed_value_bytes, - ) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())), + Some(flags) => { + split_removal_bytes_function(flags, removed_key_bytes, removed_value_bytes) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + } } }, batch, + grove_version, ) } @@ -481,7 +514,17 @@ impl GroveDb { is_known_to_be_subtree_with_sum: Option<(bool, bool)>, current_batch_operations: &[GroveDbOp], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "delete_operation_for_delete_internal", + grove_version + .grovedb_versions + .operations + .delete + .delete_operation_for_delete_internal + ); + let mut cost = OperationCost::default(); if path.is_root() { @@ -494,14 +537,18 @@ impl GroveDb { if options.validate_tree_at_path_exists { cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction) + self.check_subtree_exists_path_not_found( + path.clone(), + transaction, + grove_version + ) ); } let (is_subtree, is_subtree_with_sum) = match is_known_to_be_subtree_with_sum { None => { let 
element = cost_return_on_error!( &mut cost, - self.get_raw(path.clone(), key.as_ref(), transaction) + self.get_raw(path.clone(), key.as_ref(), transaction, grove_version) ); match element { Element::Tree(..) => (true, false), @@ -537,6 +584,7 @@ impl GroveDb { None, transaction, subtree, + grove_version, { subtree .is_empty_tree_except(batch_deleted_keys) @@ -594,6 +642,7 @@ impl GroveDb { MerkError, >, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult { if let Some(transaction) = transaction { self.delete_internal_on_transaction( @@ -603,9 +652,17 @@ impl GroveDb { transaction, sectioned_removal, batch, + grove_version, ) } else { - self.delete_internal_without_transaction(path, key, options, sectioned_removal, batch) + self.delete_internal_without_transaction( + path, + key, + options, + sectioned_removal, + batch, + grove_version, + ) } } @@ -624,16 +681,31 @@ impl GroveDb { MerkError, >, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "delete_internal_on_transaction", + grove_version + .grovedb_versions + .operations + .delete + .delete_internal_on_transaction + ); + let mut cost = OperationCost::default(); let element = cost_return_on_error!( &mut cost, - self.get_raw(path.clone(), key.as_ref(), Some(transaction)) + self.get_raw(path.clone(), key.as_ref(), Some(transaction), grove_version) ); let mut subtree_to_delete_from = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(path.clone(), transaction, Some(batch)) + self.open_transactional_merk_at_path( + path.clone(), + transaction, + Some(batch), + grove_version + ) ); let uses_sum_tree = subtree_to_delete_from.is_sum_tree; if element.is_any_tree() { @@ -645,7 +717,8 @@ impl GroveDb { self.open_transactional_merk_at_path( subtree_merk_path_ref.clone(), transaction, - Some(batch) + Some(batch), + grove_version, ) ); let is_empty = subtree_of_tree_we_are_deleting @@ -665,7 +738,7 @@ impl GroveDb { } else if 
!is_empty { let subtrees_paths = cost_return_on_error!( &mut cost, - self.find_subtrees(&subtree_merk_path_ref, Some(transaction)) + self.find_subtrees(&subtree_merk_path_ref, Some(transaction), grove_version) ); for subtree_path in subtrees_paths { let p: SubtreePath<_> = subtree_path.as_slice().into(); @@ -696,6 +769,7 @@ impl GroveDb { subtree_to_delete_from.root_key(), element.is_sum_tree(), Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|_| { Error::CorruptedData("cannot open a subtree with given root key".to_owned()) @@ -710,7 +784,8 @@ impl GroveDb { Some(options.as_merk_options()), true, uses_sum_tree, - sectioned_removal + sectioned_removal, + grove_version, ) ); let mut merk_cache: HashMap< @@ -724,7 +799,8 @@ impl GroveDb { batch, merk_cache, &path, - transaction + transaction, + grove_version, ) ); } else { @@ -737,7 +813,8 @@ impl GroveDb { Some(options.as_merk_options()), true, uses_sum_tree, - sectioned_removal + sectioned_removal, + grove_version, ) ); let mut merk_cache: HashMap< @@ -747,7 +824,13 @@ impl GroveDb { merk_cache.insert(path.clone(), subtree_to_delete_from); cost_return_on_error!( &mut cost, - self.propagate_changes_with_transaction(merk_cache, path, transaction, batch) + self.propagate_changes_with_transaction( + merk_cache, + path, + transaction, + batch, + grove_version + ) ); } } else { @@ -760,6 +843,7 @@ impl GroveDb { false, uses_sum_tree, sectioned_removal, + grove_version, ) ); let mut merk_cache: HashMap, Merk> = @@ -767,7 +851,13 @@ impl GroveDb { merk_cache.insert(path.clone(), subtree_to_delete_from); cost_return_on_error!( &mut cost, - self.propagate_changes_with_transaction(merk_cache, path, transaction, batch) + self.propagate_changes_with_transaction( + merk_cache, + path, + transaction, + batch, + grove_version + ) ); } @@ -788,16 +878,28 @@ impl GroveDb { MerkError, >, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + 
"delete_internal_without_transaction", + grove_version + .grovedb_versions + .operations + .delete + .delete_internal_without_transaction + ); + let mut cost = OperationCost::default(); - let element = - cost_return_on_error!(&mut cost, self.get_raw(path.clone(), key.as_ref(), None)); + let element = cost_return_on_error!( + &mut cost, + self.get_raw(path.clone(), key.as_ref(), None, grove_version) + ); let mut merk_cache: HashMap, Merk> = HashMap::default(); let mut subtree_to_delete_from = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path.clone(), Some(batch)) + self.open_non_transactional_merk_at_path(path.clone(), Some(batch), grove_version) ); let uses_sum_tree = subtree_to_delete_from.is_sum_tree; if element.is_any_tree() { @@ -806,7 +908,8 @@ impl GroveDb { &mut cost, self.open_non_transactional_merk_at_path( SubtreePath::from(&subtree_merk_path), - Some(batch) + Some(batch), + grove_version, ) ); let is_empty = subtree_of_tree_we_are_deleting @@ -827,14 +930,18 @@ impl GroveDb { if !is_empty { let subtrees_paths = cost_return_on_error!( &mut cost, - self.find_subtrees(&SubtreePath::from(&subtree_merk_path), None) + self.find_subtrees( + &SubtreePath::from(&subtree_merk_path), + None, + grove_version + ) ); // TODO: dumb traversal should not be tolerated for subtree_path in subtrees_paths.into_iter().rev() { let p: SubtreePath<_> = subtree_path.as_slice().into(); let mut inner_subtree_to_delete_from = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(p, Some(batch)) + self.open_non_transactional_merk_at_path(p, Some(batch), grove_version) ); cost_return_on_error!( &mut cost, @@ -855,6 +962,7 @@ impl GroveDb { true, uses_sum_tree, sectioned_removal, + grove_version, ) ); } @@ -868,13 +976,14 @@ impl GroveDb { false, uses_sum_tree, sectioned_removal, + grove_version, ) ); } merk_cache.insert(path.clone(), subtree_to_delete_from); cost_return_on_error!( &mut cost, - 
self.propagate_changes_without_transaction(merk_cache, path, batch) + self.propagate_changes_without_transaction(merk_cache, path, batch, grove_version) ); Ok(true).wrap_with_cost(cost) @@ -888,6 +997,7 @@ mod tests { storage_cost::{removal::StorageRemovedBytes::BasicStorageRemoval, StorageCost}, OperationCost, }; + use grovedb_version::version::GroveVersion; use pretty_assertions::assert_eq; use crate::{ @@ -900,8 +1010,9 @@ mod tests { #[test] fn test_empty_subtree_deletion_without_transaction() { + let grove_version = GroveVersion::latest(); let _element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // Insert some nested subtrees db.insert( [TEST_LEAF].as_ref(), @@ -909,6 +1020,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -918,32 +1030,51 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); - let root_hash = db.root_hash(None).unwrap().unwrap(); - db.delete([TEST_LEAF].as_ref(), b"key1", None, None) + let root_hash = db.root_hash(None, grove_version).unwrap().unwrap(); + db.delete([TEST_LEAF].as_ref(), b"key1", None, None, grove_version) .unwrap() .expect("unable to delete subtree"); assert!(matches!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap(), + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); // assert_eq!(db.subtrees.len().unwrap(), 3); // TEST_LEAF, ANOTHER_TEST_LEAF // TEST_LEAF.key4 stay - assert!(db.get(EMPTY_PATH, TEST_LEAF, None).unwrap().is_ok()); - assert!(db.get(EMPTY_PATH, ANOTHER_TEST_LEAF, None).unwrap().is_ok()); - assert!(db.get([TEST_LEAF].as_ref(), b"key4", None).unwrap().is_ok()); - assert_ne!(root_hash, db.root_hash(None).unwrap().unwrap()); + assert!(db + .get(EMPTY_PATH, TEST_LEAF, None, grove_version) 
+ .unwrap() + .is_ok()); + assert!(db + .get(EMPTY_PATH, ANOTHER_TEST_LEAF, None, grove_version) + .unwrap() + .is_ok()); + assert!(db + .get([TEST_LEAF].as_ref(), b"key4", None, grove_version) + .unwrap() + .is_ok()); + assert_ne!( + root_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); } #[test] fn test_empty_subtree_deletion_with_transaction() { + let grove_version = GroveVersion::latest(); let _element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); // Insert some nested subtrees @@ -953,6 +1084,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -962,34 +1094,47 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); - db.delete([TEST_LEAF].as_ref(), b"key1", None, Some(&transaction)) - .unwrap() - .expect("unable to delete subtree"); + db.delete( + [TEST_LEAF].as_ref(), + b"key1", + None, + Some(&transaction), + grove_version, + ) + .unwrap() + .expect("unable to delete subtree"); assert!(matches!( db.get( [TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", - Some(&transaction) + Some(&transaction), + grove_version ) .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); transaction.commit().expect("cannot commit transaction"); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key1", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"key1", None, grove_version) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); - assert!(db.get([TEST_LEAF].as_ref(), b"key4", None).unwrap().is_ok()); + assert!(db + .get([TEST_LEAF].as_ref(), b"key4", None, grove_version) + .unwrap() + .is_ok()); } #[test] fn test_subtree_deletion_if_empty_with_transaction() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"value".to_vec()); - let db = make_test_grovedb(); + let db = 
make_test_grovedb(grove_version); let transaction = db.start_transaction(); @@ -1000,6 +1145,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree insert A on level 1"); @@ -1009,6 +1155,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree insert A on level 2"); @@ -1018,6 +1165,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree insert B on level 2"); @@ -1028,6 +1176,7 @@ mod tests { element, None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful value insert"); @@ -1037,6 +1186,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree insert B on level 1"); @@ -1055,7 +1205,12 @@ mod tests { let transaction = db.start_transaction(); let deleted = db - .delete_if_empty_tree([TEST_LEAF].as_ref(), b"level1-A", Some(&transaction)) + .delete_if_empty_tree( + [TEST_LEAF].as_ref(), + b"level1-A", + Some(&transaction), + grove_version, + ) .unwrap() .expect("unable to delete subtree"); assert!(!deleted); @@ -1069,6 +1224,7 @@ mod tests { ..Default::default() }, Some(&transaction), + grove_version, ) .unwrap() .expect("unable to delete subtree"); @@ -1078,7 +1234,8 @@ mod tests { db.get( [TEST_LEAF, b"level1-A", b"level2-A"].as_ref(), b"level3-A", - Some(&transaction) + Some(&transaction), + grove_version ) .unwrap(), Err(Error::PathParentLayerNotFound(_)) @@ -1088,23 +1245,30 @@ mod tests { db.get( [TEST_LEAF, b"level1-A"].as_ref(), b"level2-A", - Some(&transaction) + Some(&transaction), + grove_version ) .unwrap(), Err(Error::PathKeyNotFound(_)) )); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"level1-A", Some(&transaction)) - .unwrap(), + db.get( + [TEST_LEAF].as_ref(), + b"level1-A", + Some(&transaction), + grove_version + ) + .unwrap(), Ok(Element::Tree(..)), )); } 
#[test] fn test_subtree_deletion_if_empty_without_transaction() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"value".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // Insert some nested subtrees db.insert( @@ -1113,6 +1277,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert A on level 1"); @@ -1122,6 +1287,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert A on level 2"); @@ -1131,6 +1297,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert B on level 2"); @@ -1141,6 +1308,7 @@ mod tests { element, None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -1150,6 +1318,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert B on level 1"); @@ -1162,7 +1331,7 @@ mod tests { // Level 3: A: value let deleted = db - .delete_if_empty_tree([TEST_LEAF].as_ref(), b"level1-A", None) + .delete_if_empty_tree([TEST_LEAF].as_ref(), b"level1-A", None, grove_version) .unwrap() .expect("unable to delete subtree"); assert!(!deleted); @@ -1176,6 +1345,7 @@ mod tests { ..Default::default() }, None, + grove_version, ) .unwrap() .expect("unable to delete subtree"); @@ -1186,28 +1356,36 @@ mod tests { [TEST_LEAF, b"level1-A", b"level2-A"].as_ref(), b"level3-A", None, + grove_version ) .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); assert!(matches!( - db.get([TEST_LEAF, b"level1-A"].as_ref(), b"level2-A", None) - .unwrap(), + db.get( + [TEST_LEAF, b"level1-A"].as_ref(), + b"level2-A", + None, + grove_version + ) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"level1-A", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"level1-A", None, grove_version) + .unwrap(), Ok(Element::Tree(..)), 
)); } #[test] fn test_recurring_deletion_through_subtrees_with_transaction() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); // Insert some nested subtrees @@ -1217,6 +1395,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -1226,6 +1405,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -1237,6 +1417,7 @@ mod tests { element, None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful value insert"); @@ -1246,6 +1427,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); @@ -1259,6 +1441,7 @@ mod tests { ..Default::default() }), Some(&transaction), + grove_version, ) .unwrap() .expect("unable to delete subtree"); @@ -1266,26 +1449,29 @@ mod tests { db.get( [TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", - Some(&transaction) + Some(&transaction), + grove_version ) .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); transaction.commit().expect("cannot commit transaction"); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key1", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"key1", None, grove_version) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); - db.get([TEST_LEAF].as_ref(), b"key4", None) + db.get([TEST_LEAF].as_ref(), b"key4", None, grove_version) .unwrap() .expect("expected to get key4"); } #[test] fn test_recurring_deletion_through_subtrees_without_transaction() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // Insert some nested subtrees db.insert( @@ -1294,6 +1480,7 @@ mod tests { 
Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -1303,6 +1490,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -1314,6 +1502,7 @@ mod tests { element, None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -1323,6 +1512,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); @@ -1336,42 +1526,65 @@ mod tests { ..Default::default() }), None, + grove_version, ) .unwrap() .expect("unable to delete subtree"); assert!(matches!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap(), + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); assert!(matches!( - db.get([TEST_LEAF].as_ref(), b"key1", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"key1", None, grove_version) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); - assert!(db.get([TEST_LEAF].as_ref(), b"key4", None).unwrap().is_ok()); + assert!(db + .get([TEST_LEAF].as_ref(), b"key4", None, grove_version) + .unwrap() + .is_ok()); } #[test] fn test_item_deletion() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); - db.insert([TEST_LEAF].as_ref(), b"key", element, None, None) - .unwrap() - .expect("successful insert"); - let root_hash = db.root_hash(None).unwrap().unwrap(); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + element, + None, + None, + grove_version, + ) + .unwrap() + .expect("successful insert"); + let root_hash = db.root_hash(None, grove_version).unwrap().unwrap(); assert!(db - .delete([TEST_LEAF].as_ref(), b"key", None, None) + .delete([TEST_LEAF].as_ref(), b"key", None, None, grove_version) .unwrap() .is_ok()); assert!(matches!( - 
db.get([TEST_LEAF].as_ref(), b"key", None).unwrap(), + db.get([TEST_LEAF].as_ref(), b"key", None, grove_version) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); - assert_ne!(root_hash, db.root_hash(None).unwrap().unwrap()); + assert_ne!( + root_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); } #[test] fn test_delete_one_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1382,12 +1595,13 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); let cost = db - .delete(EMPTY_PATH, b"key1", None, Some(&tx)) + .delete(EMPTY_PATH, b"key1", None, Some(&tx), grove_version) .cost_as_result() .expect("expected to delete"); @@ -1440,6 +1654,7 @@ mod tests { #[test] fn test_delete_one_sum_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1449,6 +1664,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .unwrap() .expect("expected to insert"); @@ -1460,12 +1676,19 @@ mod tests { Element::new_sum_item(15000), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); let cost = db - .delete([b"sum_tree".as_slice()].as_ref(), b"key1", None, Some(&tx)) + .delete( + [b"sum_tree".as_slice()].as_ref(), + b"key1", + None, + Some(&tx), + grove_version, + ) .cost_as_result() .expect("expected to delete"); @@ -1517,6 +1740,7 @@ mod tests { #[test] fn test_delete_one_item_in_sum_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1526,6 +1750,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .unwrap() .expect("expected to insert"); @@ -1537,12 +1762,19 @@ mod tests { Element::new_item(b"hello".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to 
insert"); let cost = db - .delete([b"sum_tree".as_slice()].as_ref(), b"key1", None, Some(&tx)) + .delete( + [b"sum_tree".as_slice()].as_ref(), + b"key1", + None, + Some(&tx), + grove_version, + ) .cost_as_result() .expect("expected to delete"); @@ -1595,9 +1827,10 @@ mod tests { #[test] fn test_subtree_clear() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // Insert some nested subtrees db.insert( @@ -1606,6 +1839,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -1615,6 +1849,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -1626,6 +1861,7 @@ mod tests { element, None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -1635,23 +1871,28 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); let key1_tree = db - .get([TEST_LEAF].as_ref(), b"key1", None) + .get([TEST_LEAF].as_ref(), b"key1", None, grove_version) .unwrap() .unwrap(); assert!(!matches!(key1_tree, Element::Tree(None, _))); let key1_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), None) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + None, + grove_version, + ) .unwrap() .unwrap(); assert_ne!(key1_merk.root_hash().unwrap(), [0; 32]); - let root_hash_before_clear = db.root_hash(None).unwrap().unwrap(); - db.clear_subtree([TEST_LEAF, b"key1"].as_ref(), None, None) + let root_hash_before_clear = db.root_hash(None, grove_version).unwrap().unwrap(); + db.clear_subtree([TEST_LEAF, b"key1"].as_ref(), None, None, grove_version) .expect_err("unable to delete subtree"); let success = db @@ -1663,6 +1904,7 @@ mod tests { trying_to_clear_with_subtrees_returns_error: false, }), None, + 
grove_version, ) .expect("expected no error"); assert!(!success); @@ -1676,34 +1918,44 @@ mod tests { trying_to_clear_with_subtrees_returns_error: false, }), None, + grove_version, ) .expect("unable to delete subtree"); assert!(success); assert!(matches!( - db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap(), Err(Error::PathKeyNotFound(_)) )); assert!(matches!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap(), + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap(), Err(Error::PathParentLayerNotFound(_)) )); let key1_tree = db - .get([TEST_LEAF].as_ref(), b"key1", None) + .get([TEST_LEAF].as_ref(), b"key1", None, grove_version) .unwrap() .unwrap(); assert!(matches!(key1_tree, Element::Tree(None, _))); let key1_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), None) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + None, + grove_version, + ) .unwrap() .unwrap(); assert_eq!(key1_merk.root_hash().unwrap(), [0; 32]); - let root_hash_after_clear = db.root_hash(None).unwrap().unwrap(); + let root_hash_after_clear = db.root_hash(None, grove_version).unwrap().unwrap(); assert_ne!(root_hash_before_clear, root_hash_after_clear); } } diff --git a/grovedb/src/operations/delete/worst_case.rs b/grovedb/src/operations/delete/worst_case.rs index a887a469..b2a50bb2 100644 --- a/grovedb/src/operations/delete/worst_case.rs +++ b/grovedb/src/operations/delete/worst_case.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, 
and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Worst case delete costs use grovedb_costs::{ @@ -35,6 +7,9 @@ use grovedb_merk::{ estimated_costs::worst_case_costs::add_worst_case_cost_for_is_empty_tree_except, tree::kv::KV, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; use intmap::IntMap; use crate::{ @@ -53,7 +28,16 @@ impl GroveDb { validate: bool, intermediate_tree_info: IntMap<(bool, u32)>, max_element_size: u32, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "delete", + grove_version + .grovedb_versions + .operations + .delete_up_tree + .worst_case_delete_operations_for_delete_up_tree_while_empty + ); let mut cost = OperationCost::default(); let stop_path_height = stop_path_height.unwrap_or_default(); @@ -116,14 +100,15 @@ impl GroveDb { ); let op = cost_return_on_error!( &mut cost, - Self::worst_case_delete_operation_for_delete_internal::( + Self::worst_case_delete_operation_for_delete::( &KeyInfoPath::from_vec(path_at_level.to_vec()), key_at_level, is_sum_tree, validate, check_if_tree, except_keys_count, - max_element_size + max_element_size, + 
grove_version ) ); ops.push(op); @@ -132,8 +117,8 @@ impl GroveDb { } } - /// Worst case costs for delete operation for delete internal - pub fn worst_case_delete_operation_for_delete_internal<'db, S: Storage<'db>>( + /// Worst case costs for delete operation for delete + pub fn worst_case_delete_operation_for_delete<'db, S: Storage<'db>>( path: &KeyInfoPath, key: &KeyInfo, parent_tree_is_sum_tree: bool, @@ -141,19 +126,40 @@ impl GroveDb { check_if_tree: bool, except_keys_count: u16, max_element_size: u32, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "worst_case_delete_operation_for_delete", + grove_version + .grovedb_versions + .operations + .delete + .worst_case_delete_operation_for_delete + ); let mut cost = OperationCost::default(); if validate { - GroveDb::add_worst_case_get_merk_at_path::(&mut cost, path, parent_tree_is_sum_tree); + cost_return_on_error_no_add!( + &cost, + GroveDb::add_worst_case_get_merk_at_path::( + &mut cost, + path, + parent_tree_is_sum_tree, + grove_version, + ) + ); } if check_if_tree { - GroveDb::add_worst_case_get_raw_cost::( - &mut cost, - path, - key, - max_element_size, - parent_tree_is_sum_tree, + cost_return_on_error_no_add!( + &cost, + GroveDb::add_worst_case_get_raw_cost::( + &mut cost, + path, + key, + max_element_size, + parent_tree_is_sum_tree, + grove_version, + ) ); } // in the worst case this is a tree diff --git a/grovedb/src/operations/get/average_case.rs b/grovedb/src/operations/get/average_case.rs index 4a6ee2ee..aca4426d 100644 --- a/grovedb/src/operations/get/average_case.rs +++ b/grovedb/src/operations/get/average_case.rs @@ -1,38 +1,12 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, 
-// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Average case get costs #[cfg(feature = "full")] use grovedb_costs::OperationCost; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::RocksDbStorage; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use crate::Error; #[cfg(feature = "full")] use crate::{ batch::{key_info::KeyInfo, KeyInfoPath}, @@ -48,7 +22,16 @@ impl GroveDb { key: &KeyInfo, estimated_element_size: u32, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "average_case_for_has_raw", + grove_version + .grovedb_versions + .operations + .get + .average_case_for_has_raw + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_has_raw_cost::( &mut cost, @@ -56,8 +39,9 @@ impl GroveDb { key, estimated_element_size, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Get the Operation Cost for a has query where we estimate that we @@ -68,7 +52,16 @@ impl GroveDb { estimated_flags_size: u32, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + 
check_grovedb_v0!( + "average_case_for_has_raw_tree", + grove_version + .grovedb_versions + .operations + .get + .average_case_for_has_raw_tree + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_has_raw_tree_cost::( &mut cost, @@ -77,8 +70,9 @@ impl GroveDb { estimated_flags_size, is_sum_tree, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Get the Operation Cost for a get query that doesn't follow @@ -88,7 +82,16 @@ impl GroveDb { key: &KeyInfo, estimated_element_size: u32, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "average_case_for_get_raw", + grove_version + .grovedb_versions + .operations + .get + .average_case_for_get_raw + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_get_raw_cost::( &mut cost, @@ -96,8 +99,9 @@ impl GroveDb { key, estimated_element_size, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Get the Operation Cost for a get query with the following parameters @@ -107,7 +111,16 @@ impl GroveDb { in_parent_tree_using_sums: bool, estimated_element_size: u32, estimated_references_sizes: Vec, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "average_case_for_get", + grove_version + .grovedb_versions + .operations + .get + .average_case_for_get + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_get_cost::( &mut cost, @@ -116,8 +129,9 @@ impl GroveDb { in_parent_tree_using_sums, estimated_element_size, estimated_references_sizes, - ); - cost + grove_version, + )?; + Ok(cost) } /// Get the Operation Cost for a get query with the following parameters @@ -127,7 +141,16 @@ impl GroveDb { estimated_flags_size: u32, is_sum_tree: bool, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "average_case_for_get", + grove_version + 
.grovedb_versions + .operations + .get + .average_case_for_get_tree + ); let mut cost = OperationCost::default(); GroveDb::add_average_case_get_raw_tree_cost::( &mut cost, @@ -136,7 +159,8 @@ impl GroveDb { estimated_flags_size, is_sum_tree, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } } diff --git a/grovedb/src/operations/get/mod.rs b/grovedb/src/operations/get/mod.rs index 4cc9f949..b6289699 100644 --- a/grovedb/src/operations/get/mod.rs +++ b/grovedb/src/operations/get/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Get operations and costs #[cfg(feature = "estimated_costs")] @@ -46,6 +18,9 @@ use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; use grovedb_path::SubtreePath; #[cfg(feature = "full")] use grovedb_storage::StorageContext; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; #[cfg(feature = "full")] use crate::{ @@ -68,12 +43,15 @@ impl GroveDb { path: P, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { - self.get_caching_optional(path.into(), key, true, transaction) + check_grovedb_v0_with_cost!("get", grove_version.grovedb_versions.operations.get.get); + + self.get_caching_optional(path.into(), key, true, transaction, grove_version) } /// Get an element from the backing store @@ -84,12 +62,28 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "get_caching_optional", + grove_version + .grovedb_versions + .operations + .get + .get_caching_optional + ); + let mut cost = OperationCost::default(); match cost_return_on_error!( &mut cost, - self.get_raw_caching_optional(path.clone(), key, allow_cache, transaction) + self.get_raw_caching_optional( + path.clone(), + key, + allow_cache, + transaction, + grove_version + ) ) { Element::Reference(reference_path, ..) 
=> { let path_owned = cost_return_on_error!( @@ -97,8 +91,13 @@ impl GroveDb { path_from_reference_path_type(reference_path, &path.to_vec(), Some(key)) .wrap_with_cost(OperationCost::default()) ); - self.follow_reference(path_owned.as_slice().into(), allow_cache, transaction) - .add_cost(cost) + self.follow_reference( + path_owned.as_slice().into(), + allow_cache, + transaction, + grove_version, + ) + .add_cost(cost) } other => Ok(other).wrap_with_cost(cost), } @@ -112,7 +111,17 @@ impl GroveDb { path: SubtreePath, allow_cache: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "follow_reference", + grove_version + .grovedb_versions + .operations + .get + .follow_reference + ); + let mut cost = OperationCost::default(); let mut hops_left = MAX_REFERENCE_HOPS; @@ -128,19 +137,25 @@ impl GroveDb { if let Some((key, path_slice)) = current_path.split_last() { current_element = cost_return_on_error!( &mut cost, - self.get_raw_caching_optional(path_slice.into(), key, allow_cache, transaction) - .map_err(|e| match e { - Error::PathParentLayerNotFound(p) => { - Error::CorruptedReferencePathParentLayerNotFound(p) - } - Error::PathKeyNotFound(p) => { - Error::CorruptedReferencePathKeyNotFound(p) - } - Error::PathNotFound(p) => { - Error::CorruptedReferencePathNotFound(p) - } - _ => e, - }) + self.get_raw_caching_optional( + path_slice.into(), + key, + allow_cache, + transaction, + grove_version + ) + .map_err(|e| match e { + Error::PathParentLayerNotFound(p) => { + Error::CorruptedReferencePathParentLayerNotFound(p) + } + Error::PathKeyNotFound(p) => { + Error::CorruptedReferencePathKeyNotFound(p) + } + Error::PathNotFound(p) => { + Error::CorruptedReferencePathNotFound(p) + } + _ => e, + }) ) } else { return Err(Error::CorruptedPath("empty path".to_string())).wrap_with_cost(cost); @@ -168,8 +183,14 @@ impl GroveDb { path: SubtreePath, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) 
-> CostResult { - self.get_raw_caching_optional(path, key, true, transaction) + check_grovedb_v0_with_cost!( + "get_raw", + grove_version.grovedb_versions.operations.get.get_raw + ); + + self.get_raw_caching_optional(path, key, true, transaction, grove_version) } /// Get tree item without following references @@ -179,11 +200,27 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult { + check_grovedb_v0_with_cost!( + "get_raw_caching_optional", + grove_version + .grovedb_versions + .operations + .get + .get_raw_caching_optional + ); + if let Some(transaction) = transaction { - self.get_raw_on_transaction_caching_optional(path, key, allow_cache, transaction) + self.get_raw_on_transaction_caching_optional( + path, + key, + allow_cache, + transaction, + grove_version, + ) } else { - self.get_raw_without_transaction_caching_optional(path, key, allow_cache) + self.get_raw_without_transaction_caching_optional(path, key, allow_cache, grove_version) } } @@ -195,8 +232,18 @@ impl GroveDb { path: SubtreePath, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { - self.get_raw_optional_caching_optional(path, key, true, transaction) + check_grovedb_v0_with_cost!( + "get_raw_optional", + grove_version + .grovedb_versions + .operations + .get + .get_raw_optional + ); + + self.get_raw_optional_caching_optional(path, key, true, transaction, grove_version) } /// Get tree item without following references @@ -206,16 +253,32 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_raw_optional_caching_optional", + grove_version + .grovedb_versions + .operations + .get + .get_raw_optional_caching_optional + ); + if let Some(transaction) = transaction { self.get_raw_optional_on_transaction_caching_optional( path, key, allow_cache, transaction, + 
grove_version, ) } else { - self.get_raw_optional_without_transaction_caching_optional(path, key, allow_cache) + self.get_raw_optional_without_transaction_caching_optional( + path, + key, + allow_cache, + grove_version, + ) } } @@ -226,12 +289,13 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: &Transaction, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); let merk_to_get_from = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(path, transaction, None) + self.open_transactional_merk_at_path(path, transaction, None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => { Error::PathParentLayerNotFound(s) @@ -240,7 +304,7 @@ impl GroveDb { }) ); - Element::get(&merk_to_get_from, key, allow_cache).add_cost(cost) + Element::get(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) } /// Get tree item without following references @@ -250,10 +314,11 @@ impl GroveDb { key: &[u8], allow_cache: bool, transaction: &Transaction, + grove_version: &GroveVersion, ) -> CostResult, Error> { let mut cost = OperationCost::default(); let merk_result = self - .open_transactional_merk_at_path(path, transaction, None) + .open_transactional_merk_at_path(path, transaction, None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => Error::PathParentLayerNotFound(s), _ => e, @@ -270,7 +335,7 @@ impl GroveDb { ); if let Some(merk_to_get_from) = merk { - Element::get_optional(&merk_to_get_from, key, allow_cache).add_cost(cost) + Element::get_optional(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) } else { Ok(None).wrap_with_cost(cost) } @@ -282,12 +347,13 @@ impl GroveDb { path: SubtreePath, key: &[u8], allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); let merk_to_get_from = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path, None) + 
self.open_non_transactional_merk_at_path(path, None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => { Error::PathParentLayerNotFound(s) @@ -296,7 +362,7 @@ impl GroveDb { }) ); - Element::get(&merk_to_get_from, key, allow_cache).add_cost(cost) + Element::get(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) } /// Get tree item without following references @@ -305,11 +371,12 @@ impl GroveDb { path: SubtreePath, key: &[u8], allow_cache: bool, + grove_version: &GroveVersion, ) -> CostResult, Error> { let mut cost = OperationCost::default(); let merk_result = self - .open_non_transactional_merk_at_path(path, None) + .open_non_transactional_merk_at_path(path, None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => Error::PathParentLayerNotFound(s), _ => e, @@ -326,7 +393,7 @@ impl GroveDb { ); if let Some(merk_to_get_from) = merk { - Element::get_optional(&merk_to_get_from, key, allow_cache).add_cost(cost) + Element::get_optional(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) } else { Ok(None).wrap_with_cost(cost) } @@ -339,11 +406,17 @@ impl GroveDb { path: P, key: &[u8], transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "has_raw", + grove_version.grovedb_versions.operations.get.has_raw + ); + // Merk's items should be written into data storage and checked accordingly storage_context_optional_tx!(self.db, path.into(), None, transaction, storage, { storage.flat_map(|s| s.get(key).map_err(|e| e.into()).map_ok(|x| x.is_some())) @@ -355,6 +428,7 @@ impl GroveDb { path: SubtreePath, transaction: TransactionArg, error_fn: impl FnOnce() -> Error, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); @@ -362,17 +436,22 @@ impl GroveDb { let element = if let Some(transaction) = transaction { let merk_to_get_from = cost_return_on_error!( &mut cost, 
- self.open_transactional_merk_at_path(parent_path, transaction, None) + self.open_transactional_merk_at_path( + parent_path, + transaction, + None, + grove_version + ) ); - Element::get(&merk_to_get_from, parent_key, true) + Element::get(&merk_to_get_from, parent_key, true, grove_version) } else { let merk_to_get_from = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(parent_path, None) + self.open_non_transactional_merk_at_path(parent_path, None, grove_version) ); - Element::get(&merk_to_get_from, parent_key, true) + Element::get(&merk_to_get_from, parent_key, true, grove_version) } .unwrap_add_cost(&mut cost); match element { @@ -390,19 +469,25 @@ impl GroveDb { &self, path: SubtreePath<'b, B>, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where B: AsRef<[u8]> + 'b, { - self.check_subtree_exists(path.clone(), transaction, || { - Error::PathNotFound(format!( - "subtree doesn't exist at path {:?}", - path.to_vec() - .into_iter() - .map(hex::encode) - .collect::>() - )) - }) + self.check_subtree_exists( + path.clone(), + transaction, + || { + Error::PathNotFound(format!( + "subtree doesn't exist at path {:?}", + path.to_vec() + .into_iter() + .map(hex::encode) + .collect::>() + )) + }, + grove_version, + ) } /// Check subtree exists with invalid path error @@ -410,9 +495,22 @@ impl GroveDb { &self, path: SubtreePath, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { - self.check_subtree_exists(path, transaction, || { - Error::InvalidPath("subtree doesn't exist".to_owned()) - }) + check_grovedb_v0_with_cost!( + "check_subtree_exists_invalid_path", + grove_version + .grovedb_versions + .operations + .get + .check_subtree_exists_invalid_path + ); + + self.check_subtree_exists( + path, + transaction, + || Error::InvalidPath("subtree doesn't exist".to_owned()), + grove_version, + ) } } diff --git a/grovedb/src/operations/get/query.rs 
b/grovedb/src/operations/get/query.rs index 6ba914ef..81046dbf 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Query operations use grovedb_costs::cost_return_on_error_default; @@ -33,6 +5,9 @@ use grovedb_costs::cost_return_on_error_default; use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; +use grovedb_version::{ + check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; #[cfg(feature = "full")] use integer_encoding::VarInt; @@ -69,7 +44,17 @@ impl GroveDb { decrease_limit_on_range_with_no_sub_elements: bool, error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult>, Error> { + check_grovedb_v0_with_cost!( + "query_encoded_many", + grove_version + .grovedb_versions + .operations + .query + .query_encoded_many + ); + let mut cost = OperationCost::default(); let elements = cost_return_on_error!( @@ -80,7 +65,8 @@ impl GroveDb { decrease_limit_on_range_with_no_sub_elements, error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, - transaction + transaction, + grove_version ) ); let results_wrapped = elements @@ -98,6 +84,7 @@ impl GroveDb { absolute_path.as_slice().into(), allow_cache, transaction, + grove_version, ) .unwrap_add_cost(&mut cost)?; @@ -132,11 +119,23 @@ impl GroveDb { error_if_intermediate_path_tree_not_present: bool, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where { + check_grovedb_v0_with_cost!( + "query_many_raw", + grove_version + .grovedb_versions + .operations + .query + .query_many_raw + ); let mut cost = OperationCost::default(); - let query = cost_return_on_error_no_add!(&cost, PathQuery::merge(path_queries.to_vec())); + let query = cost_return_on_error_no_add!( + &cost, + PathQuery::merge(path_queries.to_vec(), grove_version) + ); let (result, _) = cost_return_on_error!( &mut cost, self.query_raw( @@ -145,26 +144,36 @@ where { decrease_limit_on_range_with_no_sub_elements, 
error_if_intermediate_path_tree_not_present, result_type, - transaction + transaction, + grove_version ) ); Ok(result).wrap_with_cost(cost) } - /// Prove a path query as either verbose or non verbose + /// Prove a path query as either verbose or non-verbose pub fn get_proved_path_query( &self, path_query: &PathQuery, prove_options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "get_proved_path_query", + grove_version + .grovedb_versions + .operations + .query + .get_proved_path_query + ); if transaction.is_some() { Err(Error::NotSupported( "transactions are not currently supported".to_string(), )) .wrap_with_cost(Default::default()) } else { - self.prove_query(path_query, prove_options) + self.prove_query(path_query, prove_options, grove_version) } } @@ -174,7 +183,16 @@ where { allow_cache: bool, cost: &mut OperationCost, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result { + check_grovedb_v0!( + "follow_element", + grove_version + .grovedb_versions + .operations + .query + .follow_element + ); match element { Element::Reference(reference_path, ..) 
=> { match reference_path { @@ -189,6 +207,7 @@ where { absolute_path.as_slice().into(), allow_cache, transaction, + grove_version, ) .unwrap_add_cost(cost)?; @@ -217,7 +236,12 @@ where { error_if_intermediate_path_tree_not_present: bool, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { + check_grovedb_v0_with_cost!( + "query", + grove_version.grovedb_versions.operations.query.query + ); let mut cost = OperationCost::default(); let (elements, skipped) = cost_return_on_error!( @@ -228,7 +252,8 @@ where { decrease_limit_on_range_with_no_sub_elements, error_if_intermediate_path_tree_not_present, result_type, - transaction + transaction, + grove_version ) ); @@ -236,7 +261,7 @@ where { .into_iterator() .map(|result_item| { result_item.map_element(|element| { - self.follow_element(element, allow_cache, &mut cost, transaction) + self.follow_element(element, allow_cache, &mut cost, transaction, grove_version) }) }) .collect::, Error>>(); @@ -254,7 +279,16 @@ where { decrease_limit_on_range_with_no_sub_elements: bool, error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(Vec>, u16), Error> { + check_grovedb_v0_with_cost!( + "query_item_value", + grove_version + .grovedb_versions + .operations + .query + .query_item_value + ); let mut cost = OperationCost::default(); let (elements, skipped) = cost_return_on_error!( @@ -265,7 +299,8 @@ where { decrease_limit_on_range_with_no_sub_elements, error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, - transaction + transaction, + grove_version ) ); @@ -287,6 +322,7 @@ where { absolute_path.as_slice().into(), allow_cache, transaction, + grove_version, ) .unwrap_add_cost(&mut cost)?; @@ -329,7 +365,16 @@ where { decrease_limit_on_range_with_no_sub_elements: bool, error_if_intermediate_path_tree_not_present: bool, transaction: 
TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(Vec, u16), Error> { + check_grovedb_v0_with_cost!( + "query_item_value_or_sum", + grove_version + .grovedb_versions + .operations + .query + .query_item_value_or_sum + ); let mut cost = OperationCost::default(); let (elements, skipped) = cost_return_on_error!( @@ -340,7 +385,8 @@ where { decrease_limit_on_range_with_no_sub_elements, error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, - transaction + transaction, + grove_version ) ); @@ -362,6 +408,7 @@ where { absolute_path.as_slice().into(), allow_cache, transaction, + grove_version, ) .unwrap_add_cost(&mut cost)?; @@ -416,7 +463,12 @@ where { decrease_limit_on_range_with_no_sub_elements: bool, error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(Vec, u16), Error> { + check_grovedb_v0_with_cost!( + "query_sums", + grove_version.grovedb_versions.operations.query.query_sums + ); let mut cost = OperationCost::default(); let (elements, skipped) = cost_return_on_error!( @@ -427,7 +479,8 @@ where { decrease_limit_on_range_with_no_sub_elements, error_if_intermediate_path_tree_not_present, QueryResultType::QueryElementResultType, - transaction + transaction, + grove_version ) ); @@ -449,6 +502,7 @@ where { absolute_path.as_slice().into(), allow_cache, transaction, + grove_version, ) .unwrap_add_cost(&mut cost)?; @@ -493,7 +547,12 @@ where { error_if_intermediate_path_tree_not_present: bool, result_type: QueryResultType, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(QueryResultElements, u16), Error> { + check_grovedb_v0_with_cost!( + "query_raw", + grove_version.grovedb_versions.operations.query.query_raw + ); Element::get_path_query( &self.db, path_query, @@ -505,6 +564,7 @@ where { }, result_type, transaction, + grove_version, ) } @@ -517,7 +577,16 @@ where { decrease_limit_on_range_with_no_sub_elements: bool, 
error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "query_keys_optional", + grove_version + .grovedb_versions + .operations + .query + .query_keys_optional + ); let max_results = cost_return_on_error_default!(path_query.query.limit.ok_or( Error::NotSupported("limits must be set in query_keys_optional".to_string()) )) as usize; @@ -529,8 +598,10 @@ where { } let mut cost = OperationCost::default(); - let terminal_keys = - cost_return_on_error_no_add!(&cost, path_query.terminal_keys(max_results)); + let terminal_keys = cost_return_on_error_no_add!( + &cost, + path_query.terminal_keys(max_results, grove_version) + ); let (elements, _) = cost_return_on_error!( &mut cost, @@ -540,7 +611,8 @@ where { decrease_limit_on_range_with_no_sub_elements, error_if_intermediate_path_tree_not_present, QueryResultType::QueryPathKeyElementTrioResultType, - transaction + transaction, + grove_version ) ); @@ -564,7 +636,16 @@ where { decrease_limit_on_range_with_no_sub_elements: bool, error_if_intermediate_path_tree_not_present: bool, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "query_raw_keys_optional", + grove_version + .grovedb_versions + .operations + .query + .query_raw_keys_optional + ); let max_results = cost_return_on_error_default!(path_query.query.limit.ok_or( Error::NotSupported("limits must be set in query_raw_keys_optional".to_string()) )) as usize; @@ -576,8 +657,10 @@ where { } let mut cost = OperationCost::default(); - let terminal_keys = - cost_return_on_error_no_add!(&cost, path_query.terminal_keys(max_results)); + let terminal_keys = cost_return_on_error_no_add!( + &cost, + path_query.terminal_keys(max_results, grove_version) + ); let (elements, _) = cost_return_on_error!( &mut cost, @@ -587,7 +670,8 @@ where { decrease_limit_on_range_with_no_sub_elements, 
error_if_intermediate_path_tree_not_present, QueryResultType::QueryPathKeyElementTrioResultType, - transaction + transaction, + grove_version ) ); @@ -610,6 +694,7 @@ mod tests { use std::collections::HashMap; use grovedb_merk::proofs::{query::query_item::QueryItem, Query}; + use grovedb_version::version::GroveVersion; use pretty_assertions::assert_eq; use crate::{ @@ -620,7 +705,8 @@ mod tests { #[test] fn test_query_raw_keys_options() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -628,6 +714,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -637,6 +724,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -646,6 +734,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -657,7 +746,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("should get successfully"); @@ -677,7 +766,8 @@ mod tests { #[test] fn test_query_raw_keys_options_with_range() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -685,6 +775,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -694,6 +785,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree 
successfully"); @@ -703,6 +795,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -713,7 +806,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("should get successfully"); @@ -734,7 +827,8 @@ mod tests { #[test] fn test_query_raw_keys_options_with_range_inclusive() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -742,6 +836,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -751,6 +846,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -760,6 +856,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -770,7 +867,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(5), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("should get successfully"); @@ -794,7 +891,8 @@ mod tests { #[test] fn test_query_raw_keys_options_with_range_bounds() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -802,6 +900,7 @@ mod tests { Element::new_item(b"empty".to_vec()), None, None, + grove_version, ) 
.unwrap() .expect("should insert subtree successfully"); @@ -811,6 +910,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -820,6 +920,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -829,6 +930,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -838,7 +940,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(4), None)); - db.query_raw_keys_optional(&path_query, true, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err("range a should error"); @@ -847,7 +949,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 3 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(3), None)); - db.query_raw_keys_optional(&path_query, true, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("range b should not error"); @@ -856,7 +958,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 4 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(3), None)); - db.query_raw_keys_optional(&path_query, true, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err("range c should error"); @@ -865,7 +967,7 @@ mod tests { query.insert_key(b"5".to_vec()); // 3 let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(2), None)); - db.query_raw_keys_optional(&path_query, true, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) 
.unwrap() .expect_err("range d should error"); @@ -873,14 +975,15 @@ mod tests { query.insert_range(b"z".to_vec()..b"10".to_vec()); let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - db.query_raw_keys_optional(&path_query, true, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err("range using 2 bytes should error"); } #[test] fn test_query_raw_keys_options_with_empty_start_range() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -888,6 +991,7 @@ mod tests { Element::new_item(b"empty".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -897,6 +1001,7 @@ mod tests { Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -906,6 +1011,7 @@ mod tests { Element::new_item(b"hello too".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -915,6 +1021,7 @@ mod tests { Element::new_item(b"bye".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -924,7 +1031,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("range starting with null should not error"); @@ -952,17 +1059,26 @@ mod tests { #[test] fn test_query_raw_keys_options_with_subquery_path() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - db.insert([TEST_LEAF].as_ref(), b"", 
Element::empty_tree(), None, None) - .unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -972,6 +1088,7 @@ mod tests { Element::new_item(b"1 in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -981,6 +1098,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -990,6 +1108,7 @@ mod tests { Element::new_item(b"1 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -999,6 +1118,7 @@ mod tests { Element::new_item(b"5 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1007,7 +1127,7 @@ mod tests { query.insert_range(b"".to_vec()..b"c".to_vec()); let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - db.query_keys_optional(&path_query, true, true, true, None) + db.query_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err("range should error because we didn't subquery"); @@ -1017,7 +1137,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); @@ -1038,7 +1158,7 @@ mod tests { assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"1".to_vec())), 
Some(&None) - ); // because we are subquerying 1 + ); // because we are sub-querying 1 assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"4".to_vec())), None @@ -1056,17 +1176,26 @@ mod tests { #[test] fn test_query_raw_keys_options_with_subquery() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) - .unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1076,6 +1205,7 @@ mod tests { Element::new_item(b"1 in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1085,6 +1215,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1094,6 +1225,7 @@ mod tests { Element::new_item(b"1 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1103,6 +1235,7 @@ mod tests { Element::new_item(b"5 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1112,6 +1245,7 @@ mod tests { Element::new_item(b"2 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1125,7 +1259,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() 
.expect("query with subquery should not error"); @@ -1147,11 +1281,11 @@ mod tests { assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"1".to_vec())), Some(&None) - ); // because we are subquerying 1 + ); // because we are sub-querying 1 assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"2".to_vec())), Some(&None) - ); // because we are subquerying 1 + ); // because we are sub-querying 1 assert_eq!( raw_result.get(&(vec![TEST_LEAF.to_vec(), b"4".to_vec()], b"4".to_vec())), None @@ -1177,17 +1311,26 @@ mod tests { #[test] fn test_query_raw_keys_options_with_subquery_having_intermediate_paths_missing() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) - .unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF].as_ref(), b"1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1197,6 +1340,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1206,6 +1350,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1215,6 +1360,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1224,6 +1370,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1233,6 +1380,7 @@ mod tests { Element::new_item(b"found_me".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1242,6 +1390,7 @@ mod 
tests { Element::new_item(b"1 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1251,6 +1400,7 @@ mod tests { Element::new_item(b"5 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1260,6 +1410,7 @@ mod tests { Element::new_item(b"2 in 2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1274,7 +1425,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); - db.query_raw_keys_optional(&path_query, true, true, true, None) + db.query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect_err( "query with subquery should error if error_if_intermediate_path_tree_not_present \ @@ -1282,7 +1433,7 @@ mod tests { ); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, false, None) + .query_raw_keys_optional(&path_query, true, true, false, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); @@ -1357,17 +1508,26 @@ mod tests { #[test] fn test_query_raw_keys_options_with_subquery_and_subquery_path() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) - .unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1377,6 +1537,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); 
@@ -1386,6 +1547,7 @@ mod tests { Element::new_item(b"2 in null/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1395,6 +1557,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1404,6 +1567,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1413,6 +1577,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1422,6 +1587,7 @@ mod tests { Element::new_item(b"2 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1431,6 +1597,7 @@ mod tests { Element::new_item(b"5 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1453,7 +1620,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); @@ -1517,17 +1684,26 @@ mod tests { #[test] fn test_query_raw_keys_options_with_subquery_and_conditional_subquery() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) - .unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should 
insert subtree successfully"); @@ -1537,6 +1713,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1546,6 +1723,7 @@ mod tests { Element::new_item(b"2 in null/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1555,6 +1733,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1564,6 +1743,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1573,6 +1753,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1582,6 +1763,7 @@ mod tests { Element::new_item(b"2 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1591,6 +1773,7 @@ mod tests { Element::new_item(b"5 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1620,7 +1803,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let raw_result = db - .query_raw_keys_optional(&path_query, true, true, true, None) + .query_raw_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); @@ -1685,26 +1868,36 @@ mod tests { #[test] fn test_query_keys_options_with_subquery_and_conditional_subquery_and_reference() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [ANOTHER_TEST_LEAF].as_ref(), b"5", Element::new_item(b"ref result".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); - db.insert([TEST_LEAF].as_ref(), b"", Element::empty_tree(), None, None) - 
.unwrap() - .expect("should insert subtree successfully"); + db.insert( + [TEST_LEAF].as_ref(), + b"", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert subtree successfully"); db.insert( [TEST_LEAF, b""].as_ref(), b"", Element::new_item(b"null in null".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1714,6 +1907,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1723,6 +1917,7 @@ mod tests { Element::new_item(b"2 in null/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1732,6 +1927,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1741,6 +1937,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1750,6 +1947,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1759,6 +1957,7 @@ mod tests { Element::new_item(b"2 in 2/1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1771,6 +1970,7 @@ mod tests { ), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -1800,7 +2000,7 @@ mod tests { let path = vec![TEST_LEAF.to_vec()]; let path_query = PathQuery::new(path, SizedQuery::new(query, Some(1000), None)); let result = db - .query_keys_optional(&path_query, true, true, true, None) + .query_keys_optional(&path_query, true, true, true, None, GroveVersion::latest()) .unwrap() .expect("query with subquery should not error"); diff --git a/grovedb/src/operations/get/worst_case.rs b/grovedb/src/operations/get/worst_case.rs index 126f2b5b..7554a911 100644 --- a/grovedb/src/operations/get/worst_case.rs +++ 
b/grovedb/src/operations/get/worst_case.rs @@ -1,38 +1,12 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Worst case get costs #[cfg(feature = "full")] use grovedb_costs::OperationCost; #[cfg(feature = "full")] use grovedb_storage::rocksdb_storage::RocksDbStorage; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use crate::Error; #[cfg(feature = "full")] use crate::{ batch::{key_info::KeyInfo, KeyInfoPath}, @@ -47,7 +21,16 @@ impl GroveDb { key: &KeyInfo, max_element_size: u32, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "worst_case_for_has_raw", + grove_version + .grovedb_versions + .operations + .get + .worst_case_for_has_raw + ); let mut cost = OperationCost::default(); GroveDb::add_worst_case_has_raw_cost::( &mut cost, @@ -55,8 +38,9 @@ impl GroveDb { key, max_element_size, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Worst case cost for get raw @@ -65,7 +49,16 @@ impl GroveDb { key: &KeyInfo, max_element_size: u32, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "worst_case_for_get_raw", + grove_version + .grovedb_versions + .operations + .get + .worst_case_for_get_raw + ); let mut cost = OperationCost::default(); GroveDb::add_worst_case_get_raw_cost::( &mut cost, @@ -73,8 +66,9 @@ impl GroveDb { key, max_element_size, in_parent_tree_using_sums, - ); - cost + grove_version, + )?; + Ok(cost) } /// Worst case cost for get @@ -84,7 +78,16 @@ impl GroveDb { max_element_size: u32, max_references_sizes: Vec, in_parent_tree_using_sums: bool, - ) -> OperationCost { + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "worst_case_for_get", + grove_version + .grovedb_versions + .operations + .get + .worst_case_for_get + ); let mut cost = OperationCost::default(); GroveDb::add_worst_case_get_cost::( &mut cost, @@ -93,7 +96,8 @@ impl GroveDb { max_element_size, in_parent_tree_using_sums, max_references_sizes, - ); - cost + 
grove_version, + )?; + Ok(cost) } } diff --git a/grovedb/src/operations/insert/mod.rs b/grovedb/src/operations/insert/mod.rs index 5670a939..b7e00fc1 100644 --- a/grovedb/src/operations/insert/mod.rs +++ b/grovedb/src/operations/insert/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Insert operations #[cfg(feature = "full")] @@ -43,6 +15,9 @@ use grovedb_storage::rocksdb_storage::{ PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext, }; use grovedb_storage::{Storage, StorageBatch}; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; #[cfg(feature = "full")] use crate::{ @@ -97,11 +72,17 @@ impl GroveDb { element: Element, options: Option, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "insert", + grove_version.grovedb_versions.operations.insert.insert + ); + let subtree_path: SubtreePath = path.into(); let batch = StorageBatch::new(); @@ -113,6 +94,7 @@ impl GroveDb { options.unwrap_or_default(), transaction, &batch, + grove_version, ) } else { self.insert_without_transaction( @@ -121,6 +103,7 @@ impl GroveDb { element, options.unwrap_or_default(), &batch, + grove_version, ) }; @@ -139,7 +122,17 @@ impl GroveDb { options: InsertOptions, transaction: &'db Transaction, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + "insert_on_transaction", + grove_version + .grovedb_versions + .operations + .insert + .insert_on_transaction + ); + let mut cost = OperationCost::default(); let mut merk_cache: HashMap, Merk> = @@ -153,13 +146,20 @@ impl GroveDb { element, options, transaction, - batch + batch, + grove_version ) ); merk_cache.insert(path.clone(), merk); cost_return_on_error!( &mut cost, - self.propagate_changes_with_transaction(merk_cache, path, transaction, batch) + self.propagate_changes_with_transaction( + merk_cache, + path, + transaction, + batch, + grove_version + ) ); Ok(()).wrap_with_cost(cost) @@ -172,7 +172,17 @@ impl GroveDb { element: Element, options: InsertOptions, batch: &StorageBatch, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { + check_grovedb_v0_with_cost!( + 
"insert_without_transaction", + grove_version + .grovedb_versions + .operations + .insert + .insert_without_transaction + ); + let mut cost = OperationCost::default(); let mut merk_cache: HashMap, Merk> = @@ -180,13 +190,20 @@ impl GroveDb { let merk = cost_return_on_error!( &mut cost, - self.add_element_without_transaction(&path.to_vec(), key, element, options, batch) + self.add_element_without_transaction( + &path.to_vec(), + key, + element, + options, + batch, + grove_version + ) ); merk_cache.insert(path.clone(), merk); cost_return_on_error!( &mut cost, - self.propagate_changes_without_transaction(merk_cache, path, batch) + self.propagate_changes_without_transaction(merk_cache, path, batch, grove_version) ); Ok(()).wrap_with_cost(cost) @@ -205,12 +222,27 @@ impl GroveDb { options: InsertOptions, transaction: &'db Transaction, batch: &'db StorageBatch, + grove_version: &GroveVersion, ) -> CostResult>, Error> { + check_grovedb_v0_with_cost!( + "add_element_on_transaction", + grove_version + .grovedb_versions + .operations + .insert + .add_element_on_transaction + ); + let mut cost = OperationCost::default(); let mut subtree_to_insert_into = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(path.clone(), transaction, Some(batch)) + self.open_transactional_merk_at_path( + path.clone(), + transaction, + Some(batch), + grove_version + ) ); // if we don't allow a tree override then we should check @@ -221,7 +253,8 @@ impl GroveDb { .get( key, true, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .map_err(|e| Error::CorruptedData(e.to_string())) ); @@ -235,9 +268,11 @@ impl GroveDb { if options.validate_insertion_does_not_override_tree { let element = cost_return_on_error_no_add!( &cost, - Element::deserialize(element_bytes.as_slice()).map_err(|_| { - Error::CorruptedData(String::from("unable to deserialize element")) - }) + 
Element::deserialize(element_bytes.as_slice(), grove_version).map_err( + |_| { + Error::CorruptedData(String::from("unable to deserialize element")) + } + ) ); if element.is_any_tree() { return Err(Error::OverrideNotAllowed( @@ -264,13 +299,19 @@ impl GroveDb { self.open_transactional_merk_at_path( referenced_path.into(), transaction, - Some(batch) + Some(batch), + grove_version, ) ); let referenced_element_value_hash_opt = cost_return_on_error!( &mut cost, - Element::get_value_hash(&subtree_for_reference, referenced_key, true) + Element::get_value_hash( + &subtree_for_reference, + referenced_key, + true, + grove_version + ) ); let referenced_element_value_hash = cost_return_on_error!( @@ -298,6 +339,7 @@ impl GroveDb { key, referenced_element_value_hash, Some(options.as_merk_options()), + grove_version, ) ); } @@ -314,7 +356,8 @@ impl GroveDb { &mut subtree_to_insert_into, key, NULL_HASH, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -325,7 +368,8 @@ impl GroveDb { element.insert( &mut subtree_to_insert_into, key, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -346,11 +390,21 @@ impl GroveDb { element: Element, options: InsertOptions, batch: &'db StorageBatch, + grove_version: &GroveVersion, ) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "add_element_without_transaction", + grove_version + .grovedb_versions + .operations + .insert + .add_element_without_transaction + ); + let mut cost = OperationCost::default(); let mut subtree_to_insert_into = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path.into(), Some(batch)) + self.open_non_transactional_merk_at_path(path.into(), Some(batch), grove_version) ); if options.checks_for_override() { @@ -360,7 +414,8 @@ impl GroveDb { .get( key, true, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version ) 
.map_err(|e| Error::CorruptedData(e.to_string())) ); @@ -374,9 +429,11 @@ impl GroveDb { if options.validate_insertion_does_not_override_tree { let element = cost_return_on_error_no_add!( &cost, - Element::deserialize(element_bytes.as_slice()).map_err(|_| { - Error::CorruptedData(String::from("unable to deserialize element")) - }) + Element::deserialize(element_bytes.as_slice(), grove_version).map_err( + |_| { + Error::CorruptedData(String::from("unable to deserialize element")) + } + ) ); if element.is_any_tree() { return Err(Error::OverrideNotAllowed( @@ -400,13 +457,22 @@ impl GroveDb { let (referenced_key, referenced_path) = reference_path.split_last().unwrap(); let subtree_for_reference = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(referenced_path.into(), Some(batch)) + self.open_non_transactional_merk_at_path( + referenced_path.into(), + Some(batch), + grove_version + ) ); // when there is no transaction, we don't want to use caching let referenced_element_value_hash_opt = cost_return_on_error!( &mut cost, - Element::get_value_hash(&subtree_for_reference, referenced_key, false) + Element::get_value_hash( + &subtree_for_reference, + referenced_key, + false, + grove_version + ) ); let referenced_element_value_hash = cost_return_on_error!( @@ -433,7 +499,8 @@ impl GroveDb { &mut subtree_to_insert_into, key, referenced_element_value_hash, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -450,7 +517,8 @@ impl GroveDb { &mut subtree_to_insert_into, key, NULL_HASH, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -461,7 +529,8 @@ impl GroveDb { element.insert( &mut subtree_to_insert_into, key, - Some(options.as_merk_options()) + Some(options.as_merk_options()), + grove_version ) ); } @@ -477,21 +546,31 @@ impl GroveDb { key: &[u8], element: Element, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: 
AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "insert_if_not_exists", + grove_version + .grovedb_versions + .operations + .insert + .insert_if_not_exists + ); + let mut cost = OperationCost::default(); let subtree_path: SubtreePath<_> = path.into(); if cost_return_on_error!( &mut cost, - self.has_raw(subtree_path.clone(), key, transaction) + self.has_raw(subtree_path.clone(), key, transaction, grove_version) ) { Ok(false).wrap_with_cost(cost) } else { - self.insert(subtree_path, key, element, None, transaction) + self.insert(subtree_path, key, element, None, transaction, grove_version) .map_ok(|_| true) .add_cost(cost) } @@ -506,17 +585,27 @@ impl GroveDb { key: &[u8], element: Element, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult<(bool, Option), Error> where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "insert_if_changed_value", + grove_version + .grovedb_versions + .operations + .insert + .insert_if_changed_value + ); + let mut cost = OperationCost::default(); let subtree_path: SubtreePath = path.into(); let previous_element = cost_return_on_error!( &mut cost, - self.get_raw_optional(subtree_path.clone(), key, transaction) + self.get_raw_optional(subtree_path.clone(), key, transaction, grove_version) ); let needs_insert = match &previous_element { None => true, @@ -525,7 +614,7 @@ impl GroveDb { if !needs_insert { Ok((false, None)).wrap_with_cost(cost) } else { - self.insert(subtree_path, key, element, None, transaction) + self.insert(subtree_path, key, element, None, transaction, grove_version) .map_ok(|_| (true, previous_element)) .add_cost(cost) } @@ -539,6 +628,7 @@ mod tests { storage_cost::{removal::StorageRemovedBytes::NoStorageRemoval, StorageCost}, OperationCost, }; + use grovedb_version::version::GroveVersion; use pretty_assertions::assert_eq; use crate::{ @@ -549,13 +639,21 @@ mod tests { #[test] fn test_non_root_insert_item_without_transaction() { - let db = make_test_grovedb(); + 
let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); - db.insert([TEST_LEAF].as_ref(), b"key", element.clone(), None, None) - .unwrap() - .expect("successful insert"); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + element.clone(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful insert"); assert_eq!( - db.get([TEST_LEAF].as_ref(), b"key", None) + db.get([TEST_LEAF].as_ref(), b"key", None, grove_version) .unwrap() .expect("successful get"), element @@ -564,7 +662,8 @@ mod tests { #[test] fn test_non_root_insert_subtree_then_insert_item_without_transaction() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); // Insert a subtree first @@ -574,6 +673,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -584,11 +684,12 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); assert_eq!( - db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None) + db.get([TEST_LEAF, b"key1"].as_ref(), b"key2", None, grove_version) .unwrap() .expect("successful get"), element @@ -597,13 +698,16 @@ mod tests { #[test] fn test_non_root_insert_item_with_transaction() { + let grove_version = GroveVersion::latest(); let item_key = b"key3"; - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); // Check that there's no such key in the DB - let result = db.get([TEST_LEAF].as_ref(), item_key, None).unwrap(); + let result = db + .get([TEST_LEAF].as_ref(), item_key, None, grove_version) + .unwrap(); assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); let element1 = Element::new_item(b"ayy".to_vec()); @@ -614,17 +718,25 @@ mod tests { element1, None, Some(&transaction), + 
grove_version, ) .unwrap() .expect("cannot insert an item into GroveDB"); // The key was inserted inside the transaction, so it shouldn't be // possible to get it back without committing or using transaction - let result = db.get([TEST_LEAF].as_ref(), item_key, None).unwrap(); + let result = db + .get([TEST_LEAF].as_ref(), item_key, None, grove_version) + .unwrap(); assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); // Check that the element can be retrieved when transaction is passed let result_with_transaction = db - .get([TEST_LEAF].as_ref(), item_key, Some(&transaction)) + .get( + [TEST_LEAF].as_ref(), + item_key, + Some(&transaction), + grove_version, + ) .unwrap() .expect("Expected to work"); assert_eq!(result_with_transaction, Element::new_item(b"ayy".to_vec())); @@ -634,7 +746,7 @@ mod tests { // Check that the change was committed let result = db - .get([TEST_LEAF].as_ref(), item_key, None) + .get([TEST_LEAF].as_ref(), item_key, None, grove_version) .unwrap() .expect("Expected transaction to work"); assert_eq!(result, Element::new_item(b"ayy".to_vec())); @@ -642,13 +754,16 @@ mod tests { #[test] fn test_non_root_insert_subtree_with_transaction() { + let grove_version = GroveVersion::latest(); let subtree_key = b"subtree_key"; - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); // Check that there's no such key in the DB - let result = db.get([TEST_LEAF].as_ref(), subtree_key, None).unwrap(); + let result = db + .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) + .unwrap(); assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); db.insert( @@ -657,15 +772,23 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("cannot insert an item into GroveDB"); - let result = db.get([TEST_LEAF].as_ref(), subtree_key, None).unwrap(); + let result = db + .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) + .unwrap(); 
assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); let result_with_transaction = db - .get([TEST_LEAF].as_ref(), subtree_key, Some(&transaction)) + .get( + [TEST_LEAF].as_ref(), + subtree_key, + Some(&transaction), + grove_version, + ) .unwrap() .expect("Expected to work"); assert_eq!(result_with_transaction, Element::empty_tree()); @@ -673,7 +796,7 @@ mod tests { db.commit_transaction(transaction).unwrap().unwrap(); let result = db - .get([TEST_LEAF].as_ref(), subtree_key, None) + .get([TEST_LEAF].as_ref(), subtree_key, None, grove_version) .unwrap() .expect("Expected transaction to work"); assert_eq!(result, Element::empty_tree()); @@ -681,15 +804,28 @@ mod tests { #[test] fn test_insert_if_not_exists() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // Insert twice at the same path assert!(db - .insert_if_not_exists([TEST_LEAF].as_ref(), b"key1", Element::empty_tree(), None) + .insert_if_not_exists( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + grove_version + ) .unwrap() .expect("Provided valid path")); assert!(!db - .insert_if_not_exists([TEST_LEAF].as_ref(), b"key1", Element::empty_tree(), None) + .insert_if_not_exists( + [TEST_LEAF].as_ref(), + b"key1", + Element::empty_tree(), + None, + grove_version + ) .unwrap() .expect("Provided valid path")); @@ -700,6 +836,7 @@ mod tests { b"key1", Element::empty_tree(), None, + grove_version, ) .unwrap(); assert!(matches!(result, Err(Error::InvalidParentLayerPath(_)))); @@ -707,6 +844,7 @@ mod tests { #[test] fn test_one_insert_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -717,6 +855,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("should insert"); @@ -766,12 +905,20 @@ mod tests { #[test] fn test_one_insert_sum_item_in_sum_tree_cost() { + let grove_version = 
GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"s", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("expected to add upper tree"); + db.insert( + EMPTY_PATH, + b"s", + Element::empty_sum_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("expected to add upper tree"); let cost = db .insert( @@ -780,6 +927,7 @@ mod tests { Element::new_sum_item(5), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("should insert"); @@ -824,12 +972,20 @@ mod tests { #[test] fn test_one_insert_sum_item_under_sum_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"s", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("expected to add upper tree"); + db.insert( + EMPTY_PATH, + b"s", + Element::empty_sum_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("expected to add upper tree"); db.insert( [b"s".as_slice()].as_ref(), @@ -837,6 +993,7 @@ mod tests { Element::new_sum_item(5), None, Some(&tx), + grove_version, ) .unwrap() .expect("should insert"); @@ -848,6 +1005,7 @@ mod tests { Element::new_sum_item(6), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("should insert"); @@ -897,12 +1055,20 @@ mod tests { #[test] fn test_one_insert_bigger_sum_item_under_sum_item_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"s", Element::empty_sum_tree(), None, Some(&tx)) - .unwrap() - .expect("expected to add upper tree"); + db.insert( + EMPTY_PATH, + b"s", + Element::empty_sum_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("expected to add upper tree"); db.insert( [b"s".as_slice()].as_ref(), @@ -910,6 +1076,7 @@ mod tests { Element::new_sum_item(126), None, Some(&tx), + grove_version, ) .unwrap() .expect("should 
insert"); @@ -922,6 +1089,7 @@ mod tests { Element::new_sum_item(32768), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("should insert"); @@ -966,6 +1134,7 @@ mod tests { #[test] fn test_one_insert_item_cost_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -976,6 +1145,7 @@ mod tests { Element::new_item_with_flags(b"cat".to_vec(), Some(b"dog".to_vec())), None, Some(&tx), + grove_version, ) .cost; // Explanation for 183 storage_written_bytes @@ -1025,11 +1195,19 @@ mod tests { #[test] fn test_one_insert_empty_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); let cost = db - .insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, Some(&tx)) + .insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) .cost; // Explanation for 183 storage_written_bytes @@ -1077,6 +1255,7 @@ mod tests { #[test] fn test_one_insert_empty_sum_tree_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1087,6 +1266,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .cost; // Explanation for 183 storage_written_bytes @@ -1136,6 +1316,7 @@ mod tests { #[test] fn test_one_insert_empty_tree_cost_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1146,6 +1327,7 @@ mod tests { Element::empty_tree_with_flags(Some(b"cat".to_vec())), None, Some(&tx), + grove_version, ) .cost; // Explanation for 183 storage_written_bytes @@ -1198,12 +1380,20 @@ mod tests { #[test] fn test_one_insert_item_cost_under_tree() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + 
db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); let cost = db .insert( @@ -1212,6 +1402,7 @@ mod tests { Element::new_item(b"test".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .unwrap(); @@ -1269,6 +1460,7 @@ mod tests { #[test] fn test_one_insert_item_with_apple_flags_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1279,6 +1471,7 @@ mod tests { Element::new_item_with_flags(b"test".to_vec(), Some(b"apple".to_vec())), None, Some(&tx), + grove_version, ) .cost_as_result() .unwrap(); @@ -1332,12 +1525,20 @@ mod tests { #[test] fn test_one_insert_item_with_flags_cost_under_tree() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); let cost = db .insert( @@ -1346,6 +1547,7 @@ mod tests { Element::new_item_with_flags(b"test".to_vec(), Some(b"apple".to_vec())), None, Some(&tx), + grove_version, ) .cost_as_result() .unwrap(); @@ -1417,6 +1619,7 @@ mod tests { #[test] fn test_one_insert_item_with_flags_cost_under_tree_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1426,6 +1629,7 @@ mod tests { Element::empty_tree_with_flags(Some(b"cat".to_vec())), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1437,6 +1641,7 @@ mod tests { Element::new_item_with_flags(b"test".to_vec(), Some(b"apple".to_vec())), None, Some(&tx), + grove_version, ) .cost_as_result() .unwrap(); @@ -1510,6 +1715,7 @@ mod tests { #[test] fn test_one_update_item_same_cost_at_root() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let 
tx = db.start_transaction(); @@ -1519,6 +1725,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1530,6 +1737,7 @@ mod tests { Element::new_item(b"dog".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1576,12 +1784,20 @@ mod tests { #[test] fn test_one_update_same_cost_in_underlying_tree() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); db.insert( [b"tree".as_slice()].as_ref(), @@ -1589,6 +1805,7 @@ mod tests { Element::new_item(b"cat".to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1600,6 +1817,7 @@ mod tests { Element::new_item(b"dog".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1620,6 +1838,7 @@ mod tests { #[test] fn test_one_update_same_cost_in_underlying_sum_tree_bigger_sum_item() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1629,6 +1848,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1639,6 +1859,7 @@ mod tests { Element::new_sum_item(15), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1650,6 +1871,7 @@ mod tests { Element::new_sum_item(1000000), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1671,6 +1893,7 @@ mod tests { #[test] fn test_one_update_same_cost_in_underlying_sum_tree_bigger_sum_item_parent_sum_tree_already_big( ) { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1680,6 +1903,7 @@ mod tests { Element::empty_sum_tree(), None, 
Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1690,6 +1914,7 @@ mod tests { Element::new_sum_item(1000000), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1700,6 +1925,7 @@ mod tests { Element::new_sum_item(15), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1711,6 +1937,7 @@ mod tests { Element::new_sum_item(1000000), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1731,6 +1958,7 @@ mod tests { #[test] fn test_one_update_same_cost_in_underlying_sum_tree_smaller_sum_item() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); @@ -1740,6 +1968,7 @@ mod tests { Element::empty_sum_tree(), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1750,6 +1979,7 @@ mod tests { Element::new_sum_item(1000000), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1761,6 +1991,7 @@ mod tests { Element::new_sum_item(15), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1781,12 +2012,20 @@ mod tests { #[test] fn test_one_update_bigger_cost() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); db.insert( [b"tree".as_slice()].as_ref(), @@ -1794,6 +2033,7 @@ mod tests { Element::new_item(b"test".to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1805,6 +2045,7 @@ mod tests { Element::new_item(b"test1".to_vec()), None, Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); @@ -1825,12 +2066,20 @@ mod tests { #[test] fn test_one_update_tree_bigger_cost_with_flags() { + let grove_version = GroveVersion::latest(); let db = make_empty_grovedb(); let tx = 
db.start_transaction(); - db.insert(EMPTY_PATH, b"tree", Element::empty_tree(), None, Some(&tx)) - .unwrap() - .unwrap(); + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); db.insert( [b"tree".as_slice()].as_ref(), @@ -1838,6 +2087,7 @@ mod tests { Element::new_tree(None), None, Some(&tx), + grove_version, ) .unwrap() .unwrap(); @@ -1853,6 +2103,7 @@ mod tests { base_root_storage_is_free: true, }), Some(&tx), + grove_version, ) .cost_as_result() .expect("expected to insert"); diff --git a/grovedb/src/operations/is_empty_tree.rs b/grovedb/src/operations/is_empty_tree.rs index 40abd62f..07c34999 100644 --- a/grovedb/src/operations/is_empty_tree.rs +++ b/grovedb/src/operations/is_empty_tree.rs @@ -1,36 +1,11 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Check if empty tree operations #[cfg(feature = "full")] use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; use grovedb_path::SubtreePath; +#[cfg(feature = "full")] +use grovedb_version::error::GroveVersionError; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "full")] use crate::{util::merk_optional_tx, Element, Error, GroveDb, TransactionArg}; @@ -42,20 +17,32 @@ impl GroveDb { &self, path: P, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> CostResult where B: AsRef<[u8]> + 'b, P: Into>, { + check_grovedb_v0_with_cost!( + "is_empty_tree", + grove_version.grovedb_versions.operations.get.is_empty_tree + ); let mut cost = OperationCost::default(); let path: SubtreePath = path.into(); cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction) + self.check_subtree_exists_path_not_found(path.clone(), transaction, grove_version) ); - merk_optional_tx!(&mut cost, self.db, path, None, transaction, subtree, { - Ok(subtree.is_empty_tree().unwrap_add_cost(&mut cost)).wrap_with_cost(cost) - }) + merk_optional_tx!( + &mut cost, + self.db, + path, + None, + transaction, + subtree, + grove_version, + { Ok(subtree.is_empty_tree().unwrap_add_cost(&mut cost)).wrap_with_cost(cost) } + ) } } diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index fe5866e8..a5297eaf 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -12,6 +12,9 @@ use grovedb_merk::{ Merk, ProofWithoutEncodingResult, }; use grovedb_storage::StorageContext; +use grovedb_version::{ + check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, +}; #[cfg(feature = "proof_debug")] use crate::query_result_type::QueryResultType; @@ -31,12 +34,21 @@ impl GroveDb { &self, query: Vec<&PathQuery>, prove_options: Option, + grove_version: &GroveVersion, ) -> CostResult, Error> { + 
check_grovedb_v0_with_cost!( + "prove_query_many", + grove_version + .grovedb_versions + .operations + .proof + .prove_query_many + ); if query.len() > 1 { - let query = cost_return_on_error_default!(PathQuery::merge(query)); - self.prove_query(&query, prove_options) + let query = cost_return_on_error_default!(PathQuery::merge(query, grove_version)); + self.prove_query(&query, prove_options, grove_version) } else { - self.prove_query(query[0], prove_options) + self.prove_query(query[0], prove_options, grove_version) } } @@ -48,8 +60,13 @@ impl GroveDb { &self, query: &PathQuery, prove_options: Option, + grove_version: &GroveVersion, ) -> CostResult, Error> { - self.prove_internal_serialized(query, prove_options) + check_grovedb_v0_with_cost!( + "prove_query_many", + grove_version.grovedb_versions.operations.proof.prove_query + ); + self.prove_internal_serialized(query, prove_options, grove_version) } /// Generates a proof and serializes it @@ -57,10 +74,13 @@ impl GroveDb { &self, path_query: &PathQuery, prove_options: Option, + grove_version: &GroveVersion, ) -> CostResult, Error> { let mut cost = OperationCost::default(); - let proof = - cost_return_on_error!(&mut cost, self.prove_internal(path_query, prove_options)); + let proof = cost_return_on_error!( + &mut cost, + self.prove_internal(path_query, prove_options, grove_version) + ); #[cfg(feature = "proof_debug")] { println!("constructed proof is {}", proof); @@ -81,6 +101,7 @@ impl GroveDb { &self, path_query: &PathQuery, prove_options: Option, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); @@ -113,7 +134,8 @@ impl GroveDb { prove_options.decrease_limit_on_empty_sub_query_result, false, QueryResultType::QueryPathKeyElementTrioResultType, - None + None, + grove_version, ) ) .0; @@ -128,7 +150,8 @@ impl GroveDb { prove_options.decrease_limit_on_empty_sub_query_result, false, QueryResultType::QueryPathKeyElementTrioResultType, - None + None, + grove_version, ) ) .0 @@ 
-141,7 +164,13 @@ impl GroveDb { let root_layer = cost_return_on_error!( &mut cost, - self.prove_subqueries(vec![], path_query, &mut limit, &prove_options) + self.prove_subqueries( + vec![], + path_query, + &mut limit, + &prove_options, + grove_version + ) ); Ok(GroveDBProofV0 { @@ -160,26 +189,29 @@ impl GroveDb { path_query: &PathQuery, overall_limit: &mut Option, prove_options: &ProveOptions, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); let query = cost_return_on_error_no_add!( &cost, path_query - .query_items_at_path(path.as_slice()) - .ok_or(Error::CorruptedPath(format!( - "prove subqueries: path {} should be part of path_query {}", - path.iter() - .map(|a| hex_to_ascii(a)) - .collect::>() - .join("/"), - path_query - ))) + .query_items_at_path(path.as_slice(), grove_version) + .and_then(|query_items| { + query_items.ok_or(Error::CorruptedPath(format!( + "prove subqueries: path {} should be part of path_query {}", + path.iter() + .map(|a| hex_to_ascii(a)) + .collect::>() + .join("/"), + path_query + ))) + }) ); let subtree = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path.as_slice().into(), None) + self.open_non_transactional_merk_at_path(path.as_slice().into(), None, grove_version) ); let limit = if path.len() < path_query.path.len() { @@ -191,7 +223,13 @@ impl GroveDb { let mut merk_proof = cost_return_on_error!( &mut cost, - self.generate_merk_proof(&subtree, &query.items, query.left_to_right, limit) + self.generate_merk_proof( + &subtree, + &query.items, + query.left_to_right, + limit, + grove_version + ) ); #[cfg(feature = "proof_debug")] @@ -223,7 +261,7 @@ impl GroveDb { Node::KV(key, value) | Node::KVValueHash(key, value, ..) 
if !done_with_results => { - let elem = Element::deserialize(value); + let elem = Element::deserialize(value, grove_version); match elem { Ok(Element::Reference(reference_path, ..)) => { let absolute_path = cost_return_on_error!( @@ -241,11 +279,13 @@ impl GroveDb { self.follow_reference( absolute_path.as_slice().into(), true, - None + None, + grove_version ) ); - let serialized_referenced_elem = referenced_elem.serialize(); + let serialized_referenced_elem = + referenced_elem.serialize(grove_version); if serialized_referenced_elem.is_err() { return Err(Error::CorruptedData(String::from( "unable to serialize element", @@ -300,6 +340,7 @@ impl GroveDb { path_query, overall_limit, prove_options, + grove_version, ) ); @@ -379,12 +420,13 @@ impl GroveDb { query_items: &[QueryItem], left_to_right: bool, limit: Option, + grove_version: &GroveVersion, ) -> CostResult where S: StorageContext<'a> + 'a, { subtree - .prove_unchecked_query_items(query_items, limit, left_to_right) + .prove_unchecked_query_items(query_items, limit, left_to_right, grove_version) .map_ok(|(proof, limit)| ProofWithoutEncodingResult::new(proof, limit)) .map_err(|e| { Error::InternalError(format!( diff --git a/grovedb/src/operations/proof/util.rs b/grovedb/src/operations/proof/util.rs index 50480c13..d6b34ecc 100644 --- a/grovedb/src/operations/proof/util.rs +++ b/grovedb/src/operations/proof/util.rs @@ -4,6 +4,7 @@ use grovedb_merk::{ proofs::query::{Key, Path, ProvedKeyOptionalValue, ProvedKeyValue}, CryptoHash, Error, }; +use grovedb_version::version::GroveVersion; use crate::Element; @@ -313,14 +314,14 @@ pub fn path_as_slices_hex_to_ascii(path: &[&[u8]]) -> String { pub fn optional_element_hex_to_ascii(hex_value: Option<&Vec>) -> String { match hex_value { None => "None".to_string(), - Some(hex_value) => Element::deserialize(hex_value) + Some(hex_value) => Element::deserialize(hex_value, GroveVersion::latest()) .map(|e| e.to_string()) .unwrap_or_else(|_| hex::encode(hex_value)), } } pub fn 
element_hex_to_ascii(hex_value: &[u8]) -> String { - Element::deserialize(hex_value) + Element::deserialize(hex_value, GroveVersion::latest()) .map(|e| e.to_string()) .unwrap_or_else(|_| hex::encode(hex_value)) } diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index 4e0375e3..31cd9ae5 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -8,6 +8,10 @@ use grovedb_merk::{ tree::{combine_hash, value_hash}, CryptoHash, }; +use grovedb_version::{ + check_grovedb_v0, error::GroveVersionError, version::GroveVersion, TryFromVersioned, + TryIntoVersioned, +}; #[cfg(feature = "proof_debug")] use crate::operations::proof::util::{ @@ -27,7 +31,16 @@ impl GroveDb { proof: &[u8], query: &PathQuery, options: VerifyOptions, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_query_with_options", + grove_version + .grovedb_versions + .operations + .proof + .verify_query_with_options + ); if options.absence_proofs_for_non_existing_searched_keys { // must have a limit query.query.limit.ok_or(Error::NotSupported( @@ -49,7 +62,8 @@ impl GroveDb { .map_err(|e| Error::CorruptedData(format!("unable to decode proof: {}", e)))? 
.0; - let (root_hash, result) = Self::verify_proof_internal(&grovedb_proof, query, options)?; + let (root_hash, result) = + Self::verify_proof_internal(&grovedb_proof, query, options, grove_version)?; Ok((root_hash, result)) } @@ -57,7 +71,16 @@ impl GroveDb { pub fn verify_query_raw( proof: &[u8], query: &PathQuery, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, ProvedPathKeyValues), Error> { + check_grovedb_v0!( + "verify_query_raw", + grove_version + .grovedb_versions + .operations + .proof + .verify_query_raw + ); let config = bincode::config::standard() .with_big_endian() .with_no_limit(); @@ -73,6 +96,7 @@ impl GroveDb { verify_proof_succinctness: false, include_empty_trees_in_result: true, }, + grove_version, )?; Ok((root_hash, result)) @@ -82,16 +106,20 @@ impl GroveDb { proof: &GroveDBProof, query: &PathQuery, options: VerifyOptions, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, Vec), Error> { match proof { - GroveDBProof::V0(proof_v0) => Self::verify_proof_internal_v0(proof_v0, query, options), + GroveDBProof::V0(proof_v0) => { + Self::verify_proof_v0_internal(proof_v0, query, options, grove_version) + } } } - fn verify_proof_internal_v0( + fn verify_proof_v0_internal( proof: &GroveDBProofV0, query: &PathQuery, options: VerifyOptions, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, Vec), Error> { let mut result = Vec::new(); let mut limit = query.query.limit; @@ -103,6 +131,7 @@ impl GroveDb { &[], &mut result, &options, + grove_version, )?; if options.absence_proofs_for_non_existing_searched_keys { @@ -111,7 +140,7 @@ impl GroveDb { "limits must be set in verify_query_with_absence_proof".to_string(), ))? 
as usize; - let terminal_keys = query.terminal_keys(max_results)?; + let terminal_keys = query.terminal_keys(max_results, grove_version)?; // convert the result set to a btree map let mut result_set_as_map: BTreeMap> = result @@ -167,10 +196,11 @@ impl GroveDb { proof: &GroveDBProof, query: &PathQuery, options: VerifyOptions, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, ProvedPathKeyValues), Error> { match proof { GroveDBProof::V0(proof_v0) => { - Self::verify_proof_raw_internal_v0(proof_v0, query, options) + Self::verify_proof_raw_internal_v0(proof_v0, query, options, grove_version) } } } @@ -179,6 +209,7 @@ impl GroveDb { proof: &GroveDBProofV0, query: &PathQuery, options: VerifyOptions, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, ProvedPathKeyValues), Error> { let mut result = Vec::new(); let mut limit = query.query.limit; @@ -190,6 +221,7 @@ impl GroveDb { &[], &mut result, &options, + grove_version, )?; Ok((root_hash, result)) } @@ -202,23 +234,31 @@ impl GroveDb { current_path: &[&[u8]], result: &mut Vec, options: &VerifyOptions, + grove_version: &GroveVersion, ) -> Result where - T: TryFrom, - Error: From<>::Error>, + T: TryFromVersioned, + Error: From<>::Error>, { - let internal_query = - query - .query_items_at_path(current_path) - .ok_or(Error::CorruptedPath(format!( - "verify raw: path {} should be part of path_query {}", - current_path - .iter() - .map(hex::encode) - .collect::>() - .join("/"), - query - )))?; + check_grovedb_v0!( + "verify_layer_proof", + grove_version + .grovedb_versions + .operations + .proof + .verify_layer_proof + ); + let internal_query = query + .query_items_at_path(current_path, grove_version)? 
+ .ok_or(Error::CorruptedPath(format!( + "verify raw: path {} should be part of path_query {}", + current_path + .iter() + .map(hex::encode) + .collect::>() + .join("/"), + query + )))?; let level_query = Query { items: internal_query.items.to_vec(), @@ -258,7 +298,7 @@ impl GroveDb { let key = &proved_key_value.key; let hash = &proved_key_value.proof; if let Some(value_bytes) = &proved_key_value.value { - let element = Element::deserialize(value_bytes)?; + let element = Element::deserialize(value_bytes, grove_version)?; verified_keys.insert(key.clone()); @@ -278,6 +318,7 @@ impl GroveDb { &path, result, options, + grove_version, )?; let combined_root_hash = combine_hash(value_hash(value_bytes).value(), &lower_hash) @@ -321,7 +362,7 @@ impl GroveDb { &path_key_optional_value, limit_left ); } - result.push(path_key_optional_value.try_into()?); + result.push(path_key_optional_value.try_into_versioned(grove_version)?); limit_left.as_mut().map(|limit| *limit -= 1); if limit_left == &Some(0) { @@ -348,7 +389,12 @@ impl GroveDb { pub fn verify_query( proof: &[u8], query: &PathQuery, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_query", + grove_version.grovedb_versions.operations.proof.verify_query + ); Self::verify_query_with_options( proof, query, @@ -357,13 +403,23 @@ impl GroveDb { verify_proof_succinctness: true, include_empty_trees_in_result: false, }, + grove_version, ) } pub fn verify_subset_query( proof: &[u8], query: &PathQuery, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_subset_query", + grove_version + .grovedb_versions + .operations + .proof + .verify_subset_query + ); Self::verify_query_with_options( proof, query, @@ -372,13 +428,23 @@ impl GroveDb { verify_proof_succinctness: false, include_empty_trees_in_result: false, }, + grove_version, ) } pub fn verify_query_with_absence_proof( proof: &[u8], query: &PathQuery, + grove_version: 
&GroveVersion, ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_query_with_absence_proof", + grove_version + .grovedb_versions + .operations + .proof + .verify_query_with_absence_proof + ); Self::verify_query_with_options( proof, query, @@ -387,13 +453,23 @@ impl GroveDb { verify_proof_succinctness: true, include_empty_trees_in_result: false, }, + grove_version, ) } pub fn verify_subset_query_with_absence_proof( proof: &[u8], query: &PathQuery, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, Vec), Error> { + check_grovedb_v0!( + "verify_subset_query_with_absence_proof", + grove_version + .grovedb_versions + .operations + .proof + .verify_subset_query_with_absence_proof + ); Self::verify_query_with_options( proof, query, @@ -402,6 +478,7 @@ impl GroveDb { verify_proof_succinctness: false, include_empty_trees_in_result: false, }, + grove_version, ) } @@ -415,13 +492,23 @@ impl GroveDb { proof: &[u8], first_query: &PathQuery, chained_path_queries: Vec, + grove_version: &GroveVersion, ) -> Result<(CryptoHash, Vec>), Error> where C: Fn(Vec) -> Option, { + check_grovedb_v0!( + "verify_query_with_chained_path_queries", + grove_version + .grovedb_versions + .operations + .proof + .verify_query_with_chained_path_queries + ); let mut results = vec![]; - let (last_root_hash, elements) = Self::verify_subset_query(proof, first_query)?; + let (last_root_hash, elements) = + Self::verify_subset_query(proof, first_query, grove_version)?; results.push(elements); // we should iterate over each chained path queries @@ -429,7 +516,8 @@ impl GroveDb { let new_path_query = path_query_generator(results[results.len() - 1].clone()).ok_or( Error::InvalidInput("one of the path query generators returns no path query"), )?; - let (new_root_hash, new_elements) = Self::verify_subset_query(proof, &new_path_query)?; + let (new_root_hash, new_elements) = + Self::verify_subset_query(proof, &new_path_query, grove_version)?; if new_root_hash != last_root_hash { return 
Err(Error::InvalidProof(format!( "root hash for different path queries do no match, first is {}, this one is {}", diff --git a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index 7b4fe42a..f140bb05 100644 --- a/grovedb/src/query/mod.rs +++ b/grovedb/src/query/mod.rs @@ -11,6 +11,7 @@ use grovedb_merk::proofs::query::query_item::QueryItem; use grovedb_merk::proofs::query::{Key, SubqueryBranch}; #[cfg(any(feature = "full", feature = "verify"))] use grovedb_merk::proofs::Query; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; use indexmap::IndexMap; use crate::operations::proof::util::hex_to_ascii; @@ -134,7 +135,18 @@ impl PathQuery { } /// Gets the path of all terminal keys - pub fn terminal_keys(&self, max_results: usize) -> Result, Error> { + pub fn terminal_keys( + &self, + max_results: usize, + grove_version: &GroveVersion, + ) -> Result, Error> { + check_grovedb_v0!( + "merge", + grove_version + .grovedb_versions + .path_query_methods + .terminal_keys + ); let mut result: Vec<(Vec>, Vec)> = vec![]; self.query .query @@ -144,7 +156,14 @@ impl PathQuery { } /// Combines multiple path queries into one equivalent path query - pub fn merge(mut path_queries: Vec<&PathQuery>) -> Result { + pub fn merge( + mut path_queries: Vec<&PathQuery>, + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "merge", + grove_version.grovedb_versions.path_query_methods.merge + ); if path_queries.is_empty() { return Err(Error::InvalidInput( "merge function requires at least 1 path query", @@ -276,7 +295,18 @@ impl PathQuery { } } - pub fn query_items_at_path(&self, path: &[&[u8]]) -> Option { + pub fn query_items_at_path( + &self, + path: &[&[u8]], + grove_version: &GroveVersion, + ) -> Result, Error> { + check_grovedb_v0!( + "query_items_at_path", + grove_version + .grovedb_versions + .path_query_methods + .query_items_at_path + ); fn recursive_query_items<'b>( query: &'b Query, path: &[&[u8]], @@ -386,7 +416,7 @@ impl 
PathQuery { let self_path_len = self.path.len(); let given_path_len = path.len(); - match given_path_len.cmp(&self_path_len) { + Ok(match given_path_len.cmp(&self_path_len) { Ordering::Less => { if path.iter().zip(&self.path).all(|(a, b)| *a == b.as_slice()) { Some(SinglePathSubquery::from_key_when_in_path( @@ -407,11 +437,11 @@ impl PathQuery { } Ordering::Greater => { if !self.path.iter().zip(path).all(|(a, b)| a.as_slice() == *b) { - return None; + return Ok(None); } recursive_query_items(&self.query.query, &path[self_path_len..]) } - } + }) } } @@ -551,6 +581,7 @@ mod tests { query::{query_item::QueryItem, SubqueryBranch}, Query, }; + use grovedb_version::version::GroveVersion; use indexmap::IndexMap; use crate::{ @@ -562,7 +593,8 @@ mod tests { #[test] fn test_same_path_different_query_merge() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); // starting with no subquery, just a single path and a key query let mut query_one = Query::new(); @@ -570,9 +602,13 @@ mod tests { let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); - let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_one) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set_one.len(), 1); let mut query_two = Query::new(); @@ -580,38 +616,49 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - 
.expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 1); - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("should merge path queries"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("should merge path queries"); let proof = temp_db - .prove_query(&merged_path_query, None) + .prove_query(&merged_path_query, None, grove_version) .unwrap() .unwrap(); - let (_, result_set_tree) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) - .expect("should execute proof"); + let (_, result_set_tree) = + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) + .expect("should execute proof"); assert_eq!(result_set_tree.len(), 2); } #[test] fn test_different_same_length_path_with_different_query_merge() { + let grove_version = GroveVersion::latest(); // Tests for // [a, c, Q] // [a, m, Q] - let temp_db = make_deep_tree(); + let temp_db = make_deep_tree(grove_version); let mut query_one = Query::new(); query_one.insert_key(b"key1".to_vec()); let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); - let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_one) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set_one.len(), 1); let mut query_two = Query::new(); @@ -619,28 +666,33 @@ mod tests { let 
path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 1); - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("expect to merge path queries"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("expect to merge path queries"); assert_eq!(merged_path_query.path, vec![TEST_LEAF.to_vec()]); assert_eq!(merged_path_query.query.query.items.len(), 2); let proof = temp_db - .prove_query(&merged_path_query, None) + .prove_query(&merged_path_query, None, grove_version) .unwrap() .unwrap(); let (_, result_set_merged) = - GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) .expect("should execute proof"); assert_eq!(result_set_merged.len(), 2); let keys = [b"key1".to_vec(), b"key4".to_vec()]; let values = [b"value1".to_vec(), b"value4".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set_merged, expected_result_set); @@ -656,9 +708,13 @@ mod tests { query_one.clone(), ); - let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); - let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - 
.expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_one) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set_one.len(), 3); let mut query_two = Query::new(); @@ -673,9 +729,13 @@ mod tests { query_two.clone(), ); - let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 2); let mut query_three = Query::new(); @@ -691,11 +751,12 @@ mod tests { ); let proof = temp_db - .prove_query(&path_query_three, None) + .prove_query(&path_query_three, None, grove_version) .unwrap() .unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_three) - .expect("should execute proof"); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_three, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 2); #[rustfmt::skip] @@ -725,9 +786,11 @@ mod tests { } - let merged_path_query = - PathQuery::merge(vec![&path_query_one, &path_query_two, &path_query_three]) - .expect("expect to merge path queries"); + let merged_path_query = PathQuery::merge( + vec![&path_query_one, &path_query_two, &path_query_three], + grove_version, + ) + .expect("expect to merge path queries"); assert_eq!(merged_path_query.path, vec![b"deep_leaf".to_vec()]); assert_eq!(merged_path_query.query.query.items.len(), 2); let conditional_subquery_branches = merged_path_query @@ -824,17 +887,18 @@ mod tests { true, 
QueryResultType::QueryPathKeyElementTrioResultType, None, + grove_version, ) .value .expect("expected to get results"); assert_eq!(result_set_merged.len(), 7); let proof = temp_db - .prove_query(&merged_path_query, None) + .prove_query(&merged_path_query, None, grove_version) .unwrap() .unwrap(); let (_, proved_result_set_merged) = - GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) .expect("should execute proof"); assert_eq!(proved_result_set_merged.len(), 7); @@ -856,14 +920,15 @@ mod tests { b"value10".to_vec(), b"value11".to_vec(), ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(proved_result_set_merged, expected_result_set); } #[test] fn test_different_length_paths_merge() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); let mut query_one = Query::new(); query_one.insert_all(); @@ -877,9 +942,13 @@ mod tests { query_one, ); - let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); - let (_, result_set_one) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_one) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set_one.len(), 6); let mut query_two = Query::new(); @@ -894,21 +963,26 @@ mod tests { query_two, ); - let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); - let (_, result_set_two) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute 
proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set_two) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set_two.len(), 2); - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("expect to merge path queries"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("expect to merge path queries"); assert_eq!(merged_path_query.path, vec![b"deep_leaf".to_vec()]); let proof = temp_db - .prove_query(&merged_path_query, None) + .prove_query(&merged_path_query, None, grove_version) .unwrap() .unwrap(); let (_, result_set_merged) = - GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) .expect("should execute proof"); assert_eq!(result_set_merged.len(), 8); @@ -932,23 +1006,28 @@ mod tests { b"value10".to_vec(), b"value11".to_vec(), ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set_merged, expected_result_set); } #[test] fn test_same_path_and_different_path_query_merge() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); let mut query_one = Query::new(); query_one.insert_key(b"key1".to_vec()); let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + 
.prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 1); let mut query_two = Query::new(); @@ -956,9 +1035,13 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 1); let mut query_three = Query::new(); @@ -969,42 +1052,51 @@ mod tests { ); let proof = temp_db - .prove_query(&path_query_three, None) + .prove_query(&path_query_three, None, grove_version) .unwrap() .unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_three) - .expect("should execute proof"); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_three, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 2); - let merged_path_query = - PathQuery::merge(vec![&path_query_one, &path_query_two, &path_query_three]) - .expect("should merge three queries"); + let merged_path_query = PathQuery::merge( + vec![&path_query_one, &path_query_two, &path_query_three], + grove_version, + ) + .expect("should merge three queries"); let proof = temp_db - .prove_query(&merged_path_query, None) + .prove_query(&merged_path_query, None, grove_version) .unwrap() .unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) - .expect("should execute proof"); + let (_, result_set) = + 
GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 4); } #[test] fn test_equal_path_merge() { + let grove_version = GroveVersion::latest(); // [a, b, Q] // [a, b, Q2] // We should be able to merge this if Q and Q2 have no subqueries. - let temp_db = make_deep_tree(); + let temp_db = make_deep_tree(grove_version); let mut query_one = Query::new(); query_one.insert_key(b"key1".to_vec()); let path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_one); - let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 1); let mut query_two = Query::new(); @@ -1012,20 +1104,26 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query_two); - let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 1); - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("should merge three queries"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("should merge three queries"); let proof = temp_db - 
.prove_query(&merged_path_query, None) + .prove_query(&merged_path_query, None, grove_version) .unwrap() .unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) - .expect("should execute proof"); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 2); // [a, b, Q] @@ -1038,9 +1136,13 @@ mod tests { query_one, ); - let proof = temp_db.prove_query(&path_query_one, None).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_one) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_one, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 2); let mut query_one = Query::new(); @@ -1055,9 +1157,13 @@ mod tests { query_one, ); - let proof = temp_db.prove_query(&path_query_two, None).unwrap().unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query_two) - .expect("should execute proof"); + let proof = temp_db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query_two, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 3); #[rustfmt::skip] @@ -1087,8 +1193,9 @@ mod tests { } - let merged_path_query = PathQuery::merge(vec![&path_query_one, &path_query_two]) - .expect("expected to be able to merge path_query"); + let merged_path_query = + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version) + .expect("expected to be able to merge path_query"); // we expect the common path to be the path of both before merge assert_eq!( @@ -1133,22 +1240,25 @@ mod tests { true, 
QueryResultType::QueryPathKeyElementTrioResultType, None, + grove_version, ) .value .expect("expected to get results"); assert_eq!(result_set_merged.len(), 4); let proof = temp_db - .prove_query(&merged_path_query, None) + .prove_query(&merged_path_query, None, grove_version) .unwrap() .unwrap(); - let (_, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query) - .expect("should execute proof"); + let (_, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &merged_path_query, grove_version) + .expect("should execute proof"); assert_eq!(result_set.len(), 4); } #[test] fn test_path_query_items_with_subquery_and_inner_subquery_path() { + let grove_version = GroveVersion::latest(); // Constructing the keys and paths let root_path_key_1 = b"root_path_key_1".to_vec(); let root_path_key_2 = b"root_path_key_2".to_vec(); @@ -1193,7 +1303,8 @@ mod tests { { let path = vec![root_path_key_1.as_slice()]; let first = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1211,7 +1322,8 @@ mod tests { let path = vec![root_path_key_1.as_slice(), root_path_key_2.as_slice()]; let second = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1234,7 +1346,8 @@ mod tests { ]; let third = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1257,7 +1370,8 @@ mod tests { ]; let fourth = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1281,7 +1395,8 @@ mod tests { ]; let fifth = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid 
version") .expect("expected query items"); assert_eq!( @@ -1307,7 +1422,8 @@ mod tests { ]; let sixth = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1324,6 +1440,7 @@ mod tests { #[test] fn test_path_query_items_with_subquery_path() { + let grove_version = GroveVersion::latest(); // Constructing the keys and paths let root_path_key = b"higher".to_vec(); let dash_key = b"dash".to_vec(); @@ -1351,7 +1468,8 @@ mod tests { { let path = vec![root_path_key.as_slice()]; let first = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1369,7 +1487,8 @@ mod tests { let path = vec![root_path_key.as_slice(), dash_key.as_slice()]; let second = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1386,6 +1505,7 @@ mod tests { #[test] fn test_conditional_subquery_refusing_elements() { + let grove_version = GroveVersion::latest(); let empty_vec: Vec = vec![]; let zero_vec: Vec = vec![0]; @@ -1419,7 +1539,8 @@ mod tests { let path = vec![TEST_LEAF, empty_vec.as_slice()]; let second = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1436,6 +1557,7 @@ mod tests { #[test] fn test_complex_path_query_with_conditional_subqueries() { + let grove_version = GroveVersion::latest(); let identity_id = hex::decode("8b8948a6801501bbe0431e3d994dcf71cf5a2a0939fe51b0e600076199aba4fb") .unwrap(); @@ -1515,7 +1637,8 @@ mod tests { { let path = vec![]; let first = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); 
assert_eq!( @@ -1534,7 +1657,8 @@ mod tests { { let path = vec![key_20.as_slice()]; let query = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1551,7 +1675,8 @@ mod tests { { let path = vec![key_20.as_slice(), identity_id.as_slice()]; let query = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( @@ -1570,7 +1695,8 @@ mod tests { { let path = vec![key_20.as_slice(), identity_id.as_slice(), key_80.as_slice()]; let query = path_query - .query_items_at_path(&path) + .query_items_at_path(&path, grove_version) + .expect("expected valid version") .expect("expected query items"); assert_eq!( diff --git a/grovedb/src/query_result_type.rs b/grovedb/src/query_result_type.rs index 6bf8bd5b..02035238 100644 --- a/grovedb/src/query_result_type.rs +++ b/grovedb/src/query_result_type.rs @@ -7,6 +7,7 @@ use std::{ }; pub use grovedb_merk::proofs::query::{Key, Path, PathKey}; +use grovedb_version::{version::GroveVersion, TryFromVersioned}; use crate::{ operations::proof::util::{ @@ -503,11 +504,14 @@ pub type PathKeyElementTrio = (Path, Key, Element); pub type PathKeyOptionalElementTrio = (Path, Key, Option); #[cfg(any(feature = "full", feature = "verify"))] -impl TryFrom for PathKeyOptionalElementTrio { +impl TryFromVersioned for PathKeyOptionalElementTrio { type Error = Error; - fn try_from(proved_path_key_value: ProvedPathKeyValue) -> Result { - let element = Element::deserialize(proved_path_key_value.value.as_slice())?; + fn try_from_versioned( + proved_path_key_value: ProvedPathKeyValue, + grove_version: &GroveVersion, + ) -> Result { + let element = Element::deserialize(proved_path_key_value.value.as_slice(), grove_version)?; Ok(( proved_path_key_value.path, proved_path_key_value.key, @@ -517,13 +521,16 @@ impl TryFrom for 
PathKeyOptionalElementTrio { } #[cfg(any(feature = "full", feature = "verify"))] -impl TryFrom for PathKeyOptionalElementTrio { +impl TryFromVersioned for PathKeyOptionalElementTrio { type Error = Error; - fn try_from(proved_path_key_value: ProvedPathKeyOptionalValue) -> Result { + fn try_from_versioned( + proved_path_key_value: ProvedPathKeyOptionalValue, + grove_version: &GroveVersion, + ) -> Result { let element = proved_path_key_value .value - .map(|e| Element::deserialize(e.as_slice())) + .map(|e| Element::deserialize(e.as_slice(), grove_version)) .transpose()?; Ok(( proved_path_key_value.path, @@ -536,6 +543,8 @@ impl TryFrom for PathKeyOptionalElementTrio { #[cfg(feature = "full")] #[cfg(test)] mod tests { + use grovedb_version::{version::GroveVersion, TryIntoVersioned}; + use crate::{ operations::proof::util::ProvedPathKeyValue, query_result_type::PathKeyOptionalElementTrio, Element, @@ -543,6 +552,7 @@ mod tests { #[test] fn test_single_proved_path_key_value_to_path_key_optional_element() { + let grove_version = GroveVersion::latest(); let path = vec![b"1".to_vec(), b"2".to_vec()]; let proved_path_key_value = ProvedPathKeyValue { path: path.clone(), @@ -551,7 +561,7 @@ mod tests { proof: [0; 32], }; let path_key_element_trio: PathKeyOptionalElementTrio = proved_path_key_value - .try_into() + .try_into_versioned(grove_version) .expect("should convert to path key optional element trio"); assert_eq!( path_key_element_trio, diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index 09fc1684..aa01d400 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -351,6 +351,7 @@ impl ReferencePathType { #[cfg(test)] mod tests { use grovedb_merk::proofs::Query; + use grovedb_version::version::GroveVersion; use crate::{ reference_path::{path_from_reference_path_type, ReferencePathType}, @@ -465,7 +466,8 @@ mod tests { #[test] fn test_query_many_with_different_reference_types() { - let db = make_deep_tree(); + let 
grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); db.insert( [TEST_LEAF, b"innertree4"].as_ref(), @@ -477,6 +479,7 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -490,6 +493,7 @@ mod tests { )), None, None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -503,6 +507,7 @@ mod tests { )), None, None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -513,7 +518,7 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree4".to_vec()], query); let result = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("should query items"); assert_eq!(result.0.len(), 5); @@ -529,12 +534,12 @@ mod tests { ); let proof = db - .prove_query(&path_query, None) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should generate proof"); - let (hash, result) = - GroveDb::verify_query_raw(&proof, &path_query).expect("should verify proof"); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let (hash, result) = GroveDb::verify_query_raw(&proof, &path_query, grove_version) + .expect("should verify proof"); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result.len(), 5); } } diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index b6533868..876fe62c 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -14,6 +14,7 @@ use grovedb_path::SubtreePath; use grovedb_storage::rocksdb_storage::RocksDbStorage; #[rustfmt::skip] use grovedb_storage::rocksdb_storage::storage_context::context_immediate::PrefixedRocksDbImmediateStorageContext; +use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; use crate::{replication, Error, GroveDb, Transaction, TransactionArg}; @@ -88,7 +89,7 @@ impl fmt::Debug for 
SubtreesMetadata { " prefix:{:?} -> path:{:?}", hex::encode(prefix), metadata_path_str - ); + )?; } Ok(()) } @@ -166,10 +167,23 @@ impl GroveDb { // tx: Transaction. Function returns the data by opening merks at given tx. // TODO: Add a SubTreePath as param and start searching from that path instead // of root (as it is now) - pub fn get_subtrees_metadata(&self, tx: TransactionArg) -> Result { - let mut subtrees_metadata = crate::replication::SubtreesMetadata::new(); + pub fn get_subtrees_metadata( + &self, + tx: TransactionArg, + grove_version: &GroveVersion, + ) -> Result { + check_grovedb_v0!( + "is_empty_tree", + grove_version + .grovedb_versions + .replication + .get_subtrees_metadata + ); + let mut subtrees_metadata = SubtreesMetadata::new(); - let subtrees_root = self.find_subtrees(&SubtreePath::empty(), tx).value?; + let subtrees_root = self + .find_subtrees(&SubtreePath::empty(), tx, grove_version) + .value?; for subtree in subtrees_root.into_iter() { let subtree_path: Vec<&[u8]> = subtree.iter().map(|vec| vec.as_slice()).collect(); let path: &[&[u8]] = &subtree_path; @@ -181,13 +195,14 @@ impl GroveDb { (Some((parent_path, _)), Some(parent_key)) => match tx { None => { let parent_merk = self - .open_non_transactional_merk_at_path(parent_path, None) + .open_non_transactional_merk_at_path(parent_path, None, grove_version) .value?; if let Ok(Some((elem_value, elem_value_hash))) = parent_merk .get_value_and_value_hash( parent_key, true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .value { @@ -200,13 +215,14 @@ impl GroveDb { } Some(t) => { let parent_merk = self - .open_transactional_merk_at_path(parent_path, t, None) + .open_transactional_merk_at_path(parent_path, t, None, grove_version) .value?; if let Ok(Some((elem_value, elem_value_hash))) = parent_merk .get_value_and_value_hash( parent_key, true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .value { 
@@ -248,7 +264,12 @@ impl GroveDb { global_chunk_id: &[u8], tx: TransactionArg, version: u16, + grove_version: &GroveVersion, ) -> Result, Error> { + check_grovedb_v0!( + "fetch_chunk", + grove_version.grovedb_versions.replication.fetch_chunk + ); // For now, only CURRENT_STATE_SYNC_VERSION is supported if version != CURRENT_STATE_SYNC_VERSION { return Err(Error::CorruptedData( @@ -256,11 +277,11 @@ impl GroveDb { )); } - let root_app_hash = self.root_hash(tx).value?; + let root_app_hash = self.root_hash(tx, grove_version).value?; let (chunk_prefix, chunk_id) = replication::util_split_global_chunk_id(global_chunk_id, &root_app_hash)?; - let subtrees_metadata = self.get_subtrees_metadata(tx)?; + let subtrees_metadata = self.get_subtrees_metadata(tx, grove_version)?; match subtrees_metadata.data.get(&chunk_prefix) { Some(path_data) => { @@ -271,7 +292,7 @@ impl GroveDb { match tx { None => { let merk = self - .open_non_transactional_merk_at_path(path.into(), None) + .open_non_transactional_merk_at_path(path.into(), None, grove_version) .value?; if merk.is_empty_tree().unwrap() { @@ -281,7 +302,7 @@ impl GroveDb { let chunk_producer_res = ChunkProducer::new(&merk); match chunk_producer_res { Ok(mut chunk_producer) => { - let chunk_res = chunk_producer.chunk(&chunk_id); + let chunk_res = chunk_producer.chunk(&chunk_id, grove_version); match chunk_res { Ok((chunk, _)) => match util_encode_vec_ops(chunk) { Ok(op_bytes) => Ok(op_bytes), @@ -301,7 +322,7 @@ impl GroveDb { } Some(t) => { let merk = self - .open_transactional_merk_at_path(path.into(), t, None) + .open_transactional_merk_at_path(path.into(), t, None, grove_version) .value?; if merk.is_empty_tree().unwrap() { @@ -311,7 +332,7 @@ impl GroveDb { let chunk_producer_res = ChunkProducer::new(&merk); match chunk_producer_res { Ok(mut chunk_producer) => { - let chunk_res = chunk_producer.chunk(&chunk_id); + let chunk_res = chunk_producer.chunk(&chunk_id, grove_version); match chunk_res { Ok((chunk, _)) => match 
util_encode_vec_ops(chunk) { Ok(op_bytes) => Ok(op_bytes), @@ -347,7 +368,15 @@ impl GroveDb { app_hash: CryptoHash, tx: &'db Transaction, version: u16, + grove_version: &GroveVersion, ) -> Result { + check_grovedb_v0!( + "start_snapshot_syncing", + grove_version + .grovedb_versions + .replication + .start_snapshot_syncing + ); // For now, only CURRENT_STATE_SYNC_VERSION is supported if version != CURRENT_STATE_SYNC_VERSION { return Err(Error::CorruptedData( @@ -375,7 +404,7 @@ impl GroveDb { let mut root_prefix_state_sync_info = SubtreeStateSyncInfo::default(); let root_prefix = [0u8; 32]; - if let Ok(merk) = self.open_merk_for_replication(SubtreePath::empty(), tx) { + if let Ok(merk) = self.open_merk_for_replication(SubtreePath::empty(), tx, grove_version) { let restorer = Restorer::new(merk, app_hash, None); root_prefix_state_sync_info.restorer = Some(restorer); root_prefix_state_sync_info.pending_chunks.insert(vec![]); @@ -407,7 +436,12 @@ impl GroveDb { chunk: Vec, tx: &'db Transaction, version: u16, + grove_version: &GroveVersion, ) -> Result<(Vec>, MultiStateSyncInfo), Error> { + check_grovedb_v0!( + "apply_chunk", + grove_version.grovedb_versions.replication.apply_chunk + ); // For now, only CURRENT_STATE_SYNC_VERSION is supported if version != CURRENT_STATE_SYNC_VERSION { return Err(Error::CorruptedData( @@ -432,7 +466,7 @@ impl GroveDb { } if let Some(subtree_state_sync) = state_sync_info.current_prefixes.remove(&chunk_prefix) { if let Ok((res, mut new_subtree_state_sync)) = - self.apply_inner_chunk(subtree_state_sync, &chunk_id, chunk) + self.apply_inner_chunk(subtree_state_sync, &chunk_id, chunk, grove_version) { if !res.is_empty() { for local_chunk_id in res.iter() { @@ -462,7 +496,7 @@ impl GroveDb { )), Some(restorer) => { if (new_subtree_state_sync.num_processed_chunks > 0) - && (restorer.finalize().is_err()) + && (restorer.finalize(grove_version).is_err()) { return Err(Error::InternalError( "Unable to finalize Merk".to_string(), @@ -472,7 +506,8 @@ 
impl GroveDb { // Subtree was successfully save. Time to discover new subtrees that // need to be processed - let subtrees_metadata = self.get_subtrees_metadata(Some(tx))?; + let subtrees_metadata = + self.get_subtrees_metadata(Some(tx), grove_version)?; if let Some(value) = subtrees_metadata.data.get(&chunk_prefix) { println!( " path:{:?} done (num_processed_chunks:{:?})", @@ -481,9 +516,12 @@ impl GroveDb { ); } - if let Ok((res, new_state_sync_info)) = - self.discover_subtrees(state_sync_info, subtrees_metadata, tx) - { + if let Ok((res, new_state_sync_info)) = self.discover_subtrees( + state_sync_info, + subtrees_metadata, + tx, + grove_version, + ) { next_chunk_ids.extend(res); Ok((next_chunk_ids, new_state_sync_info)) } else { @@ -515,6 +553,7 @@ impl GroveDb { mut state_sync_info: SubtreeStateSyncInfo<'db>, chunk_id: &[u8], chunk_data: Vec, + grove_version: &GroveVersion, ) -> Result<(Vec>, SubtreeStateSyncInfo), Error> { let mut res = vec![]; @@ -529,7 +568,7 @@ impl GroveDb { if !chunk_data.is_empty() { match util_decode_vec_ops(chunk_data) { Ok(ops) => { - match restorer.process_chunk(chunk_id, ops) { + match restorer.process_chunk(chunk_id, ops, grove_version) { Ok(next_chunk_ids) => { state_sync_info.num_processed_chunks += 1; for next_chunk_id in next_chunk_ids { @@ -576,6 +615,7 @@ impl GroveDb { mut state_sync_info: MultiStateSyncInfo<'db>, subtrees_metadata: SubtreesMetadata, tx: &'db Transaction, + grove_version: &GroveVersion, ) -> Result<(Vec>, MultiStateSyncInfo), Error> { let mut res = vec![]; @@ -594,7 +634,7 @@ impl GroveDb { ); let mut subtree_state_sync_info = SubtreeStateSyncInfo::default(); - if let Ok(merk) = self.open_merk_for_replication(path.into(), tx) { + if let Ok(merk) = self.open_merk_for_replication(path.into(), tx, grove_version) { let restorer = Restorer::new(merk, *s_elem_value_hash, Some(*s_actual_value_hash)); subtree_state_sync_info.restorer = Some(restorer); diff --git a/grovedb/src/tests/common.rs 
b/grovedb/src/tests/common.rs index 2fe8dfde..a02ef9c6 100644 --- a/grovedb/src/tests/common.rs +++ b/grovedb/src/tests/common.rs @@ -1,6 +1,7 @@ //! Common tests use grovedb_path::SubtreePath; +use grovedb_version::version::GroveVersion; use crate::{operations::proof::util::ProvedPathKeyValues, Element, Error}; @@ -17,7 +18,7 @@ pub fn compare_result_tuples( } fn deserialize_and_extract_item_bytes(raw_bytes: &[u8]) -> Result, Error> { - let elem = Element::deserialize(raw_bytes)?; + let elem = Element::deserialize(raw_bytes, GroveVersion::latest())?; match elem { Element::Item(item, _) => Ok(item), _ => Err(Error::CorruptedPath("expected only item type".to_string())), diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index 43a9c34d..226a047b 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -13,6 +13,7 @@ use std::{ option::Option::None, }; +use grovedb_version::version::GroveVersion; use grovedb_visualize::{Drawer, Visualize}; use tempfile::TempDir; @@ -67,36 +68,44 @@ pub fn make_empty_grovedb() -> TempGroveDb { } /// A helper method to create GroveDB with one leaf for a root tree -pub fn make_test_grovedb() -> TempGroveDb { +pub fn make_test_grovedb(grove_version: &GroveVersion) -> TempGroveDb { // Tree Structure // root // test_leaf // another_test_leaf let tmp_dir = TempDir::new().unwrap(); let mut db = GroveDb::open(tmp_dir.path()).unwrap(); - add_test_leaves(&mut db); + add_test_leaves(&mut db, grove_version); TempGroveDb { _tmp_dir: tmp_dir, grove_db: db, } } -fn add_test_leaves(db: &mut GroveDb) { - db.insert(EMPTY_PATH, TEST_LEAF, Element::empty_tree(), None, None) - .unwrap() - .expect("successful root tree leaf insert"); +fn add_test_leaves(db: &mut GroveDb, grove_version: &GroveVersion) { + db.insert( + EMPTY_PATH, + TEST_LEAF, + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful root tree leaf insert"); db.insert( EMPTY_PATH, ANOTHER_TEST_LEAF, Element::empty_tree(), 
None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf 2 insert"); } -pub fn make_deep_tree() -> TempGroveDb { +pub fn make_deep_tree(grove_version: &GroveVersion) -> TempGroveDb { // Tree Structure // root // test_leaf @@ -136,11 +145,18 @@ pub fn make_deep_tree() -> TempGroveDb { // k14,v14 // Insert elements into grovedb instance - let temp_db = make_test_grovedb(); + let temp_db = make_test_grovedb(grove_version); // add an extra root leaf temp_db - .insert(EMPTY_PATH, DEEP_LEAF, Element::empty_tree(), None, None) + .insert( + EMPTY_PATH, + DEEP_LEAF, + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() .expect("successful root tree leaf insert"); @@ -152,6 +168,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -162,6 +179,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -172,6 +190,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -182,6 +201,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -192,6 +212,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -202,6 +223,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -213,6 +235,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -223,6 +246,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value2".to_vec()), None, None, + grove_version, ) .unwrap() 
.expect("successful subtree insert"); @@ -233,6 +257,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -243,6 +268,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value4".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -253,6 +279,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value5".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -263,6 +290,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -273,6 +301,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value4".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -283,6 +312,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -293,6 +323,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -303,6 +334,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -313,6 +345,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -323,6 +356,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -334,6 +368,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -344,6 +379,7 @@ pub fn make_deep_tree() -> TempGroveDb { 
Element::new_item(b"value2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -354,6 +390,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -364,6 +401,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value4".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -374,6 +412,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value5".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -384,6 +423,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value6".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -395,6 +435,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value7".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -405,6 +446,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value8".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -415,6 +457,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value9".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -425,6 +468,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value10".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -435,6 +479,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value11".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -445,6 +490,7 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value12".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -455,6 +501,7 @@ pub fn make_deep_tree() -> TempGroveDb { 
Element::new_item(b"value13".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -465,13 +512,14 @@ pub fn make_deep_tree() -> TempGroveDb { Element::new_item(b"value14".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); temp_db } -pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { +pub fn make_deep_tree_with_sum_trees(grove_version: &GroveVersion) -> TempGroveDb { // Tree Structure // root // deep_leaf @@ -506,11 +554,18 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { // .. -> .. // z -> "z" - let temp_db = make_test_grovedb(); + let temp_db = make_test_grovedb(grove_version); // Add deep_leaf to root temp_db - .insert(EMPTY_PATH, DEEP_LEAF, Element::empty_tree(), None, None) + .insert( + EMPTY_PATH, + DEEP_LEAF, + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() .expect("successful root tree leaf insert"); @@ -522,6 +577,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -534,6 +590,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::new_item("empty".as_bytes().to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -546,6 +603,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::new_item("storage".as_bytes().to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -559,6 +617,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -572,6 +631,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::new_sum_tree(None), None, None, + grove_version, ) .unwrap() .expect("successful sum tree insert"); @@ -584,6 +644,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::SumItem(1, None), None, None, + 
grove_version, ) .unwrap() .expect("successful sum item insert"); @@ -594,6 +655,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::SumItem(1, None), None, None, + grove_version, ) .unwrap() .expect("successful sum item insert"); @@ -607,6 +669,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::new_item(value.to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -618,6 +681,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::new_sum_tree(None), None, None, + grove_version, ) .unwrap() .expect("successful sum tree insert"); @@ -634,6 +698,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::SumItem(value1, None), None, None, + grove_version, ) .unwrap() .expect("successful sum item insert"); @@ -644,6 +709,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::SumItem(value2, None), None, None, + grove_version, ) .unwrap() .expect("successful sum item insert"); @@ -657,6 +723,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::SumItem(4, None), None, None, + grove_version, ) .unwrap() .expect("successful sum item insert"); @@ -667,6 +734,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::SumItem(4, None), None, None, + grove_version, ) .unwrap() .expect("successful sum item insert"); @@ -680,6 +748,7 @@ pub fn make_deep_tree_with_sum_trees() -> TempGroveDb { Element::new_item(vec![letter]), None, None, + grove_version, ) .unwrap() .expect(&format!("successful item insert for {}", letter as char)); @@ -699,7 +768,8 @@ mod tests { #[test] fn test_element_with_flags() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -707,6 +777,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -716,6 +787,7 @@ mod tests { 
Element::new_item(b"flagless".to_vec()), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -725,6 +797,7 @@ mod tests { Element::new_item_with_flags(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -734,6 +807,7 @@ mod tests { Element::new_tree_with_flags(None, Some([1].to_vec())), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); @@ -750,24 +824,30 @@ mod tests { ), None, None, + grove_version, ) .unwrap() .expect("should insert subtree successfully"); let element_without_flag = db - .get([TEST_LEAF, b"key1"].as_ref(), b"elem1", None) + .get([TEST_LEAF, b"key1"].as_ref(), b"elem1", None, grove_version) .unwrap() .expect("should get successfully"); let element_with_flag = db - .get([TEST_LEAF, b"key1"].as_ref(), b"elem2", None) + .get([TEST_LEAF, b"key1"].as_ref(), b"elem2", None, grove_version) .unwrap() .expect("should get successfully"); let tree_element_with_flag = db - .get([TEST_LEAF, b"key1"].as_ref(), b"elem3", None) + .get([TEST_LEAF, b"key1"].as_ref(), b"elem3", None, grove_version) .unwrap() .expect("should get successfully"); let flagged_ref_follow = db - .get([TEST_LEAF, b"key1", b"elem3"].as_ref(), b"elem4", None) + .get( + [TEST_LEAF, b"key1", b"elem3"].as_ref(), + b"elem4", + None, + grove_version, + ) .unwrap() .expect("should get successfully"); @@ -785,6 +865,7 @@ mod tests { true, QueryKeyElementPairResultType, None, + grove_version, ) .unwrap() .expect("should get successfully"); @@ -827,23 +908,28 @@ mod tests { SizedQuery::new(query, None, None), ); let proof = db - .prove_query(&path_query, None) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should successfully create proof"); - let (root_hash, result_set) = - GroveDb::verify_query_raw(&proof, &path_query).expect("should verify proof"); - assert_eq!(root_hash, 
db.grove_db.root_hash(None).unwrap().unwrap()); + let (root_hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query, grove_version) + .expect("should verify proof"); + assert_eq!( + root_hash, + db.grove_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 3); assert_eq!( - Element::deserialize(&result_set[0].value).expect("should deserialize element"), + Element::deserialize(&result_set[0].value, grove_version) + .expect("should deserialize element"), Element::Item(b"flagless".to_vec(), None) ); assert_eq!( - Element::deserialize(&result_set[1].value).expect("should deserialize element"), + Element::deserialize(&result_set[1].value, grove_version) + .expect("should deserialize element"), Element::Item(b"flagged".to_vec(), Some([4, 5, 6, 7, 8].to_vec())) ); assert_eq!( - Element::deserialize(&result_set[2].value) + Element::deserialize(&result_set[2].value, grove_version) .expect("should deserialize element") .get_flags(), &Some([1].to_vec()) @@ -852,13 +938,14 @@ mod tests { #[test] fn test_cannot_update_populated_tree_item() { + let grove_version = GroveVersion::latest(); // This test shows that you cannot update a tree item - // in a way that disconnects it's root hash from that of + // in a way that disconnects its root hash from that of // the merk it points to. 
- let db = make_deep_tree(); + let db = make_deep_tree(grove_version); let old_element = db - .get([TEST_LEAF].as_ref(), b"innertree", None) + .get([TEST_LEAF].as_ref(), b"innertree", None, grove_version) .unwrap() .expect("should fetch item"); @@ -869,12 +956,13 @@ mod tests { new_element.clone(), None, None, + grove_version, ) .unwrap() .expect_err("should not override tree"); let current_element = db - .get([TEST_LEAF].as_ref(), b"innertree", None) + .get([TEST_LEAF].as_ref(), b"innertree", None, grove_version) .unwrap() .expect("should fetch item"); @@ -884,8 +972,9 @@ mod tests { #[test] fn test_changes_propagated() { - let db = make_test_grovedb(); - let old_hash = db.root_hash(None).unwrap().unwrap(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let old_hash = db.root_hash(None, grove_version).unwrap().unwrap(); let element = Element::new_item(b"ayy".to_vec()); // Insert some nested subtrees @@ -895,6 +984,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -905,6 +995,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -915,30 +1006,41 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("successful get"), + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("successful get"), element ); - assert_ne!(old_hash, db.root_hash(None).unwrap().unwrap()); + assert_ne!( + old_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); } // TODO: Add solid test cases to this #[test] fn test_references() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), 
b"merk_1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -948,6 +1050,7 @@ mod tests { Element::new_item(b"value1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -957,6 +1060,7 @@ mod tests { Element::new_item(b"value2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -967,6 +1071,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -983,6 +1088,7 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -996,22 +1102,24 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); assert!(db - .get([TEST_LEAF].as_ref(), b"merk_1", None) + .get([TEST_LEAF].as_ref(), b"merk_1", None, grove_version) .unwrap() .is_ok()); assert!(db - .get([TEST_LEAF].as_ref(), b"merk_2", None) + .get([TEST_LEAF].as_ref(), b"merk_2", None, grove_version) .unwrap() .is_ok()); } #[test] fn test_follow_references() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); // Insert an item to refer to @@ -1021,6 +1129,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -1030,6 +1139,7 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -1045,12 +1155,13 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("successful reference insert"); assert_eq!( - db.get([TEST_LEAF].as_ref(), b"reference_key", None) + db.get([TEST_LEAF].as_ref(), b"reference_key", None, grove_version) .unwrap() .expect("successful get"), element @@ -1059,7 +1170,8 @@ mod tests { #[test] fn test_reference_must_point_to_item() { - let db = make_test_grovedb(); + let 
grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let result = db .insert( @@ -1071,6 +1183,7 @@ mod tests { ])), None, None, + grove_version, ) .unwrap(); @@ -1079,8 +1192,9 @@ mod tests { #[test] fn test_too_many_indirections() { + let grove_version = GroveVersion::latest(); use crate::operations::get::MAX_REFERENCE_HOPS; - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let keygen = |idx| format!("key{}", idx).bytes().collect::>(); @@ -1090,6 +1204,7 @@ mod tests { Element::new_item(b"oops".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -1104,6 +1219,7 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("successful reference insert"); @@ -1119,12 +1235,18 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("expected insert"); let result = db - .get([TEST_LEAF].as_ref(), &keygen(MAX_REFERENCE_HOPS + 1), None) + .get( + [TEST_LEAF].as_ref(), + &keygen(MAX_REFERENCE_HOPS + 1), + None, + grove_version, + ) .unwrap(); assert!(matches!(result, Err(Error::ReferenceLimit))); @@ -1132,7 +1254,8 @@ mod tests { #[test] fn test_reference_value_affects_state() { - let db_one = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db_one = make_test_grovedb(grove_version); db_one .insert( [TEST_LEAF].as_ref(), @@ -1140,6 +1263,7 @@ mod tests { Element::new_item(vec![0]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -1153,11 +1277,12 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("should insert item"); - let db_two = make_test_grovedb(); + let db_two = make_test_grovedb(grove_version); db_two .insert( [TEST_LEAF].as_ref(), @@ -1165,6 +1290,7 @@ mod tests { Element::new_item(vec![0]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -1178,17 +1304,18 @@ mod tests { )), None, None, + grove_version, ) .unwrap() .expect("should insert item"); 
assert_ne!( db_one - .root_hash(None) + .root_hash(None, grove_version) .unwrap() .expect("should return root hash"), db_two - .root_hash(None) + .root_hash(None, grove_version) .unwrap() .expect("should return toor hash") ); @@ -1196,12 +1323,13 @@ mod tests { #[test] fn test_tree_structure_is_persistent() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().unwrap(); let element = Element::new_item(b"ayy".to_vec()); // Create a scoped GroveDB let prev_root_hash = { let mut db = GroveDb::open(tmp_dir.path()).unwrap(); - add_test_leaves(&mut db); + add_test_leaves(&mut db, grove_version); // Insert some nested subtrees db.insert( @@ -1210,6 +1338,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -1219,6 +1348,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -1229,71 +1359,105 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("successful get 1"), + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("successful get 1"), element ); - db.root_hash(None).unwrap().unwrap() + db.root_hash(None, grove_version).unwrap().unwrap() }; // Open a persisted GroveDB let db = GroveDb::open(tmp_dir).unwrap(); assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("successful get 2"), + db.get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("successful get 2"), element ); assert!(db - .get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key4", None) + .get( + [TEST_LEAF, b"key1", b"key2"].as_ref(), + b"key4", + None, + grove_version + ) .unwrap() .is_err()); - assert_eq!(prev_root_hash, 
db.root_hash(None).unwrap().unwrap()); + assert_eq!( + prev_root_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); } #[test] fn test_root_tree_leaves_are_noted() { - let db = make_test_grovedb(); - db.check_subtree_exists_path_not_found([TEST_LEAF].as_ref().into(), None) - .unwrap() - .expect("should exist"); - db.check_subtree_exists_path_not_found([ANOTHER_TEST_LEAF].as_ref().into(), None) + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.check_subtree_exists_path_not_found([TEST_LEAF].as_ref().into(), None, grove_version) .unwrap() .expect("should exist"); + db.check_subtree_exists_path_not_found( + [ANOTHER_TEST_LEAF].as_ref().into(), + None, + grove_version, + ) + .unwrap() + .expect("should exist"); } #[test] fn test_proof_for_invalid_path_root_key() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let query = Query::new(); let path_query = PathQuery::new_unsized(vec![b"invalid_path_key".to_vec()], query); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 0); } #[test] fn test_proof_for_invalid_path() { - let db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); let query = Query::new(); let path_query = PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"invalid_key".to_vec()], query); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db + 
.prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 0); let query = Query::new(); @@ -1306,11 +1470,15 @@ mod tests { query, ); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 0); let query = Query::new(); @@ -1324,11 +1492,15 @@ mod tests { query, ); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 0); let query = Query::new(); @@ -1342,17 +1514,22 @@ mod tests { query, ); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), 
&path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 0); } #[test] fn test_proof_for_non_existent_data() { - let temp_db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let temp_db = make_test_grovedb(grove_version); let mut query = Query::new(); query.insert_key(b"key1".to_vec()); @@ -1360,16 +1537,24 @@ mod tests { // path to empty subtree let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 0); } #[test] fn test_path_query_proofs_without_subquery_with_reference() { + let grove_version = GroveVersion::latest(); // Tree Structure // root // test_leaf @@ -1386,7 +1571,7 @@ mod tests { // k4,v4 // Insert elements into grovedb instance - let temp_db = make_test_grovedb(); + let temp_db = make_test_grovedb(grove_version); // Insert level 1 nodes temp_db .insert( @@ -1395,6 +1580,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1405,6 +1591,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1415,6 +1602,7 @@ mod tests { Element::empty_tree(), None, None, + 
grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1426,6 +1614,7 @@ mod tests { Element::new_item(b"value1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1436,6 +1625,7 @@ mod tests { Element::new_item(b"value2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1446,6 +1636,7 @@ mod tests { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1456,6 +1647,7 @@ mod tests { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1470,6 +1662,7 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1480,6 +1673,7 @@ mod tests { Element::new_item(b"value4".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1494,6 +1688,7 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1507,7 +1702,10 @@ mod tests { query, ); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); assert_eq!( hex::encode(&proof), "005e02cfb7d035b8f4a3631be46c597510a16770c15c74331b3dc8dcb577a206e49675040a746\ @@ -1521,11 +1719,19 @@ mod tests { 4ffdbc429a89c9b6620e7224d73c2ee505eb7e6fb5eb574e1a8dc8b0d0884110001" ); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value1".to_vec()).serialize().unwrap(); - let r2 = Element::new_item(b"value4".to_vec()).serialize().unwrap(); + assert_eq!( + hash, + temp_db.root_hash(None, 
grove_version).unwrap().unwrap() + ); + let r1 = Element::new_item(b"value1".to_vec()) + .serialize(grove_version) + .unwrap(); + let r2 = Element::new_item(b"value4".to_vec()) + .serialize(grove_version) + .unwrap(); compare_result_tuples( result_set, @@ -1535,6 +1741,7 @@ mod tests { #[test] fn test_path_query_proofs_without_subquery() { + let grove_version = GroveVersion::latest(); // Tree Structure // root // test_leaf @@ -1549,7 +1756,7 @@ mod tests { // k4,v4 // Insert elements into grovedb instance - let temp_db = make_test_grovedb(); + let temp_db = make_test_grovedb(grove_version); // Insert level 1 nodes temp_db .insert( @@ -1558,6 +1765,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1568,6 +1776,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1578,6 +1787,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1589,6 +1799,7 @@ mod tests { Element::new_item(b"value1".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1599,6 +1810,7 @@ mod tests { Element::new_item(b"value2".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1609,6 +1821,7 @@ mod tests { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1619,6 +1832,7 @@ mod tests { Element::new_item(b"value3".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1629,6 +1843,7 @@ mod tests { Element::new_item(b"value4".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1640,7 +1855,10 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - let proof = temp_db.prove_query(&path_query, 
None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); assert_eq!( hex::encode(proof.as_slice()), "005c0409746573745f6c656166000d020109696e6e65727472656500fafa16d06e8d8696dae443731\ @@ -1652,10 +1870,16 @@ mod tests { b979cbe4a51e0b2f08d110001" ); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value1".to_vec()).serialize().unwrap(); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + let r1 = Element::new_item(b"value1".to_vec()) + .serialize(grove_version) + .unwrap(); compare_result_tuples(result_set, vec![(b"key1".to_vec(), r1)]); // Range query + limit @@ -1666,12 +1890,21 @@ mod tests { SizedQuery::new(query, Some(1), None), ); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value2".to_vec()).serialize().unwrap(); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + let r1 = Element::new_item(b"value2".to_vec()) + .serialize(grove_version) + .unwrap(); compare_result_tuples(result_set, vec![(b"key2".to_vec(), r1)]); // Range query + direction + limit @@ -1682,13 +1915,24 @@ mod tests { SizedQuery::new(query, Some(2), None), ); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + 
.prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); - let r1 = Element::new_item(b"value3".to_vec()).serialize().unwrap(); - let r2 = Element::new_item(b"value2".to_vec()).serialize().unwrap(); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); + let r1 = Element::new_item(b"value3".to_vec()) + .serialize(grove_version) + .unwrap(); + let r2 = Element::new_item(b"value2".to_vec()) + .serialize(grove_version) + .unwrap(); compare_result_tuples( result_set, vec![(b"key3".to_vec(), r1), (b"key2".to_vec(), r2)], @@ -1697,7 +1941,8 @@ mod tests { #[test] fn test_path_query_proofs_with_default_subquery() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); let mut query = Query::new(); query.insert_all(); @@ -1708,11 +1953,18 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 5); let keys = [ @@ -1729,7 +1981,7 @@ mod tests { b"value4".to_vec(), b"value5".to_vec(), ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = 
values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); @@ -1742,16 +1994,23 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 2); let keys = [b"key4".to_vec(), b"key5".to_vec()]; let values = [b"value4".to_vec(), b"value5".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); @@ -1765,18 +2024,25 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect( - "should + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version).expect( + "should execute proof", - ); + ); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); 
assert_eq!(result_set.len(), 3); let keys = [b"key2".to_vec(), b"key3".to_vec(), b"key4".to_vec()]; let values = [b"value2".to_vec(), b"value3".to_vec(), b"value4".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); @@ -1795,11 +2061,18 @@ mod tests { let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 14); let keys = [ @@ -1834,14 +2107,15 @@ mod tests { b"value13".to_vec(), b"value14".to_vec(), ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); } #[test] fn test_path_query_proofs_with_subquery_path() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); let mut query = Query::new(); query.insert_all(); @@ -1854,16 +2128,23 @@ mod tests { let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = 
temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 3); let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); @@ -1878,15 +2159,22 @@ mod tests { query.set_subquery(subq); let path_query = PathQuery::new_unsized(vec![], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 3); let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = 
keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); @@ -1902,10 +2190,17 @@ mod tests { let path_query = PathQuery::new_unsized(vec![b"deep_leaf".to_vec(), b"deep_node_1".to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 6); let keys = [ @@ -1924,7 +2219,7 @@ mod tests { b"value5".to_vec(), b"value6".to_vec(), ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); @@ -1944,16 +2239,24 @@ mod tests { query.set_subquery(subq); let path_query = PathQuery::new_unsized(vec![], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 0); } #[test] fn test_path_query_proofs_with_key_and_subquery() { - let temp_db = make_deep_tree(); + 
let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); let mut query = Query::new(); query.insert_key(b"deep_node_1".to_vec()); @@ -1966,23 +2269,31 @@ mod tests { let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 3); let keys = [b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()]; let values = [b"value1".to_vec(), b"value2".to_vec(), b"value3".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); } #[test] fn test_path_query_proofs_with_conditional_subquery() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); let mut query = Query::new(); query.insert_all(); @@ -2002,11 +2313,18 @@ mod tests { query.set_subquery(subquery); let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + 
GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); let keys = [ b"deeper_1".to_vec(), @@ -2046,11 +2364,18 @@ mod tests { query.set_subquery(subquery); let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 6); let keys = [ @@ -2070,7 +2395,7 @@ mod tests { b"value11".to_vec(), ]; let elements = values - .map(|x| Element::new_item(x).serialize().unwrap()) + .map(|x| Element::new_item(x).serialize(grove_version).unwrap()) .to_vec(); // compare_result_sets(&elements, &result_set); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); @@ -2079,7 +2404,8 @@ mod tests { #[test] fn test_path_query_proofs_with_sized_query() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); let mut query = Query::new(); query.insert_all(); @@ -2105,25 +2431,33 @@ mod tests { let path_query = PathQuery::new( vec![DEEP_LEAF.to_vec()], SizedQuery::new(query, Some(5), None), /* we need to add a bigger limit because of - * empty proved sub trees */ + * empty proved subtrees */ ); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, 
grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 3); let keys = [b"key4".to_vec(), b"key5".to_vec(), b"key6".to_vec()]; let values = [b"value4".to_vec(), b"value5".to_vec(), b"value6".to_vec()]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); } #[test] fn test_path_query_proof_with_range_subquery_and_limit() { - let db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); // Create a path query with a range query, subquery, and limit let mut main_query = Query::new(); @@ -2140,15 +2474,18 @@ mod tests { ); // Generate proof - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); // Verify proof - let verification_result = GroveDb::verify_query_raw(&proof, &path_query); + let verification_result = GroveDb::verify_query_raw(&proof, &path_query, grove_version); match verification_result { Ok((hash, result_set)) => { // Check if the hash matches the root hash - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); // Check if we got the correct number of results assert_eq!(result_set.len(), 3, "Expected 3 results due to limit"); } @@ -2163,13 +2500,16 @@ mod tests { SizedQuery::new(main_query.clone(), 
None, None), ); - let proof_no_limit = db.prove_query(&path_query_no_limit, None).unwrap().unwrap(); + let proof_no_limit = db + .prove_query(&path_query_no_limit, None, grove_version) + .unwrap() + .unwrap(); let verification_result_no_limit = - GroveDb::verify_query_raw(&proof_no_limit, &path_query_no_limit); + GroveDb::verify_query_raw(&proof_no_limit, &path_query_no_limit, grove_version); match verification_result_no_limit { Ok((hash, result_set)) => { - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 5, "Expected 5 results without limit"); } Err(e) => { @@ -2180,7 +2520,8 @@ mod tests { #[test] fn test_path_query_proof_with_range_subquery_and_limit_with_sum_trees() { - let db = make_deep_tree_with_sum_trees(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree_with_sum_trees(grove_version); // Create a path query with a range query, subquery, and limit let mut main_query = Query::new(); @@ -2207,6 +2548,7 @@ mod tests { false, QueryResultType::QueryPathKeyElementTrioResultType, None, + grove_version, ) .unwrap() .expect("expected query to execute") @@ -2230,14 +2572,17 @@ mod tests { ); // Generate proof - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); // Verify proof - let (hash, result_set) = - GroveDb::verify_query_raw(&proof, &path_query).expect("proof verification failed"); + let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query, grove_version) + .expect("proof verification failed"); // Check if the hash matches the root hash - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); // Check if we got the correct number of results assert_eq!(result_set.len(), 3, "Expected 3 results due to limit"); @@ -2247,13 +2592,16 @@ mod tests 
{ SizedQuery::new(main_query.clone(), None, None), ); - let proof_no_limit = db.prove_query(&path_query_no_limit, None).unwrap().unwrap(); + let proof_no_limit = db + .prove_query(&path_query_no_limit, None, grove_version) + .unwrap() + .unwrap(); let verification_result_no_limit = - GroveDb::verify_query_raw(&proof_no_limit, &path_query_no_limit); + GroveDb::verify_query_raw(&proof_no_limit, &path_query_no_limit, grove_version); match verification_result_no_limit { Ok((hash, result_set)) => { - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 29, "Expected 29 results without limit"); } Err(e) => { @@ -2264,7 +2612,8 @@ mod tests { #[test] fn test_path_query_proofs_with_direction() { - let temp_db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let temp_db = make_deep_tree(grove_version); // root // deep_leaf @@ -2316,11 +2665,18 @@ mod tests { SizedQuery::new(query, Some(6), None), /* we need 6 because of intermediate empty * trees in proofs */ ); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 4); let keys = [ @@ -2335,7 +2691,7 @@ mod tests { b"value6".to_vec(), b"value5".to_vec(), ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = 
keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); @@ -2354,11 +2710,18 @@ mod tests { let path_query = PathQuery::new_unsized(vec![DEEP_LEAF.to_vec()], query); - let proof = temp_db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = temp_db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_raw(proof.as_slice(), &path_query).expect("should execute proof"); + GroveDb::verify_query_raw(proof.as_slice(), &path_query, grove_version) + .expect("should execute proof"); - assert_eq!(hash, temp_db.root_hash(None).unwrap().unwrap()); + assert_eq!( + hash, + temp_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 14); let keys = [ @@ -2393,25 +2756,34 @@ mod tests { b"value8".to_vec(), b"value9".to_vec(), ]; - let elements = values.map(|x| Element::new_item(x).serialize().unwrap()); + let elements = values.map(|x| Element::new_item(x).serialize(grove_version).unwrap()); let expected_result_set: Vec<(Vec, Vec)> = keys.into_iter().zip(elements).collect(); compare_result_tuples(result_set, expected_result_set); } #[test] fn test_checkpoint() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element1 = Element::new_item(b"ayy".to_vec()); - db.insert(EMPTY_PATH, b"key1", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert a subtree 1 into GroveDB"); + db.insert( + EMPTY_PATH, + b"key1", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert a subtree 1 into GroveDB"); db.insert( [b"key1".as_ref()].as_ref(), b"key2", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert a subtree 2 into GroveDB"); @@ -2421,14 +2793,20 @@ mod tests { element1.clone(), None, None, + grove_version, ) .unwrap() .expect("cannot insert an item into GroveDB"); 
assert_eq!( - db.get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) - .unwrap() - .expect("cannot get from grovedb"), + db.get( + [b"key1".as_ref(), b"key2".as_ref()].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("cannot get from grovedb"), element1 ); @@ -2441,14 +2819,24 @@ mod tests { GroveDb::open(checkpoint_tempdir).expect("cannot open grovedb from checkpoint"); assert_eq!( - db.get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) - .unwrap() - .expect("cannot get from grovedb"), + db.get( + [b"key1".as_ref(), b"key2".as_ref()].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("cannot get from grovedb"), element1 ); assert_eq!( checkpoint_db - .get([b"key1".as_ref(), b"key2".as_ref()].as_ref(), b"key3", None) + .get( + [b"key1".as_ref(), b"key2".as_ref()].as_ref(), + b"key3", + None, + grove_version + ) .unwrap() .expect("cannot get from checkpoint"), element1 @@ -2464,6 +2852,7 @@ mod tests { element2.clone(), None, None, + grove_version, ) .unwrap() .expect("cannot insert into checkpoint"); @@ -2474,20 +2863,21 @@ mod tests { element3.clone(), None, None, + grove_version, ) .unwrap() .expect("cannot insert into GroveDB"); assert_eq!( checkpoint_db - .get([b"key1".as_ref()].as_ref(), b"key4", None) + .get([b"key1".as_ref()].as_ref(), b"key4", None, grove_version) .unwrap() .expect("cannot get from checkpoint"), element2, ); assert_eq!( - db.get([b"key1".as_ref()].as_ref(), b"key4", None) + db.get([b"key1".as_ref()].as_ref(), b"key4", None, grove_version) .unwrap() .expect("cannot get from GroveDB"), element3 @@ -2500,30 +2890,40 @@ mod tests { element3.clone(), None, None, + grove_version, ) .unwrap() .expect("cannot insert into checkpoint"); - db.insert([b"key1".as_ref()].as_ref(), b"key6", element3, None, None) - .unwrap() - .expect("cannot insert into GroveDB"); + db.insert( + [b"key1".as_ref()].as_ref(), + b"key6", + element3, + None, + None, + grove_version, + ) + .unwrap() + 
.expect("cannot insert into GroveDB"); assert!(matches!( checkpoint_db - .get([b"key1".as_ref()].as_ref(), b"key6", None) + .get([b"key1".as_ref()].as_ref(), b"key6", None, grove_version) .unwrap(), Err(Error::PathKeyNotFound(_)) )); assert!(matches!( - db.get([b"key1".as_ref()].as_ref(), b"key5", None).unwrap(), + db.get([b"key1".as_ref()].as_ref(), b"key5", None, grove_version) + .unwrap(), Err(Error::PathKeyNotFound(_)) )); } #[test] fn test_is_empty_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // Create an empty tree with no elements db.insert( @@ -2532,36 +2932,39 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .unwrap(); assert!(db - .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None) + .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None, grove_version) .unwrap() .expect("path is valid tree")); - // add an element to the tree to make it non empty + // add an element to the tree to make it non-empty db.insert( [TEST_LEAF, b"innertree"].as_ref(), b"key1", Element::new_item(b"hello".to_vec()), None, None, + grove_version, ) .unwrap() .unwrap(); assert!(!db - .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None) + .is_empty_tree([TEST_LEAF, b"innertree"].as_ref(), None, grove_version) .unwrap() .expect("path is valid tree")); } #[test] fn transaction_should_be_aborted_when_rollback_is_called() { + let grove_version = GroveVersion::latest(); let item_key = b"key3"; - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); let element1 = Element::new_item(b"ayy".to_vec()); @@ -2573,6 +2976,7 @@ mod tests { element1, None, Some(&transaction), + grove_version, ) .unwrap(); @@ -2581,14 +2985,20 @@ mod tests { db.rollback_transaction(&transaction).unwrap(); let result = db - .get([TEST_LEAF].as_ref(), item_key, Some(&transaction)) + .get( + [TEST_LEAF].as_ref(), + item_key, + 
Some(&transaction), + grove_version, + ) .unwrap(); assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); } #[test] fn transaction_should_be_aborted() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); let item_key = b"key3"; @@ -2600,6 +3010,7 @@ mod tests { element, None, Some(&transaction), + grove_version, ) .unwrap() .unwrap(); @@ -2607,13 +3018,16 @@ mod tests { drop(transaction); // Transactional data shouldn't be committed to the main database - let result = db.get([TEST_LEAF].as_ref(), item_key, None).unwrap(); + let result = db + .get([TEST_LEAF].as_ref(), item_key, None, grove_version) + .unwrap(); assert!(matches!(result, Err(Error::PathKeyNotFound(_)))); } #[test] fn test_subtree_pairs_iterator() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); let element2 = Element::new_item(b"lmao".to_vec()); @@ -2624,6 +3038,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -2633,6 +3048,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -2643,6 +3059,7 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -2650,7 +3067,8 @@ mod tests { db.get( [TEST_LEAF, b"subtree1", b"subtree11"].as_ref(), b"key1", - None + None, + grove_version ) .unwrap() .expect("successful get 1"), @@ -2662,6 +3080,7 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -2671,6 +3090,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); @@ -2680,6 +3100,7 @@ mod tests { element.clone(), None, None, + grove_version, ) 
.unwrap() .expect("successful value insert"); @@ -2689,6 +3110,7 @@ mod tests { element2.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -2704,26 +3126,30 @@ mod tests { .unwrap(); let mut iter = Element::iterator(storage_context.raw_iter()).unwrap(); assert_eq!( - iter.next_element().unwrap().unwrap(), + iter.next_element(grove_version).unwrap().unwrap(), Some((b"key1".to_vec(), element)) ); assert_eq!( - iter.next_element().unwrap().unwrap(), + iter.next_element(grove_version).unwrap().unwrap(), Some((b"key2".to_vec(), element2)) ); - let subtree_element = iter.next_element().unwrap().unwrap().unwrap(); + let subtree_element = iter.next_element(grove_version).unwrap().unwrap().unwrap(); assert_eq!(subtree_element.0, b"subtree11".to_vec()); assert!(matches!(subtree_element.1, Element::Tree(..))); - let subtree_element = iter.next_element().unwrap().unwrap().unwrap(); + let subtree_element = iter.next_element(grove_version).unwrap().unwrap().unwrap(); assert_eq!(subtree_element.0, b"subtree12".to_vec()); assert!(matches!(subtree_element.1, Element::Tree(..))); - assert!(matches!(iter.next_element().unwrap(), Ok(None))); + assert!(matches!( + iter.next_element(grove_version).unwrap(), + Ok(None) + )); } #[test] fn test_find_subtrees() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // Insert some nested subtrees db.insert( [TEST_LEAF].as_ref(), @@ -2731,6 +3157,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -2740,6 +3167,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -2750,6 +3178,7 @@ mod tests { element, None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -2759,11 +3188,12 @@ mod tests { Element::empty_tree(), None, 
None, + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); let subtrees = db - .find_subtrees(&[TEST_LEAF].as_ref().into(), None) + .find_subtrees(&[TEST_LEAF].as_ref().into(), None, grove_version) .unwrap() .expect("cannot get subtrees"); assert_eq!( @@ -2779,12 +3209,14 @@ mod tests { #[test] fn test_root_subtree_has_root_key() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let storage = db.db.get_storage_context(EMPTY_PATH, None).unwrap(); let root_merk = Merk::open_base( storage, false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .unwrap() .expect("expected to get root merk"); @@ -2797,16 +3229,19 @@ mod tests { #[test] fn test_get_subtree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let element = Element::new_item(b"ayy".to_vec()); // Returns error is subtree is not valid { - let subtree = db.get([TEST_LEAF].as_ref(), b"invalid_tree", None).unwrap(); + let subtree = db + .get([TEST_LEAF].as_ref(), b"invalid_tree", None, grove_version) + .unwrap(); assert!(subtree.is_err()); // Doesn't return an error for subtree that exists but empty - let subtree = db.get(EMPTY_PATH, TEST_LEAF, None).unwrap(); + let subtree = db.get(EMPTY_PATH, TEST_LEAF, None, grove_version).unwrap(); assert!(subtree.is_ok()); } @@ -2817,12 +3252,13 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); let key1_tree = db - .get(EMPTY_PATH, TEST_LEAF, None) + .get(EMPTY_PATH, TEST_LEAF, None, grove_version) .unwrap() .expect("expected to get a root tree"); @@ -2845,6 +3281,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -2856,6 +3293,7 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ 
-2865,6 +3303,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 3 insert"); @@ -2882,10 +3321,13 @@ mod tests { Some(b"key3".to_vec()), false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .unwrap() .expect("cannot open merk"); - let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); + let result_element = Element::get(&subtree, b"key3", true, grove_version) + .unwrap() + .unwrap(); assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); } // Insert a new tree with transaction @@ -2897,6 +3339,7 @@ mod tests { Element::empty_tree(), None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -2907,6 +3350,7 @@ mod tests { element, None, Some(&transaction), + grove_version, ) .unwrap() .expect("successful value insert"); @@ -2926,10 +3370,13 @@ mod tests { Some(b"key4".to_vec()), false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .unwrap() .expect("cannot open merk"); - let result_element = Element::get(&subtree, b"key4", true).unwrap().unwrap(); + let result_element = Element::get(&subtree, b"key4", true, grove_version) + .unwrap() + .unwrap(); assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); // Should be able to retrieve instances created before transaction @@ -2943,16 +3390,20 @@ mod tests { Some(b"key3".to_vec()), false, Some(&Element::value_defined_cost_for_serialized_value), + grove_version, ) .unwrap() .expect("cannot open merk"); - let result_element = Element::get(&subtree, b"key3", true).unwrap().unwrap(); + let result_element = Element::get(&subtree, b"key3", true, grove_version) + .unwrap() + .unwrap(); assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); } #[test] fn test_get_full_query() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // Insert a couple of subtrees 
first db.insert( @@ -2961,6 +3412,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -2970,6 +3422,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -2980,6 +3433,7 @@ mod tests { Element::new_item(b"ayya".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -2989,6 +3443,7 @@ mod tests { Element::new_item(b"ayyb".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -2998,6 +3453,7 @@ mod tests { Element::new_item(b"ayyc".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -3007,6 +3463,7 @@ mod tests { Element::new_item(b"ayyd".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -3041,7 +3498,8 @@ mod tests { true, true, QueryKeyElementPairResultType, - None + None, + grove_version ) .unwrap() .expect("expected successful get_query") @@ -3056,8 +3514,9 @@ mod tests { #[test] fn test_aux_uses_separate_cf() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"ayy".to_vec()); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // Insert some nested subtrees db.insert( [TEST_LEAF].as_ref(), @@ -3065,6 +3524,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 1 insert"); @@ -3074,6 +3534,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 2 insert"); @@ -3084,6 +3545,7 @@ mod tests { element.clone(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -3102,9 +3564,14 @@ mod tests { .expect("cannot delete from aux"); assert_eq!( - db.get([TEST_LEAF, b"key1", b"key2"].as_ref(), b"key3", None) - .unwrap() - .expect("cannot get element"), + db.get( + [TEST_LEAF, b"key1", 
b"key2"].as_ref(), + b"key3", + None, + grove_version + ) + .unwrap() + .expect("cannot get element"), element ); assert_eq!( @@ -3135,10 +3602,11 @@ mod tests { #[test] fn test_aux_with_transaction() { + let grove_version = GroveVersion::latest(); let element = Element::new_item(b"ayy".to_vec()); let aux_value = b"ayylmao".to_vec(); let key = b"key".to_vec(); - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); let transaction = db.start_transaction(); // Insert a regular data with aux data in the same transaction @@ -3148,6 +3616,7 @@ mod tests { element, None, Some(&transaction), + grove_version, ) .unwrap() .expect("unable to insert"); @@ -3181,19 +3650,24 @@ mod tests { #[test] fn test_root_hash() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // Check hashes are different if tree is edited - let old_root_hash = db.root_hash(None).unwrap(); + let old_root_hash = db.root_hash(None, grove_version).unwrap(); db.insert( [TEST_LEAF].as_ref(), b"key1", Element::new_item(b"ayy".to_vec()), None, None, + grove_version, ) .unwrap() .expect("unable to insert an item"); - assert_ne!(old_root_hash.unwrap(), db.root_hash(None).unwrap().unwrap()); + assert_ne!( + old_root_hash.unwrap(), + db.root_hash(None, grove_version).unwrap().unwrap() + ); // Check isolation let transaction = db.start_transaction(); @@ -3204,35 +3678,50 @@ mod tests { Element::new_item(b"ayy".to_vec()), None, Some(&transaction), + grove_version, ) .unwrap() .expect("unable to insert an item"); - let root_hash_outside = db.root_hash(None).unwrap().unwrap(); + let root_hash_outside = db.root_hash(None, grove_version).unwrap().unwrap(); assert_ne!( - db.root_hash(Some(&transaction)).unwrap().unwrap(), + db.root_hash(Some(&transaction), grove_version) + .unwrap() + .unwrap(), root_hash_outside ); - assert_eq!(db.root_hash(None).unwrap().unwrap(), root_hash_outside); + assert_eq!( + db.root_hash(None, 
grove_version).unwrap().unwrap(), + root_hash_outside + ); db.commit_transaction(transaction).unwrap().unwrap(); - assert_ne!(db.root_hash(None).unwrap().unwrap(), root_hash_outside); + assert_ne!( + db.root_hash(None, grove_version).unwrap().unwrap(), + root_hash_outside + ); } #[test] fn test_get_non_existing_root_leaf() { - let db = make_test_grovedb(); - assert!(db.get(EMPTY_PATH, b"ayy", None).unwrap().is_err()); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + assert!(db + .get(EMPTY_PATH, b"ayy", None, grove_version) + .unwrap() + .is_err()); } #[test] fn test_check_subtree_exists_function() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key_scalar", Element::new_item(b"ayy".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert item"); @@ -3242,39 +3731,49 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("cannot insert item"); // Empty tree path means root always exist assert!(db - .check_subtree_exists_invalid_path(EMPTY_PATH, None) + .check_subtree_exists_invalid_path(EMPTY_PATH, None, grove_version) .unwrap() .is_ok()); // TEST_LEAF should be a tree assert!(db - .check_subtree_exists_invalid_path([TEST_LEAF].as_ref().into(), None) + .check_subtree_exists_invalid_path([TEST_LEAF].as_ref().into(), None, grove_version) .unwrap() .is_ok()); // TEST_LEAF.key_subtree should be a tree assert!(db - .check_subtree_exists_invalid_path([TEST_LEAF, b"key_subtree"].as_ref().into(), None) + .check_subtree_exists_invalid_path( + [TEST_LEAF, b"key_subtree"].as_ref().into(), + None, + grove_version + ) .unwrap() .is_ok()); // TEST_LEAF.key_scalar should NOT be a tree assert!(matches!( - db.check_subtree_exists_invalid_path([TEST_LEAF, b"key_scalar"].as_ref().into(), None) - .unwrap(), + db.check_subtree_exists_invalid_path( + [TEST_LEAF, 
b"key_scalar"].as_ref().into(), + None, + grove_version + ) + .unwrap(), Err(Error::InvalidPath(_)) )); } #[test] fn test_tree_value_exists_method_no_tx() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // Test keys in non-root tree db.insert( [TEST_LEAF].as_ref(), @@ -3282,31 +3781,49 @@ mod tests { Element::new_item(b"ayy".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert item"); assert!(db - .has_raw([TEST_LEAF].as_ref(), b"key", None) + .has_raw([TEST_LEAF].as_ref(), b"key", None, grove_version) .unwrap() .unwrap()); assert!(!db - .has_raw([TEST_LEAF].as_ref(), b"badkey", None) + .has_raw([TEST_LEAF].as_ref(), b"badkey", None, grove_version) .unwrap() .unwrap()); // Test keys for a root tree - db.insert(EMPTY_PATH, b"leaf", Element::empty_tree(), None, None) - .unwrap() - .expect("cannot insert item"); + db.insert( + EMPTY_PATH, + b"leaf", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("cannot insert item"); - assert!(db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); - assert!(db.has_raw(EMPTY_PATH, TEST_LEAF, None).unwrap().unwrap()); - assert!(!db.has_raw(EMPTY_PATH, b"badleaf", None).unwrap().unwrap()); + assert!(db + .has_raw(EMPTY_PATH, b"leaf", None, grove_version) + .unwrap() + .unwrap()); + assert!(db + .has_raw(EMPTY_PATH, TEST_LEAF, None, grove_version) + .unwrap() + .unwrap()); + assert!(!db + .has_raw(EMPTY_PATH, b"badleaf", None, grove_version) + .unwrap() + .unwrap()); } #[test] fn test_tree_value_exists_method_tx() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let tx = db.start_transaction(); // Test keys in non-root tree db.insert( @@ -3315,38 +3832,56 @@ mod tests { Element::new_item(b"ayy".to_vec()), None, Some(&tx), + grove_version, ) .unwrap() .expect("cannot insert item"); assert!(db - .has_raw([TEST_LEAF].as_ref(), 
b"key", Some(&tx)) + .has_raw([TEST_LEAF].as_ref(), b"key", Some(&tx), grove_version) .unwrap() .unwrap()); assert!(!db - .has_raw([TEST_LEAF].as_ref(), b"key", None) + .has_raw([TEST_LEAF].as_ref(), b"key", None, grove_version) .unwrap() .unwrap()); // Test keys for a root tree - db.insert(EMPTY_PATH, b"leaf", Element::empty_tree(), None, Some(&tx)) + db.insert( + EMPTY_PATH, + b"leaf", + Element::empty_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .expect("cannot insert item"); + assert!(db + .has_raw(EMPTY_PATH, b"leaf", Some(&tx), grove_version) .unwrap() - .expect("cannot insert item"); - assert!(db.has_raw(EMPTY_PATH, b"leaf", Some(&tx)).unwrap().unwrap()); - assert!(!db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); + .unwrap()); + assert!(!db + .has_raw(EMPTY_PATH, b"leaf", None, grove_version) + .unwrap() + .unwrap()); db.commit_transaction(tx) .unwrap() .expect("cannot commit transaction"); assert!(db - .has_raw([TEST_LEAF].as_ref(), b"key", None) + .has_raw([TEST_LEAF].as_ref(), b"key", None, grove_version) + .unwrap() + .unwrap()); + assert!(db + .has_raw(EMPTY_PATH, b"leaf", None, grove_version) .unwrap() .unwrap()); - assert!(db.has_raw(EMPTY_PATH, b"leaf", None).unwrap().unwrap()); } #[test] fn test_storage_wipe() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let _path = db._tmp_dir.path(); // Test keys in non-root tree @@ -3356,19 +3891,23 @@ mod tests { Element::new_item(b"ayy".to_vec()), None, None, + grove_version, ) .unwrap() .expect("cannot insert item"); // retrieve key before wipe - let elem = db.get(&[TEST_LEAF], b"key", None).unwrap().unwrap(); + let elem = db + .get(&[TEST_LEAF], b"key", None, grove_version) + .unwrap() + .unwrap(); assert_eq!(elem, Element::new_item(b"ayy".to_vec())); // wipe the database db.grove_db.wipe().unwrap(); // retrieve key after wipe - let elem_result = db.get(&[TEST_LEAF], b"key", None).unwrap(); + let 
elem_result = db.get(&[TEST_LEAF], b"key", None, grove_version).unwrap(); assert!(elem_result.is_err()); assert!(matches!( elem_result, diff --git a/grovedb/src/tests/query_tests.rs b/grovedb/src/tests/query_tests.rs index e7325a6a..48c358c6 100644 --- a/grovedb/src/tests/query_tests.rs +++ b/grovedb/src/tests/query_tests.rs @@ -2,7 +2,8 @@ mod tests { //! Query tests use grovedb_merk::proofs::{query::QueryItem, Query}; - use rand::Rng; + use grovedb_version::version::GroveVersion; + use rand::random; use tempfile::TempDir; use crate::{ @@ -19,7 +20,7 @@ mod tests { Element, GroveDb, PathQuery, SizedQuery, }; - fn populate_tree_for_non_unique_range_subquery(db: &TempGroveDb) { + fn populate_tree_for_non_unique_range_subquery(db: &TempGroveDb, grove_version: &GroveVersion) { // Insert a couple of subtrees first for i in 1985u32..2000 { let i_vec = i.to_be_bytes().to_vec(); @@ -29,6 +30,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -40,6 +42,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -53,6 +56,7 @@ mod tests { Element::new_item(j_vec), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -60,7 +64,10 @@ mod tests { } } - fn populate_tree_for_non_unique_double_range_subquery(db: &TempGroveDb) { + fn populate_tree_for_non_unique_double_range_subquery( + db: &TempGroveDb, + grove_version: &GroveVersion, + ) { // Insert a couple of subtrees first for i in 0u32..10 { let i_vec = i.to_be_bytes().to_vec(); @@ -70,6 +77,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -81,6 +89,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -93,6 +102,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); 
@@ -105,6 +115,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -117,6 +128,7 @@ mod tests { Element::new_item(k_vec), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -125,7 +137,10 @@ mod tests { } } - fn populate_tree_by_reference_for_non_unique_range_subquery(db: &TempGroveDb) { + fn populate_tree_by_reference_for_non_unique_range_subquery( + db: &TempGroveDb, + grove_version: &GroveVersion, + ) { // This subtree will be holding values db.insert( [TEST_LEAF].as_ref(), @@ -133,6 +148,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -144,6 +160,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -156,6 +173,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -167,12 +185,13 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); for j in 100u32..150 { - let random_key = rand::thread_rng().gen::<[u8; 32]>(); + let random_key = random::<[u8; 32]>(); let mut j_vec = i_vec.clone(); j_vec.append(&mut j.to_be_bytes().to_vec()); @@ -183,6 +202,7 @@ mod tests { Element::new_item(j_vec.clone()), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -197,6 +217,7 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -204,7 +225,7 @@ mod tests { } } - fn populate_tree_for_unique_range_subquery(db: &TempGroveDb) { + fn populate_tree_for_unique_range_subquery(db: &TempGroveDb, grove_version: &GroveVersion) { // Insert a couple of subtrees first for i in 1985u32..2000 { let i_vec = i.to_be_bytes().to_vec(); @@ -214,6 +235,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree 
insert"); @@ -224,13 +246,17 @@ mod tests { Element::new_item(i_vec), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); } } - fn populate_tree_by_reference_for_unique_range_subquery(db: &TempGroveDb) { + fn populate_tree_by_reference_for_unique_range_subquery( + db: &TempGroveDb, + grove_version: &GroveVersion, + ) { // This subtree will be holding values db.insert( [TEST_LEAF].as_ref(), @@ -238,6 +264,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -249,6 +276,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -261,6 +289,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -272,6 +301,7 @@ mod tests { Element::new_item(i_vec.clone()), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); @@ -287,23 +317,35 @@ mod tests { ])), None, None, + grove_version, ) .unwrap() .expect("successful value insert"); } } - fn populate_tree_for_unique_range_subquery_with_non_unique_null_values(db: &mut TempGroveDb) { - populate_tree_for_unique_range_subquery(db); - db.insert([TEST_LEAF].as_ref(), &[], Element::empty_tree(), None, None) - .unwrap() - .expect("successful subtree insert"); + fn populate_tree_for_unique_range_subquery_with_non_unique_null_values( + db: &mut TempGroveDb, + grove_version: &GroveVersion, + ) { + populate_tree_for_unique_range_subquery(db, grove_version); + db.insert( + [TEST_LEAF].as_ref(), + &[], + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("successful subtree insert"); db.insert( [TEST_LEAF, &[]].as_ref(), b"\0", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -316,19 +358,21 @@ mod tests { Element::new_item(i_vec.clone()), None, None, + grove_version, ) .unwrap() .expect("successful value 
insert"); } } - fn populate_tree_for_uneven_keys(db: &TempGroveDb) { + fn populate_tree_for_uneven_keys(db: &TempGroveDb, grove_version: &GroveVersion) { db.insert( [TEST_LEAF].as_ref(), "b".as_ref(), Element::new_item(1u8.to_be_bytes().to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -339,6 +383,7 @@ mod tests { Element::new_item(2u8.to_be_bytes().to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -349,6 +394,7 @@ mod tests { Element::new_item(3u8.to_be_bytes().to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -359,6 +405,7 @@ mod tests { Element::new_item(4u8.to_be_bytes().to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -369,6 +416,7 @@ mod tests { Element::new_item(5u8.to_be_bytes().to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -376,8 +424,9 @@ mod tests { #[test] fn test_get_correct_order() { - let db = make_test_grovedb(); - populate_tree_for_uneven_keys(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_uneven_keys(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let query = Query::new_range_full(); @@ -385,7 +434,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -394,8 +443,9 @@ mod tests { #[test] fn test_get_range_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut 
query = Query::new(); @@ -411,7 +461,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -425,17 +475,22 @@ mod tests { last_value.append(&mut 149_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 200); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_query_with_unique_subquery() { - let mut db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&mut db); + let grove_version = GroveVersion::latest(); + let mut db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&mut db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -448,7 +503,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -460,17 +515,22 @@ mod tests { let last_value = 1991_u32.to_be_bytes().to_vec(); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - 
assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 4); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_query_with_unique_subquery_on_references() { - let db = make_test_grovedb(); - populate_tree_by_reference_for_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_by_reference_for_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; let mut query = Query::new(); @@ -483,7 +543,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -495,17 +555,22 @@ mod tests { let last_value = 1991_u32.to_be_bytes().to_vec(); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 4); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_query_with_unique_subquery_with_non_unique_null_values() { - let mut db = make_test_grovedb(); - 
populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db); + let grove_version = GroveVersion::latest(); + let mut db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -527,7 +592,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -539,17 +604,22 @@ mod tests { let last_value = 1999_u32.to_be_bytes().to_vec(); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 115); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_query_with_unique_subquery_ignore_non_unique_null_values() { - let mut db = make_test_grovedb(); - populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db); + let grove_version = GroveVersion::latest(); + let mut db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery_with_non_unique_null_values(&mut db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -565,14 +635,14 @@ mod tests { // tree query.add_conditional_subquery( QueryItem::Key(b"".to_vec()), - Some(vec![b"\0".to_vec()]), // We want to go into 0 but we don't want to get anything + 
Some(vec![b"\0".to_vec()]), // We want to go into 0, but we don't want to get anything Some(subquery), ); let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -584,17 +654,22 @@ mod tests { let last_value = 1999_u32.to_be_bytes().to_vec(); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 15); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_inclusive_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -612,7 +687,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -626,17 +701,22 @@ mod tests { last_value.append(&mut 149_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = 
GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 400); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_inclusive_query_with_non_unique_subquery_on_references() { - let db = make_test_grovedb(); - populate_tree_by_reference_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_by_reference_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec(), b"1".to_vec()]; let mut query = Query::new(); @@ -654,7 +734,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -671,17 +751,22 @@ mod tests { last_value.append(&mut 149_u32.to_be_bytes().to_vec()); assert!(elements.contains(&last_value)); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 400); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_inclusive_query_with_unique_subquery() { - let db = make_test_grovedb(); - 
populate_tree_for_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -696,7 +781,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -708,17 +793,22 @@ mod tests { let last_value = 1995_u32.to_be_bytes().to_vec(); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 8); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_from_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -734,7 +824,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -748,17 +838,22 @@ mod tests { last_value.append(&mut 
149_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 250); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_from_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -771,7 +866,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -783,17 +878,22 @@ mod tests { let last_value = 1999_u32.to_be_bytes().to_vec(); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 5); compare_result_sets(&elements, &result_set); 
} #[test] fn test_get_range_to_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -809,7 +909,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -823,17 +923,22 @@ mod tests { last_value.append(&mut 149_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 500); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_to_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -846,7 +951,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() 
.expect("expected successful get_path_query"); @@ -858,17 +963,22 @@ mod tests { let last_value = 1994_u32.to_be_bytes().to_vec(); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 10); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_to_inclusive_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -884,7 +994,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -898,17 +1008,22 @@ mod tests { last_value.append(&mut 149_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, 
db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 550); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new_with_direction(false); @@ -924,7 +1039,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -938,17 +1053,22 @@ mod tests { last_value.append(&mut 100_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 750); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_to_inclusive_query_with_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -961,7 +1081,7 @@ mod tests { let path_query = 
PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -973,17 +1093,22 @@ mod tests { let last_value = 1995_u32.to_be_bytes().to_vec(); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 11); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_after_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -999,7 +1124,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1013,17 +1138,22 @@ mod tests { last_value.append(&mut 149_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db 
+ .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 200); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_after_to_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -1041,7 +1171,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1055,17 +1185,22 @@ mod tests { last_value.append(&mut 149_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 50); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_after_to_inclusive_query_with_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + 
populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); @@ -1083,7 +1218,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1097,17 +1232,22 @@ mod tests { last_value.append(&mut 149_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 100); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_after_to_inclusive_query_with_non_unique_subquery_and_key_out_of_bounds() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new_with_direction(false); @@ -1125,7 +1265,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1139,28 +1279,32 @@ mod tests { last_value.append(&mut 100_u32.to_be_bytes().to_vec()); 
assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 200); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_inclusive_query_with_double_non_unique_subquery() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_double_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_double_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new(); - query.insert_range_inclusive((3u32).to_be_bytes().to_vec()..=(4u32).to_be_bytes().to_vec()); + query.insert_range_inclusive(3u32.to_be_bytes().to_vec()..=4u32.to_be_bytes().to_vec()); query.set_subquery_key(b"a".to_vec()); let mut subquery = Query::new(); - subquery.insert_range_inclusive( - (29u32).to_be_bytes().to_vec()..=(31u32).to_be_bytes().to_vec(), - ); + subquery + .insert_range_inclusive(29u32.to_be_bytes().to_vec()..=31u32.to_be_bytes().to_vec()); subquery.set_subquery_key(b"\0".to_vec()); @@ -1174,7 +1318,7 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1186,17 +1330,22 @@ mod tests { let last_value = 109_u32.to_be_bytes().to_vec(); assert_eq!(elements[elements.len() - 1], last_value); - let proof = 
db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 60); compare_result_sets(&elements, &result_set); } #[test] fn test_get_range_query_with_limit_and_offset() { - let db = make_test_grovedb(); - populate_tree_for_non_unique_range_subquery(&db); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + populate_tree_for_non_unique_range_subquery(&db, grove_version); let path = vec![TEST_LEAF.to_vec()]; let mut query = Query::new_with_direction(true); @@ -1213,7 +1362,7 @@ mod tests { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1227,9 +1376,13 @@ mod tests { last_value.append(&mut 149_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 250); compare_result_sets(&elements, &result_set); @@ -1244,7 +1397,7 @@ mod tests { let 
path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), None, None)); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1258,9 +1411,13 @@ mod tests { last_value.append(&mut 100_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 250); compare_result_sets(&elements, &result_set); @@ -1276,7 +1433,7 @@ mod tests { PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(55), None)); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1291,9 +1448,13 @@ mod tests { last_value.append(&mut 104_u32.to_be_bytes().to_vec()); assert_eq!(elements[elements.len() - 1], last_value); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 55); compare_result_sets(&elements, 
&result_set); @@ -1307,14 +1468,14 @@ mod tests { ); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); assert_eq!(elements.len(), 60); // Skips the first 14 elements, starts from the 15th - // i.e skips [100 - 113] starts from 114 + // i.e. skips [100 - 113] starts from 114 let mut first_value = 1990_u32.to_be_bytes().to_vec(); first_value.append(&mut 114_u32.to_be_bytes().to_vec()); assert_eq!(elements[0], first_value); @@ -1339,7 +1500,7 @@ mod tests { ); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1368,7 +1529,7 @@ mod tests { ); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1384,20 +1545,24 @@ mod tests { ); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); assert_eq!(elements.len(), 250); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 250); // Test on unique subtree build - let db = make_test_grovedb(); - populate_tree_for_unique_range_subquery(&db); + let db = 
make_test_grovedb(grove_version); + populate_tree_for_unique_range_subquery(&db, grove_version); let mut query = Query::new_with_direction(true); query.insert_range(1990_u32.to_be_bytes().to_vec()..2000_u32.to_be_bytes().to_vec()); @@ -1407,7 +1572,7 @@ mod tests { let path_query = PathQuery::new(path, SizedQuery::new(query.clone(), Some(5), Some(2))); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("expected successful get_path_query"); @@ -1422,6 +1587,7 @@ mod tests { #[test] fn test_correct_child_root_hash_propagation_for_parent_in_same_batch() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().unwrap(); let db = GroveDb::open(tmp_dir.path()).unwrap(); let tree_name_slice: &[u8] = &[ @@ -1430,7 +1596,7 @@ mod tests { ]; let batch = vec![GroveDbOp::insert_op(vec![], vec![1], Element::empty_tree())]; - db.apply_batch(batch, None, None) + db.apply_batch(batch, None, None, grove_version) .unwrap() .expect("should apply batch"); @@ -1476,7 +1642,7 @@ mod tests { Element::empty_tree(), ), ]; - db.apply_batch(batch, None, None) + db.apply_batch(batch, None, None, grove_version) .unwrap() .expect("should apply batch"); @@ -1532,7 +1698,7 @@ mod tests { )), ), ]; - db.apply_batch(batch, None, None) + db.apply_batch(batch, None, None, grove_version) .unwrap() .expect("should apply batch"); @@ -1559,16 +1725,18 @@ mod tests { ); let proof = db - .prove_query(&path_query, None) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("expected successful proving"); - let (hash, _result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let (hash, _result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); } #[test] fn test_mixed_level_proofs() { 
- let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // TEST_LEAF // / | | \ @@ -1584,6 +1752,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1593,6 +1762,7 @@ mod tests { Element::new_item(vec![1]), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -1602,6 +1772,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1611,6 +1782,7 @@ mod tests { Element::new_reference(ReferencePathType::SiblingReference(b"key2".to_vec())), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1621,6 +1793,7 @@ mod tests { Element::new_item(vec![2]), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -1630,6 +1803,7 @@ mod tests { Element::new_item(vec![3]), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -1639,6 +1813,7 @@ mod tests { Element::new_item(vec![4]), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -1653,16 +1828,20 @@ mod tests { let path_query = PathQuery::new_unsized(path.clone(), query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("successful get_path_query"); assert_eq!(elements.len(), 5); assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, 
db.root_hash(None, grove_version).unwrap().unwrap()); // println!( // "{}", // result_set @@ -1677,16 +1856,20 @@ mod tests { // Test mixed element proofs with limit and offset let path_query = PathQuery::new_unsized(path.clone(), query.clone()); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("successful get_path_query"); assert_eq!(elements.len(), 5); assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1], vec![1]]); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 5); compare_result_sets(&elements, &result_set); @@ -1695,16 +1878,20 @@ mod tests { let path_query = PathQuery::new(path.clone(), SizedQuery::new(query.clone(), Some(1), None)); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("successful get_path_query"); assert_eq!(elements.len(), 1); assert_eq!(elements, vec![vec![2]]); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); 
assert_eq!(result_set.len(), 1); compare_result_sets(&elements, &result_set); @@ -1713,16 +1900,20 @@ mod tests { SizedQuery::new(query.clone(), Some(3), Some(0)), ); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("successful get_path_query"); assert_eq!(elements.len(), 3); assert_eq!(elements, vec![vec![2], vec![3], vec![4]]); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 3); compare_result_sets(&elements, &result_set); @@ -1731,22 +1922,26 @@ mod tests { SizedQuery::new(query.clone(), Some(4), Some(0)), ); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("successful get_path_query"); assert_eq!(elements.len(), 4); assert_eq!(elements, vec![vec![2], vec![3], vec![4], vec![1]]); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 4); compare_result_sets(&elements, &result_set); let path_query = PathQuery::new(path, 
SizedQuery::new(query.clone(), Some(10), Some(4))); let (elements, _) = db - .query_item_value(&path_query, true, true, true, None) + .query_item_value(&path_query, true, true, true, None, grove_version) .unwrap() .expect("successful get_path_query"); @@ -1756,13 +1951,15 @@ mod tests { #[test] fn test_mixed_level_proofs_with_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key1", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1772,6 +1969,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1781,6 +1979,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1791,6 +1990,7 @@ mod tests { Element::new_item(vec![2]), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -1800,6 +2000,7 @@ mod tests { Element::new_item(vec![3]), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -1809,6 +2010,7 @@ mod tests { Element::new_item(vec![4]), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -1818,6 +2020,7 @@ mod tests { Element::new_item(vec![5]), None, None, + grove_version, ) .unwrap() .expect("successful item insert"); @@ -1840,15 +2043,20 @@ mod tests { true, QueryResultType::QueryPathKeyElementTrioResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_path_query"); assert_eq!(elements.len(), 5); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, 
&path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); // println!( // "{}", @@ -1873,15 +2081,20 @@ mod tests { true, QueryResultType::QueryPathKeyElementTrioResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_path_query"); assert_eq!(elements.len(), 1); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 1); // TODO: verify that the result set is exactly the same // compare_result_sets(&elements, &result_set); @@ -1889,7 +2102,8 @@ mod tests { #[test] fn test_mixed_level_proofs_with_subquery_paths() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // TEST_LEAF // / | \ @@ -1906,6 +2120,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1915,6 +2130,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1924,6 +2140,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1934,6 +2151,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1943,6 +2161,7 @@ mod tests { Element::new_item(vec![2]), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1952,6 +2171,7 @@ mod tests { Element::new_item(vec![3]), None, None, + grove_version, ) .unwrap() .expect("successful 
subtree insert"); @@ -1962,6 +2182,7 @@ mod tests { Element::new_item(vec![6]), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1972,6 +2193,7 @@ mod tests { Element::new_item(vec![4]), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1981,6 +2203,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -1991,6 +2214,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -2000,6 +2224,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -2009,6 +2234,7 @@ mod tests { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -2030,6 +2256,7 @@ mod tests { false, QueryResultType::QueryPathKeyElementTrioResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_path_query"); @@ -2050,9 +2277,13 @@ mod tests { ]) ); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); // println!( // "{}", // result_set @@ -2083,6 +2314,7 @@ mod tests { false, QueryResultType::QueryPathKeyElementTrioResultType, None, + grove_version, ) .unwrap() .expect("expected successful get_path_query"); @@ -2113,9 +2345,13 @@ mod tests { ]) ); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + 
let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 4); // apply empty path translation @@ -2130,9 +2366,13 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 5); // use conditionals to return from more than 2 depth @@ -2154,15 +2394,20 @@ mod tests { let path_query = PathQuery::new_unsized(path, query.clone()); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 8); } #[test] fn test_proof_with_limit_zero() { - let db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); let mut query = Query::new(); query.insert_all(); let path_query = PathQuery::new( @@ -2170,22 +2415,27 @@ mod tests { SizedQuery::new(query, Some(0), Some(0)), ); - db.prove_query(&path_query, None) + db.prove_query(&path_query, None, 
grove_version) .unwrap() .expect_err("expected error when trying to prove with limit 0"); } #[test] fn test_result_set_path_after_verification() { - let db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); let mut query = Query::new(); query.insert_all(); let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 3); // assert the result set path @@ -2214,9 +2464,13 @@ mod tests { query.set_subquery(subq); let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 5); assert_eq!( @@ -2250,9 +2504,13 @@ mod tests { query.set_subquery(subq); let path_query = PathQuery::new_unsized(vec![], query); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + 
.unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 3); assert_eq!( @@ -2293,9 +2551,13 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query_raw(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 4); assert_eq!( @@ -2320,15 +2582,19 @@ mod tests { #[test] fn test_verification_with_path_key_optional_element_trio() { - let db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); let mut query = Query::new(); query.insert_all(); let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"innertree".to_vec()], query); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 3); assert_eq!( @@ -2359,7 +2625,8 @@ mod tests { #[test] fn test_absence_proof() { - let db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); // simple case, request for items k2..=k5 under inner tree // we 
pass them as keys as terminal keys does not handle ranges with start or @@ -2375,10 +2642,13 @@ mod tests { SizedQuery::new(query, Some(4), None), ); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); let (hash, result_set) = - GroveDb::verify_query_with_absence_proof(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + GroveDb::verify_query_with_absence_proof(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 4); assert_eq!( @@ -2411,7 +2681,8 @@ mod tests { #[test] fn test_subset_proof_verification() { - let db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); // original path query let mut query = Query::new(); @@ -2422,9 +2693,12 @@ mod tests { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 5); assert_eq!( result_set[0], @@ -2475,8 +2749,9 @@ mod tests { query.set_subquery(subq); let subset_path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); - let (hash, result_set) = GroveDb::verify_subset_query(&proof, &subset_path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let (hash, result_set) = + GroveDb::verify_subset_query(&proof, &subset_path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, 
grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 1); assert_eq!( result_set[0], @@ -2489,7 +2764,8 @@ mod tests { } #[test] fn test_chained_path_query_verification() { - let db = make_deep_tree(); + let grove_version = GroveVersion::latest(); + let db = make_deep_tree(grove_version); let mut query = Query::new(); query.insert_all(); @@ -2504,9 +2780,12 @@ mod tests { let path_query = PathQuery::new_unsized(vec![b"deep_leaf".to_vec()], query); // first prove non verbose - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = GroveDb::verify_query(&proof, &path_query, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 14); // init deeper_1 path query @@ -2544,9 +2823,13 @@ mod tests { &proof, &deeper_1_path_query, chained_path_queries, + grove_version, ) .unwrap(); - assert_eq!(root_hash, db.root_hash(None).unwrap().unwrap()); + assert_eq!( + root_hash, + db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(results.len(), 2); assert_eq!(results[0].len(), 3); assert_eq!( @@ -2627,11 +2910,12 @@ mod tests { #[test] fn test_query_b_depends_on_query_a() { + let grove_version = GroveVersion::latest(); // we have two trees // one with a mapping of id to name // another with a mapping of name to age // we want to get the age of every one after a certain id ordered by name - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); // TEST_LEAF contains the id to name mapping db.insert( @@ -2640,6 +2924,7 @@ mod tests { Element::new_item(b"d".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf insert"); @@ -2649,6 +2934,7 @@ mod tests { 
Element::new_item(b"b".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf insert"); @@ -2658,6 +2944,7 @@ mod tests { Element::new_item(b"c".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf insert"); @@ -2667,6 +2954,7 @@ mod tests { Element::new_item(b"a".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf insert"); @@ -2678,6 +2966,7 @@ mod tests { Element::new_item(vec![10]), None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf insert"); @@ -2687,6 +2976,7 @@ mod tests { Element::new_item(vec![30]), None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf insert"); @@ -2696,6 +2986,7 @@ mod tests { Element::new_item(vec![12]), None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf insert"); @@ -2705,13 +2996,14 @@ mod tests { Element::new_item(vec![46]), None, None, + grove_version, ) .unwrap() .expect("successful root tree leaf insert"); // Query: return the age of everyone greater than id 2 ordered by name // id 2 - b - // so we want to return the age for c and d = 12, 46 respectively + // we want to return the age for c and d = 12, 46 respectively // the proof generator knows that id 2 = b, but the verifier doesn't // hence we need to generate two proofs // prove that 2 - b then prove age after b @@ -2724,9 +3016,13 @@ mod tests { let mut path_query_one = PathQuery::new_unsized(vec![TEST_LEAF.to_vec()], query); // first we show that this returns the correct output - let proof = db.prove_query(&path_query_one, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query_one).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query_one, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query(&proof, &path_query_one, grove_version).unwrap(); + assert_eq!(hash, 
db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 1); assert_eq!(result_set[0].2, Some(Element::new_item(b"b".to_vec()))); @@ -2736,18 +3032,25 @@ mod tests { let path_query_two = PathQuery::new_unsized(vec![ANOTHER_TEST_LEAF.to_vec()], query); // show that we get the correct output - let proof = db.prove_query(&path_query_two, None).unwrap().unwrap(); - let (hash, result_set) = GroveDb::verify_query(&proof, &path_query_two).unwrap(); - assert_eq!(hash, db.root_hash(None).unwrap().unwrap()); + let proof = db + .prove_query(&path_query_two, None, grove_version) + .unwrap() + .unwrap(); + let (hash, result_set) = + GroveDb::verify_query(&proof, &path_query_two, grove_version).unwrap(); + assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result_set.len(), 2); assert_eq!(result_set[0].2, Some(Element::new_item(vec![12]))); assert_eq!(result_set[1].2, Some(Element::new_item(vec![46]))); // now we merge the path queries let mut merged_path_queries = - PathQuery::merge(vec![&path_query_one, &path_query_two]).unwrap(); + PathQuery::merge(vec![&path_query_one, &path_query_two], grove_version).unwrap(); merged_path_queries.query.limit = Some(3); - let proof = db.prove_query(&merged_path_queries, None).unwrap().unwrap(); + let proof = db + .prove_query(&merged_path_queries, None, grove_version) + .unwrap() + .unwrap(); // verifier only has access to the statement age > 2 // need to first get the name associated with 2 from the proof @@ -2774,6 +3077,7 @@ mod tests { proof.as_slice(), &path_query_one, chained_path_queries, + grove_version, ) .unwrap(); assert_eq!(result_set.len(), 2); @@ -2787,9 +3091,10 @@ mod tests { #[test] fn test_prove_absent_path_with_intermediate_emtpy_tree() { + let grove_version = GroveVersion::latest(); // root // test_leaf (empty) - let grovedb = make_test_grovedb(); + let grovedb = make_test_grovedb(grove_version); // prove the absence of key "book" in ["test_leaf", "invalid"] let mut 
query = Query::new(); @@ -2798,13 +3103,17 @@ mod tests { PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"invalid".to_vec()], query); let proof = grovedb - .prove_query(&path_query, None) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should generate proofs"); let (root_hash, result_set) = - GroveDb::verify_query(proof.as_slice(), &path_query).expect("should verify proof"); + GroveDb::verify_query(proof.as_slice(), &path_query, grove_version) + .expect("should verify proof"); assert_eq!(result_set.len(), 0); - assert_eq!(root_hash, grovedb.root_hash(None).unwrap().unwrap()); + assert_eq!( + root_hash, + grovedb.root_hash(None, grove_version).unwrap().unwrap() + ); } } diff --git a/grovedb/src/tests/sum_tree_tests.rs b/grovedb/src/tests/sum_tree_tests.rs index 8f28932f..92df7d73 100644 --- a/grovedb/src/tests/sum_tree_tests.rs +++ b/grovedb/src/tests/sum_tree_tests.rs @@ -6,6 +6,7 @@ use grovedb_merk::{ TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; use grovedb_storage::StorageBatch; +use grovedb_version::version::GroveVersion; use crate::{ batch::GroveDbOp, @@ -16,20 +17,22 @@ use crate::{ #[test] fn test_sum_tree_behaves_like_regular_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key", Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); // Can fetch sum tree let sum_tree = db - .get([TEST_LEAF].as_ref(), b"key", None) + .get([TEST_LEAF].as_ref(), b"key", None, grove_version) .unwrap() .expect("should get tree"); assert!(matches!(sum_tree, Element::SumTree(..))); @@ -40,6 +43,7 @@ fn test_sum_tree_behaves_like_regular_tree() { Element::new_item(vec![1]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -49,6 +53,7 @@ fn test_sum_tree_behaves_like_regular_tree() { Element::new_item(vec![3]), None, None, + grove_version, ) .unwrap() .expect("should 
insert item"); @@ -58,13 +63,19 @@ fn test_sum_tree_behaves_like_regular_tree() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert item"); // Test proper item retrieval let item = db - .get([TEST_LEAF, b"key"].as_ref(), b"innerkey", None) + .get( + [TEST_LEAF, b"key"].as_ref(), + b"innerkey", + None, + grove_version, + ) .unwrap() .expect("should get item"); assert_eq!(item, Element::new_item(vec![1])); @@ -75,28 +86,34 @@ fn test_sum_tree_behaves_like_regular_tree() { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"key".to_vec()], query); let proof = db - .prove_query(&path_query, None) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should generate proof"); let (root_hash, result_set) = - GroveDb::verify_query_raw(&proof, &path_query).expect("should verify proof"); - assert_eq!(root_hash, db.grove_db.root_hash(None).unwrap().unwrap()); + GroveDb::verify_query_raw(&proof, &path_query, grove_version).expect("should verify proof"); + assert_eq!( + root_hash, + db.grove_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 1); assert_eq!( - Element::deserialize(&result_set[0].value).expect("should deserialize element"), + Element::deserialize(&result_set[0].value, grove_version) + .expect("should deserialize element"), Element::new_item(vec![3]) ); } #[test] fn test_sum_item_behaves_like_regular_item() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"sumkey", Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -106,6 +123,7 @@ fn test_sum_item_behaves_like_regular_item() { Element::new_item(vec![1]), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -115,6 +133,7 @@ fn test_sum_item_behaves_like_regular_item() { Element::new_sum_item(5), None, None, + grove_version, ) .unwrap() 
.expect("should insert tree"); @@ -124,13 +143,14 @@ fn test_sum_item_behaves_like_regular_item() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); // Test proper item retrieval let item = db - .get([TEST_LEAF, b"sumkey"].as_ref(), b"k2", None) + .get([TEST_LEAF, b"sumkey"].as_ref(), b"k2", None, grove_version) .unwrap() .expect("should get item"); assert_eq!(item, Element::new_sum_item(5)); @@ -141,28 +161,33 @@ fn test_sum_item_behaves_like_regular_item() { let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"sumkey".to_vec()], query); let proof = db - .prove_query(&path_query, None) + .prove_query(&path_query, None, grove_version) .unwrap() .expect("should generate proof"); let (root_hash, result_set) = - GroveDb::verify_query_raw(&proof, &path_query).expect("should verify proof"); - assert_eq!(root_hash, db.grove_db.root_hash(None).unwrap().unwrap()); + GroveDb::verify_query_raw(&proof, &path_query, grove_version).expect("should verify proof"); + assert_eq!( + root_hash, + db.grove_db.root_hash(None, grove_version).unwrap().unwrap() + ); assert_eq!(result_set.len(), 1); - let element_from_proof = - Element::deserialize(&result_set[0].value).expect("should deserialize element"); + let element_from_proof = Element::deserialize(&result_set[0].value, grove_version) + .expect("should deserialize element"); assert_eq!(element_from_proof, Element::new_sum_item(5)); assert_eq!(element_from_proof.sum_value_or_default(), 5); } #[test] fn test_cannot_insert_sum_item_in_regular_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"sumkey", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -173,6 +198,7 @@ fn test_cannot_insert_sum_item_in_regular_tree() { Element::new_sum_item(5), None, None, + grove_version ) .unwrap(), Err(Error::InvalidInput("cannot 
add sum item to non sum tree")) @@ -181,14 +207,16 @@ fn test_cannot_insert_sum_item_in_regular_tree() { #[test] fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { + let grove_version = GroveVersion::latest(); // All elements in a sum tree must have a summed feature type - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key", Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -199,6 +227,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_sum_item(30), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -208,6 +237,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_sum_item(10), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -218,6 +248,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_item(vec![10]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -227,6 +258,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_item(vec![15]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -235,14 +267,19 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { // Open merk and check all elements in it let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( merk.get_feature_type( b"item1", true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .expect("node should exist"), @@ -252,7 +289,8 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { merk.get_feature_type( b"item2", true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], 
&GroveVersion) -> Option>, + grove_version ) .unwrap() .expect("node should exist"), @@ -262,7 +300,8 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { merk.get_feature_type( b"item3", true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .expect("node should exist"), @@ -272,7 +311,8 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { merk.get_feature_type( b"item4", true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .expect("node should exist"), @@ -281,13 +321,14 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { assert_eq!(merk.sum().expect("expected to get sum"), Some(40)); // Perform the same test on regular trees - let db = make_test_grovedb(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -297,6 +338,7 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_item(vec![30]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -306,19 +348,25 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { Element::new_item(vec![10]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( merk.get_feature_type( b"item1", true, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version ) .unwrap() .expect("node should exist"), @@ -328,7 +376,8 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { merk.get_feature_type( b"item2", true, - 
Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version ) .unwrap() .expect("node should exist"), @@ -339,13 +388,15 @@ fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { #[test] fn test_sum_tree_feature() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), b"key", Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -355,7 +406,11 @@ fn test_sum_tree_feature() { // Sum should be non for non sum tree // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), None); @@ -367,11 +422,12 @@ fn test_sum_tree_feature() { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert sum tree"); let sum_tree = db - .get([TEST_LEAF].as_ref(), b"key2", None) + .get([TEST_LEAF].as_ref(), b"key2", None, grove_version) .unwrap() .expect("should retrieve tree"); assert_eq!(sum_tree.sum_value_or_default(), 0); @@ -383,12 +439,17 @@ fn test_sum_tree_feature() { Element::new_sum_item(30), None, None, + grove_version, ) .unwrap() .expect("should insert item"); // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), Some(30)); @@ -400,6 +461,7 @@ fn test_sum_tree_feature() { Element::new_sum_item(-10), None, 
None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -409,11 +471,16 @@ fn test_sum_tree_feature() { Element::new_sum_item(50), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), Some(70)); // 30 - 10 + 50 = 70 @@ -425,11 +492,16 @@ fn test_sum_tree_feature() { Element::new_item(vec![29]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), Some(70)); @@ -441,6 +513,7 @@ fn test_sum_tree_feature() { Element::new_sum_item(10), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -450,19 +523,30 @@ fn test_sum_tree_feature() { Element::new_sum_item(-100), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), Some(-60)); // 30 + 10 - 100 = -60 // We can not replace a normal item with a sum item, so let's delete it first - db.delete([TEST_LEAF, b"key2"].as_ref(), b"item4", None, None) - .unwrap() - .expect("expected to delete"); + db.delete( + [TEST_LEAF, b"key2"].as_ref(), + b"item4", + None, + None, + grove_version, + ) + .unwrap() + 
.expect("expected to delete"); // Use a large value db.insert( [TEST_LEAF, b"key2"].as_ref(), @@ -470,11 +554,16 @@ fn test_sum_tree_feature() { Element::new_sum_item(10000000), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key2"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(merk.sum().expect("expected to get sum"), Some(9999940)); // 30 + @@ -487,7 +576,8 @@ fn test_sum_tree_feature() { #[test] fn test_sum_tree_propagation() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); // Tree // SumTree // SumTree @@ -501,6 +591,7 @@ fn test_sum_tree_propagation() { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -510,6 +601,7 @@ fn test_sum_tree_propagation() { Element::empty_sum_tree(), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -519,6 +611,7 @@ fn test_sum_tree_propagation() { Element::new_sum_item(20), None, None, + grove_version, ) .unwrap() .expect("should insert tree"); @@ -528,6 +621,7 @@ fn test_sum_tree_propagation() { Element::new_item(vec![2]), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -537,6 +631,7 @@ fn test_sum_tree_propagation() { Element::new_sum_item(5), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -546,6 +641,7 @@ fn test_sum_tree_propagation() { Element::new_sum_item(10), None, None, + grove_version, ) .unwrap() .expect("should insert item"); @@ -560,12 +656,13 @@ fn test_sum_tree_propagation() { ])), None, None, + grove_version, ) .unwrap() .expect("should insert item"); let sum_tree = db - .get([TEST_LEAF].as_ref(), b"key", None) + .get([TEST_LEAF].as_ref(), b"key", None, grove_version) 
.unwrap() .expect("should fetch tree"); assert_eq!(sum_tree.sum_value_or_default(), 35); @@ -574,7 +671,11 @@ fn test_sum_tree_propagation() { // Assert node feature types let test_leaf_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( @@ -582,7 +683,8 @@ fn test_sum_tree_propagation() { .get_feature_type( b"key", true, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version ) .unwrap() .expect("node should exist"), @@ -590,7 +692,11 @@ fn test_sum_tree_propagation() { )); let parent_sum_tree = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( @@ -598,7 +704,8 @@ fn test_sum_tree_propagation() { .get_feature_type( b"tree2", true, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version ) .unwrap() .expect("node should exist"), @@ -611,6 +718,7 @@ fn test_sum_tree_propagation() { .open_non_transactional_merk_at_path( [TEST_LEAF, b"key", b"tree2"].as_ref().into(), Some(&batch), + grove_version, ) .unwrap() .expect("should open tree"); @@ -619,7 +727,8 @@ fn test_sum_tree_propagation() { .get_feature_type( b"item1", true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .expect("node should exist"), @@ -630,7 +739,8 @@ fn test_sum_tree_propagation() { .get_feature_type( b"sumitem1", true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .expect("node should exist"), @@ -641,7 +751,8 @@ fn 
test_sum_tree_propagation() { .get_feature_type( b"sumitem2", true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .expect("node should exist"), @@ -654,7 +765,8 @@ fn test_sum_tree_propagation() { .get_feature_type( b"item2", true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .expect("node should exist"), @@ -664,7 +776,8 @@ fn test_sum_tree_propagation() { #[test] fn test_sum_tree_with_batches() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); let ops = vec![ GroveDbOp::insert_op( vec![TEST_LEAF.to_vec()], @@ -682,13 +795,17 @@ fn test_sum_tree_with_batches() { Element::new_sum_item(10), ), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); @@ -697,7 +814,8 @@ fn test_sum_tree_with_batches() { .get_feature_type( b"a", true, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version ) .unwrap() .expect("node should exist"), @@ -708,7 +826,8 @@ fn test_sum_tree_with_batches() { .get_feature_type( b"b", true, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + grove_version ) .unwrap() .expect("node should exist"), @@ -721,13 +840,17 @@ fn test_sum_tree_with_batches() { b"c".to_vec(), Element::new_sum_item(10), )]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); let batch = StorageBatch::new(); 
let sum_tree = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert!(matches!( @@ -735,7 +858,8 @@ fn test_sum_tree_with_batches() { .get_feature_type( b"c", true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .expect("node should exist"), @@ -744,7 +868,7 @@ fn test_sum_tree_with_batches() { assert_eq!(sum_tree.sum().expect("expected to get sum"), Some(20)); // Test propagation - // Add a new sum tree with it's own sum items, should affect sum of original + // Add a new sum tree with its own sum items, should affect sum of original // tree let ops = vec![ GroveDbOp::insert_op( @@ -803,13 +927,17 @@ fn test_sum_tree_with_batches() { Element::new_item(vec![5]), ), ]; - db.apply_batch(ops, None, None) + db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open tree"); assert_eq!(sum_tree.sum().expect("expected to get sum"), Some(41)); diff --git a/grovedb/src/tests/tree_hashes_tests.rs b/grovedb/src/tests/tree_hashes_tests.rs index d2418132..e86b8fd0 100644 --- a/grovedb/src/tests/tree_hashes_tests.rs +++ b/grovedb/src/tests/tree_hashes_tests.rs @@ -32,6 +32,7 @@ use grovedb_merk::tree::{ combine_hash, kv::ValueDefinedCostType, kv_digest_to_kv_hash, node_hash, value_hash, NULL_HASH, }; use grovedb_storage::StorageBatch; +use grovedb_version::version::GroveVersion; use crate::{ tests::{make_test_grovedb, TEST_LEAF}, @@ -40,7 +41,8 @@ use crate::{ #[test] fn test_node_hashes_when_inserting_item() 
{ - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -48,6 +50,7 @@ fn test_node_hashes_when_inserting_item() { Element::new_item(b"baguette".to_vec()), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -55,7 +58,11 @@ fn test_node_hashes_when_inserting_item() { let batch = StorageBatch::new(); let test_leaf_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); @@ -63,7 +70,8 @@ fn test_node_hashes_when_inserting_item() { .get_value_and_value_hash( b"key1", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") @@ -73,7 +81,8 @@ fn test_node_hashes_when_inserting_item() { .get_kv_hash( b"key1", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") @@ -83,7 +92,8 @@ fn test_node_hashes_when_inserting_item() { .get_hash( b"key1", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") @@ -104,7 +114,8 @@ fn test_node_hashes_when_inserting_item() { #[test] fn test_tree_hashes_when_inserting_empty_tree() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -112,6 +123,7 @@ fn test_tree_hashes_when_inserting_empty_tree() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -119,7 +131,11 @@ fn test_tree_hashes_when_inserting_empty_tree() { let batch = StorageBatch::new(); let test_leaf_merk = db - 
.open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); @@ -127,7 +143,8 @@ fn test_tree_hashes_when_inserting_empty_tree() { .get_value_and_value_hash( b"key1", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") @@ -137,7 +154,8 @@ fn test_tree_hashes_when_inserting_empty_tree() { .get_kv_hash( b"key1", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") @@ -147,14 +165,19 @@ fn test_tree_hashes_when_inserting_empty_tree() { .get_hash( b"key1", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") .expect("value hash should be some"); let underlying_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); @@ -176,7 +199,8 @@ fn test_tree_hashes_when_inserting_empty_tree() { #[test] fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { - let db = make_test_grovedb(); + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); db.insert( [TEST_LEAF].as_ref(), @@ -184,6 +208,7 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -194,6 +219,7 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { Element::empty_tree(), None, None, + grove_version, ) .unwrap() .expect("successful subtree insert"); @@ -201,12 +227,20 @@ fn 
test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { let batch = StorageBatch::new(); let under_top_merk = db - .open_non_transactional_merk_at_path([TEST_LEAF].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); let middle_merk_key1 = db - .open_non_transactional_merk_at_path([TEST_LEAF, b"key1"].as_ref().into(), Some(&batch)) + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) .unwrap() .expect("should open merk"); @@ -216,7 +250,8 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .get_value_and_value_hash( b"key2", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") @@ -226,6 +261,7 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .open_non_transactional_merk_at_path( [TEST_LEAF, b"key1", b"key2"].as_ref().into(), Some(&batch), + grove_version, ) .unwrap() .expect("should open merk"); @@ -243,7 +279,8 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .get_kv_hash( b"key2", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get kv hash") @@ -257,7 +294,8 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .get_hash( b"key2", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get kv hash") @@ -279,7 +317,8 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .get_value_and_value_hash( b"key1", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") @@ -290,7 +329,7 @@ fn 
test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { "0201046b65793200" ); - let element = Element::deserialize(middle_elem_value_key1.as_slice()) + let element = Element::deserialize(middle_elem_value_key1.as_slice(), grove_version) .expect("expected to deserialize element"); assert_eq!(element, Element::new_tree(Some(b"key2".to_vec()))); @@ -321,7 +360,8 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .get_kv_hash( b"key1", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") @@ -339,7 +379,8 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .get_hash( b"key1", true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get value hash") diff --git a/grovedb/src/util.rs b/grovedb/src/util.rs index d05f2396..b9b624a4 100644 --- a/grovedb/src/util.rs +++ b/grovedb/src/util.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - /// Macro to execute same piece of code on different storage contexts /// (transactional or not) using path argument. macro_rules! storage_context_optional_tx { @@ -57,6 +29,7 @@ macro_rules! storage_context_with_parent_optional_tx { $storage:ident, $root_key:ident, $is_sum_tree:ident, + $grove_version:ident, { $($body:tt)* } ) => { { @@ -71,12 +44,13 @@ macro_rules! storage_context_with_parent_optional_tx { .unwrap_add_cost(&mut $cost); let element = cost_return_on_error!( &mut $cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|e| { + Element::get_from_storage(&parent_storage, parent_key, $grove_version) + .map_err(|e| { Error::PathParentLayerNotFound( format!( - "could not get key for parent of subtree optional on tx: {}", - e - ) + "could not get key for parent of subtree optional on tx: {}", + e + ) ) }) ); @@ -112,7 +86,7 @@ macro_rules! storage_context_with_parent_optional_tx { ).unwrap_add_cost(&mut $cost); let element = cost_return_on_error!( &mut $cost, - Element::get_from_storage(&parent_storage, parent_key).map_err(|e| { + Element::get_from_storage(&parent_storage, parent_key, $grove_version).map_err(|e| { Error::PathParentLayerNotFound( format!( "could not get key for parent of subtree optional no tx: {}", @@ -161,6 +135,7 @@ macro_rules! storage_context_with_parent_optional_tx_internal_error { $storage:ident, $root_key:ident, $is_sum_tree:ident, + $grove_version:ident, { $($body:tt)* } ) => { { @@ -173,8 +148,11 @@ macro_rules! 
storage_context_with_parent_optional_tx_internal_error { let parent_storage = $db .get_transactional_storage_context(parent_path, $batch, tx) .unwrap_add_cost(&mut $cost); - let result = Element::get_from_storage(&parent_storage, parent_key) - .map_err(|e| { + let result = Element::get_from_storage( + &parent_storage, + parent_key, + $grove_version + ).map_err(|e| { Error::PathParentLayerNotFound( format!( "could not get key for parent of subtree optional on tx: {}", @@ -218,8 +196,11 @@ macro_rules! storage_context_with_parent_optional_tx_internal_error { parent_path, $batch ).unwrap_add_cost(&mut $cost); - let result = Element::get_from_storage(&parent_storage, parent_key) - .map_err(|e| { + let result = Element::get_from_storage( + &parent_storage, + parent_key, + $grove_version + ).map_err(|e| { Error::PathParentLayerNotFound( format!( "could not get key for parent of subtree optional no tx: {}", @@ -296,6 +277,7 @@ macro_rules! merk_optional_tx { $batch:expr, $transaction:ident, $subtree:ident, + $grove_version:ident, { $($body:tt)* } ) => { if $path.is_root() { @@ -312,7 +294,8 @@ macro_rules! merk_optional_tx { ::grovedb_merk::Merk::open_base( storage.unwrap_add_cost(&mut $cost), false, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( @@ -333,6 +316,7 @@ macro_rules! merk_optional_tx { storage, root_key, is_sum_tree, + $grove_version, { #[allow(unused_mut)] let mut $subtree = cost_return_on_error!( @@ -342,6 +326,7 @@ macro_rules! merk_optional_tx { root_key, is_sum_tree, Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( @@ -366,6 +351,7 @@ macro_rules! 
merk_optional_tx_internal_error { $batch:expr, $transaction:ident, $subtree:ident, + $grove_version:ident, { $($body:tt)* } ) => { if $path.is_root() { @@ -382,7 +368,8 @@ macro_rules! merk_optional_tx_internal_error { ::grovedb_merk::Merk::open_base( storage.unwrap_add_cost(&mut $cost), false, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + $grove_version ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( @@ -403,6 +390,7 @@ macro_rules! merk_optional_tx_internal_error { storage, root_key, is_sum_tree, + $grove_version, { #[allow(unused_mut)] let mut $subtree = cost_return_on_error!( @@ -412,6 +400,7 @@ macro_rules! merk_optional_tx_internal_error { root_key, is_sum_tree, Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( @@ -436,6 +425,7 @@ macro_rules! merk_optional_tx_path_not_empty { $batch:expr, $transaction:ident, $subtree:ident, + $grove_version:ident, { $($body:tt)* } ) => { { @@ -449,6 +439,7 @@ macro_rules! merk_optional_tx_path_not_empty { storage, root_key, is_sum_tree, + $grove_version, { #[allow(unused_mut)] let mut $subtree = cost_return_on_error!( @@ -458,6 +449,7 @@ macro_rules! merk_optional_tx_path_not_empty { root_key, is_sum_tree, Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( @@ -481,6 +473,7 @@ macro_rules! root_merk_optional_tx { $batch:expr, $transaction:ident, $subtree:ident, + $grove_version:ident, { $($body:tt)* } ) => { { @@ -497,7 +490,8 @@ macro_rules! 
root_merk_optional_tx { ::grovedb_merk::Merk::open_base( storage.unwrap_add_cost(&mut $cost), false, - Some(&Element::value_defined_cost_for_serialized_value) + Some(&Element::value_defined_cost_for_serialized_value), + $grove_version, ).map(|merk_res| merk_res .map_err(|_| crate::Error::CorruptedData( diff --git a/grovedb/src/visualize.rs b/grovedb/src/visualize.rs index 9eb1c00b..39cf3432 100644 --- a/grovedb/src/visualize.rs +++ b/grovedb/src/visualize.rs @@ -37,6 +37,7 @@ use bincode::{ use grovedb_merk::{Merk, VisualizeableMerk}; use grovedb_path::SubtreePathBuilder; use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; use grovedb_visualize::{visualize_stdout, Drawer, Visualize}; use crate::{ @@ -187,13 +188,14 @@ impl GroveDb { mut drawer: Drawer, path: SubtreePathBuilder<'_, B>, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result> { drawer.down(); storage_context_optional_tx!(self.db, (&path).into(), None, transaction, storage, { let mut iter = Element::iterator(storage.unwrap().raw_iter()).unwrap(); while let Some((key, element)) = iter - .next_element() + .next_element(grove_version) .unwrap() .expect("cannot get next element") { @@ -209,6 +211,7 @@ impl GroveDb { drawer, path.derive_owned_with_child(key), transaction, + grove_version, )?; drawer.up(); } @@ -227,10 +230,16 @@ impl GroveDb { &self, mut drawer: Drawer, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result> { drawer.down(); - drawer = self.draw_subtree(drawer, SubtreePathBuilder::new(), transaction)?; + drawer = self.draw_subtree( + drawer, + SubtreePathBuilder::new(), + transaction, + grove_version, + )?; drawer.up(); Ok(drawer) @@ -240,9 +249,10 @@ impl GroveDb { &self, mut drawer: Drawer, transaction: TransactionArg, + grove_version: &GroveVersion, ) -> Result> { drawer.write(b"root")?; - drawer = self.draw_root_tree(drawer, transaction)?; + drawer = self.draw_root_tree(drawer, transaction, grove_version)?; 
drawer.flush()?; Ok(drawer) } @@ -250,7 +260,7 @@ impl GroveDb { impl Visualize for GroveDb { fn visualize(&self, drawer: Drawer) -> Result> { - self.visualize_start(drawer, None) + self.visualize_start(drawer, None, GroveVersion::latest()) } } diff --git a/merk/Cargo.toml b/merk/Cargo.toml index 2d1c65be..4364a564 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -20,6 +20,7 @@ grovedb-costs = { version = "1.0.0-rc.2", path = "../costs" } grovedb-visualize = { version = "1.0.0-rc.2", path = "../visualize" } grovedb-path = { version = "1.0.0-rc.2", path = "../path" } hex = { version = "0.4.3" } +grovedb-version = { version = "1.0.0-rc.2", path = "../grovedb-version" } [dependencies.time] version = "0.3.34" diff --git a/merk/benches/merk.rs b/merk/benches/merk.rs index ff0fbaef..e2d55219 100644 --- a/merk/benches/merk.rs +++ b/merk/benches/merk.rs @@ -46,7 +46,7 @@ pub fn get(c: &mut Criterion) { let batch_size = 2_000; let num_batches = initial_size / batch_size; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let mut batches = vec![]; for i in 0..num_batches { @@ -56,7 +56,7 @@ pub fn get(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -64,6 +64,7 @@ pub fn get(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -78,9 +79,14 @@ pub fn get(c: &mut Criterion) { let key_index = (i / num_batches) as usize; let key = &batches[batch_index][key_index].0; - merk.get(key, true, None:: Option>) - .unwrap() - .expect("get failed"); + merk.get( + key, + true, + None:: Option>, + grove_version, + ) + .unwrap() + .expect("get failed"); i = (i + 1) % initial_size; }) @@ -101,7 +107,7 @@ pub fn insert_1m_2k_seq(c: &mut Criterion) { } c.bench_function("insert_1m_2k_seq", |b| { 
- let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let mut i = 0; b.iter_with_large_drop(|| { @@ -111,7 +117,7 @@ pub fn insert_1m_2k_seq(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -119,6 +125,7 @@ pub fn insert_1m_2k_seq(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -141,7 +148,7 @@ pub fn insert_1m_2k_rand(c: &mut Criterion) { } c.bench_function("insert_1m_2k_rand", |b| { - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let mut i = 0; b.iter_with_large_drop(|| { @@ -151,7 +158,7 @@ pub fn insert_1m_2k_rand(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -159,6 +166,7 @@ pub fn insert_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -175,7 +183,7 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; let mut batches = Vec::with_capacity(n_batches); - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_seq(((i * batch_size) as u64)..((i + 1) * batch_size) as u64); @@ -184,7 +192,7 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -192,6 +200,7 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { 
BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -209,7 +218,7 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -217,6 +226,7 @@ pub fn update_1m_2k_seq(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -233,7 +243,7 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; let mut batches = Vec::with_capacity(n_batches); - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); @@ -242,7 +252,7 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -250,6 +260,7 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -267,7 +278,7 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -275,6 +286,7 @@ pub fn update_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -292,7 +304,7 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { let mut batches = Vec::with_capacity(n_batches); let mut delete_batches = Vec::with_capacity(n_batches); - let mut merk = 
TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); @@ -302,7 +314,7 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -310,6 +322,7 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -331,7 +344,7 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -339,6 +352,7 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -349,7 +363,7 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -357,6 +371,7 @@ pub fn delete_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -374,7 +389,7 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { let mut batches = Vec::with_capacity(n_batches); let mut prove_keys_per_batch = Vec::with_capacity(n_batches); - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); @@ -383,7 +398,7 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + 
None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -391,6 +406,7 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -408,7 +424,7 @@ pub fn prove_1m_2k_rand(c: &mut Criterion) { b.iter_with_large_drop(|| { let keys = prove_keys_per_batch[i % n_batches].clone(); - merk.prove_unchecked(keys, None, true) + merk.prove_unchecked(keys, None, true, grove_version) .unwrap() .expect("prove failed"); i += 1; @@ -423,7 +439,7 @@ pub fn build_trunk_chunk_1m_2k_rand(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); @@ -432,7 +448,7 @@ pub fn build_trunk_chunk_1m_2k_rand(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -440,6 +456,7 @@ pub fn build_trunk_chunk_1m_2k_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") @@ -465,7 +482,7 @@ pub fn chunkproducer_rand_1m_1_rand(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); @@ -474,7 +491,7 @@ pub fn chunkproducer_rand_1m_1_rand(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -482,6 +499,7 @@ pub fn 
chunkproducer_rand_1m_1_rand(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") @@ -493,7 +511,7 @@ pub fn chunkproducer_rand_1m_1_rand(c: &mut Criterion) { c.bench_function("chunkproducer_rand_1m_1_rand", |b| { b.iter_with_large_drop(|| { let i = rng.gen::() % chunks.len(); - let _chunk = chunks.chunk(i).unwrap(); + let _chunk = chunks.chunk(i, grove_version).unwrap(); }); }); } @@ -505,7 +523,7 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { let n_batches: usize = initial_size / batch_size; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..n_batches { let batch = make_batch_rand(batch_size as u64, i as u64); @@ -514,7 +532,7 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -522,6 +540,7 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") @@ -529,11 +548,11 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { let mut chunks = merk.chunks().unwrap().into_iter(); - let mut next = || match chunks.next() { + let mut next = || match chunks.next(grove_version) { Some(chunk) => chunk, None => { chunks = merk.chunks().unwrap().into_iter(); - chunks.next().unwrap() + chunks.next(grove_version).unwrap() } }; @@ -548,7 +567,7 @@ pub fn chunk_iter_1m_1(c: &mut Criterion) { pub fn restore_500_1(c: &mut Criterion) { let merk_size = 500; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_rand(merk_size as u64, 0_u64); merk.apply_unchecked::<_, Vec, _, _, _, _>( @@ -556,7 +575,7 @@ pub fn restore_500_1(c: &mut Criterion) { &[], None, &|_k, _v| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], 
&GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -564,6 +583,7 @@ pub fn restore_500_1(c: &mut Criterion) { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed"); @@ -585,7 +605,8 @@ pub fn restore_500_1(c: &mut Criterion) { let m = Merk::open_standalone( ctx, false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); diff --git a/merk/benches/ops.rs b/merk/benches/ops.rs index f9576fba..d194e6ff 100644 --- a/merk/benches/ops.rs +++ b/merk/benches/ops.rs @@ -42,7 +42,7 @@ fn insert_1m_10k_seq_memonly(c: &mut Criterion) { let batch_size = 10_000; let n_batches = initial_size / batch_size; - let mut tree = Owner::new(make_tree_seq(initial_size)); + let mut tree = Owner::new(make_tree_seq(initial_size, grove_version)); let mut batches = Vec::new(); for i in 0..n_batches { @@ -54,7 +54,7 @@ fn insert_1m_10k_seq_memonly(c: &mut Criterion) { b.iter(|| { let batch = &batches[i % n_batches as usize]; - tree.own(|tree| apply_memonly_unchecked(tree, batch)); + tree.own(|tree| apply_memonly_unchecked(tree, batch, grove_version)); i += 1; }); }); @@ -66,7 +66,13 @@ fn insert_1m_10k_rand_memonly(c: &mut Criterion) { let batch_size = 10_000; let n_batches = initial_size / batch_size; - let mut tree = Owner::new(make_tree_rand(initial_size, batch_size, 0, false)); + let mut tree = Owner::new(make_tree_rand( + initial_size, + batch_size, + 0, + false, + grove_version, + )); let mut batches = Vec::new(); for i in 0..n_batches { @@ -78,7 +84,7 @@ fn insert_1m_10k_rand_memonly(c: &mut Criterion) { b.iter(|| { let batch = &batches[i % n_batches as usize]; - tree.own(|tree| apply_memonly_unchecked(tree, batch)); + tree.own(|tree| apply_memonly_unchecked(tree, batch, grove_version)); i += 1; }); }); @@ -90,12 +96,12 @@ fn update_1m_10k_seq_memonly(c: &mut Criterion) { let 
batch_size = 10_000; let n_batches = initial_size / batch_size; - let mut tree = Owner::new(make_tree_seq(initial_size)); + let mut tree = Owner::new(make_tree_seq(initial_size, grove_version)); let mut batches = Vec::new(); for i in 0..n_batches { let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size)); - tree.own(|tree| apply_memonly_unchecked(tree, &batch)); + tree.own(|tree| apply_memonly_unchecked(tree, &batch, grove_version)); batches.push(batch); } @@ -104,7 +110,7 @@ fn update_1m_10k_seq_memonly(c: &mut Criterion) { b.iter(|| { let batch = &batches[i % n_batches as usize]; - tree.own(|tree| apply_memonly_unchecked(tree, batch)); + tree.own(|tree| apply_memonly_unchecked(tree, batch, grove_version)); i += 1; }); }); @@ -116,12 +122,18 @@ fn update_1m_10k_rand_memonly(c: &mut Criterion) { let batch_size = 10_000; let n_batches = initial_size / batch_size; - let mut tree = Owner::new(make_tree_rand(initial_size, batch_size, 0, false)); + let mut tree = Owner::new(make_tree_rand( + initial_size, + batch_size, + 0, + false, + grove_version, + )); let mut batches = Vec::new(); for i in 0..n_batches { let batch = make_batch_rand(batch_size, i); - tree.own(|tree| apply_memonly_unchecked(tree, &batch)); + tree.own(|tree| apply_memonly_unchecked(tree, &batch, grove_version)); batches.push(batch); } @@ -130,7 +142,7 @@ fn update_1m_10k_rand_memonly(c: &mut Criterion) { b.iter(|| { let batch = &batches[i % n_batches as usize]; - tree.own(|tree| apply_memonly_unchecked(tree, batch)); + tree.own(|tree| apply_memonly_unchecked(tree, batch, grove_version)); i += 1; }); }); diff --git a/merk/src/debugger.rs b/merk/src/debugger.rs index fc710c0a..c5d322c0 100644 --- a/merk/src/debugger.rs +++ b/merk/src/debugger.rs @@ -2,6 +2,7 @@ use grovedb_costs::CostsExt; use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; use crate::{tree::kv::ValueDefinedCostType, Error, Merk}; @@ -18,7 +19,8 @@ impl<'a, S: StorageContext<'a>> Merk { } 
.wrap_with_cost(Default::default()) }, - None:: Option>, // I wish I knew why + None:: Option>, + GroveVersion::latest(), ) .unwrap() } diff --git a/merk/src/error.rs b/merk/src/error.rs index 83fb3bde..c365b898 100644 --- a/merk/src/error.rs +++ b/merk/src/error.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Errors #[cfg(feature = "full")] use crate::proofs::chunk::error::ChunkError; @@ -141,4 +113,14 @@ pub enum Error { /// Costs errors #[error("costs error: {0}")] CostsError(grovedb_costs::error::Error), + // Version errors + #[error(transparent)] + /// Version error + VersionError(grovedb_version::error::GroveVersionError), +} + +impl From for Error { + fn from(value: grovedb_version::error::GroveVersionError) -> Self { + Error::VersionError(value) + } } diff --git a/merk/src/estimated_costs/average_case_costs.rs b/merk/src/estimated_costs/average_case_costs.rs index 1453d708..12f8c2c9 100644 --- a/merk/src/estimated_costs/average_case_costs.rs +++ b/merk/src/estimated_costs/average_case_costs.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Average case costs for Merk #[cfg(feature = "full")] @@ -336,7 +308,7 @@ pub fn add_average_case_get_merk_node( not_prefixed_key_len: u32, approximate_element_size: u32, is_sum_tree: bool, -) { +) -> Result<(), Error> { // Worst case scenario, the element is not already in memory. // One direct seek has to be performed to read the node from storage. cost.seek_count += 1; @@ -348,6 +320,7 @@ pub fn add_average_case_get_merk_node( approximate_element_size, is_sum_tree, ); + Ok(()) } #[cfg(feature = "full")] diff --git a/merk/src/estimated_costs/mod.rs b/merk/src/estimated_costs/mod.rs index faabce81..bd669db1 100644 --- a/merk/src/estimated_costs/mod.rs +++ b/merk/src/estimated_costs/mod.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! 
Estimated costs for Merk #[cfg(feature = "full")] diff --git a/merk/src/estimated_costs/worst_case_costs.rs b/merk/src/estimated_costs/worst_case_costs.rs index 407e2c70..f4623c8d 100644 --- a/merk/src/estimated_costs/worst_case_costs.rs +++ b/merk/src/estimated_costs/worst_case_costs.rs @@ -74,7 +74,7 @@ pub fn add_worst_case_get_merk_node( not_prefixed_key_len: u32, max_element_size: u32, is_sum_node: bool, -) { +) -> Result<(), Error> { // Worst case scenario, the element is not already in memory. // One direct seek has to be performed to read the node from storage. cost.seek_count += 1; @@ -83,6 +83,7 @@ pub fn add_worst_case_get_merk_node( // worst case, the node has both the left and right link present. cost.storage_loaded_bytes += TreeNode::worst_case_encoded_tree_size(not_prefixed_key_len, max_element_size, is_sum_node); + Ok(()) } #[cfg(feature = "full")] diff --git a/merk/src/merk/apply.rs b/merk/src/merk/apply.rs index a33dfcc4..84b4cb9a 100644 --- a/merk/src/merk/apply.rs +++ b/merk/src/merk/apply.rs @@ -8,6 +8,7 @@ use grovedb_costs::{ CostResult, CostsExt, }; use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; use crate::{ tree::{ @@ -30,12 +31,19 @@ where /// /// # Example /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply::<_, Vec<_>>(&[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], &[], None) - /// .unwrap().expect(""); + /// # let grove_version = GroveVersion::latest(); + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(grove_version); + /// # store.apply::<_, Vec<_>>( + /// &[(vec![4,5,6], + /// Op::Put(vec![0], BasicMerkNode))], + /// &[], + /// None, + /// grove_version + /// ).unwrap().expect(""); /// /// use grovedb_merk::Op; /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// use grovedb_version::version::GroveVersion; /// /// let batch = &[ /// // puts value [4,5,6] to key[1,2,3] @@ -43,13 +51,14 @@ where /// // deletes key [4,5,6] /// (vec![4, 
5, 6], Op::Delete), /// ]; - /// store.apply::<_, Vec<_>>(batch, &[], None).unwrap().expect(""); + /// store.apply::<_, Vec<_>>(batch, &[], None,grove_version).unwrap().expect(""); /// ``` pub fn apply( &mut self, batch: &MerkBatch, aux: &AuxMerkBatch, options: Option, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where KB: AsRef<[u8]>, @@ -67,7 +76,7 @@ where use_sum_nodes, )) }, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_costs, _old_value, _value| Ok((false, None)), &mut |_a, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -75,6 +84,7 @@ where BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) } @@ -87,12 +97,19 @@ where /// /// # Example /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply::<_, Vec<_>>(&[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], &[], None) - /// .unwrap().expect(""); + /// # let grove_version = GroveVersion::latest(); + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(grove_version); + /// # store.apply::<_, Vec<_>>( + /// &[(vec![4,5,6], + /// Op::Put(vec![0], BasicMerkNode))], + /// &[], + /// None, + /// grove_version + /// ).unwrap().expect(""); /// /// use grovedb_merk::Op; /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// use grovedb_version::version::GroveVersion; /// /// let batch = &[ /// // puts value [4,5,6] to key[1,2,3] @@ -100,7 +117,7 @@ where /// // deletes key [4,5,6] /// (vec![4, 5, 6], Op::Delete), /// ]; - /// store.apply::<_, Vec<_>>(batch, &[], None).unwrap().expect(""); + /// store.apply::<_, Vec<_>>(batch, &[], None,grove_version).unwrap().expect(""); /// ``` pub fn apply_with_specialized_costs( &mut self, @@ -108,7 +125,10 @@ where aux: &AuxMerkBatch, options: Option, old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + 
grove_version: &GroveVersion, ) -> CostResult<(), Error> where KB: AsRef<[u8]>, @@ -127,6 +147,7 @@ where BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) } @@ -140,21 +161,24 @@ where /// /// # Example /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( + /// # let grove_version = GroveVersion::latest(); + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(grove_version); + /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( /// /// /// /// /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], /// &[], /// None, /// &|k, v| Ok(0), - /// None::<&fn(&[u8]) -> Option>, + /// None::<&fn(&[u8], &GroveVersion) -> Option>, /// &mut |s, v, o| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)), + /// grove_version, /// ).unwrap().expect(""); /// /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; /// use grovedb_merk::Op; /// use grovedb_merk::tree::kv::ValueDefinedCostType; /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// use grovedb_version::version::GroveVersion; /// /// let batch = &[ /// // puts value [4,5,6] to key[1,2,3] @@ -164,13 +188,14 @@ where /// ]; /// /// store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( - /// batch, + /// batch, /// &[], /// None, /// &|k, v| Ok(0), - /// None::<&fn(&[u8]) -> Option>, + /// None::<&fn(&[u8], &GroveVersion) -> Option>, /// &mut |s, v, o| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)), + /// grove_version, /// ).unwrap().expect(""); /// ``` pub fn apply_with_costs_just_in_time_value_update( @@ -179,7 +204,9 @@ where aux: &AuxMerkBatch, options: Option, old_specialized_cost: &impl Fn(&Vec, &Vec) -> Result, - value_defined_cost_fn: Option<&impl 
Fn(&[u8]) -> Option>, + value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, update_tree_value_based_on_costs: &mut impl FnMut( &StorageCost, &Vec, @@ -196,6 +223,7 @@ where (StorageRemovedBytes, StorageRemovedBytes), Error, >, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where KB: AsRef<[u8]>, @@ -228,6 +256,7 @@ where value_defined_cost_fn, update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) } @@ -241,21 +270,24 @@ where /// /// # Example /// ``` - /// # let mut store = grovedb_merk::test_utils::TempMerk::new(); - /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( + /// # let grove_version = GroveVersion::latest(); + /// # let mut store = grovedb_merk::test_utils::TempMerk::new(grove_version); + /// # store.apply_with_costs_just_in_time_value_update::<_, Vec<_>>( /// /// /// /// /// &[(vec![4,5,6], Op::Put(vec![0], BasicMerkNode))], /// &[], /// None, /// &|k, v| Ok(0), - /// None::<&fn(&[u8]) -> Option>, + /// None::<&fn(&[u8], &GroveVersion) -> Option>, /// &mut |s, o, v| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)), + /// grove_version, /// ).unwrap().expect(""); /// /// use grovedb_costs::storage_cost::removal::StorageRemovedBytes::NoStorageRemoval; /// use grovedb_merk::Op; /// use grovedb_merk::tree::kv::ValueDefinedCostType; /// use grovedb_merk::TreeFeatureType::BasicMerkNode; + /// use grovedb_version::version::GroveVersion; /// /// let batch = &[ /// // puts value [4,5,6] to key [1,2,3] @@ -263,14 +295,15 @@ where /// // deletes key [4,5,6] /// (vec![4, 5, 6], Op::Delete), /// ]; - /// unsafe { store.apply_unchecked::<_, Vec<_>, _, _, _, _>( /// /// /// - /// batch, + /// unsafe { store.apply_unchecked::<_, Vec<_>, _, _, _, _>( /// /// /// /// /// ////// + /// batch, /// &[], /// None, /// &|k, v| Ok(0), - /// None::<&fn(&[u8]) -> Option>, + /// None::<&fn(&[u8], 
&GroveVersion) -> Option>, /// &mut |s, o, v| Ok((false, None)), - /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)) + /// &mut |s, k, v| Ok((NoStorageRemoval, NoStorageRemoval)), + /// grove_version, /// ).unwrap().expect(""); /// } /// ``` @@ -283,12 +316,13 @@ where value_defined_cost_fn: Option<&V>, update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where KB: AsRef<[u8]>, KA: AsRef<[u8]>, C: Fn(&Vec, &Vec) -> Result, - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, U: FnMut( &StorageCost, &Vec, @@ -310,6 +344,7 @@ where value_defined_cost_fn, update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) .flat_map_ok(|(maybe_tree, key_updates)| { // we set the new root node of the merk tree diff --git a/merk/src/merk/chunks.rs b/merk/src/merk/chunks.rs index ef94571e..20c6cc39 100644 --- a/merk/src/merk/chunks.rs +++ b/merk/src/merk/chunks.rs @@ -2,6 +2,7 @@ use std::collections::VecDeque; use ed::Encode; use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; use crate::{ error::Error, @@ -97,19 +98,25 @@ where pub fn chunk_with_index( &mut self, chunk_index: usize, + grove_version: &GroveVersion, ) -> Result<(Vec, Option), Error> { let traversal_instructions = generate_traversal_instruction(self.height, chunk_index)?; - self.chunk_internal(chunk_index, traversal_instructions) + self.chunk_internal(chunk_index, traversal_instructions, grove_version) } /// Returns the chunk at a given chunk id. 
- pub fn chunk(&mut self, chunk_id: &[u8]) -> Result<(Vec, Option>), Error> { + pub fn chunk( + &mut self, + chunk_id: &[u8], + grove_version: &GroveVersion, + ) -> Result<(Vec, Option>), Error> { let traversal_instructions = vec_bytes_as_traversal_instruction(chunk_id)?; let chunk_index = chunk_index_from_traversal_instruction_with_recovery( traversal_instructions.as_slice(), self.height, )?; - let (chunk, next_index) = self.chunk_internal(chunk_index, traversal_instructions)?; + let (chunk, next_index) = + self.chunk_internal(chunk_index, traversal_instructions, grove_version)?; let next_chunk_id = next_index .map(|index| generate_traversal_instruction_as_vec_bytes(self.height, index)) .transpose()?; @@ -122,6 +129,7 @@ where &mut self, index: usize, traversal_instructions: Vec, + grove_version: &GroveVersion, ) -> Result<(Vec, Option), Error> { // ensure that the chunk index is within bounds let max_chunk_index = self.len(); @@ -136,9 +144,11 @@ where let chunk_height = chunk_height(self.height, index).unwrap(); let chunk = self.merk.walk(|maybe_walker| match maybe_walker { - Some(mut walker) => { - walker.traverse_and_build_chunk(&traversal_instructions, chunk_height) - } + Some(mut walker) => walker.traverse_and_build_chunk( + &traversal_instructions, + chunk_height, + grove_version, + ), None => Err(Error::ChunkingError(ChunkError::EmptyTree( "cannot create chunk producer for empty Merk", ))), @@ -160,12 +170,13 @@ where &mut self, chunk_id: &[u8], limit: Option, + grove_version: &GroveVersion, ) -> Result { // we want to convert the chunk id to the index let chunk_index = vec_bytes_as_traversal_instruction(chunk_id).and_then(|instruction| { chunk_index_from_traversal_instruction(instruction.as_slice(), self.height) })?; - self.multi_chunk_with_limit_and_index(chunk_index, limit) + self.multi_chunk_with_limit_and_index(chunk_index, limit, grove_version) } /// Generate multichunk with chunk index @@ -175,6 +186,7 @@ where &mut self, index: usize, limit: 
Option, + grove_version: &GroveVersion, ) -> Result { // TODO: what happens if the vec is filled? // we need to have some kind of hardhoc limit value if none is supplied. @@ -210,6 +222,7 @@ where let subtree_multi_chunk_result = self.subtree_multi_chunk_with_limit( current_index.expect("confirmed is not None"), temp_limit, + grove_version, ); let limit_too_small_error = matches!( @@ -253,13 +266,14 @@ where &mut self, index: usize, limit: Option, + grove_version: &GroveVersion, ) -> Result { let max_chunk_index = number_of_chunks(self.height); let mut chunk_index = index; // we first get the chunk at the given index // TODO: use the returned chunk index rather than tracking - let (chunk_ops, _) = self.chunk_with_index(chunk_index)?; + let (chunk_ops, _) = self.chunk_with_index(chunk_index, grove_version)?; let mut chunk_byte_length = chunk_ops.encoding_length().map_err(|_e| { Error::ChunkingError(ChunkError::InternalError("can't get encoding length")) })?; @@ -282,7 +296,7 @@ where // we only perform replacements on Hash nodes if matches!(chunk[iteration_index], Op::Push(Node::Hash(..))) { // TODO: use the returned chunk index rather than tracking - let (replacement_chunk, _) = self.chunk_with_index(chunk_index)?; + let (replacement_chunk, _) = self.chunk_with_index(chunk_index, grove_version)?; // calculate the new total let new_total = replacement_chunk.encoding_length().map_err(|_e| { @@ -343,7 +357,10 @@ where /// optimizing throughput compared to random access. 
// TODO: this is not better than random access, as we are not keeping state // that will make this more efficient, decide if this should be fixed or not - fn next_chunk(&mut self) -> Option, Option>), Error>> { + fn next_chunk( + &mut self, + grove_version: &GroveVersion, + ) -> Option, Option>), Error>> { let max_index = number_of_chunks(self.height); if self.index > max_index { return None; @@ -352,7 +369,7 @@ where // get the chunk at the given index // return the next index as a string Some( - self.chunk_with_index(self.index) + self.chunk_with_index(self.index, grove_version) .and_then(|(chunk, chunk_index)| { chunk_index .map(|index| { @@ -366,14 +383,15 @@ where } /// Iterate over each chunk, returning `None` after last chunk -impl<'db, S> Iterator for ChunkProducer<'db, S> +impl<'db, S> ChunkProducer<'db, S> where S: StorageContext<'db>, { - type Item = Result<(Vec, Option>), Error>; - - fn next(&mut self) -> Option { - self.next_chunk() + pub fn next( + &mut self, + grove_version: &GroveVersion, + ) -> Option, Option>), Error>> { + self.next_chunk(grove_version) } } @@ -451,13 +469,14 @@ mod test { #[test] fn test_merk_chunk_len() { + let grove_version = GroveVersion::latest(); // Tree of height 5 - max of 31 elements, min of 16 elements // 5 will be broken into 2 layers = [3, 2] // exit nodes from first layer = 2^3 = 8 // total_chunk = 1 + 8 = 9 chunks - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..20); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(5)); @@ -468,9 +487,9 @@ mod test { // 4 layers -> [3,3,2,2] // chunk_count_per_layer -> [1, 8, 64, 256] // total = 341 chunks - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..1000); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, 
&[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(10)); @@ -480,6 +499,7 @@ mod test { #[test] fn test_chunk_producer_iter() { + let grove_version = GroveVersion::latest(); // tree with height 4 // full tree // 7 @@ -492,9 +512,9 @@ mod test { // going to be broken into [2, 2] // that's a total of 5 chunks - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -508,17 +528,18 @@ mod test { // as that from the chunk producer for i in 1..=5 { assert_eq!( - chunks.next().unwrap().unwrap().0, - chunk_producer.chunk_with_index(i).unwrap().0 + chunks.next(grove_version).unwrap().unwrap().0, + chunk_producer.chunk_with_index(i, grove_version).unwrap().0 ); } // returns None after max - assert!(chunks.next().is_none()); + assert!(chunks.next(grove_version).is_none()); } #[test] fn test_random_chunk_access() { + let grove_version = GroveVersion::latest(); // tree with height 4 // full tree // 7 @@ -531,9 +552,9 @@ mod test { // going to be broken into [2, 2] // that's a total of 5 chunks - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -548,8 +569,8 @@ mod test { assert_eq!(chunk_producer.len(), 5); // assert bounds - assert!(chunk_producer.chunk_with_index(0).is_err()); - assert!(chunk_producer.chunk_with_index(6).is_err()); + assert!(chunk_producer.chunk_with_index(0, grove_version).is_err()); + assert!(chunk_producer.chunk_with_index(6, grove_version).is_err()); // first chunk // expected: @@ -559,24 +580,52 @@ mod test { // / \ / \ // H(1) H(5) H(9) 
H(13) let (chunk, next_chunk) = chunk_producer - .chunk_with_index(1) + .chunk_with_index(1, grove_version) .expect("should generate chunk"); assert_eq!(chunk.len(), 13); assert_eq!(next_chunk, Some(2)); assert_eq!( chunk, vec![ - Op::Push(traverse_get_node_hash(&mut tree_walker, &[LEFT, LEFT])), - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[LEFT])), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT], + grove_version + )), Op::Parent, - Op::Push(traverse_get_node_hash(&mut tree_walker, &[LEFT, RIGHT])), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, RIGHT], + grove_version + )), Op::Child, - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[])), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[], + grove_version + )), Op::Parent, - Op::Push(traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT])), - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[RIGHT])), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT], + grove_version + )), Op::Parent, - Op::Push(traverse_get_node_hash(&mut tree_walker, &[RIGHT, RIGHT])), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, RIGHT], + grove_version + )), Op::Child, Op::Child ] @@ -588,7 +637,7 @@ mod test { // / \ // 0 2 let (chunk, next_chunk) = chunk_producer - .chunk_with_index(2) + .chunk_with_index(2, grove_version) .expect("should generate chunk"); assert_eq!(chunk.len(), 5); assert_eq!(next_chunk, Some(3)); @@ -597,16 +646,19 @@ mod test { vec![ Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, LEFT, LEFT] + &[LEFT, LEFT, LEFT], + grove_version )), Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, LEFT] + &[LEFT, LEFT], + grove_version )), Op::Parent, 
Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, LEFT, RIGHT] + &[LEFT, LEFT, RIGHT], + grove_version )), Op::Child ] @@ -618,7 +670,7 @@ mod test { // / \ // 4 6 let (chunk, next_chunk) = chunk_producer - .chunk_with_index(3) + .chunk_with_index(3, grove_version) .expect("should generate chunk"); assert_eq!(chunk.len(), 5); assert_eq!(next_chunk, Some(4)); @@ -627,16 +679,19 @@ mod test { vec![ Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, RIGHT, LEFT] + &[LEFT, RIGHT, LEFT], + grove_version )), Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, RIGHT] + &[LEFT, RIGHT], + grove_version )), Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, RIGHT, RIGHT] + &[LEFT, RIGHT, RIGHT], + grove_version )), Op::Child ] @@ -648,7 +703,7 @@ mod test { // / \ // 8 10 let (chunk, next_chunk) = chunk_producer - .chunk_with_index(4) + .chunk_with_index(4, grove_version) .expect("should generate chunk"); assert_eq!(chunk.len(), 5); assert_eq!(next_chunk, Some(5)); @@ -657,16 +712,19 @@ mod test { vec![ Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, LEFT, LEFT] + &[RIGHT, LEFT, LEFT], + grove_version )), Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, LEFT] + &[RIGHT, LEFT], + grove_version )), Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, LEFT, RIGHT] + &[RIGHT, LEFT, RIGHT], + grove_version )), Op::Child ] @@ -678,7 +736,7 @@ mod test { // / \ // 12 14 let (chunk, next_chunk) = chunk_producer - .chunk_with_index(5) + .chunk_with_index(5, grove_version) .expect("should generate chunk"); assert_eq!(chunk.len(), 5); assert_eq!(next_chunk, None); @@ -687,16 +745,19 @@ mod test { vec![ Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, RIGHT, LEFT] + &[RIGHT, RIGHT, LEFT], + grove_version )), Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, RIGHT] + &[RIGHT, RIGHT], + grove_version )), 
Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, RIGHT, RIGHT] + &[RIGHT, RIGHT, RIGHT], + grove_version )), Op::Child ] @@ -705,11 +766,12 @@ mod test { #[test] fn test_subtree_chunk_no_limit() { + let grove_version = GroveVersion::latest(); // tree of height 4 // 5 chunks - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -717,7 +779,7 @@ mod test { // generate multi chunk with no limit let mut chunk_producer = ChunkProducer::new(&merk).expect("should create chunk producer"); let chunk_result = chunk_producer - .subtree_multi_chunk_with_limit(1, None) + .subtree_multi_chunk_with_limit(1, None, grove_version) .expect("should generate chunk with limit"); assert_eq!(chunk_result.remaining_limit, None); @@ -741,11 +803,12 @@ mod test { #[test] fn test_subtree_chunk_with_limit() { + let grove_version = GroveVersion::latest(); // tree of height 4 // 5 chunks - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -754,12 +817,12 @@ mod test { // initial chunk is of size 453, so limit of 10 is too small // should return an error - let chunk = chunk_producer.subtree_multi_chunk_with_limit(1, Some(10)); + let chunk = chunk_producer.subtree_multi_chunk_with_limit(1, Some(10), grove_version); assert!(chunk.is_err()); // get just the fist chunk let chunk_result = chunk_producer - .subtree_multi_chunk_with_limit(1, Some(453)) + .subtree_multi_chunk_with_limit(1, Some(453), grove_version) .expect("should generate chunk with limit"); assert_eq!(chunk_result.remaining_limit, 
Some(0)); assert_eq!(chunk_result.next_index, Some(2)); @@ -779,7 +842,7 @@ mod test { // get up to second chunk let chunk_result = chunk_producer - .subtree_multi_chunk_with_limit(1, Some(737)) + .subtree_multi_chunk_with_limit(1, Some(737), grove_version) .expect("should generate chunk with limit"); assert_eq!(chunk_result.remaining_limit, Some(0)); assert_eq!(chunk_result.next_index, Some(3)); @@ -799,7 +862,7 @@ mod test { // get up to third chunk let chunk_result = chunk_producer - .subtree_multi_chunk_with_limit(1, Some(1021)) + .subtree_multi_chunk_with_limit(1, Some(1021), grove_version) .expect("should generate chunk with limit"); assert_eq!(chunk_result.remaining_limit, Some(0)); assert_eq!(chunk_result.next_index, Some(4)); @@ -819,7 +882,7 @@ mod test { // get up to fourth chunk let chunk_result = chunk_producer - .subtree_multi_chunk_with_limit(1, Some(1305)) + .subtree_multi_chunk_with_limit(1, Some(1305), grove_version) .expect("should generate chunk with limit"); assert_eq!(chunk_result.remaining_limit, Some(0)); assert_eq!(chunk_result.next_index, Some(5)); @@ -839,7 +902,7 @@ mod test { // get up to fifth chunk let chunk_result = chunk_producer - .subtree_multi_chunk_with_limit(1, Some(1589)) + .subtree_multi_chunk_with_limit(1, Some(1589), grove_version) .expect("should generate chunk with limit"); assert_eq!(chunk_result.remaining_limit, Some(0)); assert_eq!(chunk_result.next_index, None); @@ -859,7 +922,7 @@ mod test { // limit larger than total chunk let chunk_result = chunk_producer - .subtree_multi_chunk_with_limit(1, Some(usize::MAX)) + .subtree_multi_chunk_with_limit(1, Some(usize::MAX), grove_version) .expect("should generate chunk with limit"); assert_eq!(chunk_result.remaining_limit, Some(18446744073709550026)); assert_eq!(chunk_result.next_index, None); @@ -880,11 +943,12 @@ mod test { #[test] fn test_multi_chunk_with_no_limit_trunk() { + let grove_version = GroveVersion::latest(); // tree of height 4 // 5 chunks - let mut merk = 
TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -894,7 +958,7 @@ mod test { // we generate the chunk starting from index 1, this has no hash nodes // so no multi chunk will be generated let chunk_result = chunk_producer - .multi_chunk_with_limit_and_index(1, None) + .multi_chunk_with_limit_and_index(1, None, grove_version) .expect("should generate chunk with limit"); assert_eq!(chunk_result.remaining_limit, None); @@ -917,11 +981,12 @@ mod test { #[test] fn test_multi_chunk_with_no_limit_not_trunk() { + let grove_version = GroveVersion::latest(); // tree of height 4 // 5 chunks - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -931,7 +996,7 @@ mod test { // we generate the chunk starting from index 2, this has no hash nodes // so no multi chunk will be generated let chunk_result = chunk_producer - .multi_chunk_with_limit_and_index(2, None) + .multi_chunk_with_limit_and_index(2, None, grove_version) .expect("should generate chunk with limit"); assert_eq!(chunk_result.remaining_limit, None); @@ -952,7 +1017,7 @@ mod test { chunk_result.chunk[1], ChunkOp::Chunk( chunk_producer - .chunk_with_index(2) + .chunk_with_index(2, grove_version) .expect("should generate chunk") .0 ) @@ -961,7 +1026,7 @@ mod test { chunk_result.chunk[3], ChunkOp::Chunk( chunk_producer - .chunk_with_index(3) + .chunk_with_index(3, grove_version) .expect("should generate chunk") .0 ) @@ -970,7 +1035,7 @@ mod test { chunk_result.chunk[5], ChunkOp::Chunk( chunk_producer - .chunk_with_index(4) + .chunk_with_index(4, grove_version) 
.expect("should generate chunk") .0 ) @@ -979,7 +1044,7 @@ mod test { chunk_result.chunk[7], ChunkOp::Chunk( chunk_producer - .chunk_with_index(5) + .chunk_with_index(5, grove_version) .expect("should generate chunk") .0 ) @@ -988,11 +1053,12 @@ mod test { #[test] fn test_multi_chunk_with_limit() { + let grove_version = GroveVersion::latest(); // tree of height 4 // 5 chunks - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -1001,7 +1067,8 @@ mod test { // ensure that the remaining limit, next index and values given are correct // if limit is smaller than first chunk, we should get an error - let chunk_result = chunk_producer.multi_chunk_with_limit(vec![].as_slice(), Some(5)); + let chunk_result = + chunk_producer.multi_chunk_with_limit(vec![].as_slice(), Some(5), grove_version); assert!(matches!( chunk_result, Err(Error::ChunkingError(ChunkError::LimitTooSmall(..))) @@ -1011,7 +1078,8 @@ mod test { // data size of chunk 2 is exactly 317 // chunk op encoding for chunk 2 = 321 // hence limit of 317 will be insufficient - let chunk_result = chunk_producer.multi_chunk_with_limit_and_index(2, Some(317)); + let chunk_result = + chunk_producer.multi_chunk_with_limit_and_index(2, Some(317), grove_version); assert!(matches!( chunk_result, Err(Error::ChunkingError(ChunkError::LimitTooSmall(..))) @@ -1022,7 +1090,7 @@ mod test { // chunk 3 chunk op = 321 // padding = 5 let chunk_result = chunk_producer - .multi_chunk_with_limit_and_index(2, Some(321 + 321 + 5)) + .multi_chunk_with_limit_and_index(2, Some(321 + 321 + 5), grove_version) .expect("should generate chunk"); assert_eq!( chunk_result.next_index, diff --git a/merk/src/merk/get.rs b/merk/src/merk/get.rs index 4f953d9f..f38b6fc7 100644 --- a/merk/src/merk/get.rs +++ 
b/merk/src/merk/get.rs @@ -1,5 +1,6 @@ use grovedb_costs::{CostContext, CostResult, CostsExt, OperationCost}; use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; use crate::{ tree::{kv::ValueDefinedCostType, TreeNode}, @@ -24,9 +25,12 @@ where pub fn exists( &self, key: &[u8], - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult { - self.has_node_direct(key, value_defined_cost_fn) + self.has_node_direct(key, value_defined_cost_fn, grove_version) } /// Returns if the value at the given key exists @@ -38,9 +42,12 @@ where pub fn exists_by_traversing_tree( &self, key: &[u8], - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult { - self.has_node(key, value_defined_cost_fn) + self.has_node(key, value_defined_cost_fn, grove_version) } /// Gets a value for the given key. 
If the key is not found, `None` is @@ -52,7 +59,10 @@ where &self, key: &[u8], allow_cache: bool, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult>, Error> { if allow_cache { self.get_node_fn( @@ -63,6 +73,7 @@ where .wrap_with_cost(Default::default()) }, value_defined_cost_fn, + grove_version, ) } else { self.get_node_direct_fn( @@ -73,6 +84,7 @@ where .wrap_with_cost(Default::default()) }, value_defined_cost_fn, + grove_version, ) } } @@ -82,19 +94,24 @@ where &self, key: &[u8], allow_cache: bool, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult, Error> { if allow_cache { self.get_node_fn( key, |node| node.feature_type().wrap_with_cost(Default::default()), value_defined_cost_fn, + grove_version, ) } else { self.get_node_direct_fn( key, |node| node.feature_type().wrap_with_cost(Default::default()), value_defined_cost_fn, + grove_version, ) } } @@ -105,12 +122,25 @@ where &self, key: &[u8], allow_cache: bool, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult, Error> { if allow_cache { - self.get_node_fn(key, |node| node.hash(), value_defined_cost_fn) + self.get_node_fn( + key, + |node| node.hash(), + value_defined_cost_fn, + grove_version, + ) } else { - self.get_node_direct_fn(key, |node| node.hash(), value_defined_cost_fn) + self.get_node_direct_fn( + key, + |node| node.hash(), + value_defined_cost_fn, + grove_version, + ) } } @@ -120,19 +150,24 @@ where &self, key: &[u8], allow_cache: bool, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult, Error> { if allow_cache { self.get_node_fn( key, 
|node| (*node.value_hash()).wrap_with_cost(OperationCost::default()), value_defined_cost_fn, + grove_version, ) } else { self.get_node_direct_fn( key, |node| (*node.value_hash()).wrap_with_cost(OperationCost::default()), value_defined_cost_fn, + grove_version, ) } } @@ -143,19 +178,24 @@ where &self, key: &[u8], allow_cache: bool, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult, Error> { if allow_cache { self.get_node_fn( key, |node| (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()), value_defined_cost_fn, + grove_version, ) } else { self.get_node_direct_fn( key, |node| (*node.inner.kv.hash()).wrap_with_cost(OperationCost::default()), value_defined_cost_fn, + grove_version, ) } } @@ -166,7 +206,10 @@ where &self, key: &[u8], allow_cache: bool, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult, CryptoHash)>, Error> { if allow_cache { self.get_node_fn( @@ -176,6 +219,7 @@ where .wrap_with_cost(OperationCost::default()) }, value_defined_cost_fn, + grove_version, ) } else { self.get_node_direct_fn( @@ -185,6 +229,7 @@ where .wrap_with_cost(OperationCost::default()) }, value_defined_cost_fn, + grove_version, ) } } @@ -193,16 +238,23 @@ where fn has_node_direct( &self, key: &[u8], - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult { - TreeNode::get(&self.storage, key, value_defined_cost_fn).map_ok(|x| x.is_some()) + TreeNode::get(&self.storage, key, value_defined_cost_fn, grove_version) + .map_ok(|x| x.is_some()) } /// See if a node's field exists fn has_node( &self, key: &[u8], - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + 
grove_version: &GroveVersion, ) -> CostResult { self.use_tree(move |maybe_tree| { let mut cursor = match maybe_tree { @@ -225,7 +277,7 @@ where match maybe_child { None => { // fetch from RocksDB - break self.has_node_direct(key, value_defined_cost_fn); + break self.has_node_direct(key, value_defined_cost_fn, grove_version); } Some(child) => cursor = child, // traverse to child } @@ -238,15 +290,20 @@ where &self, key: &[u8], f: F, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult, Error> where F: FnOnce(&TreeNode) -> CostContext, { - TreeNode::get(&self.storage, key, value_defined_cost_fn).flat_map_ok(|maybe_node| { - let mut cost = OperationCost::default(); - Ok(maybe_node.map(|node| f(&node).unwrap_add_cost(&mut cost))).wrap_with_cost(cost) - }) + TreeNode::get(&self.storage, key, value_defined_cost_fn, grove_version).flat_map_ok( + |maybe_node| { + let mut cost = OperationCost::default(); + Ok(maybe_node.map(|node| f(&node).unwrap_add_cost(&mut cost))).wrap_with_cost(cost) + }, + ) } /// Generic way to get a node's field @@ -254,7 +311,10 @@ where &self, key: &[u8], f: F, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult, Error> where F: FnOnce(&TreeNode) -> CostContext, @@ -280,7 +340,12 @@ where match maybe_child { None => { // fetch from RocksDB - break self.get_node_direct_fn(key, f, value_defined_cost_fn); + break self.get_node_direct_fn( + key, + f, + value_defined_cost_fn, + grove_version, + ); } Some(child) => cursor = child, // traverse to child } @@ -291,18 +356,25 @@ where #[cfg(test)] mod test { + use grovedb_version::version::GroveVersion; + use crate::{ test_utils::TempMerk, tree::kv::ValueDefinedCostType, Op, TreeFeatureType::BasicMerkNode, }; #[test] fn test_has_node_with_empty_tree() { - let mut merk = 
TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); let key = b"something"; let result = merk - .has_node(key, None::<&fn(&[u8]) -> Option>) + .has_node( + key, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .unwrap(); @@ -312,12 +384,16 @@ mod test { let batch = vec![batch_entry]; - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("should ..."); let result = merk - .has_node(key, None::<&fn(&[u8]) -> Option>) + .has_node( + key, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .unwrap(); diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index e9bab4f7..ee0deccc 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -55,6 +55,7 @@ use grovedb_costs::{ CostResult, CostsExt, FeatureSumLength, OperationCost, }; use grovedb_storage::{self, Batch, RawIterator, StorageContext}; +use grovedb_version::version::GroveVersion; use source::MerkSource; use crate::{ @@ -524,7 +525,10 @@ where /// Meaning that it doesn't have a parent Merk pub(crate) fn load_base_root( &mut self, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { self.storage .get_root(ROOT_KEY_KEY) @@ -534,14 +538,18 @@ where if let Some(tree_root_key) = tree_root_key_opt { // Trying to build a tree out of it, costs will be accumulated because // `Tree::get` returns `CostContext` and this call happens inside `flat_map_ok`. 
- TreeNode::get(&self.storage, tree_root_key, value_defined_cost_fn).map_ok( - |tree| { - if let Some(t) = tree.as_ref() { - self.root_tree_key = Cell::new(Some(t.key().to_vec())); - } - self.tree = Cell::new(tree); - }, + TreeNode::get( + &self.storage, + tree_root_key, + value_defined_cost_fn, + grove_version, ) + .map_ok(|tree| { + if let Some(t) = tree.as_ref() { + self.root_tree_key = Cell::new(Some(t.key().to_vec())); + } + self.tree = Cell::new(tree); + }) } else { Ok(()).wrap_with_cost(Default::default()) } @@ -553,13 +561,22 @@ where /// Meaning that it doesn't have a parent Merk pub(crate) fn load_root( &mut self, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult<(), Error> { // In case of successful seek for root key check if it exists if let Some(tree_root_key) = self.root_tree_key.get_mut() { // Trying to build a tree out of it, costs will be accumulated because // `Tree::get` returns `CostContext` and this call happens inside `flat_map_ok`. 
- TreeNode::get(&self.storage, tree_root_key, value_defined_cost_fn).map_ok(|tree| { + TreeNode::get( + &self.storage, + tree_root_key, + value_defined_cost_fn, + grove_version, + ) + .map_ok(|tree| { self.tree = Cell::new(tree); }) } else { @@ -575,6 +592,7 @@ where pub fn verify( &self, skip_sum_checks: bool, + grove_version: &GroveVersion, ) -> (BTreeMap, CryptoHash>, BTreeMap, Vec>) { let tree = self.tree.take(); @@ -590,6 +608,7 @@ where &mut bad_link_map, &mut parent_keys, skip_sum_checks, + grove_version, ); self.tree.set(tree); @@ -603,6 +622,7 @@ where bad_link_map: &mut BTreeMap, CryptoHash>, parent_keys: &mut BTreeMap, Vec>, skip_sum_checks: bool, + grove_version: &GroveVersion, ) { if let Some(link) = tree.link(LEFT) { traversal_instruction.push(LEFT); @@ -613,6 +633,7 @@ where bad_link_map, parent_keys, skip_sum_checks, + grove_version, ); traversal_instruction.pop(); } @@ -626,6 +647,7 @@ where bad_link_map, parent_keys, skip_sum_checks, + grove_version, ); traversal_instruction.pop(); } @@ -639,6 +661,7 @@ where bad_link_map: &mut BTreeMap, CryptoHash>, parent_keys: &mut BTreeMap, Vec>, skip_sum_checks: bool, + grove_version: &GroveVersion, ) { let (hash, key, sum) = match link { Link::Reference { hash, key, sum, .. } => { @@ -662,7 +685,8 @@ where let node = TreeNode::get( &self.storage, key, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap(); @@ -701,6 +725,7 @@ where bad_link_map, parent_keys, skip_sum_checks, + grove_version, ); } } @@ -708,12 +733,14 @@ where fn fetch_node<'db>( db: &impl StorageContext<'db>, key: &[u8], - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option Option>, + grove_version: &GroveVersion, ) -> Result, Error> { let bytes = db.get(key).unwrap().map_err(StorageError)?; // TODO: get_pinned ? 
if let Some(bytes) = bytes { Ok(Some( - TreeNode::decode(key.to_vec(), &bytes, value_defined_cost_fn).map_err(EdError)?, + TreeNode::decode(key.to_vec(), &bytes, value_defined_cost_fn, grove_version) + .map_err(EdError)?, )) } else { Ok(None) @@ -730,6 +757,7 @@ mod test { rocksdb_storage::{PrefixedRocksDbStorageContext, RocksDbStorage}, RawIterator, Storage, StorageBatch, StorageContext, }; + use grovedb_version::version::GroveVersion; use tempfile::TempDir; use super::{Merk, RefWalker}; @@ -749,10 +777,11 @@ mod test { #[test] fn simple_insert_apply() { + let grove_version = GroveVersion::latest(); let batch_size = 20; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..batch_size); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); @@ -768,34 +797,35 @@ mod test { #[test] fn tree_height() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..1); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(1)); // height 2 - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..2); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(2)); // height 5 // 2^5 - 1 = 31 (max number of elements in tree of height 5) - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..31); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(5)); // should still be height 5 for 29 
elements - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..29); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(5)); @@ -803,17 +833,18 @@ mod test { #[test] fn insert_uncached() { + let grove_version = GroveVersion::latest(); let batch_size = 20; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..batch_size); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_invariants(&merk); let batch = make_batch_seq(batch_size..(batch_size * 2)); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_invariants(&merk); @@ -821,13 +852,14 @@ mod test { #[test] fn insert_two() { + let grove_version = GroveVersion::latest(); let tree_size = 2; let batch_size = 1; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..(tree_size / batch_size) { let batch = make_batch_rand(batch_size, i); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); } @@ -835,14 +867,15 @@ mod test { #[test] fn insert_rand() { + let grove_version = GroveVersion::latest(); let tree_size = 40; let batch_size = 4; - let mut merk = TempMerk::new(); + let mut merk = TempMerk::new(grove_version); for i in 0..(tree_size / batch_size) { println!("i:{i}"); let batch = make_batch_rand(batch_size, i); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); } @@ -850,15 +883,16 @@ mod test { #[test] fn actual_deletes() { - let mut merk = TempMerk::new(); + let grove_version = 
GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_rand(10, 1); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); let key = batch.first().unwrap().0.clone(); - merk.apply::<_, Vec<_>>(&[(key.clone(), Op::Delete)], &[], None) + merk.apply::<_, Vec<_>>(&[(key.clone(), Op::Delete)], &[], None, grove_version) .unwrap() .unwrap(); @@ -868,15 +902,17 @@ mod test { #[test] fn aux_data() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); merk.apply::, _>( &[], &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode), None)], None, + grove_version, ) .unwrap() .expect("apply failed"); - merk.commit(); + merk.commit(grove_version); let val = merk.get_aux(&[1, 2, 3]).unwrap().unwrap(); assert_eq!(val, Some(vec![4, 5, 6])); @@ -884,14 +920,16 @@ mod test { #[test] fn get_not_found() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); // no root assert!(merk .get( &[1, 2, 3], true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .unwrap() @@ -902,6 +940,7 @@ mod test { &[(vec![5, 5, 5], Op::Put(vec![], BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .unwrap(); @@ -909,7 +948,8 @@ mod test { .get( &[1, 2, 3], true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .unwrap() @@ -924,6 +964,7 @@ mod test { ], &[], None, + grove_version, ) .unwrap() .unwrap(); @@ -931,7 +972,8 @@ mod test { .get( &[3, 3, 3], true, - None::<&fn(&[u8]) -> Option> + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version ) .unwrap() .unwrap() @@ -941,6 +983,7 @@ mod test { // TODO: what this test should do? 
#[test] fn reopen_check_root_hash() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); @@ -949,22 +992,24 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); let batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); let batch = make_batch_seq(11..12); - merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); } #[test] fn test_get_node_cost() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); @@ -973,12 +1018,13 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); let batch = make_batch_seq(1..10); - merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); drop(merk); @@ -986,20 +1032,30 @@ mod test { #[test] fn reopen() { + let grove_version = GroveVersion::latest(); fn collect( mut node: RefWalker>, nodes: &mut Vec>, ) { + let grove_version = GroveVersion::latest(); nodes.push(node.tree().encode()); if let Some(c) = node - .walk(true, None::<&fn(&[u8]) -> Option>) + .walk( + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .unwrap() { collect(c, nodes); } if let Some(c) = node - 
.walk(false, None::<&fn(&[u8]) -> Option>) + .walk( + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap() .unwrap() { @@ -1018,12 +1074,13 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); let merk_batch = make_batch_seq(1..10_000); - merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); @@ -1036,7 +1093,8 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1056,7 +1114,8 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1074,6 +1133,7 @@ mod test { #[test] fn reopen_iter() { + let grove_version = GroveVersion::latest(); fn collect(iter: PrefixedStorageIter<'_, '_>, nodes: &mut Vec<(Vec, Vec)>) { while iter.valid().unwrap() { nodes.push(( @@ -1094,12 +1154,13 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); let merk_batch = make_batch_seq(1..10_000); - merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None) + merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) .unwrap() .unwrap(); @@ -1114,7 +1175,8 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1128,7 
+1190,8 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1141,6 +1204,7 @@ mod test { #[test] fn update_node() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); @@ -1150,7 +1214,8 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1159,6 +1224,7 @@ mod test { &[(b"9".to_vec(), Op::Put(b"a".to_vec(), BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -1166,6 +1232,7 @@ mod test { &[(b"10".to_vec(), Op::Put(b"a".to_vec(), BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -1174,7 +1241,8 @@ mod test { .get( b"10".as_slice(), true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get successfully"); @@ -1185,6 +1253,7 @@ mod test { &[(b"10".to_vec(), Op::Put(b"b".to_vec(), BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -1192,7 +1261,8 @@ mod test { .get( b"10".as_slice(), true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get successfully"); @@ -1208,7 +1278,8 @@ mod test { .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("cannot open merk"); @@ -1218,6 +1289,7 @@ mod test { &[(b"10".to_vec(), Op::Put(b"c".to_vec(), 
BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("should insert successfully"); @@ -1225,7 +1297,8 @@ mod test { .get( b"10".as_slice(), true, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("should get successfully"); diff --git a/merk/src/merk/open.rs b/merk/src/merk/open.rs index af15d596..c8646afa 100644 --- a/merk/src/merk/open.rs +++ b/merk/src/merk/open.rs @@ -2,6 +2,7 @@ use std::cell::Cell; use grovedb_costs::CostResult; use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; use crate::{ tree::kv::ValueDefinedCostType, @@ -28,7 +29,10 @@ where pub fn open_standalone( storage: S, is_sum_tree: bool, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult { let mut merk = Self { tree: Cell::new(None), @@ -38,14 +42,18 @@ where is_sum_tree, }; - merk.load_base_root(value_defined_cost_fn).map_ok(|_| merk) + merk.load_base_root(value_defined_cost_fn, grove_version) + .map_ok(|_| merk) } /// Open base tree pub fn open_base( storage: S, is_sum_tree: bool, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult { let mut merk = Self { tree: Cell::new(None), @@ -55,7 +63,8 @@ where is_sum_tree, }; - merk.load_base_root(value_defined_cost_fn).map_ok(|_| merk) + merk.load_base_root(value_defined_cost_fn, grove_version) + .map_ok(|_| merk) } /// Open layered tree with root key @@ -63,7 +72,10 @@ where storage: S, root_key: Option>, is_sum_tree: bool, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult { let mut merk = Self { tree: Cell::new(None), @@ -73,7 +85,8 @@ where is_sum_tree, }; - 
merk.load_root(value_defined_cost_fn).map_ok(|_| merk) + merk.load_root(value_defined_cost_fn, grove_version) + .map_ok(|_| merk) } } @@ -85,12 +98,14 @@ mod test { rocksdb_storage::{test_utils::TempStorage, RocksDbStorage}, Storage, StorageBatch, }; + use grovedb_version::version::GroveVersion; use tempfile::TempDir; use crate::{tree::kv::ValueDefinedCostType, Merk, Op, TreeFeatureType::BasicMerkNode}; #[test] fn test_reopen_root_hash() { + let grove_version = GroveVersion::latest(); let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); @@ -102,7 +117,8 @@ mod test { .get_storage_context(SubtreePath::from(test_prefix.as_ref()), Some(&batch)) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -111,6 +127,7 @@ mod test { &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("apply failed"); @@ -127,7 +144,8 @@ mod test { .get_storage_context(SubtreePath::from(test_prefix.as_ref()), None) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -136,6 +154,7 @@ mod test { #[test] fn test_open_fee() { + let grove_version = GroveVersion::latest(); let storage = TempStorage::new(); let batch = StorageBatch::new(); @@ -144,7 +163,8 @@ mod test { .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ); // Opening not existing merk should cost only root key seek (except context // creation) @@ -158,6 +178,7 @@ mod test { &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6], BasicMerkNode))], &[], None, + grove_version, ) .unwrap() .expect("apply failed"); @@ -172,7 +193,8 @@ mod test { .get_storage_context(SubtreePath::empty(), None) 
.unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ); // Opening existing merk should cost two seeks. (except context creation) diff --git a/merk/src/merk/prove.rs b/merk/src/merk/prove.rs index 99227c13..a92f28fb 100644 --- a/merk/src/merk/prove.rs +++ b/merk/src/merk/prove.rs @@ -2,6 +2,7 @@ use std::collections::LinkedList; use grovedb_costs::{CostResult, CostsExt}; use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; use crate::{ proofs::{encode_into, query::QueryItem, Op as ProofOp, Query}, @@ -29,9 +30,10 @@ where &self, query: Query, limit: Option, + grove_version: &GroveVersion, ) -> CostResult { let left_to_right = query.left_to_right; - self.prove_unchecked(query, limit, left_to_right) + self.prove_unchecked(query, limit, left_to_right, grove_version) .map_ok(|(proof, limit)| { let mut bytes = Vec::with_capacity(128); encode_into(proof.iter(), &mut bytes); @@ -54,9 +56,10 @@ where &self, query: Query, limit: Option, + grove_version: &GroveVersion, ) -> CostResult { let left_to_right = query.left_to_right; - self.prove_unchecked(query, limit, left_to_right) + self.prove_unchecked(query, limit, left_to_right, grove_version) .map_ok(|(proof, limit)| ProofWithoutEncodingResult::new(proof, limit)) } @@ -77,6 +80,7 @@ where query: I, limit: Option, left_to_right: bool, + grove_version: &GroveVersion, ) -> CostResult where Q: Into, @@ -92,7 +96,12 @@ where .wrap_with_cost(Default::default()) .flat_map_ok(|tree| { let mut ref_walker = RefWalker::new(tree, self.source()); - ref_walker.create_proof(query_vec.as_slice(), limit, left_to_right) + ref_walker.create_proof( + query_vec.as_slice(), + limit, + left_to_right, + grove_version, + ) }) .map_ok(|(proof, _, limit, ..)| (proof, limit)) }) @@ -115,6 +124,7 @@ where query_items: &[QueryItem], limit: Option, left_to_right: bool, + grove_version: &GroveVersion, ) -> CostResult { self.use_tree_mut(|maybe_tree| { maybe_tree @@ 
-124,7 +134,7 @@ where .wrap_with_cost(Default::default()) .flat_map_ok(|tree| { let mut ref_walker = RefWalker::new(tree, self.source()); - ref_walker.create_proof(query_items, limit, left_to_right) + ref_walker.create_proof(query_items, limit, left_to_right, grove_version) }) .map_ok(|(proof, _, limit, ..)| (proof, limit)) }) diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index c5ce1286..1082e80b 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -32,6 +32,7 @@ use std::collections::BTreeMap; use grovedb_storage::{Batch, StorageContext}; +use grovedb_version::version::GroveVersion; use crate::{ merk, @@ -87,6 +88,7 @@ impl<'db, S: StorageContext<'db>> Restorer { &mut self, chunk_id: &[u8], chunk: Vec, + grove_version: &GroveVersion, ) -> Result>, Error> { let expected_root_hash = self .chunk_id_to_root_hash @@ -106,7 +108,12 @@ impl<'db, S: StorageContext<'db>> Restorer { } else { // every non root chunk has some associated parent with an placeholder link // here we update the placeholder link to represent the true data - self.rewrite_parent_link(chunk_id, &root_traversal_instruction, &chunk_tree)?; + self.rewrite_parent_link( + chunk_id, + &root_traversal_instruction, + &chunk_tree, + grove_version, + )?; } // next up, we need to write the chunk and build the map again @@ -125,6 +132,7 @@ impl<'db, S: StorageContext<'db>> Restorer { pub fn process_multi_chunk( &mut self, multi_chunk: Vec, + grove_version: &GroveVersion, ) -> Result>, Error> { let mut expect_chunk_id = true; let mut chunk_ids = vec![]; @@ -144,7 +152,8 @@ impl<'db, S: StorageContext<'db>> Restorer { } ChunkOp::Chunk(chunk) => { // TODO: remove clone - let next_chunk_ids = self.process_chunk(¤t_chunk_id, chunk)?; + let next_chunk_ids = + self.process_chunk(¤t_chunk_id, chunk, grove_version)?; chunk_ids.extend(next_chunk_ids); } } @@ -282,6 +291,7 @@ impl<'db, S: StorageContext<'db>> Restorer { chunk_id: &[u8], traversal_instruction: &[bool], chunk_tree: 
&ProofTree, + grove_version: &GroveVersion, ) -> Result<(), Error> { let parent_key = self .parent_keys @@ -293,7 +303,8 @@ impl<'db, S: StorageContext<'db>> Restorer { let mut parent = merk::fetch_node( &self.merk.storage, parent_key.as_slice(), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, )? .ok_or(Error::ChunkRestoringError(InternalError( "cannot find expected parent in memory, most likely state corruption issue", @@ -328,16 +339,18 @@ impl<'db, S: StorageContext<'db>> Restorer { /// Each nodes height is not added to state as such the producer could lie /// about the height values after replication we need to verify the /// heights and if invalid recompute the correct values - fn rewrite_heights(&mut self) -> Result<(), Error> { + fn rewrite_heights(&mut self, grove_version: &GroveVersion) -> Result<(), Error> { fn rewrite_child_heights<'s, 'db, S: StorageContext<'db>>( mut walker: RefWalker>, batch: &mut >::Batch, + grove_version: &GroveVersion, ) -> Result<(u8, u8), Error> { // TODO: remove unwrap let mut cloned_node = TreeNode::decode( walker.tree().key().to_vec(), walker.tree().encode().as_slice(), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap(); @@ -345,19 +358,28 @@ impl<'db, S: StorageContext<'db>> Restorer { let mut right_height = 0; if let Some(left_walker) = walker - .walk(LEFT, None::<&fn(&[u8]) -> Option>) + .walk( + LEFT, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap()? 
{ - let left_child_heights = rewrite_child_heights(left_walker, batch)?; + let left_child_heights = rewrite_child_heights(left_walker, batch, grove_version)?; left_height = left_child_heights.0.max(left_child_heights.1) + 1; *cloned_node.link_mut(LEFT).unwrap().child_heights_mut() = left_child_heights; } if let Some(right_walker) = walker - .walk(RIGHT, None::<&fn(&[u8]) -> Option>) + .walk( + RIGHT, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap()? { - let right_child_heights = rewrite_child_heights(right_walker, batch)?; + let right_child_heights = + rewrite_child_heights(right_walker, batch, grove_version)?; right_height = right_child_heights.0.max(right_child_heights.1) + 1; *cloned_node.link_mut(RIGHT).unwrap().child_heights_mut() = right_child_heights; } @@ -375,7 +397,7 @@ impl<'db, S: StorageContext<'db>> Restorer { let mut tree = self.merk.tree.take().unwrap(); let walker = RefWalker::new(&mut tree, self.merk.source()); - rewrite_child_heights(walker, &mut batch)?; + rewrite_child_heights(walker, &mut batch, grove_version)?; self.merk.tree.set(Some(tree)); @@ -387,9 +409,9 @@ impl<'db, S: StorageContext<'db>> Restorer { } /// Rebuild restoration state from partial storage state - fn attempt_state_recovery(&mut self) -> Result<(), Error> { + fn attempt_state_recovery(&mut self, grove_version: &GroveVersion) -> Result<(), Error> { // TODO: think about the return type some more - let (bad_link_map, parent_keys) = self.merk.verify(false); + let (bad_link_map, parent_keys) = self.merk.verify(false, grove_version); if !bad_link_map.is_empty() { self.chunk_id_to_root_hash = bad_link_map; self.parent_keys = parent_keys; @@ -401,7 +423,7 @@ impl<'db, S: StorageContext<'db>> Restorer { /// Consumes the `Restorer` and returns a newly created, fully populated /// Merk instance. This method will return an error if called before /// processing all chunks. 
- pub fn finalize(mut self) -> Result, Error> { + pub fn finalize(mut self, grove_version: &GroveVersion) -> Result, Error> { // ensure all chunks have been processed if !self.chunk_id_to_root_hash.is_empty() || !self.parent_keys.is_empty() { return Err(Error::ChunkRestoringError( @@ -410,20 +432,27 @@ impl<'db, S: StorageContext<'db>> Restorer { } // get the latest version of the root node - let _ = self - .merk - .load_base_root(None::<&fn(&[u8]) -> Option>); + let _ = self.merk.load_base_root( + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ); // if height values are wrong, rewrite height - if self.verify_height().is_err() { - let _ = self.rewrite_heights(); + if self.verify_height(grove_version).is_err() { + let _ = self.rewrite_heights(grove_version); // update the root node after height rewrite - let _ = self - .merk - .load_base_root(None::<&fn(&[u8]) -> Option>); + let _ = self.merk.load_base_root( + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ); } - if !self.merk.verify(self.merk.is_sum_tree).0.is_empty() { + if !self + .merk + .verify(self.merk.is_sum_tree, grove_version) + .0 + .is_empty() + { return Err(Error::ChunkRestoringError(ChunkError::InternalError( "restored tree invalid", ))); @@ -434,10 +463,10 @@ impl<'db, S: StorageContext<'db>> Restorer { /// Verify that the child heights of the merk tree links correctly represent /// the tree - fn verify_height(&self) -> Result<(), Error> { + fn verify_height(&self, grove_version: &GroveVersion) -> Result<(), Error> { let tree = self.merk.tree.take(); let height_verification_result = if let Some(tree) = &tree { - self.verify_tree_height(tree, tree.height()) + self.verify_tree_height(tree, tree.height(), grove_version) } else { Ok(()) }; @@ -445,7 +474,12 @@ impl<'db, S: StorageContext<'db>> Restorer { height_verification_result } - fn verify_tree_height(&self, tree: &TreeNode, parent_height: u8) -> Result<(), Error> { + fn verify_tree_height( + &self, + tree: 
&TreeNode, + parent_height: u8, + grove_version: &GroveVersion, + ) -> Result<(), Error> { let (left_height, right_height) = tree.child_heights(); if (left_height.abs_diff(right_height)) > 1 { @@ -477,13 +511,14 @@ impl<'db, S: StorageContext<'db>> Restorer { let left_tree = TreeNode::get( &self.merk.storage, link.key(), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap()? .ok_or(Error::CorruptedState("link points to non-existent node"))?; - self.verify_tree_height(&left_tree, left_height)?; + self.verify_tree_height(&left_tree, left_height, grove_version)?; } else { - self.verify_tree_height(left_tree.unwrap(), left_height)?; + self.verify_tree_height(left_tree.unwrap(), left_height, grove_version)?; } } @@ -493,13 +528,14 @@ impl<'db, S: StorageContext<'db>> Restorer { let right_tree = TreeNode::get( &self.merk.storage, link.key(), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap()? 
.ok_or(Error::CorruptedState("link points to non-existent node"))?; - self.verify_tree_height(&right_tree, right_height)?; + self.verify_tree_height(&right_tree, right_height, grove_version)?; } else { - self.verify_tree_height(right_tree.unwrap(), right_height)?; + self.verify_tree_height(right_tree.unwrap(), right_height, grove_version)?; } } @@ -628,9 +664,10 @@ mod tests { #[test] fn test_process_chunk_correct_chunk_id_map() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -646,7 +683,8 @@ mod tests { .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -671,12 +709,13 @@ mod tests { ); // generate first chunk - let (chunk, _) = chunk_producer.chunk_with_index(1).unwrap(); + let (chunk, _) = chunk_producer.chunk_with_index(1, grove_version).unwrap(); // apply first chunk let new_chunk_ids = restorer .process_chunk( &traversal_instruction_as_vec_bytes(vec![].as_slice()), chunk, + grove_version, ) .expect("should process chunk successfully"); assert_eq!(new_chunk_ids.len(), 4); @@ -687,30 +726,62 @@ mod tests { // assert all the chunk hash values assert_eq!( restorer.chunk_id_to_root_hash.get(vec![1, 1].as_slice()), - Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[LEFT, LEFT])).unwrap()) - .as_ref() + Some( + get_node_hash(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, LEFT], + grove_version + )) + .unwrap() + ) + .as_ref() ); assert_eq!( restorer.chunk_id_to_root_hash.get(vec![1, 0].as_slice()), - Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[LEFT, RIGHT])).unwrap()) - .as_ref() + Some( + 
get_node_hash(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, RIGHT], + grove_version + )) + .unwrap() + ) + .as_ref() ); assert_eq!( restorer.chunk_id_to_root_hash.get(vec![0, 1].as_slice()), - Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT])).unwrap()) - .as_ref() + Some( + get_node_hash(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, LEFT], + grove_version + )) + .unwrap() + ) + .as_ref() ); assert_eq!( restorer.chunk_id_to_root_hash.get(vec![0, 0].as_slice()), - Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[RIGHT, RIGHT])).unwrap()) - .as_ref() + Some( + get_node_hash(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT, RIGHT], + grove_version + )) + .unwrap() + ) + .as_ref() ); // generate second chunk - let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap(); + let (chunk, _) = chunk_producer.chunk_with_index(2, grove_version).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk(&traversal_instruction_as_vec_bytes(&[LEFT, LEFT]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&[LEFT, LEFT]), + chunk, + grove_version, + ) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element @@ -721,10 +792,13 @@ mod tests { ); // let's try to apply the second chunk again, should not work - let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap(); + let (chunk, _) = chunk_producer.chunk_with_index(2, grove_version).unwrap(); // apply second chunk - let chunk_process_result = - restorer.process_chunk(&traversal_instruction_as_vec_bytes(&[LEFT, LEFT]), chunk); + let chunk_process_result = restorer.process_chunk( + &traversal_instruction_as_vec_bytes(&[LEFT, LEFT]), + chunk, + grove_version, + ); assert!(chunk_process_result.is_err()); assert!(matches!( chunk_process_result, @@ -733,9 +807,12 @@ mod tests { // next let's get a random but expected chunk and work with that e.g. 
chunk 4 // but let's apply it to the wrong place - let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap(); - let chunk_process_result = - restorer.process_chunk(&traversal_instruction_as_vec_bytes(&[LEFT, RIGHT]), chunk); + let (chunk, _) = chunk_producer.chunk_with_index(4, grove_version).unwrap(); + let chunk_process_result = restorer.process_chunk( + &traversal_instruction_as_vec_bytes(&[LEFT, RIGHT]), + chunk, + grove_version, + ); assert!(chunk_process_result.is_err()); assert!(matches!( chunk_process_result, @@ -745,10 +822,14 @@ mod tests { )); // correctly apply chunk 5 - let (chunk, _) = chunk_producer.chunk_with_index(5).unwrap(); + let (chunk, _) = chunk_producer.chunk_with_index(5, grove_version).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk(&traversal_instruction_as_vec_bytes(&[RIGHT, RIGHT]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&[RIGHT, RIGHT]), + chunk, + grove_version, + ) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element @@ -759,10 +840,14 @@ mod tests { ); // correctly apply chunk 3 - let (chunk, _) = chunk_producer.chunk_with_index(3).unwrap(); + let (chunk, _) = chunk_producer.chunk_with_index(3, grove_version).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk(&traversal_instruction_as_vec_bytes(&[LEFT, RIGHT]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&[LEFT, RIGHT]), + chunk, + grove_version, + ) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element @@ -773,10 +858,14 @@ mod tests { ); // correctly apply chunk 4 - let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap(); + let (chunk, _) = chunk_producer.chunk_with_index(4, grove_version).unwrap(); // apply second chunk let new_chunk_ids = restorer - .process_chunk(&traversal_instruction_as_vec_bytes(&[RIGHT, LEFT]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&[RIGHT, LEFT]), + chunk, + 
grove_version, + ) .unwrap(); assert_eq!(new_chunk_ids.len(), 0); // chunk_map should have 1 less element @@ -787,7 +876,9 @@ mod tests { ); // finalize merk - let restored_merk = restorer.finalize().expect("should finalized successfully"); + let restored_merk = restorer + .finalize(grove_version) + .expect("should finalized successfully"); assert_eq!( restored_merk.root_hash().unwrap(), @@ -833,6 +924,7 @@ mod tests { // attempts restoration on some empty merk // verifies that restoration was performed correctly. fn test_restoration_single_chunk_strategy(batch_size: u64) { + let grove_version = GroveVersion::latest(); // build the source merk let storage = TempStorage::new(); let tx = storage.start_transaction(); @@ -841,13 +933,14 @@ mod tests { .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); let batch = make_batch_seq(0..batch_size); source_merk - .apply::<_, Vec<_>>(&batch, &[], None) + .apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); @@ -859,7 +952,8 @@ mod tests { .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -881,9 +975,11 @@ mod tests { // perform chunk production and processing let mut chunk_id_opt = Some(vec![]); while let Some(chunk_id) = chunk_id_opt { - let (chunk, next_chunk_id) = chunk_producer.chunk(&chunk_id).expect("should get chunk"); + let (chunk, next_chunk_id) = chunk_producer + .chunk(&chunk_id, grove_version) + .expect("should get chunk"); restorer - .process_chunk(&chunk_id, chunk) + .process_chunk(&chunk_id, chunk, grove_version) .expect("should process chunk successfully"); chunk_id_opt = next_chunk_id; } @@ -891,7 +987,7 @@ mod tests { // after chunk processing we should be able to finalize 
assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); assert_eq!(restorer.parent_keys.len(), 0); - let restored_merk = restorer.finalize().expect("should finalize"); + let restored_merk = restorer.finalize(grove_version).expect("should finalize"); // compare root hash values assert_eq!( @@ -914,9 +1010,10 @@ mod tests { #[test] fn test_process_multi_chunk_no_limit() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -928,7 +1025,8 @@ mod tests { .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -953,7 +1051,7 @@ mod tests { // generate multi chunk from root with no limit let chunk = chunk_producer - .multi_chunk_with_limit(vec![].as_slice(), None) + .multi_chunk_with_limit(vec![].as_slice(), None, grove_version) .expect("should generate multichunk"); assert_eq!(chunk.chunk.len(), 2); @@ -961,14 +1059,16 @@ mod tests { assert_eq!(chunk.remaining_limit, None); let next_ids = restorer - .process_multi_chunk(chunk.chunk) + .process_multi_chunk(chunk.chunk, grove_version) .expect("should process chunk"); // should have replicated all chunks assert_eq!(next_ids.len(), 0); assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); assert_eq!(restorer.parent_keys.len(), 0); - let restored_merk = restorer.finalize().expect("should be able to finalize"); + let restored_merk = restorer + .finalize(grove_version) + .expect("should be able to finalize"); // compare root hash values assert_eq!( @@ -979,9 +1079,10 @@ mod tests { #[test] fn test_process_multi_chunk_no_limit_but_non_root() { - let mut merk = TempMerk::new(); + let grove_version = 
GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -993,7 +1094,8 @@ mod tests { .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -1017,9 +1119,13 @@ mod tests { ); // first restore the first chunk - let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1).unwrap(); + let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1, grove_version).unwrap(); let new_chunk_ids = restorer - .process_chunk(&traversal_instruction_as_vec_bytes(&[]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&[]), + chunk, + grove_version, + ) .expect("should process chunk"); assert_eq!(new_chunk_ids.len(), 4); assert_eq!(next_chunk_index, Some(2)); @@ -1028,19 +1134,23 @@ mod tests { // generate multi chunk from the 2nd chunk with no limit let multi_chunk = chunk_producer - .multi_chunk_with_limit_and_index(next_chunk_index.unwrap(), None) + .multi_chunk_with_limit_and_index(next_chunk_index.unwrap(), None, grove_version) .unwrap(); // tree of height 4 has 5 chunks // we have restored the first leaving 4 chunks // each chunk has an extra chunk id, since they are disjoint // hence the size of the multi chunk should be 8 assert_eq!(multi_chunk.chunk.len(), 8); - let new_chunk_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); + let new_chunk_ids = restorer + .process_multi_chunk(multi_chunk.chunk, grove_version) + .unwrap(); assert_eq!(new_chunk_ids.len(), 0); assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); assert_eq!(restorer.parent_keys.len(), 0); - let restored_merk = restorer.finalize().expect("should be able to finalize"); + let restored_merk = restorer + 
.finalize(grove_version) + .expect("should be able to finalize"); // compare root hash values assert_eq!( @@ -1051,9 +1161,10 @@ mod tests { #[test] fn test_process_multi_chunk_with_limit() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -1065,7 +1176,8 @@ mod tests { .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -1084,13 +1196,15 @@ mod tests { // build multi chunk with with limit of 325 let multi_chunk = chunk_producer - .multi_chunk_with_limit(vec![].as_slice(), Some(600)) + .multi_chunk_with_limit(vec![].as_slice(), Some(600), grove_version) .unwrap(); // should only contain the first chunk assert_eq!(multi_chunk.chunk.len(), 2); // should point to chunk 2 assert_eq!(multi_chunk.next_index, Some(vec![1, 1])); - let next_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); + let next_ids = restorer + .process_multi_chunk(multi_chunk.chunk, grove_version) + .unwrap(); assert_eq!(next_ids.len(), 4); assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); assert_eq!(restorer.parent_keys.len(), 4); @@ -1099,11 +1213,17 @@ mod tests { // with limit just above 642 should get 2 chunks (2 and 3) // disjoint, so multi chunk len should be 4 let multi_chunk = chunk_producer - .multi_chunk_with_limit(multi_chunk.next_index.unwrap().as_slice(), Some(645)) + .multi_chunk_with_limit( + multi_chunk.next_index.unwrap().as_slice(), + Some(645), + grove_version, + ) .unwrap(); assert_eq!(multi_chunk.chunk.len(), 4); assert_eq!(multi_chunk.next_index, Some(vec![0u8, 1u8])); - let next_ids = 
restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); + let next_ids = restorer + .process_multi_chunk(multi_chunk.chunk, grove_version) + .unwrap(); // chunks 2 and 3 are leaf chunks assert_eq!(next_ids.len(), 0); assert_eq!(restorer.chunk_id_to_root_hash.len(), 2); @@ -1111,18 +1231,24 @@ mod tests { // get the last 2 chunks let multi_chunk = chunk_producer - .multi_chunk_with_limit(multi_chunk.next_index.unwrap().as_slice(), Some(645)) + .multi_chunk_with_limit( + multi_chunk.next_index.unwrap().as_slice(), + Some(645), + grove_version, + ) .unwrap(); assert_eq!(multi_chunk.chunk.len(), 4); assert_eq!(multi_chunk.next_index, None); - let next_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap(); + let next_ids = restorer + .process_multi_chunk(multi_chunk.chunk, grove_version) + .unwrap(); // chunks 2 and 3 are leaf chunks assert_eq!(next_ids.len(), 0); assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); assert_eq!(restorer.parent_keys.len(), 0); // finalize merk - let restored_merk = restorer.finalize().unwrap(); + let restored_merk = restorer.finalize(grove_version).unwrap(); // compare root hash values assert_eq!( @@ -1135,11 +1261,12 @@ mod tests { // attempts restoration on some empty merk, with multi chunks // verifies that restoration was performed correctly. 
fn test_restoration_multi_chunk_strategy(batch_size: u64, limit: Option) { + let grove_version = GroveVersion::latest(); // build the source merk - let mut source_merk = TempMerk::new(); + let mut source_merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..batch_size); source_merk - .apply::<_, Vec<_>>(&batch, &[], None) + .apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); @@ -1151,7 +1278,8 @@ mod tests { .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -1174,10 +1302,10 @@ mod tests { let mut chunk_id_opt = Some(vec![]); while let Some(chunk_id) = chunk_id_opt { let multi_chunk = chunk_producer - .multi_chunk_with_limit(&chunk_id, limit) + .multi_chunk_with_limit(&chunk_id, limit, grove_version) .expect("should get chunk"); restorer - .process_multi_chunk(multi_chunk.chunk) + .process_multi_chunk(multi_chunk.chunk, grove_version) .expect("should process chunk successfully"); chunk_id_opt = multi_chunk.next_index; } @@ -1185,7 +1313,7 @@ mod tests { // after chunk processing we should be able to finalize assert_eq!(restorer.chunk_id_to_root_hash.len(), 0); assert_eq!(restorer.parent_keys.len(), 0); - let restored_merk = restorer.finalize().expect("should finalize"); + let restored_merk = restorer.finalize(grove_version).expect("should finalize"); // compare root hash values assert_eq!( @@ -1217,9 +1345,10 @@ mod tests { #[test] fn test_restoration_interruption() { - let mut merk = TempMerk::new(); + let grove_version = GroveVersion::latest(); + let mut merk = TempMerk::new(grove_version); let batch = make_batch_seq(0..15); - merk.apply::<_, Vec<_>>(&batch, &[], None) + merk.apply::<_, Vec<_>>(&batch, &[], None, grove_version) .unwrap() .expect("apply failed"); assert_eq!(merk.height(), Some(4)); @@ -1231,7 +1360,8 @@ mod tests { 
.get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -1255,9 +1385,13 @@ mod tests { ); // first restore the first chunk - let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1).unwrap(); + let (chunk, next_chunk_index) = chunk_producer.chunk_with_index(1, grove_version).unwrap(); let new_chunk_ids = restorer - .process_chunk(&traversal_instruction_as_vec_bytes(&[]), chunk) + .process_chunk( + &traversal_instruction_as_vec_bytes(&[]), + chunk, + grove_version, + ) .expect("should process chunk"); assert_eq!(new_chunk_ids.len(), 4); assert_eq!(next_chunk_index, Some(2)); @@ -1276,7 +1410,8 @@ mod tests { .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), false, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap(); @@ -1287,7 +1422,7 @@ mod tests { assert_eq!(restorer.parent_keys.len(), 0); // recover state - let recovery_attempt = restorer.attempt_state_recovery(); + let recovery_attempt = restorer.attempt_state_recovery(grove_version); assert!(recovery_attempt.is_ok()); assert_eq!(restorer.chunk_id_to_root_hash.len(), 4); assert_eq!(restorer.parent_keys.len(), 4); diff --git a/merk/src/merk/source.rs b/merk/src/merk/source.rs index 46782bdc..dd71e74e 100644 --- a/merk/src/merk/source.rs +++ b/merk/src/merk/source.rs @@ -1,5 +1,6 @@ use grovedb_costs::CostResult; use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; use crate::{ tree::{kv::ValueDefinedCostType, Fetch, TreeNode}, @@ -40,10 +41,18 @@ where fn fetch( &self, link: &Link, - value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult { - TreeNode::get(self.storage, link.key(), value_defined_cost_fn) - .map_ok(|x| 
x.ok_or(Error::KeyNotFoundError("Key not found for fetch"))) - .flatten() + TreeNode::get( + self.storage, + link.key(), + value_defined_cost_fn, + grove_version, + ) + .map_ok(|x| x.ok_or(Error::KeyNotFoundError("Key not found for fetch"))) + .flatten() } } diff --git a/merk/src/owner.rs b/merk/src/owner.rs index 18efb8f2..1543a089 100644 --- a/merk/src/owner.rs +++ b/merk/src/owner.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Owner use std::ops::{Deref, DerefMut}; diff --git a/merk/src/proofs/chunk/chunk.rs b/merk/src/proofs/chunk/chunk.rs index 95d888ec..4960c53f 100644 --- a/merk/src/proofs/chunk/chunk.rs +++ b/merk/src/proofs/chunk/chunk.rs @@ -27,6 +27,7 @@ // DEALINGS IN THE SOFTWARE. 
use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; +use grovedb_version::version::GroveVersion; // TODO: add copyright comment use crate::proofs::{Node, Op, Tree}; @@ -44,11 +45,15 @@ where S: Fetch + Sized + Clone, { /// Returns a chunk of a given depth from a RefWalker - pub fn create_chunk(&mut self, depth: usize) -> Result, Error> { + pub fn create_chunk( + &mut self, + depth: usize, + grove_version: &GroveVersion, + ) -> Result, Error> { // build the proof vector let mut proof = vec![]; - self.create_chunk_internal(&mut proof, depth)?; + self.create_chunk_internal(&mut proof, depth, grove_version)?; Ok(proof) } @@ -57,6 +62,7 @@ where &mut self, proof: &mut Vec, remaining_depth: usize, + grove_version: &GroveVersion, ) -> Result<(), Error> { // at some point we will reach the depth // here we need to put the node hash @@ -69,10 +75,14 @@ where let has_left_child = self.tree().link(true).is_some(); if has_left_child { let mut left = self - .walk(true, None::<&fn(&[u8]) -> Option>) + .walk( + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap()? .expect("confirmed is some"); - left.create_chunk_internal(proof, remaining_depth - 1)?; + left.create_chunk_internal(proof, remaining_depth - 1, grove_version)?; } // add current node's data @@ -84,10 +94,14 @@ where // traverse right if let Some(mut right) = self - .walk(false, None::<&fn(&[u8]) -> Option>) + .walk( + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) .unwrap()? 
{ - right.create_chunk_internal(proof, remaining_depth - 1)?; + right.create_chunk_internal(proof, remaining_depth - 1, grove_version)?; proof.push(Op::Child); } @@ -101,11 +115,12 @@ where &mut self, instructions: &[bool], depth: usize, + grove_version: &GroveVersion, ) -> Result, Error> { // base case if instructions.is_empty() { // we are at the desired node - return self.create_chunk(depth); + return self.create_chunk(depth, grove_version); } // link must exist @@ -120,13 +135,14 @@ where let mut child = self .walk( instructions[0], - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap()? .expect("confirmed link exists so cannot be none"); // recurse on child - child.traverse_and_build_chunk(&instructions[1..], depth) + child.traverse_and_build_chunk(&instructions[1..], depth, grove_version) } /// Returns the smallest amount of tree ops, that can convince @@ -140,7 +156,11 @@ where /// . /// . /// . - pub fn generate_height_proof(&mut self, proof: &mut Vec) -> CostResult<(), Error> { + pub fn generate_height_proof( + &mut self, + proof: &mut Vec, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { // TODO: look into making height proofs more efficient // they will always be used in the context of some // existing chunk, we don't want to repeat nodes unnecessarily @@ -148,13 +168,17 @@ where let maybe_left = cost_return_on_error!( &mut cost, - self.walk(LEFT, None::<&fn(&[u8]) -> Option>) + self.walk( + LEFT, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) ); let has_left_child = maybe_left.is_some(); // recurse to leftmost element if let Some(mut left) = maybe_left { - cost_return_on_error!(&mut cost, left.generate_height_proof(proof)) + cost_return_on_error!(&mut cost, left.generate_height_proof(proof, grove_version)) } proof.push(Op::Push(self.to_kvhash_node())); @@ -207,6 +231,7 @@ pub fn verify_height_tree(height_proof_tree: &Tree) -> Result { #[cfg(test)] pub mod tests { use 
ed::Encode; + use grovedb_version::version::GroveVersion; use crate::{ proofs::{ @@ -220,6 +245,7 @@ pub mod tests { }; fn build_tree_10_nodes() -> TreeNode { + let grove_version = GroveVersion::latest(); // 3 // / \ // 1 7 @@ -227,7 +253,7 @@ pub mod tests { // 0 2 5 8 // / \ \ // 4 6 9 - make_tree_seq_with_start_key(10, [0; 8].to_vec()) + make_tree_seq_with_start_key(10, [0; 8].to_vec(), grove_version) } /// Traverses a tree to a certain node and returns the node hash of that @@ -235,10 +261,14 @@ pub mod tests { pub fn traverse_get_node_hash( walker: &mut RefWalker, traverse_instructions: &[bool], + grove_version: &GroveVersion, ) -> Node { - traverse_and_apply(walker, traverse_instructions, |walker| { - walker.to_hash_node().unwrap() - }) + traverse_and_apply( + walker, + traverse_instructions, + |walker| walker.to_hash_node().unwrap(), + grove_version, + ) } /// Traverses a tree to a certain node and returns the kv_feature_type of @@ -246,20 +276,28 @@ pub mod tests { pub fn traverse_get_kv_feature_type( walker: &mut RefWalker, traverse_instructions: &[bool], + grove_version: &GroveVersion, ) -> Node { - traverse_and_apply(walker, traverse_instructions, |walker| { - walker.to_kv_value_hash_feature_type_node() - }) + traverse_and_apply( + walker, + traverse_instructions, + |walker| walker.to_kv_value_hash_feature_type_node(), + grove_version, + ) } /// Traverses a tree to a certain node and returns the kv_hash of /// that node pub fn traverse_get_kv_hash( walker: &mut RefWalker, traverse_instructions: &[bool], + grove_version: &GroveVersion, ) -> Node { - traverse_and_apply(walker, traverse_instructions, |walker| { - walker.to_kvhash_node() - }) + traverse_and_apply( + walker, + traverse_instructions, + |walker| walker.to_kvhash_node(), + grove_version, + ) } /// Traverses a tree to a certain node and returns the result of applying @@ -268,6 +306,7 @@ pub mod tests { walker: &mut RefWalker, traverse_instructions: &[bool], apply_fn: T, + grove_version: 
&GroveVersion, ) -> Node where T: Fn(&mut RefWalker) -> Node, @@ -279,25 +318,34 @@ pub mod tests { let mut child = walker .walk( traverse_instructions[0], - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .unwrap() .unwrap(); - traverse_and_apply(&mut child, &traverse_instructions[1..], apply_fn) + traverse_and_apply( + &mut child, + &traverse_instructions[1..], + apply_fn, + grove_version, + ) } #[test] fn build_chunk_from_root_depth_0() { + let grove_version = GroveVersion::latest(); let mut tree = build_tree_10_nodes(); let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); // should return the node hash of the root node - let chunk = tree_walker.create_chunk(0).expect("should build chunk"); + let chunk = tree_walker + .create_chunk(0, grove_version) + .expect("should build chunk"); assert_eq!(chunk.len(), 1); assert_eq!( chunk[0], - Op::Push(traverse_get_node_hash(&mut tree_walker, &[])) + Op::Push(traverse_get_node_hash(&mut tree_walker, &[], grove_version)) ); let computed_tree = execute(chunk.into_iter().map(Ok), true, |_| Ok(())) @@ -308,6 +356,7 @@ pub mod tests { #[test] fn build_chunk_from_root_depth_1() { + let grove_version = GroveVersion::latest(); let mut tree = build_tree_10_nodes(); let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); @@ -316,15 +365,29 @@ pub mod tests { // 3 // / \ // Hash(1) Hash(7) - let chunk = tree_walker.create_chunk(1).expect("should build chunk"); + let chunk = tree_walker + .create_chunk(1, grove_version) + .expect("should build chunk"); assert_eq!(chunk.len(), 5); assert_eq!( chunk, vec![ - Op::Push(traverse_get_node_hash(&mut tree_walker, &[LEFT])), - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[])), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[], + grove_version + )), Op::Parent, - Op::Push(traverse_get_node_hash(&mut 
tree_walker, &[RIGHT])), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT], + grove_version + )), Op::Child ] ); @@ -337,6 +400,7 @@ pub mod tests { #[test] fn build_chunk_from_root_depth_3() { + let grove_version = GroveVersion::latest(); let mut tree = build_tree_10_nodes(); let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); @@ -349,47 +413,68 @@ pub mod tests { // 0 2 5 8 // / \ \ // H(4) H(6) H(9) - let chunk = tree_walker.create_chunk(3).expect("should build chunk"); + let chunk = tree_walker + .create_chunk(3, grove_version) + .expect("should build chunk"); assert_eq!(chunk.len(), 19); assert_eq!( chunk, vec![ Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, LEFT] + &[LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT], + grove_version )), - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[LEFT])), Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, RIGHT] + &[LEFT, RIGHT], + grove_version )), Op::Child, - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[])), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[], + grove_version + )), Op::Parent, Op::Push(traverse_get_node_hash( &mut tree_walker, - &[RIGHT, LEFT, LEFT] + &[RIGHT, LEFT, LEFT], + grove_version )), Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, LEFT] + &[RIGHT, LEFT], + grove_version )), Op::Parent, Op::Push(traverse_get_node_hash( &mut tree_walker, - &[RIGHT, LEFT, RIGHT] + &[RIGHT, LEFT, RIGHT], + grove_version )), Op::Child, - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[RIGHT])), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT], + grove_version + )), Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, RIGHT] + &[RIGHT, RIGHT], + grove_version )), Op::Push(traverse_get_node_hash( &mut tree_walker, - &[RIGHT, RIGHT, RIGHT] + &[RIGHT, RIGHT, RIGHT], + 
grove_version )), Op::Child, Op::Child, @@ -405,6 +490,7 @@ pub mod tests { #[test] fn build_chunk_from_root_depth_max_depth() { + let grove_version = GroveVersion::latest(); let mut tree = build_tree_10_nodes(); let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); @@ -416,47 +502,68 @@ pub mod tests { // 0 2 5 8 // / \ \ // 4 6 9 - let chunk = tree_walker.create_chunk(4).expect("should build chunk"); + let chunk = tree_walker + .create_chunk(4, grove_version) + .expect("should build chunk"); assert_eq!(chunk.len(), 19); assert_eq!( chunk, vec![ Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, LEFT] + &[LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[LEFT], + grove_version )), - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[LEFT])), Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[LEFT, RIGHT] + &[LEFT, RIGHT], + grove_version )), Op::Child, - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[])), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[], + grove_version + )), Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, LEFT, LEFT] + &[RIGHT, LEFT, LEFT], + grove_version )), Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, LEFT] + &[RIGHT, LEFT], + grove_version )), Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, LEFT, RIGHT] + &[RIGHT, LEFT, RIGHT], + grove_version )), Op::Child, - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[RIGHT])), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT], + grove_version + )), Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, RIGHT] + &[RIGHT, RIGHT], + grove_version )), Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, RIGHT, RIGHT] + &[RIGHT, RIGHT, RIGHT], + grove_version )), Op::Child, Op::Child, @@ -472,13 +579,18 @@ pub mod 
tests { #[test] fn chunk_greater_than_max_should_equal_max_depth() { + let grove_version = GroveVersion::latest(); let mut tree = build_tree_10_nodes(); let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); // build chunk with depth greater than tree // we should get the same result as building with the exact depth - let large_depth_chunk = tree_walker.create_chunk(100).expect("should build chunk"); - let exact_depth_chunk = tree_walker.create_chunk(4).expect("should build chunk"); + let large_depth_chunk = tree_walker + .create_chunk(100, grove_version) + .expect("should build chunk"); + let exact_depth_chunk = tree_walker + .create_chunk(4, grove_version) + .expect("should build chunk"); assert_eq!(large_depth_chunk, exact_depth_chunk); let tree_a = execute(large_depth_chunk.into_iter().map(Ok), true, |_| Ok(())) @@ -492,6 +604,7 @@ pub mod tests { #[test] fn build_chunk_after_traversal_depth_2() { + let grove_version = GroveVersion::latest(); let mut tree = build_tree_10_nodes(); let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); @@ -505,34 +618,43 @@ pub mod tests { // right traversal let chunk = tree_walker - .traverse_and_build_chunk(&[RIGHT], 2) + .traverse_and_build_chunk(&[RIGHT], 2, grove_version) .expect("should build chunk"); assert_eq!( chunk, vec![ Op::Push(traverse_get_node_hash( &mut tree_walker, - &[RIGHT, LEFT, LEFT] + &[RIGHT, LEFT, LEFT], + grove_version )), Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, LEFT] + &[RIGHT, LEFT], + grove_version )), Op::Parent, Op::Push(traverse_get_node_hash( &mut tree_walker, - &[RIGHT, LEFT, RIGHT] + &[RIGHT, LEFT, RIGHT], + grove_version )), Op::Child, - Op::Push(traverse_get_kv_feature_type(&mut tree_walker, &[RIGHT])), + Op::Push(traverse_get_kv_feature_type( + &mut tree_walker, + &[RIGHT], + grove_version + )), Op::Parent, Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, RIGHT] + &[RIGHT, RIGHT], + grove_version )), 
Op::Push(traverse_get_node_hash( &mut tree_walker, - &[RIGHT, RIGHT, RIGHT] + &[RIGHT, RIGHT, RIGHT], + grove_version )), Op::Child, Op::Child, @@ -547,12 +669,13 @@ pub mod tests { .expect("should reconstruct tree"); assert_eq!( Node::Hash(computed_tree.hash().unwrap()), - traverse_get_node_hash(&mut tree_walker, &[RIGHT]) + traverse_get_node_hash(&mut tree_walker, &[RIGHT], grove_version) ); } #[test] fn build_chunk_after_traversal_depth_1() { + let grove_version = GroveVersion::latest(); let mut tree = build_tree_10_nodes(); let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); @@ -564,23 +687,26 @@ pub mod tests { // instruction traversal let chunk = tree_walker - .traverse_and_build_chunk(&[RIGHT, LEFT], 1) + .traverse_and_build_chunk(&[RIGHT, LEFT], 1, grove_version) .expect("should build chunk"); assert_eq!( chunk, vec![ Op::Push(traverse_get_node_hash( &mut tree_walker, - &[RIGHT, LEFT, LEFT] + &[RIGHT, LEFT, LEFT], + grove_version )), Op::Push(traverse_get_kv_feature_type( &mut tree_walker, - &[RIGHT, LEFT] + &[RIGHT, LEFT], + grove_version )), Op::Parent, Op::Push(traverse_get_node_hash( &mut tree_walker, - &[RIGHT, LEFT, RIGHT] + &[RIGHT, LEFT, RIGHT], + grove_version )), Op::Child, ] @@ -591,7 +717,7 @@ pub mod tests { .expect("should reconstruct tree"); assert_eq!( Node::Hash(computed_tree.hash().unwrap()), - traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT]) + traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT], grove_version) ); } @@ -616,12 +742,13 @@ pub mod tests { #[test] fn test_height_proof_generation() { + let grove_version = GroveVersion::latest(); let mut tree = build_tree_10_nodes(); let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); let mut height_proof = vec![]; tree_walker - .generate_height_proof(&mut height_proof) + .generate_height_proof(&mut height_proof, grove_version) .unwrap() .expect("should generate height proof"); @@ -629,14 +756,30 @@ pub mod tests { assert_eq!( height_proof, vec![ - 
Op::Push(traverse_get_kv_hash(&mut tree_walker, &[LEFT, LEFT])), - Op::Push(traverse_get_kv_hash(&mut tree_walker, &[LEFT])), + Op::Push(traverse_get_kv_hash( + &mut tree_walker, + &[LEFT, LEFT], + grove_version + )), + Op::Push(traverse_get_kv_hash( + &mut tree_walker, + &[LEFT], + grove_version + )), Op::Parent, - Op::Push(traverse_get_node_hash(&mut tree_walker, &[LEFT, RIGHT])), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[LEFT, RIGHT], + grove_version + )), Op::Child, - Op::Push(traverse_get_kv_hash(&mut tree_walker, &[])), + Op::Push(traverse_get_kv_hash(&mut tree_walker, &[], grove_version)), Op::Parent, - Op::Push(traverse_get_node_hash(&mut tree_walker, &[RIGHT])), + Op::Push(traverse_get_node_hash( + &mut tree_walker, + &[RIGHT], + grove_version + )), Op::Child, ] ); @@ -644,12 +787,13 @@ pub mod tests { #[test] fn test_height_proof_verification() { + let grove_version = GroveVersion::latest(); let mut tree = build_tree_10_nodes(); let mut tree_walker = RefWalker::new(&mut tree, PanicSource {}); let mut height_proof = vec![]; tree_walker - .generate_height_proof(&mut height_proof) + .generate_height_proof(&mut height_proof, grove_version) .unwrap() .expect("should generate height proof"); diff --git a/merk/src/proofs/chunk/util.rs b/merk/src/proofs/chunk/util.rs index fab2024a..3cbc0942 100644 --- a/merk/src/proofs/chunk/util.rs +++ b/merk/src/proofs/chunk/util.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and 
this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Collection of state independent algorithms needed for facilitate chunk //! production and restoration @@ -573,7 +545,7 @@ mod test { #[test] fn test_traversal_instruction_as_string() { - assert_eq!(traversal_instruction_as_vec_bytes(&[]), vec![]); + assert_eq!(traversal_instruction_as_vec_bytes(&[]), Vec::::new()); assert_eq!(traversal_instruction_as_vec_bytes(&[LEFT]), vec![1u8]); assert_eq!(traversal_instruction_as_vec_bytes(&[RIGHT]), vec![0u8]); assert_eq!( diff --git a/merk/src/proofs/query/map.rs b/merk/src/proofs/query/map.rs index 757403a2..9e741cea 100644 --- a/merk/src/proofs/query/map.rs +++ b/merk/src/proofs/query/map.rs @@ -236,38 +236,6 @@ impl<'a> Iterator for Range<'a> { } } -#[cfg(feature = "full")] -/// `BTreeMapExtras` provides extra functionality to work with `BTreeMap` that -/// either missed or unstable -/// NOTE: We can easily remove this when the following feature will be rolled -/// out into stable rust: https://github.com/rust-lang/rust/issues/62924 -trait BTreeMapExtras { - type K; - type V; - - /// Returns `None` if `BTreeMap` is empty otherwise the first key-value pair - /// in the map. The key in this pair is the minimum key in the map. - fn first_key_value(&self) -> Option<(&Self::K, &Self::V)>; - - /// Returns `None` if `BTreeMap` is empty otherwise the last key-value pair - /// in the map. 
The key in this pair is the maximum key in the map. - fn last_key_value(&self) -> Option<(&Self::K, &Self::V)>; -} - -#[cfg(feature = "full")] -impl BTreeMapExtras for BTreeMap { - type K = KK; - type V = VV; - - fn first_key_value(&self) -> Option<(&Self::K, &Self::V)> { - self.iter().next() - } - - fn last_key_value(&self) -> Option<(&Self::K, &Self::V)> { - self.iter().next_back() - } -} - #[cfg(feature = "full")] #[cfg(test)] mod tests { diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index 107a1ec8..669940cc 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -20,6 +20,7 @@ use std::{collections::HashSet, fmt, ops::RangeFull}; #[cfg(feature = "full")] use grovedb_costs::{cost_return_on_error, CostContext, CostResult, CostsExt, OperationCost}; +use grovedb_version::version::GroveVersion; #[cfg(any(feature = "full", feature = "verify"))] use indexmap::IndexMap; #[cfg(feature = "full")] @@ -71,7 +72,7 @@ pub struct SubqueryBranch { #[cfg(any(feature = "full", feature = "verify"))] /// `Query` represents one or more keys or ranges of keys, which can be used to -/// resolve a proof which will include all of the requested values. +/// resolve a proof which will include all the requested values. 
#[derive(Debug, Default, Clone, PartialEq)] pub struct Query { /// Items @@ -458,7 +459,7 @@ impl Query { } } - /// Check if has subquery + /// Check if there is a subquery pub fn has_subquery(&self) -> bool { // checks if a query has subquery items if self.default_subquery_branch.subquery.is_some() @@ -470,7 +471,7 @@ impl Query { false } - /// Check if has only keys + /// Check if there are only keys pub fn has_only_keys(&self) -> bool { // checks if all searched for items are keys self.items.iter().all(|a| a.is_key()) @@ -579,18 +580,6 @@ where self.tree().hash().map(Node::Hash) } - #[cfg(feature = "full")] - #[allow(dead_code)] // TODO: remove when proofs will be enabled - /// Create a full proof - pub(crate) fn create_full_proof( - &mut self, - query: &[QueryItem], - limit: Option, - left_to_right: bool, - ) -> CostResult { - self.create_proof(query, limit, left_to_right) - } - /// Generates a proof for the list of queried keys. Returns a tuple /// containing the generated proof operators, and a tuple representing if /// any keys were queried were less than the left edge or greater than the @@ -603,6 +592,7 @@ where query: &[QueryItem], limit: Option, left_to_right: bool, + grove_version: &GroveVersion, ) -> CostResult { let mut cost = OperationCost::default(); @@ -678,12 +668,24 @@ where let (mut proof, left_absence, mut new_limit) = if left_to_right { cost_return_on_error!( &mut cost, - self.create_child_proof(proof_direction, left_items, limit, left_to_right) + self.create_child_proof( + proof_direction, + left_items, + limit, + left_to_right, + grove_version + ) ) } else { cost_return_on_error!( &mut cost, - self.create_child_proof(proof_direction, right_items, limit, left_to_right) + self.create_child_proof( + proof_direction, + right_items, + limit, + left_to_right, + grove_version + ) ) }; @@ -717,12 +719,24 @@ where let (mut right_proof, right_absence, new_limit) = if left_to_right { cost_return_on_error!( &mut cost, - 
self.create_child_proof(proof_direction, right_items, new_limit, left_to_right,) + self.create_child_proof( + proof_direction, + right_items, + new_limit, + left_to_right, + grove_version + ) ) } else { cost_return_on_error!( &mut cost, - self.create_child_proof(proof_direction, left_items, new_limit, left_to_right,) + self.create_child_proof( + proof_direction, + left_items, + new_limit, + left_to_right, + grove_version + ) ) }; @@ -786,17 +800,21 @@ where query: &[QueryItem], limit: Option, left_to_right: bool, + grove_version: &GroveVersion, ) -> CostResult { if !query.is_empty() { - self.walk(left, None::<&fn(&[u8]) -> Option>) - .flat_map_ok(|child_opt| { - if let Some(mut child) = child_opt { - child.create_proof(query, limit, left_to_right) - } else { - Ok((LinkedList::new(), (true, true), limit)) - .wrap_with_cost(Default::default()) - } - }) + self.walk( + left, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .flat_map_ok(|child_opt| { + if let Some(mut child) = child_opt { + child.create_proof(query, limit, left_to_right, grove_version) + } else { + Ok((LinkedList::new(), (true, true), limit)).wrap_with_cost(Default::default()) + } + }) } else if let Some(link) = self.tree().link(left) { let mut proof = LinkedList::new(); proof.push_back(if left_to_right { @@ -844,7 +862,7 @@ mod test { *, }; use crate::{ - proofs::query::{query_item::QueryItem::RangeAfter, verify}, + proofs::query::verify, test_utils::make_tree_seq, tree::{NoopCommit, PanicSource, RefWalker, TreeNode}, TreeFeatureType::BasicMerkNode, @@ -901,11 +919,12 @@ mod test { } fn verify_keys_test(keys: Vec>, expected_result: Vec>>) { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, ..) 
= walker - .create_full_proof( + .create_proof( keys.clone() .into_iter() .map(QueryItem::Key) @@ -913,6 +932,7 @@ mod test { .as_slice(), None, true, + grove_version, ) .unwrap() .expect("failed to create proof"); @@ -1141,11 +1161,12 @@ mod test { #[test] fn empty_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, absence, ..) = walker - .create_full_proof(vec![].as_slice(), None, true) + .create_proof(vec![].as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1187,12 +1208,13 @@ mod test { #[test] fn root_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::Key(vec![5])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1242,12 +1264,13 @@ mod test { #[test] fn leaf_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::Key(vec![3])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1297,12 +1320,13 @@ mod test { #[test] fn double_leaf_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::Key(vec![3]), QueryItem::Key(vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1359,6 +1383,7 @@ mod test { #[test] fn all_nodes_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); @@ -1368,7 +1393,7 @@ mod test { QueryItem::Key(vec![7]), ]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1429,12 +1454,13 @@ mod test { #[test] fn global_edge_absence_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::Key(vec![8])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1483,12 +1509,13 @@ mod test { #[test] fn absence_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::Key(vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1540,6 +1567,7 @@ mod test { #[test] fn doc_proof() { + let grove_version = GroveVersion::latest(); let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode) .unwrap() .attach( @@ -1622,7 +1650,7 @@ mod test { QueryItem::Key(vec![4]), ]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1779,14 +1807,15 @@ mod test { #[test] fn range_proof() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -1880,14 +1909,14 @@ mod test { assert_eq!(res.limit, None); // right to left test - let mut tree = make_tree_seq(10); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, ..) = walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -1912,14 +1941,15 @@ mod test { #[test] fn range_proof_inclusive() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeInclusive( vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2015,14 +2045,14 @@ mod test { assert_eq!(res.limit, None); // right_to_left proof - let mut tree = make_tree_seq(10); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeInclusive( vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7], )]; let (proof, ..) = walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -2049,12 +2079,13 @@ mod test { #[test] fn range_from_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2127,13 +2158,13 @@ mod test { let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(1), true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::Key(vec![5])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2159,7 +2190,7 @@ mod test { let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), Some(2), true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2169,7 +2200,7 @@ mod test { QueryItem::Key(vec![7]), ]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2198,13 +2229,13 @@ mod test { let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(100), true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2233,7 +2264,7 @@ mod test { let query_items = vec![QueryItem::RangeFrom(vec![5]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -2257,12 +2288,13 @@ mod test { #[test] fn range_to_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2363,13 +2395,13 @@ mod test { let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), Some(1), true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![2])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2395,13 +2427,13 @@ mod test { let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(2), true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![3])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2430,13 +2462,13 @@ mod test { let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(100), true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeTo(..vec![6])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2470,7 +2502,7 @@ mod test { let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -2501,7 +2533,7 @@ mod test { let query_items = vec![QueryItem::RangeTo(..vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(2), false) + .create_proof(query_items.as_slice(), Some(2), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -2526,12 +2558,13 @@ mod test { #[test] fn range_to_proof_inclusive() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2632,13 +2665,13 @@ mod test { let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(1), true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![2])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2664,13 +2697,13 @@ mod test { let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), Some(2), true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![3])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2699,13 +2732,13 @@ mod test { let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(100), true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2739,7 +2772,7 @@ mod test { let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -2770,7 +2803,7 @@ mod test { let query_items = vec![QueryItem::RangeToInclusive(..=vec![6])]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), Some(1), false) + .create_proof(query_items.as_slice(), Some(1), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -2792,12 +2825,13 @@ mod test { #[test] fn range_after_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let query_items = vec![RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2898,13 +2932,13 @@ mod test { let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(1), true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2930,13 +2964,13 @@ mod test { let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(2), true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -2965,13 +2999,13 @@ mod test { let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(100), true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3003,9 +3037,9 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let query_items = vec![RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3034,9 +3068,9 @@ mod test { let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); - let query_items = vec![RangeAfter(vec![3]..)]; + let query_items = vec![QueryItem::RangeAfter(vec![3]..)]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), Some(3), false) + .create_proof(query_items.as_slice(), Some(3), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3061,12 +3095,13 @@ mod test { #[test] fn range_after_to_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3157,13 +3192,13 @@ mod test { let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(1), true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3189,13 +3224,13 @@ mod test { let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(2), true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3224,13 +3259,13 @@ mod test { let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(100), true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3259,7 +3294,7 @@ mod test { let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3285,7 +3320,7 @@ mod test { let query_items = vec![QueryItem::RangeAfterTo(vec![3]..vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(300), false) + .create_proof(query_items.as_slice(), Some(300), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3310,12 +3345,13 @@ mod test { #[test] fn range_after_to_proof_inclusive() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3405,13 +3441,13 @@ mod test { let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(1), true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![4])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3437,13 +3473,13 @@ mod test { let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(2), true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![5])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3472,13 +3508,13 @@ mod test { let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(100), true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3507,7 +3543,7 @@ mod test { let query_items = vec![QueryItem::RangeAfterToInclusive(vec![3]..=vec![7])]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3531,12 +3567,13 @@ mod test { #[test] fn range_full_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3645,13 +3682,13 @@ mod test { let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(1), true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![2])]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3677,13 +3714,13 @@ mod test { let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(2), true) + .create_proof(query_items.as_slice(), Some(2), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeToInclusive(..=vec![3])]; let (equivalent_proof, equivalent_absence, ..) 
= walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3712,13 +3749,13 @@ mod test { let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), Some(100), true) + .create_proof(query_items.as_slice(), Some(100), true, grove_version) .unwrap() .expect("create_proof errored"); let equivalent_query_items = vec![QueryItem::RangeFull(..)]; let (equivalent_proof, equivalent_absence, ..) = walker - .create_full_proof(equivalent_query_items.as_slice(), None, true) + .create_proof(equivalent_query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3754,7 +3791,7 @@ mod test { let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3787,7 +3824,7 @@ mod test { let query_items = vec![QueryItem::RangeFull(..)]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), Some(2), false) + .create_proof(query_items.as_slice(), Some(2), false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3812,12 +3849,13 @@ mod test { #[test] fn proof_with_limit() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeFrom(vec![2]..)]; let (proof, _, limit) = walker - .create_full_proof(query_items.as_slice(), Some(1), true) + .create_proof(query_items.as_slice(), Some(1), true, grove_version) .unwrap() .expect("create_proof errored"); @@ -3886,12 +3924,13 @@ mod test { #[test] fn right_to_left_proof() { + let grove_version = GroveVersion::latest(); let mut tree = make_6_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::RangeFrom(vec![3]..)]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, false) + .create_proof(query_items.as_slice(), None, false, grove_version) .unwrap() .expect("create_proof errored"); @@ -3991,14 +4030,15 @@ mod test { #[test] fn range_proof_missing_upper_bound() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![QueryItem::Range( vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 6, 5], )]; let (proof, absence, ..) 
= walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4093,7 +4133,8 @@ mod test { #[test] fn range_proof_missing_lower_bound() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, grove_version); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let query_items = vec![ @@ -4101,7 +4142,7 @@ mod test { QueryItem::Range(vec![0, 0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7]), ]; let (proof, absence, ..) = walker - .create_full_proof(query_items.as_slice(), None, true) + .create_proof(query_items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4192,7 +4233,8 @@ mod test { #[test] fn subset_proof() { - let mut tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let mut tree = make_tree_seq(10, grove_version); let expected_hash = tree.hash().unwrap().to_owned(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); @@ -4201,7 +4243,7 @@ mod test { query.insert_all(); let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), None, true) + .create_proof(query.items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4228,7 +4270,7 @@ mod test { query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 2]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); query.insert_range(vec![0, 0, 0, 0, 0, 0, 0, 7]..vec![0, 0, 0, 0, 0, 0, 0, 10]); let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), None, true) + .create_proof(query.items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4259,7 +4301,7 @@ mod test { query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 2]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); query.insert_range(vec![0, 0, 0, 0, 0, 0, 0, 6]..vec![0, 0, 0, 0, 0, 0, 0, 10]); let (proof, ..) 
= walker - .create_full_proof(query.items.as_slice(), None, true) + .create_proof(query.items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4290,7 +4332,7 @@ mod test { query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 1]..=vec![0, 0, 0, 0, 0, 0, 0, 3]); query.insert_range_inclusive(vec![0, 0, 0, 0, 0, 0, 0, 2]..=vec![0, 0, 0, 0, 0, 0, 0, 5]); let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), None, true) + .create_proof(query.items.as_slice(), None, true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4320,7 +4362,7 @@ mod test { let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); let (proof, ..) = walker - .create_full_proof(query.items.as_slice(), Some(5), true) + .create_proof(query.items.as_slice(), Some(5), true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4349,6 +4391,7 @@ mod test { #[test] fn break_subset_proof() { + let grove_version = GroveVersion::latest(); // TODO: move this to where you'd set the constraints for this definition // goal is to show that ones limit and offset values are involved // whether a query is subset or not now also depends on the state @@ -4358,7 +4401,7 @@ mod test { // with limit and offset the nodes a query highlights now depends on state // hence it's impossible to know if something is subset at definition time - let mut tree = make_tree_seq(10); + let mut tree = make_tree_seq(10, grove_version); let expected_hash = tree.hash().unwrap().to_owned(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); @@ -4366,7 +4409,7 @@ mod test { let mut query = Query::new(); query.insert_range_from(vec![0, 0, 0, 0, 0, 0, 0, 1]..); let (proof, ..) 
= walker - .create_full_proof(query.items.as_slice(), Some(3), true) + .create_proof(query.items.as_slice(), Some(3), true, grove_version) .unwrap() .expect("create_proof errored"); @@ -4444,6 +4487,7 @@ mod test { #[test] fn verify_ops() { + let grove_version = GroveVersion::latest(); let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode).unwrap(); tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) .unwrap() @@ -4453,7 +4497,12 @@ mod test { let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, ..) = walker - .create_full_proof(vec![QueryItem::Key(vec![5])].as_slice(), None, true) + .create_proof( + vec![QueryItem::Key(vec![5])].as_slice(), + None, + true, + grove_version, + ) .unwrap() .expect("failed to create proof"); let mut bytes = vec![]; @@ -4470,6 +4519,7 @@ mod test { #[test] #[should_panic(expected = "verify failed")] fn verify_ops_mismatched_hash() { + let grove_version = GroveVersion::latest(); let mut tree = TreeNode::new(vec![5], vec![5], None, BasicMerkNode).unwrap(); tree.commit(&mut NoopCommit {}, &|_, _| Ok(0)) .unwrap() @@ -4478,7 +4528,12 @@ mod test { let mut walker = RefWalker::new(&mut tree, PanicSource {}); let (proof, ..) = walker - .create_full_proof(vec![QueryItem::Key(vec![5])].as_slice(), None, true) + .create_proof( + vec![QueryItem::Key(vec![5])].as_slice(), + None, + true, + grove_version, + ) .unwrap() .expect("failed to create proof"); let mut bytes = vec![]; @@ -4493,11 +4548,12 @@ mod test { #[test] #[should_panic(expected = "verify failed")] fn verify_query_mismatched_hash() { + let grove_version = GroveVersion::latest(); let mut tree = make_3_node_tree(); let mut walker = RefWalker::new(&mut tree, PanicSource {}); let keys = vec![vec![5], vec![7]]; let (proof, ..) 
= walker - .create_full_proof( + .create_proof( keys.clone() .into_iter() .map(QueryItem::Key) @@ -4505,6 +4561,7 @@ mod test { .as_slice(), None, true, + grove_version, ) .unwrap() .expect("failed to create proof"); diff --git a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index d5d76673..397ad13a 100644 --- a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -35,6 +35,7 @@ use std::{convert::TryInto, ops::Range}; use grovedb_costs::storage_cost::removal::StorageRemovedBytes::BasicStorageRemoval; use grovedb_path::SubtreePath; use grovedb_storage::{Storage, StorageBatch}; +use grovedb_version::version::GroveVersion; use rand::prelude::*; pub use temp_merk::TempMerk; @@ -74,7 +75,11 @@ pub fn assert_tree_invariants(tree: &TreeNode) { /// Apply given batch to given tree and commit using memory only. /// Used by `apply_memonly` which also performs checks using /// `assert_tree_invariants`. Return Tree. -pub fn apply_memonly_unchecked(tree: TreeNode, batch: &MerkBatch>) -> TreeNode { +pub fn apply_memonly_unchecked( + tree: TreeNode, + batch: &MerkBatch>, + grove_version: &GroveVersion, +) -> TreeNode { let is_sum_node = tree.is_sum_node(); let walker = Walker::::new(tree, PanicSource {}); let mut tree = Walker::::apply_to( @@ -88,7 +93,7 @@ pub fn apply_memonly_unchecked(tree: TreeNode, batch: &MerkBatch>) -> Tr is_sum_node, )) }, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -96,6 +101,7 @@ pub fn apply_memonly_unchecked(tree: TreeNode, batch: &MerkBatch>) -> Tr BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") @@ -116,8 +122,12 @@ pub fn apply_memonly_unchecked(tree: TreeNode, batch: &MerkBatch>) -> Tr /// Apply given batch to given tree and commit using memory only. /// Perform checks using `assert_tree_invariants`. Return Tree. 
-pub fn apply_memonly(tree: TreeNode, batch: &MerkBatch>) -> TreeNode { - let tree = apply_memonly_unchecked(tree, batch); +pub fn apply_memonly( + tree: TreeNode, + batch: &MerkBatch>, + grove_version: &GroveVersion, +) -> TreeNode { + let tree = apply_memonly_unchecked(tree, batch, grove_version); assert_tree_invariants(&tree); tree } @@ -128,6 +138,7 @@ pub fn apply_to_memonly( maybe_tree: Option, batch: &MerkBatch>, is_sum_tree: bool, + grove_version: &GroveVersion, ) -> Option { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); Walker::::apply_to( @@ -141,7 +152,7 @@ pub fn apply_to_memonly( is_sum_tree, )) }, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -149,6 +160,7 @@ pub fn apply_to_memonly( BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply failed") @@ -234,6 +246,7 @@ pub fn make_tree_rand( batch_size: u64, initial_seed: u64, is_sum_tree: bool, + grove_version: &GroveVersion, ) -> TreeNode { assert!(node_count >= batch_size); assert_eq!((node_count % batch_size), 0); @@ -251,7 +264,7 @@ pub fn make_tree_rand( let batch_count = node_count / batch_size; for _ in 0..batch_count { let batch = make_batch_rand(batch_size, seed); - tree = apply_memonly(tree, &batch); + tree = apply_memonly(tree, &batch, grove_version); seed += 1; } @@ -261,14 +274,18 @@ pub fn make_tree_rand( /// Create tree with initial fixed values and apply `node count` Put ops using /// sequential keys using memory only /// starting tree node is [0; 20] -pub fn make_tree_seq(node_count: u64) -> TreeNode { - make_tree_seq_with_start_key(node_count, [0; 20].to_vec()) +pub fn make_tree_seq(node_count: u64, grove_version: &GroveVersion) -> TreeNode { + make_tree_seq_with_start_key(node_count, [0; 20].to_vec(), grove_version) } /// Create tree with initial fixed values and apply `node count` 
Put ops using /// sequential keys using memory only /// requires a starting key vector -pub fn make_tree_seq_with_start_key(node_count: u64, start_key: Vec) -> TreeNode { +pub fn make_tree_seq_with_start_key( + node_count: u64, + start_key: Vec, + grove_version: &GroveVersion, +) -> TreeNode { let batch_size = if node_count >= 10_000 { assert_eq!(node_count % 10_000, 0); 10_000 @@ -283,7 +300,7 @@ pub fn make_tree_seq_with_start_key(node_count: u64, start_key: Vec) -> Tree let batch_count = node_count / batch_size; for i in 0..batch_count { let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size)); - tree = apply_memonly(tree, &batch); + tree = apply_memonly(tree, &batch, grove_version); } tree @@ -292,6 +309,7 @@ pub fn make_tree_seq_with_start_key(node_count: u64, start_key: Vec) -> Tree pub fn empty_path_merk<'db, S>( storage: &'db S, batch: &'db StorageBatch, + grove_version: &GroveVersion, ) -> Merk<>::BatchStorageContext> where S: Storage<'db>, @@ -301,7 +319,8 @@ where .get_storage_context(SubtreePath::empty(), Some(batch)) .unwrap(), false, - None:: Option>, + None:: Option>, + grove_version, ) .unwrap() .unwrap() @@ -310,6 +329,7 @@ where /// Shortcut to open a Merk for read only pub fn empty_path_merk_read_only<'db, S>( storage: &'db S, + grove_version: &GroveVersion, ) -> Merk<>::BatchStorageContext> where S: Storage<'db>, @@ -319,7 +339,8 @@ where .get_storage_context(SubtreePath::empty(), None) .unwrap(), false, - None:: Option>, + None:: Option>, + grove_version, ) .unwrap() .unwrap() diff --git a/merk/src/test_utils/temp_merk.rs b/merk/src/test_utils/temp_merk.rs index 25e5b75c..69e5b555 100644 --- a/merk/src/test_utils/temp_merk.rs +++ b/merk/src/test_utils/temp_merk.rs @@ -38,6 +38,7 @@ use grovedb_storage::{ rocksdb_storage::{test_utils::TempStorage, PrefixedRocksDbStorageContext}, Storage, }; +use grovedb_version::version::GroveVersion; use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "full")] @@ -55,7 +56,7 @@ pub struct 
TempMerk { impl TempMerk { /// Opens a `TempMerk` at the given file path, creating a new one if it /// does not exist. - pub fn new() -> Self { + pub fn new(grove_version: &GroveVersion) -> Self { let storage = Box::leak(Box::new(TempStorage::new())); let batch = Box::leak(Box::new(StorageBatch::new())); @@ -66,7 +67,8 @@ impl TempMerk { let merk = Merk::open_base( context, false, - None:: Option>, + None:: Option>, + grove_version, ) .unwrap() .unwrap(); @@ -78,7 +80,7 @@ impl TempMerk { } /// Commits pending batch operations. - pub fn commit(&mut self) { + pub fn commit(&mut self, grove_version: &GroveVersion) { let batch = unsafe { Box::from_raw(self.batch as *const _ as *mut StorageBatch) }; self.storage .commit_multi_context_batch(*batch, None) @@ -92,7 +94,8 @@ impl TempMerk { self.merk = Merk::open_base( context, false, - None:: Option>, + None:: Option>, + grove_version, ) .unwrap() .unwrap(); @@ -113,7 +116,7 @@ impl Drop for TempMerk { #[cfg(feature = "full")] impl Default for TempMerk { fn default() -> Self { - Self::new() + Self::new(GroveVersion::latest()) } } diff --git a/merk/src/tree/encoding.rs b/merk/src/tree/encoding.rs index 3a97c895..cd10937d 100644 --- a/merk/src/tree/encoding.rs +++ b/merk/src/tree/encoding.rs @@ -8,6 +8,7 @@ use grovedb_costs::{ }; #[cfg(feature = "full")] use grovedb_storage::StorageContext; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use super::TreeNode; @@ -26,16 +27,22 @@ impl TreeNode { pub fn decode_raw( bytes: &[u8], key: Vec, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> Result { - TreeNode::decode(key, bytes, value_defined_cost_fn).map_err(EdError) + TreeNode::decode(key, bytes, value_defined_cost_fn, grove_version).map_err(EdError) } /// Get value from storage given key. 
pub(crate) fn get<'db, S, K>( storage: &S, key: K, - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult, Error> where S: StorageContext<'db>, @@ -47,7 +54,12 @@ impl TreeNode { let tree_opt = cost_return_on_error_no_add!( &cost, tree_bytes - .map(|x| TreeNode::decode_raw(&x, key.as_ref().to_vec(), value_defined_cost_fn)) + .map(|x| TreeNode::decode_raw( + &x, + key.as_ref().to_vec(), + value_defined_cost_fn, + grove_version + )) .transpose() ); @@ -96,13 +108,16 @@ impl TreeNode { &mut self, key: Vec, input: &[u8], - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> ed::Result<()> { let mut tree_inner: TreeNodeInner = Decode::decode(input)?; tree_inner.kv.key = key; if let Some(value_defined_cost_fn) = value_defined_cost_fn { tree_inner.kv.value_defined_cost = - value_defined_cost_fn(tree_inner.kv.value.as_slice()); + value_defined_cost_fn(tree_inner.kv.value.as_slice(), grove_version); } self.inner = Box::new(tree_inner); Ok(()) @@ -113,13 +128,16 @@ impl TreeNode { pub fn decode( key: Vec, input: &[u8], - value_defined_cost_fn: Option Option>, + value_defined_cost_fn: Option< + impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> ed::Result { let mut tree_inner: TreeNodeInner = Decode::decode(input)?; tree_inner.kv.key = key; if let Some(value_defined_cost_fn) = value_defined_cost_fn { tree_inner.kv.value_defined_cost = - value_defined_cost_fn(tree_inner.kv.value.as_slice()); + value_defined_cost_fn(tree_inner.kv.value.as_slice(), grove_version); } Ok(TreeNode::new_with_tree_inner(tree_inner)) } @@ -268,6 +286,7 @@ mod tests { #[test] fn decode_leaf_tree() { + let grove_version = GroveVersion::latest(); let bytes = vec![ 0, 0, 0, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 32, 34, 236, 157, 87, 27, 167, 116, 207, 158, @@ -277,7 +296,8 @@ mod tests { let tree = TreeNode::decode( vec![0], bytes.as_slice(), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .expect("should decode correctly"); assert_eq!(tree.key(), &[0]); @@ -287,6 +307,7 @@ mod tests { #[test] fn decode_reference_tree() { + let grove_version = GroveVersion::latest(); let bytes = vec![ 1, 1, 2, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 123, 124, 0, 0, 0, 55, 55, 55, 55, @@ -297,7 +318,8 @@ mod tests { let tree = TreeNode::decode( vec![0], bytes.as_slice(), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .expect("should decode correctly"); assert_eq!(tree.key(), &[0]); @@ -319,11 +341,13 @@ mod tests { #[test] fn decode_invalid_bytes_as_tree() { + let grove_version = GroveVersion::latest(); let bytes = vec![2, 3, 4, 5]; let tree = TreeNode::decode( vec![0], bytes.as_slice(), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ); assert!(tree.is_err()); } diff --git a/merk/src/tree/fuzz_tests.rs b/merk/src/tree/fuzz_tests.rs index 2f3067d1..eb026f56 100644 --- a/merk/src/tree/fuzz_tests.rs +++ b/merk/src/tree/fuzz_tests.rs @@ -43,7 +43,13 @@ fn fuzz_396148930387069749() { fn fuzz_case(seed: u64, using_sum_trees: bool) { let mut rng: SmallRng = SeedableRng::seed_from_u64(seed); let initial_size = (rng.gen::() % 10) + 1; - let tree = make_tree_rand(initial_size, initial_size, seed, using_sum_trees); + let tree = make_tree_rand( + initial_size, + initial_size, + seed, + using_sum_trees, + grove_version, + ); let mut map = Map::from_iter(tree.iter()); let mut maybe_tree = Some(tree); println!("====== MERK FUZZ ======"); @@ -55,7 +61,7 @@ fn fuzz_case(seed: u64, using_sum_trees: bool) { let batch = 
make_batch(maybe_tree.as_ref(), batch_size, rng.gen::()); println!("BATCH {}", j); println!("{:?}", batch); - maybe_tree = apply_to_memonly(maybe_tree, &batch, using_sum_trees); + maybe_tree = apply_to_memonly(maybe_tree, &batch, using_sum_trees, grove_version); apply_to_map(&mut map, &batch); assert_map(maybe_tree.as_ref(), &map); if let Some(tree) = &maybe_tree { diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index 9a29dc8e..6b2710b6 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -42,6 +42,7 @@ use grovedb_costs::{ }, CostContext, CostResult, CostsExt, OperationCost, }; +use grovedb_version::version::GroveVersion; #[cfg(any(feature = "full", feature = "verify"))] pub use hash::{ combine_hash, kv_digest_to_kv_hash, kv_hash, node_hash, value_hash, CryptoHash, HASH_LENGTH, @@ -922,9 +923,10 @@ impl TreeNode { left: bool, source: &S, value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, ) -> CostResult<(), Error> where - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { // TODO: return Err instead of panic? let link = self.link(left).expect("Expected link"); @@ -939,7 +941,10 @@ impl TreeNode { }; let mut cost = OperationCost::default(); - let tree = cost_return_on_error!(&mut cost, source.fetch(link, value_defined_cost_fn)); + let tree = cost_return_on_error!( + &mut cost, + source.fetch(link, value_defined_cost_fn, grove_version) + ); debug_assert_eq!(tree.key(), link.key()); *self.slot_mut(left) = Some(Link::Loaded { tree, diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index 738a89df..156c7e60 100644 --- a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -16,6 +16,7 @@ use grovedb_costs::{ }, CostContext, CostResult, CostsExt, OperationCost, }; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use integer_encoding::VarInt; #[cfg(feature = "full")] @@ -40,18 +41,18 @@ pub enum Op { /// cost into the Merk tree. 
This is ideal for sum items where /// we want sizes to always be fixed PutWithSpecializedCost(Vec, u32, TreeFeatureType), - /// Combined references include the value in the node hash + /// `Combined references` include the value in the node hash /// because the value is independent of the reference hash /// In GroveDB this is used for references PutCombinedReference(Vec, CryptoHash, TreeFeatureType), - /// Layered references include the value in the node hash + /// `Layered references` include the value in the node hash /// because the value is independent of the reference hash /// In GroveDB this is used for trees /// A layered reference does not pay for the tree's value, /// instead providing a cost for the value PutLayeredReference(Vec, u32, CryptoHash, TreeFeatureType), /// Replacing a layered reference is slightly more efficient - /// than putting it as the replace will not modify the size + /// than putting it as the replace operation will not modify the size /// hence there is no need to calculate a difference in /// costs ReplaceLayeredReference(Vec, u32, CryptoHash, TreeFeatureType), @@ -125,7 +126,10 @@ impl Fetch for PanicSource { fn fetch( &self, _link: &Link, - _value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + _value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + _grove_version: &GroveVersion, ) -> CostResult { unreachable!("'fetch' should not have been called") } @@ -149,10 +153,11 @@ where value_defined_cost_fn: Option<&V>, update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, + grove_version: &GroveVersion, ) -> CostContext, KeyUpdates), Error>> where C: Fn(&Vec, &Vec) -> Result, - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, U: FnMut( &StorageCost, &Vec, @@ -182,6 +187,7 @@ where value_defined_cost_fn, update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) .map_ok(|tree| { let new_keys: BTreeSet> = batch @@ -207,7 +213,8 @@ where old_tree_cost, 
value_defined_cost_fn, update_tree_value_based_on_costs, - section_removal_bytes + section_removal_bytes, + grove_version ) ) } @@ -228,10 +235,11 @@ where value_defined_cost_fn: Option<&V>, update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, + grove_version: &GroveVersion, ) -> CostResult, Error> where C: Fn(&Vec, &Vec) -> Result, - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, U: FnMut( &StorageCost, &Vec, @@ -260,7 +268,8 @@ where old_tree_cost, value_defined_cost_fn, update_tree_value_based_on_costs, - section_removal_bytes + section_removal_bytes, + grove_version ) ) .map(|tree| Self::new(tree, source.clone())); @@ -273,7 +282,8 @@ where old_tree_cost, value_defined_cost_fn, update_tree_value_based_on_costs, - section_removal_bytes + section_removal_bytes, + grove_version ) ) .0 @@ -286,7 +296,8 @@ where old_tree_cost, value_defined_cost_fn, update_tree_value_based_on_costs, - section_removal_bytes + section_removal_bytes, + grove_version ) ) .map(|tree| Self::new(tree, source.clone())), @@ -358,6 +369,7 @@ where value_defined_cost_fn, update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) ) .0 @@ -369,11 +381,12 @@ where fn apply_sorted_without_costs>( self, batch: &MerkBatch, + grove_version: &GroveVersion, ) -> CostResult<(Option, KeyUpdates), Error> { self.apply_sorted( batch, &|_, _| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -381,6 +394,7 @@ where BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) } @@ -395,10 +409,11 @@ where value_defined_cost_fn: Option<&V>, update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, + grove_version: &GroveVersion, ) -> CostResult<(Option, KeyUpdates), Error> where C: Fn(&Vec, &Vec) -> Result, - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, U: FnMut( &StorageCost, 
&Vec, @@ -427,7 +442,7 @@ where feature_type.to_owned(), old_specialized_cost, update_tree_value_based_on_costs, - section_removal_bytes + section_removal_bytes, ) ) } @@ -493,7 +508,7 @@ where old_specialized_cost(&key_vec, value) ) } - _ => 0, // can't get here anyways + _ => 0, // can't get here anyway }; let key_len = key_vec.len() as u32; @@ -522,8 +537,10 @@ where needs_value_verification: false, }; - let maybe_tree_walker = - cost_return_on_error!(&mut cost, self.remove(value_defined_cost_fn)); + let maybe_tree_walker = cost_return_on_error!( + &mut cost, + self.remove(value_defined_cost_fn, grove_version) + ); // If there are no more batch updates to the left this means that the index is 0 // There would be no key updates to the left of this part of the tree. @@ -550,6 +567,7 @@ where value_defined_cost_fn, update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) ); let new_keys: BTreeSet> = batch[..index] @@ -574,7 +592,8 @@ where old_specialized_cost, value_defined_cost_fn, update_tree_value_based_on_costs, - section_removal_bytes + section_removal_bytes, + grove_version ) ) } @@ -606,6 +625,7 @@ where value_defined_cost_fn, update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) ); let new_keys: BTreeSet> = batch[index + 1..] 
@@ -630,7 +650,8 @@ where old_specialized_cost, value_defined_cost_fn, update_tree_value_based_on_costs, - section_removal_bytes + section_removal_bytes, + grove_version ) ) } @@ -678,6 +699,7 @@ where value_defined_cost_fn, update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) .add_cost(cost) } @@ -697,6 +719,7 @@ where value_defined_cost_fn: Option<&V>, update_tree_value_based_on_costs: &mut U, section_removal_bytes: &mut R, + grove_version: &GroveVersion, ) -> CostResult<(Option, KeyUpdates), Error> where C: Fn(&Vec, &Vec) -> Result, @@ -705,7 +728,7 @@ where &Vec, &mut Vec, ) -> Result<(bool, Option), Error>, - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, R: FnMut(&Vec, u32, u32) -> Result<(StorageRemovedBytes, StorageRemovedBytes), Error>, { let mut cost = OperationCost::default(); @@ -734,6 +757,7 @@ where value_defined_cost_fn, update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) .map_ok(|(maybe_left, mut key_updates_left)| { key_updates.new_keys.append(&mut key_updates_left.new_keys); @@ -746,7 +770,8 @@ where maybe_left }) }, - value_defined_cost_fn + value_defined_cost_fn, + grove_version, ) ) } else { @@ -768,6 +793,7 @@ where value_defined_cost_fn, update_tree_value_based_on_costs, section_removal_bytes, + grove_version, ) .map_ok(|(maybe_right, mut key_updates_right)| { key_updates.new_keys.append(&mut key_updates_right.new_keys); @@ -780,14 +806,18 @@ where maybe_right }) }, - value_defined_cost_fn + value_defined_cost_fn, + grove_version ) ) } else { tree }; - let tree = cost_return_on_error!(&mut cost, tree.maybe_balance(value_defined_cost_fn)); + let tree = cost_return_on_error!( + &mut cost, + tree.maybe_balance(value_defined_cost_fn, grove_version) + ); let new_root_key = tree.tree().key(); @@ -808,11 +838,15 @@ where } /// Checks if the tree is unbalanced and if so, applies AVL tree rotation(s) - /// to rebalance the tree and its subtrees. 
Returns the root node of the + /// to re-balance the tree and its subtrees. Returns the root node of the /// balanced tree after applying the rotations. - fn maybe_balance(self, value_defined_cost_fn: Option<&V>) -> CostResult + fn maybe_balance( + self, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult where - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { let mut cost = OperationCost::default(); @@ -830,9 +864,10 @@ where self.walk_expect( left, |child| child - .rotate(!left, value_defined_cost_fn) - .map_ok(Option::Some), - value_defined_cost_fn + .rotate(!left, value_defined_cost_fn, grove_version) + .map_ok(Some), + value_defined_cost_fn, + grove_version, ) ) } else { @@ -840,41 +875,54 @@ where }; let rotate = tree - .rotate(left, value_defined_cost_fn) + .rotate(left, value_defined_cost_fn, grove_version) .unwrap_add_cost(&mut cost); rotate.wrap_with_cost(cost) } /// Applies an AVL tree rotation, a constant-time operation which only needs - /// to swap pointers in order to rebalance a tree. - fn rotate(self, left: bool, value_defined_cost_fn: Option<&V>) -> CostResult + /// to swap pointers in order to re-balance a tree. 
+ fn rotate( + self, + left: bool, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult where - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { let mut cost = OperationCost::default(); - let (tree, child) = - cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)); - let (child, maybe_grandchild) = - cost_return_on_error!(&mut cost, child.detach(!left, value_defined_cost_fn)); + let (tree, child) = cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ); + let (child, maybe_grandchild) = cost_return_on_error!( + &mut cost, + child.detach(!left, value_defined_cost_fn, grove_version) + ); // attach grandchild to self tree.attach(left, maybe_grandchild) - .maybe_balance(value_defined_cost_fn) + .maybe_balance(value_defined_cost_fn, grove_version) .flat_map_ok(|tree| { // attach self to child, return child child .attach(!left, Some(tree)) - .maybe_balance(value_defined_cost_fn) + .maybe_balance(value_defined_cost_fn, grove_version) }) .add_cost(cost) } - /// Removes the root node from the tree. Rearranges and rebalances + /// Removes the root node from the tree. Rearranges and re-balances /// descendants (if any) in order to maintain a valid tree. 
- pub fn remove(self, value_defined_cost_fn: Option<&V>) -> CostResult, Error> + pub fn remove( + self, + value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, + ) -> CostResult, Error> where - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { let mut cost = OperationCost::default(); @@ -885,19 +933,27 @@ where let maybe_tree = if has_left && has_right { // two children, promote edge of taller child - let (tree, tall_child) = - cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)); - let (_, short_child) = - cost_return_on_error!(&mut cost, tree.detach_expect(!left, value_defined_cost_fn)); + let (tree, tall_child) = cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ); + let (_, short_child) = cost_return_on_error!( + &mut cost, + tree.detach_expect(!left, value_defined_cost_fn, grove_version) + ); let promoted = cost_return_on_error!( &mut cost, - tall_child.promote_edge(!left, short_child, value_defined_cost_fn) + tall_child.promote_edge(!left, short_child, value_defined_cost_fn, grove_version) ); Some(promoted) } else if has_left || has_right { // single child, promote it Some( - cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)).1, + cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ) + .1, ) } else { // no child @@ -916,15 +972,16 @@ where left: bool, attach: Self, value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, ) -> CostResult where - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { - self.remove_edge(left, value_defined_cost_fn) + self.remove_edge(left, value_defined_cost_fn, grove_version) .flat_map_ok(|(edge, maybe_child)| { edge.attach(!left, maybe_child) .attach(left, Some(attach)) - .maybe_balance(value_defined_cost_fn) + .maybe_balance(value_defined_cost_fn, grove_version) }) } @@ -935,25 +992,30 @@ where self, left: bool, 
value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, ) -> CostResult<(Self, Option), Error> where - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { let mut cost = OperationCost::default(); if self.tree().link(left).is_some() { // this node is not the edge, recurse - let (tree, child) = - cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)); - let (edge, maybe_child) = - cost_return_on_error!(&mut cost, child.remove_edge(left, value_defined_cost_fn)); + let (tree, child) = cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ); + let (edge, maybe_child) = cost_return_on_error!( + &mut cost, + child.remove_edge(left, value_defined_cost_fn, grove_version) + ); tree.attach(left, maybe_child) - .maybe_balance(value_defined_cost_fn) + .maybe_balance(value_defined_cost_fn, grove_version) .map_ok(|tree| (edge, Some(tree))) .add_cost(cost) } else { // this node is the edge, detach its child if present - self.detach(!left, value_defined_cost_fn) + self.detach(!left, value_defined_cost_fn, grove_version) } } } @@ -969,10 +1031,11 @@ mod test { #[test] fn simple_insert() { - let batch = [(b"foo2".to_vec(), Op::Put(b"bar2".to_vec(), BasicMerkNode))]; + let grove_version = GroveVersion::latest(); + let batch = [(b"foo2".to_vec(), Put(b"bar2".to_vec(), BasicMerkNode))]; let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); let walker = maybe_walker.expect("should be Some"); @@ -985,10 +1048,11 @@ mod test { #[test] fn simple_update() { - let batch = [(b"foo".to_vec(), Op::Put(b"bar2".to_vec(), BasicMerkNode))]; + let grove_version = GroveVersion::latest(); + let batch = [(b"foo".to_vec(), Put(b"bar2".to_vec(), BasicMerkNode))]; let tree = 
TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); let walker = maybe_walker.expect("should be Some"); @@ -1002,7 +1066,8 @@ mod test { #[test] fn simple_delete() { - let batch = [(b"foo2".to_vec(), Op::Delete)]; + let grove_version = GroveVersion::latest(); + let batch = [(b"foo2".to_vec(), Delete)]; let tree = TreeNode::from_fields( b"foo".to_vec(), b"bar".to_vec(), @@ -1019,7 +1084,7 @@ mod test { ) .unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); let walker = maybe_walker.expect("should be Some"); @@ -1037,20 +1102,22 @@ mod test { #[test] fn delete_non_existent() { - let batch = [(b"foo2".to_vec(), Op::Delete)]; + let grove_version = GroveVersion::latest(); + let batch = [(b"foo2".to_vec(), Delete)]; let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .unwrap(); } #[test] fn delete_only_node() { - let batch = [(b"foo".to_vec(), Op::Delete)]; + let grove_version = GroveVersion::latest(); + let batch = [(b"foo".to_vec(), Delete)]; let tree = TreeNode::new(b"foo".to_vec(), b"bar".to_vec(), None, BasicMerkNode).unwrap(); let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); assert!(maybe_walker.is_none()); @@ -1064,10 +1131,11 @@ mod test { #[test] fn delete_deep() { - let tree = make_tree_seq(50); + let grove_version = GroveVersion::latest(); + let tree = make_tree_seq(50, 
grove_version); let batch = [del_entry(5)]; let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); maybe_walker.expect("should be Some"); @@ -1081,10 +1149,11 @@ mod test { #[test] fn delete_recursive() { - let tree = make_tree_seq(50); + let grove_version = GroveVersion::latest(); + let tree = make_tree_seq(50, grove_version); let batch = [del_entry(29), del_entry(34)]; let (maybe_walker, mut key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); maybe_walker.expect("should be Some"); @@ -1102,10 +1171,11 @@ mod test { #[test] fn delete_recursive_2() { - let tree = make_tree_seq(10); + let grove_version = GroveVersion::latest(); + let tree = make_tree_seq(10, grove_version); let batch = [del_entry(7), del_entry(9)]; let (maybe_walker, key_updates) = Walker::new(tree, PanicSource {}) - .apply_sorted_without_costs(&batch) + .apply_sorted_without_costs(&batch, grove_version) .unwrap() .expect("apply errored"); maybe_walker.expect("should be Some"); @@ -1118,12 +1188,13 @@ mod test { #[test] fn apply_empty_none() { + let grove_version = GroveVersion::latest(); let (maybe_tree, key_updates) = Walker::::apply_to::, _, _, _, _>( None, &[], PanicSource {}, &|_, _| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -1131,6 +1202,7 @@ mod test { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -1141,13 +1213,14 @@ mod test { #[test] fn insert_empty_single() { - let batch = vec![(vec![0], Op::Put(vec![1], BasicMerkNode))]; + let grove_version = GroveVersion::latest(); + let batch = vec![(vec![0], Put(vec![1], 
BasicMerkNode))]; let (maybe_tree, key_updates) = Walker::::apply_to( None, &batch, PanicSource {}, &|_, _| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -1155,6 +1228,7 @@ mod test { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -1168,13 +1242,14 @@ mod test { #[test] fn insert_updated_single() { - let batch = vec![(vec![0], Op::Put(vec![1], BasicMerkNode))]; + let grove_version = GroveVersion::latest(); + let batch = vec![(vec![0], Put(vec![1], BasicMerkNode))]; let (maybe_tree, key_updates) = Walker::::apply_to( None, &batch, PanicSource {}, &|_, _| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -1182,6 +1257,7 @@ mod test { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -1190,15 +1266,15 @@ mod test { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); let batch = vec![ - (vec![0], Op::Put(vec![2], BasicMerkNode)), - (vec![1], Op::Put(vec![2], BasicMerkNode)), + (vec![0], Put(vec![2], BasicMerkNode)), + (vec![1], Put(vec![2], BasicMerkNode)), ]; let (maybe_tree, key_updates) = Walker::::apply_to( maybe_walker, &batch, PanicSource {}, &|_, _| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -1206,6 +1282,7 @@ mod test { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -1218,17 +1295,18 @@ mod test { #[test] fn insert_updated_multiple() { + let grove_version = GroveVersion::latest(); let batch = vec![ - (vec![0], Op::Put(vec![1], 
BasicMerkNode)), - (vec![1], Op::Put(vec![2], BasicMerkNode)), - (vec![2], Op::Put(vec![3], BasicMerkNode)), + (vec![0], Put(vec![1], BasicMerkNode)), + (vec![1], Put(vec![2], BasicMerkNode)), + (vec![2], Put(vec![3], BasicMerkNode)), ]; let (maybe_tree, key_updates) = Walker::::apply_to( None, &batch, PanicSource {}, &|_, _| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -1236,6 +1314,7 @@ mod test { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -1244,16 +1323,16 @@ mod test { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); let batch = vec![ - (vec![0], Op::Put(vec![5], BasicMerkNode)), - (vec![1], Op::Put(vec![8], BasicMerkNode)), - (vec![2], Op::Delete), + (vec![0], Put(vec![5], BasicMerkNode)), + (vec![1], Put(vec![8], BasicMerkNode)), + (vec![2], Delete), ]; let (maybe_tree, key_updates) = Walker::::apply_to( maybe_walker, &batch, PanicSource {}, &|_, _| Ok(0), - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, &mut |_, _, _| Ok((false, None)), &mut |_flags, key_bytes_to_remove, value_bytes_to_remove| { Ok(( @@ -1261,6 +1340,7 @@ mod test { BasicStorageRemoval(value_bytes_to_remove), )) }, + grove_version, ) .unwrap() .expect("apply_to failed"); @@ -1274,9 +1354,10 @@ mod test { #[test] fn insert_root_single() { + let grove_version = GroveVersion::latest(); let tree = TreeNode::new(vec![5], vec![123], None, BasicMerkNode).unwrap(); - let batch = vec![(vec![6], Op::Put(vec![123], BasicMerkNode))]; - let tree = apply_memonly(tree, &batch); + let batch = vec![(vec![6], Put(vec![123], BasicMerkNode))]; + let tree = apply_memonly(tree, &batch, grove_version); assert_eq!(tree.key(), &[5]); assert!(tree.child(true).is_none()); assert_eq!(tree.child(false).expect("expected child").key(), &[6]); @@ -1284,12 
+1365,13 @@ mod test { #[test] fn insert_root_double() { + let grove_version = GroveVersion::latest(); let tree = TreeNode::new(vec![5], vec![123], None, BasicMerkNode).unwrap(); let batch = vec![ - (vec![4], Op::Put(vec![123], BasicMerkNode)), - (vec![6], Op::Put(vec![123], BasicMerkNode)), + (vec![4], Put(vec![123], BasicMerkNode)), + (vec![6], Put(vec![123], BasicMerkNode)), ]; - let tree = apply_memonly(tree, &batch); + let tree = apply_memonly(tree, &batch, grove_version); assert_eq!(tree.key(), &[5]); assert_eq!(tree.child(true).expect("expected child").key(), &[4]); assert_eq!(tree.child(false).expect("expected child").key(), &[6]); @@ -1297,13 +1379,14 @@ mod test { #[test] fn insert_rebalance() { + let grove_version = GroveVersion::latest(); let tree = TreeNode::new(vec![5], vec![123], None, BasicMerkNode).unwrap(); - let batch = vec![(vec![6], Op::Put(vec![123], BasicMerkNode))]; - let tree = apply_memonly(tree, &batch); + let batch = vec![(vec![6], Put(vec![123], BasicMerkNode))]; + let tree = apply_memonly(tree, &batch, grove_version); - let batch = vec![(vec![7], Op::Put(vec![123], BasicMerkNode))]; - let tree = apply_memonly(tree, &batch); + let batch = vec![(vec![7], Put(vec![123], BasicMerkNode))]; + let tree = apply_memonly(tree, &batch, grove_version); assert_eq!(tree.key(), &[6]); assert_eq!(tree.child(true).expect("expected child").key(), &[5]); @@ -1312,11 +1395,12 @@ mod test { #[test] fn insert_100_sequential() { + let grove_version = GroveVersion::latest(); let mut tree = TreeNode::new(vec![0], vec![123], None, BasicMerkNode).unwrap(); for i in 0..100 { - let batch = vec![(vec![i + 1], Op::Put(vec![123], BasicMerkNode))]; - tree = apply_memonly(tree, &batch); + let batch = vec![(vec![i + 1], Put(vec![123], BasicMerkNode))]; + tree = apply_memonly(tree, &batch, grove_version); } assert_eq!(tree.key(), &[63]); diff --git a/merk/src/tree/walk/fetch.rs b/merk/src/tree/walk/fetch.rs index e99df5bd..0ba657f2 100644 --- 
a/merk/src/tree/walk/fetch.rs +++ b/merk/src/tree/walk/fetch.rs @@ -2,6 +2,7 @@ #[cfg(feature = "full")] use grovedb_costs::CostResult; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use super::super::{Link, TreeNode}; @@ -20,6 +21,9 @@ pub trait Fetch { fn fetch( &self, link: &Link, - value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + grove_version: &GroveVersion, ) -> CostResult; } diff --git a/merk/src/tree/walk/mod.rs b/merk/src/tree/walk/mod.rs index adf2a07d..a84a1d4c 100644 --- a/merk/src/tree/walk/mod.rs +++ b/merk/src/tree/walk/mod.rs @@ -13,6 +13,7 @@ use grovedb_costs::{ cost_return_on_error_no_add, storage_cost::{removal::StorageRemovedBytes, StorageCost}, }; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] pub use ref_walker::RefWalker; @@ -53,9 +54,10 @@ where mut self, left: bool, value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, ) -> CostResult<(Self, Option), Error> where - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { let mut cost = OperationCost::default(); @@ -77,7 +79,8 @@ where } cost_return_on_error!( &mut cost, - self.source.fetch(&link.unwrap(), value_defined_cost_fn) + self.source + .fetch(&link.unwrap(), value_defined_cost_fn, grove_version) ) }; @@ -92,11 +95,12 @@ where self, left: bool, value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, ) -> CostResult<(Self, Self), Error> where - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { - self.detach(left, value_defined_cost_fn) + self.detach(left, value_defined_cost_fn, grove_version) .map_ok(|(walker, maybe_child)| { if let Some(child) = maybe_child { (walker, child) @@ -116,16 +120,19 @@ where left: bool, f: F, value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, ) -> CostResult where F: FnOnce(Option) -> CostResult, Error>, T: Into, - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], 
&GroveVersion) -> Option, { let mut cost = OperationCost::default(); - let (mut walker, maybe_child) = - cost_return_on_error!(&mut cost, self.detach(left, value_defined_cost_fn)); + let (mut walker, maybe_child) = cost_return_on_error!( + &mut cost, + self.detach(left, value_defined_cost_fn, grove_version) + ); let new_child = match f(maybe_child).unwrap_add_cost(&mut cost) { Ok(x) => x.map(|t| t.into()), Err(e) => return Err(e).wrap_with_cost(cost), @@ -141,16 +148,19 @@ where left: bool, f: F, value_defined_cost_fn: Option<&V>, + grove_version: &GroveVersion, ) -> CostResult where F: FnOnce(Self) -> CostResult, Error>, T: Into, - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { let mut cost = OperationCost::default(); - let (mut walker, child) = - cost_return_on_error!(&mut cost, self.detach_expect(left, value_defined_cost_fn)); + let (mut walker, child) = cost_return_on_error!( + &mut cost, + self.detach_expect(left, value_defined_cost_fn, grove_version) + ); let new_child = match f(child).unwrap_add_cost(&mut cost) { Ok(x) => x.map(|t| t.into()), Err(e) => return Err(e).wrap_with_cost(cost), @@ -370,6 +380,7 @@ where #[cfg(test)] mod test { use grovedb_costs::CostsExt; + use grovedb_version::version::GroveVersion; use super::{super::NoopCommit, *}; use crate::tree::{TreeFeatureType::BasicMerkNode, TreeNode}; @@ -381,7 +392,10 @@ mod test { fn fetch( &self, link: &Link, - _value_defined_cost_fn: Option<&impl Fn(&[u8]) -> Option>, + _value_defined_cost_fn: Option< + &impl Fn(&[u8], &GroveVersion) -> Option, + >, + _grove_version: &GroveVersion, ) -> CostResult { TreeNode::new(link.key().to_vec(), b"foo".to_vec(), None, BasicMerkNode).map(Ok) } @@ -389,6 +403,7 @@ mod test { #[test] fn walk_modified() { + let grove_version = GroveVersion::latest(); let tree = TreeNode::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerkNode) .unwrap() .attach( @@ -406,7 +421,8 @@ mod test { assert_eq!(child.expect("should have child").tree().key(), b"foo"); 
Ok(None).wrap_with_cost(Default::default()) }, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("walk failed"); @@ -415,6 +431,7 @@ mod test { #[test] fn walk_stored() { + let grove_version = GroveVersion::latest(); let mut tree = TreeNode::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerkNode) .unwrap() .attach( @@ -435,7 +452,8 @@ mod test { assert_eq!(child.expect("should have child").tree().key(), b"foo"); Ok(None).wrap_with_cost(Default::default()) }, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("walk failed"); @@ -444,6 +462,7 @@ mod test { #[test] fn walk_pruned() { + let grove_version = GroveVersion::latest(); let tree = TreeNode::from_fields( b"test".to_vec(), b"abc".to_vec(), @@ -469,7 +488,8 @@ mod test { assert_eq!(child.tree().key(), b"foo"); Ok(None).wrap_with_cost(Default::default()) }, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("walk failed"); @@ -478,6 +498,7 @@ mod test { #[test] fn walk_none() { + let grove_version = GroveVersion::latest(); let tree = TreeNode::new(b"test".to_vec(), b"abc".to_vec(), None, BasicMerkNode).unwrap(); let source = MockSource {}; @@ -490,7 +511,8 @@ mod test { assert!(child.is_none()); Ok(None).wrap_with_cost(Default::default()) }, - None::<&fn(&[u8]) -> Option>, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, ) .unwrap() .expect("walk failed"); diff --git a/merk/src/tree/walk/ref_walker.rs b/merk/src/tree/walk/ref_walker.rs index 17f4e6c4..189bc7ee 100644 --- a/merk/src/tree/walk/ref_walker.rs +++ b/merk/src/tree/walk/ref_walker.rs @@ -2,6 +2,7 @@ #[cfg(feature = "full")] use grovedb_costs::{CostResult, CostsExt, OperationCost}; +use grovedb_version::version::GroveVersion; #[cfg(feature = "full")] use super::{ @@ -50,9 +51,10 @@ where &mut self, left: bool, value_defined_cost_fn: 
Option<&V>, + grove_version: &GroveVersion, ) -> CostResult>, Error> where - V: Fn(&[u8]) -> Option, + V: Fn(&[u8], &GroveVersion) -> Option, { let link = match self.tree.link(left) { None => return Ok(None).wrap_with_cost(Default::default()), @@ -64,7 +66,7 @@ where Link::Reference { .. } => { let load_res = self .tree - .load(left, &self.source, value_defined_cost_fn) + .load(left, &self.source, value_defined_cost_fn, grove_version) .unwrap_add_cost(&mut cost); if let Err(e) = load_res { return Err(e).wrap_with_cost(cost); diff --git a/node-grove/Cargo.toml b/node-grove/Cargo.toml index 1e18a6f3..7656ea7d 100644 --- a/node-grove/Cargo.toml +++ b/node-grove/Cargo.toml @@ -11,6 +11,7 @@ crate-type = ["cdylib"] [dependencies] grovedb = { path = "../grovedb", features = ["full", "estimated_costs"] } +grovedb-version = { path = "../grovedb-version" } [dependencies.neon] version = "0.10.1" diff --git a/node-grove/src/lib.rs b/node-grove/src/lib.rs index d3d6e0a6..e9e4ac0a 100644 --- a/node-grove/src/lib.rs +++ b/node-grove/src/lib.rs @@ -1,31 +1,3 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! GroveDB binding for Node.JS #![deny(missing_docs)] @@ -35,6 +7,7 @@ mod converter; use std::{option::Option::None, path::Path, sync::mpsc, thread}; use grovedb::{GroveDb, Transaction, TransactionArg}; +use grovedb_version::version::GroveVersion; use neon::prelude::*; type DbCallback = Box FnOnce(&'a GroveDb, TransactionArg, &Channel) + Send>; @@ -348,6 +321,7 @@ impl GroveDbWrapper { path.as_slice(), &key, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs @@ -397,6 +371,7 @@ impl GroveDbWrapper { &key, None, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs; @@ -446,6 +421,7 @@ impl GroveDbWrapper { element, None, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs; @@ -488,6 +464,7 @@ impl GroveDbWrapper { &key, element, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs; @@ -666,6 +643,7 @@ impl GroveDbWrapper { true, true, using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), ) .unwrap(); // Todo: Costs; @@ -760,7 +738,10 @@ impl GroveDbWrapper { db.send_to_db_thread(move |grove_db: &GroveDb, transaction, channel| { let result = grove_db - .root_hash(using_transaction.then_some(transaction).flatten()) + .root_hash( + using_transaction.then_some(transaction).flatten(), + GroveVersion::latest(), + ) .unwrap(); // Todo: Costs; channel.send(move |mut task_context| { diff --git a/tutorials/src/bin/delete.rs b/tutorials/src/bin/delete.rs index 5ff6beab..063243fd 100644 --- a/tutorials/src/bin/delete.rs +++ b/tutorials/src/bin/delete.rs @@ -20,6 
+20,7 @@ fn main() { Element::Item(val1.to_vec(), None), None, None, + grove_version, ) .unwrap() .expect("successful key1 insert"); @@ -31,13 +32,14 @@ fn main() { Element::Item(val2.to_vec(), None), None, None, + grove_version, ) .unwrap() .expect("successful key2 insert"); // Check the key-values are there - let result1 = db.get(root_path, key1, None).unwrap(); - let result2 = db.get(root_path, key2, None).unwrap(); + let result1 = db.get(root_path, key1, None, grove_version).unwrap(); + let result2 = db.get(root_path, key2, None, grove_version).unwrap(); println!("Before deleting, we have key1: {:?}", result1); println!("Before deleting, we have key2: {:?}", result2); @@ -50,8 +52,8 @@ fn main() { .expect("successfully deleted key2"); // Check the key-values again - let result3 = db.get(root_path, key1, None).unwrap(); - let result4 = db.get(root_path, key2, None).unwrap(); + let result3 = db.get(root_path, key1, None, grove_version).unwrap(); + let result4 = db.get(root_path, key2, None, grove_version).unwrap(); println!("After deleting, we have key1: {:?}", result3); println!("After deleting, we have key2: {:?}", result4); } diff --git a/tutorials/src/bin/insert.rs b/tutorials/src/bin/insert.rs index 5b1a4cd1..3d9f9b2a 100644 --- a/tutorials/src/bin/insert.rs +++ b/tutorials/src/bin/insert.rs @@ -20,6 +20,7 @@ fn main() { Element::Item(val1.to_vec(), None), None, None, + grove_version, ) .unwrap() .expect("successful key1 insert"); @@ -31,6 +32,7 @@ fn main() { Element::Item(val2.to_vec(), None), None, None, + grove_version, ) .unwrap() .expect("successful key2 insert"); @@ -42,10 +44,10 @@ fn main() { // function to get them from the RocksDB backing store. 
// Get value 1 - let result1 = db.get(root_path, key1, None).unwrap(); + let result1 = db.get(root_path, key1, None, grove_version).unwrap(); // Get value 2 - let result2 = db.get(root_path, key2, None).unwrap(); + let result2 = db.get(root_path, key2, None, grove_version).unwrap(); // Print the values to terminal println!("{:?}", result1); diff --git a/tutorials/src/bin/proofs.rs b/tutorials/src/bin/proofs.rs index d56abbda..02596919 100644 --- a/tutorials/src/bin/proofs.rs +++ b/tutorials/src/bin/proofs.rs @@ -33,14 +33,14 @@ fn main() { .expect("expected successful get_path_query"); // Generate proof. - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db.prove_query(&path_query, None, grove_version).unwrap().unwrap(); // Get hash from query proof and print to terminal along with GroveDB root hash. - let (hash, _result_set) = GroveDb::verify_query(&proof, &path_query).unwrap(); + let (hash, _result_set) = GroveDb::verify_query(&proof, &path_query, grove_version).unwrap(); // See if the query proof hash matches the GroveDB root hash println!("Does the hash generated from the query proof match the GroveDB root hash?"); - if hash == db.root_hash(None).unwrap().unwrap() { + if hash == db.root_hash(None, grove_version).unwrap().unwrap() { println!("Yes"); } else { println!("No"); @@ -52,13 +52,13 @@ fn populate(db: &GroveDb) { // Put an empty subtree into the root tree nodes at KEY1. // Call this SUBTREE1. - db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE1 insert"); // Put an empty subtree into subtree1 at KEY2. // Call this SUBTREE2. 
- db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE2 insert"); @@ -71,6 +71,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values"); diff --git a/tutorials/src/bin/query-complex.rs b/tutorials/src/bin/query-complex.rs index 131faa92..b4fb78cf 100644 --- a/tutorials/src/bin/query-complex.rs +++ b/tutorials/src/bin/query-complex.rs @@ -78,13 +78,13 @@ fn populate(db: &GroveDb) { let root_path: &[&[u8]] = &[]; // Put an empty subtree into the root tree nodes at KEY1. // Call this SUBTREE1. - db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE1 insert"); // Put an empty subtree into subtree1 at KEY2. // Call this SUBTREE2. 
- db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE2 insert"); @@ -97,6 +97,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values in SUBTREE2"); @@ -115,6 +116,7 @@ fn populate(db: &GroveDb) { Element::empty_tree(), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successful SUBTREE3 insert"); @@ -128,6 +130,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values in SUBTREE3"); @@ -141,6 +144,7 @@ fn populate(db: &GroveDb) { Element::empty_tree(), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successful SUBTREE4 insert"); @@ -153,6 +157,7 @@ fn populate(db: &GroveDb) { Element::empty_tree(), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successful SUBTREE5 insert"); @@ -166,6 +171,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values in SUBTREE5"); diff --git a/tutorials/src/bin/query-simple.rs b/tutorials/src/bin/query-simple.rs index 6bc7a2fd..ab888873 100644 --- a/tutorials/src/bin/query-simple.rs +++ b/tutorials/src/bin/query-simple.rs @@ -48,13 +48,13 @@ fn populate(db: &GroveDb) { let root_path: &[&[u8]] = &[]; // Put an empty subtree into the root tree nodes at KEY1. // Call this SUBTREE1. - db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(root_path, KEY1, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE1 insert"); // Put an empty subtree into subtree1 at KEY2. // Call this SUBTREE2. 
- db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(&[KEY1], KEY2, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successful SUBTREE2 insert"); @@ -67,6 +67,7 @@ fn populate(db: &GroveDb) { Element::new_item(i_vec.clone()), INSERT_OPTIONS, None, + grove_version, ) .unwrap() .expect("successfully inserted values"); diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs index 6b5f0626..5ed6ab5b 100644 --- a/tutorials/src/bin/replication.rs +++ b/tutorials/src/bin/replication.rs @@ -89,15 +89,15 @@ fn main() { let db_destination = create_empty_db(path_destination.clone()); println!("\n######### root_hashes:"); - let root_hash_source = db_source.root_hash(None).unwrap().unwrap(); + let root_hash_source = db_source.root_hash(None, grove_version).unwrap().unwrap(); println!("root_hash_source: {:?}", hex::encode(root_hash_source)); - let root_hash_checkpoint_0 = db_checkpoint_0.root_hash(None).unwrap().unwrap(); + let root_hash_checkpoint_0 = db_checkpoint_0.root_hash(None, grove_version).unwrap().unwrap(); println!("root_hash_checkpoint_0: {:?}", hex::encode(root_hash_checkpoint_0)); - let root_hash_destination = db_destination.root_hash(None).unwrap().unwrap(); + let root_hash_destination = db_destination.root_hash(None, grove_version).unwrap().unwrap(); println!("root_hash_destination: {:?}", hex::encode(root_hash_destination)); println!("\n######### source_subtree_metadata of db_source"); - let subtrees_metadata_source = db_source.get_subtrees_metadata(None).unwrap(); + let subtrees_metadata_source = db_source.get_subtrees_metadata(None, grove_version).unwrap(); println!("{:?}", subtrees_metadata_source); println!("\n######### db_checkpoint_0 -> db_destination state sync"); @@ -107,7 +107,7 @@ fn main() { db_destination.commit_transaction(tx).unwrap().expect("expected to commit transaction"); println!("\n######### verify db_destination"); - let incorrect_hashes = 
db_destination.verify_grovedb(None).unwrap(); + let incorrect_hashes = db_destination.verify_grovedb(None, grove_version).unwrap(); if incorrect_hashes.len() > 0 { println!("DB verification failed!"); } @@ -116,11 +116,11 @@ fn main() { } println!("\n######### root_hashes:"); - let root_hash_source = db_source.root_hash(None).unwrap().unwrap(); + let root_hash_source = db_source.root_hash(None, grove_version).unwrap().unwrap(); println!("root_hash_source: {:?}", hex::encode(root_hash_source)); - let root_hash_checkpoint_0 = db_checkpoint_0.root_hash(None).unwrap().unwrap(); + let root_hash_checkpoint_0 = db_checkpoint_0.root_hash(None, grove_version).unwrap().unwrap(); println!("root_hash_checkpoint_0: {:?}", hex::encode(root_hash_checkpoint_0)); - let root_hash_destination = db_destination.root_hash(None).unwrap().unwrap(); + let root_hash_destination = db_destination.root_hash(None, grove_version).unwrap().unwrap(); println!("root_hash_destination: {:?}", hex::encode(root_hash_destination)); let query_path = &[MAIN_KEY, KEY_INT_0]; @@ -136,7 +136,7 @@ fn main() { fn insert_empty_tree_db(db: &GroveDb, path: &[&[u8]], key: &[u8]) { - db.insert(path, key, Element::empty_tree(), INSERT_OPTIONS, None) + db.insert(path, key, Element::empty_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successfully inserted tree"); } @@ -150,6 +150,7 @@ fn insert_range_values_db(db: &GroveDb, path: &[&[u8]], min_i: u32, max_i: u32, Element::new_item(i_vec.to_vec()), INSERT_OPTIONS, Some(&transaction), + grove_version, ) .unwrap() .expect("successfully inserted values"); @@ -172,6 +173,7 @@ fn insert_range_ref_double_values_db(db: &GroveDb, path: &[&[u8]], ref_key: &[u8 ])), INSERT_OPTIONS, Some(&transaction), + grove_version, ) .unwrap() .expect("successfully inserted values"); @@ -180,7 +182,7 @@ fn insert_range_ref_double_values_db(db: &GroveDb, path: &[&[u8]], ref_key: &[u8 fn insert_empty_sum_tree_db(db: &GroveDb, path: &[&[u8]], key: &[u8]) { - db.insert(path,
key, Element::empty_sum_tree(), INSERT_OPTIONS, None) + db.insert(path, key, Element::empty_sum_tree(), INSERT_OPTIONS, None, grove_version) .unwrap() .expect("successfully inserted tree"); } @@ -197,6 +199,7 @@ fn insert_sum_element_db(db: &GroveDb, path: &[&[u8]], min_i: u32, max_i: u32, t Element::new_sum_item(value as SumValue), INSERT_OPTIONS, Some(&transaction), + grove_version, ) .unwrap() .expect("successfully inserted values"); @@ -229,11 +232,11 @@ fn query_db(db: &GroveDb, path: &[&[u8]], key: Vec<u8>) { println!(">> {:?}", e); } - let proof = db.prove_query(&path_query, None).unwrap().unwrap(); + let proof = db.prove_query(&path_query, None, grove_version).unwrap().unwrap(); // Get hash from query proof and print to terminal along with GroveDB root hash. - let (verify_hash, _) = GroveDb::verify_query(&proof, &path_query).unwrap(); + let (verify_hash, _) = GroveDb::verify_query(&proof, &path_query, grove_version).unwrap(); println!("verify_hash: {:?}", hex::encode(verify_hash)); - if verify_hash == db.root_hash(None).unwrap().unwrap() { + if verify_hash == db.root_hash(None, grove_version).unwrap().unwrap() { println!("Query verified"); } else { println!("Verification FAILED"); }; } @@ -244,8 +247,8 @@ fn sync_db_demo( state_sync_info: MultiStateSyncInfo, target_tx: &Transaction, ) -> Result<(), grovedb::Error> { - let app_hash = source_db.root_hash(None).value.unwrap(); - let mut state_sync_info = target_db.start_snapshot_syncing(state_sync_info, app_hash, target_tx, CURRENT_STATE_SYNC_VERSION)?; + let app_hash = source_db.root_hash(None, grove_version).value.unwrap(); + let mut state_sync_info = target_db.start_snapshot_syncing(state_sync_info, app_hash, target_tx, CURRENT_STATE_SYNC_VERSION, grove_version)?; let mut chunk_queue : VecDeque<Vec<u8>> = VecDeque::new(); @@ -253,8 +256,8 @@ fn sync_db_demo( chunk_queue.push_back(app_hash.to_vec()); while let Some(chunk_id) = chunk_queue.pop_front() { - let ops = source_db.fetch_chunk(chunk_id.as_slice(), None,
CURRENT_STATE_SYNC_VERSION)?; - let (more_chunks, new_state_sync_info) = target_db.apply_chunk(state_sync_info, chunk_id.as_slice(), ops, target_tx, CURRENT_STATE_SYNC_VERSION)?; + let ops = source_db.fetch_chunk(chunk_id.as_slice(), None, CURRENT_STATE_SYNC_VERSION, grove_version)?; + let (more_chunks, new_state_sync_info) = target_db.apply_chunk(state_sync_info, chunk_id.as_slice(), ops, target_tx, CURRENT_STATE_SYNC_VERSION, grove_version)?; state_sync_info = new_state_sync_info; chunk_queue.extend(more_chunks); }