diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs
index 485941644..ad90d7825 100644
--- a/grovedb/src/lib.rs
+++ b/grovedb/src/lib.rs
@@ -249,7 +249,7 @@ impl GroveDb {
             Some(_) => &self.temp_subtrees,
         };
 
-        let prefixes: Vec<Vec<u8>> = subtrees.keys().map(|x| x.clone()).collect();
+        let prefixes: Vec<Vec<u8>> = subtrees.keys().cloned().collect();
 
         // TODO: make StorageOrTransaction which will has the access to either storage
        // or transaction
@@ -299,8 +299,7 @@ impl GroveDb {
                 .expect("`root_leaf_keys` must be in sync with `subtrees`");
             leaf_hashes[*root_leaf_idx] = subtree_merk.root_hash();
         }
-        let res = MerkleTree::<Sha256>::from_leaves(&leaf_hashes);
-        res
+        MerkleTree::<Sha256>::from_leaves(&leaf_hashes)
     }
 
     pub fn elements_iterator(
@@ -341,9 +340,9 @@ impl GroveDb {
             if path_slice.is_empty() {
                 // Hit the root tree
                 match transaction {
-                    None => self.root_tree = Self::build_root_tree(&subtrees, &root_leaf_keys),
+                    None => self.root_tree = Self::build_root_tree(subtrees, root_leaf_keys),
                     Some(_) => {
-                        self.temp_root_tree = Self::build_root_tree(&subtrees, &root_leaf_keys)
+                        self.temp_root_tree = Self::build_root_tree(subtrees, root_leaf_keys)
                     }
                 };
                 break;
@@ -367,19 +366,19 @@ impl GroveDb {
     /// A helper method to build a prefix to rocksdb keys or identify a subtree
     /// in `subtrees` map by tree path;
     fn compress_subtree_key(path: &[&[u8]], key: Option<&[u8]>) -> Vec<u8> {
-        let segments_iter = path.into_iter().map(|x| *x).chain(key.into_iter());
+        let segments_iter = path.iter().copied().chain(key.into_iter());
         let mut segments_count = path.len();
         if key.is_some() {
             segments_count += 1;
         }
         let mut res = segments_iter.fold(Vec::<u8>::new(), |mut acc, p| {
-            acc.extend(p.into_iter());
+            acc.extend(p.iter());
             acc
         });
         res.extend(segments_count.to_ne_bytes());
-        path.into_iter()
-            .map(|x| *x)
+        path.iter()
+            .copied()
             .chain(key.into_iter())
             .fold(&mut res, |acc, p| {
                 acc.extend(p.len().to_ne_bytes());
@@ -463,7 +462,7 @@ impl GroveDb {
     /// Returns true if transaction is started. For more details on the
     /// transaction usage, please check [`GroveDb::start_transaction`]
     pub fn is_transaction_started(&self) -> bool {
-        return self.is_readonly;
+        self.is_readonly
     }
 
     /// Commits previously started db transaction. For more details on the
diff --git a/grovedb/src/operations/delete.rs b/grovedb/src/operations/delete.rs
index 4cccafcc0..b4c1f97d8 100644
--- a/grovedb/src/operations/delete.rs
+++ b/grovedb/src/operations/delete.rs
@@ -30,7 +30,7 @@ impl GroveDb {
             let mut merk = subtrees
                 .get_mut(&Self::compress_subtree_key(path, None))
                 .ok_or(Error::InvalidPath("no subtree found under that path"))?;
-            Element::delete(&mut merk, key.clone(), transaction)?;
+            Element::delete(merk, key.clone(), transaction)?;
         }
 
         if let Element::Tree(_) = element {
@@ -73,7 +73,7 @@ impl GroveDb {
         transaction: Option<&OptimisticTransactionDBTransaction>,
     ) -> Result<Vec<Vec<Vec<u8>>>, Error> {
         let mut queue: Vec<Vec<Vec<u8>>> = vec![path.clone()];
-        let mut result: Vec<Vec<Vec<u8>>> = vec![path.clone()];
+        let mut result: Vec<Vec<Vec<u8>>> = vec![path];
 
         while let Some(q) = queue.pop() {
             // TODO: eventually we need to do something about this nested slices
diff --git a/grovedb/src/operations/get.rs b/grovedb/src/operations/get.rs
index 661a75d0f..aea322b4d 100644
--- a/grovedb/src/operations/get.rs
+++ b/grovedb/src/operations/get.rs
@@ -1,18 +1,9 @@
-use std::{
-    collections::{HashMap, HashSet},
-    ops::Range,
-};
+use std::collections::{HashMap, HashSet};
 
-use merk::{
-    proofs::{query::QueryItem, Query},
-    Merk,
-};
-use storage::{
-    rocksdb_storage::{OptimisticTransactionDBTransaction, PrefixedRocksDbStorage},
-    RawIterator,
-};
+use merk::Merk;
+use storage::rocksdb_storage::{OptimisticTransactionDBTransaction, PrefixedRocksDbStorage};
 
-use crate::{subtree::raw_decode, Element, Error, GroveDb, PathQuery, SizedQuery};
+use crate::{Element, Error, GroveDb, PathQuery};
 
 /// Limit of possible indirections
 pub(crate) const MAX_REFERENCE_HOPS: usize = 10;
@@ -83,7 +74,7 @@ impl GroveDb {
         let merk = subtrees
             .get(&Self::compress_subtree_key(path, None))
             .ok_or(Error::InvalidPath("no subtree found under that path"))?;
-        Element::get(&merk, key)
+        Element::get(merk, key)
     }
 
     pub fn get_path_queries(
@@ -102,7 +93,7 @@ impl GroveDb {
                         Err(Error::InvalidQuery("the reference must result in an item"))
                     }
                 }
-                other => Err(Error::InvalidQuery("path_queries can only refer to references")),
+                _ => Err(Error::InvalidQuery("path_queries can only refer to references")),
             }
         }).collect::<Result<Vec<Vec<u8>>, Error>>()?;
         Ok(results)
diff --git a/grovedb/src/operations/insert.rs b/grovedb/src/operations/insert.rs
index ae314b6b8..d58d2c379 100644
--- a/grovedb/src/operations/insert.rs
+++ b/grovedb/src/operations/insert.rs
@@ -11,7 +11,7 @@ fn create_merk_with_prefix(
     path: &[&[u8]],
     key: &[u8],
 ) -> Result<(Vec<u8>, Merk<PrefixedRocksDbStorage>), Error> {
-    let subtree_prefix = GroveDb::compress_subtree_key(&path, Some(&key));
+    let subtree_prefix = GroveDb::compress_subtree_key(path, Some(key));
     Ok((
         subtree_prefix.clone(),
         Merk::open(PrefixedRocksDbStorage::new(db, subtree_prefix)?)
@@ -94,14 +94,14 @@ impl GroveDb {
         };
 
         // Open Merk and put handle into `subtrees` dictionary accessible by its
         // compressed path
-        let (subtree_prefix, subtree_merk) = create_merk_with_prefix(self.db.clone(), &[], &key)?;
+        let (subtree_prefix, subtree_merk) = create_merk_with_prefix(self.db.clone(), &[], key)?;
         subtrees.insert(subtree_prefix.clone(), subtree_merk);
 
         // Update root leafs index to persist rs-merkle structure later
         if root_leaf_keys.get(&subtree_prefix).is_none() {
             root_leaf_keys.insert(subtree_prefix, root_tree.leaves_len());
         }
-        self.propagate_changes(&[&key], transaction)?;
+        self.propagate_changes(&[key], transaction)?;
         Ok(())
     }
 
@@ -137,7 +137,7 @@ impl GroveDb {
             .get_mut(&compressed_path)
             .expect("merk object must exist in `subtrees`");
         // need to mark key as taken in the upper tree
-        element.insert(&mut merk, key, transaction)?;
+        element.insert(merk, key, transaction)?;
         self.propagate_changes(path, transaction)?;
         Ok(())
     }
diff --git a/grovedb/src/operations/proof.rs b/grovedb/src/operations/proof.rs
index 1b0c00edf..09cb17138 100644
--- a/grovedb/src/operations/proof.rs
+++ b/grovedb/src/operations/proof.rs
@@ -1,9 +1,9 @@
 use std::collections::HashMap;
 
-use merk::proofs::query::{Map, QueryItem};
+use merk::proofs::query::Map;
 use rs_merkle::{algorithms::Sha256, MerkleProof};
 
-use crate::{Element, Error, GroveDb, PathQuery, Proof, Query, SizedQuery};
+use crate::{Element, Error, GroveDb, PathQuery, Proof, Query};
 
 impl GroveDb {
     pub fn proof(&mut self, proof_queries: Vec<PathQuery>) -> Result<Vec<u8>, Error> {
@@ -56,11 +56,11 @@ impl GroveDb {
         // Construct the leaf proofs
         for proof_query in proof_queries {
-            let mut path = proof_query.path;
+            let path = proof_query.path;
 
             // If there is a subquery with a limit it's possible that we only need a reduced
             // proof for this leaf.
-            let mut reduced_proof_query = proof_query;
+            let reduced_proof_query = proof_query;
 
             // First we must get elements
 
@@ -164,14 +164,14 @@ impl GroveDb {
             // and store hash + index for later root proof execution
             let root_key = &path[0];
             let (hash, proof_result_map) = GroveDb::execute_path(&path, &proof.proofs)?;
-            let compressed_root_key_path = GroveDb::compress_subtree_key(&[], Some(&root_key));
+            let compressed_root_key_path = GroveDb::compress_subtree_key(&[], Some(root_key));
             let compressed_query_path = GroveDb::compress_subtree_key(&path, None);
 
             let index = proof
                 .root_leaf_keys
                 .get(&compressed_root_key_path)
                 .ok_or(Error::InvalidPath("Bad path"))?;
-            if !root_keys_index.contains(&index) {
+            if !root_keys_index.contains(index) {
                 root_keys_index.push(*index);
                 root_hashes.push(hash);
             }
diff --git a/grovedb/src/subtree.rs b/grovedb/src/subtree.rs
index 9ab72f526..f798a7222 100644
--- a/grovedb/src/subtree.rs
+++ b/grovedb/src/subtree.rs
@@ -1,10 +1,7 @@
 //! Module for subtrees handling.
 //! Subtrees handling is isolated so basically this module is about adapting
 //! Merk API to GroveDB needs.
-use std::{
-    collections::HashMap,
-    ops::{Range, RangeFrom, RangeTo, RangeToInclusive},
-};
+use std::collections::HashMap;
 
 use merk::{
     proofs::{query::QueryItem, Query},
@@ -57,7 +54,7 @@ impl Element {
     /// Merk should be loaded by this moment
     pub fn get(merk: &Merk<PrefixedRocksDbStorage>, key: &[u8]) -> Result<Element, Error> {
         let element = bincode::deserialize(
-            merk.get(&key)
+            merk.get(key)
                 .map_err(|e| Error::CorruptedData(e.to_string()))?
                 .ok_or(Error::InvalidPath("key not found in Merk"))?
                 .as_slice(),
@@ -71,7 +68,7 @@ impl Element {
         query: &Query,
     ) -> Result<Vec<Element>, Error> {
         let sized_query = SizedQuery::new(query.clone(), None, None, true);
-        let (elements, skipped) = Element::get_sized_query(merk, &sized_query)?;
+        let (elements, _) = Element::get_sized_query(merk, &sized_query)?;
         Ok(elements)
     }
 
@@ -92,10 +89,8 @@ impl Element {
             if limit.is_some() {
                 *limit = Some(limit.unwrap() - 1);
             }
-        } else {
-            if offset.is_some() {
-                *offset = Some(offset.unwrap() - 1);
-            }
+        } else if offset.is_some() {
+            *offset = Some(offset.unwrap() - 1);
         }
         Ok(())
     }
@@ -140,10 +135,10 @@ impl Element {
                     let (mut sub_elements, skipped) =
                         Element::get_sized_query(inner_merk, &inner_query)?;
                     if let Some(limit) = limit {
-                        *limit = *limit - sub_elements.len() as u16;
+                        *limit -= sub_elements.len() as u16;
                     }
                     if let Some(offset) = offset {
-                        *offset = *offset - skipped;
+                        *offset -= skipped;
                     }
                     results.append(&mut sub_elements);
                 } else {
@@ -155,10 +150,8 @@ impl Element {
                     if limit.is_some() {
                         *limit = Some(limit.unwrap() - 1);
                     }
-                } else {
-                    if offset.is_some() {
-                        *offset = Some(offset.unwrap() - 1);
-                    }
+                } else if offset.is_some() {
+                    *offset = Some(offset.unwrap() - 1);
                 }
             }
         }
@@ -169,10 +162,8 @@ impl Element {
             if limit.is_some() {
                 *limit = Some(limit.unwrap() - 1);
             }
-        } else {
-            if offset.is_some() {
-                *offset = Some(offset.unwrap() - 1);
-            }
+        } else if offset.is_some() {
+            *offset = Some(offset.unwrap() - 1);
         }
     }
 }
@@ -237,7 +228,7 @@ impl Element {
                         &mut results,
                         &mut limit,
                         &mut offset,
-                    );
+                    )?;
                 }
             } else {
                 // this is a query on a range
@@ -265,7 +256,7 @@ impl Element {
                     &mut results,
                     &mut limit,
                     &mut offset,
-                );
+                )?;
                 if sized_query.left_to_right {
                     iter.next();
                 } else {
diff --git a/grovedb/src/tests.rs b/grovedb/src/tests.rs
index 46f5e0ec2..5b58531a7 100644
--- a/grovedb/src/tests.rs
+++ b/grovedb/src/tests.rs
@@ -347,26 +347,26 @@ fn test_proof_construction() {
     // Insert level 2 nodes
     let mut inner_tree = TempMerk::new();
     let value_one = Element::Item(b"value1".to_vec());
-    value_one.insert(&mut inner_tree, b"key1".to_vec(), None);
+    value_one.insert(&mut inner_tree, b"key1".to_vec(), None).unwrap();
     let value_two = Element::Item(b"value2".to_vec());
-    value_two.insert(&mut inner_tree, b"key2".to_vec(), None);
+    value_two.insert(&mut inner_tree, b"key2".to_vec(), None).unwrap();
     let mut inner_tree_2 = TempMerk::new();
     let value_three = Element::Item(b"value3".to_vec());
-    value_three.insert(&mut inner_tree_2, b"key3".to_vec(), None);
+    value_three.insert(&mut inner_tree_2, b"key3".to_vec(), None).unwrap();
     let mut inner_tree_3 = TempMerk::new();
     let value_four = Element::Item(b"value4".to_vec());
-    value_four.insert(&mut inner_tree_3, b"key4".to_vec(), None);
+    value_four.insert(&mut inner_tree_3, b"key4".to_vec(), None).unwrap();
 
     // Insert level 1 nodes
     let mut test_leaf = TempMerk::new();
     let inner_tree_root = Element::Tree(inner_tree.root_hash());
-    inner_tree_root.insert(&mut test_leaf, b"innertree".to_vec(), None);
+    inner_tree_root.insert(&mut test_leaf, b"innertree".to_vec(), None).unwrap();
     let mut another_test_leaf = TempMerk::new();
     let inner_tree_2_root = Element::Tree(inner_tree_2.root_hash());
-    inner_tree_2_root.insert(&mut another_test_leaf, b"innertree2".to_vec(), None);
+    inner_tree_2_root.insert(&mut another_test_leaf, b"innertree2".to_vec(), None).unwrap();
     let inner_tree_3_root = Element::Tree(inner_tree_3.root_hash());
-    inner_tree_3_root.insert(&mut another_test_leaf, b"innertree3".to_vec(), None);
+    inner_tree_3_root.insert(&mut another_test_leaf, b"innertree3".to_vec(), None).unwrap();
 
     // Insert root nodes
     let leaves = [test_leaf.root_hash(), another_test_leaf.root_hash()];
     let root_tree = MerkleTree::<Sha256>::from_leaves(&leaves);
@@ -738,7 +738,7 @@ fn test_is_empty_tree() {
         b"innertree".to_vec(),
         Element::empty_tree(),
         None,
-    );
+    ).unwrap();
 
     assert_eq!(
         db.is_empty_tree(&[TEST_LEAF, b"innertree"], None)
@@ -752,7 +752,7 @@
         b"key1".to_vec(),
         Element::Item(b"hello".to_vec()),
         None,
-    );
+    ).unwrap();
     assert_eq!(
         db.is_empty_tree(&[TEST_LEAF, b"innertree"], None)
             .expect("path is valid tree"),
@@ -765,7 +765,7 @@ fn transaction_insert_item_with_transaction_should_use_transaction() {
     let item_key = b"key3".to_vec();
 
     let mut db = make_grovedb();
-    db.start_transaction();
+    db.start_transaction().unwrap();
 
     let storage = db.storage();
     let transaction = storage.transaction();
@@ -795,7 +795,7 @@ fn transaction_insert_item_with_transaction_should_use_transaction() {
 
     // Test that commit works
     // transaction.commit();
-    db.commit_transaction(transaction);
+    db.commit_transaction(transaction).unwrap();
 
     // Check that the change was committed
     let result = db
@@ -811,7 +811,7 @@ fn transaction_insert_tree_with_transaction_should_use_transaction() {
     let mut db = make_grovedb();
     let storage = db.storage();
     let db_transaction = storage.transaction();
-    db.start_transaction();
+    db.start_transaction().unwrap();
 
     // Check that there's no such key in the DB
     let result = db.get(&[TEST_LEAF], &subtree_key, None);
@@ -833,7 +833,7 @@ fn transaction_insert_tree_with_transaction_should_use_transaction() {
         .expect("Expected to work");
     assert_eq!(result_with_transaction, Element::empty_tree());
 
-    db.commit_transaction(db_transaction);
+    db.commit_transaction(db_transaction).unwrap();
 
     let result = db
         .get(&[TEST_LEAF], &subtree_key, None)
@@ -846,7 +846,7 @@ fn transaction_insert_should_return_error_when_trying_to_insert_while_transactio
     let item_key = b"key3".to_vec();
 
     let mut db = make_grovedb();
-    db.start_transaction();
+    db.start_transaction().unwrap();
 
     let storage = db.storage();
     let transaction = storage.transaction();
@@ -855,7 +855,7 @@ fn transaction_insert_should_return_error_when_trying_to_insert_while_transactio
     let result = db.insert(&[TEST_LEAF], item_key.clone(), element1.clone(), None);
     assert!(matches!(result, Err(Error::DbIsInReadonlyMode)));
 
-    db.commit_transaction(transaction);
+    db.commit_transaction(transaction).unwrap();
 
     // Check that writes are unlocked after the transaction is committed
     let result = db.insert(&[TEST_LEAF], item_key.clone(), element1.clone(), None);
@@ -868,7 +868,7 @@ fn transaction_should_be_aborted_when_rollback_is_called() {
 
     let mut db = make_grovedb();
 
-    db.start_transaction();
+    db.start_transaction().unwrap();
 
     let storage = db.storage();
     let transaction = storage.transaction();
@@ -883,7 +883,7 @@ fn transaction_should_be_aborted_when_rollback_is_called() {
 
     assert!(matches!(result, Ok(())));
 
-    db.rollback_transaction(&transaction);
+    db.rollback_transaction(&transaction).unwrap();
 
     let result = db.get(&[TEST_LEAF], &item_key.clone(), Some(&transaction));
     assert!(matches!(result, Err(Error::InvalidPath(_))));
@@ -893,7 +893,7 @@ fn transaction_is_started_should_return_true_if_transaction_was_started() {
     let mut db = make_grovedb();
 
-    db.start_transaction();
+    db.start_transaction().unwrap();
 
     let result = db.is_transaction_started();
     assert!(result, "transaction is not started");
@@ -901,7 +901,7 @@ fn transaction_is_started_should_return_true_if_transaction_was_started() {
 #[test]
 fn transaction_is_started_should_return_false_if_transaction_was_not_started() {
-    let mut db = make_grovedb();
+    let db = make_grovedb();
 
     let result = db.is_transaction_started();
 
@@ -912,21 +912,21 @@ fn transaction_should_be_aborted() {
     let mut db = make_grovedb();
 
-    db.start_transaction();
+    db.start_transaction().unwrap();
 
     let storage = db.storage();
     let transaction = storage.transaction();
 
     let item_key = b"key3".to_vec();
     let element = Element::Item(b"ayy".to_vec());
 
-    let result = db.insert(
+    db.insert(
         &[TEST_LEAF],
         item_key.clone(),
         element.clone(),
         Some(&transaction),
-    );
+    ).unwrap();
 
-    db.abort_transaction(transaction);
+    db.abort_transaction(transaction).unwrap();
 
     // Transaction should be closed
     assert!(!db.is_transaction_started());
@@ -1238,7 +1238,7 @@ fn test_aux_with_transaction() {
     let mut db = make_grovedb();
     let storage = db.storage();
     let db_transaction = storage.transaction();
-    db.start_transaction();
+    db.start_transaction().unwrap();
 
     // Insert a regular data with aux data in the same transaction
     db.insert(
@@ -1421,7 +1421,7 @@ fn test_get_range_query_with_non_unique_subquery() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1449,16 +1449,16 @@ fn test_get_range_query_with_unique_subquery() {
 
     let path_query = PathQuery::new_unsized(&path, query.clone(), Some(subquery_key), None);
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
     assert_eq!(elements.len(), 4);
 
-    let mut first_value = (1988 as u32).to_be_bytes().to_vec();
+    let first_value = (1988 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[0], first_value);
 
-    let mut last_value = (1991 as u32).to_be_bytes().to_vec();
+    let last_value = (1991 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[elements.len() - 1], last_value);
 }
 
@@ -1475,16 +1475,16 @@ fn test_get_range_query_with_unique_subquery_on_references() {
 
     let path_query = PathQuery::new_unsized(&path, query.clone(), Some(subquery_key), None);
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
     assert_eq!(elements.len(), 4);
 
-    let mut first_value = (1988 as u32).to_be_bytes().to_vec();
+    let first_value = (1988 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[0], first_value);
 
-    let mut last_value = (1991 as u32).to_be_bytes().to_vec();
+    let last_value = (1991 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[elements.len() - 1], last_value);
 }
 
@@ -1510,7 +1510,7 @@ fn test_get_range_inclusive_query_with_non_unique_subquery() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1547,7 +1547,7 @@ fn test_get_range_inclusive_query_with_non_unique_subquery_on_references() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1577,16 +1577,16 @@ fn test_get_range_inclusive_query_with_unique_subquery() {
 
     let path_query = PathQuery::new_unsized(&path, query.clone(), Some(subquery_key), None);
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
     assert_eq!(elements.len(), 8);
 
-    let mut first_value = (1988 as u32).to_be_bytes().to_vec();
+    let first_value = (1988 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[0], first_value);
 
-    let mut last_value = (1995 as u32).to_be_bytes().to_vec();
+    let last_value = (1995 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[elements.len() - 1], last_value);
 }
 
@@ -1610,7 +1610,7 @@ fn test_get_range_from_query_with_non_unique_subquery() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1638,16 +1638,16 @@ fn test_get_range_from_query_with_unique_subquery() {
 
     let path_query = PathQuery::new_unsized(&path, query.clone(), Some(subquery_key), None);
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
     assert_eq!(elements.len(), 5);
 
-    let mut first_value = (1995 as u32).to_be_bytes().to_vec();
+    let first_value = (1995 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[0], first_value);
 
-    let mut last_value = (1999 as u32).to_be_bytes().to_vec();
+    let last_value = (1999 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[elements.len() - 1], last_value);
 }
 
@@ -1671,7 +1671,7 @@ fn test_get_range_to_query_with_non_unique_subquery() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1699,16 +1699,16 @@ fn test_get_range_to_query_with_unique_subquery() {
 
     let path_query = PathQuery::new_unsized(&path, query.clone(), Some(subquery_key), None);
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
     assert_eq!(elements.len(), 10);
 
-    let mut first_value = (1985 as u32).to_be_bytes().to_vec();
+    let first_value = (1985 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[0], first_value);
 
-    let mut last_value = (1994 as u32).to_be_bytes().to_vec();
+    let last_value = (1994 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[elements.len() - 1], last_value);
 }
 
@@ -1732,7 +1732,7 @@ fn test_get_range_to_inclusive_query_with_non_unique_subquery() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1760,16 +1760,16 @@ fn test_get_range_to_inclusive_query_with_unique_subquery() {
 
     let path_query = PathQuery::new_unsized(&path, query.clone(), Some(subquery_key), None);
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
     assert_eq!(elements.len(), 11);
 
-    let mut first_value = (1985 as u32).to_be_bytes().to_vec();
+    let first_value = (1985 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[0], first_value);
 
-    let mut last_value = (1995 as u32).to_be_bytes().to_vec();
+    let last_value = (1995 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[elements.len() - 1], last_value);
 }
 
@@ -1793,7 +1793,7 @@ fn test_get_range_after_query_with_non_unique_subquery() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1830,7 +1830,7 @@ fn test_get_range_after_to_query_with_non_unique_subquery() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1867,7 +1867,7 @@ fn test_get_range_after_to_inclusive_query_with_non_unique_subquery() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1903,7 +1903,7 @@ fn test_get_range_query_with_limit_and_offset() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1925,7 +1925,7 @@ fn test_get_range_query_with_limit_and_offset() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1947,7 +1947,7 @@ fn test_get_range_query_with_limit_and_offset() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1970,7 +1970,7 @@ fn test_get_range_query_with_limit_and_offset() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -1998,7 +1998,7 @@ fn test_get_range_query_with_limit_and_offset() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -2022,7 +2022,7 @@ fn test_get_range_query_with_limit_and_offset() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -2036,7 +2036,7 @@ fn test_get_range_query_with_limit_and_offset() {
         Some(sub_query.clone()),
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
@@ -2056,16 +2056,16 @@ fn test_get_range_query_with_limit_and_offset() {
         None,
     );
 
-    let (elements, skipped) = db
+    let (elements, _) = db
         .get_path_query(&path_query, None)
         .expect("expected successful get_path_query");
 
     assert_eq!(elements.len(), 5);
 
-    let mut first_value = (1992 as u32).to_be_bytes().to_vec();
+    let first_value = (1992 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[0], first_value);
 
-    let mut last_value = (1996 as u32).to_be_bytes().to_vec();
+    let last_value = (1996 as u32).to_be_bytes().to_vec();
     assert_eq!(elements[elements.len() - 1], last_value);
 }
 
@@ -2081,13 +2081,12 @@ fn test_root_hash() {
         None,
     )
     .expect("unable to insert an item");
-    let new_root_hash = db.root_hash(None);
-    assert_ne!(old_root_hash, db.root_hash(None));
+    assert_ne!(old_root_hash.unwrap(), db.root_hash(None).unwrap());
 
     // Check isolation
     let storage = db.storage();
     let transaction = storage.transaction();
-    db.start_transaction();
+    db.start_transaction().unwrap();
 
     db.insert(
         &[TEST_LEAF],
@@ -2097,9 +2096,9 @@ fn test_root_hash() {
     )
     .expect("unable to insert an item");
     let root_hash_outside = db.root_hash(None);
-    assert_ne!(db.root_hash(Some(&transaction)), root_hash_outside);
+    assert_ne!(db.root_hash(Some(&transaction)).unwrap(), root_hash_outside.unwrap());
 
-    assert_eq!(db.root_hash(None), root_hash_outside);
-    db.commit_transaction(transaction);
-    assert_ne!(db.root_hash(None), root_hash_outside);
+    assert_eq!(db.root_hash(None).unwrap(), root_hash_outside.unwrap());
+    db.commit_transaction(transaction).unwrap();
+    assert_ne!(db.root_hash(None).unwrap(), root_hash_outside.unwrap());
 }
diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs
index d36bfc493..985c67943 100644
--- a/merk/src/merk/mod.rs
+++ b/merk/src/merk/mod.rs
@@ -433,7 +433,6 @@ impl Commit for MerkCommitter {
 #[cfg(test)]
 mod test {
-    use rocksdb::{DBRawIteratorWithThreadMode, OptimisticTransactionDB};
     use storage::{
         rocksdb_storage::{
             default_rocksdb, PrefixedRocksDbStorage, RawPrefixedTransactionalIterator,
@@ -445,8 +444,6 @@ mod test {
     use super::{Merk, MerkSource, RefWalker};
     use crate::{test_utils::*, Op};
 
-    type OptimisticTransactionDBRawIterator<'a> =
-        DBRawIteratorWithThreadMode<'a, OptimisticTransactionDB>;
 
     // TODO: Close and then reopen test
     fn assert_invariants(merk: &TempMerk) {
diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs
index cd5ee7a68..d77aed129 100644
--- a/merk/src/proofs/query/mod.rs
+++ b/merk/src/proofs/query/mod.rs
@@ -13,10 +13,7 @@ use storage::{rocksdb_storage::RawPrefixedTransactionalIterator, RawIterator};
 use {super::Op, std::collections::LinkedList};
 
 use super::{tree::execute, Decoder, Node};
-use crate::{
-    proofs::{query::QueryItem::RangeAfter, Op::Parent},
-    tree::{Fetch, Hash, Link, RefWalker},
-};
+use crate::tree::{Fetch, Hash, Link, RefWalker};
 
 /// `Query` represents one or more keys or ranges of keys, which can be used to
 /// resolve a proof which will include all of the requested values.
@@ -324,25 +321,22 @@ impl QueryItem {
         // Lower is bounded
         if upper_unbounded {
-            return QueryItem::RangeFrom(RangeFrom {
+            QueryItem::RangeFrom(RangeFrom {
                 start: start.to_vec(),
-            });
+            })
         } else if end_inclusive {
-            return QueryItem::RangeInclusive(RangeInclusive::new(start.to_vec(), end.to_vec()));
+            QueryItem::RangeInclusive(RangeInclusive::new(start.to_vec(), end.to_vec()))
        } else {
             // upper is bounded and not inclusive
-            return QueryItem::Range(Range {
+            QueryItem::Range(Range {
                 start: start.to_vec(),
                 end: end.to_vec(),
-            });
+            })
         }
     }
 
     pub fn is_range(&self) -> bool {
-        match self {
-            QueryItem::Key(_) => false,
-            _ => true,
-        }
+        !matches!(self, QueryItem::Key(_))
     }
 
     pub fn seek_for_iter(&self, iter: &mut RawPrefixedTransactionalIterator, left_to_right: bool) {
@@ -437,7 +431,7 @@ impl QueryItem {
                     && (!left_to_right || iter.key() != Some(end));
                 // if we are going backwards, we need to make sure we are going to stop after
                 // the first element
-                let next_valid = !(!left_to_right && iter.key() == Some(start));
+                let next_valid = left_to_right || iter.key() != Some(start);
                 (valid, next_valid)
             }
             QueryItem::RangeInclusive(range_inclusive) => {
@@ -460,7 +454,7 @@ impl QueryItem {
                     && iter.valid()
                     && iter.key().is_some()
                     && work;
-                let next_valid = !(!left_to_right && iter.key() == Some(start));
+                let next_valid = left_to_right || iter.key() != Some(start);
                 (valid, next_valid)
             }
             QueryItem::RangeTo(RangeTo { end }) => {
@@ -479,14 +473,14 @@ impl QueryItem {
                 let valid = (limit == None || limit.unwrap() > 0)
                     && iter.valid()
                     && iter.key().is_some()
-                    && !(!left_to_right && iter.key() == Some(start));
+                    && (left_to_right || iter.key() != Some(start));
                 (valid, true)
             }
             QueryItem::RangeAfterTo(Range { start, end }) => {
                 let valid = (limit == None || limit.unwrap() > 0)
                     && iter.valid()
                     && iter.key().is_some()
-                    && !(!left_to_right && iter.key() == Some(start))
+                    && (left_to_right || iter.key() != Some(start))
                     && !(left_to_right && iter.key() == Some(end));
                 (valid, true)
             }
@@ -495,7 +489,7 @@ impl QueryItem {
                     && iter.valid()
                     && iter.key().is_some()
                     && work;
-                let next_valid = !(!left_to_right && iter.key() == Some(range_inclusive.start()))
+                let next_valid = (left_to_right || iter.key() != Some(range_inclusive.start()))
                     && !(left_to_right && iter.key() == Some(range_inclusive.end()));
                 (valid, next_valid)
             }
@@ -670,7 +664,7 @@ where
             Err(index) => (&query[..index], &query[index..]),
         };
 
-        if left_to_right {
+        // if left_to_right {
         let (mut proof, left_absence, new_limit, new_offset) =
             self.create_child_proof(true, left_items, limit, offset, left_to_right)?;
         let (mut right_proof, right_absence, new_limit, new_offset) =
@@ -704,41 +698,41 @@ where
             new_limit,
             new_offset,
         ))
-        } else {
-            let (mut proof, left_absence, new_limit, new_offset) =
-                self.create_child_proof(true, left_items, limit, offset, left_to_right)?;
-            let (mut right_proof, right_absence, new_limit, new_offset) =
-                self.create_child_proof(false, right_items, new_limit, new_offset, left_to_right)?;
-
-            let (has_left, has_right) = (!proof.is_empty(), !right_proof.is_empty());
-
-            proof.push_back(match search {
-                Ok(_) => Op::Push(self.to_kv_node()),
-                Err(_) => {
-                    if left_absence.1 || right_absence.0 {
-                        Op::Push(self.to_kv_node())
-                    } else {
-                        Op::Push(self.to_kvhash_node())
-                    }
-                }
-            });
-
-            if has_left {
-                proof.push_back(Op::Parent);
-            }
-
-            if has_right {
-                proof.append(&mut right_proof);
-                proof.push_back(Op::Child);
-            }
-
-            Ok((
-                proof,
-                (left_absence.0, right_absence.1),
-                new_limit,
-                new_offset,
-            ))
-        }
+        // } else {
+        //     let (mut proof, left_absence, new_limit, new_offset) =
+        //         self.create_child_proof(true, left_items, limit, offset, left_to_right)?;
+        //     let (mut right_proof, right_absence, new_limit, new_offset) =
+        //         self.create_child_proof(false, right_items, new_limit, new_offset, left_to_right)?;
+        //
+        //     let (has_left, has_right) = (!proof.is_empty(), !right_proof.is_empty());
+        //
+        //     proof.push_back(match search {
+        //         Ok(_) => Op::Push(self.to_kv_node()),
+        //         Err(_) => {
+        //             if left_absence.1 || right_absence.0 {
+        //                 Op::Push(self.to_kv_node())
+        //             } else {
+        //                 Op::Push(self.to_kvhash_node())
+        //             }
+        //         }
+        //     });
+        //
+        //     if has_left {
+        //         proof.push_back(Op::Parent);
+        //     }
+        //
+        //     if has_right {
+        //         proof.append(&mut right_proof);
+        //         proof.push_back(Op::Child);
+        //     }
+        //
+        //     Ok((
+        //         proof,
+        //         (left_absence.0, right_absence.1),
+        //         new_limit,
+        //         new_offset,
+        //     ))
+        // }
     }
 
     /// Similar to `create_proof`. Recurses into the child on the given side and
diff --git a/node-grove/src/converter.rs b/node-grove/src/converter.rs
index 8c6f61077..60eba685d 100644
--- a/node-grove/src/converter.rs
+++ b/node-grove/src/converter.rs
@@ -55,7 +55,7 @@ pub fn element_to_js_object<'a, C: Context<'a>>(
     let js_value: Handle<JsValue> = match element {
         Element::Item(item) => {
-            let js_buffer = JsBuffer::external(cx, item.clone());
+            let js_buffer = JsBuffer::external(cx, item);
             js_buffer.upcast()
         }
         Element::Reference(reference) => {
@@ -70,7 +70,7 @@ pub fn element_to_js_object<'a, C: Context<'a>>(
             js_array.upcast()
         }
         Element::Tree(tree) => {
-            let js_buffer = JsBuffer::external(cx, tree.clone());
+            let js_buffer = JsBuffer::external(cx, tree);
             js_buffer.upcast()
         }
     };
diff --git a/storage/src/rocksdb_storage/mod.rs b/storage/src/rocksdb_storage/mod.rs
index 5816b0229..98a2eb633 100644
--- a/storage/src/rocksdb_storage/mod.rs
+++ b/storage/src/rocksdb_storage/mod.rs
@@ -56,10 +56,9 @@ pub fn default_rocksdb(path: &Path) -> Rc<OptimisticTransactionDB> {
     )
 }
 
-fn make_prefixed_key(prefix: Vec<u8>, key: &[u8]) -> Vec<u8> {
-    let mut prefixed_key = prefix.clone();
-    prefixed_key.extend_from_slice(key);
-    prefixed_key
+fn make_prefixed_key(mut prefix: Vec<u8>, key: &[u8]) -> Vec<u8> {
+    prefix.extend_from_slice(key);
+    prefix
 }
 
 pub struct RawPrefixedTransactionalIterator<'a> {
@@ -191,7 +190,7 @@ mod tests {
     use tempdir::TempDir;
 
     use super::*;
-    use crate::{Batch, Storage, Transaction};
+    use crate::{Batch, Storage};
 
     struct TempPrefixedStorage {
         storage: PrefixedRocksDbStorage,
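For reviewers who want to see the refactored helper in one piece rather than as a hunk, here is a self-contained sketch of `compress_subtree_key` as it reads after the `grovedb/src/lib.rs` changes above. Everything up to the second `fold` is copied from the hunk; the closing of that fold, the final return, and the small `main` fall outside the hunk context and are assumptions, not code from this PR.

```rust
// Sketch of GroveDb::compress_subtree_key after this diff (written here as a
// free function for brevity). The last lines are assumed; they are not shown
// in the hunk above.
fn compress_subtree_key(path: &[&[u8]], key: Option<&[u8]>) -> Vec<u8> {
    // Concatenate every path segment, plus the key if one was given.
    let segments_iter = path.iter().copied().chain(key.into_iter());
    let mut segments_count = path.len();
    if key.is_some() {
        segments_count += 1;
    }
    let mut res = segments_iter.fold(Vec::<u8>::new(), |mut acc, p| {
        acc.extend(p.iter());
        acc
    });
    // Append the segment count and each segment's length, so that e.g.
    // ["ab", "c"] and ["a", "bc"] do not produce the same prefix.
    res.extend(segments_count.to_ne_bytes());
    path.iter()
        .copied()
        .chain(key.into_iter())
        .fold(&mut res, |acc, p| {
            acc.extend(p.len().to_ne_bytes());
            acc
        });
    res // assumed final return, not visible in the hunk
}

fn main() {
    // With this sketch, a subtree addressed as path ["test_leaf", "innertree"]
    // and as path ["test_leaf"] plus key "innertree" compresses to the same
    // prefix, which is how the diff uses it for both lookups.
    let leaf: &[u8] = b"test_leaf";
    let inner: &[u8] = b"innertree";
    assert_eq!(
        compress_subtree_key(&[leaf, inner], None),
        compress_subtree_key(&[leaf], Some(inner))
    );
}
```

The `path.iter().copied()` form is behaviour-preserving relative to the old `path.into_iter().map(|x| *x)`; the rewrite only removes the needless dereferencing closure, in line with the other clippy-style cleanups in this diff.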