Add entry API prop tests
Avi-D-coder committed Mar 25, 2024
1 parent d8b4510 commit eebc3cb
Showing 5 changed files with 107 additions and 6 deletions.
7 changes: 7 additions & 0 deletions tests/build_store_entry_ops.proptest-regressions

Large diffs are not rendered by default.

20 changes: 19 additions & 1 deletion tests/build_store_entry_ops.rs
@@ -1,24 +1,34 @@
mod utils;
use std::collections::HashMap;

use proptest::prelude::*;

use kairos_trie::{
stored::{memory_db::MemoryDb, merkle::SnapshotBuilder},
Transaction, TrieRoot,
};
use utils::operations::*;

pub fn end_to_end_entry_ops(batches: &[&[Operation]]) {
fn end_to_end_entry_ops(batches: Vec<Vec<Operation>>) {
// The persistent backing, likely rocksdb
let db = &MemoryDb::<[u8; 8]>::empty();

// An empty trie root
let mut prior_root_hash = TrieRoot::default();

// used as a reference for trie behavior
let mut hash_map = HashMap::new();

for batch in batches.iter() {
eprintln!("Batch size: {}", batch.len());
// We build a snapshot on the server.
let (new_root_hash, snapshot) =
run_against_snapshot_builder(batch, prior_root_hash, db, &mut hash_map);

// We verify the snapshot in a zkVM
run_against_snapshot(batch, snapshot, new_root_hash, prior_root_hash);

// After a batch is verified in an on-chain zkVM, the contract would update its root hash
prior_root_hash = new_root_hash;
}

@@ -34,3 +44,11 @@ pub fn end_to_end_entry_ops(batches: &[&[Operation]]) {
assert_eq!(v, ret_v);
}
}

proptest! {
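// Generator parameters: a pool of 1..5000 keys and 1..100_000 operations in total,
// split into at most 1000 batches; each window holds under 10_000 operations, with any
// remaining operations forming the final batch (see `arb_batches` in tests/utils/operations.rs).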
#[test]
fn prop_end_to_end_entry_ops(
batches in arb_batches(1..5000usize, 1..100_000usize, 1000, 10_000)) {
end_to_end_entry_ops(batches);
}
}
2 changes: 1 addition & 1 deletion tests/build_store_modify.rs
@@ -12,7 +12,7 @@ use utils::{insert_get::*, *};

prop_compose! {
fn arb_hashmap()(
map in prop::collection::hash_map(arb_key_hash(), 0u64.., 0..1_000)
map in prop::collection::hash_map(arb_key_hash(), 0u64.., 0..500)
) -> HashMap<KeyHash, u64> {
map
}
2 changes: 2 additions & 0 deletions tests/utils/insert_get.rs
@@ -1,3 +1,5 @@
#![allow(unused)]

use std::collections::HashMap;

use kairos_trie::{
82 changes: 78 additions & 4 deletions tests/utils/operations.rs
@@ -1,5 +1,9 @@
#![allow(unused)]

use std::collections::{hash_map, HashMap};

use proptest::{prelude::*, sample::SizeRange};

use kairos_trie::{
stored::{
memory_db::MemoryDb,
@@ -9,16 +13,83 @@ use kairos_trie::{
KeyHash, NodeHash, Transaction, TrieRoot,
};

use super::arb_key_hash;

pub type Value = [u8; 8];

#[derive(Debug, Clone, Copy)]
pub enum Operation {
Get(KeyHash),
Insert(KeyHash, Value),
EntryGet(KeyHash),
EntryInsert(KeyHash, Value),
EntryAndModify(KeyHash, Value),
EntryAndModifyOrInsert(KeyHash, Value),
EntryOrInsert(KeyHash, Value),
}

prop_compose! {
pub fn arb_value()(data in any::<[u8; 8]>()) -> Value {
data
}
}

prop_compose! {
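// Each operation draws its key from a shared pool of `key_count` keys, so keys repeat
// across operations and the entry/update paths are exercised against existing values,
// not just fresh inserts.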
pub fn arb_operations(key_count: impl Into<SizeRange>, op_count: impl Into<SizeRange>)
(keys in prop::collection::vec(arb_key_hash(), key_count),
ops in prop::collection::vec(
(0..6u8, // one discriminant per operation kind matched below (0..=5)
any::<prop::sample::Index>(),
arb_value()
),
op_count
)
) -> Vec<Operation> {
ops.into_iter().map(|(op, idx, value)| {
let key = keys[idx.index(keys.len())];
match op {
0 => Operation::Get(key),
1 => Operation::Insert(key, value),
2 => Operation::EntryGet(key),
3 => Operation::EntryInsert(key, value),
4 => Operation::EntryAndModifyOrInsert(key, value),
5 => Operation::EntryOrInsert(key, value),
_ => unreachable!(),
}}).collect()
}
}

prop_compose! {
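// Splits one generated operation list into consecutive batches: `windows` holds up to
// `max_batch_count - 1` batch sizes drawn from 0..max_batch_size, and any operations
// left over after the windows are consumed become the final batch (see `arb_batches_inner`).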
pub fn arb_batches(key_count: impl Into<SizeRange>, op_count: impl Into<SizeRange>, max_batch_count: usize, max_batch_size: usize)
(
ops in arb_operations(key_count, op_count),
windows in prop::collection::vec(0..max_batch_size, max_batch_count - 1)
) -> Vec<Vec<Operation>> {
arb_batches_inner(ops, windows)
}
}

fn arb_batches_inner(ops: Vec<Operation>, windows: Vec<usize>) -> Vec<Vec<Operation>> {
let mut batches = Vec::new();
let mut start = 0;

// Partition the operations into batches
for window_size in windows {
if start + window_size > ops.len() {
break;
}

batches.push(ops[start..start + window_size].to_vec());

start += window_size;
}

if start < ops.len() {
batches.push(ops[start..].to_vec());
}

batches
}
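
For illustration, here is the same splitting behaviour applied to plain integers; the function name and values are illustrative only and not part of the test suite. With 10 items and windows [3, 4, 20], the oversized last window stops the loop and the remaining items form the final batch.

fn split_example() {
    // Mirrors arb_batches_inner's partitioning, but over a Vec<u32>.
    let ops: Vec<u32> = (0..10).collect();
    let windows = vec![3usize, 4, 20];

    let mut batches = Vec::new();
    let mut start = 0;
    for window_size in windows {
        if start + window_size > ops.len() {
            break; // a window of 20 would overrun the 10 items, so stop here
        }
        batches.push(ops[start..start + window_size].to_vec());
        start += window_size;
    }
    if start < ops.len() {
        batches.push(ops[start..].to_vec()); // remaining 3 items
    }

    assert_eq!(batches, vec![vec![0, 1, 2], vec![3, 4, 5, 6], vec![7, 8, 9]]);
}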

// Code like this runs in the server.
pub fn run_against_snapshot_builder(
batch: &[Operation],
@@ -87,9 +158,12 @@ fn trie_op<S: Store<Value>>(
let new = v.insert(*value);
(None, Some(*new))
}
kairos_trie::Entry::VacantEmptyTrie(_) => (None, None),
kairos_trie::Entry::VacantEmptyTrie(v) => {
let new = v.insert(*value);
(None, Some(*new))
}
},
Operation::EntryAndModify(key, value) => {
Operation::EntryAndModifyOrInsert(key, value) => {
let entry = txn.entry(key).unwrap();
let mut old = None;
let new = entry
@@ -141,7 +215,7 @@ fn hashmap_op(op: &Operation, map: &mut HashMap<KeyHash, Value>) -> (Option<Valu
(None, Some(*new))
}
},
Operation::EntryAndModify(key, value) => {
Operation::EntryAndModifyOrInsert(key, value) => {
let entry = map.entry(*key);
let mut old = None;
let new = entry
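
For context, the HashMap reference model above uses std's Entry API. The following is a minimal sketch, under the assumption that an EntryAndModifyOrInsert arm boils down to the and_modify/or_insert pattern; the function name, key/value types, and the modification closure are illustrative, not the repository's code.

use std::collections::HashMap;

// Sketch only: remember the previous value if the key was present, apply some
// modification to it, and otherwise insert the supplied value.
fn and_modify_or_insert(map: &mut HashMap<u64, u64>, key: u64, value: u64) -> (Option<u64>, u64) {
    let mut old = None;
    let new = map
        .entry(key)
        .and_modify(|v| {
            old = Some(*v);
            *v = v.wrapping_add(1); // hypothetical modification
        })
        .or_insert(value);
    (old, *new)
}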
