From 1be338ea33c21af8dd7c32695f59f152dd3dff08 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Mon, 18 Nov 2024 11:14:20 +0000 Subject: [PATCH] WIP: faster status cache --- Cargo.lock | 2 + Cargo.toml | 2 +- core/Cargo.toml | 1 + core/tests/snapshots.rs | 4 - programs/sbf/Cargo.lock | 2 + runtime/Cargo.toml | 1 + runtime/benches/status_cache.rs | 6 +- runtime/src/bank.rs | 29 +- runtime/src/bank/check_transactions.rs | 3 +- runtime/src/bank_forks.rs | 3 +- runtime/src/fixed_concurrent_map.rs | 345 ++++++++++++++++++++++ runtime/src/lib.rs | 1 + runtime/src/snapshot_bank_utils.rs | 8 +- runtime/src/status_cache.rs | 387 +++++++++++++++---------- sdk/frozen-abi/Cargo.toml | 1 + sdk/frozen-abi/src/abi_example.rs | 16 + svm/examples/Cargo.lock | 2 + 17 files changed, 631 insertions(+), 182 deletions(-) create mode 100644 runtime/src/fixed_concurrent_map.rs diff --git a/Cargo.lock b/Cargo.lock index b5dfc4f3227fe0..9f265b0958316a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1863,6 +1863,7 @@ dependencies = [ "once_cell", "parking_lot_core 0.9.8", "rayon", + "serde", ] [[package]] @@ -8357,6 +8358,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_with", + "smallvec", "solana-accounts-db", "solana-address-lookup-table-program", "solana-bpf-loader-program", diff --git a/Cargo.toml b/Cargo.toml index b402283e560a5e..5d99e54cf67f4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -284,7 +284,7 @@ crossbeam-channel = "0.5.13" csv = "1.3.1" ctrlc = "3.4.5" curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core"] } -dashmap = "5.5.3" +dashmap = { version = "5.5.3", features = ["serde"] } derivation-path = { version = "0.2.0", default-features = false } derive-where = "1.2.7" dialoguer = "0.10.4" diff --git a/core/Cargo.toml b/core/Cargo.toml index 3b8f37a0db3298..4bee9da17334d9 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -110,6 +110,7 @@ solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } solana-logger = { workspace 
= true } solana-poh = { workspace = true, features = ["dev-context-only-utils"] } solana-program-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-stake-program = { workspace = true } solana-unified-scheduler-pool = { workspace = true, features = [ diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 11855f60acf819..e2fb7fd2bb0c64 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -363,11 +363,7 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: Clust .unwrap() .root_bank() .status_cache - .read() - .unwrap() .roots() - .iter() - .cloned() .sorted(); assert!(slots_to_snapshot.into_iter().eq(expected_slots_to_snapshot)); } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 75b914a69db651..545180597701fb 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1321,6 +1321,7 @@ dependencies = [ "once_cell", "parking_lot_core 0.9.8", "rayon", + "serde", ] [[package]] @@ -6599,6 +6600,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_with", + "smallvec", "solana-accounts-db", "solana-address-lookup-table-program", "solana-bpf-loader-program", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index b7eefcea5a76b2..1db8a8ffc1c738 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -48,6 +48,7 @@ serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } serde_json = { workspace = true } serde_with = { workspace = true } +smallvec = { workspace = true } solana-accounts-db = { workspace = true } solana-address-lookup-table-program = { workspace = true } solana-bpf-loader-program = { workspace = true } diff --git a/runtime/benches/status_cache.rs b/runtime/benches/status_cache.rs index 8f25842f1febd3..8780751a7adf7a 100644 --- a/runtime/benches/status_cache.rs +++ b/runtime/benches/status_cache.rs @@ 
-15,7 +15,7 @@ use { #[bench] fn bench_status_cache_serialize(bencher: &mut Bencher) { - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); status_cache.add_root(0); status_cache.clear(); for hash_index in 0..100 { @@ -30,7 +30,7 @@ fn bench_status_cache_serialize(bencher: &mut Bencher) { status_cache.insert(&blockhash, sig, 0, Ok(())); } } - assert!(status_cache.roots().contains(&0)); + assert!(status_cache.roots().collect::>().contains(&0)); bencher.iter(|| { let _ = serialize(&status_cache.root_slot_deltas()).unwrap(); }); @@ -38,7 +38,7 @@ fn bench_status_cache_serialize(bencher: &mut Bencher) { #[bench] fn bench_status_cache_root_slot_deltas(bencher: &mut Bencher) { - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); // fill the status cache let slots: Vec<_> = (42..).take(MAX_CACHE_ENTRIES).collect(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f03a983e903196..bfabf123b7e22f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -256,7 +256,7 @@ struct RentMetrics { pub type BankStatusCache = StatusCache>; #[cfg_attr( feature = "frozen-abi", - frozen_abi(digest = "BHg4qpwegtaJypLUqAdjQYzYeLfEGf6tA4U5cREbHMHi") + frozen_abi(digest = "CQE8Pab7YMwvUj6rjD95kqt9fgBE4mkG7GRUko2DyveD") )] pub type BankSlotDelta = SlotDelta>; @@ -751,7 +751,7 @@ pub struct Bank { pub rc: BankRc, /// A cache of signature statuses - pub status_cache: Arc>, + pub status_cache: Arc, /// FIFO queue of `recent_blockhash` items blockhash_queue: RwLock, @@ -1016,7 +1016,7 @@ impl Bank { let mut bank = Self { skipped_rewrites: Mutex::default(), rc: BankRc::new(accounts), - status_cache: Arc::>::default(), + status_cache: Arc::::default(), blockhash_queue: RwLock::::default(), ancestors: Ancestors::default(), hash: RwLock::::default(), @@ -1682,7 +1682,7 @@ impl Bank { let mut bank = Self { skipped_rewrites: Mutex::default(), rc: bank_rc, - status_cache: Arc::>::default(), 
+ status_cache: Arc::::default(), blockhash_queue: RwLock::new(fields.blockhash_queue), ancestors, hash: RwLock::new(fields.hash), @@ -1958,7 +1958,7 @@ impl Bank { } pub fn status_cache_ancestors(&self) -> Vec { - let mut roots = self.status_cache.read().unwrap().roots().clone(); + let mut roots = self.status_cache.roots().collect::>(); let min = roots.iter().min().cloned().unwrap_or(0); for ancestor in self.ancestors.keys() { if ancestor >= min { @@ -3103,7 +3103,7 @@ impl Bank { let mut squash_cache_time = Measure::start("squash_cache_time"); roots .iter() - .for_each(|slot| self.status_cache.write().unwrap().add_root(*slot)); + .for_each(|slot| self.status_cache.add_root(*slot)); squash_cache_time.stop(); SquashTiming { @@ -3366,12 +3366,13 @@ impl Bank { } /// Forget all signatures. Useful for benchmarking. + #[cfg(feature = "dev-context-only-utils")] pub fn clear_signatures(&self) { - self.status_cache.write().unwrap().clear(); + self.status_cache.clear(); } pub fn clear_slot_signatures(&self, slot: Slot) { - self.status_cache.write().unwrap().clear_slot_entries(slot); + self.status_cache.clear_slot_entries(slot); } fn update_transaction_statuses( @@ -3379,13 +3380,12 @@ impl Bank { sanitized_txs: &[impl TransactionWithMeta], processing_results: &[TransactionProcessingResult], ) { - let mut status_cache = self.status_cache.write().unwrap(); assert_eq!(sanitized_txs.len(), processing_results.len()); for (tx, processing_result) in sanitized_txs.iter().zip(processing_results) { if let Ok(processed_tx) = &processing_result { // Add the message hash to the status cache to ensure that this message // won't be processed again with a different signature. - status_cache.insert( + self.status_cache.insert( tx.recent_blockhash(), tx.message_hash(), self.slot(), @@ -3394,7 +3394,7 @@ impl Bank { // Add the transaction signature to the status cache so that transaction status // can be queried by transaction signature over RPC. 
In the future, this should // only be added for API nodes because voting validators don't need to do this. - status_cache.insert( + self.status_cache.insert( tx.recent_blockhash(), tx.signature(), self.slot(), @@ -5526,15 +5526,14 @@ impl Bank { signature: &Signature, blockhash: &Hash, ) -> Option> { - let rcache = self.status_cache.read().unwrap(); - rcache + self.status_cache .get_status(signature, blockhash, &self.ancestors) .map(|v| v.1) } pub fn get_signature_status_slot(&self, signature: &Signature) -> Option<(Slot, Result<()>)> { - let rcache = self.status_cache.read().unwrap(); - rcache.get_status_any_blockhash(signature, &self.ancestors) + self.status_cache + .get_status_any_blockhash(signature, &self.ancestors) } pub fn get_signature_status(&self, signature: &Signature) -> Option> { diff --git a/runtime/src/bank/check_transactions.rs b/runtime/src/bank/check_transactions.rs index 6d966e32ba8931..a2bedcdd6d7041 100644 --- a/runtime/src/bank/check_transactions.rs +++ b/runtime/src/bank/check_transactions.rs @@ -191,13 +191,12 @@ impl Bank { ) -> Vec { // Do allocation before acquiring the lock on the status cache. 
let mut check_results = Vec::with_capacity(sanitized_txs.len()); - let rcache = self.status_cache.read().unwrap(); check_results.extend(sanitized_txs.iter().zip(lock_results).map( |(sanitized_tx, lock_result)| { let sanitized_tx = sanitized_tx.borrow(); if lock_result.is_ok() - && self.is_transaction_already_processed(sanitized_tx, &rcache) + && self.is_transaction_already_processed(sanitized_tx, &self.status_cache) { error_counters.already_processed += 1; return Err(TransactionError::AlreadyProcessed); diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 83c2e0ab3fd675..7237a897ca8ee6 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -452,8 +452,7 @@ impl BankForks { if bank.is_startup_verification_complete() { // Save off the status cache because these may get pruned if another // `set_root()` is called before the snapshots package can be generated - let status_cache_slot_deltas = - bank.status_cache.read().unwrap().root_slot_deltas(); + let status_cache_slot_deltas = bank.status_cache.root_slot_deltas(); if let Err(e) = accounts_background_request_sender.send_snapshot_request(SnapshotRequest { snapshot_root_bank: Arc::clone(bank), diff --git a/runtime/src/fixed_concurrent_map.rs b/runtime/src/fixed_concurrent_map.rs new file mode 100644 index 00000000000000..2e61579095032e --- /dev/null +++ b/runtime/src/fixed_concurrent_map.rs @@ -0,0 +1,345 @@ +use ahash::AHasher; +use std::borrow::Borrow; +use std::hash::{Hash, Hasher}; +use std::mem; +use std::ops::Deref; +use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; + +/// Fast concurrent hashmap with fixed capacity. +/// +/// This hashmap is designed for use cases where the capacity is known in advance and small. It's a +/// stripped down version of dashmap that guarantees that there are no collisions and so that +/// there's never contention when working with distinct keys. 
+/// +/// The map uses linear probing to resolve collisions, so it works best with a low load factor (i.e. +/// a small number of elements relative to the capacity). +#[derive(Debug)] +pub struct FixedConcurrentMap { + table: Vec>>, +} + +impl FixedConcurrentMap { + /// Creates a new hashmap with the given capacity. + pub fn new(capacity: usize) -> Self { + assert!( + capacity.is_power_of_two(), + "capacity must be a power of two" + ); + Self { + table: (0..capacity).map(|_| RwLock::new(None)).collect(), + } + } + + fn hash(&self, key: &Q) -> usize { + let mask = self.table.len() - 1; + let mut hasher = AHasher::default(); + key.hash(&mut hasher); + (hasher.finish() as usize) & mask + } + + /// Inserts a key-value pair into the hashmap. + /// + /// If the key already exists, the value is updated and the old value is returned. + #[allow(dead_code)] + pub fn insert(&self, key: K, value: V) -> Result, &'static str> { + let mask = self.table.len() - 1; + let index = self.hash(&key); + for i in 0..self.table.len() { + let probe_index = (index + i) & mask; + let slot = &self.table[probe_index]; + + let mut entry = slot.write().unwrap(); + match &mut *entry { + Some((existing_key, existing_value)) if existing_key == &key => { + let old_value = mem::replace(existing_value, value); + return Ok(Some(old_value)); + } + Some(_) => continue, + None => { + *entry = Some((key, value)); + return Ok(None); + } + } + } + Err("Table is full") + } + + /// Gets the value associated with the given key. + /// + /// Acquires a read lock on the entry, which is released when the returned guard is dropped. 
+ pub fn get(&self, key: &Q) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let mask = self.table.len() - 1; + let index = self.hash(key); + for i in 0..self.table.len() { + let probe_index = (index + i) & mask; + let slot = &self.table[probe_index]; + let entry = slot.read().unwrap(); + match &*entry { + Some((existing_key, _)) if existing_key.borrow() == key => { + return Some(ReadGuard { inner: entry }); + } + Some(_) => continue, + None => return None, + } + } + None + } + + /// Gets the value associated with the given key or inserts it using the provided closure. + /// + /// The implementation assumes that the key almost always exists in the map, + /// so it's optimized for that case. + /// + /// Always returns a read guard, even if the key was inserted in which case, the write lock is + /// released and the read lock is re-acquired. This is optimized for the case where the value is + /// itself a concurrent data structure and so holding the write lock is unnecessary after the + /// value is inserted and would only cause contention. + /// + /// In the case where the value is inserted, there's a small window where the entry could be + /// removed while the write lock is released and the read lock is re-acquired. In this case, the + /// function returns an error. + pub fn get_or_insert_with(&self, key: K, default: F) -> Result, &'static str> + where + F: FnOnce() -> V, + { + let mask = self.table.len() - 1; + let index = self.hash(&key); + for i in 0..self.table.len() { + let probe_index = (index + i) & mask; + let slot = &self.table[probe_index]; + + { + // Optimistically try to read the entry without acquiring the write lock. 
+ let entry = slot.read().unwrap(); + if let Some((existing_key, _)) = &*entry { + if existing_key == &key { + return Ok(ReadGuard { inner: entry }); + } else { + continue; + } + } + } + + { + let mut entry = slot.write().unwrap(); + match &*entry { + Some((existing_key, _)) if existing_key == &key => { + // The entry was inserted in the window between releasing the read lock and + // acquiring the write lock. + } + Some(_) => continue, + None => { + // Insert the new entry. + *entry = Some((key, default())); + } + } + } + + // Drop the write lock and re-acquire a read lock. + let entry = slot.read().unwrap(); + if let Some((_existing_key, _)) = &*entry { + return Ok(ReadGuard { inner: entry }); + } else { + return Err("entry removed while in get_or_insert_with"); + } + } + Err("map is full") + } + + /// Removes the value associated with the given key. + /// + /// Returns the value if it existed. + pub fn remove(&self, key: &Q) -> Option + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let mask = self.table.len() - 1; + let index = self.hash(key); + for i in 0..self.table.len() { + let probe_index = (index + i) & mask; + let slot = &self.table[probe_index]; + let mut entry = slot.write().unwrap(); + match &*entry { + Some((existing_key, _)) if existing_key.borrow() == key => { + let (_, value) = entry.take().unwrap(); + return Some(value); + } + Some(_) => continue, + None => return None, + } + } + None + } + + /// Retains only the elements specified by the predicate. + pub fn retain(&self, mut f: F) + where + F: FnMut(&K, &mut V) -> bool, + { + for slot in &self.table { + let mut entry = slot.write().unwrap(); + if let Some((ref key, ref mut value)) = *entry { + if !f(key, value) { + *entry = None; + } + } + } + } + + /// Clears the map, removing all key-value pairs. 
+ #[cfg(feature = "dev-context-only-utils")] + pub fn clear(&self) { + for slot in &self.table { + let mut entry = slot.write().unwrap(); + *entry = None; + } + } + + /// Returns an iterator over the key-value pairs in the map. + pub fn iter(&self) -> impl Iterator> { + self.table.iter().filter_map(|slot| { + let entry = slot.read().unwrap(); + if entry.is_some() { + Some(IterGuard { inner: entry }) + } else { + None + } + }) + } +} +pub struct ReadGuard<'a, K, V> { + inner: RwLockReadGuard<'a, Option<(K, V)>>, +} + +impl<'a, K, V> Deref for ReadGuard<'a, K, V> { + type Target = V; + + fn deref(&self) -> &Self::Target { + &self.inner.as_ref().unwrap().1 + } +} + +pub struct WriteGuard<'a, K, V> { + inner: RwLockWriteGuard<'a, Option<(K, V)>>, +} + +impl<'a, K, V> Deref for WriteGuard<'a, K, V> { + type Target = V; + + fn deref(&self) -> &Self::Target { + &self.inner.as_ref().unwrap().1 + } +} + +pub struct IterGuard<'a, K, V> { + inner: RwLockReadGuard<'a, Option<(K, V)>>, +} + +impl<'a, K, V> Deref for IterGuard<'a, K, V> { + type Target = (K, V); + + fn deref(&self) -> &Self::Target { + self.inner.as_ref().unwrap() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::Arc; + + #[test] + fn test_insert_and_get() { + let map = Arc::new(FixedConcurrentMap::new(16)); + assert_eq!(map.insert("key1", 10).unwrap(), None); + assert_eq!(*map.get("key1").unwrap(), 10); + } + + #[test] + fn test_insert_existing_key() { + let map = Arc::new(FixedConcurrentMap::new(16)); + assert!(map.insert("key1", 10).is_ok()); + assert_eq!(map.insert("key1", 20).unwrap(), Some(10)); + assert_eq!(*map.get("key1").unwrap(), 20); + } + + #[test] + fn test_remove() { + let map = Arc::new(FixedConcurrentMap::new(16)); + assert!(map.insert("key1", 10).is_ok()); + assert_eq!(map.remove("key1"), Some(10)); + assert!(map.get("key1").is_none()); + } + + #[test] + fn test_remove_nonexistent_key() { + let map = Arc::new(FixedConcurrentMap::<&str, ()>::new(16)); + 
assert_eq!(map.remove("key1"), None); + } + + #[test] + fn test_table_full() { + let map = Arc::new(FixedConcurrentMap::new(4)); + assert!(map.insert("key1", 10).is_ok()); + assert!(map.insert("key2", 20).is_ok()); + assert!(map.insert("key3", 30).is_ok()); + assert!(map.insert("key4", 40).is_ok()); + assert!(map.insert("key5", 50).is_err()); + } + + #[test] + fn test_get_or_insert_with() { + let map = Arc::new(FixedConcurrentMap::new(16)); + assert_eq!(*map.get_or_insert_with("key1", || 10).unwrap(), 10); + assert_eq!(*map.get_or_insert_with("key1", || 20).unwrap(), 10); + } + + #[test] + fn test_retain() { + let map = Arc::new(FixedConcurrentMap::new(16)); + assert!(map.insert("key1", 10).is_ok()); + assert!(map.insert("key2", 20).is_ok()); + assert!(map.insert("key3", 30).is_ok()); + + map.retain(|_, &mut v| v != 20); + + assert_eq!(*map.get("key1").unwrap(), 10); + assert!(map.get("key2").is_none()); + assert_eq!(*map.get("key3").unwrap(), 30); + } + + #[test] + fn test_iter() { + let map = Arc::new(FixedConcurrentMap::new(16)); + assert!(map.insert("key1", 10).is_ok()); + assert!(map.insert("key2", 20).is_ok()); + assert!(map.insert("key3", 30).is_ok()); + + let mut iter = map.iter().collect::>(); + iter.sort_by_key(|guard| guard.0); + + assert_eq!(iter.len(), 3); + assert_eq!(*(iter[0]), ("key1", 10)); + assert_eq!(*(iter[1]), ("key2", 20)); + assert_eq!(*(iter[2]), ("key3", 30)); + } + + #[test] + fn test_clear() { + let map = Arc::new(FixedConcurrentMap::new(16)); + assert!(map.insert("key1", 10).is_ok()); + assert!(map.insert("key2", 20).is_ok()); + assert!(map.insert("key3", 30).is_ok()); + + map.clear(); + + assert!(map.get("key1").is_none()); + assert!(map.get("key2").is_none()); + assert!(map.get("key3").is_none()); + } +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index c3f877cd57b4e3..f87b6e5a2666a8 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -13,6 +13,7 @@ pub mod bank_hash_cache; pub mod bank_utils; pub mod commitment; pub 
mod epoch_stakes; +mod fixed_concurrent_map; pub mod genesis_utils; pub mod installed_scheduler_pool; pub mod loader_utils; diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 4d90329a785191..10de13914c86af 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -624,7 +624,7 @@ fn rebuild_bank_from_unarchived_snapshots( verify_slot_deltas(slot_deltas.as_slice(), &bank)?; - bank.status_cache.write().unwrap().append(&slot_deltas); + bank.status_cache.append(&slot_deltas); info!("Rebuilt bank for slot: {}", bank.slot()); Ok(( @@ -686,7 +686,7 @@ fn rebuild_bank_from_snapshot( verify_slot_deltas(slot_deltas.as_slice(), &bank)?; - bank.status_cache.write().unwrap().append(&slot_deltas); + bank.status_cache.append(&slot_deltas); info!("Rebuilt bank for slot: {}", bank.slot()); Ok(( @@ -912,7 +912,7 @@ fn bank_to_full_snapshot_archive_with( bank.update_accounts_hash(CalcAccountsHashDataSource::Storages, false, false); let snapshot_storages = bank.get_snapshot_storages(None); - let status_cache_slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas(); + let status_cache_slot_deltas = bank.status_cache.root_slot_deltas(); let accounts_package = AccountsPackage::new_for_snapshot( AccountsPackageKind::Snapshot(SnapshotKind::FullSnapshot), bank, @@ -975,7 +975,7 @@ pub fn bank_to_incremental_snapshot_archive( bank.update_incremental_accounts_hash(full_snapshot_slot); let snapshot_storages = bank.get_snapshot_storages(Some(full_snapshot_slot)); - let status_cache_slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas(); + let status_cache_slot_deltas = bank.status_cache.root_slot_deltas(); let accounts_package = AccountsPackage::new_for_snapshot( AccountsPackageKind::Snapshot(SnapshotKind::IncrementalSnapshot(full_snapshot_slot)), bank, diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs index d5340b5a5c5ad1..eb1ac31736c719 100644 --- a/runtime/src/status_cache.rs 
+++ b/runtime/src/status_cache.rs @@ -1,97 +1,89 @@ use { - log::*, + crate::fixed_concurrent_map::FixedConcurrentMap, + ahash::random_state::RandomState as AHashRandomState, + dashmap::{mapref::entry::Entry, DashMap, DashSet}, rand::{thread_rng, Rng}, serde::Serialize, + smallvec::SmallVec, solana_accounts_db::ancestors::Ancestors, solana_sdk::{ clock::{Slot, MAX_RECENT_BLOCKHASHES}, hash::Hash, }, std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - sync::{Arc, Mutex}, + mem::MaybeUninit, + ptr, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, }, }; pub const MAX_CACHE_ENTRIES: usize = MAX_RECENT_BLOCKHASHES; const CACHED_KEY_SIZE: usize = 20; +const CONCURRENT_MAP_SLOTS: usize = (MAX_CACHE_ENTRIES * 4).next_power_of_two(); +const DASHMAP_SHARDS: usize = (MAX_CACHE_ENTRIES * 4).next_power_of_two(); // Store forks in a single chunk of memory to avoid another lookup. -pub type ForkStatus = Vec<(Slot, T)>; +pub type ForkStatus = SmallVec<[(Slot, T); 2]>; type KeySlice = [u8; CACHED_KEY_SIZE]; -type KeyMap = HashMap>; +type KeyMap = DashMap, AHashRandomState>; // Map of Hash and status -pub type Status = Arc)>>>; +pub type Status = Arc), AHashRandomState>>; // A Map of hash + the highest fork it's been observed on along with // the key offset and a Map of the key slice + Fork status for that key -type KeyStatusMap = HashMap)>; +type KeyStatusMap = FixedConcurrentMap)>; // A map of keys recorded in each fork; used to serialize for snapshots easily. // Doesn't store a `SlotDelta` in it because the bool `root` is usually set much later -type SlotDeltaMap = HashMap>; +type SlotDeltaMap = FixedConcurrentMap>; // The statuses added during a slot, can be used to build on top of a status cache or to // construct a new one. 
Usually derived from a status cache's `SlotDeltaMap` pub type SlotDelta = (Slot, bool, Status); #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct StatusCache { + // map[blockhash][tx_key] => [(fork1_slot, tx_result), (fork2_slot, tx_result), ...] + // used to check if a tx_key was seen on a fork and for rpc to retrieve the tx_result cache: KeyStatusMap, - roots: HashSet, - /// all keys seen during a fork/slot + // set of rooted slots + roots: DashSet, + // map[slot][blockhash] => [(tx_key, tx_result), ...] used to serialize for snapshots slot_deltas: SlotDeltaMap, } impl Default for StatusCache { fn default() -> Self { Self { - cache: HashMap::default(), + cache: KeyStatusMap::new(CONCURRENT_MAP_SLOTS), // 0 is always a root - roots: HashSet::from([0]), - slot_deltas: HashMap::default(), + roots: DashSet::from_iter([0].into_iter()), + slot_deltas: SlotDeltaMap::new(CONCURRENT_MAP_SLOTS), } } } -impl PartialEq for StatusCache { - fn eq(&self, other: &Self) -> bool { - self.roots == other.roots - && self - .cache - .iter() - .all(|(hash, (slot, key_index, hash_map))| { - if let Some((other_slot, other_key_index, other_hash_map)) = - other.cache.get(hash) - { - if slot == other_slot && key_index == other_key_index { - return hash_map.iter().all(|(slice, fork_map)| { - if let Some(other_fork_map) = other_hash_map.get(slice) { - // all this work just to compare the highest forks in the fork map - // per entry - return fork_map.last() == other_fork_map.last(); - } - false - }); - } - } - false - }) - } -} - impl StatusCache { - pub fn clear_slot_entries(&mut self, slot: Slot) { + /// Clear all entries for a slot. + /// + /// This is used when a slot is purged from the bank, see + /// ReplayStage::purge_unconfirmed_duplicate_slot(). When this is called, it's guaranteed that + /// there are no threads inserting new entries for this slot, so there are no races. 
+ pub fn clear_slot_entries(&self, slot: Slot) { let slot_deltas = self.slot_deltas.remove(&slot); if let Some(slot_deltas) = slot_deltas { - let slot_deltas = slot_deltas.lock().unwrap(); - for (blockhash, (_, key_list)) in slot_deltas.iter() { + for item in slot_deltas.iter() { + let blockhash = item.key(); + let (_, key_list) = item.value(); // Any blockhash that exists in self.slot_deltas must also exist // in self.cache, because in self.purge_roots(), when an entry // (b, (max_slot, _, _)) is removed from self.cache, this implies // all entries in self.slot_deltas < max_slot are also removed - if let Entry::Occupied(mut o_blockhash_entries) = self.cache.entry(*blockhash) { - let (_, _, all_hash_maps) = o_blockhash_entries.get_mut(); + if let Some(guard) = self.cache.get(blockhash) { + let (_, _, all_hash_maps) = &*guard; for (key_slice, _) in key_list { if let Entry::Occupied(mut o_key_list) = all_hash_maps.entry(*key_slice) { @@ -108,7 +100,8 @@ impl StatusCache { } if all_hash_maps.is_empty() { - o_blockhash_entries.remove_entry(); + drop(guard); + self.cache.remove(blockhash); } } else { panic!("Blockhash must exist if it exists in self.slot_deltas, slot: {slot}") @@ -126,6 +119,19 @@ impl StatusCache { ancestors: &Ancestors, ) -> Option<(Slot, T)> { let map = self.cache.get(transaction_blockhash)?; + self.do_get_status(&*map, &key, ancestors) + } + + fn do_get_status>( + &self, + map: &( + AtomicU64, + usize, + DashMap<[u8; 20], SmallVec<[(u64, T); 2]>, AHashRandomState>, + ), + key: &K, + ancestors: &Ancestors, + ) -> Option<(u64, T)> { let (_, index, keymap) = map; let max_key_index = key.as_ref().len().saturating_sub(CACHED_KEY_SIZE + 1); let index = (*index).min(max_key_index); @@ -143,19 +149,17 @@ impl StatusCache { None } - /// Search for a key with any blockhash - /// Prefer get_status for performance reasons, it doesn't need - /// to search all blockhashes. + /// Search for a key with any blockhash. 
+ /// + /// Prefer get_status for performance reasons, it doesn't need to search all blockhashes. pub fn get_status_any_blockhash>( &self, key: K, ancestors: &Ancestors, ) -> Option<(Slot, T)> { - let keys: Vec<_> = self.cache.keys().copied().collect(); - - for blockhash in keys.iter() { - trace!("get_status_any_blockhash: trying {}", blockhash); - let status = self.get_status(&key, blockhash, ancestors); + for item in self.cache.iter() { + let (_blockhash, map) = &*item; + let status = self.do_get_status(map, &key, ancestors); if status.is_some() { return status; } @@ -163,126 +167,159 @@ impl StatusCache { None } - /// Add a known root fork. Roots are always valid ancestors. - /// After MAX_CACHE_ENTRIES, roots are removed, and any old keys are cleared. - pub fn add_root(&mut self, fork: Slot) { + /// Add a known root fork. + /// + /// Roots are always valid ancestors. After MAX_CACHE_ENTRIES, roots are removed, and any old + /// keys are cleared. + pub fn add_root(&self, fork: Slot) { self.roots.insert(fork); self.purge_roots(); } - pub fn roots(&self) -> &HashSet { - &self.roots + /// Get all the roots. + pub fn roots(&self) -> impl Iterator + '_ { + self.roots.iter().map(|x| *x) } /// Insert a new key for a specific slot. - pub fn insert>( - &mut self, - transaction_blockhash: &Hash, - key: K, - slot: Slot, - res: T, - ) { + pub fn insert>(&self, transaction_blockhash: &Hash, key: K, slot: Slot, res: T) { let max_key_index = key.as_ref().len().saturating_sub(CACHED_KEY_SIZE + 1); + let mut key_slice = MaybeUninit::<[u8; CACHED_KEY_SIZE]>::uninit(); // Get the cache entry for this blockhash. - let (max_slot, key_index, hash_map) = - self.cache.entry(*transaction_blockhash).or_insert_with(|| { - let key_index = thread_rng().gen_range(0..max_key_index + 1); - (slot, key_index, HashMap::new()) - }); - - // Update the max slot observed to contain txs using this blockhash. - *max_slot = std::cmp::max(slot, *max_slot); - - // Grab the key slice. 
- let key_index = (*key_index).min(max_key_index); - let mut key_slice = [0u8; CACHED_KEY_SIZE]; - key_slice.clone_from_slice(&key.as_ref()[key_index..key_index + CACHED_KEY_SIZE]); - - // Insert the slot and tx result into the cache entry associated with - // this blockhash and keyslice. - let forks = hash_map.entry(key_slice).or_default(); - forks.push((slot, res.clone())); + let key_index = { + let (max_slot, key_index, hash_map) = &*self + .cache + .get_or_insert_with(*transaction_blockhash, || { + let key_index = thread_rng().gen_range(0..max_key_index + 1); + ( + AtomicU64::new(slot), + key_index, + DashMap::with_hasher_and_shard_amount( + AHashRandomState::default(), + DASHMAP_SHARDS, + ), + ) + }) + .unwrap(); + + // Update the max slot observed to contain txs using this blockhash. + max_slot.fetch_max(slot, Ordering::Relaxed); + + // Grab the key slice. + let key_index = (*key_index).min(max_key_index); + unsafe { + ptr::copy_nonoverlapping( + key.as_ref()[key_index..key_index + CACHED_KEY_SIZE].as_ptr(), + key_slice.as_mut_ptr() as *mut u8, + CACHED_KEY_SIZE, + ) + } - self.add_to_slot_delta(transaction_blockhash, slot, key_index, key_slice, res); + // Insert the slot and tx result into the cache entry associated with + // this blockhash and keyslice. 
+ let mut forks = hash_map + .entry(unsafe { key_slice.assume_init() }) + .or_default(); + forks.push((slot, res.clone())); + + key_index + }; + + self.add_to_slot_delta( + transaction_blockhash, + slot, + key_index, + unsafe { key_slice.assume_init() }, + res, + ); } - pub fn purge_roots(&mut self) { + fn purge_roots(&self) { if self.roots.len() > MAX_CACHE_ENTRIES { - if let Some(min) = self.roots.iter().min().cloned() { + if let Some(min) = self.roots().min() { self.roots.remove(&min); - self.cache.retain(|_, (fork, _, _)| *fork > min); + self.cache + .retain(|_, (max_slot, _, _)| max_slot.load(Ordering::Relaxed) > min); self.slot_deltas.retain(|slot, _| *slot > min); } } } - /// Clear for testing - pub fn clear(&mut self) { - for v in self.cache.values_mut() { - v.2 = HashMap::new(); - } - - self.slot_deltas - .iter_mut() - .for_each(|(_, status)| status.lock().unwrap().clear()); + #[cfg(feature = "dev-context-only-utils")] + pub fn clear(&self) { + self.cache.clear(); + self.slot_deltas.clear(); } - /// Get the statuses for all the root slots + /// Get the statuses for all the root slots. + /// + /// This is never called concurrently with add_root(), and for a slot to be a root there must be + /// no new entries for that slot, so there are no races. + /// + /// See ReplayStage::handle_new_root() => BankForks::set_root() => + /// BankForks::do_set_root_return_metrics() => root_slot_deltas() pub fn root_slot_deltas(&self) -> Vec> { self.roots() - .iter() .map(|root| { ( - *root, + root, true, // <-- is_root - self.slot_deltas.get(root).cloned().unwrap_or_default(), + self.slot_deltas + .get(&root) + .map(|x| x.clone()) + .unwrap_or_default(), ) }) .collect() } - // replay deltas into a status_cache allows "appending" data - pub fn append(&mut self, slot_deltas: &[SlotDelta]) { + /// Populate the cache with the slot deltas from a snapshot. + /// + /// Really badly named method. See load_bank_forks() => ... 
=> + /// rebuild_bank_from_snapshot() => [load slot deltas from snapshot] => append() + pub fn append(&self, slot_deltas: &[SlotDelta]) { for (slot, is_root, statuses) in slot_deltas { - statuses - .lock() - .unwrap() - .iter() - .for_each(|(tx_hash, (key_index, statuses))| { - for (key_slice, res) in statuses.iter() { - self.insert_with_slice(tx_hash, *slot, *key_index, *key_slice, res.clone()) - } - }); + statuses.iter().for_each(|item| { + let tx_hash = item.key(); + let (key_index, statuses) = item.value(); + for (key_slice, res) in statuses.iter() { + self.insert_with_slice(tx_hash, *slot, *key_index, *key_slice, res.clone()) + } + }); if *is_root { self.add_root(*slot); } } } - pub fn from_slot_deltas(slot_deltas: &[SlotDelta]) -> Self { - // play all deltas back into the status cache - let mut me = Self::default(); - me.append(slot_deltas); - me - } - fn insert_with_slice( - &mut self, + &self, transaction_blockhash: &Hash, slot: Slot, key_index: usize, key_slice: [u8; CACHED_KEY_SIZE], res: T, ) { - let hash_map = - self.cache - .entry(*transaction_blockhash) - .or_insert((slot, key_index, HashMap::new())); - hash_map.0 = std::cmp::max(slot, hash_map.0); + { + let (max_slot, _, hash_map) = &*self + .cache + .get_or_insert_with(*transaction_blockhash, || { + ( + AtomicU64::new(slot), + key_index, + DashMap::with_hasher_and_shard_amount( + AHashRandomState::default(), + DASHMAP_SHARDS, + ), + ) + }) + .unwrap(); + max_slot.fetch_max(slot, Ordering::Relaxed); - let forks = hash_map.2.entry(key_slice).or_default(); - forks.push((slot, res.clone())); + let mut forks = hash_map.entry(key_slice).or_default(); + forks.push((slot, res.clone())); + } self.add_to_slot_delta(transaction_blockhash, slot, key_index, key_slice, res); } @@ -290,17 +327,26 @@ impl StatusCache { // Add this key slice to the list of key slices for this slot and blockhash // combo. 
fn add_to_slot_delta( - &mut self, + &self, transaction_blockhash: &Hash, slot: Slot, key_index: usize, key_slice: [u8; CACHED_KEY_SIZE], res: T, ) { - let mut fork_entry = self.slot_deltas.entry(slot).or_default().lock().unwrap(); - let (_key_index, hash_entry) = fork_entry + let fork_entry = self + .slot_deltas + .get_or_insert_with(slot, || { + Arc::new(DashMap::with_hasher_and_shard_amount( + AHashRandomState::default(), + DASHMAP_SHARDS, + )) + }) + .unwrap(); + + let (_key_index, hash_entry) = &mut *fork_entry .entry(*transaction_blockhash) - .or_insert((key_index, vec![])); + .or_insert_with(|| (key_index, Vec::new())); hash_entry.push((key_slice, res)) } } @@ -314,6 +360,46 @@ mod tests { type BankStatusCache = StatusCache<()>; + impl StatusCache { + fn from_slot_deltas(slot_deltas: &[SlotDelta]) -> Self { + let cache = Self::default(); + cache.append(slot_deltas); + cache + } + } + + impl PartialEq for StatusCache { + fn eq(&self, other: &Self) -> bool { + use std::collections::HashSet; + + let roots = self.roots.iter().map(|x| *x).collect::>(); + let other_roots = other.roots.iter().map(|x| *x).collect::>(); + roots == other_roots + && self.cache.iter().all(|item| { + let (hash, (max_slot, key_index, hash_map)) = &*item; + if let Some(item) = other.cache.get(hash) { + let (other_max_slot, other_key_index, other_hash_map) = &*item; + if max_slot.load(Ordering::Relaxed) + == other_max_slot.load(Ordering::Relaxed) + && key_index == other_key_index + { + return hash_map.iter().all(|item| { + let slice = item.key(); + let fork_map = item.value(); + if let Some(other_fork_map) = other_hash_map.get(slice) { + // all this work just to compare the highest forks in the fork map + // per entry + return fork_map.last() == other_fork_map.last(); + } + false + }); + } + } + false + }) + } + } + #[test] fn test_empty_has_no_sigs() { let sig = Signature::default(); @@ -332,7 +418,7 @@ mod tests { #[test] fn test_find_sig_with_ancestor_fork() { let sig = 
Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = vec![(0, 1)].into_iter().collect(); status_cache.insert(&blockhash, sig, 0, ()); @@ -349,7 +435,7 @@ mod tests { #[test] fn test_find_sig_without_ancestor_fork() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.insert(&blockhash, sig, 1, ()); @@ -360,7 +446,7 @@ mod tests { #[test] fn test_find_sig_with_root_ancestor_fork() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.insert(&blockhash, sig, 0, ()); @@ -374,7 +460,7 @@ mod tests { #[test] fn test_insert_picks_latest_blockhash_fork() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = vec![(0, 0)].into_iter().collect(); status_cache.insert(&blockhash, sig, 0, ()); @@ -390,7 +476,7 @@ mod tests { #[test] fn test_root_expires() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.insert(&blockhash, sig, 0, ()); @@ -403,7 +489,7 @@ mod tests { #[test] fn test_clear_signatures_sigs_are_gone() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.insert(&blockhash, 
sig, 0, ()); @@ -415,7 +501,7 @@ mod tests { #[test] fn test_clear_signatures_insert_works() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.add_root(0); @@ -429,11 +515,11 @@ mod tests { #[test] fn test_signatures_slice() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); status_cache.clear(); status_cache.insert(&blockhash, sig, 0, ()); - let (_, index, sig_map) = status_cache.cache.get(&blockhash).unwrap(); + let (_, index, sig_map) = &*status_cache.cache.get(&blockhash).unwrap(); let sig_slice: &[u8; CACHED_KEY_SIZE] = arrayref::array_ref![sig.as_ref(), *index, CACHED_KEY_SIZE]; assert!(sig_map.get(sig_slice).is_some()); @@ -442,11 +528,11 @@ mod tests { #[test] fn test_slot_deltas() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); status_cache.clear(); status_cache.insert(&blockhash, sig, 0, ()); - assert!(status_cache.roots().contains(&0)); + assert!(status_cache.roots().collect::>().contains(&0)); let slot_deltas = status_cache.root_slot_deltas(); let cache = StatusCache::from_slot_deltas(&slot_deltas); assert_eq!(cache, status_cache); @@ -458,7 +544,7 @@ mod tests { #[test] fn test_roots_deltas() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let blockhash2 = hash(blockhash.as_ref()); status_cache.insert(&blockhash, sig, 0, ()); @@ -467,8 +553,7 @@ mod tests { for i in 0..(MAX_CACHE_ENTRIES + 1) { status_cache.add_root(i as u64); } - assert_eq!(status_cache.slot_deltas.len(), 
1); - assert!(status_cache.slot_deltas.contains_key(&1)); + assert!(status_cache.slot_deltas.get(&1).is_some()); let slot_deltas = status_cache.root_slot_deltas(); let cache = StatusCache::from_slot_deltas(&slot_deltas); assert_eq!(cache, status_cache); @@ -483,7 +568,7 @@ mod tests { #[test] fn test_clear_slot_signatures() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let blockhash2 = hash(blockhash.as_ref()); status_cache.insert(&blockhash, sig, 0, ()); @@ -512,26 +597,26 @@ mod tests { // Check that the slot delta for slot 0 is gone, but slot 1 still // exists - assert!(!status_cache.slot_deltas.contains_key(&0)); - assert!(status_cache.slot_deltas.contains_key(&1)); + assert!(!status_cache.slot_deltas.get(&0).is_some()); + assert!(status_cache.slot_deltas.get(&1).is_some()); // Clear slot 1 related data status_cache.clear_slot_entries(1); - assert!(status_cache.slot_deltas.is_empty()); + assert!(status_cache.slot_deltas.get(&0).is_none()); + assert!(status_cache.slot_deltas.get(&1).is_none()); assert!(status_cache .get_status(sig, &blockhash, &ancestors1) .is_none()); assert!(status_cache .get_status(sig, &blockhash2, &ancestors1) .is_none()); - assert!(status_cache.cache.is_empty()); } // Status cache uses a random key offset for each blockhash. Ensure that shorter // keys can still be used if the offset if greater than the key length. 
#[test] fn test_different_sized_keys() { - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let ancestors = vec![(0, 0)].into_iter().collect(); let blockhash = Hash::default(); for _ in 0..100 { diff --git a/sdk/frozen-abi/Cargo.toml b/sdk/frozen-abi/Cargo.toml index 8ac0d0282b3b02..fe8012e7b1bcf9 100644 --- a/sdk/frozen-abi/Cargo.toml +++ b/sdk/frozen-abi/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } [dependencies] bs58 = { workspace = true, features = ["alloc"] } bv = { workspace = true, features = ["serde"] } +dashmap = { workspace = true } log = { workspace = true, features = ["std"] } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } diff --git a/sdk/frozen-abi/src/abi_example.rs b/sdk/frozen-abi/src/abi_example.rs index 63b3c1d68c28d6..09e4bb656f538a 100644 --- a/sdk/frozen-abi/src/abi_example.rs +++ b/sdk/frozen-abi/src/abi_example.rs @@ -1,5 +1,6 @@ use { crate::abi_digester::{AbiDigester, DigestError, DigestResult}, + dashmap::DashMap, log::*, serde::Serialize, std::any::type_name, @@ -617,3 +618,18 @@ impl AbiExample for std::sync::OnceLock { Self::from(T::example()) } } + +#[cfg(not(target_os = "solana"))] +impl< + T: std::cmp::Eq + std::hash::Hash + AbiExample, + S: AbiExample, + H: std::hash::BuildHasher + Default + std::clone::Clone, + > AbiExample for DashMap +{ + fn example() -> Self { + info!("AbiExample for (DashMap): {}", type_name::()); + let map = DashMap::default(); + map.insert(T::example(), S::example()); + map + } +} diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index d9ddd42f43633d..65ad3faeda0b74 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -1231,6 +1231,7 @@ dependencies = [ "once_cell", "parking_lot_core 0.9.10", "rayon", + "serde", ] [[package]] @@ -6419,6 +6420,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_with", + "smallvec", "solana-accounts-db", 
"solana-address-lookup-table-program", "solana-bpf-loader-program",