From af7fd32f4c1b0b753e49b22bb04150ef086b77a2 Mon Sep 17 00:00:00 2001 From: Kevin Heavey <24635973+kevinheavey@users.noreply.github.com> Date: Wed, 25 Oct 2023 15:21:53 +0100 Subject: [PATCH 01/98] chore: fix some typos (#33833) * fix spelling of "retrieved" * fix spelling of "should" * fix spelling of "comparisons" --- accounts-db/src/accounts_db.rs | 2 +- accounts-db/src/accounts_hash.rs | 8 ++++---- ci/semver_bash/semver_test.sh | 12 ++++++------ cost-model/src/cost_tracker.rs | 2 +- docs/static/katex/katex.js | 2 +- docs/static/katex/katex.mjs | 2 +- sdk/program/src/system_instruction.rs | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 469de17c5bcc2d..f080abcb85556f 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5882,7 +5882,7 @@ impl AccountsDb { self.purge_slot_cache(*remove_slot, slot_cache); remove_cache_elapsed.stop(); remove_cache_elapsed_across_slots += remove_cache_elapsed.as_us(); - // Nobody else shoud have removed the slot cache entry yet + // Nobody else should have removed the slot cache entry yet assert!(self.accounts_cache.remove_slot(*remove_slot).is_some()); } else { self.purge_slot_storage(*remove_slot, purge_stats); diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 7aac99f5a6ffb6..6b853895d7b790 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1517,18 +1517,18 @@ mod tests { let len = combined.len(); assert_eq!(cumulative.total_count(), len); (0..combined.len()).for_each(|start| { - let mut retreived = Vec::default(); + let mut retrieved = Vec::default(); let mut cumulative_start = start; // read all data - while retreived.len() < (len - start) { + while retrieved.len() < (len - start) { let this_one = cumulative.get_slice(cumulative_start); - retreived.extend(this_one.iter()); + retrieved.extend(this_one.iter()); cumulative_start += this_one.len(); assert_ne!(0, this_one.len()); } assert_eq!( &combined[start..], - &retreived[..], + &retrieved[..], "permutation: {permutation}" ); }); diff --git a/ci/semver_bash/semver_test.sh b/ci/semver_bash/semver_test.sh index a0ff99461ec43b..a4cca97484de4f 100755 --- a/ci/semver_bash/semver_test.sh +++ b/ci/semver_bash/semver_test.sh @@ -21,7 +21,7 @@ echo "$A -> M:$MAJOR m:$MINOR p:$PATCH s:$SPECIAL. Expect M:1 m:3 p:2 s:" semverParseInto $E MAJOR MINOR PATCH SPECIAL echo "$E -> M:$MAJOR m:$MINOR p:$PATCH s:$SPECIAL. Expect M:1 m:3 p:2 s:a" -echo "Equality comparisions" +echo "Equality comparisons" semverEQ $A $A echo "$A == $A -> $?. Expect 0." @@ -32,7 +32,7 @@ semverGT $A $A echo "$A > $A -> $?. Expect 1." -echo "Major number comparisions" +echo "Major number comparisons" semverEQ $A $B echo "$A == $B -> $?. Expect 1." @@ -52,7 +52,7 @@ semverGT $B $A echo "$B > $A -> $?. Expect 0." -echo "Minor number comparisions" +echo "Minor number comparisons" semverEQ $A $C echo "$A == $C -> $?. Expect 1." @@ -71,7 +71,7 @@ echo "$C < $A -> $?. Expect 1." semverGT $C $A echo "$C > $A -> $?. Expect 0." -echo "patch number comparisions" +echo "patch number comparisons" semverEQ $A $D echo "$A == $D -> $?. Expect 1." @@ -90,7 +90,7 @@ echo "$D < $A -> $?. Expect 1." semverGT $D $A echo "$D > $A -> $?. Expect 0." -echo "special section vs no special comparisions" +echo "special section vs no special comparisons" semverEQ $A $E echo "$A == $E -> $?. Expect 1." @@ -109,7 +109,7 @@ echo "$E < $A -> $?. Expect 0." 
semverGT $E $A echo "$E > $A -> $?. Expect 1." -echo "special section vs special comparisions" +echo "special section vs special comparisons" semverEQ $E $F echo "$E == $F -> $?. Expect 1." diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index e4f1b917d74b26..efdd86512d2039 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -713,7 +713,7 @@ mod tests { } // case 3: add tx writes to [acct1, acct2], acct2 exceeds limit, should failed atomically, - // we shoudl still have: + // we should still have: // | acct1 | $cost | // | acct2 | $cost * 2 | // | acct3 | $cost | diff --git a/docs/static/katex/katex.js b/docs/static/katex/katex.js index 37fb0fa89676a4..e5d316691883bf 100644 --- a/docs/static/katex/katex.js +++ b/docs/static/katex/katex.js @@ -3674,7 +3674,7 @@ function assertSpan(group) { // '\expandafter\show\the\scriptscriptfont2' \ // '\stop' // -// The metrics themselves were retreived using the following commands: +// The metrics themselves were retrieved using the following commands: // // tftopl cmsy10 // tftopl cmsy7 diff --git a/docs/static/katex/katex.mjs b/docs/static/katex/katex.mjs index 76938229fff30c..488d2101c21232 100644 --- a/docs/static/katex/katex.mjs +++ b/docs/static/katex/katex.mjs @@ -3698,7 +3698,7 @@ var metricMap = { // '\expandafter\show\the\scriptscriptfont2' \ // '\stop' // -// The metrics themselves were retreived using the following commands: +// The metrics themselves were retrieved using the following commands: // // tftopl cmsy10 // tftopl cmsy7 diff --git a/sdk/program/src/system_instruction.rs b/sdk/program/src/system_instruction.rs index 0c8ba59e6e9428..74646f7fb7d331 100644 --- a/sdk/program/src/system_instruction.rs +++ b/sdk/program/src/system_instruction.rs @@ -1461,7 +1461,7 @@ pub fn create_nonce_account( /// /// When constructing a transaction that includes an `AdvanceNonceInstruction` /// the [`recent_blockhash`] must be treated differently — instead of -/// setting it to a recent blockhash, the value of the nonce must be retreived +/// setting it to a recent blockhash, the value of the nonce must be retrieved /// and deserialized from the nonce account, and that value specified as the /// "recent blockhash". A nonce account can be deserialized with the /// [`solana_rpc_client_nonce_utils::data_from_account`][dfa] function. 
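The documentation hunk just above is the one spot where `recent_blockhash` is deliberately not a recent blockhash, so it is worth spelling out what the durable-nonce contract means for callers. A reviewer's sketch follows (not part of the patch): `build_durable_nonce_tx` and its parameters are hypothetical, error handling is elided, and the accounts are assumed to be created, funded, and initialized elsewhere.

    use {
        solana_rpc_client::rpc_client::RpcClient,
        solana_rpc_client_nonce_utils::data_from_account,
        solana_sdk::{
            instruction::Instruction,
            message::Message,
            pubkey::Pubkey,
            signature::{Keypair, Signer},
            system_instruction,
            transaction::Transaction,
        },
    };

    // Sketch only: `payer`, `nonce_account`, and `nonce_authority` are
    // assumed to exist; the nonce account must already be initialized.
    fn build_durable_nonce_tx(
        rpc_client: &RpcClient,
        payer: &Keypair,
        nonce_account: &Pubkey,
        nonce_authority: &Keypair,
        mut instructions: Vec<Instruction>,
    ) -> Result<Transaction, Box<dyn std::error::Error>> {
        // Read the nonce account and deserialize the stored nonce; this
        // value stands in for the "recent blockhash".
        let account = rpc_client.get_account(nonce_account)?;
        let nonce_data = data_from_account(&account)?;

        // The advance-nonce instruction must be the first instruction in
        // the message for the runtime to honor the durable nonce.
        instructions.insert(
            0,
            system_instruction::advance_nonce_account(nonce_account, &nonce_authority.pubkey()),
        );

        let message = Message::new(&instructions, Some(&payer.pubkey()));
        let mut tx = Transaction::new_unsigned(message);
        tx.try_sign(&[payer, nonce_authority], nonce_data.blockhash())?;
        Ok(tx)
    }

The two non-obvious requirements are that `advance_nonce_account` comes first in the message, and that the nonce stored in the account, not a freshly fetched blockhash, is what gets passed to `try_sign`.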
From bd1080b26fedf0574afc7303912998fafe02d6da Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 25 Oct 2023 10:47:21 -0400 Subject: [PATCH 02/98] Adds AtomicAge to bucket map holder (#33841) --- accounts-db/src/accounts_index.rs | 12 ++++++------ accounts-db/src/bucket_map_holder.rs | 16 +++++++++------- accounts-db/src/bucket_map_holder_stats.rs | 8 ++++---- accounts-db/src/in_mem_accounts_index.rs | 12 ++++++------ 4 files changed, 25 insertions(+), 23 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index cd37df61693248..fc389116d09b71 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -3,7 +3,7 @@ use { accounts_index_storage::{AccountsIndexStorage, Startup}, accounts_partition::RentPayingAccountsByPartition, ancestors::Ancestors, - bucket_map_holder::{Age, BucketMapHolder}, + bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, contains::Contains, in_mem_accounts_index::{InMemAccountsIndex, InsertNewEntryResults, StartupStats}, inline_spl_token::{self, GenericTokenAccount}, @@ -36,7 +36,7 @@ use { }, path::PathBuf, sync::{ - atomic::{AtomicBool, AtomicU64, AtomicU8, AtomicUsize, Ordering}, + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, Mutex, OnceLock, RwLock, RwLockReadGuard, RwLockWriteGuard, }, }, @@ -238,7 +238,7 @@ pub struct AccountMapEntryMeta { /// true if entry in in-mem idx has changes and needs to be written to disk pub dirty: AtomicBool, /// 'age' at which this entry should be purged from the cache (implements lru) - pub age: AtomicU8, + pub age: AtomicAge, } impl AccountMapEntryMeta { @@ -248,7 +248,7 @@ impl AccountMapEntryMeta { ) -> Self { AccountMapEntryMeta { dirty: AtomicBool::new(true), - age: AtomicU8::new(storage.future_age_to_flush(is_cached)), + age: AtomicAge::new(storage.future_age_to_flush(is_cached)), } } pub fn new_clean + Into>( @@ -256,7 +256,7 @@ impl AccountMapEntryMeta { ) -> Self { AccountMapEntryMeta { dirty: AtomicBool::new(false), - age: AtomicU8::new(storage.future_age_to_flush(false)), + age: AtomicAge::new(storage.future_age_to_flush(false)), } } } @@ -2113,7 +2113,7 @@ pub mod tests { let (slot, account_info) = entry.slot_list.read().unwrap()[0]; let meta = AccountMapEntryMeta { dirty: AtomicBool::new(entry.dirty()), - age: AtomicU8::new(entry.age()), + age: AtomicAge::new(entry.age()), }; PreAllocatedAccountMapEntry::Entry(Arc::new(AccountMapEntryInner::new( vec![(slot, account_info)], diff --git a/accounts-db/src/bucket_map_holder.rs b/accounts-db/src/bucket_map_holder.rs index c5fb8e68729b08..fc7bf3ba4131f0 100644 --- a/accounts-db/src/bucket_map_holder.rs +++ b/accounts-db/src/bucket_map_holder.rs @@ -22,6 +22,8 @@ use { }, }; pub type Age = u8; +pub type AtomicAge = AtomicU8; +const _: () = assert!(std::mem::size_of::() == std::mem::size_of::()); const AGE_MS: u64 = DEFAULT_MS_PER_SLOT; // match one age per slot time @@ -37,12 +39,12 @@ pub struct BucketMapHolder + Into> /// Instead of accessing the single age and doing math each time, each value is incremented each time the age occurs, which is ~400ms. /// Callers can ask for the precomputed value they already want. 
/// rolling 'current' age - pub age: AtomicU8, + pub age: AtomicAge, /// rolling age that is 'ages_to_stay_in_cache' + 'age' - pub future_age_to_flush: AtomicU8, + pub future_age_to_flush: AtomicAge, /// rolling age that is effectively 'age' - 1 /// these items are expected to be flushed from the accounts write cache or otherwise modified before this age occurs - pub future_age_to_flush_cached: AtomicU8, + pub future_age_to_flush_cached: AtomicAge, pub stats: BucketMapHolderStats, @@ -255,11 +257,11 @@ impl + Into> BucketMapHolder ages_to_stay_in_cache, count_buckets_flushed: AtomicUsize::default(), // age = 0 - age: AtomicU8::default(), + age: AtomicAge::default(), // future age = age (=0) + ages_to_stay_in_cache - future_age_to_flush: AtomicU8::new(ages_to_stay_in_cache), + future_age_to_flush: AtomicAge::new(ages_to_stay_in_cache), // effectively age (0) - 1. So, the oldest possible age from 'now' - future_age_to_flush_cached: AtomicU8::new(0_u8.wrapping_sub(1)), + future_age_to_flush_cached: AtomicAge::new(Age::MAX), stats: BucketMapHolderStats::new(bins), wait_dirty_or_aged: Arc::default(), next_bucket_to_flush: AtomicUsize::new(0), @@ -442,7 +444,7 @@ pub mod tests { let test = BucketMapHolder::::new(bins, &Some(AccountsIndexConfig::default()), 1); assert_eq!(0, test.current_age()); assert_eq!(test.ages_to_stay_in_cache, test.future_age_to_flush(false)); - assert_eq!(u8::MAX, test.future_age_to_flush(true)); + assert_eq!(Age::MAX, test.future_age_to_flush(true)); (0..bins).for_each(|_| { test.bucket_flushed_at_current_age(false); }); diff --git a/accounts-db/src/bucket_map_holder_stats.rs b/accounts-db/src/bucket_map_holder_stats.rs index 4df611539d16ed..9b5cd20f0cd9b5 100644 --- a/accounts-db/src/bucket_map_holder_stats.rs +++ b/accounts-db/src/bucket_map_holder_stats.rs @@ -1,12 +1,12 @@ use { crate::{ accounts_index::{DiskIndexValue, IndexValue}, - bucket_map_holder::BucketMapHolder, + bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, }, solana_sdk::timing::AtomicInterval, std::{ fmt::Debug, - sync::atomic::{AtomicBool, AtomicU64, AtomicU8, AtomicUsize, Ordering}, + sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, }, }; @@ -52,7 +52,7 @@ pub struct BucketMapHolderStats { pub flush_entries_evicted_from_mem: AtomicU64, pub active_threads: AtomicU64, pub get_range_us: AtomicU64, - last_age: AtomicU8, + last_age: AtomicAge, last_ages_flushed: AtomicU64, pub flush_scan_us: AtomicU64, pub flush_update_us: AtomicU64, @@ -120,7 +120,7 @@ impl BucketMapHolderStats { let mut age_now = age_now as u64; if last_age > age_now { // age wrapped - age_now += u8::MAX as u64 + 1; + age_now += Age::MAX as u64 + 1; } let age_delta = age_now.saturating_sub(last_age); if age_delta > 0 { diff --git a/accounts-db/src/in_mem_accounts_index.rs b/accounts-db/src/in_mem_accounts_index.rs index 3d943956cab23d..1e8e8a8fd73822 100644 --- a/accounts-db/src/in_mem_accounts_index.rs +++ b/accounts-db/src/in_mem_accounts_index.rs @@ -4,7 +4,7 @@ use { AccountMapEntry, AccountMapEntryInner, AccountMapEntryMeta, DiskIndexValue, IndexValue, PreAllocatedAccountMapEntry, RefCount, SlotList, UpsertReclaim, ZeroLamport, }, - bucket_map_holder::{Age, BucketMapHolder}, + bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, bucket_map_holder_stats::BucketMapHolderStats, waitable_condvar::WaitableCondvar, }, @@ -17,7 +17,7 @@ use { fmt::Debug, ops::{Bound, RangeBounds, RangeInclusive}, sync::{ - atomic::{AtomicBool, AtomicU64, AtomicU8, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, Mutex, 
RwLock, RwLockWriteGuard, }, }, @@ -89,7 +89,7 @@ impl PossibleEvictions { // one instance of this represents one bin of the accounts index. pub struct InMemAccountsIndex + Into> { - last_age_flushed: AtomicU8, + last_age_flushed: AtomicAge, // backing store map_internal: RwLock>, @@ -115,7 +115,7 @@ pub struct InMemAccountsIndex + Into< /// how many more ages to skip before this bucket is flushed (as opposed to being skipped). /// When this reaches 0, this bucket is flushed. - remaining_ages_to_skip_flushing: AtomicU8, + remaining_ages_to_skip_flushing: AtomicAge, /// an individual bucket will evict its entries and write to disk every 1/NUM_AGES_TO_DISTRIBUTE_FLUSHES ages /// Higher numbers mean we flush less buckets/s @@ -181,12 +181,12 @@ impl + Into> InMemAccountsIndex Date: Wed, 25 Oct 2023 09:04:44 -0700 Subject: [PATCH 03/98] Append crate to ELF file while deploying program (#33849) * Append crate to ELF file while deploying program * review feedback --- cargo-registry/src/crate_handler.rs | 165 +++++++++++++++++++++------- cargo-registry/src/main.rs | 8 +- 2 files changed, 128 insertions(+), 45 deletions(-) diff --git a/cargo-registry/src/crate_handler.rs b/cargo-registry/src/crate_handler.rs index e95f51d752552b..c459279cbce043 100644 --- a/cargo-registry/src/crate_handler.rs +++ b/cargo-registry/src/crate_handler.rs @@ -31,6 +31,8 @@ use { tempfile::{tempdir, TempDir}, }; +const APPEND_CRATE_TO_ELF: bool = true; + pub(crate) type Error = Box; #[derive(Clone, Debug, Deserialize, Serialize)] @@ -99,6 +101,8 @@ pub(crate) struct Program { path: String, id: Pubkey, _tempdir: Arc, + meta: PackageMetaData, + crate_bytes: CrateTarGz, } impl Program { @@ -107,9 +111,17 @@ impl Program { return Err("Signer doesn't match program ID".into()); } - let program_data = read_and_verify_elf(self.path.as_ref()) + let mut program_data = read_and_verify_elf(self.path.as_ref()) .map_err(|e| format!("failed to read the program: {}", e))?; + if APPEND_CRATE_TO_ELF { + let program_id_str = Program::program_id_to_crate_name(self.id); + let crate_tar_gz = + CrateTarGz::new_rebased(&self.crate_bytes, &self.meta, &program_id_str)?; + let crate_len = u32::to_le_bytes(crate_tar_gz.0.len() as u32); + program_data.extend_from_slice(&crate_tar_gz.0); + program_data.extend_from_slice(&crate_len); + } let command_config = RPCCommandConfig::new(client.as_ref()); process_deploy_program( @@ -128,7 +140,7 @@ impl Program { Ok(()) } - fn dump(&self, client: Arc) -> Result<(), Error> { + fn dump(&mut self, client: Arc) -> Result<(), Error> { info!("Fetching program {:?}", self.id); let command_config = RPCCommandConfig::new(client.as_ref()); @@ -143,14 +155,42 @@ impl Program { format!("Failed to fetch the program: {}", e) })?; + if APPEND_CRATE_TO_ELF { + let Ok(buffer) = fs::read(&self.path) else { + return Err("Failed to read the program file".into()); + }; + + let data = Bytes::from(buffer); + + let data_len = data.len(); + let sizeof_length = size_of::(); + + // The crate length is at the tail of the data buffer, as 4 LE bytes. 
+ let length_le = data.slice(data_len.saturating_sub(sizeof_length)..data_len); + let length = + u32::from_le_bytes(length_le.deref().try_into().expect("Failed to read length")); + + let crate_start = data_len + .saturating_sub(sizeof_length) + .saturating_sub(length as usize); + let crate_end = data_len.saturating_sub(sizeof_length); + + let crate_bytes = CrateTarGz(Bytes::copy_from_slice(&data[crate_start..crate_end])); + self.crate_bytes = crate_bytes; + } Ok(()) } pub(crate) fn crate_name_to_program_id(crate_name: &str) -> Option { - hex::decode(crate_name) + let (_, id_str) = crate_name.split_once('-')?; + hex::decode(id_str) .ok() .and_then(|bytes| Pubkey::try_from(bytes).ok()) } + + fn program_id_to_crate_name(id: Pubkey) -> String { + format!("sol-{}", hex::encode(id.to_bytes())) + } } impl From<&UnpackedCrate> for Program { @@ -159,20 +199,23 @@ impl From<&UnpackedCrate> for Program { path: value.program_path.clone(), id: value.program_id, _tempdir: value.tempdir.clone(), + meta: value.meta.clone(), + crate_bytes: value.crate_bytes.clone(), } } } -pub(crate) struct CratePackage(pub(crate) Bytes); +#[derive(Clone, Default)] +pub(crate) struct CrateTarGz(pub(crate) Bytes); -impl From for Result { - fn from(value: UnpackedCrate) -> Self { +impl CrateTarGz { + fn new(value: UnpackedCrate) -> Result { let mut archive = Builder::new(Vec::new()); archive.mode(HeaderMode::Deterministic); - let base_path = UnpackedCrate::make_path(&value.tempdir, &value.meta, "out"); + let base_path = UnpackedCrate::make_path(&value.tempdir, &value.meta, ""); archive.append_dir_all( - format!("{}-{}/out", value.meta.name, value.meta.vers), + format!("{}-{}/", value.meta.name, value.meta.vers), base_path, )?; let data = archive.into_inner()?; @@ -182,10 +225,28 @@ impl From for Result { let mut zipped_data = Vec::new(); encoder.read_to_end(&mut zipped_data)?; - Ok(CratePackage(Bytes::from(zipped_data))) + Ok(CrateTarGz(Bytes::from(zipped_data))) + } + + fn new_rebased(&self, meta: &PackageMetaData, target_base: &str) -> Result { + let mut unpacked = UnpackedCrate::decompress(self.clone(), meta.clone())?; + + let name = Program::program_id_to_crate_name(unpacked.program_id); + UnpackedCrate::fixup_toml(&unpacked.tempdir, "Cargo.toml.orig", &unpacked.meta, &name)?; + UnpackedCrate::fixup_toml(&unpacked.tempdir, "Cargo.toml", &unpacked.meta, &name)?; + + let source_path = UnpackedCrate::make_path(&unpacked.tempdir, &unpacked.meta, ""); + unpacked.meta.name = target_base.to_string(); + let target_path = UnpackedCrate::make_path(&unpacked.tempdir, &unpacked.meta, ""); + fs::rename(source_path, target_path.clone()) + .map_err(|_| "Failed to rename the crate folder")?; + + Self::new(unpacked) } } +pub(crate) struct CratePackage(pub(crate) Bytes); + pub(crate) struct UnpackedCrate { meta: PackageMetaData, cksum: String, @@ -193,19 +254,14 @@ pub(crate) struct UnpackedCrate { program_path: String, program_id: Pubkey, keypair: Option, + crate_bytes: CrateTarGz, } -impl From for Result { - fn from(value: CratePackage) -> Self { - let bytes = value.0; - let (meta, offset) = PackageMetaData::new(&bytes)?; - - let (_crate_file_length, length_size) = - PackageMetaData::read_u32_length(&bytes.slice(offset..))?; - let crate_bytes = bytes.slice(offset.saturating_add(length_size)..); - let cksum = format!("{:x}", Sha256::digest(&crate_bytes)); +impl UnpackedCrate { + fn decompress(crate_bytes: CrateTarGz, meta: PackageMetaData) -> Result { + let cksum = format!("{:x}", Sha256::digest(&crate_bytes.0)); - let decoder = 
GzDecoder::new(crate_bytes.as_ref()); + let decoder = GzDecoder::new(crate_bytes.0.as_ref()); let mut archive = Archive::new(decoder); let tempdir = tempdir()?; @@ -213,10 +269,6 @@ impl From for Result { let lib_name = UnpackedCrate::program_library_name(&tempdir, &meta)?; - let base_path = UnpackedCrate::make_path(&tempdir, &meta, "out"); - fs::create_dir_all(base_path) - .map_err(|_| "Failed to create the base directory for output")?; - let program_path = UnpackedCrate::make_path(&tempdir, &meta, format!("out/{}.so", lib_name)) .into_os_string() @@ -237,8 +289,19 @@ impl From for Result { program_path, program_id: keypair.pubkey(), keypair: Some(keypair), + crate_bytes, }) } + + pub(crate) fn unpack(value: CratePackage) -> Result { + let bytes = value.0; + let (meta, offset) = PackageMetaData::new(&bytes)?; + + let (_crate_file_length, length_size) = + PackageMetaData::read_u32_length(&bytes.slice(offset..))?; + let crate_bytes = CrateTarGz(bytes.slice(offset.saturating_add(length_size)..)); + UnpackedCrate::decompress(crate_bytes, meta) + } } impl UnpackedCrate { @@ -262,36 +325,37 @@ impl UnpackedCrate { } pub(crate) fn fetch_index(id: Pubkey, client: Arc) -> Result { - let (_program, unpacked_crate) = Self::fetch_program(id, client)?; - let mut entry: IndexEntry = unpacked_crate.meta.clone().into(); - - let packed_crate: Result = UnpackedCrate::into(unpacked_crate); - let packed_crate = packed_crate?; - + let (packed_crate, meta) = Self::fetch(id, "0.1.0", client)?; + let mut entry: IndexEntry = meta.into(); entry.cksum = format!("{:x}", Sha256::digest(&packed_crate.0)); Ok(entry) } - pub(crate) fn fetch(id: Pubkey, client: Arc) -> Result { - let (_program, unpacked_crate) = Self::fetch_program(id, client)?; - UnpackedCrate::into(unpacked_crate) - } - - fn fetch_program(id: Pubkey, client: Arc) -> Result<(Program, UnpackedCrate), Error> { - let crate_obj = Self::new_empty(id)?; - let program = Program::from(&crate_obj); + pub(crate) fn fetch( + id: Pubkey, + vers: &str, + client: Arc, + ) -> Result<(CrateTarGz, PackageMetaData), Error> { + let crate_obj = Self::new_empty(id, vers)?; + let mut program = Program::from(&crate_obj); program.dump(client)?; // Decompile the program // Generate a Cargo.toml - Ok((program, crate_obj)) + let meta = crate_obj.meta.clone(); + + if APPEND_CRATE_TO_ELF { + Ok((program.crate_bytes, meta)) + } else { + CrateTarGz::new(crate_obj).map(|file| (file, meta)) + } } - fn new_empty(id: Pubkey) -> Result { + fn new_empty(id: Pubkey, vers: &str) -> Result { let meta = PackageMetaData { - name: hex::encode(id.to_bytes()), - vers: "0.1.0".to_string(), + name: Program::program_id_to_crate_name(id), + vers: vers.to_string(), deps: vec![], features: BTreeMap::new(), authors: vec![], @@ -328,6 +392,7 @@ impl UnpackedCrate { program_path, program_id: id, keypair: None, + crate_bytes: CrateTarGz::default(), }) } @@ -348,4 +413,22 @@ impl UnpackedCrate { .ok_or("Failed to get module name")?; Ok(library_name.to_string()) } + + fn fixup_toml( + tempdir: &TempDir, + cargo_toml_name: &str, + meta: &PackageMetaData, + name: &str, + ) -> Result<(), Error> { + let toml_orig_path = Self::make_path(tempdir, meta, cargo_toml_name); + let toml_content = fs::read_to_string(&toml_orig_path)?; + let mut toml = toml_content.parse::()?; + toml.get_mut("package") + .and_then(|v| v.get_mut("name")) + .map(|v| *v = toml::Value::String(name.to_string())) + .ok_or("Failed to set package name")?; + + fs::write(toml_orig_path, toml.to_string())?; + Ok(()) + } } diff --git 
a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs index 288e7fc9e69388..c98fcb23bd364c 100644 --- a/cargo-registry/src/main.rs +++ b/cargo-registry/src/main.rs @@ -38,7 +38,7 @@ impl CargoRegistryService { match bytes { Ok(data) => { - let Ok(crate_object) = CratePackage(data).into() else { + let Ok(crate_object) = UnpackedCrate::unpack(CratePackage(data)) else { return response_builder::error_response( hyper::StatusCode::INTERNAL_SERVER_ERROR, "Failed to parse the crate information", @@ -83,7 +83,7 @@ impl CargoRegistryService { _request: &hyper::Request, client: Arc, ) -> hyper::Response { - let Some((path, crate_name, _version)) = Self::get_crate_name_and_version(path) else { + let Some((path, crate_name, version)) = Self::get_crate_name_and_version(path) else { return response_builder::error_in_parsing(); }; @@ -92,10 +92,10 @@ impl CargoRegistryService { } let package = Program::crate_name_to_program_id(crate_name) - .and_then(|id| UnpackedCrate::fetch(id, client).ok()); + .and_then(|id| UnpackedCrate::fetch(id, version, client).ok()); // Return the package to the caller in the response - if let Some(package) = package { + if let Some((package, _meta)) = package { response_builder::success_response_bytes(package.0) } else { response_builder::error_response( From 3e21361534b9a7900f1be021e9f9f79e2bce83c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Oct 2023 17:43:58 +0000 Subject: [PATCH 04/98] build(deps): bump rustls from 0.21.7 to 0.21.8 (#33855) * build(deps): bump rustls from 0.21.7 to 0.21.8 Bumps [rustls](https://github.com/rustls/rustls) from 0.21.7 to 0.21.8. - [Release notes](https://github.com/rustls/rustls/releases) - [Commits](https://github.com/rustls/rustls/compare/v/0.21.7...v/0.21.8) --- updated-dependencies: - dependency-name: rustls dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 44 ++++++++++++++++++++++++++++++----------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 44 ++++++++++++++++++++++++++++++----------- 3 files changed, 65 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f33fe8a6b8b6f..d1ba79b8b79e50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4125,7 +4125,7 @@ checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", "rustls", "rustls-native-certs", @@ -4311,7 +4311,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time", "yasna", ] @@ -4464,11 +4464,25 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi 0.3.9", ] +[[package]] +name = "ring" +version = "0.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babe80d5c16becf6594aa32ad2be8fe08498e7ae60b77de8df700e67f191d7e" +dependencies = [ + "cc", + "getrandom 0.2.10", + "libc", + "spin 0.9.2", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -4563,12 +4577,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", - "ring", + "ring 0.17.3", "rustls-webpki", "sct", ] @@ -4605,12 +4619,12 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.4" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.3", + "untrusted 0.9.0", ] [[package]] @@ -4688,8 +4702,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -8711,6 +8725,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "uriparse" version = "0.6.4" diff --git a/Cargo.toml b/Cargo.toml index 6a95693e26bc31..673a8439f21412 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -283,7 +283,7 @@ reqwest = { version = "0.11.22", default-features = false } rolling-file = "0.2.0" rpassword = "7.2" rustc_version = "0.4" -rustls = { version = "0.21.7", default-features = false, features = ["quic"] } +rustls = { version = "0.21.8", default-features = false, features = ["quic"] } rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.20" diff --git 
a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1b789fb7f72299..d19c9c0e5be045 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3630,7 +3630,7 @@ checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", "rustls", "rustls-native-certs", @@ -3769,7 +3769,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time", "yasna", ] @@ -3907,11 +3907,25 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi 0.3.9", ] +[[package]] +name = "ring" +version = "0.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babe80d5c16becf6594aa32ad2be8fe08498e7ae60b77de8df700e67f191d7e" +dependencies = [ + "cc", + "getrandom 0.2.10", + "libc", + "spin 0.9.3", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -3997,12 +4011,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", - "ring", + "ring 0.17.3", "rustls-webpki", "sct", ] @@ -4039,12 +4053,12 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.4" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.3", + "untrusted 0.9.0", ] [[package]] @@ -4110,8 +4124,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -7515,6 +7529,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "uriparse" version = "0.6.4" From a851670d54aab54b24d097d920c1314613457cca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Oct 2023 17:44:18 +0000 Subject: [PATCH 05/98] build(deps): bump bytecount from 0.6.4 to 0.6.7 (#33857) Bumps [bytecount](https://github.com/llogiq/bytecount) from 0.6.4 to 0.6.7. - [Commits](https://github.com/llogiq/bytecount/commits) --- updated-dependencies: - dependency-name: bytecount dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1ba79b8b79e50..9b4798f85716f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -864,9 +864,9 @@ dependencies = [ [[package]] name = "bytecount" -version = "0.6.4" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad152d03a2c813c80bb94fedbf3a3f02b28f793e39e7c214c8a0bcc196343de7" +checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" [[package]] name = "bytemuck" diff --git a/Cargo.toml b/Cargo.toml index 673a8439f21412..3cd5ee29800d36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -157,7 +157,7 @@ borsh = "0.10.3" bs58 = "0.4.0" bv = "0.11.1" byte-unit = "4.0.19" -bytecount = "0.6.4" +bytecount = "0.6.7" bytemuck = "1.14.0" byteorder = "1.5.0" bytes = "1.5" From e555a61c78e92e40eae9e883e5c72149b9ccfef5 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 25 Oct 2023 18:59:14 +0000 Subject: [PATCH 06/98] adds metrics to repair QUIC endpoint (#33818) --- core/src/repair/quic_endpoint.rs | 332 +++++++++++++++++++++++++++++-- 1 file changed, 311 insertions(+), 21 deletions(-) diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index 031de37f94b5a0..2c5e954a626c74 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -6,8 +6,8 @@ use { log::error, quinn::{ ClientConfig, ConnectError, Connecting, Connection, ConnectionError, Endpoint, - EndpointConfig, ReadToEndError, RecvStream, SendStream, ServerConfig, TokioRuntime, - TransportConfig, VarInt, WriteError, + EndpointConfig, ReadError, ReadToEndError, RecvStream, SendStream, ServerConfig, + TokioRuntime, TransportConfig, VarInt, WriteError, }, rcgen::RcgenError, rustls::{Certificate, PrivateKey}, @@ -24,7 +24,7 @@ use { io::{Cursor, Error as IoError}, net::{IpAddr, SocketAddr, UdpSocket}, sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, RwLock, }, time::Duration, @@ -82,16 +82,14 @@ pub struct RemoteRequest { #[derive(Error, Debug)] #[allow(clippy::enum_variant_names)] pub(crate) enum Error { - #[error(transparent)] - BincodeError(#[from] bincode::Error), #[error(transparent)] CertificateError(#[from] RcgenError), + #[error("Channel Send Error")] + ChannelSendError, #[error(transparent)] ConnectError(#[from] ConnectError), #[error(transparent)] ConnectionError(#[from] ConnectionError), - #[error("Channel Send Error")] - ChannelSendError, #[error("Invalid Identity: {0:?}")] InvalidIdentity(SocketAddr), #[error(transparent)] @@ -103,9 +101,15 @@ pub(crate) enum Error { #[error("read_to_end Timeout")] ReadToEndTimeout, #[error(transparent)] - WriteError(#[from] WriteError), - #[error(transparent)] TlsError(#[from] rustls::Error), + #[error(transparent)] + WriteError(#[from] WriteError), +} + +macro_rules! 
add_metric { + ($metric: expr) => {{ + $metric.fetch_add(1, Ordering::Relaxed); + }}; } #[allow(clippy::type_complexity)] @@ -207,8 +211,11 @@ async fn run_server( router: Arc>>>, cache: Arc>>, ) { + let stats = Arc::::default(); + let report_metrics_task = + tokio::task::spawn(report_metrics_task("repair_quic_server", stats.clone())); while let Some(connecting) = endpoint.accept().await { - tokio::task::spawn(handle_connecting_error( + tokio::task::spawn(handle_connecting_task( endpoint.clone(), connecting, remote_request_sender.clone(), @@ -216,8 +223,10 @@ async fn run_server( prune_cache_pending.clone(), router.clone(), cache.clone(), + stats.clone(), )); } + report_metrics_task.abort(); } async fn run_client( @@ -229,14 +238,17 @@ async fn run_client( router: Arc>>>, cache: Arc>>, ) { + let stats = Arc::::default(); + let report_metrics_task = + tokio::task::spawn(report_metrics_task("repair_quic_client", stats.clone())); while let Some(request) = receiver.recv().await { - let Some(request) = try_route_request(request, &*router.read().await) else { + let Some(request) = try_route_request(request, &*router.read().await, &stats) else { continue; }; let remote_address = request.remote_address; let receiver = { let mut router = router.write().await; - let Some(request) = try_route_request(request, &router) else { + let Some(request) = try_route_request(request, &router, &stats) else { continue; }; let (sender, receiver) = tokio::sync::mpsc::channel(ROUTER_CHANNEL_BUFFER); @@ -253,11 +265,13 @@ async fn run_client( prune_cache_pending.clone(), router.clone(), cache.clone(), + stats.clone(), )); } close_quic_endpoint(&endpoint); // Drop sender channels to unblock threads waiting on the receiving end. router.write().await.clear(); + report_metrics_task.abort(); } // Routes the local request to respective channel. 
Drops the request if the @@ -266,13 +280,15 @@ async fn run_client( fn try_route_request( request: LocalRequest, router: &HashMap>, + stats: &RepairQuicStats, ) -> Option { match router.get(&request.remote_address) { None => Some(request), Some(sender) => match sender.try_send(request) { Ok(()) => None, Err(TrySendError::Full(request)) => { - error!("TrySendError::Full {}", request.remote_address); + debug!("TrySendError::Full {}", request.remote_address); + add_metric!(stats.router_try_send_error_full); None } Err(TrySendError::Closed(request)) => Some(request), @@ -280,7 +296,7 @@ fn try_route_request( } } -async fn handle_connecting_error( +async fn handle_connecting_task( endpoint: Endpoint, connecting: Connecting, remote_request_sender: Sender, @@ -288,6 +304,7 @@ async fn handle_connecting_error( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) { if let Err(err) = handle_connecting( endpoint, @@ -297,10 +314,12 @@ async fn handle_connecting_error( prune_cache_pending, router, cache, + stats.clone(), ) .await { - error!("handle_connecting: {err:?}"); + debug!("handle_connecting: {err:?}"); + record_error(&err, &stats); } } @@ -312,6 +331,7 @@ async fn handle_connecting( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) -> Result<(), Error> { let connection = connecting.await?; let remote_address = connection.remote_address(); @@ -332,6 +352,7 @@ async fn handle_connecting( prune_cache_pending, router, cache, + stats, ) .await; Ok(()) @@ -349,6 +370,7 @@ async fn handle_connection( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) { cache_connection( remote_pubkey, @@ -361,8 +383,10 @@ async fn handle_connection( .await; let send_requests_task = tokio::task::spawn(send_requests_task( endpoint.clone(), + remote_address, connection.clone(), receiver, + stats.clone(), )); let recv_requests_task = tokio::task::spawn(recv_requests_task( endpoint, @@ -370,11 +394,13 @@ async fn handle_connection( remote_pubkey, connection.clone(), remote_request_sender, + stats.clone(), )); match futures::future::try_join(send_requests_task, recv_requests_task).await { Err(err) => error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"), - Ok(((), Err(ref err))) => { - error!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + Ok(((), Err(err))) => { + debug!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + record_error(&err, &stats); } Ok(((), Ok(()))) => (), } @@ -392,6 +418,7 @@ async fn recv_requests_task( remote_pubkey: Pubkey, connection: Connection, remote_request_sender: Sender, + stats: Arc, ) -> Result<(), Error> { loop { let (send_stream, recv_stream) = connection.accept_bi().await?; @@ -402,6 +429,7 @@ async fn recv_requests_task( send_stream, recv_stream, remote_request_sender.clone(), + stats.clone(), )); } } @@ -413,6 +441,7 @@ async fn handle_streams_task( send_stream: SendStream, recv_stream: RecvStream, remote_request_sender: Sender, + stats: Arc, ) { if let Err(err) = handle_streams( &endpoint, @@ -424,7 +453,8 @@ async fn handle_streams_task( ) .await { - error!("handle_stream: {remote_address}, {remote_pubkey}, {err:?}"); + debug!("handle_stream: {remote_address}, {remote_pubkey}, {err:?}"); + record_error(&err, &stats); } } @@ -469,21 +499,32 @@ async fn handle_streams( async fn send_requests_task( endpoint: Endpoint, + remote_address: SocketAddr, connection: Connection, mut receiver: AsyncReceiver, + stats: Arc, ) { while let Some(request) = receiver.recv().await { 
tokio::task::spawn(send_request_task( endpoint.clone(), + remote_address, connection.clone(), request, + stats.clone(), )); } } -async fn send_request_task(endpoint: Endpoint, connection: Connection, request: LocalRequest) { +async fn send_request_task( + endpoint: Endpoint, + remote_address: SocketAddr, + connection: Connection, + request: LocalRequest, + stats: Arc, +) { if let Err(err) = send_request(endpoint, connection, request).await { - error!("send_request: {err:?}") + debug!("send_request: {remote_address}, {err:?}"); + record_error(&err, &stats); } } @@ -542,6 +583,7 @@ async fn make_connection_task( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) { if let Err(err) = make_connection( endpoint, @@ -552,10 +594,12 @@ async fn make_connection_task( prune_cache_pending, router, cache, + stats.clone(), ) .await { - error!("make_connection: {remote_address}, {err:?}"); + debug!("make_connection: {remote_address}, {err:?}"); + record_error(&err, &stats); } } @@ -568,6 +612,7 @@ async fn make_connection( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) -> Result<(), Error> { let connection = endpoint .connect(remote_address, CONNECT_SERVER_NAME)? @@ -583,6 +628,7 @@ async fn make_connection( prune_cache_pending, router, cache, + stats, ) .await; Ok(()) @@ -698,6 +744,250 @@ impl From> for Error { } } +#[derive(Default)] +struct RepairQuicStats { + connect_error_invalid_remote_address: AtomicU64, + connect_error_other: AtomicU64, + connect_error_too_many_connections: AtomicU64, + connection_error_application_closed: AtomicU64, + connection_error_connection_closed: AtomicU64, + connection_error_locally_closed: AtomicU64, + connection_error_reset: AtomicU64, + connection_error_timed_out: AtomicU64, + connection_error_transport_error: AtomicU64, + connection_error_version_mismatch: AtomicU64, + invalid_identity: AtomicU64, + no_response_received: AtomicU64, + read_to_end_error_connection_lost: AtomicU64, + read_to_end_error_illegal_ordered_read: AtomicU64, + read_to_end_error_reset: AtomicU64, + read_to_end_error_too_long: AtomicU64, + read_to_end_error_unknown_stream: AtomicU64, + read_to_end_error_zero_rtt_rejected: AtomicU64, + read_to_end_timeout: AtomicU64, + router_try_send_error_full: AtomicU64, + write_error_connection_lost: AtomicU64, + write_error_stopped: AtomicU64, + write_error_unknown_stream: AtomicU64, + write_error_zero_rtt_rejected: AtomicU64, +} + +async fn report_metrics_task(name: &'static str, stats: Arc) { + const METRICS_SUBMIT_CADENCE: Duration = Duration::from_secs(2); + loop { + tokio::time::sleep(METRICS_SUBMIT_CADENCE).await; + report_metrics(name, &stats); + } +} + +fn record_error(err: &Error, stats: &RepairQuicStats) { + match err { + Error::CertificateError(_) => (), + Error::ChannelSendError => (), + Error::ConnectError(ConnectError::EndpointStopping) => { + add_metric!(stats.connect_error_other) + } + Error::ConnectError(ConnectError::TooManyConnections) => { + add_metric!(stats.connect_error_too_many_connections) + } + Error::ConnectError(ConnectError::InvalidDnsName(_)) => { + add_metric!(stats.connect_error_other) + } + Error::ConnectError(ConnectError::InvalidRemoteAddress(_)) => { + add_metric!(stats.connect_error_invalid_remote_address) + } + Error::ConnectError(ConnectError::NoDefaultClientConfig) => { + add_metric!(stats.connect_error_other) + } + Error::ConnectError(ConnectError::UnsupportedVersion) => { + add_metric!(stats.connect_error_other) + } + 
Error::ConnectionError(ConnectionError::VersionMismatch) => { + add_metric!(stats.connection_error_version_mismatch) + } + Error::ConnectionError(ConnectionError::TransportError(_)) => { + add_metric!(stats.connection_error_transport_error) + } + Error::ConnectionError(ConnectionError::ConnectionClosed(_)) => { + add_metric!(stats.connection_error_connection_closed) + } + Error::ConnectionError(ConnectionError::ApplicationClosed(_)) => { + add_metric!(stats.connection_error_application_closed) + } + Error::ConnectionError(ConnectionError::Reset) => add_metric!(stats.connection_error_reset), + Error::ConnectionError(ConnectionError::TimedOut) => { + add_metric!(stats.connection_error_timed_out) + } + Error::ConnectionError(ConnectionError::LocallyClosed) => { + add_metric!(stats.connection_error_locally_closed) + } + Error::InvalidIdentity(_) => add_metric!(stats.invalid_identity), + Error::IoError(_) => (), + Error::NoResponseReceived => add_metric!(stats.no_response_received), + Error::ReadToEndError(ReadToEndError::Read(ReadError::Reset(_))) => { + add_metric!(stats.read_to_end_error_reset) + } + Error::ReadToEndError(ReadToEndError::Read(ReadError::ConnectionLost(_))) => { + add_metric!(stats.read_to_end_error_connection_lost) + } + Error::ReadToEndError(ReadToEndError::Read(ReadError::UnknownStream)) => { + add_metric!(stats.read_to_end_error_unknown_stream) + } + Error::ReadToEndError(ReadToEndError::Read(ReadError::IllegalOrderedRead)) => { + add_metric!(stats.read_to_end_error_illegal_ordered_read) + } + Error::ReadToEndError(ReadToEndError::Read(ReadError::ZeroRttRejected)) => { + add_metric!(stats.read_to_end_error_zero_rtt_rejected) + } + Error::ReadToEndError(ReadToEndError::TooLong) => { + add_metric!(stats.read_to_end_error_too_long) + } + Error::ReadToEndTimeout => add_metric!(stats.read_to_end_timeout), + Error::TlsError(_) => (), + Error::WriteError(WriteError::Stopped(_)) => add_metric!(stats.write_error_stopped), + Error::WriteError(WriteError::ConnectionLost(_)) => { + add_metric!(stats.write_error_connection_lost) + } + Error::WriteError(WriteError::UnknownStream) => { + add_metric!(stats.write_error_unknown_stream) + } + Error::WriteError(WriteError::ZeroRttRejected) => { + add_metric!(stats.write_error_zero_rtt_rejected) + } + } +} + +fn report_metrics(name: &'static str, stats: &RepairQuicStats) { + macro_rules! 
reset_metric { + ($metric: expr) => { + $metric.swap(0, Ordering::Relaxed) + }; + } + datapoint_info!( + name, + ( + "connect_error_invalid_remote_address", + reset_metric!(stats.connect_error_invalid_remote_address), + i64 + ), + ( + "connect_error_other", + reset_metric!(stats.connect_error_other), + i64 + ), + ( + "connect_error_too_many_connections", + reset_metric!(stats.connect_error_too_many_connections), + i64 + ), + ( + "connection_error_application_closed", + reset_metric!(stats.connection_error_application_closed), + i64 + ), + ( + "connection_error_connection_closed", + reset_metric!(stats.connection_error_connection_closed), + i64 + ), + ( + "connection_error_locally_closed", + reset_metric!(stats.connection_error_locally_closed), + i64 + ), + ( + "connection_error_reset", + reset_metric!(stats.connection_error_reset), + i64 + ), + ( + "connection_error_timed_out", + reset_metric!(stats.connection_error_timed_out), + i64 + ), + ( + "connection_error_transport_error", + reset_metric!(stats.connection_error_transport_error), + i64 + ), + ( + "connection_error_version_mismatch", + reset_metric!(stats.connection_error_version_mismatch), + i64 + ), + ( + "invalid_identity", + reset_metric!(stats.invalid_identity), + i64 + ), + ( + "no_response_received", + reset_metric!(stats.no_response_received), + i64 + ), + ( + "read_to_end_error_connection_lost", + reset_metric!(stats.read_to_end_error_connection_lost), + i64 + ), + ( + "read_to_end_error_illegal_ordered_read", + reset_metric!(stats.read_to_end_error_illegal_ordered_read), + i64 + ), + ( + "read_to_end_error_reset", + reset_metric!(stats.read_to_end_error_reset), + i64 + ), + ( + "read_to_end_error_too_long", + reset_metric!(stats.read_to_end_error_too_long), + i64 + ), + ( + "read_to_end_error_unknown_stream", + reset_metric!(stats.read_to_end_error_unknown_stream), + i64 + ), + ( + "read_to_end_error_zero_rtt_rejected", + reset_metric!(stats.read_to_end_error_zero_rtt_rejected), + i64 + ), + ( + "read_to_end_timeout", + reset_metric!(stats.read_to_end_timeout), + i64 + ), + ( + "router_try_send_error_full", + reset_metric!(stats.router_try_send_error_full), + i64 + ), + ( + "write_error_connection_lost", + reset_metric!(stats.write_error_connection_lost), + i64 + ), + ( + "write_error_stopped", + reset_metric!(stats.write_error_stopped), + i64 + ), + ( + "write_error_unknown_stream", + reset_metric!(stats.write_error_unknown_stream), + i64 + ), + ( + "write_error_zero_rtt_rejected", + reset_metric!(stats.write_error_zero_rtt_rejected), + i64 + ), + ); +} + #[cfg(test)] mod tests { use { From 70107e2196c12ef46d73788c55502ffd3fe2080e Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Wed, 25 Oct 2023 12:27:57 -0700 Subject: [PATCH 07/98] Retrieve crate version from cargo-registry package (#33867) --- cargo-registry/src/crate_handler.rs | 33 +++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/cargo-registry/src/crate_handler.rs b/cargo-registry/src/crate_handler.rs index c459279cbce043..043c69252c4e17 100644 --- a/cargo-registry/src/crate_handler.rs +++ b/cargo-registry/src/crate_handler.rs @@ -243,6 +243,33 @@ impl CrateTarGz { Self::new(unpacked) } + + fn version(&self) -> String { + let decoder = GzDecoder::new(self.0.as_ref()); + let mut archive = Archive::new(decoder); + + if let Some(Ok(entry)) = archive + .entries() + .ok() + .and_then(|mut entries| entries.nth(0)) + { + if let Ok(path) = entry.path() { + if let Some(path_str) = path.to_str() { + if let Some((_, vers)) = 
path_str.rsplit_once('-') { + let mut version = vers.to_string(); + // Removing trailing '/' + if version.ends_with('/') { + version.pop(); + } + return version; + } + } + } + } + + // Placeholder version. + "0.1.0".to_string() + } } pub(crate) struct CratePackage(pub(crate) Bytes); @@ -325,7 +352,7 @@ impl UnpackedCrate { } pub(crate) fn fetch_index(id: Pubkey, client: Arc) -> Result { - let (packed_crate, meta) = Self::fetch(id, "0.1.0", client)?; + let (packed_crate, meta) = Self::fetch(id, "", client)?; let mut entry: IndexEntry = meta.into(); entry.cksum = format!("{:x}", Sha256::digest(&packed_crate.0)); Ok(entry) @@ -343,9 +370,11 @@ impl UnpackedCrate { // Decompile the program // Generate a Cargo.toml - let meta = crate_obj.meta.clone(); + let mut meta = crate_obj.meta.clone(); if APPEND_CRATE_TO_ELF { + let version = program.crate_bytes.version(); + meta.vers = version; Ok((program.crate_bytes, meta)) } else { CrateTarGz::new(crate_obj).map(|file| (file, meta)) From a799a90a62958dcf91080671eb0bc70862f60433 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 26 Oct 2023 10:34:07 +0200 Subject: [PATCH 08/98] Update upload_confirmed_blocks() return value when no blocks to upload (#33861) upload_confirmed_blocks() states that it will return the passed in ending_slot when there are no blocks to upload. This is enforced in one early return but not the other. The result is that BigTableUploadService could potentially get stuck in a loop of trying to upload the same slot. While this case seems to be caused when an operator restarts their node without --no-snapshot-fetch (which can cause a gap in blockstore), we can still be friendly and allow them to break out of this loop. --- ledger/src/bigtable_upload.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index 3db5f8eebbe863..be28ee8a0703d8 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -138,7 +138,7 @@ pub async fn upload_confirmed_blocks( "No blocks between {} and {} need to be uploaded to bigtable", starting_slot, ending_slot ); - return Ok(last_blockstore_slot); + return Ok(ending_slot); } let last_slot = *blocks_to_upload.last().unwrap(); info!( From 22503f0ae9f9de9071f05d9049d3de1bf9c88dff Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 26 Oct 2023 09:21:20 -0600 Subject: [PATCH 09/98] BigtableUploadService: increment start_slot to prevent rechecks (#33870) Increment start_slot --- ledger/src/bigtable_upload_service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ledger/src/bigtable_upload_service.rs b/ledger/src/bigtable_upload_service.rs index 3149eb96a32d8e..0ffb02aac2475c 100644 --- a/ledger/src/bigtable_upload_service.rs +++ b/ledger/src/bigtable_upload_service.rs @@ -117,7 +117,7 @@ impl BigTableUploadService { )); match result { - Ok(last_slot_uploaded) => start_slot = last_slot_uploaded, + Ok(last_slot_uploaded) => start_slot = last_slot_uploaded.saturating_add(1), Err(err) => { warn!("bigtable: upload_confirmed_blocks: {}", err); std::thread::sleep(std::time::Duration::from_secs(2)); From 7048e72d81e47218eddbd47cd5efc20f9655a373 Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 26 Oct 2023 11:38:58 -0600 Subject: [PATCH 10/98] Blockstore: only return block times for rooted slots (#33871) * Add Blockstore::get_rooted_block_time method and use in RPC * Un-pub get_block_time --- ledger/src/blockstore.rs | 15 +++++++++++++-- rpc/src/rpc.rs | 2 +- 2 files changed, 14 insertions(+), 3 deletions(-) 
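A note on the two bigtable fixes above, before the blockstore diff below: together they pin down a resume-cursor contract. `upload_confirmed_blocks` returns the last slot it covered (returning `ending_slot` even when nothing needed uploading), and `BigTableUploadService` resumes strictly after it. A reduced sketch of that contract, with simplified types and a hypothetical `upload` standing in for the real function:

    type Slot = u64;

    // Returns the last slot covered; returning `ending_slot` even when no
    // blocks needed uploading is what guarantees forward progress.
    fn upload(start_slot: Slot, ending_slot: Slot) -> Result<Slot, String> {
        // ... upload blocks in [start_slot, ending_slot] ...
        Ok(ending_slot)
    }

    fn upload_loop(mut start_slot: Slot, latest_slot: impl Fn() -> Slot) {
        loop {
            let ending_slot = latest_slot();
            if start_slot > ending_slot {
                continue; // nothing new yet; a real loop would sleep here
            }
            match upload(start_slot, ending_slot) {
                // Resume one past the last covered slot so the same slot is
                // never re-checked; saturating_add guards the u64::MAX edge.
                Ok(last) => start_slot = last.saturating_add(1),
                Err(_) => { /* log, back off, retry */ }
            }
        }
    }

Without both halves, the honest return value and the `saturating_add(1)`, the service could re-check the same slot forever, which is exactly the stuck loop the two patches describe.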
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index ce9336e1132192..e1893c1033998a 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -1962,12 +1962,23 @@ impl Blockstore { } } - pub fn get_block_time(&self, slot: Slot) -> Result> { - datapoint_info!("blockstore-rpc-api", ("method", "get_block_time", String)); + fn get_block_time(&self, slot: Slot) -> Result> { let _lock = self.check_lowest_cleanup_slot(slot)?; self.blocktime_cf.get(slot) } + pub fn get_rooted_block_time(&self, slot: Slot) -> Result> { + datapoint_info!( + "blockstore-rpc-api", + ("method", "get_rooted_block_time", String) + ); + let _lock = self.check_lowest_cleanup_slot(slot)?; + if self.is_root(slot) { + return self.blocktime_cf.get(slot); + } + Err(BlockstoreError::SlotNotRooted) + } + pub fn cache_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> { self.blocktime_cf.put(slot, ×tamp) } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index a36ec712ebfeb8..38f76dc019259c 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1321,7 +1321,7 @@ impl JsonRpcRequestProcessor { .unwrap() .highest_super_majority_root() { - let result = self.blockstore.get_block_time(slot); + let result = self.blockstore.get_rooted_block_time(slot); self.check_blockstore_root(&result, slot)?; if result.is_err() || matches!(result, Ok(None)) { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { From 2a5ec4acf8579b37c24c1bd8024a4c504b25eeae Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Thu, 26 Oct 2023 12:58:20 -0700 Subject: [PATCH 11/98] Cleanup cargo registry code (#33879) --- cargo-registry/src/crate_handler.rs | 54 +++++++++++++---------------- cargo-registry/src/main.rs | 7 ++-- 2 files changed, 28 insertions(+), 33 deletions(-) diff --git a/cargo-registry/src/crate_handler.rs b/cargo-registry/src/crate_handler.rs index 043c69252c4e17..d1d013314eed95 100644 --- a/cargo-registry/src/crate_handler.rs +++ b/cargo-registry/src/crate_handler.rs @@ -102,7 +102,7 @@ pub(crate) struct Program { id: Pubkey, _tempdir: Arc, meta: PackageMetaData, - crate_bytes: CrateTarGz, + packed_crate: PackedCrate, } impl Program { @@ -117,7 +117,7 @@ impl Program { if APPEND_CRATE_TO_ELF { let program_id_str = Program::program_id_to_crate_name(self.id); let crate_tar_gz = - CrateTarGz::new_rebased(&self.crate_bytes, &self.meta, &program_id_str)?; + PackedCrate::new_rebased(&self.packed_crate, &self.meta, &program_id_str)?; let crate_len = u32::to_le_bytes(crate_tar_gz.0.len() as u32); program_data.extend_from_slice(&crate_tar_gz.0); program_data.extend_from_slice(&crate_len); @@ -175,8 +175,7 @@ impl Program { .saturating_sub(length as usize); let crate_end = data_len.saturating_sub(sizeof_length); - let crate_bytes = CrateTarGz(Bytes::copy_from_slice(&data[crate_start..crate_end])); - self.crate_bytes = crate_bytes; + self.packed_crate = PackedCrate(Bytes::copy_from_slice(&data[crate_start..crate_end])); } Ok(()) } @@ -200,15 +199,16 @@ impl From<&UnpackedCrate> for Program { id: value.program_id, _tempdir: value.tempdir.clone(), meta: value.meta.clone(), - crate_bytes: value.crate_bytes.clone(), + packed_crate: value.packed_crate.clone(), } } } +/// Contents of a .crate file #[derive(Clone, Default)] -pub(crate) struct CrateTarGz(pub(crate) Bytes); +pub(crate) struct PackedCrate(pub(crate) Bytes); -impl CrateTarGz { +impl PackedCrate { fn new(value: UnpackedCrate) -> Result { let mut archive = Builder::new(Vec::new()); archive.mode(HeaderMode::Deterministic); @@ -225,7 +225,7 @@ impl 
CrateTarGz { let mut zipped_data = Vec::new(); encoder.read_to_end(&mut zipped_data)?; - Ok(CrateTarGz(Bytes::from(zipped_data))) + Ok(PackedCrate(Bytes::from(zipped_data))) } fn new_rebased(&self, meta: &PackageMetaData, target_base: &str) -> Result { @@ -272,8 +272,6 @@ impl CrateTarGz { } } -pub(crate) struct CratePackage(pub(crate) Bytes); - pub(crate) struct UnpackedCrate { meta: PackageMetaData, cksum: String, @@ -281,14 +279,14 @@ pub(crate) struct UnpackedCrate { program_path: String, program_id: Pubkey, keypair: Option, - crate_bytes: CrateTarGz, + packed_crate: PackedCrate, } impl UnpackedCrate { - fn decompress(crate_bytes: CrateTarGz, meta: PackageMetaData) -> Result { - let cksum = format!("{:x}", Sha256::digest(&crate_bytes.0)); + fn decompress(packed_crate: PackedCrate, meta: PackageMetaData) -> Result { + let cksum = format!("{:x}", Sha256::digest(&packed_crate.0)); - let decoder = GzDecoder::new(crate_bytes.0.as_ref()); + let decoder = GzDecoder::new(packed_crate.0.as_ref()); let mut archive = Archive::new(decoder); let tempdir = tempdir()?; @@ -316,22 +314,19 @@ impl UnpackedCrate { program_path, program_id: keypair.pubkey(), keypair: Some(keypair), - crate_bytes, + packed_crate, }) } - pub(crate) fn unpack(value: CratePackage) -> Result { - let bytes = value.0; + pub(crate) fn new(bytes: Bytes) -> Result { let (meta, offset) = PackageMetaData::new(&bytes)?; let (_crate_file_length, length_size) = PackageMetaData::read_u32_length(&bytes.slice(offset..))?; - let crate_bytes = CrateTarGz(bytes.slice(offset.saturating_add(length_size)..)); - UnpackedCrate::decompress(crate_bytes, meta) + let packed_crate = PackedCrate(bytes.slice(offset.saturating_add(length_size)..)); + UnpackedCrate::decompress(packed_crate, meta) } -} -impl UnpackedCrate { pub(crate) fn publish( &self, client: Arc, @@ -362,22 +357,21 @@ impl UnpackedCrate { id: Pubkey, vers: &str, client: Arc, - ) -> Result<(CrateTarGz, PackageMetaData), Error> { - let crate_obj = Self::new_empty(id, vers)?; - let mut program = Program::from(&crate_obj); + ) -> Result<(PackedCrate, PackageMetaData), Error> { + let unpacked = Self::new_empty(id, vers)?; + let mut program = Program::from(&unpacked); program.dump(client)?; // Decompile the program // Generate a Cargo.toml - let mut meta = crate_obj.meta.clone(); + let mut meta = unpacked.meta.clone(); if APPEND_CRATE_TO_ELF { - let version = program.crate_bytes.version(); - meta.vers = version; - Ok((program.crate_bytes, meta)) + meta.vers = program.packed_crate.version(); + Ok((program.packed_crate, meta)) } else { - CrateTarGz::new(crate_obj).map(|file| (file, meta)) + PackedCrate::new(unpacked).map(|file| (file, meta)) } } @@ -421,7 +415,7 @@ impl UnpackedCrate { program_path, program_id: id, keypair: None, - crate_bytes: CrateTarGz::default(), + packed_crate: PackedCrate::default(), }) } diff --git a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs index c98fcb23bd364c..0bfc2c7f3ff004 100644 --- a/cargo-registry/src/main.rs +++ b/cargo-registry/src/main.rs @@ -2,7 +2,7 @@ use { crate::{ client::Client, - crate_handler::{CratePackage, Error, Program, UnpackedCrate}, + crate_handler::{Error, Program, UnpackedCrate}, sparse_index::RegistryIndex, }, hyper::{ @@ -38,14 +38,15 @@ impl CargoRegistryService { match bytes { Ok(data) => { - let Ok(crate_object) = UnpackedCrate::unpack(CratePackage(data)) else { + let Ok(unpacked_crate) = UnpackedCrate::new(data) else { return response_builder::error_response( hyper::StatusCode::INTERNAL_SERVER_ERROR, "Failed to parse the 
crate information", ); }; let Ok(result) = - tokio::task::spawn_blocking(move || crate_object.publish(client, index)).await + tokio::task::spawn_blocking(move || unpacked_crate.publish(client, index)) + .await else { return response_builder::error_response( hyper::StatusCode::INTERNAL_SERVER_ERROR, From ba112a021a471c123817d90b99b206a47c6217c0 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 27 Oct 2023 09:30:51 +0800 Subject: [PATCH 12/98] TransactionScheduler: SchedulerController (#33825) --- .../immutable_deserialized_packet.rs | 4 + .../transaction_scheduler/mod.rs | 2 + .../scheduler_controller.rs | 630 ++++++++++++++++++ runtime/src/transaction_priority_details.rs | 2 +- 4 files changed, 637 insertions(+), 1 deletion(-) create mode 100644 core/src/banking_stage/transaction_scheduler/scheduler_controller.rs diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index 4617702059b202..8a9d82e32a38c0 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -96,6 +96,10 @@ impl ImmutableDeserializedPacket { self.priority_details.compute_unit_limit } + pub fn priority_details(&self) -> TransactionPriorityDetails { + self.priority_details.clone() + } + // This function deserializes packets into transactions, computes the blake3 hash of transaction // messages, and verifies secp256k1 instructions. pub fn build_sanitized_transaction( diff --git a/core/src/banking_stage/transaction_scheduler/mod.rs b/core/src/banking_stage/transaction_scheduler/mod.rs index bf6f761baca88c..0b65dce06a48fc 100644 --- a/core/src/banking_stage/transaction_scheduler/mod.rs +++ b/core/src/banking_stage/transaction_scheduler/mod.rs @@ -12,6 +12,8 @@ mod batch_id_generator; mod in_flight_tracker; #[allow(dead_code)] mod prio_graph_scheduler; +#[allow(dead_code)] +mod scheduler_controller; mod scheduler_error; #[allow(dead_code)] mod transaction_id_generator; diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs new file mode 100644 index 00000000000000..8c1dc4f9172f73 --- /dev/null +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -0,0 +1,630 @@ +//! Control flow for BankingStage's transaction scheduler. +//! + +use { + super::{ + prio_graph_scheduler::PrioGraphScheduler, scheduler_error::SchedulerError, + transaction_id_generator::TransactionIdGenerator, + transaction_state::SanitizedTransactionTTL, + transaction_state_container::TransactionStateContainer, + }, + crate::banking_stage::{ + decision_maker::{BufferedPacketsDecision, DecisionMaker}, + immutable_deserialized_packet::ImmutableDeserializedPacket, + packet_deserializer::PacketDeserializer, + TOTAL_BUFFERED_PACKETS, + }, + crossbeam_channel::RecvTimeoutError, + solana_runtime::bank_forks::BankForks, + std::{ + sync::{Arc, RwLock}, + time::Duration, + }, +}; + +/// Controls packet and transaction flow into scheduler, and scheduling execution. +pub(crate) struct SchedulerController { + /// Decision maker for determining what should be done with transactions. + decision_maker: DecisionMaker, + /// Packet/Transaction ingress. + packet_receiver: PacketDeserializer, + bank_forks: Arc>, + /// Generates unique IDs for incoming transactions. + transaction_id_generator: TransactionIdGenerator, + /// Container for transaction state. 
+    /// Shared resource between `packet_receiver` and `scheduler`.
+    container: TransactionStateContainer,
+    /// State for scheduling and communicating with worker threads.
+    scheduler: PrioGraphScheduler,
+}
+
+impl SchedulerController {
+    pub fn new(
+        decision_maker: DecisionMaker,
+        packet_deserializer: PacketDeserializer,
+        bank_forks: Arc<RwLock<BankForks>>,
+        scheduler: PrioGraphScheduler,
+    ) -> Self {
+        Self {
+            decision_maker,
+            packet_receiver: packet_deserializer,
+            bank_forks,
+            transaction_id_generator: TransactionIdGenerator::default(),
+            container: TransactionStateContainer::with_capacity(TOTAL_BUFFERED_PACKETS),
+            scheduler,
+        }
+    }
+
+    pub fn run(mut self) -> Result<(), SchedulerError> {
+        loop {
+            // BufferedPacketsDecision is shared with the legacy BankingStage, which will
+            // forward packets. We are not renaming these decision variants for now, but the
+            // actions taken are different, since the new BankingStage will not forward packets.
+            // For `Forward` and `ForwardAndHold`, we want to receive packets but will not
+            // forward them to the next leader. In this case, `ForwardAndHold` is
+            // indistinguishable from `Hold`.
+            //
+            // `Forward` will drop packets from the buffer instead of forwarding.
+            // During receiving, since packets would be dropped from the buffer anyway, we can
+            // bypass sanitization and buffering and immediately drop the packets.
+            let decision = self.decision_maker.make_consume_or_forward_decision();
+
+            self.process_transactions(&decision)?;
+            self.scheduler.receive_completed(&mut self.container)?;
+            if !self.receive_packets(&decision) {
+                break;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Process packets based on decision.
+    fn process_transactions(
+        &mut self,
+        decision: &BufferedPacketsDecision,
+    ) -> Result<(), SchedulerError> {
+        match decision {
+            BufferedPacketsDecision::Consume(_bank_start) => {
+                let _num_scheduled = self.scheduler.schedule(&mut self.container)?;
+            }
+            BufferedPacketsDecision::Forward => {
+                self.clear_container();
+            }
+            BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => {}
+        }
+
+        Ok(())
+    }
+
+    /// Clears the transaction state container.
+    /// This only clears pending transactions, and does **not** clear in-flight transactions.
+    fn clear_container(&mut self) {
+        while let Some(id) = self.container.pop() {
+            self.container.remove_by_id(&id.id);
+        }
+    }
+
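To make the decision handling above concrete, here is a minimal, self-contained sketch of the same control flow. The enum and the queue are simplified stand-ins for `BufferedPacketsDecision` and `TransactionStateContainer` (assumed shapes, for illustration only, not the real banking_stage types); it shows only that `Consume` drains work into scheduling, `Forward` drops buffered work, and the hold variants keep it:

use std::collections::VecDeque;

// Simplified stand-in for BufferedPacketsDecision (assumed shape).
enum Decision {
    Consume,
    Forward,
    ForwardAndHold,
    Hold,
}

// One pass of the decision handling: `Consume` hands pending work to the
// scheduler (modeled here as draining the queue), `Forward` drops it,
// the remaining variants hold it untouched.
fn process(decision: &Decision, pending: &mut VecDeque<u64>) {
    match decision {
        Decision::Consume => {
            while let Some(_id) = pending.pop_front() {
                // the real controller schedules `_id` onto a worker thread here
            }
        }
        Decision::Forward => pending.clear(),
        Decision::ForwardAndHold | Decision::Hold => {}
    }
}

fn main() {
    let mut pending: VecDeque<u64> = (0u64..4).collect();
    process(&Decision::Hold, &mut pending);
    assert_eq!(pending.len(), 4); // held work stays buffered
    process(&Decision::Forward, &mut pending);
    assert!(pending.is_empty()); // the new stage drops instead of forwarding
}

The real `run` loop interleaves this with `receive_completed` and the packet intake shown next.

+    /// Returns whether the packet receiver is still connected.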
+    fn receive_packets(&mut self, decision: &BufferedPacketsDecision) -> bool {
+        let remaining_queue_capacity = self.container.remaining_queue_capacity();
+
+        const MAX_PACKET_RECEIVE_TIME: Duration = Duration::from_millis(100);
+        let (recv_timeout, should_buffer) = match decision {
+            BufferedPacketsDecision::Consume(_) => (
+                if self.container.is_empty() {
+                    MAX_PACKET_RECEIVE_TIME
+                } else {
+                    Duration::ZERO
+                },
+                true,
+            ),
+            BufferedPacketsDecision::Forward => (MAX_PACKET_RECEIVE_TIME, false),
+            BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => {
+                (MAX_PACKET_RECEIVE_TIME, true)
+            }
+        };
+
+        let received_packet_results = self
+            .packet_receiver
+            .receive_packets(recv_timeout, remaining_queue_capacity);
+
+        match (received_packet_results, should_buffer) {
+            (Ok(receive_packet_results), true) => {
+                self.buffer_packets(receive_packet_results.deserialized_packets)
+            }
+            (Ok(receive_packet_results), false) => drop(receive_packet_results),
+            (Err(RecvTimeoutError::Timeout), _) => {}
+            (Err(RecvTimeoutError::Disconnected), _) => return false,
+        }
+
+        true
+    }
+
+    fn buffer_packets(&mut self, packets: Vec<ImmutableDeserializedPacket>) {
+        // Sanitize packets, generate IDs, and insert into the container.
+        let bank = self.bank_forks.read().unwrap().working_bank();
+        let last_slot_in_epoch = bank.epoch_schedule().get_last_slot_in_epoch(bank.epoch());
+        let feature_set = &bank.feature_set;
+        let vote_only = bank.vote_only_bank();
+        for packet in packets {
+            let Some(transaction) =
+                packet.build_sanitized_transaction(feature_set, vote_only, bank.as_ref())
+            else {
+                continue;
+            };
+
+            let transaction_id = self.transaction_id_generator.next();
+            let transaction_ttl = SanitizedTransactionTTL {
+                transaction,
+                max_age_slot: last_slot_in_epoch,
+            };
+            let transaction_priority_details = packet.priority_details();
+            self.container.insert_new_transaction(
+                transaction_id,
+                transaction_ttl,
+                transaction_priority_details,
+            );
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        crate::{
+            banking_stage::{
+                consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH,
+                scheduler_messages::{ConsumeWork, FinishedConsumeWork, TransactionBatchId},
+                tests::create_slow_genesis_config,
+            },
+            banking_trace::BankingPacketBatch,
+            sigverify::SigverifyTracerPacketStats,
+        },
+        crossbeam_channel::{unbounded, Receiver, Sender},
+        itertools::Itertools,
+        solana_ledger::{
+            blockstore::Blockstore, genesis_utils::GenesisConfigInfo,
+            get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache,
+        },
+        solana_perf::packet::{to_packet_batches, PacketBatch, NUM_PACKETS},
+        solana_poh::poh_recorder::{PohRecorder, Record, WorkingBankEntry},
+        solana_runtime::{bank::Bank, bank_forks::BankForks},
+        solana_sdk::{
+            compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message,
+            poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, signer::Signer,
+            system_instruction, transaction::Transaction,
+        },
+        std::sync::{atomic::AtomicBool, Arc, RwLock},
+        tempfile::TempDir,
+    };
+
+    const TEST_TIMEOUT: Duration = Duration::from_millis(1000);
+
+    fn create_channels<T>(num: usize) -> (Vec<Sender<T>>, Vec<Receiver<T>>) {
+        (0..num).map(|_| unbounded()).unzip()
+    }
+
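One detail worth calling out in `buffer_packets`: every sanitized transaction gets its `max_age_slot` pinned to the last slot of the bank's current epoch, so buffered work cannot outlive the epoch it was sanitized under. A rough sketch of that bookkeeping, with a hypothetical `Ttl` struct standing in for `SanitizedTransactionTTL` and deliberately naive epoch math (fixed-length, back-to-back epochs, ignoring the warmup schedule a real `EpochSchedule` may apply):

// Hypothetical stand-in for SanitizedTransactionTTL (illustrative only).
struct Ttl {
    max_age_slot: u64,
}

// Assumed epoch math: fixed-length, back-to-back epochs (no warmup).
fn last_slot_in_epoch(epoch: u64, slots_per_epoch: u64) -> u64 {
    (epoch + 1) * slots_per_epoch - 1
}

fn main() {
    let slots_per_epoch = 432_000; // common mainnet-beta epoch length
    let ttl = Ttl {
        max_age_slot: last_slot_in_epoch(0, slots_per_epoch),
    };
    // A transaction buffered during epoch 0 stops being schedulable after this slot.
    assert_eq!(ttl.max_age_slot, 431_999);
}

With that in mind, the test scaffolding below wires a full harness (PoH recorder, blockstore, and channels) around the controller.

+    // Helper struct to create tests that hold channels, files, etc.
+    // such that our tests can be more easily set up and run.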
+    struct TestFrame {
+        bank: Arc<Bank>,
+        _ledger_path: TempDir,
+        _entry_receiver: Receiver<WorkingBankEntry>,
+        _record_receiver: Receiver<Record>,
+        poh_recorder: Arc<RwLock<PohRecorder>>,
+        banking_packet_sender: Sender<Arc<(Vec<PacketBatch>, Option<SigverifyTracerPacketStats>)>>,
+
+        consume_work_receivers: Vec<Receiver<ConsumeWork>>,
+        finished_consume_work_sender: Sender<FinishedConsumeWork>,
+    }
+
+    fn create_test_frame(num_threads: usize) -> (TestFrame, SchedulerController) {
+        let GenesisConfigInfo { genesis_config, .. } = create_slow_genesis_config(10_000);
+        let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
+        let bank_forks = BankForks::new_rw_arc(bank);
+        let bank = bank_forks.read().unwrap().working_bank();
+
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path())
+            .expect("Expected to be able to open database ledger");
+        let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
+            bank.tick_height(),
+            bank.last_blockhash(),
+            bank.clone(),
+            Some((4, 4)),
+            bank.ticks_per_slot(),
+            &Pubkey::new_unique(),
+            Arc::new(blockstore),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
+            &PohConfig::default(),
+            Arc::new(AtomicBool::default()),
+        );
+        let poh_recorder = Arc::new(RwLock::new(poh_recorder));
+        let decision_maker = DecisionMaker::new(Pubkey::new_unique(), poh_recorder.clone());
+
+        let (banking_packet_sender, banking_packet_receiver) = unbounded();
+        let packet_deserializer =
+            PacketDeserializer::new(banking_packet_receiver, bank_forks.clone());
+
+        let (consume_work_senders, consume_work_receivers) = create_channels(num_threads);
+        let (finished_consume_work_sender, finished_consume_work_receiver) = unbounded();
+
+        let test_frame = TestFrame {
+            bank,
+            _ledger_path: ledger_path,
+            _entry_receiver: entry_receiver,
+            _record_receiver: record_receiver,
+            poh_recorder,
+            banking_packet_sender,
+            consume_work_receivers,
+            finished_consume_work_sender,
+        };
+        let scheduler_controller = SchedulerController::new(
+            decision_maker,
+            packet_deserializer,
+            bank_forks,
+            PrioGraphScheduler::new(consume_work_senders, finished_consume_work_receiver),
+        );
+
+        (test_frame, scheduler_controller)
+    }
+
+    fn prioritized_tranfer(
+        from_keypair: &Keypair,
+        to_pubkey: &Pubkey,
+        lamports: u64,
+        priority: u64,
+        recent_blockhash: Hash,
+    ) -> Transaction {
+        let transfer = system_instruction::transfer(&from_keypair.pubkey(), to_pubkey, lamports);
+        let prioritization = ComputeBudgetInstruction::set_compute_unit_price(priority);
+        let message = Message::new(&[transfer, prioritization], Some(&from_keypair.pubkey()));
+        Transaction::new(&vec![from_keypair], message, recent_blockhash)
+    }
+
+    fn to_banking_packet_batch(txs: &[Transaction]) -> BankingPacketBatch {
+        let packet_batch = to_packet_batches(txs, NUM_PACKETS);
+        Arc::new((packet_batch, None))
+    }
+
+    #[test]
+    #[should_panic(expected = "batch id 0 is not being tracked")]
+    fn test_unexpected_batch_id() {
+        let (test_frame, central_scheduler_banking_stage) = create_test_frame(1);
+        let TestFrame {
+            finished_consume_work_sender,
+            ..
+ } = &test_frame; + + finished_consume_work_sender + .send(FinishedConsumeWork { + work: ConsumeWork { + batch_id: TransactionBatchId::new(0), + ids: vec![], + transactions: vec![], + max_age_slots: vec![], + }, + retryable_indexes: vec![], + }) + .unwrap(); + + central_scheduler_banking_stage.run().unwrap(); + } + + #[test] + fn test_schedule_consume_single_threaded_no_conflicts() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(1); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + .. + } = &test_frame; + + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + + // Send packet batch to the scheduler - should do nothing until we become the leader. + let tx1 = prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + 1, + bank.last_blockhash(), + ); + let tx2 = prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + 2, + bank.last_blockhash(), + ); + let tx1_hash = tx1.message().hash(); + let tx2_hash = tx2.message().hash(); + + let txs = vec![tx1, tx2]; + banking_packet_sender + .send(to_banking_packet_batch(&txs)) + .unwrap(); + + let consume_work = consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap(); + assert_eq!(consume_work.ids.len(), 2); + assert_eq!(consume_work.transactions.len(), 2); + let message_hashes = consume_work + .transactions + .iter() + .map(|tx| tx.message_hash()) + .collect_vec(); + assert_eq!(message_hashes, vec![&tx2_hash, &tx1_hash]); + + drop(test_frame); + let _ = scheduler_thread.join(); + } + + #[test] + fn test_schedule_consume_single_threaded_conflict() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(1); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + .. + } = &test_frame; + + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + + let pk = Pubkey::new_unique(); + let tx1 = prioritized_tranfer(&Keypair::new(), &pk, 1, 1, bank.last_blockhash()); + let tx2 = prioritized_tranfer(&Keypair::new(), &pk, 1, 2, bank.last_blockhash()); + let tx1_hash = tx1.message().hash(); + let tx2_hash = tx2.message().hash(); + + let txs = vec![tx1, tx2]; + banking_packet_sender + .send(to_banking_packet_batch(&txs)) + .unwrap(); + + // We expect 2 batches to be scheduled + let consume_works = (0..2) + .map(|_| { + consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap() + }) + .collect_vec(); + + let num_txs_per_batch = consume_works.iter().map(|cw| cw.ids.len()).collect_vec(); + let message_hashes = consume_works + .iter() + .flat_map(|cw| cw.transactions.iter().map(|tx| tx.message_hash())) + .collect_vec(); + assert_eq!(num_txs_per_batch, vec![1; 2]); + assert_eq!(message_hashes, vec![&tx2_hash, &tx1_hash]); + + drop(test_frame); + let _ = scheduler_thread.join(); + } + + #[test] + fn test_schedule_consume_single_threaded_multi_batch() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(1); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + .. 
+ } = &test_frame; + + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + + // Send multiple batches - all get scheduled + let txs1 = (0..2 * TARGET_NUM_TRANSACTIONS_PER_BATCH) + .map(|i| { + prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + i as u64, + 1, + bank.last_blockhash(), + ) + }) + .collect_vec(); + let txs2 = (0..2 * TARGET_NUM_TRANSACTIONS_PER_BATCH) + .map(|i| { + prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + i as u64, + 2, + bank.last_blockhash(), + ) + }) + .collect_vec(); + + banking_packet_sender + .send(to_banking_packet_batch(&txs1)) + .unwrap(); + banking_packet_sender + .send(to_banking_packet_batch(&txs2)) + .unwrap(); + + // We expect 4 batches to be scheduled + let consume_works = (0..4) + .map(|_| { + consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap() + }) + .collect_vec(); + + assert_eq!( + consume_works.iter().map(|cw| cw.ids.len()).collect_vec(), + vec![TARGET_NUM_TRANSACTIONS_PER_BATCH; 4] + ); + + drop(test_frame); + let _ = scheduler_thread.join(); + } + + #[test] + fn test_schedule_consume_simple_thread_selection() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(2); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + .. + } = &test_frame; + + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + + // Send 4 transactions w/o conflicts. 2 should be scheduled on each thread + let txs = (0..4) + .map(|i| { + prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + i, + bank.last_blockhash(), + ) + }) + .collect_vec(); + banking_packet_sender + .send(to_banking_packet_batch(&txs)) + .unwrap(); + + // Priority Expectation: + // Thread 0: [3, 1] + // Thread 1: [2, 0] + let t0_expected = [3, 1] + .into_iter() + .map(|i| txs[i].message().hash()) + .collect_vec(); + let t1_expected = [2, 0] + .into_iter() + .map(|i| txs[i].message().hash()) + .collect_vec(); + let t0_actual = consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap() + .transactions + .iter() + .map(|tx| *tx.message_hash()) + .collect_vec(); + let t1_actual = consume_work_receivers[1] + .recv_timeout(TEST_TIMEOUT) + .unwrap() + .transactions + .iter() + .map(|tx| *tx.message_hash()) + .collect_vec(); + + assert_eq!(t0_actual, t0_expected); + assert_eq!(t1_actual, t1_expected); + + drop(test_frame); + let _ = scheduler_thread.join(); + } + + #[test] + fn test_schedule_consume_retryable() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(1); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + finished_consume_work_sender, + .. + } = &test_frame; + + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + + // Send packet batch to the scheduler - should do nothing until we become the leader. 
+ let tx1 = prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + 1, + bank.last_blockhash(), + ); + let tx2 = prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + 2, + bank.last_blockhash(), + ); + let tx1_hash = tx1.message().hash(); + let tx2_hash = tx2.message().hash(); + + let txs = vec![tx1, tx2]; + banking_packet_sender + .send(to_banking_packet_batch(&txs)) + .unwrap(); + + let consume_work = consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap(); + assert_eq!(consume_work.ids.len(), 2); + assert_eq!(consume_work.transactions.len(), 2); + let message_hashes = consume_work + .transactions + .iter() + .map(|tx| tx.message_hash()) + .collect_vec(); + assert_eq!(message_hashes, vec![&tx2_hash, &tx1_hash]); + + // Complete the batch - marking the second transaction as retryable + finished_consume_work_sender + .send(FinishedConsumeWork { + work: consume_work, + retryable_indexes: vec![1], + }) + .unwrap(); + + // Transaction should be rescheduled + let consume_work = consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap(); + assert_eq!(consume_work.ids.len(), 1); + assert_eq!(consume_work.transactions.len(), 1); + let message_hashes = consume_work + .transactions + .iter() + .map(|tx| tx.message_hash()) + .collect_vec(); + assert_eq!(message_hashes, vec![&tx1_hash]); + + drop(test_frame); + let _ = scheduler_thread.join(); + } +} diff --git a/runtime/src/transaction_priority_details.rs b/runtime/src/transaction_priority_details.rs index 0d0a94df4ed393..1e4ddc532f2091 100644 --- a/runtime/src/transaction_priority_details.rs +++ b/runtime/src/transaction_priority_details.rs @@ -7,7 +7,7 @@ use { }, }; -#[derive(Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct TransactionPriorityDetails { pub priority: u64, pub compute_unit_limit: u64, From 510b6b949fc5b1c4935f9b4054c907956514c664 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Thu, 26 Oct 2023 22:12:56 -0500 Subject: [PATCH 13/98] Split compute budget instructions process from struct (#33852) * Split compute budget instruction processing from ComputeBudget struct itself, so CB instructions can be processed elsewhere without involving ComputeBudget * updated tests * avoid built ComputeBudget from dated ComputeBudgetLimits in this refactoring PR * Clean-up program-runtime/src/compute_budget_processor.rs * Add test for a corner case that deprecated instruction is used to request units greater than max limit; * Update code to handle the corner case. 
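Before the diff itself, a sketch of the call shape this refactor produces: callers first derive sanitized `ComputeBudgetLimits` from a message's instructions, and only then build an execution `ComputeBudget` from the fields they need. This is a minimal sketch using the APIs introduced below (paths as added in this patch; the `SanitizedMessage` is assumed to be in hand, and error handling is collapsed to the same `unwrap_or_default()` the diff uses):

use solana_program_runtime::{
    compute_budget::ComputeBudget,
    compute_budget_processor::process_compute_budget_instructions,
};
use solana_sdk::{feature_set::FeatureSet, message::SanitizedMessage};

fn budget_for(message: &SanitizedMessage, feature_set: &FeatureSet) -> ComputeBudget {
    // Extract and sanitize the transaction's requested limits...
    let limits =
        process_compute_budget_instructions(message.program_instructions_iter(), feature_set)
            .unwrap_or_default();
    // ...then build the execution budget from just the fields it needs.
    ComputeBudget {
        compute_unit_limit: u64::from(limits.compute_unit_limit),
        heap_size: limits.updated_heap_bytes,
        ..ComputeBudget::default()
    }
}

This is exactly the split the commit message describes: instruction processing can now happen (e.g., in fee or cost calculation) without constructing a full `ComputeBudget`.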
--- accounts-db/src/accounts.rs | 52 +- cost-model/src/cost_model.rs | 69 +- program-runtime/src/compute_budget.rs | 648 +--------------- .../src/compute_budget_processor.rs | 704 ++++++++++++++++++ program-runtime/src/invoke_context.rs | 11 +- program-runtime/src/lib.rs | 1 + programs/sbf/tests/programs.rs | 17 +- runtime/src/bank.rs | 67 +- runtime/src/bank/tests.rs | 34 +- runtime/src/transaction_priority_details.rs | 40 +- 10 files changed, 881 insertions(+), 762 deletions(-) create mode 100644 program-runtime/src/compute_budget_processor.rs diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 7265626d8927e5..0ac199e6633522 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -24,7 +24,7 @@ use { itertools::Itertools, log::*, solana_program_runtime::{ - compute_budget::{self, ComputeBudget}, + compute_budget_processor::process_compute_budget_instructions, loaded_programs::LoadedProgramsForTxBatch, }, solana_sdk::{ @@ -34,9 +34,8 @@ use { bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{BankId, Slot}, feature_set::{ - self, add_set_tx_loaded_accounts_data_size_instruction, - include_loaded_accounts_data_size_in_fee_calculation, - remove_congestion_multiplier_from_fee_calculation, remove_deprecated_request_unit_ix, + self, include_loaded_accounts_data_size_in_fee_calculation, + remove_congestion_multiplier_from_fee_calculation, simplify_writable_program_account_check, FeatureSet, }, fee::FeeStructure, @@ -246,15 +245,16 @@ impl Accounts { feature_set: &FeatureSet, ) -> Result> { if feature_set.is_active(&feature_set::cap_transaction_accounts_data_size::id()) { - let mut compute_budget = - ComputeBudget::new(compute_budget::MAX_COMPUTE_UNIT_LIMIT as u64); - let _process_transaction_result = compute_budget.process_instructions( + let compute_budget_limits = process_compute_budget_instructions( tx.message().program_instructions_iter(), - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ); + feature_set, + ) + .unwrap_or_default(); // sanitize against setting size limit to zero - NonZeroUsize::new(compute_budget.loaded_accounts_data_size_limit).map_or( + NonZeroUsize::new( + usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap_or_default(), + ) + .map_or( Err(TransactionError::InvalidLoadedAccountsDataSizeLimit), |v| Ok(Some(v)), ) @@ -721,7 +721,7 @@ impl Accounts { fee_structure.calculate_fee( tx.message(), lamports_per_signature, - &ComputeBudget::fee_budget_limits(tx.message().program_instructions_iter(), feature_set), + &process_compute_budget_instructions(tx.message().program_instructions_iter(), feature_set).unwrap_or_default().into(), feature_set.is_active(&remove_congestion_multiplier_from_fee_calculation::id()), feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), ) @@ -1470,8 +1470,9 @@ mod tests { transaction_results::{DurableNonceFee, TransactionExecutionDetails}, }, assert_matches::assert_matches, - solana_program_runtime::prioritization_fee::{ - PrioritizationFeeDetails, PrioritizationFeeType, + solana_program_runtime::{ + compute_budget_processor, + prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -1747,13 +1748,15 @@ mod tests { ); let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); + 
feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set), + &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) + .unwrap_or_default() + .into(), true, false, ); @@ -4249,7 +4252,11 @@ mod tests { let result_no_limit = Ok(None); let result_default_limit = Ok(Some( - NonZeroUsize::new(compute_budget::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES).unwrap(), + NonZeroUsize::new( + usize::try_from(compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) + .unwrap(), + ) + .unwrap(), )); let result_requested_limit: Result> = Ok(Some(NonZeroUsize::new(99).unwrap())); @@ -4277,7 +4284,10 @@ mod tests { // if tx doesn't set limit, then default limit (64MiB) // if tx sets limit, then requested limit // if tx sets limit to zero, then TransactionError::InvalidLoadedAccountsDataSizeLimit - feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); + feature_set.activate( + &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(), + 0, + ); test(tx_not_set_limit, &feature_set, &result_default_limit); test(tx_set_limit_99, &feature_set, &result_requested_limit); test(tx_set_limit_0, &feature_set, &result_invalid_limit); @@ -4312,13 +4322,15 @@ mod tests { ); let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set), + &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) + .unwrap_or_default() + .into(), true, false, ); diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index 0e8d6954202351..bb3e296d6dcbe0 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -8,17 +8,17 @@ use { crate::{block_cost_limits::*, transaction_cost::*}, log::*, - solana_program_runtime::compute_budget::{ - ComputeBudget, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, + solana_program_runtime::{ + compute_budget::DEFAULT_HEAP_COST, + compute_budget_processor::{ + process_compute_budget_instructions, ComputeBudgetLimits, + DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, + }, }, solana_sdk::{ borsh0_10::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - feature_set::{ - add_set_tx_loaded_accounts_data_size_instruction, - include_loaded_accounts_data_size_in_fee_calculation, - remove_deprecated_request_unit_ix, FeatureSet, - }, + feature_set::{include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, fee::FeeStructure, instruction::CompiledInstruction, program_utils::limited_deserialize, @@ -62,10 +62,12 @@ impl CostModel { // to set limit, `compute_budget.loaded_accounts_data_size_limit` is set to default // limit of 64MB; which will convert to (64M/32K)*8CU = 16_000 CUs // - pub fn calculate_loaded_accounts_data_size_cost(compute_budget: &ComputeBudget) -> u64 { + pub fn calculate_loaded_accounts_data_size_cost( + compute_budget_limits: 
&ComputeBudgetLimits, + ) -> u64 { FeeStructure::calculate_memory_usage_cost( - compute_budget.loaded_accounts_data_size_limit, - compute_budget.heap_cost, + usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap(), + DEFAULT_HEAP_COST, ) } @@ -128,32 +130,28 @@ impl CostModel { } // calculate bpf cost based on compute budget instructions - let mut compute_budget = ComputeBudget::default(); - - let result = compute_budget.process_instructions( - transaction.message().program_instructions_iter(), - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ); // if failed to process compute_budget instructions, the transaction will not be executed // by `bank`, therefore it should be considered as no execution cost by cost model. - match result { - Ok(_) => { + match process_compute_budget_instructions( + transaction.message().program_instructions_iter(), + feature_set, + ) { + Ok(compute_budget_limits) => { // if tx contained user-space instructions and a more accurate estimate available correct it, // where "user-space instructions" must be specifically checked by // 'compute_unit_limit_is_set' flag, because compute_budget does not distinguish // builtin and bpf instructions when calculating default compute-unit-limit. (see // compute_budget.rs test `test_process_mixed_instructions_without_compute_budget`) if bpf_costs > 0 && compute_unit_limit_is_set { - bpf_costs = compute_budget.compute_unit_limit + bpf_costs = u64::from(compute_budget_limits.compute_unit_limit); } if feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()) { loaded_accounts_data_size_cost = - Self::calculate_loaded_accounts_data_size_cost(&compute_budget); + Self::calculate_loaded_accounts_data_size_cost(&compute_budget_limits); } } Err(_) => { @@ -545,7 +543,8 @@ mod tests { // default loaded_accounts_data_size_limit const DEFAULT_PAGE_COST: u64 = 8; let expected_loaded_accounts_data_size_cost = - solana_program_runtime::compute_budget::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES as u64 + solana_program_runtime::compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + as u64 / ACCOUNT_DATA_COST_PAGE_SIZE * DEFAULT_PAGE_COST; @@ -663,36 +662,36 @@ mod tests { #[allow(clippy::field_reassign_with_default)] #[test] fn test_calculate_loaded_accounts_data_size_cost() { - let mut compute_budget = ComputeBudget::default(); + let mut compute_budget_limits = ComputeBudgetLimits::default(); // accounts data size are priced in block of 32K, ... // ... requesting less than 32K should still be charged as one block - compute_budget.loaded_accounts_data_size_limit = 31_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 31 * 1024; assert_eq!( - compute_budget.heap_cost, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); // ... requesting exact 32K should be charged as one block - compute_budget.loaded_accounts_data_size_limit = 32_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 32 * 1024; assert_eq!( - compute_budget.heap_cost, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); // ... 
requesting slightly above 32K should be charged as 2 block - compute_budget.loaded_accounts_data_size_limit = 33_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 33 * 1024; assert_eq!( - compute_budget.heap_cost * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST * 2, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); // ... requesting exact 64K should be charged as 2 block - compute_budget.loaded_accounts_data_size_limit = 64_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 64 * 1024; assert_eq!( - compute_budget.heap_cost * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST * 2, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); } diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index f9239224b488a0..a568162c139c37 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -1,28 +1,11 @@ use { - crate::prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, + crate::compute_budget_processor::{self, process_compute_budget_instructions}, solana_sdk::{ - borsh0_10::try_from_slice_unchecked, - compute_budget::{self, ComputeBudgetInstruction}, - entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, - feature_set::{ - add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix, - FeatureSet, - }, - fee::FeeBudgetLimits, - instruction::{CompiledInstruction, InstructionError}, - pubkey::Pubkey, - transaction::TransactionError, + feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, + transaction::Result, }, }; -/// The total accounts data a transaction can load is limited to 64MiB to not break -/// anyone in Mainnet-beta today. It can be set by set_loaded_accounts_data_size_limit instruction -pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: usize = 64 * 1024 * 1024; - -pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; -pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; -const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; - #[cfg(RUSTC_WITH_SPECIALIZATION)] impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { fn example() -> Self { @@ -31,6 +14,10 @@ impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { } } +/// Roughly 0.5us/page, where page is 32K; given roughly 15CU/us, the +/// default heap page cost = 0.5 * 15 ~= 8CU/page +pub const DEFAULT_HEAP_COST: u64 = 8; + #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct ComputeBudget { /// Number of compute units that a transaction or individual instruction is @@ -118,9 +105,6 @@ pub struct ComputeBudget { pub alt_bn128_pairing_one_pair_cost_other: u64, /// Big integer modular exponentiation cost pub big_modular_exponentiation_cost: u64, - /// Maximum accounts data size, in bytes, that a transaction is allowed to load; The - /// value is capped by MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES to prevent overuse of memory. - pub loaded_accounts_data_size_limit: usize, /// Coefficient `a` of the quadratic function which determines the number /// of compute units consumed to call poseidon syscall for a given number /// of inputs. 
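For intuition on the test expectations above: loaded-accounts data size is priced per 32 KiB page, rounded up, at `DEFAULT_HEAP_COST` (8) compute units per page. A minimal standalone sketch of that rounding, assuming this is the formula `FeeStructure::calculate_memory_usage_cost` implements (the assertions mirror the tests above, and the constant names are borrowed from the cost model):

const ACCOUNT_DATA_COST_PAGE_SIZE: u64 = 32 * 1024; // assumed page size
const DEFAULT_HEAP_COST: u64 = 8; // CU per page, per the constant added above

// Ceiling division by the page size, then a fixed price per page.
fn loaded_accounts_data_size_cost(loaded_accounts_bytes: u64) -> u64 {
    loaded_accounts_bytes
        .saturating_add(ACCOUNT_DATA_COST_PAGE_SIZE.saturating_sub(1))
        .saturating_div(ACCOUNT_DATA_COST_PAGE_SIZE)
        .saturating_mul(DEFAULT_HEAP_COST)
}

fn main() {
    assert_eq!(loaded_accounts_data_size_cost(31 * 1024), 8); // under one page: one page
    assert_eq!(loaded_accounts_data_size_cost(32 * 1024), 8); // exactly one page
    assert_eq!(loaded_accounts_data_size_cost(33 * 1024), 16); // spills into a second page
    // The default 64 MiB limit prices out to 2048 pages * 8 CU = 16_384 CU.
    assert_eq!(loaded_accounts_data_size_cost(64 * 1024 * 1024), 16_384);
}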
@@ -143,7 +127,7 @@ pub struct ComputeBudget { impl Default for ComputeBudget { fn default() -> Self { - Self::new(MAX_COMPUTE_UNIT_LIMIT as u64) + Self::new(compute_budget_processor::MAX_COMPUTE_UNIT_LIMIT as u64) } } @@ -180,14 +164,13 @@ impl ComputeBudget { curve25519_ristretto_msm_base_cost: 2303, curve25519_ristretto_msm_incremental_cost: 788, heap_size: u32::try_from(solana_sdk::entrypoint::HEAP_LENGTH).unwrap(), - heap_cost: 8, + heap_cost: DEFAULT_HEAP_COST, mem_op_base_cost: 10, alt_bn128_addition_cost: 334, alt_bn128_multiplication_cost: 3_840, alt_bn128_pairing_one_pair_cost_first: 36_364, alt_bn128_pairing_one_pair_cost_other: 12_121, big_modular_exponentiation_cost: 33, - loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, poseidon_cost_coefficient_a: 61, poseidon_cost_coefficient_c: 542, get_remaining_compute_units_cost: 100, @@ -198,127 +181,16 @@ impl ComputeBudget { } } - pub fn process_instructions<'a>( - &mut self, - instructions: impl Iterator, - support_request_units_deprecated: bool, - support_set_loaded_accounts_data_size_limit_ix: bool, - ) -> Result { - let mut num_non_compute_budget_instructions: u32 = 0; - let mut updated_compute_unit_limit = None; - let mut requested_heap_size = None; - let mut prioritization_fee = None; - let mut updated_loaded_accounts_data_size_limit = None; - - for (i, (program_id, instruction)) in instructions.enumerate() { - if compute_budget::check_id(program_id) { - let invalid_instruction_data_error = TransactionError::InstructionError( - i as u8, - InstructionError::InvalidInstructionData, - ); - let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); - - match try_from_slice_unchecked(&instruction.data) { - Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { - units: compute_unit_limit, - additional_fee, - }) if support_request_units_deprecated => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - if prioritization_fee.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - prioritization_fee = - Some(PrioritizationFeeType::Deprecated(additional_fee as u64)); - } - Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { - if requested_heap_size.is_some() { - return Err(duplicate_instruction_error); - } - requested_heap_size = Some((bytes, i as u8)); - } - Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - } - Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { - if prioritization_fee.is_some() { - return Err(duplicate_instruction_error); - } - prioritization_fee = - Some(PrioritizationFeeType::ComputeUnitPrice(micro_lamports)); - } - Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) - if support_set_loaded_accounts_data_size_limit_ix => - { - if updated_loaded_accounts_data_size_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_loaded_accounts_data_size_limit = Some(bytes as usize); - } - _ => return Err(invalid_instruction_data_error), - } - } else { - // only include non-request instructions in default max calc - num_non_compute_budget_instructions = - num_non_compute_budget_instructions.saturating_add(1); - } - } - - if let Some((bytes, i)) = requested_heap_size { - if bytes > MAX_HEAP_FRAME_BYTES - || bytes < MIN_HEAP_FRAME_BYTES as u32 - || 
bytes % 1024 != 0 - { - return Err(TransactionError::InstructionError( - i, - InstructionError::InvalidInstructionData, - )); - } - self.heap_size = bytes; - } - - let compute_unit_limit = updated_compute_unit_limit - .unwrap_or_else(|| { - num_non_compute_budget_instructions - .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) - }) - .min(MAX_COMPUTE_UNIT_LIMIT); - self.compute_unit_limit = u64::from(compute_unit_limit); - - self.loaded_accounts_data_size_limit = updated_loaded_accounts_data_size_limit - .unwrap_or(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) - .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); - - Ok(prioritization_fee - .map(|fee_type| PrioritizationFeeDetails::new(fee_type, self.compute_unit_limit)) - .unwrap_or_default()) - } - - pub fn fee_budget_limits<'a>( + pub fn try_from_instructions<'a>( instructions: impl Iterator, feature_set: &FeatureSet, - ) -> FeeBudgetLimits { - let mut compute_budget = Self::default(); - - let prioritization_fee_details = compute_budget - .process_instructions( - instructions, - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ) - .unwrap_or_default(); - - FeeBudgetLimits { - loaded_accounts_data_size_limit: compute_budget.loaded_accounts_data_size_limit, - heap_cost: compute_budget.heap_cost, - compute_unit_limit: compute_budget.compute_unit_limit, - prioritization_fee: prioritization_fee_details.get_fee(), - } + ) -> Result { + let compute_budget_limits = process_compute_budget_instructions(instructions, feature_set)?; + Ok(ComputeBudget { + compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), + heap_size: compute_budget_limits.updated_heap_bytes, + ..ComputeBudget::default() + }) } /// Returns cost of the Poseidon hash function for the given number of @@ -350,489 +222,3 @@ impl ComputeBudget { Some(final_result) } } - -#[cfg(test)] -mod tests { - use { - super::*, - solana_sdk::{ - hash::Hash, - instruction::Instruction, - message::Message, - pubkey::Pubkey, - signature::Keypair, - signer::Signer, - system_instruction::{self}, - transaction::{SanitizedTransaction, Transaction}, - }, - }; - - macro_rules! 
test { - ( $instructions: expr, $expected_result: expr, $expected_budget: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { - let payer_keypair = Keypair::new(); - let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( - &[&payer_keypair], - Message::new($instructions, Some(&payer_keypair.pubkey())), - Hash::default(), - )); - let mut compute_budget = ComputeBudget::default(); - let result = compute_budget.process_instructions( - tx.message().program_instructions_iter(), - false, /*not support request_units_deprecated*/ - $support_set_loaded_accounts_data_size_limit_ix, - ); - assert_eq!($expected_result, result); - assert_eq!(compute_budget, $expected_budget); - }; - ( $instructions: expr, $expected_result: expr, $expected_budget: expr) => { - test!($instructions, $expected_result, $expected_budget, false); - }; - } - - #[test] - fn test_process_instructions() { - // Units - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - ..ComputeBudget::default() - } - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - ..ComputeBudget::default() - } - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - } - ); - - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(1), - ComputeBudgetInstruction::set_compute_unit_price(42) - ], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(42), - 1 - )), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - } - ); - - // HeapFrame - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(40 * 1024), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: 40 * 1024, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(40 * 1024 + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(31 * 1024), - 
Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: MAX_HEAP_FRAME_BYTES, - ..ComputeBudget::default() - } - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(1), - ], - Err(TransactionError::InstructionError( - 3, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 7, - ..ComputeBudget::default() - } - ); - - // Combined - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(u64::MAX), - MAX_COMPUTE_UNIT_LIMIT as u64, - )), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - heap_size: MAX_HEAP_FRAME_BYTES, - ..ComputeBudget::default() - } - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(u64::MAX), - 1 - )), - ComputeBudget { - compute_unit_limit: 1, - heap_size: MAX_HEAP_FRAME_BYTES, - ..ComputeBudget::default() - } - ); - - // Duplicates - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT - 1), - ], - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default() - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - 
ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ], - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default() - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_price(0), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default() - ); - - // deprecated - test!( - &[Instruction::new_with_borsh( - compute_budget::id(), - &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { - units: 1_000, - additional_fee: 10 - }, - vec![] - )], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - } - - #[test] - fn test_process_loaded_accounts_data_size_limit_instruction() { - // Assert for empty instructions, change value of support_set_loaded_accounts_data_size_limit_ix - // will not change results, which should all be default - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() - }, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents, - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set with data_size - // else - // return InstructionError - let data_size: usize = 1; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = - if support_set_loaded_accounts_data_size_limit_ix { - ( - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - loaded_accounts_data_size_limit: data_size, - ..ComputeBudget::default() - }, - ) - } else { - ( - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - ) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents, with greater than max value - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to max data size - // else - // return InstructionError - let data_size: usize = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = - if support_set_loaded_accounts_data_size_limit_ix { - ( - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudget::default() - }, - ) - } else { - ( - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - ) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when 
set_loaded_accounts_data_size_limit is not presented - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to default data size - // else - // return - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = ( - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudget::default() - }, - ); - - test!( - &[Instruction::new_with_bincode( - Pubkey::new_unique(), - &0_u8, - vec![] - ),], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents more than once, - // if support_set_loaded_accounts_data_size_limit_ix then - // return DuplicateInstruction - // else - // return InstructionError - let data_size: usize = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = - if support_set_loaded_accounts_data_size_limit_ix { - ( - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default(), - ) - } else { - ( - Err(TransactionError::InstructionError( - 1, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - ) - }; - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - ], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - } - - #[test] - fn test_process_mixed_instructions_without_compute_budget() { - let payer_keypair = Keypair::new(); - - let transaction = - SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - system_instruction::transfer(&payer_keypair.pubkey(), &Pubkey::new_unique(), 2), - ], - Some(&payer_keypair.pubkey()), - &[&payer_keypair], - Hash::default(), - )); - - let mut compute_budget = ComputeBudget::default(); - let result = compute_budget.process_instructions( - transaction.message().program_instructions_iter(), - false, //not support request_units_deprecated - true, //support_set_loaded_accounts_data_size_limit_ix, - ); - - // assert process_instructions will be successful with default, - assert_eq!(Ok(PrioritizationFeeDetails::default()), result); - // assert the default compute_unit_limit is 2 times default: one for bpf ix, one for - // builtin ix. - assert_eq!( - compute_budget, - ComputeBudget { - compute_unit_limit: 2 * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - ..ComputeBudget::default() - } - ); - } -} diff --git a/program-runtime/src/compute_budget_processor.rs b/program-runtime/src/compute_budget_processor.rs new file mode 100644 index 00000000000000..b2c3a892493d41 --- /dev/null +++ b/program-runtime/src/compute_budget_processor.rs @@ -0,0 +1,704 @@ +//! Process compute_budget instructions to extract and sanitize limits. 
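As a quick reference for one of the sanitization rules this new module enforces (see `sanitize_requested_heap_size` further down in this file): a requested heap frame must be a multiple of 1024 bytes between the 32 KiB loader default (`HEAP_LENGTH`, aliased as `MIN_HEAP_FRAME_BYTES` in the use block below) and 256 KiB. A standalone restatement, with the bounds written out and assertions consistent with the tests being migrated in this patch:

const MIN_HEAP_FRAME_BYTES: u32 = 32 * 1024; // solana_sdk::entrypoint::HEAP_LENGTH
const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024;

fn sanitize_requested_heap_size(bytes: u32) -> bool {
    (MIN_HEAP_FRAME_BYTES..=MAX_HEAP_FRAME_BYTES).contains(&bytes) && bytes % 1024 == 0
}

fn main() {
    assert!(sanitize_requested_heap_size(40 * 1024)); // in range, 1024-aligned
    assert!(!sanitize_requested_heap_size(40 * 1024 + 1)); // not 1024-aligned
    assert!(!sanitize_requested_heap_size(31 * 1024)); // below the 32 KiB minimum
    assert!(!sanitize_requested_heap_size(256 * 1024 + 1024)); // above the 256 KiB maximum
}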
+use {
+    crate::{
+        compute_budget::DEFAULT_HEAP_COST,
+        prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType},
+    },
+    solana_sdk::{
+        borsh0_10::try_from_slice_unchecked,
+        compute_budget::{self, ComputeBudgetInstruction},
+        entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES,
+        feature_set::{
+            add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix,
+            FeatureSet,
+        },
+        fee::FeeBudgetLimits,
+        instruction::{CompiledInstruction, InstructionError},
+        pubkey::Pubkey,
+        transaction::TransactionError,
+    },
+};
+
+const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024;
+pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000;
+pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000;
+
+/// The total accounts data a transaction can load is limited to 64MiB so as not to break
+/// anyone on Mainnet-beta today. It can be set by the set_loaded_accounts_data_size_limit instruction.
+pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: u32 = 64 * 1024 * 1024;
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct ComputeBudgetLimits {
+    pub updated_heap_bytes: u32,
+    pub compute_unit_limit: u32,
+    pub compute_unit_price: u64,
+    pub loaded_accounts_bytes: u32,
+    pub deprecated_additional_fee: Option<u64>,
+}
+
+impl Default for ComputeBudgetLimits {
+    fn default() -> Self {
+        ComputeBudgetLimits {
+            updated_heap_bytes: u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap(),
+            compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT,
+            compute_unit_price: 0,
+            loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES,
+            deprecated_additional_fee: None,
+        }
+    }
+}
+
+impl From<ComputeBudgetLimits> for FeeBudgetLimits {
+    fn from(val: ComputeBudgetLimits) -> Self {
+        let prioritization_fee =
+            if let Some(deprecated_additional_fee) = val.deprecated_additional_fee {
+                deprecated_additional_fee
+            } else {
+                let prioritization_fee_details = PrioritizationFeeDetails::new(
+                    PrioritizationFeeType::ComputeUnitPrice(val.compute_unit_price),
+                    u64::from(val.compute_unit_limit),
+                );
+                prioritization_fee_details.get_fee()
+            };
+
+        FeeBudgetLimits {
+            // NOTE - `usize::try_from(u32).unwrap()` may fail if the target is 16-bit and
+            // `loaded_accounts_bytes` is greater than u16::MAX. In that case, a panic is proper.
+            loaded_accounts_data_size_limit: usize::try_from(val.loaded_accounts_bytes).unwrap(),
+            heap_cost: DEFAULT_HEAP_COST,
+            compute_unit_limit: u64::from(val.compute_unit_limit),
+            prioritization_fee,
+        }
+    }
+}
+
+/// Processing compute_budget instructions can be treated as part of transaction
+/// sanitizing: a transaction whose compute_budget instructions fail to process will
+/// eventually be dropped without execution, so it may as well fail early.
+pub fn process_compute_budget_instructions<'a>(
+ instructions: impl Iterator<Item = (&'a Pubkey, &'a CompiledInstruction)>,
+ feature_set: &FeatureSet,
+) -> Result<ComputeBudgetLimits, TransactionError> {
+ let support_request_units_deprecated =
+ !feature_set.is_active(&remove_deprecated_request_unit_ix::id());
+ let support_set_loaded_accounts_data_size_limit_ix =
+ feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id());
+
+ let mut num_non_compute_budget_instructions: u32 = 0;
+ let mut updated_compute_unit_limit = None;
+ let mut updated_compute_unit_price = None;
+ let mut requested_heap_size = None;
+ let mut updated_loaded_accounts_data_size_limit = None;
+ let mut deprecated_additional_fee = None;
+
+ for (i, (program_id, instruction)) in instructions.enumerate() {
+ if compute_budget::check_id(program_id) {
+ let invalid_instruction_data_error = TransactionError::InstructionError(
+ i as u8,
+ InstructionError::InvalidInstructionData,
+ );
+ let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8);
+
+ match try_from_slice_unchecked(&instruction.data) {
+ Ok(ComputeBudgetInstruction::RequestUnitsDeprecated {
+ units: compute_unit_limit,
+ additional_fee,
+ }) if support_request_units_deprecated => {
+ if updated_compute_unit_limit.is_some() {
+ return Err(duplicate_instruction_error);
+ }
+ if updated_compute_unit_price.is_some() {
+ return Err(duplicate_instruction_error);
+ }
+ updated_compute_unit_limit = Some(compute_unit_limit);
+ updated_compute_unit_price =
+ support_deprecated_requested_units(additional_fee, compute_unit_limit);
+ deprecated_additional_fee = Some(u64::from(additional_fee));
+ }
+ Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => {
+ if requested_heap_size.is_some() {
+ return Err(duplicate_instruction_error);
+ }
+ if sanitize_requested_heap_size(bytes) {
+ requested_heap_size = Some(bytes);
+ } else {
+ return Err(invalid_instruction_data_error);
+ }
+ }
+ Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => {
+ if updated_compute_unit_limit.is_some() {
+ return Err(duplicate_instruction_error);
+ }
+ updated_compute_unit_limit = Some(compute_unit_limit);
+ }
+ Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => {
+ if updated_compute_unit_price.is_some() {
+ return Err(duplicate_instruction_error);
+ }
+ updated_compute_unit_price = Some(micro_lamports);
+ }
+ Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes))
+ if support_set_loaded_accounts_data_size_limit_ix =>
+ {
+ if updated_loaded_accounts_data_size_limit.is_some() {
+ return Err(duplicate_instruction_error);
+ }
+ updated_loaded_accounts_data_size_limit = Some(bytes);
+ }
+ _ => return Err(invalid_instruction_data_error),
+ }
+ } else {
+ // only include non-request instructions in default max calc
+ num_non_compute_budget_instructions =
+ num_non_compute_budget_instructions.saturating_add(1);
+ }
+ }
+
+ // sanitize limits
+ let updated_heap_bytes = requested_heap_size
+ .unwrap_or(u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()) // loader's default heap_size
+ .min(MAX_HEAP_FRAME_BYTES);
+
+ let compute_unit_limit = updated_compute_unit_limit
+ .unwrap_or_else(|| {
+ num_non_compute_budget_instructions
+ .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT)
+ })
+ .min(MAX_COMPUTE_UNIT_LIMIT);
+
+ let compute_unit_price = updated_compute_unit_price.unwrap_or(0);
+
+ let loaded_accounts_bytes = updated_loaded_accounts_data_size_limit
+
.unwrap_or(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES)
+ .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES);
+
+ Ok(ComputeBudgetLimits {
+ updated_heap_bytes,
+ compute_unit_limit,
+ compute_unit_price,
+ loaded_accounts_bytes,
+ deprecated_additional_fee,
+ })
+}
+
+fn sanitize_requested_heap_size(bytes: u32) -> bool {
+ (u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()..=MAX_HEAP_FRAME_BYTES).contains(&bytes)
+ && bytes % 1024 == 0
+}
+
+// Supports request_units_deprecated ix, returns compute_unit_price from deprecated requested
+// units.
+fn support_deprecated_requested_units(additional_fee: u32, compute_unit_limit: u32) -> Option<u64> {
+ // TODO: remove support of 'Deprecated' after feature remove_deprecated_request_unit_ix::id() is activated
+ let prioritization_fee_details = PrioritizationFeeDetails::new(
+ PrioritizationFeeType::Deprecated(u64::from(additional_fee)),
+ u64::from(compute_unit_limit),
+ );
+ Some(prioritization_fee_details.get_priority())
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ super::*,
+ solana_sdk::{
+ hash::Hash,
+ instruction::Instruction,
+ message::Message,
+ pubkey::Pubkey,
+ signature::Keypair,
+ signer::Signer,
+ system_instruction::{self},
+ transaction::{SanitizedTransaction, Transaction},
+ },
+ };
+
+ macro_rules! test {
+ ( $instructions: expr, $expected_result: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => {
+ let payer_keypair = Keypair::new();
+ let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new(
+ &[&payer_keypair],
+ Message::new($instructions, Some(&payer_keypair.pubkey())),
+ Hash::default(),
+ ));
+ let mut feature_set = FeatureSet::default();
+ feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0);
+ if $support_set_loaded_accounts_data_size_limit_ix {
+ feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0);
+ }
+ let result = process_compute_budget_instructions(
+ tx.message().program_instructions_iter(),
+ &feature_set,
+ );
+ assert_eq!($expected_result, result);
+ };
+ ( $instructions: expr, $expected_result: expr ) => {
+ test!($instructions, $expected_result, false);
+ };
+ }
+
+ #[test]
+ fn test_process_instructions() {
+ // Units
+ test!(
+ &[],
+ Ok(ComputeBudgetLimits {
+ compute_unit_limit: 0,
+ ..ComputeBudgetLimits::default()
+ })
+ );
+ test!(
+ &[
+ ComputeBudgetInstruction::set_compute_unit_limit(1),
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ],
+ Ok(ComputeBudgetLimits {
+ compute_unit_limit: 1,
+ ..ComputeBudgetLimits::default()
+ })
+ );
+ test!(
+ &[
+ ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT + 1),
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ],
+ Ok(ComputeBudgetLimits {
+ compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT,
+ ..ComputeBudgetLimits::default()
+ })
+ );
+ test!(
+ &[
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT),
+ ],
+ Ok(ComputeBudgetLimits {
+ compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT,
+ ..ComputeBudgetLimits::default()
+ })
+ );
+ test!(
+ &[
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ComputeBudgetInstruction::set_compute_unit_limit(1),
+ ],
+ Ok(ComputeBudgetLimits {
+ compute_unit_limit: 1,
+ ..ComputeBudgetLimits::default()
+ })
+ );
+ test!(
+ &[
+
ComputeBudgetInstruction::set_compute_unit_limit(1), + ComputeBudgetInstruction::set_compute_unit_price(42) + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: 1, + compute_unit_price: 42, + ..ComputeBudgetLimits::default() + }) + ); + + // HeapFrame + test!( + &[], + Ok(ComputeBudgetLimits { + compute_unit_limit: 0, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(40 * 1024), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + updated_heap_bytes: 40 * 1024, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(40 * 1024 + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(31 * 1024), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + updated_heap_bytes: MAX_HEAP_FRAME_BYTES, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(1), + ], + Err(TransactionError::InstructionError( + 3, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT * 7, + ..ComputeBudgetLimits::default() + }) + ); + + // Combined + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Ok(ComputeBudgetLimits { + compute_unit_price: u64::MAX, + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + updated_heap_bytes: MAX_HEAP_FRAME_BYTES, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(1), + 
ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES),
+ ComputeBudgetInstruction::set_compute_unit_price(u64::MAX),
+ ],
+ Ok(ComputeBudgetLimits {
+ compute_unit_price: u64::MAX,
+ compute_unit_limit: 1,
+ updated_heap_bytes: MAX_HEAP_FRAME_BYTES,
+ ..ComputeBudgetLimits::default()
+ })
+ );
+
+ // Duplicates
+ test!(
+ &[
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT),
+ ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT - 1),
+ ],
+ Err(TransactionError::DuplicateInstruction(2))
+ );
+
+ test!(
+ &[
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32),
+ ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES),
+ ],
+ Err(TransactionError::DuplicateInstruction(2))
+ );
+ test!(
+ &[
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ComputeBudgetInstruction::set_compute_unit_price(0),
+ ComputeBudgetInstruction::set_compute_unit_price(u64::MAX),
+ ],
+ Err(TransactionError::DuplicateInstruction(2))
+ );
+
+ // deprecated
+ test!(
+ &[Instruction::new_with_borsh(
+ compute_budget::id(),
+ &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated {
+ units: 1_000,
+ additional_fee: 10
+ },
+ vec![]
+ )],
+ Err(TransactionError::InstructionError(
+ 0,
+ InstructionError::InvalidInstructionData,
+ ))
+ );
+ }
+
+ #[test]
+ fn test_process_loaded_accounts_data_size_limit_instruction() {
+ // Assert that for empty instructions, changing the value of support_set_loaded_accounts_data_size_limit_ix
+ // will not change the results, which should all be default
+ for support_set_loaded_accounts_data_size_limit_ix in [true, false] {
+ test!(
+ &[],
+ Ok(ComputeBudgetLimits {
+ compute_unit_limit: 0,
+ ..ComputeBudgetLimits::default()
+ }),
+ support_set_loaded_accounts_data_size_limit_ix
+ );
+ }
+
+ // Assert when set_loaded_accounts_data_size_limit is present,
+ // if support_set_loaded_accounts_data_size_limit_ix then
+ // budget is set with data_size
+ // else
+ // return InstructionError
+ let data_size = 1;
+ for support_set_loaded_accounts_data_size_limit_ix in [true, false] {
+ let expected_result = if support_set_loaded_accounts_data_size_limit_ix {
+ Ok(ComputeBudgetLimits {
+ compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT,
+ loaded_accounts_bytes: data_size,
+ ..ComputeBudgetLimits::default()
+ })
+ } else {
+ Err(TransactionError::InstructionError(
+ 0,
+ InstructionError::InvalidInstructionData,
+ ))
+ };
+
+ test!(
+ &[
+ ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size),
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ],
+ expected_result,
+ support_set_loaded_accounts_data_size_limit_ix
+ );
+ }
+
+ // Assert when set_loaded_accounts_data_size_limit is present with a value greater than the max,
+ // if support_set_loaded_accounts_data_size_limit_ix then
+ // budget is set to max data size
+ // else
+ // return InstructionError
+ let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1;
+ for support_set_loaded_accounts_data_size_limit_ix in [true, false] {
+ let expected_result = if support_set_loaded_accounts_data_size_limit_ix {
+ Ok(ComputeBudgetLimits {
+ compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT,
+ loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES,
+ ..ComputeBudgetLimits::default()
+ })
+ } else {
+ Err(TransactionError::InstructionError(
+ 0,
+
InstructionError::InvalidInstructionData,
+ ))
+ };
+
+ test!(
+ &[
+ ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size),
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ],
+ expected_result,
+ support_set_loaded_accounts_data_size_limit_ix
+ );
+ }
+
+ // Assert when set_loaded_accounts_data_size_limit is not present,
+ // if support_set_loaded_accounts_data_size_limit_ix then
+ // budget is set to default data size
+ // else
+ // return
+ for support_set_loaded_accounts_data_size_limit_ix in [true, false] {
+ let expected_result = Ok(ComputeBudgetLimits {
+ compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT,
+ loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES,
+ ..ComputeBudgetLimits::default()
+ });
+
+ test!(
+ &[Instruction::new_with_bincode(
+ Pubkey::new_unique(),
+ &0_u8,
+ vec![]
+ ),],
+ expected_result,
+ support_set_loaded_accounts_data_size_limit_ix
+ );
+ }
+
+ // Assert when set_loaded_accounts_data_size_limit is present more than once,
+ // if support_set_loaded_accounts_data_size_limit_ix then
+ // return DuplicateInstruction
+ // else
+ // return InstructionError
+ let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES;
+ for support_set_loaded_accounts_data_size_limit_ix in [true, false] {
+ let expected_result = if support_set_loaded_accounts_data_size_limit_ix {
+ Err(TransactionError::DuplicateInstruction(2))
+ } else {
+ Err(TransactionError::InstructionError(
+ 1,
+ InstructionError::InvalidInstructionData,
+ ))
+ };
+
+ test!(
+ &[
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size),
+ ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size),
+ ],
+ expected_result,
+ support_set_loaded_accounts_data_size_limit_ix
+ );
+ }
+ }
+
+ #[test]
+ fn test_process_mixed_instructions_without_compute_budget() {
+ let payer_keypair = Keypair::new();
+
+ let transaction =
+ SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer(
+ &[
+ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ system_instruction::transfer(&payer_keypair.pubkey(), &Pubkey::new_unique(), 2),
+ ],
+ Some(&payer_keypair.pubkey()),
+ &[&payer_keypair],
+ Hash::default(),
+ ));
+
+ let mut feature_set = FeatureSet::default();
+ feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0);
+ feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0);
+
+ let result = process_compute_budget_instructions(
+ transaction.message().program_instructions_iter(),
+ &feature_set,
+ );
+
+ // assert process_instructions will be successful with defaults,
+ // and the default compute_unit_limit is 2 times the default: one for the bpf ix, one for
+ // the builtin ix.
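+ // (A worked check of that claim: with DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT =
+ // 200_000, the two non-compute-budget instructions default to
+ // 2 * 200_000 = 400_000 CUs in total.)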
+ assert_eq!(
+ result,
+ Ok(ComputeBudgetLimits {
+ compute_unit_limit: 2 * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT,
+ ..ComputeBudgetLimits::default()
+ })
+ );
+ }
+
+ fn try_prioritization_fee_from_deprecated_requested_units(
+ additional_fee: u32,
+ compute_unit_limit: u32,
+ ) {
+ let payer_keypair = Keypair::new();
+ let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new(
+ &[&payer_keypair],
+ Message::new(
+ &[Instruction::new_with_borsh(
+ compute_budget::id(),
+ &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated {
+ units: compute_unit_limit,
+ additional_fee,
+ },
+ vec![],
+ )],
+ Some(&payer_keypair.pubkey()),
+ ),
+ Hash::default(),
+ ));
+
+ // successfully process deprecated instruction
+ let compute_budget_limits = process_compute_budget_instructions(
+ tx.message().program_instructions_iter(),
+ &FeatureSet::default(),
+ )
+ .unwrap();
+
+ // assert compute_budget_limit
+ let expected_compute_unit_price = (additional_fee as u128)
+ .saturating_mul(1_000_000)
+ .checked_div(compute_unit_limit as u128)
+ .map(|cu_price| u64::try_from(cu_price).unwrap_or(u64::MAX))
+ .unwrap();
+ let expected_compute_unit_limit = compute_unit_limit.min(MAX_COMPUTE_UNIT_LIMIT);
+ assert_eq!(
+ compute_budget_limits.compute_unit_price,
+ expected_compute_unit_price
+ );
+ assert_eq!(
+ compute_budget_limits.compute_unit_limit,
+ expected_compute_unit_limit
+ );
+
+ // assert fee_budget_limits
+ let fee_budget_limits = FeeBudgetLimits::from(compute_budget_limits);
+ assert_eq!(
+ fee_budget_limits.prioritization_fee,
+ u64::from(additional_fee)
+ );
+ assert_eq!(
+ fee_budget_limits.compute_unit_limit,
+ u64::from(expected_compute_unit_limit)
+ );
+ }
+
+ #[test]
+ fn test_support_deprecated_requested_units() {
+ // a normal case
+ try_prioritization_fee_from_deprecated_requested_units(647, 6002);
+
+ // requesting cu limit more than MAX, div result will round down
+ try_prioritization_fee_from_deprecated_requested_units(
+ 640,
+ MAX_COMPUTE_UNIT_LIMIT + 606_002,
+ );
+
+ // requesting cu limit more than MAX, div result will round up
+ try_prioritization_fee_from_deprecated_requested_units(
+ 764,
+ MAX_COMPUTE_UNIT_LIMIT + 606_004,
+ );
+ }
+}
diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs
index be95fca637ccdb..6ee87fefa7ccdc 100644
--- a/program-runtime/src/invoke_context.rs
+++ b/program-runtime/src/invoke_context.rs
@@ -769,7 +769,7 @@ pub fn mock_process_instruction<F: FnMut(&mut InvokeContext), G: FnMut(&mut InvokeContext)>( TransactionExecutionResult::NotExecuted(e.clone()),
 (Ok(loaded_transaction), nonce) => {
- let
compute_budget = + if let Some(compute_budget) = self.runtime_config.compute_budget { + compute_budget + } else { + let mut compute_budget_process_transaction_time = + Measure::start("compute_budget_process_transaction_time"); + let maybe_compute_budget = ComputeBudget::try_from_instructions( + tx.message().program_instructions_iter(), + &self.feature_set, + ); + compute_budget_process_transaction_time.stop(); + saturating_add_assign!( + timings + .execute_accessories + .compute_budget_process_transaction_us, + compute_budget_process_transaction_time.as_us() + ); + if let Err(err) = maybe_compute_budget { + return TransactionExecutionResult::NotExecuted(err); + } + maybe_compute_budget.unwrap() + }; let result = self.execute_loaded_transaction( tx, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 343e87975b57a5..30bf6ceb7ba93f 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -46,7 +46,8 @@ use { }, solana_logger, solana_program_runtime::{ - compute_budget::{self, ComputeBudget, MAX_COMPUTE_UNIT_LIMIT}, + compute_budget::ComputeBudget, + compute_budget_processor::{self, MAX_COMPUTE_UNIT_LIMIT}, declare_process_instruction, invoke_context::mock_process_instruction, loaded_programs::{LoadedProgram, LoadedProgramType, DELAY_VISIBILITY_SLOT_OFFSET}, @@ -10120,7 +10121,9 @@ fn test_compute_budget_program_noop() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + compute_unit_limit: u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + ), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10133,7 +10136,7 @@ fn test_compute_budget_program_noop() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -10163,7 +10166,9 @@ fn test_compute_request_instruction() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + compute_unit_limit: u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + ), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10176,7 +10181,7 @@ fn test_compute_request_instruction() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -10213,7 +10218,9 @@ fn test_failed_compute_request_instruction() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + compute_unit_limit: u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + ), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10444,14 +10451,19 @@ fn calculate_test_fee( remove_congestion_multiplier: bool, ) -> u64 { let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); if !support_set_accounts_data_size_limit_ix { - 
feature_set.deactivate(&include_loaded_accounts_data_size_in_fee_calculation::id());
+ feature_set.deactivate(
+ &solana_sdk::feature_set::include_loaded_accounts_data_size_in_fee_calculation::id(),
+ );
 }
 let budget_limits =
- ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set);
+ process_compute_budget_instructions(message.program_instructions_iter(), &feature_set)
+ .unwrap_or_default()
+ .into();
+
 fee_structure.calculate_fee(
 message,
 lamports_per_signature,
@@ -11478,7 +11490,9 @@ fn test_rent_state_list_len() {
 );
 let compute_budget = bank.runtime_config.compute_budget.unwrap_or_else(|| {
- ComputeBudget::new(compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64)
+ ComputeBudget::new(u64::from(
+ compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT,
+ ))
 });
 let transaction_context = TransactionContext::new(
 loaded_txs[0].0.as_ref().unwrap().accounts.clone(),
diff --git a/runtime/src/transaction_priority_details.rs b/runtime/src/transaction_priority_details.rs
index 1e4ddc532f2091..d7a1ed590894a1 100644
--- a/runtime/src/transaction_priority_details.rs
+++ b/runtime/src/transaction_priority_details.rs
@@ -1,6 +1,7 @@
 use {
- solana_program_runtime::compute_budget::ComputeBudget,
+ solana_program_runtime::compute_budget_processor::process_compute_budget_instructions,
 solana_sdk::{
+ feature_set::FeatureSet,
 instruction::CompiledInstruction,
 pubkey::Pubkey,
 transaction::{SanitizedTransaction, SanitizedVersionedTransaction},
@@ -23,18 +24,17 @@ pub trait GetTransactionPriorityDetails {
 instructions: impl Iterator<Item = (&Pubkey, &CompiledInstruction)>,
 _round_compute_unit_price_enabled: bool,
 ) -> Option<TransactionPriorityDetails> {
- let mut compute_budget = ComputeBudget::default();
- let prioritization_fee_details = compute_budget
- .process_instructions(
- instructions,
- true, // supports prioritization by request_units_deprecated instruction
- true, // enable support set accounts data size instruction
- // TODO: round_compute_unit_price_enabled: bool
- )
- .ok()?;
+ let mut feature_set = FeatureSet::default();
+ feature_set.activate(
+ &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(),
+ 0,
+ );
+
+ let compute_budget_limits =
+ process_compute_budget_instructions(instructions, &feature_set).ok()?;
 Some(TransactionPriorityDetails {
- priority: prioritization_fee_details.get_priority(),
- compute_unit_limit: compute_budget.compute_unit_limit,
+ priority: compute_budget_limits.compute_unit_price,
+ compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit),
 })
 }
 }
@@ -98,8 +98,8 @@ mod tests {
 Some(TransactionPriorityDetails {
 priority: 0,
 compute_unit_limit:
- solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT
- as u64
+ solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT
+ as u64,
 })
 );
@@ -111,8 +111,8 @@ mod tests {
 Some(TransactionPriorityDetails {
 priority: 0,
 compute_unit_limit:
- solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT
- as u64
+ solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT
+ as u64,
 })
 );
 }
@@ -174,8 +174,8 @@ mod tests {
 Some(TransactionPriorityDetails {
 priority: requested_price,
 compute_unit_limit:
- solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT
- as u64
+ solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT
+ as u64,
 })
 );
@@ -187,8 +187,8 @@ mod tests {
 Some(TransactionPriorityDetails {
 priority: requested_price,
compute_unit_limit: - solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64 + solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64, }) ); } From 080285cb95b923dc8399378f4574d61bc44b1636 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 27 Oct 2023 12:29:41 +0900 Subject: [PATCH 14/98] Adjust solana-core for cleaner scheduler-pr diff (#33881) --- core/src/replay_stage.rs | 4 +--- core/src/validator.rs | 11 ++++++----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 2c5c0ff9f526bd..5cceb8dff3502b 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -2786,7 +2786,6 @@ impl ReplayStage { match replay_result { Ok(replay_tx_count) => tx_count += replay_tx_count, Err(err) => { - // Error means the slot needs to be marked as dead Self::mark_dead_slot( blockstore, bank, @@ -2802,8 +2801,7 @@ impl ReplayStage { ancestor_hashes_replay_update_sender, purge_repair_slot_counter, ); - // If the bank was corrupted, don't try to run the below logic to check if the - // bank is completed + // don't try to run the below logic to check if the bank is completed continue; } } diff --git a/core/src/validator.rs b/core/src/validator.rs index ea67b7e6bb61ab..9dede099d1778c 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -812,6 +812,12 @@ impl Validator { config.block_verification_method, config.block_production_method ); + let (replay_vote_sender, replay_vote_receiver) = unbounded(); + + // block min prioritization fee cache should be readable by RPC, and writable by validator + // (by both replay stage and banking stage) + let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::default()); + let leader_schedule_cache = Arc::new(leader_schedule_cache); let entry_notification_sender = entry_notifier_service .as_ref() @@ -939,10 +945,6 @@ impl Validator { )), }; - // block min prioritization fee cache should be readable by RPC, and writable by validator - // (by both replay stage and banking stage) - let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::default()); - let rpc_override_health_check = Arc::new(AtomicBool::new(config.rpc_config.disable_health_check)); let ( @@ -1229,7 +1231,6 @@ impl Validator { }; let last_vote = tower.last_vote(); - let (replay_vote_sender, replay_vote_receiver) = unbounded(); let tvu = Tvu::new( vote_account, authorized_voter_keypairs, From 0873705c1bc4c7a084eae46ca582e261ab8af97b Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 27 Oct 2023 12:30:45 +0900 Subject: [PATCH 15/98] Define register_unique_recent_blockhash_for_test (#33880) --- program-test/src/lib.rs | 4 ++-- programs/sbf/tests/programs.rs | 2 +- runtime/src/bank.rs | 6 ++++++ runtime/src/bank/tests.rs | 2 +- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 5192319aecaae3..37e848471a8b3a 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -888,7 +888,7 @@ impl ProgramTest { .read() .unwrap() .working_bank() - .register_recent_blockhash(&Hash::new_unique()); + .register_unique_recent_blockhash_for_test(); } }); @@ -1040,7 +1040,7 @@ impl ProgramTestContext { .read() .unwrap() .working_bank() - .register_recent_blockhash(&Hash::new_unique()); + .register_unique_recent_blockhash_for_test(); } }), ); diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 465f9f44a84f1e..97d5c2ceb58756 100644 --- 
a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -4013,7 +4013,7 @@ fn test_cpi_account_ownership_writability() { TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE, TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLER, ] { - bank.register_recent_blockhash(&Hash::new_unique()); + bank.register_unique_recent_blockhash_for_test(); let account = AccountSharedData::new(42, account_size, &invoke_program_id); bank.store_account(&account_keypair.pubkey(), &account); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 33d7ec6c9b685b..edc2c26bc4a3e9 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4186,6 +4186,12 @@ impl Bank { self.update_recent_blockhashes_locked(&w_blockhash_queue); } + // gating this under #[cfg(feature = "dev-context-only-utils")] isn't easy due to + // solana-program-test's usage... + pub fn register_unique_recent_blockhash_for_test(&self) { + self.register_recent_blockhash(&Hash::new_unique()) + } + /// Tell the bank which Entry IDs exist on the ledger. This function assumes subsequent calls /// correspond to later entries, and will boot the oldest ones once its internal cache is full. /// Once boot, the bank will reject transactions using that `hash`. diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 30bf6ceb7ba93f..311b928a2995bd 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -13373,7 +13373,7 @@ fn test_program_execution_restricted_for_stake_account_in_reward_period() { // Push a dummy blockhash, so that the latest_blockhash() for the transfer transaction in each // iteration are different. Otherwise, all those transactions will be the same, and will not be // executed by the bank except the first one. - bank.register_recent_blockhash(&Hash::new_unique()); + bank.register_unique_recent_blockhash_for_test(); previous_bank = Arc::new(bank); } } From a2138dba986d83901712f65fe589810bc753f538 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 27 Oct 2023 00:26:51 -0400 Subject: [PATCH 16/98] snapshot test requires using snapshot archives at startup (#33885) --- local-cluster/tests/local_cluster.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 658fdf0de3b04e..d675feda0a06f8 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -765,12 +765,16 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st accounts_hash_interval, num_account_paths, ); - let validator_snapshot_test_config = SnapshotValidatorConfig::new( + let mut validator_snapshot_test_config = SnapshotValidatorConfig::new( full_snapshot_interval, incremental_snapshot_interval, accounts_hash_interval, num_account_paths, ); + // The test has asserts that require the validator always boots from snapshot archives + validator_snapshot_test_config + .validator_config + .use_snapshot_archives_at_startup = UseSnapshotArchivesAtStartup::Always; let stake = DEFAULT_NODE_STAKE; let mut config = ClusterConfig { node_stakes: vec![stake], From d04ad6557d17263c86c115d0393e8cdcc75be4bc Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 27 Oct 2023 07:23:29 -0400 Subject: [PATCH 17/98] Fastboots by default (#33883) --- ledger/src/use_snapshot_archives_at_startup.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ledger/src/use_snapshot_archives_at_startup.rs b/ledger/src/use_snapshot_archives_at_startup.rs index e34abfb777967f..b173ed1564e5fa 100644 --- 
a/ledger/src/use_snapshot_archives_at_startup.rs +++ b/ledger/src/use_snapshot_archives_at_startup.rs @@ -8,7 +8,6 @@ use strum::{Display, EnumString, EnumVariantNames, IntoStaticStr, VariantNames}; pub enum UseSnapshotArchivesAtStartup { /// If snapshot archives are used, they will be extracted and overwrite any existing state /// already on disk. This will incur the associated runtime costs for extracting. - #[default] Always, /// If snapshot archives are not used, then the local snapshot state already on disk is /// used instead. If there is no local state on disk, startup will fail. @@ -18,6 +17,7 @@ pub enum UseSnapshotArchivesAtStartup { /// restarting. At startup, the snapshot archive would be the newest and loaded from. /// Note, this also implies that snapshot archives will be used if there is no local snapshot /// state on disk. + #[default] WhenNewest, } From 950ca5ea8605d38fb4352d34f33508e578d18b06 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 27 Oct 2023 21:42:18 +0900 Subject: [PATCH 18/98] Add InstalledScheduler for blockstore_processor (#33875) * Add InstalledScheduler for blockstore_processor * Reverse if clauses * Add more comments for process_batches() * Elaborate comment * Simplify schedule_transaction_executions type --- Cargo.lock | 40 ++++++ Cargo.toml | 1 + ledger/src/blockstore_processor.rs | 180 +++++++++++++++++++----- programs/sbf/Cargo.lock | 97 +++++++++++++ runtime/Cargo.toml | 1 + runtime/src/installed_scheduler_pool.rs | 51 ++++++- 6 files changed, 329 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9b4798f85716f9..7ffb340f5508e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1699,6 +1699,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "eager" version = "0.1.0" @@ -1984,6 +1990,12 @@ dependencies = [ "percent-encoding 2.3.0", ] +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "fs-err" version = "2.9.0" @@ -3137,6 +3149,33 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mockall" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" +dependencies = [ + "cfg-if 1.0.0", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "modular-bitfield" version = "0.11.2" @@ -6929,6 +6968,7 @@ dependencies = [ "lz4", "memmap2", "memoffset 0.9.0", + "mockall", "modular-bitfield", "num-derive 0.4.1", "num-traits", diff --git a/Cargo.toml b/Cargo.toml index 3cd5ee29800d36..71841636218a2d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -243,6 +243,7 @@ memmap2 = "0.5.10" memoffset = "0.9" merlin = "3" min-max-heap = "1.3.0" +mockall = "0.11.4" modular-bitfield = "0.11.2" nix 
= "0.26.4" num-bigint = "0.4.4" diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index bf8fa02249da15..bb717ff8348584 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -294,6 +294,70 @@ fn execute_batches_internal( }) } +// This fn diverts the code-path into two variants. Both must provide exactly the same set of +// validations. For this reason, this fn is deliberately inserted into the code path to be called +// inside process_entries(), so that Bank::prepare_sanitized_batch() has been called on all of +// batches already, while minimizing code duplication (thus divergent behavior risk) at the cost of +// acceptable overhead of meaningless buffering of batches for the scheduler variant. +// +// Also note that the scheduler variant can't implement the batch-level sanitization naively, due +// to the nature of individual tx processing. That's another reason of this particular placement of +// divergent point in the code-path (i.e. not one layer up with its own prepare_sanitized_batch() +// invocation). +fn process_batches( + bank: &BankWithScheduler, + batches: &[TransactionBatchWithIndexes], + transaction_status_sender: Option<&TransactionStatusSender>, + replay_vote_sender: Option<&ReplayVoteSender>, + batch_execution_timing: &mut BatchExecutionTiming, + log_messages_bytes_limit: Option, + prioritization_fee_cache: &PrioritizationFeeCache, +) -> Result<()> { + if bank.has_installed_scheduler() { + debug!( + "process_batches()/schedule_batches_for_execution({} batches)", + batches.len() + ); + // scheduling always succeeds here without being blocked on actual transaction executions. + // The transaction execution errors will be collected via the blocking fn called + // BankWithScheduler::wait_for_completed_scheduler(), if any. 
+ schedule_batches_for_execution(bank, batches);
+ Ok(())
+ } else {
+ debug!(
+ "process_batches()/rebatch_and_execute_batches({} batches)",
+ batches.len()
+ );
+ rebatch_and_execute_batches(
+ bank,
+ batches,
+ transaction_status_sender,
+ replay_vote_sender,
+ batch_execution_timing,
+ log_messages_bytes_limit,
+ prioritization_fee_cache,
+ )
+ }
+}
+
+fn schedule_batches_for_execution(
+ bank: &BankWithScheduler,
+ batches: &[TransactionBatchWithIndexes],
+) {
+ for TransactionBatchWithIndexes {
+ batch,
+ transaction_indexes,
+ } in batches
+ {
+ bank.schedule_transaction_executions(
+ batch
+ .sanitized_transactions()
+ .iter()
+ .zip(transaction_indexes.iter()),
+ );
+ }
+}
+
 fn rebatch_transactions<'a>(
 lock_results: &'a [Result<()>],
 bank: &'a Arc<Bank>,
@@ -314,7 +378,7 @@ fn rebatch_transactions<'a>(
 }
 }
-fn execute_batches(
+fn rebatch_and_execute_batches(
 bank: &Arc<Bank>,
 batches: &[TransactionBatchWithIndexes],
 transaction_status_sender: Option<&TransactionStatusSender>,
@@ -488,7 +552,7 @@ fn process_entries(
 if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) {
 // If it's a tick that will cause a new blockhash to be created,
 // execute the group and register the tick
- execute_batches(
+ process_batches(
 bank,
 &batches,
 transaction_status_sender,
@@ -541,7 +605,7 @@ fn process_entries(
 } else {
 // else we have an entry that conflicts with a prior entry
 // execute the current queue and try to process this entry again
- execute_batches(
+ process_batches(
 bank,
 &batches,
 transaction_status_sender,
@@ -556,7 +620,7 @@ fn process_entries(
 }
 }
 }
- execute_batches(
+ process_batches(
 bank,
 &batches,
 transaction_status_sender,
@@ -1856,8 +1920,11 @@ pub mod tests {
 rand::{thread_rng, Rng},
 solana_entry::entry::{create_ticks, next_entry, next_entry_mut},
 solana_program_runtime::declare_process_instruction,
- solana_runtime::genesis_utils::{
- self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
+ solana_runtime::{
+ genesis_utils::{
+ self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
+ },
+ installed_scheduler_pool::MockInstalledScheduler,
 },
 solana_sdk::{
 account::{AccountSharedData, WritableAccount},
@@ -4245,6 +4312,38 @@ pub mod tests {
 )
 }
+ fn create_test_transactions(
+ mint_keypair: &Keypair,
+ genesis_hash: &Hash,
+ ) -> Vec<SanitizedTransaction> {
+ let pubkey = solana_sdk::pubkey::new_rand();
+ let keypair2 = Keypair::new();
+ let pubkey2 = solana_sdk::pubkey::new_rand();
+ let keypair3 = Keypair::new();
+ let pubkey3 = solana_sdk::pubkey::new_rand();
+
+ vec![
+ SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
+ mint_keypair,
+ &pubkey,
+ 1,
+ *genesis_hash,
+ )),
+ SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
+ &keypair2,
+ &pubkey2,
+ 1,
+ *genesis_hash,
+ )),
+ SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
+ &keypair3,
+ &pubkey3,
+ 1,
+ *genesis_hash,
+ )),
+ ]
+ }
+ #[test]
 fn test_confirm_slot_entries_progress_num_txs_indexes() {
 let GenesisConfigInfo {
 genesis_config,
 mint_keypair,
 ..
} = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - - let pubkey = solana_sdk::pubkey::new_rand(); - let keypair2 = Keypair::new(); - let pubkey2 = solana_sdk::pubkey::new_rand(); - let keypair3 = Keypair::new(); - let pubkey3 = solana_sdk::pubkey::new_rand(); - - let txs = vec![ - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &mint_keypair, - &pubkey, - 1, - genesis_config.hash(), - )), - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &keypair2, - &pubkey2, - 1, - genesis_config.hash(), - )), - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &keypair3, - &pubkey3, - 1, - genesis_config.hash(), - )), - ]; - + let txs = create_test_transactions(&mint_keypair, &genesis_config.hash()); let batch = bank.prepare_sanitized_batch(&txs); assert!(batch.needs_unlock()); let transaction_indexes = vec![42, 43, 44]; @@ -4424,6 +4496,46 @@ pub mod tests { assert_eq!(batch3.transaction_indexes, vec![43, 44]); } + #[test] + fn test_schedule_batches_for_execution() { + solana_logger::setup(); + let dummy_leader_pubkey = solana_sdk::pubkey::new_rand(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + + let txs = create_test_transactions(&mint_keypair, &genesis_config.hash()); + + let mut mocked_scheduler = MockInstalledScheduler::new(); + mocked_scheduler + .expect_schedule_execution() + .times(txs.len()) + .returning(|_| ()); + let bank = BankWithScheduler::new(bank, Some(Box::new(mocked_scheduler))); + + let batch = bank.prepare_sanitized_batch(&txs); + let batch_with_indexes = TransactionBatchWithIndexes { + batch, + transaction_indexes: (0..txs.len()).collect(), + }; + + let mut batch_execution_timing = BatchExecutionTiming::default(); + let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64); + assert!(process_batches( + &bank, + &[batch_with_indexes], + None, + None, + &mut batch_execution_timing, + None, + &ignored_prioritization_fee_cache + ) + .is_ok()); + } + #[test] fn test_confirm_slot_entries_with_fix() { const HASHES_PER_TICK: u64 = 10; diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d19c9c0e5be045..b193fd5bd9f275 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1306,6 +1306,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.8.1" @@ -1399,6 +1405,12 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "eager" version = "0.1.0" @@ -1643,6 +1655,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -1673,6 +1694,12 @@ dependencies = [ "percent-encoding 2.3.0", ] +[[package]] +name = "fragile" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "fs-err" version = "2.9.0" @@ -2787,6 +2814,33 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "mockall" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" +dependencies = [ + "cfg-if 1.0.0", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "modular-bitfield" version = "0.11.2" @@ -2866,6 +2920,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "num" version = "0.2.1" @@ -3438,6 +3498,36 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +[[package]] +name = "predicates" +version = "2.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" +dependencies = [ + "difflib", + "float-cmp", + "itertools", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" + +[[package]] +name = "predicates-tree" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "pretty-hex" version = "0.3.0" @@ -5579,6 +5669,7 @@ dependencies = [ "lru", "lz4", "memmap2", + "mockall", "modular-bitfield", "num-derive 0.4.1", "num-traits", @@ -6984,6 +7075,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "termtree" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" + [[package]] name = "test-case" version = "3.2.1" diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 2d15c7acbace71..e67ee5d2a66a59 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -32,6 +32,7 @@ log = { workspace = true } lru = { workspace = true } lz4 = { workspace = true } memmap2 = { workspace = true } +mockall = { workspace = true } modular-bitfield = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index 9fd3a5546097cc..5fef97bc6e6908 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -1,19 +1,35 @@ -//! Currently, there's only one auxiliary type called BankWithScheduler.. This file will be -//! populated by later PRs to align with the filename. +//! 
Currently, there are only two things: minimal InstalledScheduler trait and an auxiliary type
+//! called BankWithScheduler. This file will be populated by later PRs to align with the filename.
-#[cfg(feature = "dev-context-only-utils")]
-use qualifier_attr::qualifiers;
 use {
 crate::bank::Bank,
+ log::*,
+ solana_sdk::transaction::SanitizedTransaction,
 std::{
 fmt::Debug,
 ops::Deref,
 sync::{Arc, RwLock},
 },
 };
+#[cfg(feature = "dev-context-only-utils")]
+use {mockall::automock, qualifier_attr::qualifiers};
+
+#[cfg_attr(feature = "dev-context-only-utils", automock)]
+// suppress false clippy complaints arising from mockall-derive:
+// warning: `#[must_use]` has no effect when applied to a struct field
+// warning: the following explicit lifetimes could be elided: 'a
+#[cfg_attr(
+ feature = "dev-context-only-utils",
+ allow(unused_attributes, clippy::needless_lifetimes)
+)]
+pub trait InstalledScheduler: Send + Sync + Debug + 'static {
+ fn schedule_execution<'a>(
+ &'a self,
+ transaction_with_index: &'a (&'a SanitizedTransaction, usize),
+ );
+}
-// currently dummy type; will be replaced with the introduction of real type by upcoming pr...
-pub type DefaultInstalledSchedulerBox = ();
+pub type DefaultInstalledSchedulerBox = Box<dyn InstalledScheduler>;
 /// Very thin wrapper around Arc<Bank>
 ///
@@ -40,7 +56,6 @@ pub struct BankWithScheduler {
 #[derive(Debug)]
 pub struct BankWithSchedulerInner {
 bank: Arc<Bank>,
- #[allow(dead_code)]
 scheduler: InstalledSchedulerRwLock,
 }
 pub type InstalledSchedulerRwLock = RwLock<Option<DefaultInstalledSchedulerBox>>;
@@ -70,6 +85,28 @@ impl BankWithScheduler {
 self.inner.bank.clone()
 }
+ pub fn has_installed_scheduler(&self) -> bool {
+ self.inner.scheduler.read().unwrap().is_some()
+ }
+
+ // 'a is needed; anonymous_lifetime_in_impl_trait isn't stabilized yet...
+ pub fn schedule_transaction_executions<'a>(
+ &self,
+ transactions_with_indexes: impl ExactSizeIterator<Item = (&'a SanitizedTransaction, &'a usize)>,
+ ) {
+ trace!(
+ "schedule_transaction_executions(): {} txs",
+ transactions_with_indexes.len()
+ );
+
+ let scheduler_guard = self.inner.scheduler.read().unwrap();
+ let scheduler = scheduler_guard.as_ref().unwrap();
+
+ for (sanitized_transaction, &index) in transactions_with_indexes {
+ scheduler.schedule_execution(&(sanitized_transaction, index));
+ }
+ }
+
 pub const fn no_scheduler_available() -> InstalledSchedulerRwLock {
 RwLock::new(None)
 }
From 372af8d3aabce24facc707c6c4b7c54377bb5ae4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 27 Oct 2023 13:19:05 +0000
Subject: [PATCH 19/98] build(deps): bump ahash from 0.8.5 to 0.8.6 (#33878)
* build(deps): bump ahash from 0.8.5 to 0.8.6
Bumps [ahash](https://github.com/tkaitchuck/ahash) from 0.8.5 to 0.8.6.
- [Release notes](https://github.com/tkaitchuck/ahash/releases)
- [Commits](https://github.com/tkaitchuck/ahash/commits)
---
updated-dependencies:
- dependency-name: ahash
 dependency-type: direct:production
 update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 16 ++++++++-------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ffb340f5508e9..dde40b2af55a25 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,9 +75,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7d5a2cecb58716e47d67d5703a249964b14c7be1ec3cad3affc295b2d1c35d" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -2319,7 +2319,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.6", ] [[package]] @@ -6528,7 +6528,7 @@ dependencies = [ name = "solana-perf" version = "1.18.0" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.6", "assert_matches", "bincode", "bv", @@ -9224,18 +9224,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.11" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c19fae0c8a9efc6a8281f2e623db8af1db9e57852e04cde3e754dd2dc29340f" +checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.11" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc56589e9ddd1f1c28d4b4b5c773ce232910a6bb67a70133d61c9e347585efe9" +checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 71841636218a2d..cbf6d5bf4b6e97 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -133,7 +133,7 @@ edition = "2021" [workspace.dependencies] Inflector = "0.11.4" aes-gcm-siv = "0.10.3" -ahash = "0.8.5" +ahash = "0.8.6" anyhow = "1.0.75" ark-bn254 = "0.4.0" ark-ec = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b193fd5bd9f275..4ea9ee79b1d9ee 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7d5a2cecb58716e47d67d5703a249964b14c7be1ec3cad3affc295b2d1c35d" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -1971,7 +1971,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.6", ] [[package]] @@ -5315,7 +5315,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" name = "solana-perf" version = "1.18.0" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.6", "bincode", "bv", "caps", @@ -8058,18 +8058,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.11" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4c19fae0c8a9efc6a8281f2e623db8af1db9e57852e04cde3e754dd2dc29340f" +checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.11" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc56589e9ddd1f1c28d4b4b5c773ce232910a6bb67a70133d61c9e347585efe9" +checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" dependencies = [ "proc-macro2", "quote", From f6bce134c59876ded7e9d1d87317d9b1e0534b66 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Oct 2023 13:20:20 +0000 Subject: [PATCH 20/98] build(deps): bump serde from 1.0.189 to 1.0.190 (#33877) * build(deps): bump serde from 1.0.189 to 1.0.190 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.189 to 1.0.190. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.189...v1.0.190) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dde40b2af55a25..1157029157907d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4806,9 +4806,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.189" +version = "1.0.190" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7" dependencies = [ "serde_derive", ] @@ -4824,9 +4824,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.190" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index cbf6d5bf4b6e97..caccf5189e49ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -289,7 +289,7 @@ rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.20" seqlock = "0.2.0" -serde = "1.0.189" +serde = "1.0.190" serde_bytes = "0.11.12" serde_derive = "1.0.103" serde_json = "1.0.107" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 4ea9ee79b1d9ee..71baf3c7932d89 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4258,9 +4258,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.189" +version = "1.0.190" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7" dependencies = [ "serde_derive", ] @@ -4276,9 +4276,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.190" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = 
"67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" dependencies = [ "proc-macro2", "quote", From a18debc34a4919c91ec5b7b007afc8d4ab497949 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 27 Oct 2023 07:14:05 -0700 Subject: [PATCH 21/98] allow test feature to skip rewrites (#33851) * allow test feature to skip rewrites * hook up cli arg for test skip rewrites, update tests * fix sanity checker * add account hash to abi to fix a test * reviews * use hashmap to collect skip_rewrites. exclude skip_rewrites from dirty pubkey set * accumulate skipped_rewrite in reduce * mutex * fmt * skip hash verify for this test flag * add skipped rewrites num stat * skip bank hash verify not account hash verify * reviews --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 42 +++++++++++++++++--- accounts-db/src/accounts_hash.rs | 2 +- ledger-tool/src/args.rs | 2 + ledger-tool/src/main.rs | 13 ++++++ runtime/src/bank.rs | 68 +++++++++++++++++++++++++++----- validator/src/cli.rs | 6 +++ validator/src/main.rs | 2 + 7 files changed, 120 insertions(+), 15 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f080abcb85556f..92c144ac0cbe59 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -486,6 +486,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { exhaustively_verify_refcounts: false, create_ancient_storage: CreateAncientStorage::Pack, test_partitioned_epoch_rewards: TestPartitionedEpochRewards::CompareResults, + test_skip_rewrites_but_include_in_bank_hash: false, }; pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), @@ -498,6 +499,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig exhaustively_verify_refcounts: false, create_ancient_storage: CreateAncientStorage::Pack, test_partitioned_epoch_rewards: TestPartitionedEpochRewards::None, + test_skip_rewrites_but_include_in_bank_hash: false, }; pub type BinnedHashData = Vec>; @@ -557,6 +559,7 @@ pub struct AccountsDbConfig { /// if None, ancient append vecs are set to ANCIENT_APPEND_VEC_DEFAULT_OFFSET /// Some(offset) means include slots up to (max_slot - (slots_per_epoch - 'offset')) pub ancient_append_vec_offset: Option, + pub test_skip_rewrites_but_include_in_bank_hash: bool, pub skip_initial_hash_calc: bool, pub exhaustively_verify_refcounts: bool, /// how to create ancient storages @@ -1440,6 +1443,9 @@ pub struct AccountsDb { /// from AccountsDbConfig create_ancient_storage: CreateAncientStorage, + /// true if this client should skip rewrites but still include those rewrites in the bank hash as if rewrites had occurred. 
+ pub test_skip_rewrites_but_include_in_bank_hash: bool, + pub accounts_cache: AccountsCache, write_cache_limit_bytes: Option<u64>, @@ -1573,6 +1579,7 @@ pub struct AccountsStats { delta_hash_scan_time_total_us: AtomicU64, delta_hash_accumulate_time_total_us: AtomicU64, delta_hash_num: AtomicU64, + skipped_rewrites_num: AtomicUsize, last_store_report: AtomicInterval, store_hash_accounts: AtomicU64, @@ -2547,6 +2554,7 @@ impl AccountsDb { exhaustively_verify_refcounts: false, partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig::default(), epoch_accounts_hash_manager: EpochAccountsHashManager::new_invalid(), + test_skip_rewrites_but_include_in_bank_hash: false, } } @@ -2622,6 +2630,11 @@ impl AccountsDb { .map(|config| config.test_partitioned_epoch_rewards) .unwrap_or_default(); + let test_skip_rewrites_but_include_in_bank_hash = accounts_db_config + .as_ref() + .map(|config| config.test_skip_rewrites_but_include_in_bank_hash) + .unwrap_or_default(); + let partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig = PartitionedEpochRewardsConfig::new(test_partitioned_epoch_rewards); @@ -2647,6 +2660,7 @@ impl AccountsDb { .and_then(|x| x.write_cache_limit_bytes), partitioned_epoch_rewards_config, exhaustively_verify_refcounts, + test_skip_rewrites_but_include_in_bank_hash, ..Self::default_with_accounts_index( accounts_index, base_working_path, @@ -6944,6 +6958,11 @@ impl AccountsDb { .swap(0, Ordering::Relaxed), i64 ), + ( + "skipped_rewrites_num", + self.stats.skipped_rewrites_num.swap(0, Ordering::Relaxed), + i64 + ), ); } @@ -7908,7 +7927,6 @@ impl AccountsDb { slot: Slot, ) -> (Vec<(Pubkey, AccountHash)>, u64, Measure) { let mut scan = Measure::start("scan"); - let scan_result: ScanStorageResult<(Pubkey, AccountHash), DashMap<Pubkey, AccountHash>> = self.scan_account_storage( slot, @@ -7928,6 +7946,7 @@ impl AccountsDb { ScanStorageResult::Cached(cached_result) => cached_result, ScanStorageResult::Stored(stored_result) => stored_result.into_iter().collect(), }; + (hashes, scan.as_us(), accumulate) } @@ -7968,12 +7987,12 @@ impl AccountsDb { } } - /// Calculate accounts delta hash for `slot` + /// Wrapper function to calculate accounts delta hash for `slot` (only used for testing and benchmarking). /// /// As part of calculating the accounts delta hash, get a list of accounts modified this slot /// (aka dirty pubkeys) and add them to `self.uncleaned_pubkeys` for future cleaning. 
pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash { - self.calculate_accounts_delta_hash_internal(slot, None) + self.calculate_accounts_delta_hash_internal(slot, None, HashMap::default()) } /// Calculate accounts delta hash for `slot` @@ -7984,9 +8003,20 @@ impl AccountsDb { &self, slot: Slot, ignore: Option, + mut skipped_rewrites: HashMap, ) -> AccountsDeltaHash { let (mut hashes, scan_us, mut accumulate) = self.get_pubkey_hash_for_slot(slot); let dirty_keys = hashes.iter().map(|(pubkey, _hash)| *pubkey).collect(); + + hashes.iter().for_each(|(k, _h)| { + skipped_rewrites.remove(k); + }); + + let num_skipped_rewrites = skipped_rewrites.len(); + hashes.extend(skipped_rewrites); + + info!("skipped rewrite hashes {} {}", slot, num_skipped_rewrites); + if let Some(ignore) = ignore { hashes.retain(|k| k.0 != ignore); } @@ -8015,6 +8045,10 @@ impl AccountsDb { .delta_hash_accumulate_time_total_us .fetch_add(accumulate.as_us(), Ordering::Relaxed); self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed); + self.stats + .skipped_rewrites_num + .fetch_add(num_skipped_rewrites, Ordering::Relaxed); + accounts_delta_hash } @@ -15020,7 +15054,6 @@ pub mod tests { db.store_uncached(1, &[(&account_key1, &account2)]); db.calculate_accounts_delta_hash(0); db.calculate_accounts_delta_hash(1); - db.print_accounts_stats("pre-clean1"); // clean accounts - no accounts should be cleaned, since no rooted slots @@ -15042,7 +15075,6 @@ pub mod tests { db.store_uncached(2, &[(&account_key2, &account3)]); db.store_uncached(2, &[(&account_key1, &account3)]); db.calculate_accounts_delta_hash(2); - db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean2"); diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 6b853895d7b790..7631ea694635b8 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1243,7 +1243,7 @@ pub enum ZeroLamportAccounts { /// Hash of an account #[repr(transparent)] -#[derive(Debug, Copy, Clone, Eq, PartialEq, Pod, Zeroable)] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Pod, Zeroable, AbiExample)] pub struct AccountHash(pub Hash); // Ensure the newtype wrapper never changes size from the underlying Hash diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index c11954a56780ab..0bb28e4a2779ca 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -83,6 +83,8 @@ pub fn get_accounts_db_config( exhaustively_verify_refcounts: arg_matches.is_present("accounts_db_verify_refcounts"), skip_initial_hash_calc: arg_matches.is_present("accounts_db_skip_initial_hash_calculation"), test_partitioned_epoch_rewards, + test_skip_rewrites_but_include_in_bank_hash: arg_matches + .is_present("accounts_db_test_skip_rewrites"), ..AccountsDbConfig::default() } } diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 697199981b26f8..33031e9d14a0a5 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1128,6 +1128,12 @@ fn main() { "Debug option to scan all AppendVecs and verify account index refcounts prior to clean", ) .hidden(hidden_unless_forced()); + let accounts_db_test_skip_rewrites_but_include_in_bank_hash = Arg::with_name("accounts_db_test_skip_rewrites") + .long("accounts-db-test-skip-rewrites") + .help( + "Debug option to skip rewrites for rent-exempt accounts but still add them in bank delta hash calculation", + ) + .hidden(hidden_unless_forced()); let accounts_filler_count = Arg::with_name("accounts_filler_count") .long("accounts-filler-count") 
.value_name("COUNT") @@ -1556,6 +1562,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) ) .subcommand( SubCommand::with_name("shred-meta") @@ -1573,6 +1580,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) ) .subcommand( SubCommand::with_name("bounds") @@ -1608,6 +1616,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_skip_shrink) .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_filler_count) .arg(&accounts_filler_size) .arg(&verify_index_arg) @@ -1688,6 +1697,7 @@ fn main() { .arg(&accounts_index_limit) .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) @@ -1724,6 +1734,7 @@ fn main() { .arg(&accounts_index_limit) .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&accountsdb_skip_shrink) .arg(&ancient_append_vecs) @@ -1918,6 +1929,7 @@ fn main() { .arg(&accounts_index_limit) .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) @@ -1952,6 +1964,7 @@ fn main() { .arg(&accounts_index_limit) .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index edc2c26bc4a3e9..5c63f05e693b17 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -75,11 +75,13 @@ use { TransactionLoadResult, }, accounts_db::{ - AccountShrinkThreshold, AccountStorageEntry, AccountsDbConfig, + AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, }, - accounts_hash::{AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash}, + accounts_hash::{ + AccountHash, AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, + }, accounts_index::{AccountSecondaryIndexes, IndexKey, ScanConfig, ScanResult, ZeroLamport}, accounts_partition::{self, Partition, PartitionIndex}, accounts_update_notifier_interface::AccountsUpdateNotifier, @@ -195,7 +197,7 @@ use { AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering::{self, AcqRel, Acquire, Relaxed}, }, - Arc, LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, + Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, }, thread::Builder, time::{Duration, Instant}, @@ -507,6 +509,7 @@ impl PartialEq for Bank { return true; } let Self { + skipped_rewrites: _, rc: _, status_cache: _, blockhash_queue, @@ -815,6 +818,10 @@ pub struct Bank { /// The change to accounts data size in this Bank, due to off-chain events (i.e. 
rent collection) accounts_data_size_delta_off_chain: AtomicI64, + /// Until the skipped rewrites feature is activated, it is possible to skip rewrites and still include + /// the account hash of the accounts that would have been rewritten as the bank hash expects. + skipped_rewrites: Mutex<HashMap<Pubkey, AccountHash>>, + /// Transaction fee structure pub fee_structure: FeeStructure, @@ -1012,6 +1019,7 @@ impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { + skipped_rewrites: Mutex::default(), incremental_snapshot_persistence: None, rc: BankRc::new(accounts, Slot::default()), status_cache: Arc::<RwLock<BankStatusCache>>::default(), @@ -1343,6 +1351,7 @@ impl Bank { let accounts_data_size_initial = parent.load_accounts_data_size(); let mut new = Self { + skipped_rewrites: Mutex::default(), incremental_snapshot_persistence: None, rc, status_cache, @@ -1797,6 +1806,7 @@ impl Bank { ); let stakes_accounts_load_duration = now.elapsed(); let mut bank = Self { + skipped_rewrites: Mutex::default(), incremental_snapshot_persistence: fields.incremental_snapshot_persistence, rc: bank_rc, status_cache: Arc::<RwLock<BankStatusCache>>::default(), @@ -6010,9 +6020,16 @@ impl Bank { let mut time_collecting_rent_us = 0; let mut time_storing_accounts_us = 0; let can_skip_rewrites = self.bank_hash_skips_rent_rewrites(); + let test_skip_rewrites_but_include_hash_in_bank_hash = !can_skip_rewrites + && self + .rc + .accounts + .accounts_db + .test_skip_rewrites_but_include_in_bank_hash; let set_exempt_rent_epoch_max: bool = self .feature_set .is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); + let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { let (rent_collected_info, measure) = measure!(self.rent_collector.collect_from_existing_account( @@ -6028,7 +6045,9 @@ impl Bank { // Also, there's another subtle side-effect from rewrites: this // ensures we verify the whole on-chain state (= all accounts) // via the bank delta hash slowly once per an epoch. - if !can_skip_rewrites || !Self::skip_rewrite(rent_collected_info.rent_amount, account) { + if (!can_skip_rewrites && !test_skip_rewrites_but_include_hash_in_bank_hash) + || !Self::skip_rewrite(rent_collected_info.rent_amount, account) + { if rent_collected_info.rent_amount > 0 { if let Some(rent_paying_pubkeys) = rent_paying_pubkeys { if !rent_paying_pubkeys.contains(pubkey) { @@ -6058,6 +6077,13 @@ impl Bank { } total_rent_collected_info += rent_collected_info; accounts_to_store.push((pubkey, account)); + } else if test_skip_rewrites_but_include_hash_in_bank_hash { + // Include the rewrites that we skipped in the accounts delta hash. + // This is what consensus requires prior to activation of bank_hash_skips_rent_rewrites. + // This code path exists to allow us to test the long-term effects on validators when the skipped rewrites + // feature is enabled. + let hash = AccountsDb::hash_account(account, pubkey); + skipped_rewrites.push((*pubkey, hash)); } rent_debits.insert(pubkey, rent_collected_info.rent_amount, account.lamports()); } @@ -6071,6 +6097,7 @@ impl Bank { } CollectRentFromAccountsInfo { + skipped_rewrites, rent_collected_info: total_rent_collected_info, rent_rewards: rent_debits.into_unordered_rewards_iter().collect(), time_collecting_rent_us, @@ -6173,6 +6200,11 @@ impl Bank { CollectRentInPartitionInfo::reduce, ); + self.skipped_rewrites + .lock() + .unwrap() + .extend(&mut results.skipped_rewrites.into_iter()); + + // We cannot assert here that we collected from all expected keys. 
// Some accounts may have been topped off or may have had all funds removed and gone to 0 lamports. @@ -7066,7 +7098,11 @@ impl Bank { .rc .accounts .accounts_db - .calculate_accounts_delta_hash_internal(slot, ignore); + .calculate_accounts_delta_hash_internal( + slot, + ignore, + std::mem::take(&mut self.skipped_rewrites.lock().unwrap()), + ); let mut signature_count_buf = [0u8; 8]; LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count()); @@ -7635,10 +7671,20 @@ impl Bank { }); let (verified_bank, verify_bank_time_us) = measure_us!({ - info!("Verifying bank..."); - let verified = self.verify_hash(); - info!("Verifying bank... Done."); - verified + let should_verify_bank = !self + .rc + .accounts + .accounts_db + .test_skip_rewrites_but_include_in_bank_hash; + if should_verify_bank { + info!("Verifying bank..."); + let verified = self.verify_hash(); + info!("Verifying bank... Done."); + verified + } else { + info!("Verifying bank... Skipped."); + true + } }); datapoint_info!( @@ -8337,6 +8383,7 @@ enum ApplyFeatureActivationsCaller { /// process later. #[derive(Debug, Default)] struct CollectRentFromAccountsInfo { + skipped_rewrites: Vec<(Pubkey, AccountHash)>, rent_collected_info: CollectedInfo, rent_rewards: Vec<(Pubkey, RewardInfo)>, time_collecting_rent_us: u64, @@ -8348,6 +8395,7 @@ struct CollectRentFromAccountsInfo { /// `collect_rent_in_partition()`—and then perform a reduce on all of them. #[derive(Debug, Default)] struct CollectRentInPartitionInfo { + skipped_rewrites: Vec<(Pubkey, AccountHash)>, rent_collected: u64, accounts_data_size_reclaimed: u64, rent_rewards: Vec<(Pubkey, RewardInfo)>, @@ -8363,6 +8411,7 @@ impl CollectRentInPartitionInfo { #[must_use] fn new(info: CollectRentFromAccountsInfo, time_loading_accounts: Duration) -> Self { Self { + skipped_rewrites: info.skipped_rewrites, rent_collected: info.rent_collected_info.rent_amount, accounts_data_size_reclaimed: info.rent_collected_info.account_data_len_reclaimed, rent_rewards: info.rent_rewards, @@ -8380,6 +8429,7 @@ impl CollectRentInPartitionInfo { #[must_use] fn reduce(lhs: Self, rhs: Self) -> Self { Self { + skipped_rewrites: [lhs.skipped_rewrites, rhs.skipped_rewrites].concat(), rent_collected: lhs.rent_collected.saturating_add(rhs.rent_collected), accounts_data_size_reclaimed: lhs .accounts_data_size_reclaimed diff --git a/validator/src/cli.rs b/validator/src/cli.rs index bd82c0a4ac2727..9aa1c466f8e336 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1194,6 +1194,12 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .help("Debug option to scan all append vecs and verify account index refcounts prior to clean") .hidden(hidden_unless_forced()) ) + .arg( + Arg::with_name("accounts_db_test_skip_rewrites") + .long("accounts-db-test-skip-rewrites") + .help("Debug option to skip rewrites for rent-exempt accounts but still add them in bank delta hash calculation") + .hidden(hidden_unless_forced()) + ) .arg( Arg::with_name("no_skip_initial_accounts_db_clean") .long("no-skip-initial-accounts-db-clean") diff --git a/validator/src/main.rs b/validator/src/main.rs index 38bb9813ab3a70..4c247c9a9977a2 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1206,6 +1206,8 @@ pub fn main() { .then_some(CreateAncientStorage::Pack) .unwrap_or_default(), test_partitioned_epoch_rewards, + test_skip_rewrites_but_include_in_bank_hash: matches + .is_present("accounts_db_test_skip_rewrites"), ..AccountsDbConfig::default() }; From 
01603fdd1d95c306a14831e00ddb527838c62638 Mon Sep 17 00:00:00 2001 From: "Daniel Porteous (dport)" Date: Fri, 27 Oct 2023 16:45:24 +0100 Subject: [PATCH 22/98] Fix typo in versioned-transactions.md (#33902) --- docs/src/developing/versioned-transactions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/developing/versioned-transactions.md b/docs/src/developing/versioned-transactions.md index 95c2073b2115b6..8d942814d7310d 100644 --- a/docs/src/developing/versioned-transactions.md +++ b/docs/src/developing/versioned-transactions.md @@ -93,7 +93,7 @@ let blockhash = await connection Create an `array` of all the `instructions` you desire to send in your transaction. In this example below, we are creating a simple SOL transfer instruction: ```js -// create an array with your desires `instructions` +// create an array with your desired `instructions` const instructions = [ web3.SystemProgram.transfer({ fromPubkey: payer.publicKey, From ecd674bd92be2ed86550649b21077fed6e5fdcf2 Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 27 Oct 2023 18:28:13 +0200 Subject: [PATCH 23/98] revert programify feature gate (#33897) --- runtime/src/bank.rs | 56 ++- runtime/src/bank/replace_account.rs | 191 ---------- runtime/src/bank/tests.rs | 417 ++------------------- runtime/src/inline_feature_gate_program.rs | 5 - runtime/src/lib.rs | 1 - sdk/src/feature_set.rs | 5 - 6 files changed, 62 insertions(+), 613 deletions(-) delete mode 100644 runtime/src/bank/replace_account.rs delete mode 100644 runtime/src/inline_feature_gate_program.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 5c63f05e693b17..2bdb21e6d3573b 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -43,7 +43,6 @@ use { builtins::{BuiltinPrototype, BUILTINS}, epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, - inline_feature_gate_program, runtime_config::RuntimeConfig, serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_hash::SnapshotHash, @@ -218,7 +217,6 @@ pub mod bank_hash_details; mod builtin_programs; pub mod epoch_accounts_hash_utils; mod metrics; -mod replace_account; mod serde_snapshot; mod sysvar_cache; #[cfg(test)] @@ -8085,24 +8083,6 @@ impl Bank { if new_feature_activations.contains(&feature_set::update_hashes_per_tick6::id()) { self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK6); } - - if new_feature_activations.contains(&feature_set::programify_feature_gate_program::id()) { - let datapoint_name = "bank-progamify_feature_gate_program"; - if let Err(e) = replace_account::replace_empty_account_with_upgradeable_program( - self, - &feature::id(), - &inline_feature_gate_program::noop_program::id(), - datapoint_name, - ) { - warn!( - "{}: Failed to replace empty account {} with upgradeable program: {}", - datapoint_name, - feature::id(), - e - ); - datapoint_warn!(datapoint_name, ("slot", self.slot(), i64),); - } - } } fn apply_updated_hashes_per_tick(&mut self, hashes_per_tick: u64) { @@ -8244,6 +8224,42 @@ impl Bank { } } + /// Use to replace programs by feature activation + #[allow(dead_code)] + fn replace_program_account( + &mut self, + old_address: &Pubkey, + new_address: &Pubkey, + datapoint_name: &'static str, + ) { + if let Some(old_account) = self.get_account_with_fixed_root(old_address) { + if let Some(new_account) = self.get_account_with_fixed_root(new_address) { + datapoint_info!(datapoint_name, ("slot", self.slot, i64)); + + // Burn lamports in the old account + self.capitalization + 
.fetch_sub(old_account.lamports(), Relaxed); + + // Transfer new account to old account + self.store_account(old_address, &new_account); + + // Clear new account + self.store_account(new_address, &AccountSharedData::default()); + + // Unload a program from the bank's cache + self.loaded_programs_cache + .write() + .unwrap() + .remove_programs([*old_address].into_iter()); + + self.calculate_and_update_accounts_data_size_delta_off_chain( + old_account.data().len(), + new_account.data().len(), + ); + } + } + } + /// Get all the accounts for this bank and calculate stats pub fn get_total_accounts_stats(&self) -> ScanResult { let accounts = self.get_all_accounts()?; diff --git a/runtime/src/bank/replace_account.rs b/runtime/src/bank/replace_account.rs deleted file mode 100644 index 8d650aeebe7e87..00000000000000 --- a/runtime/src/bank/replace_account.rs +++ /dev/null @@ -1,191 +0,0 @@ -use { - super::Bank, - log::*, - solana_accounts_db::accounts_index::ZeroLamport, - solana_sdk::{ - account::{Account, AccountSharedData, ReadableAccount}, - bpf_loader_upgradeable::{self, UpgradeableLoaderState}, - pubkey::Pubkey, - }, - std::sync::atomic::Ordering::Relaxed, - thiserror::Error, -}; - -/// Errors returned by `replace_account` methods -#[derive(Debug, Error)] -pub enum ReplaceAccountError { - /// Account not found - #[error("Account not found: {0:?}")] - AccountNotFound(Pubkey), - /// Account exists - #[error("Account exists: {0:?}")] - AccountExists(Pubkey), - #[error("Bincode Error: {0}")] - BincodeError(#[from] bincode::Error), - /// Not an upgradeable program - #[error("Not an upgradeable program")] - NotAnUpgradeableProgram, -} - -/// Moves one account in place of another -/// `source`: the account to replace with -/// `destination`: the account to be replaced -fn move_account( - bank: &Bank, - source_address: &Pubkey, - source_account: &V, - destination_address: &Pubkey, - destination_account: Option<&U>, -) where - U: ReadableAccount + Sync + ZeroLamport, - V: ReadableAccount + Sync + ZeroLamport, -{ - let (destination_lamports, destination_len) = match destination_account { - Some(destination_account) => ( - destination_account.lamports(), - destination_account.data().len(), - ), - None => (0, 0), - }; - - // Burn lamports in the destination account - bank.capitalization.fetch_sub(destination_lamports, Relaxed); - - // Transfer source account to destination account - bank.store_account(destination_address, source_account); - - // Clear source account - bank.store_account(source_address, &AccountSharedData::default()); - - bank.calculate_and_update_accounts_data_size_delta_off_chain( - destination_len, - source_account.data().len(), - ); -} - -/// Use to replace non-upgradeable programs by feature activation -/// `source`: the non-upgradeable program account to replace with -/// `destination`: the non-upgradeable program account to be replaced -#[allow(dead_code)] -pub(crate) fn replace_non_upgradeable_program_account( - bank: &Bank, - source_address: &Pubkey, - destination_address: &Pubkey, - datapoint_name: &'static str, -) -> Result<(), ReplaceAccountError> { - let destination_account = bank - .get_account_with_fixed_root(destination_address) - .ok_or(ReplaceAccountError::AccountNotFound(*destination_address))?; - let source_account = bank - .get_account_with_fixed_root(source_address) - .ok_or(ReplaceAccountError::AccountNotFound(*source_address))?; - - datapoint_info!(datapoint_name, ("slot", bank.slot, i64)); - - move_account( - bank, - source_address, - &source_account, - 
destination_address, - Some(&destination_account), - ); - - // Unload a program from the bank's cache - bank.loaded_programs_cache - .write() - .unwrap() - .remove_programs([*destination_address].into_iter()); - - Ok(()) -} - -/// Use to replace an empty account with a program by feature activation -/// Note: The upgradeable program should have both: -/// - Program account -/// - Program data account -/// `source`: the upgradeable program account to replace with -/// `destination`: the empty account to be replaced -pub(crate) fn replace_empty_account_with_upgradeable_program( - bank: &Bank, - source_address: &Pubkey, - destination_address: &Pubkey, - datapoint_name: &'static str, -) -> Result<(), ReplaceAccountError> { - // Must be attempting to replace an empty account with a program - // account _and_ data account - let source_account = bank - .get_account_with_fixed_root(source_address) - .ok_or(ReplaceAccountError::AccountNotFound(*source_address))?; - - let (destination_data_address, _) = Pubkey::find_program_address( - &[destination_address.as_ref()], - &bpf_loader_upgradeable::id(), - ); - let (source_data_address, _) = - Pubkey::find_program_address(&[source_address.as_ref()], &bpf_loader_upgradeable::id()); - - // Make sure the data within the source account is the PDA of its - // data account. This also means it has at least the necessary - // lamports for rent. - let source_state = bincode::deserialize::(source_account.data())?; - if !matches!(source_state, UpgradeableLoaderState::Program { .. }) { - return Err(ReplaceAccountError::NotAnUpgradeableProgram); - } - - let source_data_account = bank - .get_account_with_fixed_root(&source_data_address) - .ok_or(ReplaceAccountError::AccountNotFound(source_data_address))?; - - // Make sure the destination account is empty - // We aren't going to check that there isn't a data account at - // the known program-derived address (ie. 
`destination_data_address`), - // because if it exists, it will be overwritten - if bank - .get_account_with_fixed_root(destination_address) - .is_some() - { - return Err(ReplaceAccountError::AccountExists(*destination_address)); - } - let state = UpgradeableLoaderState::Program { - programdata_address: destination_data_address, - }; - let data = bincode::serialize(&state)?; - let lamports = bank.get_minimum_balance_for_rent_exemption(data.len()); - let created_program_account = Account { - lamports, - data, - owner: bpf_loader_upgradeable::id(), - executable: true, - rent_epoch: source_account.rent_epoch(), - }; - - datapoint_info!(datapoint_name, ("slot", bank.slot, i64)); - let change_in_capitalization = source_account.lamports().saturating_sub(lamports); - - // Replace the destination data account with the source one - // If the destination data account does not exist, it will be created - // If it does exist, it will be overwritten - move_account( - bank, - &source_data_address, - &source_data_account, - &destination_data_address, - bank.get_account_with_fixed_root(&destination_data_address) - .as_ref(), - ); - - // Write the source data account's PDA into the destination program account - move_account( - bank, - source_address, - &created_program_account, - destination_address, - None::<&AccountSharedData>, - ); - - // Any remaining lamports in the source program account are burnt - bank.capitalization - .fetch_sub(change_in_capitalization, Relaxed); - - Ok(()) -} diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 311b928a2995bd..2f1c0e0aee45f7 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -10,10 +10,6 @@ use { accounts_background_service::{ AbsRequestSender, PrunedBanksRequestHandler, SendDroppedBankCallback, }, - bank::replace_account::{ - replace_empty_account_with_upgradeable_program, - replace_non_upgradeable_program_account, ReplaceAccountError, - }, bank_client::BankClient, bank_forks::BankForks, epoch_rewards_hasher::hash_rewards_into_partitions, @@ -8014,403 +8010,42 @@ fn test_compute_active_feature_set() { assert!(feature_set.is_active(&test_feature)); } -fn test_program_replace_set_up_account( - bank: &Bank, - pubkey: &Pubkey, - lamports: u64, - state: &T, - owner: &Pubkey, - executable: bool, -) -> AccountSharedData { - let data_len = bincode::serialized_size(state).unwrap() as usize; - let mut account = AccountSharedData::from(Account { - lamports, - owner: *owner, - executable, - data: vec![0u8; data_len], - ..Account::default() - }); - account.serialize_data(state).unwrap(); - bank.store_account_and_update_capitalization(pubkey, &account); - assert_eq!(bank.get_balance(pubkey), lamports); - account -} - #[test] -fn test_replace_non_upgradeable_program_account() { - // Non-upgradeable program - // - Destination: [Destination program data] - // - Source: [*Source program data] - // - // Should replace the destination program account with the source program account: - // - Destination: [*Source program data] - let bpf_id = bpf_loader::id(); - let bank = create_simple_test_bank(0); - - let destination = Pubkey::new_unique(); - let destination_state = vec![0u8; 4]; - let destination_lamports = bank.get_minimum_balance_for_rent_exemption(destination_state.len()); - test_program_replace_set_up_account( - &bank, - &destination, - destination_lamports, - &destination_state, - &bpf_id, - true, - ); - - let source = Pubkey::new_unique(); - let source_state = vec![6; 30]; - let source_lamports = 
bank.get_minimum_balance_for_rent_exemption(source_state.len()); - let check_source_account = test_program_replace_set_up_account( - &bank, - &source, - source_lamports, - &source_state, - &bpf_id, - true, - ); - let check_data_account_data = check_source_account.data().to_vec(); - - let original_capitalization = bank.capitalization(); - - replace_non_upgradeable_program_account( - &bank, - &source, - &destination, - "bank-apply_program_replacement", - ) - .unwrap(); - - // Destination program account balance is now the source program account's balance - assert_eq!(bank.get_balance(&destination), source_lamports); - - // Source program account is now empty - assert_eq!(bank.get_balance(&source), 0); - - // Destination program account now holds the source program data, ie: - // - Destination: [*Source program data] - let destination_account = bank.get_account(&destination).unwrap(); - assert_eq!(destination_account.data(), &check_data_account_data); - - // Ownership & executable match the source program account - assert_eq!(destination_account.owner(), &bpf_id); - assert!(destination_account.executable()); - - // The destination account's original lamports balance was burnt - assert_eq!( - bank.capitalization(), - original_capitalization - destination_lamports - ); -} - -#[test_case( - Pubkey::new_unique(), - None; - "Empty destination account _without_ corresponding data account" -)] -#[test_case( - Pubkey::new_unique(), - Some(vec![4; 40]); - "Empty destination account _with_ corresponding data account" -)] -#[test_case( - feature::id(), // `Feature11111111` - None; - "Native destination account _without_ corresponding data account" -)] -#[test_case( - feature::id(), // `Feature11111111` - Some(vec![4; 40]); - "Native destination account _with_ corresponding data account" -)] -fn test_replace_empty_account_with_upgradeable_program_success( - destination: Pubkey, - maybe_destination_data_state: Option>, // Inner data of the destination program _data_ account -) { - // Ensures a program account and data account are created when replacing an - // empty account, ie: - // - Destination: PDA(DestinationData) - // - DestinationData: [Destination program data] - // - // If the destination data account exists, it will be overwritten - let bpf_upgradeable_id = bpf_loader_upgradeable::id(); - let bank = create_simple_test_bank(0); - - // Create the test source accounts, one for program and one for data - let source = Pubkey::new_unique(); - let (source_data, _) = Pubkey::find_program_address(&[source.as_ref()], &bpf_upgradeable_id); - let source_state = UpgradeableLoaderState::Program { - programdata_address: source_data, - }; - let source_lamports = - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); - let source_data_state = vec![6; 30]; - let source_data_lamports = bank.get_minimum_balance_for_rent_exemption(source_data_state.len()); - test_program_replace_set_up_account( - &bank, - &source, - source_lamports, - &source_state, - &bpf_upgradeable_id, - true, - ); - let check_source_data_account = test_program_replace_set_up_account( - &bank, - &source_data, - source_data_lamports, - &source_data_state, - &bpf_upgradeable_id, - false, - ); - let check_data_account_data = check_source_data_account.data().to_vec(); - - // Derive the well-known PDA address for the destination data account - let (destination_data, _) = - Pubkey::find_program_address(&[destination.as_ref()], &bpf_upgradeable_id); - - // Determine the lamports that will be burnt after the replacement 
- let burnt_after_rent = if let Some(destination_data_state) = maybe_destination_data_state { - // Create the data account if necessary - let destination_data_lamports = - bank.get_minimum_balance_for_rent_exemption(destination_data_state.len()); - test_program_replace_set_up_account( - &bank, - &destination_data, - destination_data_lamports, - &destination_data_state, - &bpf_upgradeable_id, - false, - ); - destination_data_lamports + source_lamports - - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()) - } else { - source_lamports - - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()) - }; - - let original_capitalization = bank.capitalization(); - - // Do the replacement - replace_empty_account_with_upgradeable_program( - &bank, - &source, - &destination, - "bank-apply_empty_account_replacement_for_program", - ) - .unwrap(); - - // Destination program account was created and funded to pay for minimum rent - // for the PDA - assert_eq!( - bank.get_balance(&destination), - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()), - ); - - // Destination data account was created, now holds the source data account's balance - assert_eq!(bank.get_balance(&destination_data), source_data_lamports); +fn test_program_replacement() { + let mut bank = create_simple_test_bank(0); - // Source program accounts are now empty - assert_eq!(bank.get_balance(&source), 0); - assert_eq!(bank.get_balance(&source_data), 0); - - // Destination program account holds the PDA, ie: - // - Destination: PDA(DestinationData) - let destination_account = bank.get_account(&destination).unwrap(); - assert_eq!( - destination_account.data(), - &bincode::serialize(&UpgradeableLoaderState::Program { - programdata_address: destination_data - }) - .unwrap(), - ); - - // Destination data account holds the source data, ie: - // - DestinationData: [*Source program data] - let destination_data_account = bank.get_account(&destination_data).unwrap(); - assert_eq!(destination_data_account.data(), &check_data_account_data); - - // Ownership & executable match the source program accounts - assert_eq!(destination_account.owner(), &bpf_upgradeable_id); - assert!(destination_account.executable()); - assert_eq!(destination_data_account.owner(), &bpf_upgradeable_id); - assert!(!destination_data_account.executable()); - - // The remaining lamports from both program accounts minus the rent-exempt - // minimum were burnt - assert_eq!( - bank.capitalization(), - original_capitalization - burnt_after_rent - ); -} - -#[test_case( - None; - "Existing destination account _without_ corresponding data account" -)] -#[test_case( - Some(vec![4; 40]); - "Existing destination account _with_ corresponding data account" -)] -fn test_replace_empty_account_with_upgradeable_program_fail_when_account_exists( - maybe_destination_data_state: Option>, // Inner data of the destination program _data_ account -) { - // Should not be allowed to execute replacement - let bpf_upgradeable_id = bpf_loader_upgradeable::id(); - let bank = create_simple_test_bank(0); - - // Create the test destination account with some arbitrary data and lamports balance - let destination = Pubkey::new_unique(); - let destination_state = vec![0, 0, 0, 0]; // Arbitrary bytes, doesn't matter - let destination_lamports = bank.get_minimum_balance_for_rent_exemption(destination_state.len()); - let destination_account = test_program_replace_set_up_account( - &bank, - &destination, - 
destination_lamports, - &destination_state, - &bpf_upgradeable_id, - true, - ); - - // Create the test source accounts, one for program and one for data - let source = Pubkey::new_unique(); - let (source_data, _) = Pubkey::find_program_address(&[source.as_ref()], &bpf_upgradeable_id); - let source_state = UpgradeableLoaderState::Program { - programdata_address: source_data, - }; - let source_lamports = - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); - let source_data_state = vec![6; 30]; - let source_data_lamports = bank.get_minimum_balance_for_rent_exemption(source_data_state.len()); - let source_account = test_program_replace_set_up_account( - &bank, - &source, - source_lamports, - &source_state, - &bpf_upgradeable_id, - true, - ); - let source_data_account = test_program_replace_set_up_account( - &bank, - &source_data, - source_data_lamports, - &source_data_state, - &bpf_upgradeable_id, - false, + // Setup original program account + let old_address = Pubkey::new_unique(); + let new_address = Pubkey::new_unique(); + bank.store_account_and_update_capitalization( + &old_address, + &AccountSharedData::from(Account { + lamports: 100, + ..Account::default() + }), ); + assert_eq!(bank.get_balance(&old_address), 100); - // Derive the well-known PDA address for the destination data account - let (destination_data, _) = - Pubkey::find_program_address(&[destination.as_ref()], &bpf_upgradeable_id); - - // Create the data account if necessary - let destination_data_account = - if let Some(destination_data_state) = maybe_destination_data_state { - let destination_data_lamports = - bank.get_minimum_balance_for_rent_exemption(destination_data_state.len()); - let destination_data_account = test_program_replace_set_up_account( - &bank, - &destination_data, - destination_data_lamports, - &destination_data_state, - &bpf_upgradeable_id, - false, - ); - Some(destination_data_account) - } else { - None - }; + // Setup new program account + let new_program_account = AccountSharedData::from(Account { + lamports: 123, + ..Account::default() + }); + bank.store_account_and_update_capitalization(&new_address, &new_program_account); + assert_eq!(bank.get_balance(&new_address), 123); let original_capitalization = bank.capitalization(); - // Attempt the replacement - assert_matches!( - replace_empty_account_with_upgradeable_program( - &bank, - &source, - &destination, - "bank-apply_empty_account_replacement_for_program", - ) - .unwrap_err(), - ReplaceAccountError::AccountExists(..) 
- ); - - // Everything should be unchanged - assert_eq!(bank.get_account(&destination).unwrap(), destination_account); - if let Some(destination_data_account) = destination_data_account { - assert_eq!( - bank.get_account(&destination_data).unwrap(), - destination_data_account - ); - } - assert_eq!(bank.get_account(&source).unwrap(), source_account); - assert_eq!(bank.get_account(&source_data).unwrap(), source_data_account); - assert_eq!(bank.capitalization(), original_capitalization); -} - -#[test] -fn test_replace_empty_account_with_upgradeable_program_fail_when_not_upgradeable_program() { - // Should not be allowed to execute replacement - let bpf_upgradeable_id = bpf_loader_upgradeable::id(); - let bank = create_simple_test_bank(0); - - // Create the test destination account with some arbitrary data and lamports balance - let destination = Pubkey::new_unique(); - let destination_state = vec![0, 0, 0, 0]; // Arbitrary bytes, doesn't matter - let destination_lamports = bank.get_minimum_balance_for_rent_exemption(destination_state.len()); - let destination_account = test_program_replace_set_up_account( - &bank, - &destination, - destination_lamports, - &destination_state, - &bpf_upgradeable_id, - true, - ); - - // Create the test source accounts, one for program and one for data - let source = Pubkey::new_unique(); - let (source_data, _) = Pubkey::find_program_address(&[source.as_ref()], &bpf_upgradeable_id); - let source_state = [0, 0, 0, 0]; // Arbitrary bytes, NOT an upgradeable program - let source_lamports = - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); - let source_data_state = vec![6; 30]; - let source_data_lamports = bank.get_minimum_balance_for_rent_exemption(source_data_state.len()); - let source_account = test_program_replace_set_up_account( - &bank, - &source, - source_lamports, - &source_state, - &bpf_upgradeable_id, - true, - ); - let source_data_account = test_program_replace_set_up_account( - &bank, - &source_data, - source_data_lamports, - &source_data_state, - &bpf_upgradeable_id, - false, - ); + bank.replace_program_account(&old_address, &new_address, "bank-apply_program_replacement"); - let original_capitalization = bank.capitalization(); + // New program account is now empty + assert_eq!(bank.get_balance(&new_address), 0); - // Attempt the replacement - assert_matches!( - replace_empty_account_with_upgradeable_program( - &bank, - &source, - &destination, - "bank-apply_empty_account_replacement_for_program", - ) - .unwrap_err(), - ReplaceAccountError::NotAnUpgradeableProgram - ); + // Old program account holds the new program account + assert_eq!(bank.get_account(&old_address), Some(new_program_account)); - // Everything should be unchanged - assert_eq!(bank.get_account(&destination).unwrap(), destination_account); - assert_eq!(bank.get_account(&source).unwrap(), source_account); - assert_eq!(bank.get_account(&source_data).unwrap(), source_data_account); - assert_eq!(bank.capitalization(), original_capitalization); + // Lamports in the old token account were burnt + assert_eq!(bank.capitalization(), original_capitalization - 100); } fn min_rent_exempt_balance_for_sysvars(bank: &Bank, sysvar_ids: &[Pubkey]) -> u64 { diff --git a/runtime/src/inline_feature_gate_program.rs b/runtime/src/inline_feature_gate_program.rs deleted file mode 100644 index a2c647bbda22a0..00000000000000 --- a/runtime/src/inline_feature_gate_program.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! 
Contains replacement program IDs for the feature gate program - -pub(crate) mod noop_program { - solana_sdk::declare_id!("37Yr1mVPdfUuy6oC2yPjWtg8xyyVi33TYYqyNQocsAkT"); -} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 1bbd479848e987..e6ba2b1bd8969b 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -14,7 +14,6 @@ pub mod commitment; mod epoch_rewards_hasher; pub mod epoch_stakes; pub mod genesis_utils; -pub mod inline_feature_gate_program; pub mod inline_spl_associated_token_account; pub mod installed_scheduler_pool; pub mod loader_utils; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 8682836c2ba247..376880e6327d6a 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -700,10 +700,6 @@ pub mod better_error_codes_for_tx_lamport_check { solana_sdk::declare_id!("Ffswd3egL3tccB6Rv3XY6oqfdzn913vUcjCSnpvCKpfx"); } -pub mod programify_feature_gate_program { - solana_sdk::declare_id!("8GdovDzVwWU5edz2G697bbB7GZjrUc6aQZLWyNNAtHdg"); -} - pub mod update_hashes_per_tick2 { solana_sdk::declare_id!("EWme9uFqfy1ikK1jhJs8fM5hxWnK336QJpbscNtizkTU"); } @@ -894,7 +890,6 @@ lazy_static! { (require_rent_exempt_split_destination::id(), "Require stake split destination account to be rent exempt"), (better_error_codes_for_tx_lamport_check::id(), "better error codes for tx lamport check #33353"), (enable_alt_bn128_compression_syscall::id(), "add alt_bn128 compression syscalls"), - (programify_feature_gate_program::id(), "move feature gate activation logic to an on-chain program #32783"), (update_hashes_per_tick2::id(), "Update desired hashes per tick to 2.8M"), (update_hashes_per_tick3::id(), "Update desired hashes per tick to 4.4M"), (update_hashes_per_tick4::id(), "Update desired hashes per tick to 7.6M"), From 1ce91a56a5db16b15d2f6c6db4963d587cac5150 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sat, 28 Oct 2023 00:51:34 +0800 Subject: [PATCH 24/98] ci: fixed sccache version for sccache-action (#33904) --- .github/workflows/downstream-project-spl.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index f0ecfb20accb4e..6afd398f43accb 100644 --- a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -43,6 +43,8 @@ jobs: .github/scripts/purge-ubuntu-runner.sh - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" - shell: bash run: | @@ -90,6 +92,8 @@ jobs: .github/scripts/purge-ubuntu-runner.sh - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" - shell: bash run: | @@ -139,6 +143,8 @@ jobs: .github/scripts/purge-ubuntu-runner.sh - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" - shell: bash run: | From 1814b2bc81aca29b322f8e1f280494c1b9ee6b32 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 27 Oct 2023 13:03:33 -0400 Subject: [PATCH 25/98] Adds logs for starting/stopping of the background account hasher (#33903) --- accounts-db/src/accounts_db.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 92c144ac0cbe59..2084c8197b7c24 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2926,6 +2926,7 @@ impl AccountsDb { } fn background_hasher(receiver: Receiver) { + info!("Background account hasher has started"); loop { let result = receiver.recv(); match result { @@ -2936,11 +2937,13 @@ impl AccountsDb { let _ = (*account).hash(); 
}; } - Err(_) => { + Err(err) => { + info!("Background account hasher is stopping because: {err}"); break; } } } + info!("Background account hasher has stopped"); } fn start_background_hasher(&mut self) { From 24a4670cef5ecea78307d1c07876cc8ca25e4f79 Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 27 Oct 2023 21:54:55 +0200 Subject: [PATCH 26/98] add program examples repo to docs (#33898) --- docs/src/developing/on-chain-programs/developing-rust.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/src/developing/on-chain-programs/developing-rust.md b/docs/src/developing/on-chain-programs/developing-rust.md index 3e21799222077d..d1f8423ecdbe6f 100644 --- a/docs/src/developing/on-chain-programs/developing-rust.md +++ b/docs/src/developing/on-chain-programs/developing-rust.md @@ -386,5 +386,10 @@ $ cargo build-bpf --dump ## Examples The [Solana Program Library -github](https://github.com/solana-labs/solana-program-library/tree/master/examples/rust) +GitHub](https://github.com/solana-labs/solana-program-library/tree/master/examples/rust) repo contains a collection of Rust examples. + +The [Solana Developers +Program Examples GitHub](https://github.com/solana-developers/program-examples) +repo also contains a collection of beginner to intermediate Rust program +examples. \ No newline at end of file From b0bf24b6fc3b5b57032416643e2db169e8bd79ca Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Fri, 27 Oct 2023 15:37:45 -0700 Subject: [PATCH 27/98] [zk-token-proof] Round compute units to nice numbers (#33910) round zk-token-proof compute units to nice numbers --- programs/zk-token-proof/src/lib.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index 3e43c564e70cef..0aa75c4ef5cff5 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -17,20 +17,20 @@ use { }; pub const CLOSE_CONTEXT_STATE_COMPUTE_UNITS: u64 = 3_300; -pub const VERIFY_ZERO_BALANCE_COMPUTE_UNITS: u64 = 6012; -pub const VERIFY_WITHDRAW_COMPUTE_UNITS: u64 = 112_454; -pub const VERIFY_CIPHERTEXT_CIPHERTEXT_EQUALITY_COMPUTE_UNITS: u64 = 7_943; -pub const VERIFY_TRANSFER_COMPUTE_UNITS: u64 = 219_290; -pub const VERIFY_TRANSFER_WITH_FEE_COMPUTE_UNITS: u64 = 407_121; -pub const VERIFY_PUBKEY_VALIDITY_COMPUTE_UNITS: u64 = 2_619; -pub const VERIFY_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 105_066; -pub const VERIFY_BATCHED_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 111_478; -pub const VERIFY_BATCHED_RANGE_PROOF_U128_COMPUTE_UNITS: u64 = 204_512; +pub const VERIFY_ZERO_BALANCE_COMPUTE_UNITS: u64 = 6_000; +pub const VERIFY_WITHDRAW_COMPUTE_UNITS: u64 = 110_000; +pub const VERIFY_CIPHERTEXT_CIPHERTEXT_EQUALITY_COMPUTE_UNITS: u64 = 8_000; +pub const VERIFY_TRANSFER_COMPUTE_UNITS: u64 = 219_000; +pub const VERIFY_TRANSFER_WITH_FEE_COMPUTE_UNITS: u64 = 407_000; +pub const VERIFY_PUBKEY_VALIDITY_COMPUTE_UNITS: u64 = 2_600; +pub const VERIFY_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 105_000; +pub const VERIFY_BATCHED_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 111_000; +pub const VERIFY_BATCHED_RANGE_PROOF_U128_COMPUTE_UNITS: u64 = 200_000; pub const VERIFY_BATCHED_RANGE_PROOF_U256_COMPUTE_UNITS: u64 = 368_000; -pub const VERIFY_CIPHERTEXT_COMMITMENT_EQUALITY_COMPUTE_UNITS: u64 = 6_424; -pub const VERIFY_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 6_440; -pub const VERIFY_BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 12_575; -pub const 
VERIFY_FEE_SIGMA_COMPUTE_UNITS: u64 = 6_547; +pub const VERIFY_CIPHERTEXT_COMMITMENT_EQUALITY_COMPUTE_UNITS: u64 = 6_400; +pub const VERIFY_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 6_400; +pub const VERIFY_BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 13_000; +pub const VERIFY_FEE_SIGMA_COMPUTE_UNITS: u64 = 6_500; fn process_verify_proof(invoke_context: &mut InvokeContext) -> Result<(), InstructionError> where From cdc284189a042b7a4e2c44b99c2bc36da7566524 Mon Sep 17 00:00:00 2001 From: Brooks Date: Sat, 28 Oct 2023 12:47:29 -0400 Subject: [PATCH 28/98] Refactors RollingBitField::min() (#33911) --- accounts-db/src/rolling_bit_field.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/rolling_bit_field.rs b/accounts-db/src/rolling_bit_field.rs index cfbfe820a176c7..65d3ff76b54ae7 100644 --- a/accounts-db/src/rolling_bit_field.rs +++ b/accounts-db/src/rolling_bit_field.rs @@ -67,15 +67,12 @@ impl RollingBitField { } else if self.excess.is_empty() { Some(self.min) } else { - let mut min = if self.all_items_in_excess() { - u64::MAX + let excess_min = self.excess.iter().min().copied(); + if self.all_items_in_excess() { + excess_min } else { - self.min - }; - for item in &self.excess { - min = std::cmp::min(min, *item); + Some(std::cmp::min(self.min, excess_min.unwrap_or(u64::MAX))) } - Some(min) } } From 30491995edda0fd6ebad83d7644acf59e749079e Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Mon, 30 Oct 2023 20:23:00 -0700 Subject: [PATCH 29/98] Fixup: zeroize: Allow versions newer than 1.3 for `curve25519-dalek` (#33930) Fixes commit a099c7a0b8ac4281dbc6dd422d697e700c28a7d1 Author: Illia Bobyr Date: Mon Oct 23 12:19:59 2023 -0700 zeroize: Allow versions newer than 1.3 for `curve25519-dalek` (#33516) Use correct commit hash from `solana-labs/curve25519-dalek.git`. 
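This fixup matters because Cargo.toml and Cargo.lock must agree on the pinned `curve25519-dalek` revision, and the earlier commit recorded the wrong hash. Below is a minimal sketch of a guard that would catch that kind of drift; it is a hypothetical helper written for illustration here, not part of this repository's CI:

```rust
// Hypothetical guard: fail if Cargo.lock pins curve25519-dalek to a
// different revision than the one named in Cargo.toml's [patch] entry.
use std::fs;

fn main() {
    // Revision from Cargo.toml's [patch.crates-io.curve25519-dalek] entry
    let expected_rev = "b500cdc2a920cd5bff9e2dd974d7b97349d61464";
    let lock = fs::read_to_string("Cargo.lock").expect("failed to read Cargo.lock");
    let source_line = lock
        .lines()
        .find(|line| line.contains("solana-labs/curve25519-dalek.git"))
        .expect("no curve25519-dalek git source recorded in Cargo.lock");
    assert!(
        source_line.contains(expected_rev),
        "Cargo.lock drifted from Cargo.toml: {source_line}"
    );
    println!("curve25519-dalek correctly pinned to {expected_rev}");
}
```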
--- Cargo.lock | 2 +- Cargo.toml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1157029157907d..626541d61f3a08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1464,7 +1464,7 @@ dependencies = [ [[package]] name = "curve25519-dalek" version = "3.2.1" -source = "git+https://github.com/solana-labs/curve25519-dalek.git?rev=c14774464c4d38de553c6ef2f48a10982c1b4801#c14774464c4d38de553c6ef2f48a10982c1b4801" +source = "git+https://github.com/solana-labs/curve25519-dalek.git?rev=b500cdc2a920cd5bff9e2dd974d7b97349d61464#b500cdc2a920cd5bff9e2dd974d7b97349d61464" dependencies = [ "byteorder", "digest 0.9.0", diff --git a/Cargo.toml b/Cargo.toml index caccf5189e49ee..e6168de4d7aec8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -516,9 +516,9 @@ rev = "6105d7a5591aefa646a95d12b5e8d3f55a9214ef" # # https://github.com/dalek-cryptography/curve25519-dalek/commit/29e5c29b0e5c6821e4586af58b0d0891dd2ec639 # -# Comparison with `c14774464c4d38de553c6ef2f48a10982c1b4801`: +# Comparison with `b500cdc2a920cd5bff9e2dd974d7b97349d61464`: # -# https://github.com/dalek-cryptography/curve25519-dalek/compare/3.2.1...solana-labs:curve25519-dalek:c14774464c4d38de553c6ef2f48a10982c1b4801 +# https://github.com/dalek-cryptography/curve25519-dalek/compare/3.2.1...solana-labs:curve25519-dalek:b500cdc2a920cd5bff9e2dd974d7b97349d61464 # # Or, using the branch name instead of the hash: # @@ -526,4 +526,4 @@ rev = "6105d7a5591aefa646a95d12b5e8d3f55a9214ef" # [patch.crates-io.curve25519-dalek] git = "https://github.com/solana-labs/curve25519-dalek.git" -rev = "c14774464c4d38de553c6ef2f48a10982c1b4801" +rev = "b500cdc2a920cd5bff9e2dd974d7b97349d61464" From b2cec5aa48ebbc7ade0d75af67547c22583b2673 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 31 Oct 2023 12:20:39 +0800 Subject: [PATCH 30/98] multinode-demo scripts support --block-production-method arg (#33891) --- multinode-demo/bootstrap-validator.sh | 3 +++ multinode-demo/validator.sh | 3 +++ 2 files changed, 6 insertions(+) diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index f69c05d1ed3d7f..5afc543b2f0032 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -106,6 +106,9 @@ while [[ -n $1 ]]; do elif [[ $1 = --log-messages-bytes-limit ]]; then args+=("$1" "$2") shift 2 + elif [[ $1 == --block-production-method ]]; then + args+=("$1" "$2") + shift 2 else echo "Unknown argument: $1" $program --help diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 9090055b908b10..487154101ac979 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -182,6 +182,9 @@ while [[ -n $1 ]]; do elif [[ $1 == --skip-require-tower ]]; then maybeRequireTower=false shift + elif [[ $1 == --block-production-method ]]; then + args+=("$1" "$2") + shift 2 elif [[ $1 = -h ]]; then usage "$@" else From 136ab21f34793913b5e79dfb38180965ea002019 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Tue, 31 Oct 2023 14:33:36 +0900 Subject: [PATCH 31/98] Define InstalledScheduler::wait_for_termination() (#33922) * Define InstalledScheduler::wait_for_termination() * Rename to wait_for_scheduler_termination * Comment wait_for_termination and WaitReason better --- Cargo.lock | 1 + core/src/replay_stage.rs | 37 ++- ledger/Cargo.toml | 1 + ledger/src/blockstore_processor.rs | 34 ++- programs/sbf/Cargo.lock | 1 + runtime/src/bank.rs | 31 ++- runtime/src/bank/tests.rs | 2 +- runtime/src/installed_scheduler_pool.rs | 310 
+++++++++++++++++++++++- 8 files changed, 401 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 626541d61f3a08..610b5edb49c5f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6258,6 +6258,7 @@ dependencies = [ "libc", "log", "lru", + "mockall", "num_cpus", "num_enum 0.7.0", "prost", diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 5cceb8dff3502b..2aa0d82be0651e 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -42,7 +42,8 @@ use { block_error::BlockError, blockstore::Blockstore, blockstore_processor::{ - self, BlockstoreProcessorError, ConfirmationProgress, TransactionStatusSender, + self, BlockstoreProcessorError, ConfirmationProgress, ExecuteBatchesInternalMetrics, + TransactionStatusSender, }, entry_notifier_service::EntryNotifierSender, leader_schedule_cache::LeaderScheduleCache, @@ -2815,6 +2816,40 @@ impl ReplayStage { .expect("Bank fork progress entry missing for completed bank"); let replay_stats = bank_progress.replay_stats.clone(); + + if let Some((result, completed_execute_timings)) = + bank.wait_for_completed_scheduler() + { + let metrics = ExecuteBatchesInternalMetrics::new_with_timings_from_all_threads( + completed_execute_timings, + ); + replay_stats + .write() + .unwrap() + .batch_execute + .accumulate(metrics); + + if let Err(err) = result { + Self::mark_dead_slot( + blockstore, + bank, + bank_forks.read().unwrap().root(), + &BlockstoreProcessorError::InvalidTransaction(err), + rpc_subscriptions, + duplicate_slots_tracker, + gossip_duplicate_confirmed_slots, + epoch_slots_frozen_slots, + progress, + heaviest_subtree_fork_choice, + duplicate_slots_to_repair, + ancestor_hashes_replay_update_sender, + purge_repair_slot_counter, + ); + // don't try to run the remaining normal processing for the completed bank + continue; + } + } + let r_replay_stats = replay_stats.read().unwrap(); let replay_progress = bank_progress.replay_progress.clone(); let r_replay_progress = replay_progress.read().unwrap(); diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index b3fb1ac5f9b97d..87ba0c39235a12 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -25,6 +25,7 @@ lazy_static = { workspace = true } libc = { workspace = true } log = { workspace = true } lru = { workspace = true } +mockall = { workspace = true } num_cpus = { workspace = true } num_enum = { workspace = true } prost = { workspace = true } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index bb717ff8348584..ccdfb97ece81f7 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -216,12 +216,27 @@ fn execute_batch( } #[derive(Default)] -struct ExecuteBatchesInternalMetrics { +pub struct ExecuteBatchesInternalMetrics { execution_timings_per_thread: HashMap<usize, ThreadExecuteTimings>, total_batches_len: u64, execute_batches_us: u64, } +impl ExecuteBatchesInternalMetrics { + pub fn new_with_timings_from_all_threads(execute_timings: ExecuteTimings) -> Self { + const DUMMY_THREAD_INDEX: usize = 999; + let mut new = Self::default(); + new.execution_timings_per_thread.insert( + DUMMY_THREAD_INDEX, + ThreadExecuteTimings { + execute_timings, + ..ThreadExecuteTimings::default() + }, + ); + new + } +} + fn execute_batches_internal( bank: &Arc<Bank>, batches: &[TransactionBatchWithIndexes], @@ -1068,7 +1083,7 @@ pub struct BatchExecutionTiming { } impl BatchExecutionTiming { - fn accumulate(&mut self, new_batch: ExecuteBatchesInternalMetrics) { + pub fn accumulate(&mut self, new_batch: ExecuteBatchesInternalMetrics) { let Self {
totals, wall_clock_us, @@ -1382,6 +1397,9 @@ fn process_bank_0( &mut ExecuteTimings::default(), ) .expect("Failed to process bank 0 from ledger. Did you forget to provide a snapshot?"); + if let Some((result, _timings)) = bank0.wait_for_completed_scheduler() { + result.unwrap(); + } bank0.freeze(); if blockstore.is_primary_access() { blockstore.insert_bank_hash(bank0.slot(), bank0.hash(), false); @@ -1784,6 +1802,9 @@ fn process_single_slot( err })?; + if let Some((result, _timings)) = bank.wait_for_completed_scheduler() { + result? + } bank.freeze(); // all banks handled by this routine are created from complete slots if blockstore.is_primary_access() { blockstore.insert_bank_hash(bank.slot(), bank.hash(), false); @@ -1924,7 +1945,7 @@ pub mod tests { genesis_utils::{ self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }, - installed_scheduler_pool::MockInstalledScheduler, + installed_scheduler_pool::{MockInstalledScheduler, WaitReason}, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -4510,10 +4531,17 @@ pub mod tests { let txs = create_test_transactions(&mint_keypair, &genesis_config.hash()); let mut mocked_scheduler = MockInstalledScheduler::new(); + let mut seq = mockall::Sequence::new(); mocked_scheduler .expect_schedule_execution() .times(txs.len()) .returning(|_| ()); + mocked_scheduler + .expect_wait_for_termination() + .with(mockall::predicate::eq(WaitReason::DroppedFromBankForks)) + .times(1) + .in_sequence(&mut seq) + .returning(|_| None); let bank = BankWithScheduler::new(bank, Some(Box::new(mocked_scheduler))); let batch = bank.prepare_sanitized_batch(&txs); diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 71baf3c7932d89..a81280a23341d0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5191,6 +5191,7 @@ dependencies = [ "libc", "log", "lru", + "mockall", "num_cpus", "num_enum 0.7.0", "prost", diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 2bdb21e6d3573b..7a770833cc5a73 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -43,6 +43,7 @@ use { builtins::{BuiltinPrototype, BUILTINS}, epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, + installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock}, runtime_config::RuntimeConfig, serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_hash::SnapshotHash, @@ -220,7 +221,7 @@ mod metrics; mod serde_snapshot; mod sysvar_cache; #[cfg(test)] -mod tests; +pub(crate) mod tests; mod transaction_account_state_info; pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0; @@ -4185,7 +4186,11 @@ impl Bank { /// Register a new recent blockhash in the bank's recent blockhash queue. Called when a bank /// reaches its max tick height. Can be called by tests to get new blockhashes for transaction /// processing without advancing to a new bank slot. - pub fn register_recent_blockhash(&self, blockhash: &Hash) { + fn register_recent_blockhash(&self, blockhash: &Hash, scheduler: &InstalledSchedulerRwLock) { + // This is needed because recent_blockhash updates necessitate synchronizations for + // consistent tx check_age handling. + BankWithScheduler::wait_for_paused_scheduler(self, scheduler); + // Only acquire the write lock for the blockhash queue on block boundaries because // readers can starve this write lock acquisition and ticks would be slowed down too // much if the write lock is acquired for each tick. 
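The contract behind this synchronization is easiest to see in miniature. A self-contained toy model of how the three wait reasons behave (illustrative only — `ToyScheduler` and its fields are invented here, not Solana code):

    // Toy model of the WaitReason protocol: a pause returns no result and
    // retains it internally; the two terminating reasons hand the result back.
    #[derive(Debug, PartialEq, Eq, Clone, Copy)]
    enum WaitReason { TerminatedToFreeze, DroppedFromBankForks, PausedForRecentBlockhash }

    struct ToyScheduler {
        // The finalized result, produced once all scheduled work has drained.
        finished: Option<Result<(), String>>,
    }

    impl ToyScheduler {
        fn wait_for_termination(&mut self, reason: &WaitReason) -> Option<Result<(), String>> {
            // (a real scheduler would block here until every scheduled
            // transaction has completed)
            match reason {
                WaitReason::PausedForRecentBlockhash => None, // keep the result for later
                _ => self.finished.take(),                    // hand it over exactly once
            }
        }
    }

    fn main() {
        let mut s = ToyScheduler { finished: Some(Ok(())) };
        assert!(s.wait_for_termination(&WaitReason::PausedForRecentBlockhash).is_none());
        assert!(s.wait_for_termination(&WaitReason::TerminatedToFreeze).is_some());
    }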
@@ -4197,7 +4202,10 @@ impl Bank { // gating this under #[cfg(feature = "dev-context-only-utils")] isn't easy due to // solana-program-test's usage... pub fn register_unique_recent_blockhash_for_test(&self) { - self.register_recent_blockhash(&Hash::new_unique()) + self.register_recent_blockhash( + &Hash::new_unique(), + &BankWithScheduler::no_scheduler_available(), + ) } /// Tell the bank which Entry IDs exist on the ledger. This function assumes subsequent calls @@ -4206,14 +4214,14 @@ impl Bank { /// /// This is NOT thread safe because if tick height is updated by two different threads, the /// block boundary condition could be missed. - pub fn register_tick(&self, hash: &Hash) { + pub fn register_tick(&self, hash: &Hash, scheduler: &InstalledSchedulerRwLock) { assert!( !self.freeze_started(), "register_tick() working on a bank that is already frozen or is undergoing freezing!" ); if self.is_block_boundary(self.tick_height.load(Relaxed) + 1) { - self.register_recent_blockhash(hash); + self.register_recent_blockhash(hash, scheduler); } // ReplayStage will start computing the accounts delta hash when it @@ -4226,18 +4234,17 @@ impl Bank { #[cfg(feature = "dev-context-only-utils")] pub fn register_tick_for_test(&self, hash: &Hash) { - // currently meaningless wrapper; upcoming pr will make it an actual helper... - self.register_tick(hash) + self.register_tick(hash, &BankWithScheduler::no_scheduler_available()) } #[cfg(feature = "dev-context-only-utils")] pub fn register_default_tick_for_test(&self) { - self.register_tick(&Hash::default()) + self.register_tick_for_test(&Hash::default()) } #[cfg(feature = "dev-context-only-utils")] pub fn register_unique_tick(&self) { - self.register_tick(&Hash::new_unique()) + self.register_tick_for_test(&Hash::new_unique()) } pub fn is_complete(&self) -> bool { @@ -8008,10 +8015,14 @@ impl Bank { } pub fn fill_bank_with_ticks_for_tests(&self) { + self.do_fill_bank_with_ticks_for_tests(&BankWithScheduler::no_scheduler_available()) + } + + pub(crate) fn do_fill_bank_with_ticks_for_tests(&self, scheduler: &InstalledSchedulerRwLock) { if self.tick_height.load(Relaxed) < self.max_tick_height { let last_blockhash = self.last_blockhash(); while self.last_blockhash() == last_blockhash { - self.register_tick(&Hash::new_unique()) + self.register_tick(&Hash::new_unique(), scheduler) } } else { warn!("Bank already reached max tick height, cannot fill it with more ticks"); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 2f1c0e0aee45f7..df39171d84b681 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -274,7 +274,7 @@ fn test_bank_new() { assert_eq!(rent.lamports_per_byte_year, 5); } -fn create_simple_test_bank(lamports: u64) -> Bank { +pub(crate) fn create_simple_test_bank(lamports: u64) -> Bank { let (genesis_config, _mint_keypair) = create_genesis_config(lamports); Bank::new_for_tests(&genesis_config) } diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index 5fef97bc6e6908..553a31c800e6e4 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -4,7 +4,11 @@ use { crate::bank::Bank, log::*, - solana_sdk::transaction::SanitizedTransaction, + solana_program_runtime::timings::ExecuteTimings, + solana_sdk::{ + hash::Hash, + transaction::{Result, SanitizedTransaction}, + }, std::{ fmt::Debug, ops::Deref, @@ -23,14 +27,64 @@ use {mockall::automock, qualifier_attr::qualifiers}; allow(unused_attributes, clippy::needless_lifetimes) )] pub 
trait InstalledScheduler: Send + Sync + Debug + 'static { + // Calling this is illegal as soon as wait_for_termination is called. fn schedule_execution<'a>( &'a self, transaction_with_index: &'a (&'a SanitizedTransaction, usize), ); + + /// Wait for a scheduler to terminate after it is notified with the given reason. + /// + /// Firstly, this function blocks the current thread while waiting for the scheduler to + /// complete all of the executions for the scheduled transactions. This means the scheduler has + /// prepared the finalized `ResultWithTimings` at least internally at the time of exiting from + /// this function. If no transaction is scheduled, the result and timings will be `Ok(())` and + /// `ExecuteTimings::default()` respectively. This is done in the same way regardless of + /// `WaitReason`. + /// + /// After that, the scheduler may behave differently depending on the reason, regarding the + /// final bookkeeping. Specifically, this function is guaranteed to return + /// `Some(finalized_result_with_timings)` unless the reason is `PausedForRecentBlockhash`. In + /// the case of `PausedForRecentBlockhash`, the scheduler is responsible for retaining the + /// finalized `ResultWithTimings` until it's `wait_for_termination()`-ed with one of the other + /// two reasons later. + #[must_use] + fn wait_for_termination(&mut self, reason: &WaitReason) -> Option<ResultWithTimings>; } pub type DefaultInstalledSchedulerBox = Box<dyn InstalledScheduler>; +pub type ResultWithTimings = (Result<()>, ExecuteTimings); + +/// A hint from the bank about the reason the caller is waiting on its scheduler termination. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum WaitReason { + // The bank wants its scheduler to terminate after the completion of transaction execution, in + // order to freeze itself immediately thereafter. This is by far the most normal wait reason. + // + // Note that `wait_for_termination(TerminatedToFreeze)` must explicitly be done prior + // to Bank::freeze(). This can't be done inside Bank::freeze() implicitly, to keep it + // infallible. + TerminatedToFreeze, + // The bank wants its scheduler to terminate just like `TerminatedToFreeze` and indicate that + // Drop::drop() is the caller. + DroppedFromBankForks, + // The bank wants its scheduler to pause after the completion of transaction execution, + // without being returned to the pool, so that the scheduler's internally-held + // `ResultWithTimings` can be collected later. + PausedForRecentBlockhash, +} + +impl WaitReason { + pub fn is_paused(&self) -> bool { + // An exhaustive `match` is preferred here over `matches!()` to force an explicit + // decision to be made, should we add new variants like `PausedForFooBar`... + match self { + WaitReason::PausedForRecentBlockhash => true, + WaitReason::TerminatedToFreeze | WaitReason::DroppedFromBankForks => false, + } + } +} + /// Very thin wrapper around Arc<Bank> /// /// It brings type-safety against accidental mixing of bank and scheduler with different slots, @@ -85,6 +139,14 @@ impl BankWithScheduler { self.inner.bank.clone() } + pub fn register_tick(&self, hash: &Hash) { + self.inner.bank.register_tick(hash, &self.inner.scheduler); + } + + pub fn fill_bank_with_ticks_for_tests(&self) { + self.do_fill_bank_with_ticks_for_tests(&self.inner.scheduler); + } + pub fn has_installed_scheduler(&self) -> bool { self.inner.scheduler.read().unwrap().is_some() } @@ -107,11 +169,111 @@ impl BankWithScheduler { } } + // take needless &mut only to communicate its semantic mutability to humans...
+ #[cfg(feature = "dev-context-only-utils")] + pub fn drop_scheduler(&mut self) { + self.inner.drop_scheduler(); + } + + pub(crate) fn wait_for_paused_scheduler(bank: &Bank, scheduler: &InstalledSchedulerRwLock) { + let maybe_result_with_timings = BankWithSchedulerInner::wait_for_scheduler_termination( + bank, + scheduler, + WaitReason::PausedForRecentBlockhash, + ); + assert!( + maybe_result_with_timings.is_none(), + "Premature result was returned from scheduler after paused" + ); + } + + #[must_use] + pub fn wait_for_completed_scheduler(&self) -> Option<ResultWithTimings> { + BankWithSchedulerInner::wait_for_scheduler_termination( + &self.inner.bank, + &self.inner.scheduler, + WaitReason::TerminatedToFreeze, + ) + } + pub const fn no_scheduler_available() -> InstalledSchedulerRwLock { RwLock::new(None) } } +impl BankWithSchedulerInner { + #[must_use] + fn wait_for_completed_scheduler_from_drop(&self) -> Option<ResultWithTimings> { + Self::wait_for_scheduler_termination( + &self.bank, + &self.scheduler, + WaitReason::DroppedFromBankForks, + ) + } + + #[must_use] + fn wait_for_scheduler_termination( + bank: &Bank, + scheduler: &InstalledSchedulerRwLock, + reason: WaitReason, + ) -> Option<ResultWithTimings> { + debug!( + "wait_for_scheduler_termination(slot: {}, reason: {:?}): started...", + bank.slot(), + reason, + ); + + let mut scheduler = scheduler.write().unwrap(); + let result_with_timings = if scheduler.is_some() { + let result_with_timings = scheduler + .as_mut() + .and_then(|scheduler| scheduler.wait_for_termination(&reason)); + if !reason.is_paused() { + drop(scheduler.take().expect("scheduler after waiting")); + } + result_with_timings + } else { + None + }; + debug!( + "wait_for_scheduler_termination(slot: {}, reason: {:?}): finished with: {:?}...", + bank.slot(), + reason, + result_with_timings.as_ref().map(|(result, _)| result), + ); + + result_with_timings + } + + fn drop_scheduler(&self) { + if std::thread::panicking() { + error!( + "BankWithSchedulerInner::drop_scheduler(): slot: {} skipping due to already panicking...", + self.bank.slot(), + ); + return; + } + + // There's no guarantee that a ResultWithTimings is available at all when being dropped.
+ if let Some(Err(err)) = self + .wait_for_completed_scheduler_from_drop() + .map(|(result, _timings)| result) + { + warn!( + "BankWithSchedulerInner::drop_scheduler(): slot: {} discarding error from scheduler: {:?}", + self.bank.slot(), + err, + ); + } + } +} + +impl Drop for BankWithSchedulerInner { + fn drop(&mut self) { + self.drop_scheduler(); + } +} + impl Deref for BankWithScheduler { type Target = Arc<Bank>; @@ -119,3 +281,149 @@ impl Deref for BankWithScheduler { &self.inner.bank } } + +#[cfg(test)] +mod tests { + use { + super::*, + crate::{ + bank::test_utils::goto_end_of_slot_with_scheduler, + genesis_utils::{create_genesis_config, GenesisConfigInfo}, + }, + assert_matches::assert_matches, + mockall::Sequence, + solana_sdk::system_transaction, + }; + + fn setup_mocked_scheduler_with_extra( + wait_reasons: impl Iterator<Item = WaitReason>, + f: Option<impl Fn(&mut MockInstalledScheduler)>, + ) -> DefaultInstalledSchedulerBox { + let mut mock = MockInstalledScheduler::new(); + let mut seq = Sequence::new(); + + for wait_reason in wait_reasons { + mock.expect_wait_for_termination() + .with(mockall::predicate::eq(wait_reason)) + .times(1) + .in_sequence(&mut seq) + .returning(move |_| { + if wait_reason.is_paused() { + None + } else { + Some((Ok(()), ExecuteTimings::default())) + } + }); + } + + if let Some(f) = f { + f(&mut mock); + } + + Box::new(mock) + } + + fn setup_mocked_scheduler( + wait_reasons: impl Iterator<Item = WaitReason>, + ) -> DefaultInstalledSchedulerBox { + setup_mocked_scheduler_with_extra( + wait_reasons, + None::<fn(&mut MockInstalledScheduler) -> ()>, + ) + } + + #[test] + fn test_scheduler_normal_termination() { + solana_logger::setup(); + + let bank = Arc::new(Bank::default_for_tests()); + let bank = BankWithScheduler::new( + bank, + Some(setup_mocked_scheduler( + [WaitReason::TerminatedToFreeze].into_iter(), + )), + ); + assert!(bank.has_installed_scheduler()); + assert_matches!(bank.wait_for_completed_scheduler(), Some(_)); + + // Repeated calls to wait_for_completed_scheduler() are okay; no ResultWithTimings will be + // returned. + assert!(!bank.has_installed_scheduler()); + assert_matches!(bank.wait_for_completed_scheduler(), None); + } + + #[test] + fn test_no_scheduler_termination() { + solana_logger::setup(); + + let bank = Arc::new(Bank::default_for_tests()); + let bank = BankWithScheduler::new_without_scheduler(bank); + + // Calling wait_for_completed_scheduler() is a no-op when no scheduler is installed. + assert!(!bank.has_installed_scheduler()); + assert_matches!(bank.wait_for_completed_scheduler(), None); + } + + #[test] + fn test_scheduler_termination_from_drop() { + solana_logger::setup(); + + let bank = Arc::new(Bank::default_for_tests()); + let bank = BankWithScheduler::new( + bank, + Some(setup_mocked_scheduler( + [WaitReason::DroppedFromBankForks].into_iter(), + )), + ); + drop(bank); + } + + #[test] + fn test_scheduler_pause() { + solana_logger::setup(); + + let bank = Arc::new(crate::bank::tests::create_simple_test_bank(42)); + let bank = BankWithScheduler::new( + bank, + Some(setup_mocked_scheduler( + [ + WaitReason::PausedForRecentBlockhash, + WaitReason::TerminatedToFreeze, + ] + .into_iter(), + )), + ); + goto_end_of_slot_with_scheduler(&bank); + assert_matches!(bank.wait_for_completed_scheduler(), Some(_)); + } + + #[test] + fn test_schedule_executions() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + ..
+ } = create_genesis_config(10_000); + let tx0 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 2, + genesis_config.hash(), + )); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let mocked_scheduler = setup_mocked_scheduler_with_extra( + [WaitReason::DroppedFromBankForks].into_iter(), + Some(|mocked: &mut MockInstalledScheduler| { + mocked + .expect_schedule_execution() + .times(1) + .returning(|(_, _)| ()); + }), + ); + + let bank = BankWithScheduler::new(bank, Some(mocked_scheduler)); + bank.schedule_transaction_executions([(&tx0, &0)].into_iter()); + } +} From 3f805ad06dad451bea65b1a03377e130eacdbb6d Mon Sep 17 00:00:00 2001 From: Jeff Biseda Date: Wed, 1 Nov 2023 02:39:26 -0400 Subject: [PATCH 32/98] improve batch_send error handling (#33936) --- core/src/repair/repair_service.rs | 21 +++--- core/src/repair/serve_repair.rs | 19 ++--- gossip/tests/gossip.rs | 18 ++--- streamer/src/nonblocking/sendmmsg.rs | 69 +++++++++---------- streamer/src/sendmmsg.rs | 67 +++++++++--------- turbine/src/broadcast_stage.rs | 9 ++- .../broadcast_duplicates_run.rs | 7 +- 7 files changed, 110 insertions(+), 100 deletions(-) diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 20d6a01084f212..36ba4978e1c793 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -457,16 +457,17 @@ impl RepairService { let mut batch_send_repairs_elapsed = Measure::start("batch_send_repairs_elapsed"); if !batch.is_empty() { - if let Err(SendPktsError::IoError(err, num_failed)) = - batch_send(repair_socket, &batch) - { - error!( - "{} batch_send failed to send {}/{} packets first error {:?}", - id, - num_failed, - batch.len(), - err - ); + match batch_send(repair_socket, &batch) { + Ok(()) => (), + Err(SendPktsError::IoError(err, num_failed)) => { + error!( + "{} batch_send failed to send {}/{} packets first error {:?}", + id, + num_failed, + batch.len(), + err + ); + } } } batch_send_repairs_elapsed.stop(); diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index ebb2d218658153..2662d487f13b0e 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -1221,15 +1221,16 @@ impl ServeRepair { } } if !pending_pongs.is_empty() { - if let Err(SendPktsError::IoError(err, num_failed)) = - batch_send(repair_socket, &pending_pongs) - { - warn!( - "batch_send failed to send {}/{} packets. First error: {:?}", - num_failed, - pending_pongs.len(), - err - ); + match batch_send(repair_socket, &pending_pongs) { + Ok(()) => (), + Err(SendPktsError::IoError(err, num_failed)) => { + warn!( + "batch_send failed to send {}/{} packets. 
First error: {:?}", + num_failed, + pending_pongs.len(), + err + ); + } } } } diff --git a/gossip/tests/gossip.rs b/gossip/tests/gossip.rs index 569f7c480dfa1e..7759679bdfd36c 100644 --- a/gossip/tests/gossip.rs +++ b/gossip/tests/gossip.rs @@ -139,14 +139,16 @@ fn retransmit_to( .filter(|addr| socket_addr_space.check(addr)) .collect() }; - if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests) - { - error!( - "retransmit_to multi_target_send error: {:?}, {}/{} packets failed", - ioerr, - num_failed, - dests.len(), - ); + match multi_target_send(socket, data, &dests) { + Ok(()) => (), + Err(SendPktsError::IoError(ioerr, num_failed)) => { + error!( + "retransmit_to multi_target_send error: {:?}, {}/{} packets failed", + ioerr, + num_failed, + dests.len(), + ); + } } } diff --git a/streamer/src/nonblocking/sendmmsg.rs b/streamer/src/nonblocking/sendmmsg.rs index 106e53d243cf54..15217b906eb5da 100644 --- a/streamer/src/nonblocking/sendmmsg.rs +++ b/streamer/src/nonblocking/sendmmsg.rs @@ -178,16 +178,10 @@ mod tests { let dest_refs: Vec<_> = vec![&ip4, &ip6, &ip4]; let sender = UdpSocket::bind("0.0.0.0:0").await.expect("bind"); - if let Err(SendPktsError::IoError(_, num_failed)) = - batch_send(&sender, &packet_refs[..]).await - { - assert_eq!(num_failed, 1); - } - if let Err(SendPktsError::IoError(_, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs).await - { - assert_eq!(num_failed, 1); - } + let res = batch_send(&sender, &packet_refs[..]).await; + assert_matches!(res, Err(SendPktsError::IoError(_, /*num_failed*/ 1))); + let res = multi_target_send(&sender, &packets[0], &dest_refs).await; + assert_matches!(res, Err(SendPktsError::IoError(_, /*num_failed*/ 1))); } #[tokio::test] @@ -205,11 +199,12 @@ mod tests { (&packets[3][..], &ipv4broadcast), (&packets[4][..], &ipv4local), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match batch_send(&sender, &packet_refs[..]).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test leading and trailing failures for batch_send @@ -220,11 +215,12 @@ mod tests { (&packets[3][..], &ipv4local), (&packets[4][..], &ipv4broadcast), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 3); + match batch_send(&sender, &packet_refs[..]).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 3); + } } // test consecutive intermediate failures for batch_send @@ -235,11 +231,12 @@ mod tests { (&packets[3][..], &ipv4broadcast), (&packets[4][..], &ipv4local), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match batch_send(&sender, &packet_refs[..]).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test intermediate failures for multi_target_send @@ -250,11 
+247,12 @@ mod tests { &ipv4broadcast, &ipv4local, ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match multi_target_send(&sender, &packets[0], &dest_refs).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test leading and trailing failures for multi_target_send @@ -265,11 +263,12 @@ mod tests { &ipv4local, &ipv4broadcast, ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 3); + match multi_target_send(&sender, &packets[0], &dest_refs).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 3); + } } } } diff --git a/streamer/src/sendmmsg.rs b/streamer/src/sendmmsg.rs index 3340b10e6fdeda..459d868a2ed0c8 100644 --- a/streamer/src/sendmmsg.rs +++ b/streamer/src/sendmmsg.rs @@ -282,14 +282,10 @@ mod tests { let dest_refs: Vec<_> = vec![&ip4, &ip6, &ip4]; let sender = UdpSocket::bind("0.0.0.0:0").expect("bind"); - if let Err(SendPktsError::IoError(_, num_failed)) = batch_send(&sender, &packet_refs[..]) { - assert_eq!(num_failed, 1); - } - if let Err(SendPktsError::IoError(_, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs) - { - assert_eq!(num_failed, 1); - } + let res = batch_send(&sender, &packet_refs[..]); + assert_matches!(res, Err(SendPktsError::IoError(_, /*num_failed*/ 1))); + let res = multi_target_send(&sender, &packets[0], &dest_refs); + assert_matches!(res, Err(SendPktsError::IoError(_, /*num_failed*/ 1))); } #[test] @@ -307,11 +303,12 @@ mod tests { (&packets[3][..], &ipv4broadcast), (&packets[4][..], &ipv4local), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]) - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match batch_send(&sender, &packet_refs[..]) { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test leading and trailing failures for batch_send @@ -322,11 +319,12 @@ mod tests { (&packets[3][..], &ipv4local), (&packets[4][..], &ipv4broadcast), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]) - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 3); + match batch_send(&sender, &packet_refs[..]) { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 3); + } } // test consecutive intermediate failures for batch_send @@ -337,11 +335,12 @@ mod tests { (&packets[3][..], &ipv4broadcast), (&packets[4][..], &ipv4local), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]) - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match batch_send(&sender, &packet_refs[..]) { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + 
assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test intermediate failures for multi_target_send @@ -352,11 +351,12 @@ mod tests { &ipv4broadcast, &ipv4local, ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs) - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match multi_target_send(&sender, &packets[0], &dest_refs) { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test leading and trailing failures for multi_target_send @@ -367,11 +367,12 @@ mod tests { &ipv4local, &ipv4broadcast, ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs) - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 3); + match multi_target_send(&sender, &packets[0], &dest_refs) { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 3); + } } } } diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index f866747ad81e67..98566dfa24bc48 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -466,9 +466,12 @@ pub fn broadcast_shreds( transmit_stats.shred_select += shred_select.as_us(); let mut send_mmsg_time = Measure::start("send_mmsg"); - if let Err(SendPktsError::IoError(ioerr, num_failed)) = batch_send(s, &packets[..]) { - transmit_stats.dropped_packets_udp += num_failed; - result = Err(Error::Io(ioerr)); + match batch_send(s, &packets[..]) { + Ok(()) => (), + Err(SendPktsError::IoError(ioerr, num_failed)) => { + transmit_stats.dropped_packets_udp += num_failed; + result = Err(Error::Io(ioerr)); + } } send_mmsg_time.stop(); transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us(); diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs index 0db4003a079ce8..bae5945aea0e13 100644 --- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs @@ -376,8 +376,11 @@ impl BroadcastRun for BroadcastDuplicatesRun { .flatten() .collect(); - if let Err(SendPktsError::IoError(ioerr, _)) = batch_send(sock, &packets) { - return Err(Error::Io(ioerr)); + match batch_send(sock, &packets) { + Ok(()) => (), + Err(SendPktsError::IoError(ioerr, _)) => { + return Err(Error::Io(ioerr)); + } } Ok(()) } From 63abc72e8671c15144653bbf4736b0c6145c5578 Mon Sep 17 00:00:00 2001 From: Jeff Biseda Date: Wed, 1 Nov 2023 02:40:45 -0400 Subject: [PATCH 33/98] remove unused replay-loop-voting-stats values (#33935) --- core/src/replay_stage.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 2aa0d82be0651e..8e355daeae44d2 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -260,8 +260,6 @@ pub struct ReplayTiming { start_leader_elapsed: u64, reset_bank_elapsed: u64, voting_elapsed: u64, - vote_push_us: u64, - vote_send_us: u64, generate_vote_us: u64, update_commitment_cache_us: u64, select_forks_elapsed: u64, @@ -337,8 +335,6 @@ impl ReplayTiming { if elapsed_ms > 1000 { datapoint_info!( "replay-loop-voting-stats", - ("vote_push_us", self.vote_push_us, i64), - ("vote_send_us", 
self.vote_send_us, i64), ("generate_vote_us", self.generate_vote_us, i64), ( "update_commitment_cache_us", From 25a29c9b7fc97f90ab6422acb046e9d824d8660a Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 1 Nov 2023 11:21:25 -0600 Subject: [PATCH 34/98] solana-tokens: unpub/remove some helpers (#33937) * Unpub token helpers * Remove unused method --- tokens/src/spl_token.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tokens/src/spl_token.rs b/tokens/src/spl_token.rs index e3d291c5c19dd8..d4d1c8cf5aac4f 100644 --- a/tokens/src/spl_token.rs +++ b/tokens/src/spl_token.rs @@ -36,11 +36,7 @@ pub fn update_decimals(client: &RpcClient, args: &mut Option<SplTokenArgs>) -> R Ok(()) } -pub fn spl_token_amount(amount: f64, decimals: u8) -> u64 { - (amount * 10_usize.pow(decimals as u32) as f64) as u64 } - -pub fn build_spl_token_instructions( +pub(crate) fn build_spl_token_instructions( allocation: &Allocation, args: &DistributeTokensArgs, do_create_associated_token_account: bool, @@ -77,7 +73,7 @@ pub fn build_spl_token_instructions( instructions } -pub fn check_spl_token_balances( +pub(crate) fn check_spl_token_balances( messages: &[Message], allocations: &[Allocation], client: &RpcClient, @@ -114,7 +110,7 @@ pub fn check_spl_token_balances( Ok(()) } -pub fn print_token_balances( +pub(crate) fn print_token_balances( client: &RpcClient, allocation: &Allocation, spl_token_args: &SplTokenArgs, From 808f67aeadcb5d0a8c1cb9685f91493df392babe Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Thu, 2 Nov 2023 01:21:53 +0600 Subject: [PATCH 35/98] check plugin name on plugin_reload rpc call (#33582) * check plugin name on plugin_reload rpc call * add name to error message --- .../src/geyser_plugin_manager.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 0698cf1a656363..92180d1991b56e 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -177,6 +177,22 @@ impl GeyserPluginManager { data: None, })?; + // Then see if a plugin with this name already exists. If so, abort + if self + .plugins + .iter() + .any(|plugin| plugin.name().eq(new_plugin.name())) + { + return Err(jsonrpc_core::Error { + code: ErrorCode::InvalidRequest, + message: format!( + "There already exists a plugin named {} loaded, while reloading {name}. 
Did not load requested plugin", + new_plugin.name() + ), + data: None, + }); + } + // Attempt to on_load with new plugin match new_plugin.on_load(new_parsed_config_file) { // On success, push plugin and library From 1c00d5d81a7bc32fd80203198a277e3520371d84 Mon Sep 17 00:00:00 2001 From: norwnd <112318969+norwnd@users.noreply.github.com> Date: Fri, 3 Nov 2023 03:06:00 +0300 Subject: [PATCH 36/98] cli: solana-tokens, validate inputs gracefully (#33926) * cli: solana-tokens, refactor imports * cli: solana-tokens, validate inputs gracefully * change Allocation struct field types to simplify things * fix typos, apply some review suggestions * preserve backward compatibility for public APIs * apply latest review suggestions * address next batch of review comments --------- Co-authored-by: norwnd --- tokens/src/commands.rs | 563 ++++++++++++++++++++++++++++------------ tokens/src/spl_token.rs | 12 +- 2 files changed, 402 insertions(+), 173 deletions(-) diff --git a/tokens/src/commands.rs b/tokens/src/commands.rs index c10ad508d61a1c..8219ffa858ec24 100644 --- a/tokens/src/commands.rs +++ b/tokens/src/commands.rs @@ -42,6 +42,7 @@ use { std::{ cmp::{self}, io, + str::FromStr, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -51,6 +52,7 @@ use { }, }; +/// Allocation is a helper (mostly for tests); prefer using TypedAllocation instead when possible. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub struct Allocation { pub recipient: String, pub amount: u64, pub lockup_date: String, } +/// TypedAllocation is the same as Allocation but with typed fields. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct TypedAllocation { + pub recipient: Pubkey, + pub amount: u64, + pub lockup_date: Option<DateTime<Utc>>, +} + #[derive(Debug, PartialEq, Eq)] pub enum FundingSource { FeePayer, @@ -98,8 +108,20 @@ type StakeExtras = Vec<(Keypair, Option<DateTime<Utc>>)>; pub enum Error { #[error("I/O error")] IoError(#[from] io::Error), + #[error("CSV file seems to be empty")] + CsvIsEmptyError, #[error("CSV error")] CsvError(#[from] csv::Error), + #[error("Bad input data for pubkey: {input}, error: {err}")] + BadInputPubkeyError { + input: String, + err: pubkey::ParsePubkeyError, + }, + #[error("Bad input data for lockup date: {input}, error: {err}")] + BadInputLockupDate { + input: String, + err: chrono::ParseError, + }, #[error("PickleDb error")] PickleDbError(#[from] pickledb::error::Error), #[error("Transport error")] @@ -118,15 +140,15 @@ pub enum Error { ExitSignal, } -fn merge_allocations(allocations: &[Allocation]) -> Vec<Allocation> { +fn merge_allocations(allocations: &[TypedAllocation]) -> Vec<TypedAllocation> { let mut allocation_map = IndexMap::new(); for allocation in allocations { allocation_map .entry(&allocation.recipient) - .or_insert(Allocation { - recipient: allocation.recipient.clone(), + .or_insert(TypedAllocation { + recipient: allocation.recipient, amount: 0, - lockup_date: "".to_string(), + lockup_date: None, }) .amount += allocation.amount; } @@ -134,13 +156,13 @@ fn merge_allocations(allocations: &[Allocation]) -> Vec<Allocation> { } /// Return true if the recipient and lockups are the same -fn has_same_recipient(allocation: &Allocation, transaction_info: &TransactionInfo) -> bool { - allocation.recipient == transaction_info.recipient.to_string() - && allocation.lockup_date.parse().ok() == transaction_info.lockup_date +fn has_same_recipient(allocation: &TypedAllocation, transaction_info: &TransactionInfo) -> bool { + allocation.recipient == transaction_info.recipient + &&
allocation.lockup_date == transaction_info.lockup_date } fn apply_previous_transactions( - allocations: &mut Vec<Allocation>, + allocations: &mut Vec<TypedAllocation>, transaction_infos: &[TransactionInfo], ) { for transaction_info in transaction_infos { @@ -179,7 +201,7 @@ fn transfer( } fn distribution_instructions( - allocation: &Allocation, + allocation: &TypedAllocation, new_stake_account_address: &Pubkey, args: &DistributeTokensArgs, lockup_date: Option<DateTime<Utc>>, @@ -193,7 +215,7 @@ fn distribution_instructions( // No stake args; a simple token transfer. None => { let from = args.sender_keypair.pubkey(); - let to = allocation.recipient.parse().unwrap(); + let to = allocation.recipient; let lamports = allocation.amount; let instruction = system_instruction::transfer(&from, &to, lamports); vec![instruction] } // Stake args provided, so create a recipient stake account. Some(stake_args) => { let unlocked_sol = stake_args.unlocked_sol; let sender_pubkey = args.sender_keypair.pubkey(); - let recipient = allocation.recipient.parse().unwrap(); + let recipient = allocation.recipient; let mut instructions = match &stake_args.sender_stake_args { // No source stake account, so create a recipient stake account directly. @@ -304,7 +326,7 @@ fn distribution_instructions( fn build_messages( client: &RpcClient, db: &mut PickleDb, - allocations: &[Allocation], + allocations: &[TypedAllocation], args: &DistributeTokensArgs, exit: Arc<AtomicBool>, messages: &mut Vec<Message>, @@ -318,7 +340,7 @@ fn build_messages( let associated_token_addresses = allocation_chunk .iter() .map(|x| { - let wallet_address = x.recipient.parse().unwrap(); + let wallet_address = x.recipient; get_associated_token_address(&wallet_address, &spl_token_args.mint) }) .collect::<Vec<_>>(); @@ -333,11 +355,7 @@ fn build_messages( return Err(Error::ExitSignal); } let new_stake_account_keypair = Keypair::new(); - let lockup_date = if allocation.lockup_date.is_empty() { - None - } else { - Some(allocation.lockup_date.parse::<DateTime<Utc>>().unwrap()) - }; + let lockup_date = allocation.lockup_date; let do_create_associated_token_account = if let Some(spl_token_args) = &args.spl_token_args { @@ -382,7 +400,7 @@ fn build_messages( fn send_messages( client: &RpcClient, db: &mut PickleDb, - allocations: &[Allocation], + allocations: &[TypedAllocation], args: &DistributeTokensArgs, exit: Arc<AtomicBool>, messages: Vec<Message>, @@ -404,7 +422,7 @@ fn send_messages( signers.push(&*sender_stake_args.stake_authority); signers.push(&*sender_stake_args.withdraw_authority); signers.push(&new_stake_account_keypair); - if !allocation.lockup_date.is_empty() { + if allocation.lockup_date.is_some() { if let Some(lockup_authority) = &sender_stake_args.lockup_authority { signers.push(&**lockup_authority); } else { @@ -435,7 +453,7 @@ fn send_messages( args.stake_args.as_ref().map(|_| &new_stake_account_address); db::set_transaction_info( db, - &allocation.recipient.parse().unwrap(), + &allocation.recipient, allocation.amount, &transaction, new_stake_account_address_option, @@ -455,7 +473,7 @@ fn send_messages( fn distribute_allocations( client: &RpcClient, db: &mut PickleDb, - allocations: &[Allocation], + allocations: &[TypedAllocation], args: &DistributeTokensArgs, exit: Arc<AtomicBool>, ) -> Result<(), Error> { @@ -490,63 +508,91 @@ fn distribute_allocations( fn read_allocations( input_csv: &str, transfer_amount: Option<u64>, - require_lockup_heading: bool, + with_lockup: bool, raw_amount: bool, -) -> io::Result<Vec<Allocation>> { +) -> Result<Vec<TypedAllocation>, Error> { let mut rdr = ReaderBuilder::new().trim(Trim::All).from_path(input_csv)?; let allocations = if let Some(amount) = transfer_amount { let
recipients: Vec<String> = rdr - .deserialize() - .map(|recipient| recipient.unwrap()) - .collect(); - recipients - .into_iter() - .map(|recipient| Allocation { - recipient, - amount, - lockup_date: "".to_string(), + rdr.deserialize() + .map(|recipient| { + let recipient: String = recipient?; + let recipient = + Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { + input: recipient, + err, + })?; + Ok(TypedAllocation { + recipient, + amount, + lockup_date: None, + }) }) - .collect() - } else if require_lockup_heading { - let recipients: Vec<(String, f64, String)> = rdr - .deserialize() - .map(|recipient| recipient.unwrap()) - .collect(); - recipients - .into_iter() - .map(|(recipient, amount, lockup_date)| Allocation { - recipient, - amount: sol_to_lamports(amount), - lockup_date, + .collect::<Result<Vec<TypedAllocation>, Error>>()? + } else if with_lockup { + // We only support SOL token in "require lockup" mode. + rdr.deserialize() + .map(|recipient| { + let (recipient, amount, lockup_date): (String, f64, String) = recipient?; + let recipient = + Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { + input: recipient, + err, + })?; + let lockup_date = if !lockup_date.is_empty() { + let lockup_date = lockup_date.parse::<DateTime<Utc>>().map_err(|err| { + Error::BadInputLockupDate { + input: lockup_date, + err, + } + })?; + Some(lockup_date) + } else { + // an empty lockup date means no lockup; it's okay to have only some lockups specified + None + }; + Ok(TypedAllocation { + recipient, + amount: sol_to_lamports(amount), + lockup_date, + }) }) - .collect() + .collect::<Result<Vec<TypedAllocation>, Error>>()? } else if raw_amount { - let recipients: Vec<(String, u64)> = rdr - .deserialize() - .map(|recipient| recipient.unwrap()) - .collect(); - recipients - .into_iter() - .map(|(recipient, amount)| Allocation { - recipient, - amount, - lockup_date: "".to_string(), + rdr.deserialize() + .map(|recipient| { + let (recipient, amount): (String, u64) = recipient?; + let recipient = + Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { + input: recipient, + err, + })?; + Ok(TypedAllocation { + recipient, + amount, + lockup_date: None, + }) }) - .collect() + .collect::<Result<Vec<TypedAllocation>, Error>>()? } else { - let recipients: Vec<(String, f64)> = rdr - .deserialize() - .map(|recipient| recipient.unwrap()) - .collect(); - recipients - .into_iter() - .map(|(recipient, amount)| Allocation { - recipient, - amount: sol_to_lamports(amount), - lockup_date: "".to_string(), + rdr.deserialize() + .map(|recipient| { + let (recipient, amount): (String, f64) = recipient?; + let recipient = + Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { + input: recipient, + err, + })?; + Ok(TypedAllocation { + recipient, + amount: sol_to_lamports(amount), + lockup_date: None, + }) }) - .collect() + .collect::<Result<Vec<TypedAllocation>, Error>>()?
}; + if allocations.is_empty() { + return Err(Error::CsvIsEmptyError); + } Ok(allocations) } @@ -566,11 +612,11 @@ pub fn process_allocations( args: &DistributeTokensArgs, exit: Arc<AtomicBool>, ) -> Result<Option<usize>, Error> { - let require_lockup_heading = args.stake_args.is_some(); - let mut allocations: Vec<Allocation> = read_allocations( + let with_lockup = args.stake_args.is_some(); + let mut allocations: Vec<TypedAllocation> = read_allocations( &args.input_csv, args.transfer_amount, - require_lockup_heading, + with_lockup, args.spl_token_args.is_some(), )?; @@ -773,7 +819,7 @@ pub fn get_fee_estimate_for_messages( fn check_payer_balances( messages: &[Message], - allocations: &[Allocation], + allocations: &[TypedAllocation], client: &RpcClient, args: &DistributeTokensArgs, ) -> Result<(), Error> { @@ -857,7 +903,7 @@ pub fn process_balances( args: &BalancesArgs, exit: Arc<AtomicBool>, ) -> Result<(), Error> { - let allocations: Vec<Allocation> = + let allocations: Vec<TypedAllocation> = read_allocations(&args.input_csv, None, false, args.spl_token_args.is_some())?; let allocations = merge_allocations(&allocations); @@ -885,7 +931,7 @@ pub fn process_balances( if let Some(spl_token_args) = &args.spl_token_args { print_token_balances(client, allocation, spl_token_args)?; } else { - let address: Pubkey = allocation.recipient.parse().unwrap(); + let address: Pubkey = allocation.recipient; let expected = lamports_to_sol(allocation.amount); let actual = lamports_to_sol(client.get_balance(&address).unwrap()); println!( @@ -909,9 +955,13 @@ pub fn process_transaction_log(args: &TransactionLogArgs) -> Result<(), Error> { use { crate::db::check_output_file, - solana_sdk::{pubkey::Pubkey, signature::Keypair}, + solana_sdk::{ + pubkey::{self, Pubkey}, + signature::Keypair, + }, tempfile::{tempdir, NamedTempFile}, }; + pub fn test_process_distribute_tokens_with_client( client: &RpcClient, sender_keypair: Keypair, } else { sol_to_lamports(1000.0) }; - let alice_pubkey = solana_sdk::pubkey::new_rand(); + let alice_pubkey = pubkey::new_rand(); let allocations_file = NamedTempFile::new().unwrap(); let input_csv = allocations_file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(allocations_file); @@ -1039,7 +1089,7 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair: .unwrap(); let expected_amount = sol_to_lamports(1000.0); - let alice_pubkey = solana_sdk::pubkey::new_rand(); + let alice_pubkey = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(file); @@ -1161,7 +1211,7 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp .unwrap(); let expected_amount = sol_to_lamports(1000.0); - let alice_pubkey = solana_sdk::pubkey::new_rand(); + let alice_pubkey = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(file); @@ -1328,16 +1378,27 @@ mod tests { #[test] fn test_read_allocations() { - let alice_pubkey = solana_sdk::pubkey::new_rand(); - let allocation = Allocation { - recipient: alice_pubkey.to_string(), + let alice_pubkey = pubkey::new_rand(); + let allocation = TypedAllocation { + recipient: alice_pubkey, amount: 42, - lockup_date: "".to_string(), + lockup_date: None, }; let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut
wtr = csv::WriterBuilder::new().from_writer(file); - wtr.serialize(&allocation).unwrap(); + wtr.serialize(( + "recipient".to_string(), + "amount".to_string(), + "require_lockup".to_string(), + )) + .unwrap(); + wtr.serialize(( + allocation.recipient.to_string(), + allocation.amount, + allocation.lockup_date, + )) + .unwrap(); wtr.flush().unwrap(); assert_eq!( @@ -1345,10 +1406,10 @@ mod tests { vec![allocation] ); - let allocation_sol = Allocation { - recipient: alice_pubkey.to_string(), + let allocation_sol = TypedAllocation { + recipient: alice_pubkey, amount: sol_to_lamports(42.0), - lockup_date: "".to_string(), + lockup_date: None, }; assert_eq!( @@ -1367,8 +1428,8 @@ mod tests { #[test] fn test_read_allocations_no_lockup() { - let pubkey0 = solana_sdk::pubkey::new_rand(); - let pubkey1 = solana_sdk::pubkey::new_rand(); + let pubkey0 = pubkey::new_rand(); + let pubkey1 = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(file); @@ -1379,15 +1440,15 @@ mod tests { wtr.flush().unwrap(); let expected_allocations = vec![ - Allocation { - recipient: pubkey0.to_string(), + TypedAllocation { + recipient: pubkey0, amount: sol_to_lamports(42.0), - lockup_date: "".to_string(), + lockup_date: None, }, - Allocation { - recipient: pubkey1.to_string(), + TypedAllocation { + recipient: pubkey1, amount: sol_to_lamports(43.0), - lockup_date: "".to_string(), + lockup_date: None, }, ]; assert_eq!( @@ -1397,42 +1458,210 @@ mod tests { } #[test] - #[should_panic] fn test_read_allocations_malformed() { - let pubkey0 = solana_sdk::pubkey::new_rand(); - let pubkey1 = solana_sdk::pubkey::new_rand(); + let pubkey0 = pubkey::new_rand(); + let pubkey1 = pubkey::new_rand(); + + // Empty file. let file = NamedTempFile::new().unwrap(); + let mut wtr = csv::WriterBuilder::new().from_writer(&file); + wtr.flush().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); - let mut wtr = csv::WriterBuilder::new().from_writer(file); + let got = read_allocations(&input_csv, None, false, false); + assert!(matches!(got, Err(Error::CsvIsEmptyError))); + + // Missing 2nd column. + let file = NamedTempFile::new().unwrap(); + let mut wtr = csv::WriterBuilder::new().from_writer(&file); + wtr.serialize("recipient".to_string()).unwrap(); + wtr.serialize(pubkey0.to_string()).unwrap(); + wtr.serialize(pubkey1.to_string()).unwrap(); + wtr.flush().unwrap(); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, false, false); + assert!(matches!(got, Err(Error::CsvError(..)))); + + // Missing 3rd column. 
+ let file = NamedTempFile::new().unwrap(); + let mut wtr = csv::WriterBuilder::new().from_writer(&file); wtr.serialize(("recipient".to_string(), "amount".to_string())) .unwrap(); - wtr.serialize((&pubkey0.to_string(), 42.0)).unwrap(); - wtr.serialize((&pubkey1.to_string(), 43.0)).unwrap(); + wtr.serialize((pubkey0.to_string(), "42.0".to_string())) + .unwrap(); + wtr.serialize((pubkey1.to_string(), "43.0".to_string())) + .unwrap(); wtr.flush().unwrap(); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, true, false); + assert!(matches!(got, Err(Error::CsvError(..)))); + + let generate_csv_file = |header: (String, String, String), + data: Vec<(String, String, String)>, + file: &NamedTempFile| { + let mut wtr = csv::WriterBuilder::new().from_writer(file); + wtr.serialize(header).unwrap(); + wtr.serialize(&data[0]).unwrap(); + wtr.serialize(&data[1]).unwrap(); + wtr.flush().unwrap(); + }; - let expected_allocations = vec![ - Allocation { - recipient: pubkey0.to_string(), - amount: sol_to_lamports(42.0), - lockup_date: "".to_string(), - }, - Allocation { - recipient: pubkey1.to_string(), - amount: sol_to_lamports(43.0), - lockup_date: "".to_string(), - }, - ]; - assert_eq!( - read_allocations(&input_csv, None, true, false).unwrap(), - expected_allocations + let default_header = ( + "recipient".to_string(), + "amount".to_string(), + "require_lockup".to_string(), + ); + + // Bad pubkey (default). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + (pubkey0.to_string(), "42.0".to_string(), "".to_string()), + ("bad pubkey".to_string(), "43.0".to_string(), "".to_string()), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, None, false, false).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputPubkeyError { input, .. } if input == *"bad pubkey") + ); + // Bad pubkey (with transfer amount). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + (pubkey0.to_string(), "42.0".to_string(), "".to_string()), + ("bad pubkey".to_string(), "43.0".to_string(), "".to_string()), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, Some(123), false, false).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputPubkeyError { input, .. } if input == *"bad pubkey") + ); + // Bad pubkey (with require lockup). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + ( + pubkey0.to_string(), + "42.0".to_string(), + "2021-02-07T00:00:00Z".to_string(), + ), + ( + "bad pubkey".to_string(), + "43.0".to_string(), + "2021-02-07T00:00:00Z".to_string(), + ), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, None, true, false).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputPubkeyError { input, .. } if input == *"bad pubkey") + ); + // Bad pubkey (with raw amount). 
+ let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + (pubkey0.to_string(), "42".to_string(), "".to_string()), + ("bad pubkey".to_string(), "43".to_string(), "".to_string()), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, None, false, true).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputPubkeyError { input, .. } if input == *"bad pubkey") + ); + + // Bad value in 2nd column (default). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + ( + pubkey0.to_string(), + "bad amount".to_string(), + "".to_string(), + ), + ( + pubkey1.to_string(), + "43.0".to_string().to_string(), + "".to_string(), + ), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, false, false); + assert!(matches!(got, Err(Error::CsvError(..)))); + // Bad value in 2nd column (with require lockup). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + ( + pubkey0.to_string(), + "bad amount".to_string(), + "".to_string(), + ), + (pubkey1.to_string(), "43.0".to_string(), "".to_string()), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, true, false); + assert!(matches!(got, Err(Error::CsvError(..)))); + // Bad value in 2nd column (with raw amount). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + (pubkey0.to_string(), "42".to_string(), "".to_string()), + (pubkey1.to_string(), "43.0".to_string(), "".to_string()), // bad raw amount + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, false, true); + assert!(matches!(got, Err(Error::CsvError(..)))); + + // Bad value in 3rd column. + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + ( + pubkey0.to_string(), + "42.0".to_string(), + "2021-01-07T00:00:00Z".to_string(), + ), + ( + pubkey1.to_string(), + "43.0".to_string(), + "bad lockup date".to_string(), + ), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, None, true, false).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputLockupDate { input, .. 
} if input == *"bad lockup date") ); } #[test] fn test_read_allocations_transfer_amount() { - let pubkey0 = solana_sdk::pubkey::new_rand(); - let pubkey1 = solana_sdk::pubkey::new_rand(); - let pubkey2 = solana_sdk::pubkey::new_rand(); + let pubkey0 = pubkey::new_rand(); + let pubkey1 = pubkey::new_rand(); + let pubkey2 = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(file); @@ -1445,20 +1674,20 @@ mod tests { let amount = sol_to_lamports(1.5); let expected_allocations = vec![ - Allocation { - recipient: pubkey0.to_string(), + TypedAllocation { + recipient: pubkey0, amount, - lockup_date: "".to_string(), + lockup_date: None, }, - Allocation { - recipient: pubkey1.to_string(), + TypedAllocation { + recipient: pubkey1, amount, - lockup_date: "".to_string(), + lockup_date: None, }, - Allocation { - recipient: pubkey2.to_string(), + TypedAllocation { + recipient: pubkey2, amount, - lockup_date: "".to_string(), + lockup_date: None, }, ]; assert_eq!( @@ -1469,18 +1698,18 @@ mod tests { #[test] fn test_apply_previous_transactions() { - let alice = solana_sdk::pubkey::new_rand(); - let bob = solana_sdk::pubkey::new_rand(); + let alice = pubkey::new_rand(); + let bob = pubkey::new_rand(); let mut allocations = vec![ - Allocation { - recipient: alice.to_string(), + TypedAllocation { + recipient: alice, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }, - Allocation { - recipient: bob.to_string(), + TypedAllocation { + recipient: bob, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }, ]; let transaction_infos = vec![TransactionInfo { @@ -1493,24 +1722,24 @@ mod tests { // Ensure that we applied the transaction to the allocation with // a matching recipient address (to bob, not alice). 
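        // With TypedAllocation the recipient is a Pubkey rather than a String,
        // so the assertion below compares keys directly, with no to_string()
        // round trip.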
- assert_eq!(allocations[0].recipient, alice.to_string()); + assert_eq!(allocations[0].recipient, alice); } #[test] fn test_has_same_recipient() { - let alice_pubkey = solana_sdk::pubkey::new_rand(); - let bob_pubkey = solana_sdk::pubkey::new_rand(); + let alice_pubkey = pubkey::new_rand(); + let bob_pubkey = pubkey::new_rand(); let lockup0 = "2021-01-07T00:00:00Z".to_string(); let lockup1 = "9999-12-31T23:59:59Z".to_string(); - let alice_alloc = Allocation { - recipient: alice_pubkey.to_string(), + let alice_alloc = TypedAllocation { + recipient: alice_pubkey, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }; - let alice_alloc_lockup0 = Allocation { - recipient: alice_pubkey.to_string(), + let alice_alloc_lockup0 = TypedAllocation { + recipient: alice_pubkey, amount: sol_to_lamports(1.0), - lockup_date: lockup0.clone(), + lockup_date: lockup0.parse().ok(), }; let alice_info = TransactionInfo { recipient: alice_pubkey, @@ -1550,13 +1779,13 @@ mod tests { #[test] fn test_set_split_stake_lockup() { let lockup_date_str = "2021-01-07T00:00:00Z"; - let allocation = Allocation { - recipient: Pubkey::default().to_string(), + let allocation = TypedAllocation { + recipient: Pubkey::default(), amount: sol_to_lamports(1.002_282_880), - lockup_date: lockup_date_str.to_string(), + lockup_date: lockup_date_str.parse().ok(), }; - let stake_account_address = solana_sdk::pubkey::new_rand(); - let new_stake_account_address = solana_sdk::pubkey::new_rand(); + let stake_account_address = pubkey::new_rand(); + let new_stake_account_address = pubkey::new_rand(); let lockup_authority = Keypair::new(); let lockup_authority_address = lockup_authority.pubkey(); let sender_stake_args = SenderStakeArgs { @@ -1613,12 +1842,12 @@ mod tests { sender_keypair_file: &str, fee_payer: &str, stake_args: Option, - ) -> (Vec, DistributeTokensArgs) { - let recipient = solana_sdk::pubkey::new_rand(); - let allocations = vec![Allocation { - recipient: recipient.to_string(), + ) -> (Vec, DistributeTokensArgs) { + let recipient = pubkey::new_rand(); + let allocations = vec![TypedAllocation { + recipient, amount: allocation_amount, - lockup_date: "".to_string(), + lockup_date: None, }]; let args = DistributeTokensArgs { sender_keypair: read_keypair_file(sender_keypair_file).unwrap().into(), @@ -1890,10 +2119,10 @@ mod tests { // Underfunded stake-account let expensive_allocation_amount = 5000.0; - let expensive_allocations = vec![Allocation { - recipient: solana_sdk::pubkey::new_rand().to_string(), + let expensive_allocations = vec![TypedAllocation { + recipient: pubkey::new_rand(), amount: sol_to_lamports(expensive_allocation_amount), - lockup_date: "".to_string(), + lockup_date: None, }]; let err_result = check_payer_balances( &[one_signer_message(&client)], @@ -2108,10 +2337,10 @@ mod tests { spl_token_args: None, transfer_amount: None, }; - let allocation = Allocation { - recipient: recipient.to_string(), + let allocation = TypedAllocation { + recipient, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }; let mut messages: Vec = vec![]; @@ -2230,10 +2459,10 @@ mod tests { spl_token_args: None, transfer_amount: None, }; - let allocation = Allocation { - recipient: recipient.to_string(), + let allocation = TypedAllocation { + recipient, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }; let message = transaction.message.clone(); @@ -2329,10 +2558,10 @@ mod tests { .to_string(); let mut db = db::open_db(&db_file, false).unwrap(); 
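As an aside on the lockup handling in the tests above: `lockup0.parse().ok()` works because an RFC 3339 string parses directly into a typed date, and unparseable input simply becomes `None`. A minimal sketch of that pattern, assuming `lockup_date` is an `Option<chrono::DateTime<Utc>>` (the concrete type is not shown in this hunk):

use chrono::{DateTime, Utc};

fn main() {
    // A well-formed RFC 3339 timestamp parses into Some(..).
    let lockup: Option<DateTime<Utc>> = "2021-01-07T00:00:00Z".parse().ok();
    assert!(lockup.is_some());

    // Garbage input yields None instead of an error or panic, matching the
    // `lockup0.parse().ok()` usage in the tests above.
    let bad = "bad lockup date".parse::<DateTime<Utc>>().ok();
    assert!(bad.is_none());
}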
let recipient = Pubkey::new_unique(); - let allocation = Allocation { - recipient: recipient.to_string(), + let allocation = TypedAllocation { + recipient, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }; // This is just dummy data; Args will not affect messages let args = DistributeTokensArgs { diff --git a/tokens/src/spl_token.rs b/tokens/src/spl_token.rs index d4d1c8cf5aac4f..3e998c1a124e8a 100644 --- a/tokens/src/spl_token.rs +++ b/tokens/src/spl_token.rs @@ -1,7 +1,7 @@ use { crate::{ args::{DistributeTokensArgs, SplTokenArgs}, - commands::{get_fee_estimate_for_messages, Allocation, Error, FundingSource}, + commands::{get_fee_estimate_for_messages, Error, FundingSource, TypedAllocation}, }, console::style, solana_account_decoder::parse_token::{real_number_string, real_number_string_trimmed}, @@ -37,7 +37,7 @@ pub fn update_decimals(client: &RpcClient, args: &mut Option) -> R } pub(crate) fn build_spl_token_instructions( - allocation: &Allocation, + allocation: &TypedAllocation, args: &DistributeTokensArgs, do_create_associated_token_account: bool, ) -> Vec { @@ -45,7 +45,7 @@ pub(crate) fn build_spl_token_instructions( .spl_token_args .as_ref() .expect("spl_token_args must be some"); - let wallet_address = allocation.recipient.parse().unwrap(); + let wallet_address = allocation.recipient; let associated_token_address = get_associated_token_address(&wallet_address, &spl_token_args.mint); let mut instructions = vec![]; @@ -75,7 +75,7 @@ pub(crate) fn build_spl_token_instructions( pub(crate) fn check_spl_token_balances( messages: &[Message], - allocations: &[Allocation], + allocations: &[TypedAllocation], client: &RpcClient, args: &DistributeTokensArgs, created_accounts: u64, @@ -112,10 +112,10 @@ pub(crate) fn check_spl_token_balances( pub(crate) fn print_token_balances( client: &RpcClient, - allocation: &Allocation, + allocation: &TypedAllocation, spl_token_args: &SplTokenArgs, ) -> Result<(), Error> { - let address = allocation.recipient.parse().unwrap(); + let address = allocation.recipient; let expected = allocation.amount; let associated_token_address = get_associated_token_address(&address, &spl_token_args.mint); let recipient_account = client From a4a66026e17f271e23ea22e090c57a9598e88542 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 3 Nov 2023 16:02:12 +0900 Subject: [PATCH 37/98] Introduce InstalledSchedulerPool trait (#33934) * Introduce InstalledSchedulerPool * Use type alias * Remove log_prefix for now... 
* Simplify return_to_pool() * Simplify InstalledScheduler's context methods * Reorder trait methods semantically * Simplify Arc handling --- Cargo.lock | 34 ++++++ Cargo.toml | 1 + ledger/src/blockstore_processor.rs | 13 ++- programs/sbf/Cargo.lock | 34 ++++++ runtime/Cargo.toml | 1 + runtime/src/bank_forks.rs | 23 +++- runtime/src/installed_scheduler_pool.rs | 145 +++++++++++++++++++++++- 7 files changed, 242 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 610b5edb49c5f6..862b9ca59021dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -167,6 +167,20 @@ version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +[[package]] +name = "aquamarine" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1" +dependencies = [ + "include_dir", + "itertools", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "arc-swap" version = "1.5.0" @@ -2611,6 +2625,25 @@ dependencies = [ "version_check", ] +[[package]] +name = "include_dir" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "index_list" version = "0.2.7" @@ -6943,6 +6976,7 @@ dependencies = [ name = "solana-runtime" version = "1.18.0" dependencies = [ + "aquamarine", "arrayref", "assert_matches", "base64 0.21.5", diff --git a/Cargo.toml b/Cargo.toml index e6168de4d7aec8..4ae0c286356643 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -132,6 +132,7 @@ edition = "2021" [workspace.dependencies] Inflector = "0.11.4" +aquamarine = "0.3.2" aes-gcm-siv = "0.10.3" ahash = "0.8.6" anyhow = "1.0.75" diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index ccdfb97ece81f7..5218b55c4b9050 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1945,7 +1945,7 @@ pub mod tests { genesis_utils::{ self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }, - installed_scheduler_pool::{MockInstalledScheduler, WaitReason}, + installed_scheduler_pool::{MockInstalledScheduler, SchedulingContext, WaitReason}, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -4527,11 +4527,17 @@ pub mod tests { .. 
} = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let context = SchedulingContext::new(bank.clone()); let txs = create_test_transactions(&mint_keypair, &genesis_config.hash()); let mut mocked_scheduler = MockInstalledScheduler::new(); let mut seq = mockall::Sequence::new(); + mocked_scheduler + .expect_context() + .times(1) + .in_sequence(&mut seq) + .return_const(context); mocked_scheduler .expect_schedule_execution() .times(txs.len()) @@ -4542,6 +4548,11 @@ pub mod tests { .times(1) .in_sequence(&mut seq) .returning(|_| None); + mocked_scheduler + .expect_return_to_pool() + .times(1) + .in_sequence(&mut seq) + .returning(|| ()); let bank = BankWithScheduler::new(bank, Some(Box::new(mocked_scheduler))); let batch = bank.prepare_sanitized_batch(&txs); diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a81280a23341d0..cd8e73c093d4a7 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -156,6 +156,20 @@ version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +[[package]] +name = "aquamarine" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1" +dependencies = [ + "include_dir", + "itertools", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "arc-swap" version = "1.5.0" @@ -2245,6 +2259,25 @@ dependencies = [ "version_check", ] +[[package]] +name = "include_dir" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "index_list" version = "0.2.7" @@ -5648,6 +5681,7 @@ dependencies = [ name = "solana-runtime" version = "1.18.0" dependencies = [ + "aquamarine", "arrayref", "base64 0.21.5", "bincode", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index e67ee5d2a66a59..f0509811497037 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -10,6 +10,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +aquamarine = { workspace = true } arrayref = { workspace = true } base64 = { workspace = true } bincode = { workspace = true } diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index ced6d5a0c61813..dabd90e4c2c835 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -4,7 +4,9 @@ use { crate::{ accounts_background_service::{AbsRequestSender, SnapshotRequest, SnapshotRequestKind}, bank::{epoch_accounts_hash_utils, Bank, SquashTiming}, - installed_scheduler_pool::BankWithScheduler, + installed_scheduler_pool::{ + BankWithScheduler, InstalledSchedulerPoolArc, SchedulingContext, + }, snapshot_config::SnapshotConfig, }, log::*, @@ -72,6 +74,7 @@ pub struct BankForks { last_accounts_hash_slot: Slot, in_vote_only_mode: Arc, highest_slot_at_startup: Slot, + scheduler_pool: Option, } impl Index for BankForks { @@ -203,6 +206,7 @@ impl BankForks { last_accounts_hash_slot: root, in_vote_only_mode: Arc::new(AtomicBool::new(false)), 
highest_slot_at_startup: 0, + scheduler_pool: None, })); for bank in bank_forks.read().unwrap().banks.values() { @@ -215,11 +219,26 @@ impl BankForks { bank_forks } + pub fn install_scheduler_pool(&mut self, pool: InstalledSchedulerPoolArc) { + info!("Installed new scheduler_pool into bank_forks: {:?}", pool); + assert!( + self.scheduler_pool.replace(pool).is_none(), + "Reinstalling scheduler pool isn't supported" + ); + } + pub fn insert(&mut self, mut bank: Bank) -> BankWithScheduler { bank.check_program_modification_slot = self.root.load(Ordering::Relaxed) < self.highest_slot_at_startup; - let bank = BankWithScheduler::new_without_scheduler(Arc::new(bank)); + let bank = Arc::new(bank); + let bank = if let Some(scheduler_pool) = &self.scheduler_pool { + let context = SchedulingContext::new(bank.clone()); + let scheduler = scheduler_pool.take_scheduler(context); + BankWithScheduler::new(bank, Some(scheduler)) + } else { + BankWithScheduler::new_without_scheduler(bank) + }; let prev = self.banks.insert(bank.slot(), bank.clone_with_scheduler()); assert!(prev.is_none()); let slot = bank.slot(); diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index 553a31c800e6e4..dde82f2a63f890 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -1,5 +1,24 @@ -//! Currently, there are only two things: minimal InstalledScheduler trait and an auxiliary type -//! called BankWithScheduler.. This file will be populated by later PRs to align with the filename. +//! Transaction processing glue code, mainly consisting of Object-safe traits +//! +//! [InstalledSchedulerPool] lends one of pooled [InstalledScheduler]s as wrapped in +//! [BankWithScheduler], which can be used by `ReplayStage` and `BankingStage` for transaction +//! execution. After use, the scheduler will be returned to the pool. +//! +//! [InstalledScheduler] can be fed with [SanitizedTransaction]s. Then, it schedules those +//! executions and commits those results into the associated _bank_. +//! +//! It's generally assumed that each [InstalledScheduler] is backed by multiple threads for +//! parallel transaction processing and there are multiple independent schedulers inside a single +//! instance of [InstalledSchedulerPool]. +//! +//! Dynamic dispatch was inevitable due to the desire to piggyback on +//! [BankForks](crate::bank_forks::BankForks)'s pruning for scheduler lifecycle management as the +//! common place both for `ReplayStage` and `BankingStage` and the resultant need of invoking +//! actual implementations provided by the dependent crate (`solana-unified-scheduler-pool`, which +//! in turn depends on `solana-ledger`, which in turn depends on `solana-runtime`), avoiding a +//! cyclic dependency. +//! +//! See [InstalledScheduler] for visualized interaction. 
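To make the lend-and-return lifecycle in the doc comment above concrete, here is a minimal, self-contained sketch. `ToyPool`, `PooledScheduler`, `Context`, and `Tx` are hypothetical stand-ins for illustration only, not the actual solana-unified-scheduler-pool implementation:

use std::sync::Mutex;

// Hypothetical stand-ins for a transaction and a per-bank scheduling context.
struct Tx(&'static str);

struct Context {
    slot: u64,
}

// Hypothetical scheduler mirroring the lifecycle described above: it is
// leased with a context, fed transactions, drained, then handed back.
struct PooledScheduler {
    context: Context,
}

impl PooledScheduler {
    fn schedule_execution(&self, tx: &Tx) {
        println!("slot {}: scheduling '{}'", self.context.slot, tx.0);
    }

    fn wait_for_termination(&mut self) {
        println!("slot {}: scheduled work drained", self.context.slot);
    }

    // Returning consumes the handle, so a stale scheduler cannot be reused
    // by accident; only the pool can lease it out again.
    fn return_to_pool(self: Box<Self>, pool: &ToyPool) {
        pool.idle.lock().unwrap().push(self);
    }
}

struct ToyPool {
    idle: Mutex<Vec<Box<PooledScheduler>>>,
}

impl ToyPool {
    // Lend an idle scheduler if one exists, rebinding it to the new bank's
    // context; otherwise create a fresh one.
    fn take_scheduler(&self, context: Context) -> Box<PooledScheduler> {
        match self.idle.lock().unwrap().pop() {
            Some(mut scheduler) => {
                scheduler.context = context;
                scheduler
            }
            None => Box::new(PooledScheduler { context }),
        }
    }
}

fn main() {
    let pool = ToyPool {
        idle: Mutex::new(Vec::new()),
    };
    let mut scheduler = pool.take_scheduler(Context { slot: 1 });
    scheduler.schedule_execution(&Tx("transfer"));
    scheduler.wait_for_termination();
    scheduler.return_to_pool(&pool);
}

In the real code the handle is a trait object (`Box<dyn InstalledScheduler>`, aliased as `DefaultInstalledSchedulerBox` in the diff below), which is why the methods here mirror an object-safe shape.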
use { crate::bank::Bank, @@ -7,6 +26,7 @@ use { solana_program_runtime::timings::ExecuteTimings, solana_sdk::{ hash::Hash, + slot_history::Slot, transaction::{Result, SanitizedTransaction}, }, std::{ @@ -18,6 +38,57 @@ use { #[cfg(feature = "dev-context-only-utils")] use {mockall::automock, qualifier_attr::qualifiers}; +pub trait InstalledSchedulerPool: Send + Sync + Debug { + fn take_scheduler(&self, context: SchedulingContext) -> DefaultInstalledSchedulerBox; +} + +#[cfg_attr(doc, aquamarine::aquamarine)] +/// Schedules, executes, and commits transactions under encapsulated implementation +/// +/// The following chart illustrates the ownership/reference interaction between inter-dependent +/// objects across crates: +/// +/// ```mermaid +/// graph TD +/// Bank["Arc#lt;Bank#gt;"] +/// +/// subgraph solana-runtime +/// BankForks; +/// BankWithScheduler; +/// Bank; +/// LoadExecuteAndCommitTransactions(["load_execute_and_commit_transactions()"]); +/// SchedulingContext; +/// InstalledSchedulerPool{{InstalledSchedulerPool}}; +/// InstalledScheduler{{InstalledScheduler}}; +/// end +/// +/// subgraph solana-unified-scheduler-pool +/// SchedulerPool; +/// PooledScheduler; +/// ScheduleExecution(["schedule_execution()"]); +/// end +/// +/// subgraph solana-ledger +/// ExecuteBatch(["execute_batch()"]); +/// end +/// +/// ScheduleExecution -. calls .-> ExecuteBatch; +/// BankWithScheduler -. dyn-calls .-> ScheduleExecution; +/// ExecuteBatch -. calls .-> LoadExecuteAndCommitTransactions; +/// linkStyle 0,1,2 stroke:gray,color:gray; +/// +/// BankForks -- owns --> BankWithScheduler; +/// BankForks -- owns --> InstalledSchedulerPool; +/// BankWithScheduler -- refs --> Bank; +/// BankWithScheduler -- owns --> InstalledScheduler; +/// SchedulingContext -- refs --> Bank; +/// InstalledScheduler -- owns --> SchedulingContext; +/// +/// SchedulerPool -- owns --> PooledScheduler; +/// SchedulerPool -. impls .-> InstalledSchedulerPool; +/// PooledScheduler -. impls .-> InstalledScheduler; +/// PooledScheduler -- refs --> SchedulerPool; +/// ``` #[cfg_attr(feature = "dev-context-only-utils", automock)] // suppress false clippy complaints arising from mockall-derive: // warning: `#[must_use]` has no effect when applied to a struct field @@ -27,6 +98,9 @@ use {mockall::automock, qualifier_attr::qualifiers}; allow(unused_attributes, clippy::needless_lifetimes) )] pub trait InstalledScheduler: Send + Sync + Debug + 'static { + fn id(&self) -> SchedulerId; + fn context(&self) -> &SchedulingContext; + // Calling this is illegal as soon as wait_for_termination is called. fn schedule_execution<'a>( &'a self, @@ -50,10 +124,45 @@ pub trait InstalledScheduler: Send + Sync + Debug + 'static { /// two reasons later. #[must_use] fn wait_for_termination(&mut self, reason: &WaitReason) -> Option; + + fn return_to_pool(self: Box); } pub type DefaultInstalledSchedulerBox = Box; +pub type InstalledSchedulerPoolArc = Arc; + +pub type SchedulerId = u64; + +/// A small context to propagate a bank and its scheduling mode to the scheduler subsystem. +/// +/// Note that this isn't called `SchedulerContext` because the contexts aren't associated with +/// schedulers one by one. A scheduler will use many SchedulingContexts during its lifetime. +/// "Scheduling" part of the context name refers to an abstract slice of time to schedule and +/// execute all transactions for a given bank for block verification or production. 
A context is +/// expected to be used by a particular scheduler only for that duration of the time and to be +/// disposed by the scheduler. Then, the scheduler may work on different banks with new +/// `SchedulingContext`s. +#[derive(Clone, Debug)] +pub struct SchedulingContext { + // mode: SchedulingMode, // this will be added later. + bank: Arc, +} + +impl SchedulingContext { + pub fn new(bank: Arc) -> Self { + Self { bank } + } + + pub fn bank(&self) -> &Arc { + &self.bank + } + + pub fn slot(&self) -> Slot { + self.bank().slot() + } +} + pub type ResultWithTimings = (Result<()>, ExecuteTimings); /// A hint from the bank about the reason the caller is waiting on its scheduler termination. @@ -117,6 +226,13 @@ pub type InstalledSchedulerRwLock = RwLock> impl BankWithScheduler { #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn new(bank: Arc, scheduler: Option) -> Self { + if let Some(bank_in_context) = scheduler + .as_ref() + .map(|scheduler| scheduler.context().bank()) + { + assert!(Arc::ptr_eq(&bank, bank_in_context)); + } + Self { inner: Arc::new(BankWithSchedulerInner { bank, @@ -229,7 +345,8 @@ impl BankWithSchedulerInner { .as_mut() .and_then(|scheduler| scheduler.wait_for_termination(&reason)); if !reason.is_paused() { - drop(scheduler.take().expect("scheduler after waiting")); + let scheduler = scheduler.take().expect("scheduler after waiting"); + scheduler.return_to_pool(); } result_with_timings } else { @@ -296,12 +413,18 @@ mod tests { }; fn setup_mocked_scheduler_with_extra( + bank: Arc, wait_reasons: impl Iterator, f: Option, ) -> DefaultInstalledSchedulerBox { let mut mock = MockInstalledScheduler::new(); let mut seq = Sequence::new(); + mock.expect_context() + .times(1) + .in_sequence(&mut seq) + .return_const(SchedulingContext::new(bank)); + for wait_reason in wait_reasons { mock.expect_wait_for_termination() .with(mockall::predicate::eq(wait_reason)) @@ -316,6 +439,10 @@ mod tests { }); } + mock.expect_return_to_pool() + .times(1) + .in_sequence(&mut seq) + .returning(|| ()); if let Some(f) = f { f(&mut mock); } @@ -324,9 +451,11 @@ mod tests { } fn setup_mocked_scheduler( + bank: Arc, wait_reasons: impl Iterator, ) -> DefaultInstalledSchedulerBox { setup_mocked_scheduler_with_extra( + bank, wait_reasons, None:: ()>, ) @@ -338,8 +467,9 @@ mod tests { let bank = Arc::new(Bank::default_for_tests()); let bank = BankWithScheduler::new( - bank, + bank.clone(), Some(setup_mocked_scheduler( + bank, [WaitReason::TerminatedToFreeze].into_iter(), )), ); @@ -370,8 +500,9 @@ mod tests { let bank = Arc::new(Bank::default_for_tests()); let bank = BankWithScheduler::new( - bank, + bank.clone(), Some(setup_mocked_scheduler( + bank, [WaitReason::DroppedFromBankForks].into_iter(), )), ); @@ -384,8 +515,9 @@ mod tests { let bank = Arc::new(crate::bank::tests::create_simple_test_bank(42)); let bank = BankWithScheduler::new( - bank, + bank.clone(), Some(setup_mocked_scheduler( + bank, [ WaitReason::PausedForRecentBlockhash, WaitReason::TerminatedToFreeze, @@ -414,6 +546,7 @@ mod tests { )); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let mocked_scheduler = setup_mocked_scheduler_with_extra( + bank.clone(), [WaitReason::DroppedFromBankForks].into_iter(), Some(|mocked: &mut MockInstalledScheduler| { mocked From 79056341d570be6b0d2613a15ad56771de340a9d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Nov 2023 20:28:53 +0800 Subject: [PATCH 38/98] build(deps): bump futures-util from 
0.3.28 to 0.3.29 (#33924) * build(deps): bump futures-util from 0.3.28 to 0.3.29 Bumps [futures-util](https://github.com/rust-lang/futures-rs) from 0.3.28 to 0.3.29. - [Release notes](https://github.com/rust-lang/futures-rs/releases) - [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.28...0.3.29) --- updated-dependencies: - dependency-name: futures-util dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 28 ++++++++++++++-------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 28 ++++++++++++++-------------- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 862b9ca59021dc..53f59ba86b73c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2051,9 +2051,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", @@ -2061,9 +2061,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" @@ -2079,15 +2079,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", @@ -2096,21 +2096,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures 0.1.31", "futures-channel", diff --git a/Cargo.toml b/Cargo.toml index 4ae0c286356643..e46359e1e0df18 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -201,7 +201,7 @@ fnv = "1.0.7" fs-err = "2.9.0" fs_extra = "1.3.0" futures = "0.3.28" -futures-util = "0.3.28" +futures-util = "0.3.29" gag = "1.0.0" generic-array = { version = "0.14.7", default-features = false } gethostname = "0.2.3" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index cd8e73c093d4a7..5ec9cb05b0d36d 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1749,9 +1749,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", @@ -1759,9 +1759,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" @@ -1777,15 +1777,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", @@ -1794,21 +1794,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures 0.1.31", "futures-channel", From cc0e417ecb75b5c5c9797df579854e6a4a17df41 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Nov 2023 20:29:16 +0800 Subject: [PATCH 39/98] build(deps): bump tempfile from 3.8.0 to 3.8.1 (#33925) * build(deps): bump tempfile from 3.8.0 to 3.8.1 Bumps [tempfile](https://github.com/Stebalien/tempfile) from 3.8.0 to 3.8.1. - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/commits) --- updated-dependencies: - dependency-name: tempfile dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 43 +++++++++++++++++++++++++---------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 37 +++++++++++++++++++++-------------- 3 files changed, 50 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53f59ba86b73c0..b701495152c6c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -630,9 +630,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" dependencies = [ "serde", ] @@ -3017,9 +3017,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" @@ -3284,7 +3284,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "cfg-if 1.0.0", "libc", ] @@ -3553,7 +3553,7 @@ version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "cfg-if 1.0.0", "foreign-types", "libc", @@ -4063,7 +4063,7 @@ checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.3.3", + "bitflags 2.4.1", "lazy_static", "num-traits", "rand 0.8.5", @@ -4415,6 +4415,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.0" @@ -4636,11 +4645,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.3" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5ffa1efe7548069688cd7028f32591853cd7b5b756d41bcffd2353e4fc75b4" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", @@ -6066,7 +6075,7 @@ dependencies = [ name = "solana-frozen-abi" version = "1.18.0" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "block-buffer 0.10.4", "bs58", "bv", @@ -6277,7 +6286,7 @@ version = "1.18.0" dependencies = [ "assert_matches", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "bs58", "byteorder", "chrono", @@ -6639,7 +6648,7 @@ dependencies = [ "assert_matches", "base64 0.21.5", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "blake3", "borsh 0.10.3", "borsh 0.9.3", @@ -7065,7 +7074,7 @@ dependencies = [ "assert_matches", "base64 0.21.5", "bincode", - "bitflags 2.3.3", + 
"bitflags 2.4.1", "borsh 0.10.3", "bs58", "bytemuck", @@ -8174,13 +8183,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "rustix", "windows-sys 0.48.0", ] diff --git a/Cargo.toml b/Cargo.toml index e46359e1e0df18..d4a342952e5dcd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -402,7 +402,7 @@ sysctl = "0.4.6" systemstat = "0.2.3" tar = "0.4.40" tarpc = "0.29.0" -tempfile = "3.8.0" +tempfile = "3.8.1" test-case = "3.2.1" thiserror = "1.0.50" tiny-bip39 = "0.8.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 5ec9cb05b0d36d..d276f577335370 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -590,9 +590,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" dependencies = [ "serde", ] @@ -2679,9 +2679,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" @@ -3178,7 +3178,7 @@ version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3921,6 +3921,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.0" @@ -4121,11 +4130,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.3" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5ffa1efe7548069688cd7028f32591853cd7b5b756d41bcffd2353e4fc75b4" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", @@ -5211,7 +5220,7 @@ version = "1.18.0" dependencies = [ "assert_matches", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "byteorder 1.5.0", "chrono", "chrono-humanize", @@ -5398,7 +5407,7 @@ dependencies = [ "ark-serialize", "base64 0.21.5", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "blake3", "borsh 0.10.3", "borsh 0.9.3", @@ -6183,7 +6192,7 @@ dependencies = [ "assert_matches", "base64 0.21.5", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "borsh 0.10.3", "bs58", "bytemuck", @@ -7090,13 +7099,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "rustix", "windows-sys 0.48.0", ] From 43668c42462ed97d603eec14b865eccbea950f73 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Nov 2023 20:29:46 +0800 Subject: [PATCH 40/98] build(deps): bump num_enum from 0.7.0 to 0.7.1 (#33932) * build(deps): bump num_enum from 0.7.0 to 0.7.1 Bumps [num_enum](https://github.com/illicitonion/num_enum) from 0.7.0 to 0.7.1. - [Commits](https://github.com/illicitonion/num_enum/commits) --- updated-dependencies: - dependency-name: num_enum dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 24 ++++++++++++------------ 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b701495152c6c4..c76ae0f6f19b53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3447,11 +3447,11 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" +checksum = "683751d591e6d81200c39fb0d1032608b77724f34114db54f571ff1317b337c0" dependencies = [ - "num_enum_derive 0.7.0", + "num_enum_derive 0.7.1", ] [[package]] @@ -3480,9 +3480,9 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" +checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -5302,7 +5302,7 @@ dependencies = [ "num-derive 0.4.1", "num-traits", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "ouroboros", "percentage", "qualifier_attr", @@ -5546,7 +5546,7 @@ dependencies = [ "log", "memmap2", "modular-bitfield", - "num_enum 0.7.0", + "num_enum 0.7.1", "rand 0.8.5", "rayon", "solana-logger", @@ -5886,7 +5886,7 @@ dependencies = [ "log", "lru", "min-max-heap", - "num_enum 0.7.0", + "num_enum 0.7.1", "prio-graph", "quinn", "rand 0.8.5", @@ -6302,7 +6302,7 @@ dependencies = [ "lru", "mockall", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "prost", "rand 0.8.5", "rand_chacha 0.3.1", @@ -7017,7 +7017,7 @@ dependencies = [ "num-derive 0.4.1", "num-traits", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "ouroboros", "percentage", "qualifier_attr", @@ -7096,7 +7096,7 @@ dependencies = [ "memmap2", "num-derive 0.4.1", "num-traits", - "num_enum 0.7.0", + "num_enum 0.7.1", "pbkdf2 0.11.0", "qstring", "qualifier_attr", @@ -7919,7 +7919,7 @@ dependencies = [ "bytemuck", "num-derive 0.4.1", "num-traits", - "num_enum 0.7.0", + "num_enum 0.7.1", "solana-program", "solana-zk-token-sdk", "spl-memo", diff --git a/Cargo.toml b/Cargo.toml index d4a342952e5dcd..8e85b51441beda 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -251,7 +251,7 @@ num-bigint = "0.4.4" num-derive = "0.4" num-traits = "0.2" num_cpus = "1.16.0" 
-num_enum = "0.7.0" +num_enum = "0.7.1" openssl = "0.10" ouroboros = "0.15.6" parking_lot = "0.12" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d276f577335370..18f0249197663f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3090,11 +3090,11 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" +checksum = "683751d591e6d81200c39fb0d1032608b77724f34114db54f571ff1317b337c0" dependencies = [ - "num_enum_derive 0.7.0", + "num_enum_derive 0.7.1", ] [[package]] @@ -3111,9 +3111,9 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" +checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", @@ -4658,7 +4658,7 @@ dependencies = [ "num-derive 0.4.1", "num-traits", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "ouroboros", "percentage", "qualifier_attr", @@ -4805,7 +4805,7 @@ dependencies = [ "log", "memmap2", "modular-bitfield", - "num_enum 0.7.0", + "num_enum 0.7.1", "rand 0.8.5", "solana-measure", "solana-sdk", @@ -4957,7 +4957,7 @@ dependencies = [ "log", "lru", "min-max-heap", - "num_enum 0.7.0", + "num_enum 0.7.1", "prio-graph", "quinn", "rand 0.8.5", @@ -5235,7 +5235,7 @@ dependencies = [ "lru", "mockall", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "prost", "rand 0.8.5", "rand_chacha 0.3.1", @@ -5718,7 +5718,7 @@ dependencies = [ "num-derive 0.4.1", "num-traits", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "ouroboros", "percentage", "qualifier_attr", @@ -6212,7 +6212,7 @@ dependencies = [ "memmap2", "num-derive 0.4.1", "num-traits", - "num_enum 0.7.0", + "num_enum 0.7.1", "pbkdf2 0.11.0", "qstring", "qualifier_attr", @@ -6849,7 +6849,7 @@ dependencies = [ "bytemuck", "num-derive 0.4.1", "num-traits", - "num_enum 0.7.0", + "num_enum 0.7.1", "solana-program", "solana-zk-token-sdk", "spl-memo", From 662ac8bc8678beef560727bc9c6a0911784589ea Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 3 Nov 2023 20:32:38 +0800 Subject: [PATCH 41/98] ci: reorg docker images (#33815) * ci: remove unnecessary args from docker-run.sh * ci: remove cargo uninstall from buildkite post checkout * ci: reorg docker images * ci: add mscgen for docs --- .buildkite/hooks/post-checkout | 7 -- ci/docker-run.sh | 12 --- ci/docker-rust-nightly/Dockerfile | 32 +++--- ci/docker-rust/Dockerfile | 156 ++++++++++++++++++++---------- 4 files changed, 121 insertions(+), 86 deletions(-) diff --git a/.buildkite/hooks/post-checkout b/.buildkite/hooks/post-checkout index a36d2574ebbfad..f41f238e0e6414 100644 --- a/.buildkite/hooks/post-checkout +++ b/.buildkite/hooks/post-checkout @@ -38,10 +38,3 @@ source ci/env.sh kill -9 "$victim" || true done ) - -# HACK: These are in our docker images, need to be removed from CARGO_HOME -# because we try to cache downloads across builds with CARGO_HOME -# cargo lacks a facility for "system" tooling, always tries CARGO_HOME first -cargo uninstall cargo-audit &>/dev/null || true -cargo uninstall svgbob_cli &>/dev/null || true -cargo uninstall mdbook &>/dev/null || true diff --git a/ci/docker-run.sh b/ci/docker-run.sh index eb9d06836f692a..52d3807394c10e 100755 --- a/ci/docker-run.sh +++ 
b/ci/docker-run.sh @@ -42,9 +42,6 @@ ARGS=( ) if [[ -n $CI ]]; then - # Share the real ~/.cargo between docker containers in CI for speed - ARGS+=(--volume "$HOME:/home") - if [[ -n $BUILDKITE ]]; then # I hate buildkite-esque echo is leaking into this generic shell wrapper. # but it's easiest to notify to users, and properly guarded under $BUILDKITE_ env @@ -66,16 +63,7 @@ if [[ -n $CI ]]; then ) fi fi -else - # Avoid sharing ~/.cargo when building locally to avoid a mixed macOS/Linux - # ~/.cargo - ARGS+=(--volume "$PWD:/home") fi -ARGS+=(--env "HOME=/home" --env "CARGO_HOME=/home/.cargo") - -# kcov tries to set the personality of the binary which docker -# doesn't allow by default. -ARGS+=(--security-opt "seccomp=unconfined") # Ensure files are created with the current host uid/gid if [[ -z "$SOLANA_DOCKER_RUN_NOSETUID" ]]; then diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index a5d933b2a2d79f..baf7e09632bac6 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,17 +1,21 @@ FROM solanalabs/rust:1.73.0 + ARG date +ARG GRCOV_VERSION=v0.8.18 -RUN set -x \ - && rustup install nightly-$date \ - && rustup component add clippy --toolchain=nightly-$date \ - && rustup component add rustfmt --toolchain=nightly-$date \ - && rustup show \ - && rustc --version \ - && cargo --version \ - && cargo install grcov \ - && rustc +nightly-$date --version \ - && cargo +nightly-$date --version \ - # codecov - && curl -Os https://uploader.codecov.io/latest/linux/codecov \ - && chmod +x codecov \ - && mv codecov /usr/bin +RUN \ + rustup install nightly-$date && \ + rustup component add clippy --toolchain=nightly-$date && \ + rustup component add rustfmt --toolchain=nightly-$date && \ + rustup show && \ + rustc --version && \ + cargo --version && \ + # grcov + curl -LOsS "https://github.com/mozilla/grcov/releases/download/$GRCOV_VERSION/grcov-x86_64-unknown-linux-musl.tar.bz2" && \ + tar -xf grcov-x86_64-unknown-linux-musl.tar.bz2 && \ + mv ./grcov $CARGO_HOME/bin && \ + rm grcov-x86_64-unknown-linux-musl.tar.bz2 && \ + # codecov + curl -Os https://uploader.codecov.io/latest/linux/codecov && \ + chmod +x codecov && \ + mv codecov /usr/bin diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index c15b21636f365f..8619d5e68e30a0 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,55 +1,105 @@ -# Note: when the rust version is changed also modify -# ci/rust-version.sh to pick up the new image tag -FROM rust:1.73.0-bullseye +FROM ubuntu:20.04 -ARG NODE_MAJOR=18 +ARG \ + RUST_VERSION=1.73.0 \ + GOLANG_VERSION=1.21.3 \ + NODE_MAJOR=18 \ + SCCACHE_VERSION=v0.5.4 -RUN set -x \ - && apt update \ - && apt-get install apt-transport-https \ - && echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \ - && apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \ - && apt update \ - && apt install -y \ - buildkite-agent \ - clang \ - cmake \ - jq \ - lcov \ - libudev-dev \ - mscgen \ - nodejs \ - net-tools \ - rsync \ - sudo \ - golang \ - unzip \ - lld \ - protobuf-compiler \ - \ - && apt remove -y libcurl4-openssl-dev \ - # node - && sudo apt-get update \ - && sudo apt-get install -y ca-certificates curl gnupg \ - && sudo mkdir -p /etc/apt/keyrings \ - && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | sudo gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ - && echo "deb 
[signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | sudo tee /etc/apt/sources.list.d/nodesource.list \ - && sudo apt-get update \ - && sudo apt-get install nodejs -y \ - && node --version \ - && npm --version \ - # rust - && rustup component add rustfmt \ - && rustup component add clippy \ - && rustup target add wasm32-unknown-unknown \ - && cargo install cargo-audit \ - && cargo install cargo-hack \ - && cargo install cargo-sort \ - && cargo install mdbook \ - && cargo install mdbook-linkcheck \ - && cargo install svgbob_cli \ - && cargo install wasm-pack \ - && cargo install sccache \ - && rustc --version \ - && cargo --version \ - && rm -rf /var/lib/apt/lists/* +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +ENV \ + DEBIAN_FRONTEND=noninteractive \ + TZ=UTC + +# golang +ENV PATH="/usr/local/go/bin:$PATH" + +# rust +ENV \ + RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH="$PATH:/usr/local/cargo/bin" + +RUN apt-get update && \ + apt-get install --no-install-recommends -y \ + # basic + tzdata \ + apt-transport-https \ + sudo \ + build-essential \ + git \ + vim \ + jq \ + ca-certificates \ + curl \ + gnupg \ + lld \ + cmake \ + # docs + mscgen \ + # solana compiling + libssl-dev \ + libudev-dev \ + pkg-config \ + zlib1g-dev \ + llvm \ + clang \ + cmake \ + make \ + libprotobuf-dev \ + protobuf-compiler \ + && \ + # buildkite + curl -fsSL https://keys.openpgp.org/vks/v1/by-fingerprint/32A37959C2FA5C3C99EFBC32A79206696452D198 | gpg --dearmor -o /usr/share/keyrings/buildkite-agent-archive-keyring.gpg && \ + echo "deb [signed-by=/usr/share/keyrings/buildkite-agent-archive-keyring.gpg] https://apt.buildkite.com/buildkite-agent stable main" | tee /etc/apt/sources.list.d/buildkite-agent.list && \ + apt-get update && \ + apt-get install -y buildkite-agent && \ + # gh + curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg && \ + sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg && \ + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null && \ + apt-get update && \ + apt-get install -y gh && \ + # rust + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --no-modify-path --profile minimal --default-toolchain $RUST_VERSION -y && \ + rustup component add rustfmt && \ + rustup component add clippy && \ + rustup target add wasm32-unknown-unknown && \ + cargo install cargo-audit && \ + cargo install cargo-hack && \ + cargo install cargo-sort && \ + cargo install mdbook && \ + cargo install mdbook-linkcheck && \ + cargo install svgbob_cli && \ + cargo install wasm-pack && \ + cargo install rustfilt && \ + chmod -R a+w $CARGO_HOME $RUSTUP_HOME && \ + rm -rf $CARGO_HOME/registry && \ + # sccache + curl -LOsS "https://github.com/mozilla/sccache/releases/download/$SCCACHE_VERSION/sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl.tar.gz" && \ + tar -xzf "sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl.tar.gz" && \ + mv "sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl"/sccache "$CARGO_HOME/bin/" && \ + rm "sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl.tar.gz" && \ + rm -rf "sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl" && \ + # nextest + curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C "$CARGO_HOME/bin" && \ + # golang + curl -LOsS 
"https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz" && \ + tar -C /usr/local -xzf "go$GOLANG_VERSION.linux-amd64.tar.gz" && \ + rm "go$GOLANG_VERSION.linux-amd64.tar.gz" && \ + # nodejs + sudo mkdir -p /etc/apt/keyrings && \ + curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | sudo gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \ + echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | sudo tee /etc/apt/sources.list.d/nodesource.list && \ + sudo apt-get update && \ + sudo apt-get install -y nodejs && \ + # setup path + mkdir /.cache && \ + chmod -R a+w /.cache && \ + mkdir /.config && \ + chmod -R a+w /.config && \ + mkdir /.npm && \ + chmod -R a+w /.npm && \ + # clean lists + rm -rf /var/lib/apt/lists/* From ebe8afb0c34f3a3d3d83a481b65848e86bdae746 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Mon, 6 Nov 2023 03:02:22 +0100 Subject: [PATCH 42/98] Burn fees collected into invalid accounts (#33887) * refactor: create bank::fee_distribution module * feature: add checks to fee distribution * refactor: move Bank::deposit fn into test_utils * feedback * feedback 2 * add datapoints * change to datapoint_warn * typo --- runtime/benches/accounts.rs | 4 +- runtime/src/bank.rs | 286 +-------- runtime/src/bank/fee_distribution.rs | 908 +++++++++++++++++++++++++++ runtime/src/bank/serde_snapshot.rs | 10 +- runtime/src/bank/tests.rs | 266 +------- sdk/src/feature_set.rs | 5 + 6 files changed, 964 insertions(+), 515 deletions(-) create mode 100644 runtime/src/bank/fee_distribution.rs diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 43361669244578..993c22d2a04e18 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -42,7 +42,7 @@ fn deposit_many(bank: &Bank, pubkeys: &mut Vec, num: usize) -> Result<() AccountSharedData::new((t + 1) as u64, 0, AccountSharedData::default().owner()); pubkeys.push(pubkey); assert!(bank.get_account(&pubkey).is_none()); - bank.deposit(&pubkey, (t + 1) as u64)?; + test_utils::deposit(bank, &pubkey, (t + 1) as u64)?; assert_eq!(bank.get_account(&pubkey).unwrap(), account); } Ok(()) @@ -80,7 +80,7 @@ fn test_accounts_squash(bencher: &mut Bencher) { &Pubkey::default(), slot, )); - next_bank.deposit(&pubkeys[0], 1).unwrap(); + test_utils::deposit(&next_bank, &pubkeys[0], 1).unwrap(); next_bank.squash(); slot += 1; prev_bank = next_bank; diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7a770833cc5a73..9f3636e653c41f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -69,7 +69,6 @@ use { }, solana_accounts_db::{ account_overrides::AccountOverrides, - account_rent_state::RentState, accounts::{ AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, RewardInterval, TransactionLoadResult, @@ -150,7 +149,6 @@ use { incinerator, inflation::Inflation, instruction::InstructionError, - lamports::LamportsError, loader_v4::{self, LoaderV4State, LoaderV4Status}, message::{AccountKeys, SanitizedMessage}, native_loader, @@ -186,7 +184,7 @@ use { borrow::Cow, cell::RefCell, collections::{HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt, mem, ops::{AddAssign, RangeInclusive}, path::PathBuf, @@ -217,6 +215,7 @@ mod address_lookup_table; pub mod bank_hash_details; mod builtin_programs; pub mod epoch_accounts_hash_utils; +mod fee_distribution; mod metrics; mod serde_snapshot; mod sysvar_cache; @@ -3679,62 +3678,6 @@ impl Bank { stake_weighted_timestamp } - // Distribute collected 
transaction fees for this slot to collector_id (= current leader). - // - // Each validator is incentivized to process more transactions to earn more transaction fees. - // Transaction fees are rewarded for the computing resource utilization cost, directly - // proportional to their actual processing power. - // - // collector_id is rotated according to stake-weighted leader schedule. So the opportunity of - // earning transaction fees are fairly distributed by stake. And missing the opportunity - // (not producing a block as a leader) earns nothing. So, being online is incentivized as a - // form of transaction fees as well. - // - // On the other hand, rent fees are distributed under slightly different philosophy, while - // still being stake-weighted. - // Ref: distribute_rent_to_validators - fn collect_fees(&self) { - let collector_fees = self.collector_fees.load(Relaxed); - - if collector_fees != 0 { - let (deposit, mut burn) = self.fee_rate_governor.burn(collector_fees); - // burn a portion of fees - debug!( - "distributed fee: {} (rounded from: {}, burned: {})", - deposit, collector_fees, burn - ); - - match self.deposit(&self.collector_id, deposit) { - Ok(post_balance) => { - if deposit != 0 { - self.rewards.write().unwrap().push(( - self.collector_id, - RewardInfo { - reward_type: RewardType::Fee, - lamports: deposit as i64, - post_balance, - commission: None, - }, - )); - } - } - Err(_) => { - error!( - "Burning {} fee instead of crediting {}", - deposit, self.collector_id - ); - datapoint_error!( - "bank-burned_fee", - ("slot", self.slot(), i64), - ("num_lamports", deposit, i64) - ); - burn += deposit; - } - } - self.capitalization.fetch_sub(burn, Relaxed); - } - } - pub fn rehash(&self) { let mut hash = self.hash.write().unwrap(); let new = self.hash_internal_state(); @@ -3760,8 +3703,8 @@ impl Bank { if *hash == Hash::default() { // finish up any deferred changes to account state self.collect_rent_eagerly(); - self.collect_fees(); - self.distribute_rent(); + self.distribute_transaction_fees(); + self.distribute_rent_fees(); self.update_slot_history(); self.run_incinerator(); @@ -3864,12 +3807,14 @@ impl Bank { self.accounts_data_size_initial += account.data().len() as u64; } - // highest staked node is the first collector + // Highest staked node is the first collector but if a genesis config + // doesn't define any staked nodes, we assume this genesis config is for + // testing and set the collector id to a unique pubkey. self.collector_id = self .stakes_cache .stakes() .highest_staked_node() - .unwrap_or_default(); + .unwrap_or_else(Pubkey::new_unique); self.blockhash_queue.write().unwrap().genesis_hash( &genesis_config.hash(), @@ -5666,183 +5611,6 @@ impl Bank { } } - // Distribute collected rent fees for this slot to staked validators (excluding stakers) - // according to stake. - // - // The nature of rent fee is the cost of doing business, every validator has to hold (or have - // access to) the same list of accounts, so we pay according to stake, which is a rough proxy for - // value to the network. - // - // Currently, rent distribution doesn't consider given validator's uptime at all (this might - // change). That's because rent should be rewarded for the storage resource utilization cost. - // It's treated differently from transaction fees, which is for the computing resource - // utilization cost. 
- // - // We can't use collector_id (which is rotated according to stake-weighted leader schedule) - // as an approximation to the ideal rent distribution to simplify and avoid this per-slot - // computation for the distribution (time: N log N, space: N acct. stores; N = # of - // validators). - // The reason is that rent fee doesn't need to be incentivized for throughput unlike transaction - // fees - // - // Ref: collect_fees - #[allow(clippy::needless_collect)] - fn distribute_rent_to_validators( - &self, - vote_accounts: &VoteAccountsHashMap, - rent_to_be_distributed: u64, - ) { - let mut total_staked = 0; - - // Collect the stake associated with each validator. - // Note that a validator may be present in this vector multiple times if it happens to have - // more than one staked vote account somehow - let mut validator_stakes = vote_accounts - .iter() - .filter_map(|(_vote_pubkey, (staked, account))| { - if *staked == 0 { - None - } else { - total_staked += *staked; - Some((account.node_pubkey()?, *staked)) - } - }) - .collect::>(); - - #[cfg(test)] - if validator_stakes.is_empty() { - // some tests bank.freezes() with bad staking state - self.capitalization - .fetch_sub(rent_to_be_distributed, Relaxed); - return; - } - #[cfg(not(test))] - assert!(!validator_stakes.is_empty()); - - // Sort first by stake and then by validator identity pubkey for determinism. - // If two items are still equal, their relative order does not matter since - // both refer to the same validator. - validator_stakes.sort_unstable_by(|(pubkey1, staked1), (pubkey2, staked2)| { - (staked1, pubkey1).cmp(&(staked2, pubkey2)).reverse() - }); - - let enforce_fix = self.no_overflow_rent_distribution_enabled(); - - let mut rent_distributed_in_initial_round = 0; - let validator_rent_shares = validator_stakes - .into_iter() - .map(|(pubkey, staked)| { - let rent_share = if !enforce_fix { - (((staked * rent_to_be_distributed) as f64) / (total_staked as f64)) as u64 - } else { - (((staked as u128) * (rent_to_be_distributed as u128)) / (total_staked as u128)) - .try_into() - .unwrap() - }; - rent_distributed_in_initial_round += rent_share; - (pubkey, rent_share) - }) - .collect::>(); - - // Leftover lamports after fraction calculation, will be paid to validators starting from highest stake - // holder - let mut leftover_lamports = rent_to_be_distributed - rent_distributed_in_initial_round; - - let mut rewards = vec![]; - validator_rent_shares - .into_iter() - .for_each(|(pubkey, rent_share)| { - let rent_to_be_paid = if leftover_lamports > 0 { - leftover_lamports -= 1; - rent_share + 1 - } else { - rent_share - }; - if !enforce_fix || rent_to_be_paid > 0 { - let mut account = self - .get_account_with_fixed_root(&pubkey) - .unwrap_or_default(); - let rent = self.rent_collector().rent; - let recipient_pre_rent_state = RentState::from_account(&account, &rent); - let distribution = account.checked_add_lamports(rent_to_be_paid); - let recipient_post_rent_state = RentState::from_account(&account, &rent); - let rent_state_transition_allowed = recipient_post_rent_state - .transition_allowed_from(&recipient_pre_rent_state); - if !rent_state_transition_allowed { - warn!( - "Rent distribution of {rent_to_be_paid} to {pubkey} results in \ - invalid RentState: {recipient_post_rent_state:?}" - ); - datapoint_warn!( - "bank-rent_distribution_invalid_state", - ("slot", self.slot(), i64), - ("pubkey", pubkey.to_string(), String), - ("rent_to_be_paid", rent_to_be_paid, i64) - ); - } - if distribution.is_err() - || 
(self.prevent_rent_paying_rent_recipients()
-                            && !rent_state_transition_allowed)
-                    {
-                        // overflow adding lamports or resulting account is not rent-exempt
-                        self.capitalization.fetch_sub(rent_to_be_paid, Relaxed);
-                        error!(
-                            "Burned {} rent lamports instead of sending to {}",
-                            rent_to_be_paid, pubkey
-                        );
-                        datapoint_error!(
-                            "bank-burned_rent",
-                            ("slot", self.slot(), i64),
-                            ("num_lamports", rent_to_be_paid, i64)
-                        );
-                    } else {
-                        self.store_account(&pubkey, &account);
-                        rewards.push((
-                            pubkey,
-                            RewardInfo {
-                                reward_type: RewardType::Rent,
-                                lamports: rent_to_be_paid as i64,
-                                post_balance: account.lamports(),
-                                commission: None,
-                            },
-                        ));
-                    }
-                }
-            });
-        self.rewards.write().unwrap().append(&mut rewards);
-
-        if enforce_fix {
-            assert_eq!(leftover_lamports, 0);
-        } else if leftover_lamports != 0 {
-            warn!(
-                "There was leftover from rent distribution: {}",
-                leftover_lamports
-            );
-            self.capitalization.fetch_sub(leftover_lamports, Relaxed);
-        }
-    }
-
-    fn distribute_rent(&self) {
-        let total_rent_collected = self.collected_rent.load(Relaxed);
-
-        let (burned_portion, rent_to_be_distributed) = self
-            .rent_collector
-            .rent
-            .calculate_burn(total_rent_collected);
-
-        debug!(
-            "distributed rent: {} (rounded from: {}, burned: {})",
-            rent_to_be_distributed, total_rent_collected, burned_portion
-        );
-        self.capitalization.fetch_sub(burned_portion, Relaxed);
-
-        if rent_to_be_distributed == 0 {
-            return;
-        }
-
-        self.distribute_rent_to_validators(&self.vote_accounts(), rent_to_be_distributed);
-    }
-
     fn collect_rent(
         &self,
         execution_results: &[TransactionExecutionResult],
@@ -6753,19 +6521,6 @@ impl Bank {
         }
     }
 
-    pub fn deposit(
-        &self,
-        pubkey: &Pubkey,
-        lamports: u64,
-    ) -> std::result::Result<u64, LamportsError> {
-        // This doesn't collect rents intentionally.
-        // Rents should only be applied to actual TXes
-        let mut account = self.get_account_with_fixed_root(pubkey).unwrap_or_default();
-        account.checked_add_lamports(lamports)?;
-        self.store_account(pubkey, &account);
-        Ok(account.lamports())
-    }
-
     pub fn accounts(&self) -> Arc<Accounts> {
         self.rc.accounts.clone()
     }
@@ -7982,6 +7737,11 @@ impl Bank {
             .is_active(&feature_set::prevent_rent_paying_rent_recipients::id())
     }
 
+    pub fn validate_fee_collector_account(&self) -> bool {
+        self.feature_set
+            .is_active(&feature_set::validate_fee_collector_account::id())
+    }
+
     pub fn read_cost_tracker(&self) -> LockResult<RwLockReadGuard<CostTracker>> {
         self.cost_tracker.read()
     }
@@ -8548,7 +8308,12 @@ pub mod test_utils {
     use {
         super::Bank,
         crate::installed_scheduler_pool::BankWithScheduler,
-        solana_sdk::{hash::hashv, pubkey::Pubkey},
+        solana_sdk::{
+            account::{ReadableAccount, WritableAccount},
+            hash::hashv,
+            lamports::LamportsError,
+            pubkey::Pubkey,
+        },
         solana_vote_program::vote_state::{self, BlockTimestamp, VoteStateVersions},
         std::sync::Arc,
     };
@@ -8580,4 +8345,17 @@ pub mod test_utils {
         vote_state::to(&versioned, &mut vote_account).unwrap();
         bank.store_account(vote_pubkey, &vote_account);
     }
+
+    pub fn deposit(
+        bank: &Bank,
+        pubkey: &Pubkey,
+        lamports: u64,
+    ) -> std::result::Result<u64, LamportsError> {
+        // This doesn't collect rents intentionally.
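+        // (the helper writes the account back via store_account directly, so
+        // no rent collector logic runs on the deposited balance)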
+        // Rents should only be applied to actual TXes
+        let mut account = bank.get_account_with_fixed_root(pubkey).unwrap_or_default();
+        account.checked_add_lamports(lamports)?;
+        bank.store_account(pubkey, &account);
+        Ok(account.lamports())
+    }
 }
diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs
new file mode 100644
index 00000000000000..e1d251c0bf478c
--- /dev/null
+++ b/runtime/src/bank/fee_distribution.rs
@@ -0,0 +1,908 @@
+use {
+    super::Bank,
+    log::{debug, warn},
+    solana_accounts_db::{account_rent_state::RentState, stake_rewards::RewardInfo},
+    solana_sdk::{
+        account::{ReadableAccount, WritableAccount},
+        pubkey::Pubkey,
+        reward_type::RewardType,
+        system_program,
+    },
+    solana_vote::vote_account::VoteAccountsHashMap,
+    std::{result::Result, sync::atomic::Ordering::Relaxed},
+    thiserror::Error,
+};
+
+#[derive(Debug)]
+struct DepositFeeOptions {
+    check_account_owner: bool,
+    check_rent_paying: bool,
+}
+
+#[derive(Error, Debug, PartialEq)]
+enum DepositFeeError {
+    #[error("fee account became rent paying")]
+    InvalidRentPayingAccount,
+    #[error("lamport overflow")]
+    LamportOverflow,
+    #[error("invalid fee account owner")]
+    InvalidAccountOwner,
+}
+
+impl Bank {
+    // Distribute collected transaction fees for this slot to collector_id (= current leader).
+    //
+    // Each validator is incentivized to process more transactions to earn more transaction fees.
+    // Transaction fees reward the computing resource utilization cost, directly
+    // proportional to a validator's actual processing power.
+    //
+    // collector_id is rotated according to the stake-weighted leader schedule, so the
+    // opportunities to earn transaction fees are fairly distributed by stake. Missing the
+    // opportunity (not producing a block as a leader) earns nothing, so being online is
+    // incentivized by transaction fees as well.
+    //
+    // On the other hand, rent fees are distributed under a slightly different philosophy, while
+    // still being stake-weighted.
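+    //
+    // (Illustrative numbers, assuming the default 50% burn rate:
+    //     let (deposit, burn) = fee_rate_governor.burn(100);
+    //     assert_eq!((deposit, burn), (50, 50));
+    // the burned half is debited from capitalization in the function below.)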
+    // Ref: distribute_rent_to_validators
+    pub(super) fn distribute_transaction_fees(&self) {
+        let collector_fees = self.collector_fees.load(Relaxed);
+        if collector_fees != 0 {
+            let (deposit, mut burn) = self.fee_rate_governor.burn(collector_fees);
+            if deposit > 0 {
+                let validate_fee_collector = self.validate_fee_collector_account();
+                match self.deposit_fees(
+                    &self.collector_id,
+                    deposit,
+                    DepositFeeOptions {
+                        check_account_owner: validate_fee_collector,
+                        check_rent_paying: validate_fee_collector,
+                    },
+                ) {
+                    Ok(post_balance) => {
+                        self.rewards.write().unwrap().push((
+                            self.collector_id,
+                            RewardInfo {
+                                reward_type: RewardType::Fee,
+                                lamports: deposit as i64,
+                                post_balance,
+                                commission: None,
+                            },
+                        ));
+                    }
+                    Err(err) => {
+                        debug!(
+                            "Burned {} lamport tx fee instead of sending to {} due to {}",
+                            deposit, self.collector_id, err
+                        );
+                        datapoint_warn!(
+                            "bank-burned_fee",
+                            ("slot", self.slot(), i64),
+                            ("num_lamports", deposit, i64),
+                            ("error", err.to_string(), String),
+                        );
+                        burn += deposit;
+                    }
+                }
+            }
+            self.capitalization.fetch_sub(burn, Relaxed);
+        }
+    }
+
+    // Deposits fees into a specified account and, if successful, returns the new balance of that account
+    fn deposit_fees(
+        &self,
+        pubkey: &Pubkey,
+        fees: u64,
+        options: DepositFeeOptions,
+    ) -> Result<u64, DepositFeeError> {
+        let mut account = self.get_account_with_fixed_root(pubkey).unwrap_or_default();
+
+        if options.check_account_owner && !system_program::check_id(account.owner()) {
+            return Err(DepositFeeError::InvalidAccountOwner);
+        }
+
+        let rent = self.rent_collector().rent;
+        let recipient_pre_rent_state = RentState::from_account(&account, &rent);
+        let distribution = account.checked_add_lamports(fees);
+        if distribution.is_err() {
+            return Err(DepositFeeError::LamportOverflow);
+        }
+        if options.check_rent_paying {
+            let recipient_post_rent_state = RentState::from_account(&account, &rent);
+            let rent_state_transition_allowed =
+                recipient_post_rent_state.transition_allowed_from(&recipient_pre_rent_state);
+            if !rent_state_transition_allowed {
+                return Err(DepositFeeError::InvalidRentPayingAccount);
+            }
+        }
+
+        self.store_account(pubkey, &account);
+        Ok(account.lamports())
+    }
+
+    // Distribute collected rent fees for this slot to staked validators (excluding stakers)
+    // according to stake.
+    //
+    // The nature of the rent fee is the cost of doing business: every validator has to hold (or have
+    // access to) the same list of accounts, so we pay according to stake, which is a rough proxy for
+    // value to the network.
+    //
+    // Currently, rent distribution doesn't consider a given validator's uptime at all (this might
+    // change). That's because rent should reward the storage resource utilization cost.
+    // It's treated differently from transaction fees, which are for the computing resource
+    // utilization cost.
+    //
+    // We can't use collector_id (which is rotated according to the stake-weighted leader schedule)
+    // as an approximation to the ideal rent distribution to simplify and avoid this per-slot
+    // computation for the distribution (time: N log N, space: N acct. stores; N = # of
+    // validators).
+    // The reason is that the rent fee, unlike transaction fees, doesn't need to incentivize
+    // throughput.
+    //
+    // Ref: distribute_transaction_fees
+    #[allow(clippy::needless_collect)]
+    fn distribute_rent_to_validators(
+        &self,
+        vote_accounts: &VoteAccountsHashMap,
+        rent_to_be_distributed: u64,
+    ) {
+        let mut total_staked = 0;
+
+        // Collect the stake associated with each validator.
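+        // (Each vote_accounts entry maps a vote pubkey to (stake, vote account);
+        // the validator identity is read from the vote account's node_pubkey.)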
+        // Note that a validator may be present in this vector multiple times if it happens to have
+        // more than one staked vote account somehow
+        let mut validator_stakes = vote_accounts
+            .iter()
+            .filter_map(|(_vote_pubkey, (staked, account))| {
+                if *staked == 0 {
+                    None
+                } else {
+                    total_staked += *staked;
+                    Some((account.node_pubkey()?, *staked))
+                }
+            })
+            .collect::<Vec<(Pubkey, u64)>>();
+
+        #[cfg(test)]
+        if validator_stakes.is_empty() {
+            // some tests call bank.freeze() with bad staking state
+            self.capitalization
+                .fetch_sub(rent_to_be_distributed, Relaxed);
+            return;
+        }
+        #[cfg(not(test))]
+        assert!(!validator_stakes.is_empty());
+
+        // Sort first by stake and then by validator identity pubkey for determinism.
+        // If two items are still equal, their relative order does not matter since
+        // both refer to the same validator.
+        validator_stakes.sort_unstable_by(|(pubkey1, staked1), (pubkey2, staked2)| {
+            (staked1, pubkey1).cmp(&(staked2, pubkey2)).reverse()
+        });
+
+        let enforce_fix = self.no_overflow_rent_distribution_enabled();
+
+        let mut rent_distributed_in_initial_round = 0;
+        let validator_rent_shares = validator_stakes
+            .into_iter()
+            .map(|(pubkey, staked)| {
+                let rent_share = if !enforce_fix {
+                    (((staked * rent_to_be_distributed) as f64) / (total_staked as f64)) as u64
+                } else {
+                    (((staked as u128) * (rent_to_be_distributed as u128)) / (total_staked as u128))
+                        .try_into()
+                        .unwrap()
+                };
+                rent_distributed_in_initial_round += rent_share;
+                (pubkey, rent_share)
+            })
+            .collect::<Vec<(Pubkey, u64)>>();
+
+        // Leftover lamports after the fraction calculation will be paid to validators, starting
+        // from the highest stake holder
+        let mut leftover_lamports = rent_to_be_distributed - rent_distributed_in_initial_round;
+
+        let mut rent_to_burn: u64 = 0;
+        let mut rewards = vec![];
+        validator_rent_shares
+            .into_iter()
+            .for_each(|(pubkey, rent_share)| {
+                let rent_to_be_paid = if leftover_lamports > 0 {
+                    leftover_lamports -= 1;
+                    rent_share + 1
+                } else {
+                    rent_share
+                };
+                if !enforce_fix || rent_to_be_paid > 0 {
+                    let check_account_owner = self.validate_fee_collector_account();
+                    let check_rent_paying = self.prevent_rent_paying_rent_recipients();
+                    match self.deposit_fees(
+                        &pubkey,
+                        rent_to_be_paid,
+                        DepositFeeOptions {
+                            check_account_owner,
+                            check_rent_paying,
+                        },
+                    ) {
+                        Ok(post_balance) => {
+                            rewards.push((
+                                pubkey,
+                                RewardInfo {
+                                    reward_type: RewardType::Rent,
+                                    lamports: rent_to_be_paid as i64,
+                                    post_balance,
+                                    commission: None,
+                                },
+                            ));
+                        }
+                        Err(err) => {
+                            debug!(
+                                "Burned {} lamport rent fee instead of sending to {} due to {}",
+                                rent_to_be_paid, pubkey, err
+                            );
+
+                            // overflow adding lamports or resulting account is invalid
+                            // so burn lamports and track lamports burned per slot
+                            rent_to_burn = rent_to_burn.saturating_add(rent_to_be_paid);
+                        }
+                    }
+                }
+            });
+        self.rewards.write().unwrap().append(&mut rewards);
+
+        if rent_to_burn > 0 {
+            self.capitalization.fetch_sub(rent_to_burn, Relaxed);
+            datapoint_warn!(
+                "bank-burned_rent",
+                ("slot", self.slot(), i64),
+                ("num_lamports", rent_to_burn, i64)
+            );
+        }
+
+        if enforce_fix {
+            assert_eq!(leftover_lamports, 0);
+        } else if leftover_lamports != 0 {
+            warn!(
+                "There was leftover from rent distribution: {}",
+                leftover_lamports
+            );
+            self.capitalization.fetch_sub(leftover_lamports, Relaxed);
+        }
+    }
+
+    pub(super) fn distribute_rent_fees(&self) {
+        let total_rent_collected = self.collected_rent.load(Relaxed);
+
+        let (burned_portion, rent_to_be_distributed) = self
+            .rent_collector
+            .rent
+            .calculate_burn(total_rent_collected);
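+        // (e.g., with the default burn_percent of 50, calculate_burn(100)
+        // yields (50, 50): the first half is burned below, the second half
+        // is distributed to validators by stake.)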
+ + debug!( + "distributed rent: {} (rounded from: {}, burned: {})", + rent_to_be_distributed, total_rent_collected, burned_portion + ); + self.capitalization.fetch_sub(burned_portion, Relaxed); + + if rent_to_be_distributed == 0 { + return; + } + + self.distribute_rent_to_validators(&self.vote_accounts(), rent_to_be_distributed); + } +} + +#[cfg(test)] +pub mod tests { + use { + super::*, + crate::genesis_utils::{ + create_genesis_config, create_genesis_config_with_leader, + create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, + }, + log::info, + solana_sdk::{ + account::AccountSharedData, feature_set, native_token::sol_to_lamports, pubkey, + rent::Rent, signature::Signer, + }, + }; + + #[test] + fn test_distribute_transaction_fees() { + #[derive(PartialEq)] + enum Scenario { + Normal, + InvalidOwner, + RentPaying, + } + + struct TestCase { + scenario: Scenario, + disable_checks: bool, + } + + impl TestCase { + fn new(scenario: Scenario, disable_checks: bool) -> Self { + Self { + scenario, + disable_checks, + } + } + } + + for test_case in [ + TestCase::new(Scenario::Normal, false), + TestCase::new(Scenario::Normal, true), + TestCase::new(Scenario::InvalidOwner, false), + TestCase::new(Scenario::InvalidOwner, true), + TestCase::new(Scenario::RentPaying, false), + TestCase::new(Scenario::RentPaying, true), + ] { + let mut genesis = create_genesis_config(0); + if test_case.disable_checks { + genesis + .genesis_config + .accounts + .remove(&feature_set::validate_fee_collector_account::id()) + .unwrap(); + } + let rent = Rent::default(); + let min_rent_exempt_balance = rent.minimum_balance(0); + genesis.genesis_config.rent = rent; // Ensure rent is non-zero, as genesis_utils sets Rent::free by default + let bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fees = 100; + bank.collector_fees.fetch_add(transaction_fees, Relaxed); + assert_eq!(transaction_fees, bank.collector_fees.load(Relaxed)); + let (expected_collected_fees, burn_amount) = + bank.fee_rate_governor.burn(transaction_fees); + assert!(burn_amount > 0); + + if test_case.scenario == Scenario::RentPaying { + // ensure that account balance + collected fees will make it rent-paying + let initial_balance = 100; + let account = AccountSharedData::new(initial_balance, 0, &system_program::id()); + bank.store_account(bank.collector_id(), &account); + assert!(initial_balance + transaction_fees < min_rent_exempt_balance); + } else if test_case.scenario == Scenario::InvalidOwner { + // ensure that account owner is invalid and fee distribution will fail + let account = + AccountSharedData::new(min_rent_exempt_balance, 0, &Pubkey::new_unique()); + bank.store_account(bank.collector_id(), &account); + } else { + let account = + AccountSharedData::new(min_rent_exempt_balance, 0, &system_program::id()); + bank.store_account(bank.collector_id(), &account); + } + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fees(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + if test_case.scenario != Scenario::Normal && !test_case.disable_checks { + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!( + initial_capitalization - transaction_fees, + bank.capitalization() + ); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } else { + assert_eq!( + 
initial_collector_id_balance + expected_collected_fees, + new_collector_id_balance + ); + + assert_eq!(initial_capitalization - burn_amount, bank.capitalization()); + + let locked_rewards = bank.rewards.read().unwrap(); + assert_eq!( + locked_rewards.len(), + 1, + "There should be one reward distributed" + ); + + let reward_info = &locked_rewards[0]; + assert_eq!( + reward_info.1.lamports, expected_collected_fees as i64, + "The reward amount should match the expected deposit" + ); + assert_eq!( + reward_info.1.reward_type, + RewardType::Fee, + "The reward type should be Fee" + ); + } + } + } + + #[test] + fn test_distribute_transaction_fees_zero() { + let genesis = create_genesis_config(0); + let bank = Bank::new_for_tests(&genesis.genesis_config); + assert_eq!(bank.collector_fees.load(Relaxed), 0); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fees(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!(initial_capitalization, bank.capitalization()); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } + + #[test] + fn test_distribute_transaction_fees_burn_all() { + let mut genesis = create_genesis_config(0); + genesis.genesis_config.fee_rate_governor.burn_percent = 100; + let bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fees = 100; + bank.collector_fees.fetch_add(transaction_fees, Relaxed); + assert_eq!(transaction_fees, bank.collector_fees.load(Relaxed)); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fees(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!( + initial_capitalization - transaction_fees, + bank.capitalization() + ); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } + + #[test] + fn test_distribute_transaction_fees_overflow_failure() { + let genesis = create_genesis_config(0); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fees = 100; + bank.collector_fees.fetch_add(transaction_fees, Relaxed); + assert_eq!(transaction_fees, bank.collector_fees.load(Relaxed)); + + // ensure that account balance will overflow and fee distribution will fail + let account = AccountSharedData::new(u64::MAX, 0, &system_program::id()); + bank.store_account(bank.collector_id(), &account); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fees(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!( + initial_capitalization - transaction_fees, + bank.capitalization() + ); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } + + #[test] + fn test_deposit_fees() { + let initial_balance = 1_000_000_000; + let genesis = create_genesis_config(initial_balance); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let 
pubkey = genesis.mint_keypair.pubkey(); + + let deposit_amount = 500; + let options = DepositFeeOptions { + check_account_owner: true, + check_rent_paying: true, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Ok(initial_balance + deposit_amount), + "New balance should be the sum of the initial balance and deposit amount" + ); + } + + #[test] + fn test_deposit_fees_with_overflow() { + let initial_balance = u64::MAX; + let genesis = create_genesis_config(initial_balance); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let pubkey = genesis.mint_keypair.pubkey(); + + let deposit_amount = 500; + let options = DepositFeeOptions { + check_account_owner: false, + check_rent_paying: false, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Err(DepositFeeError::LamportOverflow), + "Expected an error due to lamport overflow" + ); + } + + #[test] + fn test_deposit_fees_invalid_account_owner() { + let initial_balance = 1000; + let genesis = create_genesis_config_with_leader(0, &pubkey::new_rand(), initial_balance); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let pubkey = genesis.voting_keypair.pubkey(); + + let deposit_amount = 500; + + // enable check_account_owner + { + let options = DepositFeeOptions { + check_account_owner: true, // Intentionally checking for account owner + check_rent_paying: false, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Err(DepositFeeError::InvalidAccountOwner), + "Expected an error due to invalid account owner" + ); + } + + // disable check_account_owner + { + let options = DepositFeeOptions { + check_account_owner: false, + check_rent_paying: false, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Ok(initial_balance + deposit_amount), + "New balance should be the sum of the initial balance and deposit amount" + ); + } + } + + #[test] + fn test_deposit_fees_invalid_rent_paying() { + let initial_balance = 0; + let genesis = create_genesis_config(initial_balance); + let pubkey = genesis.mint_keypair.pubkey(); + let mut genesis_config = genesis.genesis_config; + let rent = Rent::default(); + genesis_config.rent = rent; // Ensure rent is non-zero, as genesis_utils sets Rent::free by default + let bank = Bank::new_for_tests(&genesis_config); + let min_rent_exempt_balance = rent.minimum_balance(0); + + let deposit_amount = 500; + assert!(initial_balance + deposit_amount < min_rent_exempt_balance); + + // enable check_rent_paying + { + let options = DepositFeeOptions { + check_account_owner: false, + check_rent_paying: true, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Err(DepositFeeError::InvalidRentPayingAccount), + "Expected an error due to invalid rent paying account" + ); + } + + // disable check_rent_paying + { + let options = DepositFeeOptions { + check_account_owner: false, + check_rent_paying: false, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Ok(initial_balance + deposit_amount), + "New balance should be the sum of the initial balance and deposit amount" + ); + } + } + + #[test] + fn test_distribute_rent_to_validators_overflow() { + solana_logger::setup(); + + // These values are taken from the real cluster (testnet) + const RENT_TO_BE_DISTRIBUTED: u64 = 120_525; + const VALIDATOR_STAKE: u64 = 374_999_998_287_840; + + let validator_pubkey = solana_sdk::pubkey::new_rand(); + let mut genesis_config = + create_genesis_config_with_leader(10, &validator_pubkey, 
VALIDATOR_STAKE) + .genesis_config; + + let bank = Bank::new_for_tests(&genesis_config); + let old_validator_lamports = bank.get_balance(&validator_pubkey); + bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); + let new_validator_lamports = bank.get_balance(&validator_pubkey); + assert_eq!( + new_validator_lamports, + old_validator_lamports + RENT_TO_BE_DISTRIBUTED + ); + + genesis_config + .accounts + .remove(&feature_set::no_overflow_rent_distribution::id()) + .unwrap(); + let bank = std::panic::AssertUnwindSafe(Bank::new_for_tests(&genesis_config)); + let old_validator_lamports = bank.get_balance(&validator_pubkey); + let new_validator_lamports = std::panic::catch_unwind(|| { + bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); + bank.get_balance(&validator_pubkey) + }); + + if let Ok(new_validator_lamports) = new_validator_lamports { + info!("asserting overflowing incorrect rent distribution"); + assert_ne!( + new_validator_lamports, + old_validator_lamports + RENT_TO_BE_DISTRIBUTED + ); + } else { + info!("NOT-asserting overflowing incorrect rent distribution"); + } + } + + #[test] + fn test_distribute_rent_to_validators_rent_paying() { + solana_logger::setup(); + + const RENT_PER_VALIDATOR: u64 = 55; + const TOTAL_RENT: u64 = RENT_PER_VALIDATOR * 4; + + let empty_validator = ValidatorVoteKeypairs::new_rand(); + let rent_paying_validator = ValidatorVoteKeypairs::new_rand(); + let becomes_rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); + let rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); + let keypairs = vec![ + &empty_validator, + &rent_paying_validator, + &becomes_rent_exempt_validator, + &rent_exempt_validator, + ]; + let genesis_config_info = create_genesis_config_with_vote_accounts( + sol_to_lamports(1000.), + &keypairs, + vec![sol_to_lamports(1000.); 4], + ); + let mut genesis_config = genesis_config_info.genesis_config; + genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default + + for deactivate_feature in [false, true] { + if deactivate_feature { + genesis_config + .accounts + .remove(&feature_set::prevent_rent_paying_rent_recipients::id()) + .unwrap(); + } + let bank = Bank::new_for_tests(&genesis_config); + let rent = bank.rent_collector().rent; + let rent_exempt_minimum = rent.minimum_balance(0); + + // Make one validator have an empty identity account + let mut empty_validator_account = bank + .get_account_with_fixed_root(&empty_validator.node_keypair.pubkey()) + .unwrap(); + empty_validator_account.set_lamports(0); + bank.store_account( + &empty_validator.node_keypair.pubkey(), + &empty_validator_account, + ); + + // Make one validator almost rent-exempt, less RENT_PER_VALIDATOR + let mut becomes_rent_exempt_validator_account = bank + .get_account_with_fixed_root(&becomes_rent_exempt_validator.node_keypair.pubkey()) + .unwrap(); + becomes_rent_exempt_validator_account + .set_lamports(rent_exempt_minimum - RENT_PER_VALIDATOR); + bank.store_account( + &becomes_rent_exempt_validator.node_keypair.pubkey(), + &becomes_rent_exempt_validator_account, + ); + + // Make one validator rent-exempt + let mut rent_exempt_validator_account = bank + .get_account_with_fixed_root(&rent_exempt_validator.node_keypair.pubkey()) + .unwrap(); + rent_exempt_validator_account.set_lamports(rent_exempt_minimum); + bank.store_account( + &rent_exempt_validator.node_keypair.pubkey(), + &rent_exempt_validator_account, + ); + + let get_rent_state = |bank: &Bank, 
address: &Pubkey| -> RentState { + let account = bank + .get_account_with_fixed_root(address) + .unwrap_or_default(); + RentState::from_account(&account, &rent) + }; + + // Assert starting RentStates + assert_eq!( + get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), + RentState::Uninitialized + ); + assert_eq!( + get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), + RentState::RentPaying { + lamports: 42, + data_size: 0, + } + ); + assert_eq!( + get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), + RentState::RentPaying { + lamports: rent_exempt_minimum - RENT_PER_VALIDATOR, + data_size: 0, + } + ); + assert_eq!( + get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), + RentState::RentExempt + ); + + let old_empty_validator_lamports = + bank.get_balance(&empty_validator.node_keypair.pubkey()); + let old_rent_paying_validator_lamports = + bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); + let old_becomes_rent_exempt_validator_lamports = + bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); + let old_rent_exempt_validator_lamports = + bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); + + bank.distribute_rent_to_validators(&bank.vote_accounts(), TOTAL_RENT); + + let new_empty_validator_lamports = + bank.get_balance(&empty_validator.node_keypair.pubkey()); + let new_rent_paying_validator_lamports = + bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); + let new_becomes_rent_exempt_validator_lamports = + bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); + let new_rent_exempt_validator_lamports = + bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); + + // Assert ending balances; rent should be withheld if test is active and ending RentState + // is RentPaying, ie. 
empty_validator and rent_paying_validator + assert_eq!( + if deactivate_feature { + old_empty_validator_lamports + RENT_PER_VALIDATOR + } else { + old_empty_validator_lamports + }, + new_empty_validator_lamports + ); + + assert_eq!( + if deactivate_feature { + old_rent_paying_validator_lamports + RENT_PER_VALIDATOR + } else { + old_rent_paying_validator_lamports + }, + new_rent_paying_validator_lamports + ); + + assert_eq!( + old_becomes_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, + new_becomes_rent_exempt_validator_lamports + ); + + assert_eq!( + old_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, + new_rent_exempt_validator_lamports + ); + + // Assert ending RentStates + assert_eq!( + if deactivate_feature { + RentState::RentPaying { + lamports: RENT_PER_VALIDATOR, + data_size: 0, + } + } else { + RentState::Uninitialized + }, + get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), + ); + assert_eq!( + if deactivate_feature { + RentState::RentPaying { + lamports: old_rent_paying_validator_lamports + RENT_PER_VALIDATOR, + data_size: 0, + } + } else { + RentState::RentPaying { + lamports: old_rent_paying_validator_lamports, + data_size: 0, + } + }, + get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), + ); + assert_eq!( + RentState::RentExempt, + get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), + ); + assert_eq!( + RentState::RentExempt, + get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), + ); + } + } + + #[test] + fn test_distribute_rent_to_validators_invalid_owner() { + struct TestCase { + disable_owner_check: bool, + use_invalid_owner: bool, + } + + impl TestCase { + fn new(disable_owner_check: bool, use_invalid_owner: bool) -> Self { + Self { + disable_owner_check, + use_invalid_owner, + } + } + } + + for test_case in [ + TestCase::new(false, false), + TestCase::new(false, true), + TestCase::new(true, false), + TestCase::new(true, true), + ] { + let genesis_config_info = + create_genesis_config_with_leader(0, &Pubkey::new_unique(), 100); + let mut genesis_config = genesis_config_info.genesis_config; + genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default + + if test_case.disable_owner_check { + genesis_config + .accounts + .remove(&feature_set::validate_fee_collector_account::id()) + .unwrap(); + } + let bank = Bank::new_for_tests(&genesis_config); + + let initial_balance = 1_000_000; + let account_owner = if test_case.use_invalid_owner { + Pubkey::new_unique() + } else { + system_program::id() + }; + let account = AccountSharedData::new(initial_balance, 0, &account_owner); + bank.store_account(bank.collector_id(), &account); + + let initial_capitalization = bank.capitalization(); + let rent_fees = 100; + bank.distribute_rent_to_validators(&bank.vote_accounts(), rent_fees); + let new_capitalization = bank.capitalization(); + let new_balance = bank.get_balance(bank.collector_id()); + + if test_case.use_invalid_owner && !test_case.disable_owner_check { + assert_eq!(initial_balance, new_balance); + assert_eq!(initial_capitalization - rent_fees, new_capitalization); + assert_eq!(bank.rewards.read().unwrap().len(), 0); + } else { + assert_eq!(initial_balance + rent_fees, new_balance); + assert_eq!(initial_capitalization, new_capitalization); + assert_eq!(bank.rewards.read().unwrap().len(), 1); + } + } + } +} diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 17bba5638f2d47..e1746c52b79f75 100644 --- 
a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -3,8 +3,8 @@ mod tests { use { crate::{ bank::{ - epoch_accounts_hash_utils, Bank, BankTestConfig, EpochRewardStatus, - StartBlockHeightAndRewards, + epoch_accounts_hash_utils, test_utils as bank_test_utils, Bank, BankTestConfig, + EpochRewardStatus, StartBlockHeightAndRewards, }, genesis_utils::{activate_all_features, activate_feature}, runtime_config::RuntimeConfig, @@ -109,7 +109,7 @@ mod tests { // Create an account on a non-root fork let key1 = Keypair::new(); - bank1.deposit(&key1.pubkey(), 5).unwrap(); + bank_test_utils::deposit(&bank1, &key1.pubkey(), 5).unwrap(); // If setting an initial EAH, then the bank being snapshotted must be in the EAH calculation // window. Otherwise `bank_to_stream()` below will *not* include the EAH in the bank snapshot, @@ -123,11 +123,11 @@ mod tests { // Test new account let key2 = Keypair::new(); - bank2.deposit(&key2.pubkey(), 10).unwrap(); + bank_test_utils::deposit(&bank2, &key2.pubkey(), 10).unwrap(); assert_eq!(bank2.get_balance(&key2.pubkey()), 10); let key3 = Keypair::new(); - bank2.deposit(&key3.pubkey(), 0).unwrap(); + bank_test_utils::deposit(&bank2, &key3.pubkey(), 0).unwrap(); bank2.freeze(); bank2.squash(); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index df39171d84b681..1f4ed9d8bcc3e7 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -976,232 +976,6 @@ fn test_rent_distribution() { ); } -#[test] -fn test_distribute_rent_to_validators_overflow() { - solana_logger::setup(); - - // These values are taken from the real cluster (testnet) - const RENT_TO_BE_DISTRIBUTED: u64 = 120_525; - const VALIDATOR_STAKE: u64 = 374_999_998_287_840; - - let validator_pubkey = solana_sdk::pubkey::new_rand(); - let mut genesis_config = - create_genesis_config_with_leader(10, &validator_pubkey, VALIDATOR_STAKE).genesis_config; - - let bank = Bank::new_for_tests(&genesis_config); - let old_validator_lamports = bank.get_balance(&validator_pubkey); - bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); - let new_validator_lamports = bank.get_balance(&validator_pubkey); - assert_eq!( - new_validator_lamports, - old_validator_lamports + RENT_TO_BE_DISTRIBUTED - ); - - genesis_config - .accounts - .remove(&feature_set::no_overflow_rent_distribution::id()) - .unwrap(); - let bank = std::panic::AssertUnwindSafe(Bank::new_for_tests(&genesis_config)); - let old_validator_lamports = bank.get_balance(&validator_pubkey); - let new_validator_lamports = std::panic::catch_unwind(|| { - bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); - bank.get_balance(&validator_pubkey) - }); - - if let Ok(new_validator_lamports) = new_validator_lamports { - info!("asserting overflowing incorrect rent distribution"); - assert_ne!( - new_validator_lamports, - old_validator_lamports + RENT_TO_BE_DISTRIBUTED - ); - } else { - info!("NOT-asserting overflowing incorrect rent distribution"); - } -} - -#[test] -fn test_distribute_rent_to_validators_rent_paying() { - solana_logger::setup(); - - const RENT_PER_VALIDATOR: u64 = 55; - const TOTAL_RENT: u64 = RENT_PER_VALIDATOR * 4; - - let empty_validator = ValidatorVoteKeypairs::new_rand(); - let rent_paying_validator = ValidatorVoteKeypairs::new_rand(); - let becomes_rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); - let rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); - let keypairs = vec![ - &empty_validator, - &rent_paying_validator, - 
&becomes_rent_exempt_validator, - &rent_exempt_validator, - ]; - let genesis_config_info = create_genesis_config_with_vote_accounts( - sol_to_lamports(1000.), - &keypairs, - vec![sol_to_lamports(1000.); 4], - ); - let mut genesis_config = genesis_config_info.genesis_config; - genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default - - for deactivate_feature in [false, true] { - if deactivate_feature { - genesis_config - .accounts - .remove(&feature_set::prevent_rent_paying_rent_recipients::id()) - .unwrap(); - } - let bank = Bank::new_for_tests(&genesis_config); - let rent = bank.rent_collector().rent; - let rent_exempt_minimum = rent.minimum_balance(0); - - // Make one validator have an empty identity account - let mut empty_validator_account = bank - .get_account_with_fixed_root(&empty_validator.node_keypair.pubkey()) - .unwrap(); - empty_validator_account.set_lamports(0); - bank.store_account( - &empty_validator.node_keypair.pubkey(), - &empty_validator_account, - ); - - // Make one validator almost rent-exempt, less RENT_PER_VALIDATOR - let mut becomes_rent_exempt_validator_account = bank - .get_account_with_fixed_root(&becomes_rent_exempt_validator.node_keypair.pubkey()) - .unwrap(); - becomes_rent_exempt_validator_account - .set_lamports(rent_exempt_minimum - RENT_PER_VALIDATOR); - bank.store_account( - &becomes_rent_exempt_validator.node_keypair.pubkey(), - &becomes_rent_exempt_validator_account, - ); - - // Make one validator rent-exempt - let mut rent_exempt_validator_account = bank - .get_account_with_fixed_root(&rent_exempt_validator.node_keypair.pubkey()) - .unwrap(); - rent_exempt_validator_account.set_lamports(rent_exempt_minimum); - bank.store_account( - &rent_exempt_validator.node_keypair.pubkey(), - &rent_exempt_validator_account, - ); - - let get_rent_state = |bank: &Bank, address: &Pubkey| -> RentState { - let account = bank - .get_account_with_fixed_root(address) - .unwrap_or_default(); - RentState::from_account(&account, &rent) - }; - - // Assert starting RentStates - assert_eq!( - get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), - RentState::Uninitialized - ); - assert_eq!( - get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), - RentState::RentPaying { - lamports: 42, - data_size: 0, - } - ); - assert_eq!( - get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), - RentState::RentPaying { - lamports: rent_exempt_minimum - RENT_PER_VALIDATOR, - data_size: 0, - } - ); - assert_eq!( - get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), - RentState::RentExempt - ); - - let old_empty_validator_lamports = bank.get_balance(&empty_validator.node_keypair.pubkey()); - let old_rent_paying_validator_lamports = - bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); - let old_becomes_rent_exempt_validator_lamports = - bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); - let old_rent_exempt_validator_lamports = - bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - - bank.distribute_rent_to_validators(&bank.vote_accounts(), TOTAL_RENT); - - let new_empty_validator_lamports = bank.get_balance(&empty_validator.node_keypair.pubkey()); - let new_rent_paying_validator_lamports = - bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); - let new_becomes_rent_exempt_validator_lamports = - bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); - let new_rent_exempt_validator_lamports = - 
bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - - // Assert ending balances; rent should be withheld if test is active and ending RentState - // is RentPaying, ie. empty_validator and rent_paying_validator - assert_eq!( - if deactivate_feature { - old_empty_validator_lamports + RENT_PER_VALIDATOR - } else { - old_empty_validator_lamports - }, - new_empty_validator_lamports - ); - - assert_eq!( - if deactivate_feature { - old_rent_paying_validator_lamports + RENT_PER_VALIDATOR - } else { - old_rent_paying_validator_lamports - }, - new_rent_paying_validator_lamports - ); - - assert_eq!( - old_becomes_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, - new_becomes_rent_exempt_validator_lamports - ); - - assert_eq!( - old_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, - new_rent_exempt_validator_lamports - ); - - // Assert ending RentStates - assert_eq!( - if deactivate_feature { - RentState::RentPaying { - lamports: RENT_PER_VALIDATOR, - data_size: 0, - } - } else { - RentState::Uninitialized - }, - get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), - ); - assert_eq!( - if deactivate_feature { - RentState::RentPaying { - lamports: old_rent_paying_validator_lamports + RENT_PER_VALIDATOR, - data_size: 0, - } - } else { - RentState::RentPaying { - lamports: old_rent_paying_validator_lamports, - data_size: 0, - } - }, - get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), - ); - assert_eq!( - RentState::RentExempt, - get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), - ); - assert_eq!( - RentState::RentExempt, - get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), - ); - } -} - #[test] fn test_rent_exempt_executable_account() { let (mut genesis_config, mint_keypair) = create_genesis_config(100_000); @@ -2630,22 +2404,6 @@ fn test_transfer_to_sysvar() { assert_eq!(bank.get_balance(&sysvar_pubkey), 1_169_280); } -#[test] -fn test_bank_deposit() { - let bank = create_simple_test_bank(100); - - // Test new account - let key = solana_sdk::pubkey::new_rand(); - let new_balance = bank.deposit(&key, 10).unwrap(); - assert_eq!(new_balance, 10); - assert_eq!(bank.get_balance(&key), 10); - - // Existing account - let new_balance = bank.deposit(&key, 3).unwrap(); - assert_eq!(new_balance, 13); - assert_eq!(bank.get_balance(&key), 13); -} - #[test] fn test_bank_withdraw() { let bank = create_simple_test_bank(100); @@ -2657,7 +2415,7 @@ fn test_bank_withdraw() { Err(TransactionError::AccountNotFound) ); - bank.deposit(&key, 3).unwrap(); + test_utils::deposit(&bank, &key, 3).unwrap(); assert_eq!(bank.get_balance(&key), 3); // Low balance @@ -6676,7 +6434,7 @@ fn test_clean_nonrooted() { // Store some lamports in bank 1 let some_lamports = 123; let bank1 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1)); - bank1.deposit(&pubkey0, some_lamports).unwrap(); + test_utils::deposit(&bank1, &pubkey0, some_lamports).unwrap(); goto_end_of_slot(bank1.clone()); bank1.freeze(); bank1.flush_accounts_cache_slot_for_tests(); @@ -6686,7 +6444,7 @@ fn test_clean_nonrooted() { // Store some lamports for pubkey1 in bank 2, root bank 2 // bank2's parent is bank0 let bank2 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 2)); - bank2.deposit(&pubkey1, some_lamports).unwrap(); + test_utils::deposit(&bank2, &pubkey1, some_lamports).unwrap(); bank2.store_account(&pubkey0, &account_zero); goto_end_of_slot(bank2.clone()); bank2.freeze(); @@ -6701,7 +6459,7 @@ fn test_clean_nonrooted() { 
bank2.clean_accounts_for_tests(); let bank3 = Arc::new(Bank::new_from_parent(bank2, &Pubkey::default(), 3)); - bank3.deposit(&pubkey1, some_lamports + 1).unwrap(); + test_utils::deposit(&bank3, &pubkey1, some_lamports + 1).unwrap(); goto_end_of_slot(bank3.clone()); bank3.freeze(); bank3.squash(); @@ -6755,8 +6513,8 @@ fn test_shrink_candidate_slots_cached() { // Store some lamports in bank 1 let some_lamports = 123; let bank1 = Arc::new(new_from_parent(bank0)); - bank1.deposit(&pubkey1, some_lamports).unwrap(); - bank1.deposit(&pubkey2, some_lamports).unwrap(); + test_utils::deposit(&bank1, &pubkey1, some_lamports).unwrap(); + test_utils::deposit(&bank1, &pubkey2, some_lamports).unwrap(); goto_end_of_slot(bank1.clone()); bank1.freeze(); bank1.squash(); @@ -6766,7 +6524,7 @@ fn test_shrink_candidate_slots_cached() { // Store some lamports for pubkey1 in bank 2, root bank 2 let bank2 = Arc::new(new_from_parent(bank1)); - bank2.deposit(&pubkey1, some_lamports).unwrap(); + test_utils::deposit(&bank2, &pubkey1, some_lamports).unwrap(); bank2.store_account(&pubkey0, &account0); goto_end_of_slot(bank2.clone()); bank2.freeze(); @@ -6963,7 +6721,7 @@ fn test_add_builtin_account_inherited_cap_while_replacing() { assert_ne!(bank.capitalization(), bank.calculate_capitalization(true)); continue; } - bank.deposit(&program_id, 10).unwrap(); + test_utils::deposit(&bank, &program_id, 10).unwrap(); if pass == 2 { add_root_and_flush_write_cache(&bank); assert_eq!(bank.capitalization(), bank.calculate_capitalization(true)); @@ -6990,7 +6748,7 @@ fn test_add_builtin_account_squatted_while_not_replacing() { assert_ne!(bank.capitalization(), bank.calculate_capitalization(true)); continue; } - bank.deposit(&program_id, 10).unwrap(); + test_utils::deposit(&bank, &program_id, 10).unwrap(); if pass == 1 { add_root_and_flush_write_cache(&bank); assert_eq!(bank.capitalization(), bank.calculate_capitalization(true)); @@ -7113,7 +6871,7 @@ fn test_add_precompiled_account_inherited_cap_while_replacing() { assert_ne!(bank.capitalization(), bank.calculate_capitalization(true)); continue; } - bank.deposit(&program_id, 10).unwrap(); + test_utils::deposit(&bank, &program_id, 10).unwrap(); if pass == 2 { add_root_and_flush_write_cache(&bank); assert_eq!(bank.capitalization(), bank.calculate_capitalization(true)); @@ -7141,7 +6899,7 @@ fn test_add_precompiled_account_squatted_while_not_replacing() { assert_ne!(bank.capitalization(), bank.calculate_capitalization(true)); continue; } - bank.deposit(&program_id, 10).unwrap(); + test_utils::deposit(&bank, &program_id, 10).unwrap(); if pass == 1 { add_root_and_flush_write_cache(&bank); assert_eq!(bank.capitalization(), bank.calculate_capitalization(true)); @@ -7977,7 +7735,7 @@ fn test_compute_active_feature_set() { assert!(!feature_set.is_active(&test_feature)); // Depositing into the `test_feature` account should do nothing - bank.deposit(&test_feature, 42).unwrap(); + test_utils::deposit(&bank, &test_feature, 42).unwrap(); let (feature_set, new_activations) = bank.compute_active_feature_set(true); assert!(new_activations.is_empty()); assert!(!feature_set.is_active(&test_feature)); diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 376880e6327d6a..471d8679141b9f 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -720,6 +720,10 @@ pub mod update_hashes_per_tick6 { solana_sdk::declare_id!("FKu1qYwLQSiehz644H6Si65U5ZQ2cp9GxsyFUfYcuADv"); } +pub mod validate_fee_collector_account { + 
solana_sdk::declare_id!("prpFrMtgNmzaNzkPJg9o753fVvbHKqNrNTm76foJ2wm"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -895,6 +899,7 @@ lazy_static! { (update_hashes_per_tick4::id(), "Update desired hashes per tick to 7.6M"), (update_hashes_per_tick5::id(), "Update desired hashes per tick to 9.2M"), (update_hashes_per_tick6::id(), "Update desired hashes per tick to 10M"), + (validate_fee_collector_account::id(), "validate fee collector account #33888"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From e840b9759af877091350717c5f08bf3bff80a7f0 Mon Sep 17 00:00:00 2001 From: Liam Vovk <63673978+vovkman@users.noreply.github.com> Date: Mon, 6 Nov 2023 00:55:36 -0800 Subject: [PATCH 43/98] Remove RWLock from EntryNotifier because it causes perf degradation (#33797) * Remove RWLock from EntryNotifier because it causes perf degradation when entry notifications are enabled on geyser * remove unused RWLock * Remove RWLock --- core/src/validator.rs | 4 ++-- geyser-plugin-manager/src/geyser_plugin_service.rs | 10 +++++----- ledger/src/entry_notifier_interface.rs | 8 ++------ ledger/src/entry_notifier_service.rs | 11 ++++------- 4 files changed, 13 insertions(+), 20 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 9dede099d1778c..2becf9590330a3 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -58,7 +58,7 @@ use { }, blockstore_options::{BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions}, blockstore_processor::{self, TransactionStatusSender}, - entry_notifier_interface::EntryNotifierLock, + entry_notifier_interface::EntryNotifierArc, entry_notifier_service::{EntryNotifierSender, EntryNotifierService}, leader_schedule::FixedSchedule, leader_schedule_cache::LeaderScheduleCache, @@ -1690,7 +1690,7 @@ fn load_blockstore( start_progress: &Arc>, accounts_update_notifier: Option, transaction_notifier: Option, - entry_notifier: Option, + entry_notifier: Option, poh_timing_point_sender: Option, ) -> Result< ( diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index b8f9db49102dc7..b762c210e46dd7 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -12,7 +12,7 @@ use { crossbeam_channel::Receiver, log::*, solana_accounts_db::accounts_update_notifier_interface::AccountsUpdateNotifier, - solana_ledger::entry_notifier_interface::EntryNotifierLock, + solana_ledger::entry_notifier_interface::EntryNotifierArc, solana_rpc::{ optimistically_confirmed_bank_tracker::SlotNotification, transaction_notifier_interface::TransactionNotifierLock, @@ -35,7 +35,7 @@ pub struct GeyserPluginService { plugin_manager: Arc>, accounts_update_notifier: Option, transaction_notifier: Option, - entry_notifier: Option, + entry_notifier: Option, block_metadata_notifier: Option, } @@ -100,9 +100,9 @@ impl GeyserPluginService { None }; - let entry_notifier: Option = if entry_notifications_enabled { + let entry_notifier: Option = if entry_notifications_enabled { let entry_notifier = EntryNotifierImpl::new(plugin_manager.clone()); - Some(Arc::new(RwLock::new(entry_notifier))) + Some(Arc::new(entry_notifier)) } else { None }; @@ -164,7 +164,7 @@ impl GeyserPluginService { self.transaction_notifier.clone() } - pub fn get_entry_notifier(&self) -> Option { + pub fn get_entry_notifier(&self) -> Option { self.entry_notifier.clone() } diff --git 
a/ledger/src/entry_notifier_interface.rs b/ledger/src/entry_notifier_interface.rs
index de523fc979ab01..174be9e1b7f1f4 100644
--- a/ledger/src/entry_notifier_interface.rs
+++ b/ledger/src/entry_notifier_interface.rs
@@ -1,11 +1,7 @@
-use {
-    solana_entry::entry::EntrySummary,
-    solana_sdk::clock::Slot,
-    std::sync::{Arc, RwLock},
-};
+use {solana_entry::entry::EntrySummary, solana_sdk::clock::Slot, std::sync::Arc};
 
 pub trait EntryNotifier {
     fn notify_entry(&self, slot: Slot, index: usize, entry: &EntrySummary);
 }
 
-pub type EntryNotifierLock = Arc<RwLock<dyn EntryNotifier + Sync + Send>>;
+pub type EntryNotifierArc = Arc<dyn EntryNotifier + Sync + Send>;
diff --git a/ledger/src/entry_notifier_service.rs b/ledger/src/entry_notifier_service.rs
index 5e108c94e80578..ec7eae0bc75723 100644
--- a/ledger/src/entry_notifier_service.rs
+++ b/ledger/src/entry_notifier_service.rs
@@ -1,5 +1,5 @@
 use {
-    crate::entry_notifier_interface::EntryNotifierLock,
+    crate::entry_notifier_interface::EntryNotifierArc,
     crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender},
     solana_entry::entry::EntrySummary,
     solana_sdk::clock::Slot,
@@ -28,7 +28,7 @@ pub struct EntryNotifierService {
 }
 
 impl EntryNotifierService {
-    pub fn new(entry_notifier: EntryNotifierLock, exit: Arc<AtomicBool>) -> Self {
+    pub fn new(entry_notifier: EntryNotifierArc, exit: Arc<AtomicBool>) -> Self {
         let (entry_notification_sender, entry_notification_receiver) = unbounded();
         let thread_hdl = Builder::new()
             .name("solEntryNotif".to_string())
@@ -52,14 +52,11 @@ impl EntryNotifierService {
 
     fn notify_entry(
         entry_notification_receiver: &EntryNotifierReceiver,
-        entry_notifier: EntryNotifierLock,
+        entry_notifier: EntryNotifierArc,
     ) -> Result<(), RecvTimeoutError> {
         let EntryNotification { slot, index, entry } =
             entry_notification_receiver.recv_timeout(Duration::from_secs(1))?;
-        entry_notifier
-            .write()
-            .unwrap()
-            .notify_entry(slot, index, &entry);
+        entry_notifier.notify_entry(slot, index, &entry);
         Ok(())
     }
 
From 63fd5cf049a4b281178b76e58371190635d17cff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sylwester=20R=C4=85pa=C5=82a?=
Date: Mon, 6 Nov 2023 13:24:13 +0100
Subject: [PATCH 44/98] chore: impl decode for UiAccountData (#33632)

---
 account-decoder/src/lib.rs | 43 ++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 18 deletions(-)

diff --git a/account-decoder/src/lib.rs b/account-decoder/src/lib.rs
index 9905e15f5323cd..65b60ad5cfc0db 100644
--- a/account-decoder/src/lib.rs
+++ b/account-decoder/src/lib.rs
@@ -56,6 +56,30 @@ pub enum UiAccountData {
     Binary(String, UiAccountEncoding),
 }
 
+impl UiAccountData {
+    /// Returns decoded account data in binary format if possible
+    pub fn decode(&self) -> Option<Vec<u8>> {
+        match self {
+            UiAccountData::Json(_) => None,
+            UiAccountData::LegacyBinary(blob) => bs58::decode(blob).into_vec().ok(),
+            UiAccountData::Binary(blob, encoding) => match encoding {
+                UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
+                UiAccountEncoding::Base64 => BASE64_STANDARD.decode(blob).ok(),
+                UiAccountEncoding::Base64Zstd => {
+                    BASE64_STANDARD.decode(blob).ok().and_then(|zstd_data| {
+                        let mut data = vec![];
+                        zstd::stream::read::Decoder::new(zstd_data.as_slice())
+                            .and_then(|mut reader| reader.read_to_end(&mut data))
+                            .map(|_| data)
+                            .ok()
+                    })
+                }
+                UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
+            },
+        }
+    }
+}
+
 #[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Hash)]
 #[serde(rename_all = "camelCase")]
 pub enum UiAccountEncoding {
@@ -139,24 +163,7 @@ impl UiAccount {
     }
 
     pub fn decode<T: WritableAccount>(&self) -> Option<T> {
-        let data = match &self.data {
- UiAccountData::Json(_) => None, - UiAccountData::LegacyBinary(blob) => bs58::decode(blob).into_vec().ok(), - UiAccountData::Binary(blob, encoding) => match encoding { - UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(), - UiAccountEncoding::Base64 => BASE64_STANDARD.decode(blob).ok(), - UiAccountEncoding::Base64Zstd => { - BASE64_STANDARD.decode(blob).ok().and_then(|zstd_data| { - let mut data = vec![]; - zstd::stream::read::Decoder::new(zstd_data.as_slice()) - .and_then(|mut reader| reader.read_to_end(&mut data)) - .map(|_| data) - .ok() - }) - } - UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None, - }, - }?; + let data = self.data.decode()?; Some(T::create( self.lamports, data, From 0c3cab77fb7751c4f7e63a2449be4dbdc2dabe3b Mon Sep 17 00:00:00 2001 From: Max Kaplan Date: Mon, 6 Nov 2023 07:27:41 -0500 Subject: [PATCH 45/98] docs: updating apt install to apt upgrade (#33920) --- docs/src/validator/get-started/setup-a-validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/validator/get-started/setup-a-validator.md b/docs/src/validator/get-started/setup-a-validator.md index 6598400bda5a37..8379b6f1d1c4d1 100644 --- a/docs/src/validator/get-started/setup-a-validator.md +++ b/docs/src/validator/get-started/setup-a-validator.md @@ -131,7 +131,7 @@ Make sure you have the latest and greatest package versions on your server ``` sudo apt update -sudo apt install +sudo apt upgrade ``` ## Sol User From 75e598ece3ccf18c4fe2748e0d57db40aa3c69ac Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 6 Nov 2023 11:57:54 -0600 Subject: [PATCH 46/98] rekey stake redelegate feature (#33957) Co-authored-by: HaoranYi --- sdk/src/feature_set.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 471d8679141b9f..5357811ee2a738 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -283,7 +283,7 @@ pub mod stake_deactivate_delinquent_instruction { } pub mod stake_redelegate_instruction { - solana_sdk::declare_id!("GUrp5BKMyDazsAp9mBoVD6orE5ihXNRPC3jkBRfx6Lq7"); + solana_sdk::declare_id!("2KKG3C6RBnxQo9jVVrbzsoSh41TDXLK7gBc9gduyxSzW"); } pub mod vote_withdraw_authority_may_change_authorized_voter { From d6ac9bea84ec16e9d8ceee4fa4f2f29def8a8ffb Mon Sep 17 00:00:00 2001 From: Tyera Date: Mon, 6 Nov 2023 11:14:18 -0700 Subject: [PATCH 47/98] Geyser: return real parent blockhash, or default (#33873) Return real parent blockhash, or default --- core/src/replay_stage.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 8e355daeae44d2..082e0245f88319 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -2952,9 +2952,13 @@ impl ReplayStage { Self::record_rewards(bank, rewards_recorder_sender); if let Some(ref block_metadata_notifier) = block_metadata_notifier { let block_metadata_notifier = block_metadata_notifier.read().unwrap(); + let parent_blockhash = bank + .parent() + .map(|bank| bank.last_blockhash()) + .unwrap_or_default(); block_metadata_notifier.notify_block_metadata( bank.parent_slot(), - &bank.parent_hash().to_string(), + &parent_blockhash.to_string(), bank.slot(), &bank.last_blockhash().to_string(), &bank.rewards, From 6624a09d381dde44bc6abf06e2145fc25586631e Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Mon, 6 Nov 2023 10:32:19 -0800 Subject: [PATCH 48/98] [TieredStorage] Rename account-index to index-block 
(#33928) #### Problem The current tiered-storage code uses "account-index" to call index-block. This could lead to confusion especially as we start giving each offset/position/index a specific type. #### Summary of Changes This PR renames all structs/variables that use account-index to refer to index-block. --- accounts-db/src/tiered_storage.rs | 8 ++++---- accounts-db/src/tiered_storage/footer.rs | 18 +++++++++--------- accounts-db/src/tiered_storage/hot.rs | 10 +++++----- accounts-db/src/tiered_storage/index.rs | 12 ++++++------ accounts-db/src/tiered_storage/writer.rs | 2 +- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 6a9f0193fd0fee..829b0cb033b4f5 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -19,7 +19,7 @@ use { }, error::TieredStorageError, footer::{AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat}, - index::AccountIndexFormat, + index::IndexBlockFormat, readable::TieredStorageReader, solana_sdk::account::ReadableAccount, std::{ @@ -40,7 +40,7 @@ pub struct TieredStorageFormat { pub meta_entry_size: usize, pub account_meta_format: AccountMetaFormat, pub owners_block_format: OwnersBlockFormat, - pub account_index_format: AccountIndexFormat, + pub index_block_format: IndexBlockFormat, pub account_block_format: AccountBlockFormat, } @@ -236,7 +236,7 @@ mod tests { assert_eq!(tiered_storage_readonly.reader().unwrap().num_accounts(), 0); assert_eq!(footer.account_meta_format, HOT_FORMAT.account_meta_format); assert_eq!(footer.owners_block_format, HOT_FORMAT.owners_block_format); - assert_eq!(footer.account_index_format, HOT_FORMAT.account_index_format); + assert_eq!(footer.index_block_format, HOT_FORMAT.index_block_format); assert_eq!(footer.account_block_format, HOT_FORMAT.account_block_format); assert_eq!( tiered_storage_readonly.file_size().unwrap() as usize, @@ -379,7 +379,7 @@ mod tests { let expected_footer = TieredStorageFooter { account_meta_format: expected_format.account_meta_format, owners_block_format: expected_format.owners_block_format, - account_index_format: expected_format.account_index_format, + index_block_format: expected_format.index_block_format, account_block_format: expected_format.account_block_format, account_entry_count: expected_accounts.len() as u32, // Hash is not yet implemented, so we bypass the check diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index c88d665c4362e9..7763d8d5622a0a 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -1,6 +1,6 @@ use { crate::tiered_storage::{ - error::TieredStorageError, file::TieredStorageFile, index::AccountIndexFormat, + error::TieredStorageError, file::TieredStorageFile, index::IndexBlockFormat, mmap_utils::get_type, TieredStorageResult as TsResult, }, memmap2::Mmap, @@ -95,7 +95,7 @@ pub struct TieredStorageFooter { /// The format of the owners block. pub owners_block_format: OwnersBlockFormat, /// The format of the account index block. - pub account_index_format: AccountIndexFormat, + pub index_block_format: IndexBlockFormat, /// The format of the account block. pub account_block_format: AccountBlockFormat, @@ -120,7 +120,7 @@ pub struct TieredStorageFooter { // Offsets // Note that offset to the account blocks is omitted as it's always 0. /// The offset pointing to the first byte of the account index block. 
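As a reading aid for the renamed field: the index block lives at `index_block_offset` within the file and its entries are fixed size, so locating the n-th address entry is plain offset arithmetic. A minimal sketch, not part of the patch; the `Pubkey` element size is an assumption matching the `AddressAndOffset` format shown later in this series:

```rust
use solana_sdk::pubkey::Pubkey;

// Sketch: byte position of the n-th address entry in the index block,
// assuming AddressAndOffset layout (all addresses first, then offsets).
fn nth_address_entry_offset(index_block_offset: u64, n: usize) -> usize {
    index_block_offset as usize + std::mem::size_of::<Pubkey>() * n
}

fn main() {
    // With a 32-byte Pubkey, entry 3 starts 96 bytes into the index block.
    assert_eq!(nth_address_entry_offset(1024, 3), 1024 + 96);
}
```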
- pub account_index_offset: u64, + pub index_block_offset: u64, /// The offset pointing to the first byte of the owners block. pub owners_offset: u64, @@ -149,14 +149,14 @@ impl Default for TieredStorageFooter { Self { account_meta_format: AccountMetaFormat::default(), owners_block_format: OwnersBlockFormat::default(), - account_index_format: AccountIndexFormat::default(), + index_block_format: IndexBlockFormat::default(), account_block_format: AccountBlockFormat::default(), account_entry_count: 0, account_meta_entry_size: 0, account_block_size: 0, owner_count: 0, owner_entry_size: 0, - account_index_offset: 0, + index_block_offset: 0, owners_offset: 0, hash: Hash::new_unique(), min_account_address: Pubkey::default(), @@ -241,14 +241,14 @@ mod tests { let expected_footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - account_index_format: AccountIndexFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndOffset, account_block_format: AccountBlockFormat::AlignedRaw, account_entry_count: 300, account_meta_entry_size: 24, account_block_size: 4096, owner_count: 250, owner_entry_size: 32, - account_index_offset: 1069600, + index_block_offset: 1069600, owners_offset: 1081200, hash: Hash::new_unique(), min_account_address: Pubkey::default(), @@ -275,7 +275,7 @@ mod tests { fn test_footer_layout() { assert_eq!(offset_of!(TieredStorageFooter, account_meta_format), 0x00); assert_eq!(offset_of!(TieredStorageFooter, owners_block_format), 0x02); - assert_eq!(offset_of!(TieredStorageFooter, account_index_format), 0x04); + assert_eq!(offset_of!(TieredStorageFooter, index_block_format), 0x04); assert_eq!(offset_of!(TieredStorageFooter, account_block_format), 0x06); assert_eq!(offset_of!(TieredStorageFooter, account_entry_count), 0x08); assert_eq!( @@ -285,7 +285,7 @@ mod tests { assert_eq!(offset_of!(TieredStorageFooter, account_block_size), 0x10); assert_eq!(offset_of!(TieredStorageFooter, owner_count), 0x18); assert_eq!(offset_of!(TieredStorageFooter, owner_entry_size), 0x1C); - assert_eq!(offset_of!(TieredStorageFooter, account_index_offset), 0x20); + assert_eq!(offset_of!(TieredStorageFooter, index_block_offset), 0x20); assert_eq!(offset_of!(TieredStorageFooter, owners_offset), 0x28); assert_eq!(offset_of!(TieredStorageFooter, min_account_address), 0x30); assert_eq!(offset_of!(TieredStorageFooter, max_account_address), 0x50); diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 78271700686dd2..9e987f886de101 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -9,7 +9,7 @@ use { footer::{ AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter, }, - index::AccountIndexFormat, + index::IndexBlockFormat, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, mmap_utils::get_type, TieredStorageFormat, TieredStorageResult, @@ -25,7 +25,7 @@ pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat { meta_entry_size: std::mem::size_of::(), account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - account_index_format: AccountIndexFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndOffset, account_block_format: AccountBlockFormat::AlignedRaw, }; @@ -241,7 +241,7 @@ pub mod tests { FOOTER_SIZE, }, hot::{HotAccountMeta, HotStorageReader}, - index::AccountIndexFormat, + index::IndexBlockFormat, meta::{AccountMetaFlags, 
AccountMetaOptionalFields, TieredAccountMeta}, }, memoffset::offset_of, @@ -383,14 +383,14 @@ pub mod tests { let expected_footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - account_index_format: AccountIndexFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndOffset, account_block_format: AccountBlockFormat::AlignedRaw, account_entry_count: 300, account_meta_entry_size: 16, account_block_size: 4096, owner_count: 250, owner_entry_size: 32, - account_index_offset: 1069600, + index_block_offset: 1069600, owners_offset: 1081200, hash: Hash::new_unique(), min_account_address: Pubkey::default(), diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index 656343fb78fc8b..ad66f3c7455ff6 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -30,7 +30,7 @@ pub struct AccountIndexWriterEntry<'a> { num_enum::IntoPrimitive, num_enum::TryFromPrimitive, )] -pub enum AccountIndexFormat { +pub enum IndexBlockFormat { /// This format optimizes the storage size by storing only account addresses /// and offsets. It skips storing the size of account data by storing account /// block entries and index block entries in the same order. @@ -38,7 +38,7 @@ pub enum AccountIndexFormat { AddressAndOffset = 0, } -impl AccountIndexFormat { +impl IndexBlockFormat { /// Persists the specified index_entries to the specified file and returns /// the total number of bytes written. pub fn write_index_block( @@ -69,7 +69,7 @@ impl AccountIndexFormat { ) -> TieredStorageResult<&'a Pubkey> { let offset = match self { Self::AddressAndOffset => { - footer.account_index_offset as usize + std::mem::size_of::() * index + footer.index_block_offset as usize + std::mem::size_of::() * index } }; let (address, _) = get_type::(map, offset)?; @@ -86,7 +86,7 @@ impl AccountIndexFormat { ) -> TieredStorageResult { match self { Self::AddressAndOffset => { - let offset = footer.account_index_offset as usize + let offset = footer.index_block_offset as usize + std::mem::size_of::() * footer.account_entry_count as usize + index * std::mem::size_of::(); let (account_block_offset, _) = get_type(map, offset)?; @@ -134,11 +134,11 @@ mod tests { { let file = TieredStorageFile::new_writable(&path).unwrap(); - let indexer = AccountIndexFormat::AddressAndOffset; + let indexer = IndexBlockFormat::AddressAndOffset; indexer.write_index_block(&file, &index_entries).unwrap(); } - let indexer = AccountIndexFormat::AddressAndOffset; + let indexer = IndexBlockFormat::AddressAndOffset; let file = OpenOptions::new() .read(true) .create(false) diff --git a/accounts-db/src/tiered_storage/writer.rs b/accounts-db/src/tiered_storage/writer.rs index dece0e42732f49..113d331e4a15c4 100644 --- a/accounts-db/src/tiered_storage/writer.rs +++ b/accounts-db/src/tiered_storage/writer.rs @@ -46,7 +46,7 @@ impl<'format> TieredStorageWriter<'format> { account_meta_format: self.format.account_meta_format, owners_block_format: self.format.owners_block_format, account_block_format: self.format.account_block_format, - account_index_format: self.format.account_index_format, + index_block_format: self.format.index_block_format, account_entry_count: accounts .accounts .len() From ee29647f671bfa5e322ff9613f09f01780d6120e Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 6 Nov 2023 12:56:10 -0600 Subject: [PATCH 49/98] Remove Option<_> from Blockstore::get_rooted_block_time() return type (#33955) 
Instead of returning Result<Option<UnixTimestamp>>, return Result<UnixTimestamp> and
map None to an error. This makes the return type similar to that of
Blockstore::get_rooted_block().
---
 ledger/src/blockstore.rs | 8 ++++++--
 rpc/src/rpc.rs           | 4 ++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index e1893c1033998a..b7a592151e65ab 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -1967,14 +1967,18 @@ impl Blockstore {
         self.blocktime_cf.get(slot)
     }

-    pub fn get_rooted_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
+    pub fn get_rooted_block_time(&self, slot: Slot) -> Result<UnixTimestamp> {
         datapoint_info!(
             "blockstore-rpc-api",
             ("method", "get_rooted_block_time", String)
         );
         let _lock = self.check_lowest_cleanup_slot(slot)?;
+
         if self.is_root(slot) {
-            return self.blocktime_cf.get(slot);
+            return self
+                .blocktime_cf
+                .get(slot)?
+                .ok_or(BlockstoreError::SlotUnavailable);
         }
         Err(BlockstoreError::SlotNotRooted)
     }

diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 38f76dc019259c..5e62dff9ce55d3 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -1323,7 +1323,7 @@ impl JsonRpcRequestProcessor {
         {
             let result = self.blockstore.get_rooted_block_time(slot);
             self.check_blockstore_root(&result, slot)?;
-            if result.is_err() || matches!(result, Ok(None)) {
+            if result.is_err() {
                 if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
                     let bigtable_result = bigtable_ledger_storage.get_confirmed_block(slot).await;
                     self.check_bigtable_result(&bigtable_result)?;
@@ -1333,7 +1333,7 @@ impl JsonRpcRequestProcessor {
                 }
             }
             self.check_slot_cleaned_up(&result, slot)?;
-            Ok(result.ok().unwrap_or(None))
+            Ok(result.ok())
         } else {
             let r_bank_forks = self.bank_forks.read().unwrap();
             if let Some(bank) = r_bank_forks.get(slot) {

From 70d97d3261d4ccbb3f29cc114abc0ab8fd78f267 Mon Sep 17 00:00:00 2001
From: Brooks 
Date: Mon, 6 Nov 2023 14:06:43 -0500
Subject: [PATCH 50/98] Adds `iter_ones()` to RollingBitField (#33956)

---
 Cargo.lock                             |  1 +
 accounts-db/Cargo.toml                 |  1 +
 accounts-db/src/rolling_bit_field.rs   | 14 +++-
 .../src/rolling_bit_field/iterators.rs | 76 +++++++++++++++++++
 4 files changed, 91 insertions(+), 1 deletion(-)
 create mode 100644 accounts-db/src/rolling_bit_field/iterators.rs

diff --git a/Cargo.lock b/Cargo.lock
index c76ae0f6f19b53..98cc5a1c00d869 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5335,6 +5335,7 @@ dependencies = [
  "strum_macros",
  "tar",
  "tempfile",
+ "test-case",
  "thiserror",
 ]

diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml
index b4fcceea000552..6ce4d2f087e72d 100644
--- a/accounts-db/Cargo.toml
+++ b/accounts-db/Cargo.toml
@@ -79,6 +79,7 @@ solana-accounts-db = { path = ".", features = ["dev-context-only-utils"] }
 solana-logger = { workspace = true }
 solana-sdk = { workspace = true, features = ["dev-context-only-utils"] }
 static_assertions = { workspace = true }
+test-case = { workspace = true }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]

diff --git a/accounts-db/src/rolling_bit_field.rs b/accounts-db/src/rolling_bit_field.rs
index 65d3ff76b54ae7..73a71d7a084110 100644
--- a/accounts-db/src/rolling_bit_field.rs
+++ b/accounts-db/src/rolling_bit_field.rs
@@ -2,7 +2,11 @@
 //! Relies on there being a sliding window of key values. The key values continue to increase.
 //! Old key values are removed from the lesser values and do not accumulate.
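Ahead of the implementation in the diff that follows, a minimal usage sketch of the new API (it mirrors the test added at the end of this patch; the iteration order of any 'excess' bits is unspecified, hence the sort):

```rust
// Assumes solana-accounts-db is a dependency and this module is public;
// mirrors the patch's own test.
use solana_accounts_db::rolling_bit_field::RollingBitField;

fn main() {
    let mut field = RollingBitField::new(128); // window of 128 bits
    field.insert(7);
    field.insert(17);
    let mut ones: Vec<u64> = field.iter_ones().collect();
    ones.sort_unstable(); // excess bits iterate in nondeterministic order
    assert_eq!(ones, vec![7, 17]);
}
```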
-use {bv::BitVec, solana_nohash_hasher::IntSet, solana_sdk::clock::Slot}; +mod iterators; +use { + bv::BitVec, iterators::RollingBitFieldOnesIter, solana_nohash_hasher::IntSet, + solana_sdk::clock::Slot, +}; #[derive(Debug, Default, AbiExample, Clone)] pub struct RollingBitField { @@ -283,6 +287,14 @@ impl RollingBitField { } all } + + /// Returns an iterator over the rolling bit field + /// + /// The iterator yields all the 'set' bits. + /// Note, the iteration order of the bits in 'excess' is not deterministic. + pub fn iter_ones(&self) -> RollingBitFieldOnesIter<'_> { + RollingBitFieldOnesIter::new(self) + } } #[cfg(test)] diff --git a/accounts-db/src/rolling_bit_field/iterators.rs b/accounts-db/src/rolling_bit_field/iterators.rs new file mode 100644 index 00000000000000..dd075037ee119c --- /dev/null +++ b/accounts-db/src/rolling_bit_field/iterators.rs @@ -0,0 +1,76 @@ +//! Iterators for RollingBitField + +use {super::RollingBitField, std::ops::Range}; + +/// Iterate over the 'set' bits of a RollingBitField +#[derive(Debug)] +pub struct RollingBitFieldOnesIter<'a> { + rolling_bit_field: &'a RollingBitField, + excess_iter: std::collections::hash_set::Iter<'a, u64>, + bit_range: Range, +} + +impl<'a> RollingBitFieldOnesIter<'a> { + #[must_use] + pub fn new(rolling_bit_field: &'a RollingBitField) -> Self { + Self { + rolling_bit_field, + excess_iter: rolling_bit_field.excess.iter(), + bit_range: rolling_bit_field.min..rolling_bit_field.max_exclusive, + } + } +} + +impl Iterator for RollingBitFieldOnesIter<'_> { + type Item = u64; + + fn next(&mut self) -> Option { + // Iterate over the excess first + if let Some(excess) = self.excess_iter.next() { + return Some(*excess); + } + + // Then iterate over the bit vec + loop { + // If there are no more bits in the range, then we've iterated over everything and are done + let Some(bit) = self.bit_range.next() else { + return None; + }; + + if self.rolling_bit_field.contains_assume_in_range(&bit) { + break Some(bit); + } + } + } +} + +#[cfg(test)] +mod tests { + use {super::*, test_case::test_case}; + + #[test_case(128, vec![]; "empty")] + #[test_case(128, vec![128_007, 128_017, 128_107]; "without excess")] + #[test_case(128, vec![128_007, 128_017, 128_107, 3, 30, 300]; "with excess")] + // Even though these values are within the range, in an absolute sense, + // they will wrap around after multiples of 16. + #[test_case(16, vec![35, 40, 45 ])] + #[test_case(16, vec![ 40, 45, 50 ])] + #[test_case(16, vec![ 45, 50, 55 ])] + #[test_case(16, vec![ 50, 55, 60 ])] + #[test_case(16, vec![ 55, 60, 65 ])] + #[test_case(16, vec![ 60, 65, 70])] + fn test_rolling_bit_field_ones_iter(num_bits: u64, mut expected: Vec) { + let mut rolling_bit_field = RollingBitField::new(num_bits); + for val in &expected { + rolling_bit_field.insert(*val); + } + + let mut actual: Vec<_> = rolling_bit_field.iter_ones().collect(); + + // Since iteration order of the 'excess' is not deterministic, sort the 'actual' + // and 'expected' vectors to ensure they can compare deterministically. + actual.sort_unstable(); + expected.sort_unstable(); + assert_eq!(actual, expected); + } +} From da130b87d3c2483825cc8aee1d983ac8d2d0d455 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Mon, 6 Nov 2023 12:21:08 -0800 Subject: [PATCH 51/98] [TieredStorage] Add AccountOffset type (#33927) #### Problem TieredStorage conceptually has different offsets. 
However, the current code directly uses the same primitive type for accessing
offsets, which is error-prone as one could easily use one offset to access data
that should be accessed with a different offset type.

#### Summary of Changes
This PR introduces the AccountOffset type, which enables static checks on the
different types of TieredStorage offsets.
---
 accounts-db/src/tiered_storage/index.rs | 26 ++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs
index ad66f3c7455ff6..8c1f4bc79e555b 100644
--- a/accounts-db/src/tiered_storage/index.rs
+++ b/accounts-db/src/tiered_storage/index.rs
@@ -17,6 +17,15 @@ pub struct AccountIndexWriterEntry<'a> {
     pub intra_block_offset: u64,
 }

+/// The offset to an account stored inside its accounts block.
+/// This struct is used to access the meta and data of an account by looking through
+/// its accounts block.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct AccountOffset {
+    /// The offset to the accounts block that contains the account meta/data.
+    pub block: usize,
+}
+
 /// The index format of a tiered accounts file.
 #[repr(u16)]
 #[derive(
@@ -76,21 +85,22 @@ impl IndexBlockFormat {
         Ok(address)
     }

-    /// Returns the offset to the account block that contains the account
-    /// associated with the specified index to the index block.
-    pub fn get_account_block_offset(
+    /// Returns the offset to the account given the specified index.
+    pub fn get_account_offset(
        &self,
         map: &Mmap,
         footer: &TieredStorageFooter,
         index: usize,
-    ) -> TieredStorageResult<u64> {
+    ) -> TieredStorageResult<AccountOffset> {
         match self {
             Self::AddressAndOffset => {
                 let offset = footer.index_block_offset as usize
                     + std::mem::size_of::<Pubkey>() * footer.account_entry_count as usize
                     + index * std::mem::size_of::<u64>();
                 let (account_block_offset, _) = get_type(map, offset)?;
-                Ok(*account_block_offset)
+                Ok(AccountOffset {
+                    block: *account_block_offset,
+                })
             }
         }
     }
@@ -146,10 +156,8 @@ mod tests {
             .unwrap();
         let map = unsafe { MmapOptions::new().map(&file).unwrap() };
         for (i, index_entry) in index_entries.iter().enumerate() {
-            assert_eq!(
-                index_entry.block_offset,
-                indexer.get_account_block_offset(&map, &footer, i).unwrap()
-            );
+            let account_offset = indexer.get_account_offset(&map, &footer, i).unwrap();
+            assert_eq!(index_entry.block_offset, account_offset.block as u64);
             let address = indexer.get_account_address(&map, &footer, i).unwrap();
             assert_eq!(index_entry.address, address);
         }

From 8c69a0ec389372f91d136a9dcc9dc7610b235463 Mon Sep 17 00:00:00 2001
From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
Date: Mon, 6 Nov 2023 14:03:25 -0800
Subject: [PATCH 52/98] Remove RwLock on AccountsUpdateNotifier (#33960)

* Remove RwLock on AccountsUpdateNotifier
---
 .../src/accounts_db/geyser_plugin_utils.rs    | 25 ++++++-------------
 .../src/accounts_update_notifier_interface.rs |  4 +--
 .../src/geyser_plugin_service.rs              |  2 +-
 3 files changed, 10 insertions(+), 21 deletions(-)

diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs
index 0fbc11c07964cf..1efd678873f620 100644
--- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs
+++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs
@@ -56,8 +56,7 @@ impl AccountsDb {
         }

         let accounts_update_notifier = self.accounts_update_notifier.as_ref().unwrap();
-        let notifier = &accounts_update_notifier.read().unwrap();
-        notifier.notify_end_of_restore_from_snapshot();
+
accounts_update_notifier.notify_end_of_restore_from_snapshot(); notify_stats.report(); } @@ -72,8 +71,7 @@ impl AccountsDb { P: Iterator, { if let Some(accounts_update_notifier) = &self.accounts_update_notifier { - let notifier = &accounts_update_notifier.read().unwrap(); - notifier.notify_account_update( + accounts_update_notifier.notify_account_update( slot, account, txn, @@ -121,13 +119,7 @@ impl AccountsDb { mut accounts_to_stream: HashMap, notify_stats: &mut GeyserPluginNotifyAtSnapshotRestoreStats, ) { - let notifier = self - .accounts_update_notifier - .as_ref() - .unwrap() - .read() - .unwrap(); - + let notifier = self.accounts_update_notifier.as_ref().unwrap(); let mut measure_notify = Measure::start("accountsdb-plugin-notifying-accounts"); let local_write_version = 0; for (_, mut account) in accounts_to_stream.drain() { @@ -177,7 +169,7 @@ pub mod tests { }, std::sync::{ atomic::{AtomicBool, Ordering}, - Arc, RwLock, + Arc, }, }; @@ -246,12 +238,11 @@ pub mod tests { accounts.store_uncached(slot0, &[(&key2, &account2)]); - let notifier = Arc::new(RwLock::new(notifier)); + let notifier = Arc::new(notifier); accounts.set_geyser_plugin_notifer(Some(notifier.clone())); accounts.notify_account_restore_from_snapshot(); - let notifier = notifier.write().unwrap(); assert_eq!(notifier.accounts_notified.get(&key1).unwrap().len(), 1); assert_eq!( notifier.accounts_notified.get(&key1).unwrap()[0] @@ -303,12 +294,11 @@ pub mod tests { AccountSharedData::new(account3_lamports, 1, AccountSharedData::default().owner()); accounts.store_uncached(slot1, &[(&key3, &account3)]); - let notifier = Arc::new(RwLock::new(notifier)); + let notifier = Arc::new(notifier); accounts.set_geyser_plugin_notifer(Some(notifier.clone())); accounts.notify_account_restore_from_snapshot(); - let notifier = notifier.write().unwrap(); assert_eq!(notifier.accounts_notified.get(&key1).unwrap().len(), 1); assert_eq!( notifier.accounts_notified.get(&key1).unwrap()[0] @@ -342,7 +332,7 @@ pub mod tests { let notifier = GeyserTestPlugin::default(); - let notifier = Arc::new(RwLock::new(notifier)); + let notifier = Arc::new(notifier); accounts.set_geyser_plugin_notifer(Some(notifier.clone())); // Account with key1 is updated twice in two different slots -- should only get notified twice. 
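The shape of this change generalizes: when every method on the notifier trait takes `&self`, wrapping the trait object in a `RwLock` adds lock traffic without adding any capability. A self-contained sketch of the resulting pattern (names here are illustrative, not from this diff):

```rust
use std::sync::Arc;

trait Notifier: Send + Sync {
    fn notify(&self, slot: u64); // &self only: no exclusive access needed
}

struct LogNotifier;
impl Notifier for LogNotifier {
    fn notify(&self, slot: u64) {
        println!("notified for slot {slot}");
    }
}

fn main() {
    // Arc<dyn Notifier> is already shareable across threads; no RwLock.
    let notifier: Arc<dyn Notifier> = Arc::new(LogNotifier);
    let clone = Arc::clone(&notifier);
    std::thread::spawn(move || clone.notify(1)).join().unwrap();
    notifier.notify(2);
}
```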
@@ -372,7 +362,6 @@ pub mod tests { AccountSharedData::new(account3_lamports, 1, AccountSharedData::default().owner()); accounts.store_cached((slot1, &[(&key3, &account3)][..]), None); - let notifier = notifier.write().unwrap(); assert_eq!(notifier.accounts_notified.get(&key1).unwrap().len(), 2); assert_eq!( notifier.accounts_notified.get(&key1).unwrap()[0] diff --git a/accounts-db/src/accounts_update_notifier_interface.rs b/accounts-db/src/accounts_update_notifier_interface.rs index ae31cb06d32339..ec86fce8cd6898 100644 --- a/accounts-db/src/accounts_update_notifier_interface.rs +++ b/accounts-db/src/accounts_update_notifier_interface.rs @@ -3,7 +3,7 @@ use { solana_sdk::{ account::AccountSharedData, clock::Slot, pubkey::Pubkey, transaction::SanitizedTransaction, }, - std::sync::{Arc, RwLock}, + std::sync::Arc, }; pub trait AccountsUpdateNotifierInterface: std::fmt::Debug { @@ -25,4 +25,4 @@ pub trait AccountsUpdateNotifierInterface: std::fmt::Debug { fn notify_end_of_restore_from_snapshot(&self); } -pub type AccountsUpdateNotifier = Arc>; +pub type AccountsUpdateNotifier = Arc; diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index b762c210e46dd7..a95c1b7e1a18c5 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -87,7 +87,7 @@ impl GeyserPluginService { if account_data_notifications_enabled { let accounts_update_notifier = AccountsUpdateNotifierImpl::new(plugin_manager.clone()); - Some(Arc::new(RwLock::new(accounts_update_notifier))) + Some(Arc::new(accounts_update_notifier)) } else { None }; From ec0ddc94687cfda837a1f5df78ab9eab099e6b4b Mon Sep 17 00:00:00 2001 From: acheron <98934430+acheroncrypto@users.noreply.github.com> Date: Tue, 7 Nov 2023 01:06:49 +0100 Subject: [PATCH 53/98] Fix `solana-install init` making unnecessary API requests (#33949) * Fix `solana-install init` making unnecessary API requests * Add `is_init` check * chore: Move `semver_update_type` to where it's being used --- install/src/command.rs | 86 ++++++++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/install/src/command.rs b/install/src/command.rs index ac53f5fe2b5fd5..ed8d37ff0f3b8e 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -968,58 +968,62 @@ pub fn update(config_file: &str, check_only: bool) -> Result { pub fn init_or_update(config_file: &str, is_init: bool, check_only: bool) -> Result { let mut config = Config::load(config_file)?; - let semver_update_type = if is_init { - SemverUpdateType::Fixed - } else { - SemverUpdateType::Patch - }; - let (updated_version, download_url_and_sha256, release_dir) = if let Some(explicit_release) = &config.explicit_release { match explicit_release { ExplicitRelease::Semver(current_release_semver) => { - let progress_bar = new_spinner_progress_bar(); - progress_bar.set_message(format!("{LOOKING_GLASS}Checking for updates...")); - - let github_release = check_for_newer_github_release( - current_release_semver, - semver_update_type, - is_init, - )?; - - progress_bar.finish_and_clear(); + let release_dir = config.release_dir(current_release_semver); + if is_init && release_dir.exists() { + (current_release_semver.to_owned(), None, release_dir) + } else { + let progress_bar = new_spinner_progress_bar(); + progress_bar.set_message(format!("{LOOKING_GLASS}Checking for updates...")); - match github_release { - None => { - return Err(format!("Unknown release: 
{current_release_semver}")); - } - Some(release_semver) => { - if release_semver == *current_release_semver { - if let Ok(active_release_version) = load_release_version( - &config.active_release_dir().join("version.yml"), - ) { - if format!("v{current_release_semver}") - == active_release_version.channel - { - println!( + let semver_update_type = if is_init { + SemverUpdateType::Fixed + } else { + SemverUpdateType::Patch + }; + let github_release = check_for_newer_github_release( + current_release_semver, + semver_update_type, + is_init, + )?; + + progress_bar.finish_and_clear(); + + match github_release { + None => { + return Err(format!("Unknown release: {current_release_semver}")); + } + Some(release_semver) => { + if release_semver == *current_release_semver { + if let Ok(active_release_version) = load_release_version( + &config.active_release_dir().join("version.yml"), + ) { + if format!("v{current_release_semver}") + == active_release_version.channel + { + println!( "Install is up to date. {release_semver} is the latest compatible release" ); - return Ok(false); + return Ok(false); + } } } + config.explicit_release = + Some(ExplicitRelease::Semver(release_semver.clone())); + + let release_dir = config.release_dir(&release_semver); + let download_url_and_sha256 = if release_dir.exists() { + // Release already present in the cache + None + } else { + Some((github_release_download_url(&release_semver), None)) + }; + (release_semver, download_url_and_sha256, release_dir) } - config.explicit_release = - Some(ExplicitRelease::Semver(release_semver.clone())); - - let release_dir = config.release_dir(&release_semver); - let download_url_and_sha256 = if release_dir.exists() { - // Release already present in the cache - None - } else { - Some((github_release_download_url(&release_semver), None)) - }; - (release_semver, download_url_and_sha256, release_dir) } } } From b8115b430343efdcda4e6614e01256ce46072cc5 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 7 Nov 2023 09:10:59 -0600 Subject: [PATCH 54/98] chunk all ancient append vecs (#33909) * chunk all ancient append vecs * fix a test comments * remove unneeded dead_code attr * do full chunking when pack is used to create ancient storage * refacotr and fix tests * clippy * add cache hash file stats * comments * fix test * Update accounts-db/src/accounts_db.rs Co-authored-by: Brooks * Update accounts-db/src/accounts_db.rs Co-authored-by: Brooks * test_case * remove commented out code * remove hash cache data stats * typo --------- Co-authored-by: HaoranYi Co-authored-by: HaoranYi Co-authored-by: Brooks --- accounts-db/src/accounts_db.rs | 99 ++++++++++++++++++++---------- accounts-db/src/cache_hash_data.rs | 2 +- 2 files changed, 69 insertions(+), 32 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2084c8197b7c24..65c6a9a52cb23e 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1439,7 +1439,6 @@ pub struct AccountsDb { pub storage: AccountStorage, - #[allow(dead_code)] /// from AccountsDbConfig create_ancient_storage: CreateAncientStorage, @@ -1729,24 +1728,33 @@ impl SplitAncientStorages { /// So a slot remains in the same chunk whenever it is included in the accounts hash. /// When the slot gets deleted or gets consumed in an ancient append vec, it will no longer be in its chunk. /// The results of scanning a chunk of appendvecs can be cached to avoid scanning large amounts of data over and over. 
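Before the diff to `SplitAncientStorages::new` just below: the Pack-vs-Append dispatch this commit introduces reduces to a small `Option` idiom, where `None` means "no single ancient boundary exists" and the `is_some_and` check used later in the patch then treats no storage as ancient. A standalone sketch:

```rust
// None models the Pack case: no slot is considered ancient for the scan.
fn is_ancient(slot: u64, oldest_non_ancient_slot: Option<u64>) -> bool {
    oldest_non_ancient_slot.is_some_and(|oldest| slot < oldest)
}

fn main() {
    assert!(!is_ancient(5, None));
    assert!(is_ancient(5, Some(10)));
    assert!(!is_ancient(10, Some(10)));
}
```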
-    fn new(oldest_non_ancient_slot: Slot, snapshot_storages: &SortedStorages) -> Self {
+    fn new(oldest_non_ancient_slot: Option<Slot>, snapshot_storages: &SortedStorages) -> Self {
         let range = snapshot_storages.range();

-        // any ancient append vecs should definitely be cached
-        // We need to break the ranges into:
-        // 1. individual ancient append vecs (may be empty)
-        // 2. first unevenly divided chunk starting at 1 epoch old slot (may be empty)
-        // 3. evenly divided full chunks in the middle
-        // 4. unevenly divided chunk of most recent slots (may be empty)
-        let ancient_slots =
-            Self::get_ancient_slots(oldest_non_ancient_slot, snapshot_storages, |storage| {
-                storage.capacity() > get_ancient_append_vec_capacity() * 50 / 100
-            });
+        let (ancient_slots, first_non_ancient_slot) = if let Some(oldest_non_ancient_slot) =
+            oldest_non_ancient_slot
+        {
+            // any ancient append vecs should definitely be cached
+            // We need to break the ranges into:
+            // 1. individual ancient append vecs (may be empty)
+            // 2. first unevenly divided chunk starting at 1 epoch old slot (may be empty)
+            // 3. evenly divided full chunks in the middle
+            // 4. unevenly divided chunk of most recent slots (may be empty)
+            let ancient_slots =
+                Self::get_ancient_slots(oldest_non_ancient_slot, snapshot_storages, |storage| {
+                    storage.capacity() > get_ancient_append_vec_capacity() * 50 / 100
+                });
+
+            let first_non_ancient_slot = ancient_slots
+                .last()
+                .map(|last_ancient_slot| last_ancient_slot.saturating_add(1))
+                .unwrap_or(range.start);
+
+            (ancient_slots, first_non_ancient_slot)
+        } else {
+            (vec![], range.start)
+        };

-        let first_non_ancient_slot = ancient_slots
-            .last()
-            .map(|last_ancient_slot| last_ancient_slot.saturating_add(1))
-            .unwrap_or(range.start);
         Self::new_with_ancient_info(range, ancient_slots, first_non_ancient_slot)
     }

@@ -7159,21 +7167,32 @@ impl AccountsDb {
         }
     }

-    /// if ancient append vecs are enabled, return a slot 'max_slot_inclusive' - (slots_per_epoch - `self.ancient_append_vec_offset`)
-    /// otherwise, return 0
+    /// `oldest_non_ancient_slot` is only applicable when `Append` is used for ancient append vec packing.
+    /// If `Pack` is used for ancient append vec packing, return None.
+    /// Otherwise, return a slot 'max_slot_inclusive' - (slots_per_epoch - `self.ancient_append_vec_offset`)
+    /// If ancient append vecs are not enabled, return 0.
     fn get_oldest_non_ancient_slot_for_hash_calc_scan(
         &self,
         max_slot_inclusive: Slot,
         config: &CalcAccountsHashConfig<'_>,
-    ) -> Slot {
-        if self.ancient_append_vec_offset.is_some() {
+    ) -> Option<Slot> {
+        if self.create_ancient_storage == CreateAncientStorage::Pack {
+            // oldest_non_ancient_slot is only applicable when ancient storages are created with `Append`. When ancient storages are created with `Pack`, ancient storages
+            // can be created in between non-ancient storages. Return None, because oldest_non_ancient_slot is not applicable here.
+            None
+        } else if self.ancient_append_vec_offset.is_some() {
             // For performance, this is required when ancient appendvecs are enabled
-            self.get_oldest_non_ancient_slot_from_slot(config.epoch_schedule, max_slot_inclusive)
+            Some(
+                self.get_oldest_non_ancient_slot_from_slot(
+                    config.epoch_schedule,
+                    max_slot_inclusive,
+                ),
+            )
         } else {
             // This causes the entire range to be chunked together, treating older append vecs just like new ones.
             // This performs well if there are many old append vecs that haven't been cleaned yet.
// 0 will have the effect of causing ALL older append vecs to be chunked together, just like every other append vec. - 0 + Some(0) } } @@ -7313,7 +7332,11 @@ impl AccountsDb { let mut init_accum = true; // load from cache failed, so create the cache file for this chunk for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { - let ancient = slot < oldest_non_ancient_slot; + let ancient = + oldest_non_ancient_slot.is_some_and(|oldest_non_ancient_slot| { + slot < oldest_non_ancient_slot + }); + let (_, scan_us) = measure_us!(if let Some(storage) = storage { if init_accum { let range = bin_range.end - bin_range.start; @@ -9997,6 +10020,7 @@ pub mod tests { sync::atomic::AtomicBool, thread::{self, Builder, JoinHandle}, }, + test_case::test_case, }; fn linear_ancestors(end_slot: u64) -> Ancestors { @@ -16326,9 +16350,22 @@ pub mod tests { assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), 0); } - #[test] - fn test_get_oldest_non_ancient_slot_for_hash_calc_scan() { + #[test_case(CreateAncientStorage::Append; "append")] + #[test_case(CreateAncientStorage::Pack; "pack")] + fn test_get_oldest_non_ancient_slot_for_hash_calc_scan( + create_ancient_storage: CreateAncientStorage, + ) { + let expected = |v| { + if create_ancient_storage == CreateAncientStorage::Append { + Some(v) + } else { + None + } + }; + let mut db = AccountsDb::new_single_for_tests(); + db.create_ancient_storage = create_ancient_storage; + let config = CalcAccountsHashConfig::default(); let slot = config.epoch_schedule.slots_per_epoch; let slots_per_epoch = config.epoch_schedule.slots_per_epoch; @@ -16337,23 +16374,23 @@ pub mod tests { // no ancient append vecs, so always 0 assert_eq!( db.get_oldest_non_ancient_slot_for_hash_calc_scan(slots_per_epoch + offset, &config), - 0 + expected(0) ); // ancient append vecs enabled (but at 0 offset), so can be non-zero db.ancient_append_vec_offset = Some(0); // 0..=(slots_per_epoch - 1) are all non-ancient assert_eq!( db.get_oldest_non_ancient_slot_for_hash_calc_scan(slots_per_epoch - 1, &config), - 0 + expected(0) ); // 1..=slots_per_epoch are all non-ancient, so 1 is oldest non ancient assert_eq!( db.get_oldest_non_ancient_slot_for_hash_calc_scan(slots_per_epoch, &config), - 1 + expected(1) ); assert_eq!( db.get_oldest_non_ancient_slot_for_hash_calc_scan(slots_per_epoch + offset, &config), - offset + 1 + expected(offset + 1) ); } @@ -16426,7 +16463,7 @@ pub mod tests { fn test_split_storages_ancient_chunks() { let storages = SortedStorages::empty(); assert_eq!(storages.max_slot_inclusive(), 0); - let result = SplitAncientStorages::new(0, &storages); + let result = SplitAncientStorages::new(Some(0), &storages); assert_eq!(result, SplitAncientStorages::default()); } @@ -16776,7 +16813,7 @@ pub mod tests { // 1 = all storages are non-ancient // 2 = ancient slots: 1 // 3 = ancient slots: 1, 2 - // 4 = ancient slots: 1, 2, 3 (except 2 is large, 3 is not, so treat 3 as non-ancient) + // 4 = ancient slots: 1, 2 (except 2 is large, 3 is not, so treat 3 as non-ancient) // 5 = ... 
for oldest_non_ancient_slot in 0..6 { let ancient_slots = SplitAncientStorages::get_ancient_slots( diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index e136be4f11713c..c839a8338c2fc2 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -49,7 +49,7 @@ pub(crate) struct CacheHashDataFile { } impl CacheHashDataFileReference { - /// convert the open file refrence to a mmapped file that can be returned as a slice + /// convert the open file reference to a mmapped file that can be returned as a slice pub(crate) fn map(&self) -> Result { let file_len = self.file_len; let mut m1 = Measure::start("read_file"); From b013c03afaedd939aa9a74a8cbb6209f7d3fb4a6 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Tue, 7 Nov 2023 10:26:21 -0800 Subject: [PATCH 55/98] [TieredStorage] Add IndexOffset type (#33929) #### Problem TieredStorage conceptually has different offsets. However, the current code directly uses the same primitive type for accessing offsets, which is error-prone as one could easily use one offset to access data that should be accessed with a different offset type. #### Summary of Changes This PR adds IndexOffset type -- a struct for obtaining the ith entry inside the index-block to obtain account's offset and address. --- accounts-db/src/tiered_storage/index.rs | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index 8c1f4bc79e555b..cd8b2a33c82529 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -26,6 +26,12 @@ pub struct AccountOffset { pub block: usize, } +/// The offset to an account/address entry in the accounts index block. +/// This can be used to obtain the AccountOffset and address by looking through +/// the accounts index block. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct IndexOffset(usize); + /// The index format of a tiered accounts file. 
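Together with `AccountOffset` from PATCH 51, this is the classic newtype pattern: each kind of raw offset gets its own wrapper type, so handing the wrong one to an accessor becomes a compile error rather than a silent misread. A minimal sketch of the guarantee (the struct shapes mirror these diffs; the arithmetic is a placeholder):

```rust
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct IndexOffset(usize); // position of an entry within the index block

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct AccountOffset {
    block: usize, // offset of the accounts block holding the account
}

// Only an IndexOffset may be used to look up an AccountOffset.
fn account_offset_at(at: IndexOffset) -> AccountOffset {
    AccountOffset { block: at.0 * 8 } // placeholder arithmetic
}

fn main() {
    let acct = account_offset_at(IndexOffset(3));
    // account_offset_at(acct); // rejected at compile time: wrong type
    assert_eq!(acct, AccountOffset { block: 24 });
}
```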
#[repr(u16)] #[derive( @@ -74,11 +80,11 @@ impl IndexBlockFormat { &self, map: &'a Mmap, footer: &TieredStorageFooter, - index: usize, + offset: IndexOffset, ) -> TieredStorageResult<&'a Pubkey> { let offset = match self { Self::AddressAndOffset => { - footer.index_block_offset as usize + std::mem::size_of::() * index + footer.index_block_offset as usize + std::mem::size_of::() * offset.0 } }; let (address, _) = get_type::(map, offset)?; @@ -90,13 +96,13 @@ impl IndexBlockFormat { &self, map: &Mmap, footer: &TieredStorageFooter, - index: usize, + offset: IndexOffset, ) -> TieredStorageResult { match self { Self::AddressAndOffset => { let offset = footer.index_block_offset as usize + std::mem::size_of::() * footer.account_entry_count as usize - + index * std::mem::size_of::(); + + offset.0 * std::mem::size_of::(); let (account_block_offset, _) = get_type(map, offset)?; Ok(AccountOffset { block: *account_block_offset, @@ -156,9 +162,13 @@ mod tests { .unwrap(); let map = unsafe { MmapOptions::new().map(&file).unwrap() }; for (i, index_entry) in index_entries.iter().enumerate() { - let account_offset = indexer.get_account_offset(&map, &footer, i).unwrap(); + let account_offset = indexer + .get_account_offset(&map, &footer, IndexOffset(i)) + .unwrap(); assert_eq!(index_entry.block_offset, account_offset.block as u64); - let address = indexer.get_account_address(&map, &footer, i).unwrap(); + let address = indexer + .get_account_address(&map, &footer, IndexOffset(i)) + .unwrap(); assert_eq!(index_entry.address, address); } } From eba1b2d3e39b516dae736f07438d0b498c08191b Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Tue, 7 Nov 2023 10:28:56 -0800 Subject: [PATCH 56/98] Remove RwLock on TransactionNotifier (#33962) * Remove RwLock on TransactionNotifier --- core/src/validator.rs | 6 +++--- .../src/geyser_plugin_service.rs | 10 +++++----- rpc/src/transaction_notifier_interface.rs | 4 ++-- rpc/src/transaction_status_service.rs | 19 +++++++++---------- 4 files changed, 19 insertions(+), 20 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 2becf9590330a3..4aa6fb992a0a42 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -83,7 +83,7 @@ use { rpc_pubsub_service::{PubSubConfig, PubSubService}, rpc_service::JsonRpcService, rpc_subscriptions::RpcSubscriptions, - transaction_notifier_interface::TransactionNotifierLock, + transaction_notifier_interface::TransactionNotifierArc, transaction_status_service::TransactionStatusService, }, solana_runtime::{ @@ -1689,7 +1689,7 @@ fn load_blockstore( exit: Arc, start_progress: &Arc>, accounts_update_notifier: Option, - transaction_notifier: Option, + transaction_notifier: Option, entry_notifier: Option, poh_timing_point_sender: Option, ) -> Result< @@ -2167,7 +2167,7 @@ fn initialize_rpc_transaction_history_services( exit: Arc, enable_rpc_transaction_history: bool, enable_extended_tx_metadata_storage: bool, - transaction_notifier: Option, + transaction_notifier: Option, ) -> TransactionHistoryServices { let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root())); let (transaction_status_sender, transaction_status_receiver) = unbounded(); diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index a95c1b7e1a18c5..83ab9284cecb2a 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -15,7 +15,7 @@ use { 
solana_ledger::entry_notifier_interface::EntryNotifierArc, solana_rpc::{ optimistically_confirmed_bank_tracker::SlotNotification, - transaction_notifier_interface::TransactionNotifierLock, + transaction_notifier_interface::TransactionNotifierArc, }, std::{ path::{Path, PathBuf}, @@ -34,7 +34,7 @@ pub struct GeyserPluginService { slot_status_observer: Option, plugin_manager: Arc>, accounts_update_notifier: Option, - transaction_notifier: Option, + transaction_notifier: Option, entry_notifier: Option, block_metadata_notifier: Option, } @@ -92,10 +92,10 @@ impl GeyserPluginService { None }; - let transaction_notifier: Option = + let transaction_notifier: Option = if transaction_notifications_enabled { let transaction_notifier = TransactionNotifierImpl::new(plugin_manager.clone()); - Some(Arc::new(RwLock::new(transaction_notifier))) + Some(Arc::new(transaction_notifier)) } else { None }; @@ -160,7 +160,7 @@ impl GeyserPluginService { self.accounts_update_notifier.clone() } - pub fn get_transaction_notifier(&self) -> Option { + pub fn get_transaction_notifier(&self) -> Option { self.transaction_notifier.clone() } diff --git a/rpc/src/transaction_notifier_interface.rs b/rpc/src/transaction_notifier_interface.rs index ab765d1207fe27..d09a207b1c6e18 100644 --- a/rpc/src/transaction_notifier_interface.rs +++ b/rpc/src/transaction_notifier_interface.rs @@ -1,7 +1,7 @@ use { solana_sdk::{clock::Slot, signature::Signature, transaction::SanitizedTransaction}, solana_transaction_status::TransactionStatusMeta, - std::sync::{Arc, RwLock}, + std::sync::Arc, }; pub trait TransactionNotifier { @@ -15,4 +15,4 @@ pub trait TransactionNotifier { ); } -pub type TransactionNotifierLock = Arc>; +pub type TransactionNotifierArc = Arc; diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 193efb69fa481f..b98f0831518675 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -1,5 +1,5 @@ use { - crate::transaction_notifier_interface::TransactionNotifierLock, + crate::transaction_notifier_interface::TransactionNotifierArc, crossbeam_channel::{Receiver, RecvTimeoutError}, itertools::izip, solana_accounts_db::transaction_results::{DurableNonceFee, TransactionExecutionDetails}, @@ -29,7 +29,7 @@ impl TransactionStatusService { write_transaction_status_receiver: Receiver, max_complete_transaction_status_slot: Arc, enable_rpc_transaction_history: bool, - transaction_notifier: Option, + transaction_notifier: Option, blockstore: Arc, enable_extended_tx_metadata_storage: bool, exit: Arc, @@ -60,7 +60,7 @@ impl TransactionStatusService { write_transaction_status_receiver: &Receiver, max_complete_transaction_status_slot: &Arc, enable_rpc_transaction_history: bool, - transaction_notifier: Option, + transaction_notifier: Option, blockstore: &Blockstore, enable_extended_tx_metadata_storage: bool, ) -> Result<(), RecvTimeoutError> { @@ -169,7 +169,7 @@ impl TransactionStatusService { }; if let Some(transaction_notifier) = transaction_notifier.as_ref() { - transaction_notifier.write().unwrap().notify_transaction( + transaction_notifier.notify_transaction( slot, transaction_index, transaction.signature(), @@ -255,7 +255,7 @@ pub(crate) mod tests { std::{ sync::{ atomic::{AtomicBool, Ordering}, - Arc, RwLock, + Arc, }, thread::sleep, time::Duration, @@ -432,7 +432,7 @@ pub(crate) mod tests { transaction_indexes: vec![transaction_index], }; - let test_notifier = Arc::new(RwLock::new(TestTransactionNotifier::new())); + let test_notifier = 
Arc::new(TestTransactionNotifier::new()); let exit = Arc::new(AtomicBool::new(false)); let transaction_status_service = TransactionStatusService::new( @@ -452,16 +452,15 @@ pub(crate) mod tests { exit.store(true, Ordering::Relaxed); transaction_status_service.join().unwrap(); - let notifier = test_notifier.read().unwrap(); - assert_eq!(notifier.notifications.len(), 1); + assert_eq!(test_notifier.notifications.len(), 1); let key = TestNotifierKey { slot, transaction_index, signature, }; - assert!(notifier.notifications.contains_key(&key)); + assert!(test_notifier.notifications.contains_key(&key)); - let result = &*notifier.notifications.get(&key).unwrap(); + let result = test_notifier.notifications.get(&key).unwrap(); assert_eq!( expected_transaction.signature(), result.transaction.signature() From 87b4dc64e3fc8026515e4186c92ff4325e2ab5a7 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Tue, 7 Nov 2023 13:48:11 -0600 Subject: [PATCH 57/98] Add --release-with-debug option to cargo-install-all.sh (#33383) * Add --canary option to cargo-install-all for building with separate debug symbols * lint * Remove debug-assertions * switch flag from --canary to --release-with-debug --- scripts/cargo-install-all.sh | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index 583ba6508f917d..4aceef69a4fe73 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -28,22 +28,29 @@ usage() { echo "Error: $*" fi cat <] [--debug] [--validator-only] +usage: $0 [+] [--debug] [--validator-only] [--release-with-debug] EOF exit $exitcode } maybeRustVersion= installDir= -buildVariant=release -maybeReleaseFlag=--release +# buildProfileArg and buildProfile duplicate some information because cargo +# doesn't allow '--profile debug' but we still need to know that the binaries +# will be in target/debug +buildProfileArg='--profile release' +buildProfile='release' validatorOnly= while [[ -n $1 ]]; do if [[ ${1:0:1} = - ]]; then if [[ $1 = --debug ]]; then - maybeReleaseFlag= - buildVariant=debug + buildProfileArg= # the default cargo profile is 'debug' + buildProfile='debug' + shift + elif [[ $1 = --release-with-debug ]]; then + buildProfileArg='--profile release-with-debug' + buildProfile='release-with-debug' shift elif [[ $1 = --validator-only ]]; then validatorOnly=true @@ -68,7 +75,7 @@ fi installDir="$(mkdir -p "$installDir"; cd "$installDir"; pwd)" mkdir -p "$installDir/bin/deps" -echo "Install location: $installDir ($buildVariant)" +echo "Install location: $installDir ($buildProfile)" cd "$(dirname "$0")"/.. 
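One subtlety worth noting: cargo takes the profile via `--profile release-with-debug`, but the artifacts land in a target directory named after the profile, which is why the script tracks `buildProfileArg` and `buildProfile` separately. Typical usage then looks like this (a sketch; the install path is arbitrary, and a `[profile.release-with-debug]` section is assumed to exist in the workspace Cargo.toml, which this diff does not show):

```bash
# Build release-optimized binaries that keep their debug symbols.
./scripts/cargo-install-all.sh /tmp/solana-install --release-with-debug
ls /tmp/solana-install/bin   # copied from target/release-with-debug/
```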
@@ -138,7 +145,7 @@ mkdir -p "$installDir/bin" ( set -x # shellcheck disable=SC2086 # Don't want to double quote $rust_version - "$cargo" $maybeRustVersion build $maybeReleaseFlag "${binArgs[@]}" + "$cargo" $maybeRustVersion build $buildProfileArg "${binArgs[@]}" # Exclude `spl-token` binary for net.sh builds if [[ -z "$validatorOnly" ]]; then @@ -152,7 +159,7 @@ mkdir -p "$installDir/bin" ) for bin in "${BINS[@]}"; do - cp -fv "target/$buildVariant/$bin" "$installDir"/bin + cp -fv "target/$buildProfile/$bin" "$installDir"/bin done if [[ -d target/perf-libs ]]; then @@ -206,7 +213,7 @@ fi set -x # deps dir can be empty shopt -s nullglob - for dep in target/"$buildVariant"/deps/libsolana*program.*; do + for dep in target/"$buildProfile"/deps/libsolana*program.*; do cp -fv "$dep" "$installDir/bin/deps" done ) From e93725e78862e88ca4ac5065470906ac27af45fd Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 7 Nov 2023 13:30:10 -0700 Subject: [PATCH 58/98] Add more comments about solana-program patch (downstream) (#33965) * Add more comments re solana-program patch * More specific advice --- Cargo.toml | 7 +++++++ programs/sbf/Cargo.toml | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 8e85b51441beda..d683918584c943 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -451,6 +451,13 @@ crossbeam-epoch = { git = "https://github.com/solana-labs/crossbeam", rev = "fd2 # and we end up with two versions of `solana-program` and `solana-zk-token-sdk` and all of their # dependencies in our build tree. # +# If you are developing downstream using non-crates-io solana-program (local or +# forked repo, or from github rev, eg), duplicate the following patch statements +# in your Cargo.toml. If you still hit duplicate-type errors with the patch +# statements in place, run `cargo update -p solana-program` and/or `cargo update +# -p solana-zk-token-sdk` to remove extraneous versions from your Cargo.lock +# file. +# # There is a similar override in `programs/sbf/Cargo.toml`. Please keep both comments and the # overrides in sync. solana-program = { path = "sdk/program" } diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 6f069a3f5bfd8b..7ab496de8eebd4 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -196,6 +196,13 @@ targets = ["x86_64-unknown-linux-gnu"] # and we end up with two versions of `solana-program` and `solana-zk-token-sdk` and all of their # dependencies in our build tree. # +# If you are developing downstream using non-crates-io solana-program (local or +# forked repo, or from github rev, eg), duplicate the following patch statements +# in your Cargo.toml. If you still hit duplicate-type errors with the patch +# statements in place, run `cargo update -p solana-program` and/or `cargo update +# -p solana-zk-token-sdk` to remove extraneous versions from your Cargo.lock +# file. +# # There is a similar override in `../../Cargo.toml`. Please keep both comments and the # overrides in sync. solana-program = { path = "../../sdk/program" } From dd2845b072fc21ccfa8c2cefad9dcc1bc8c138eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:49:42 -0700 Subject: [PATCH 59/98] build(deps): bump futures from 0.3.28 to 0.3.29 (#33953) * build(deps): bump futures from 0.3.28 to 0.3.29 Bumps [futures](https://github.com/rust-lang/futures-rs) from 0.3.28 to 0.3.29. 
- [Release notes](https://github.com/rust-lang/futures-rs/releases) - [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.28...0.3.29) --- updated-dependencies: - dependency-name: futures dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 56 ++++++++++++++++++++--------------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 48 +++++++++++++++++------------------ 3 files changed, 53 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98cc5a1c00d869..4db4ef21cb924b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2036,9 +2036,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", @@ -2067,9 +2067,9 @@ checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies = [ "futures-core", "futures-task", @@ -2252,7 +2252,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.28", + "futures 0.3.29", "log", "reqwest", "serde", @@ -2513,7 +2513,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.28", + "futures 0.3.29", "headers", "http", "hyper", @@ -2761,7 +2761,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2779,7 +2779,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "futures-executor", "futures-util", "log", @@ -2794,7 +2794,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-client-transports", ] @@ -2816,7 +2816,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2832,7 +2832,7 @@ version = "18.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2847,7 +2847,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "lazy_static", "log", @@ -2863,7 +2863,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.28", + "futures 0.3.29", "globset", "jsonrpc-core", "lazy_static", @@ -3655,7 +3655,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "libc", "log", "rand 0.7.3", @@ -4961,7 +4961,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" dependencies = [ "dashmap", - "futures 0.3.28", + "futures 0.3.29", "lazy_static", "log", "parking_lot 0.12.1", @@ -5196,7 +5196,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.28", + "futures 0.3.29", "httparse", "log", "rand 0.8.5", @@ -5398,7 +5398,7 @@ name = "solana-banks-client" version = "1.18.0" dependencies = [ "borsh 0.10.3", - "futures 0.3.28", + "futures 0.3.29", "solana-banks-interface", "solana-banks-server", "solana-program", @@ -5425,7 +5425,7 @@ version = "1.18.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "solana-accounts-db", "solana-banks-interface", "solana-client", @@ -5766,7 +5766,7 @@ dependencies = [ "bincode", "crossbeam-channel", "dashmap", - "futures 0.3.28", + "futures 0.3.29", "futures-util", "indexmap 2.0.2", "indicatif", @@ -5880,7 +5880,7 @@ dependencies = [ "eager", "etcd-client", "fs_extra", - "futures 0.3.28", + "futures 0.3.29", "histogram", "itertools", "lazy_static", @@ -6295,7 +6295,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "fs_extra", - "futures 0.3.28", + "futures 0.3.29", "itertools", "lazy_static", "libc", @@ -6362,7 +6362,7 @@ dependencies = [ "crossbeam-channel", "csv", "dashmap", - "futures 0.3.28", + "futures 0.3.29", "histogram", "itertools", "log", @@ -6782,7 +6782,7 @@ dependencies = [ "async-mutex", "async-trait", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "itertools", "lazy_static", "log", @@ -6899,7 +6899,7 @@ dependencies = [ "bincode", "bs58", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "indicatif", "jsonrpc-core", "jsonrpc-http-server", @@ -6944,7 +6944,7 @@ version = "1.18.0" dependencies = [ "anyhow", "clap 2.33.3", - "futures 0.3.28", + "futures 0.3.29", "serde_json", "solana-account-decoder", "solana-clap-utils", @@ -7194,7 +7194,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.28", + "futures 0.3.29", "goauth", "http", "hyper", @@ -7447,7 +7447,7 @@ dependencies = [ "bincode", "bytes", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "itertools", "log", "lru", @@ -8155,7 +8155,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.28", 
+ "futures 0.3.29", "humantime", "opentelemetry", "pin-project", diff --git a/Cargo.toml b/Cargo.toml index d683918584c943..8dbfaac7dbb8ea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -200,7 +200,7 @@ flate2 = "1.0.28" fnv = "1.0.7" fs-err = "2.9.0" fs_extra = "1.3.0" -futures = "0.3.28" +futures = "0.3.29" futures-util = "0.3.29" gag = "1.0.0" generic-array = { version = "0.14.7", default-features = false } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 18f0249197663f..c86ac2c782862f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1734,9 +1734,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", @@ -1765,9 +1765,9 @@ checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies = [ "futures-core", "futures-task", @@ -1910,7 +1910,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.28", + "futures 0.3.29", "log", "reqwest", "serde", @@ -2147,7 +2147,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.28", + "futures 0.3.29", "headers", "http", "hyper", @@ -2384,7 +2384,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2402,7 +2402,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "futures-executor", "futures-util", "log", @@ -2417,7 +2417,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-client-transports", ] @@ -2439,7 +2439,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2455,7 +2455,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2470,7 +2470,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "lazy_static", "log", @@ -2486,7 +2486,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.28", + "futures 0.3.29", "globset", "jsonrpc-core", "lazy_static", @@ -3283,7 +3283,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "libc", "log", "rand 0.7.3", @@ -4602,7 +4602,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.28", + "futures 0.3.29", "httparse", "log", "rand 0.8.5", @@ -4715,7 +4715,7 @@ name = "solana-banks-client" version = "1.18.0" dependencies = [ "borsh 0.10.3", - "futures 0.3.28", + "futures 0.3.29", "solana-banks-interface", "solana-program", "solana-sdk", @@ -4740,7 +4740,7 @@ version = "1.18.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "solana-accounts-db", "solana-banks-interface", "solana-client", @@ -4873,7 +4873,7 @@ dependencies = [ "async-trait", "bincode", "dashmap", - "futures 0.3.28", + "futures 0.3.29", "futures-util", "indexmap 2.0.2", "indicatif", @@ -4950,7 +4950,7 @@ dependencies = [ "dashmap", "eager", "etcd-client", - "futures 0.3.28", + "futures 0.3.29", "histogram", "itertools", "lazy_static", @@ -5227,7 +5227,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "fs_extra", - "futures 0.3.28", + "futures 0.3.29", "itertools", "lazy_static", "libc", @@ -5532,7 +5532,7 @@ version = "1.18.0" dependencies = [ "async-mutex", "async-trait", - "futures 0.3.28", + "futures 0.3.29", "itertools", "lazy_static", "log", @@ -6285,7 +6285,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.28", + "futures 0.3.29", "goauth", "http", "hyper", @@ -6459,7 +6459,7 @@ dependencies = [ "bincode", "bytes", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "itertools", "log", "lru", @@ -7070,7 +7070,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.28", + "futures 0.3.29", "humantime", "opentelemetry", "pin-project", From 29b4ba0dc1efd2f68c50424e722b0cc138343770 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:51:24 -0700 Subject: [PATCH 60/98] build(deps): bump indexmap from 2.0.2 to 2.1.0 (#33954) * build(deps): bump indexmap from 2.0.2 to 2.1.0 Bumps [indexmap](https://github.com/bluss/indexmap) from 2.0.2 to 2.1.0. - [Changelog](https://github.com/bluss/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/bluss/indexmap/compare/2.0.2...2.1.0) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 20 ++++++++++---------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 16 ++++++++-------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4db4ef21cb924b..4d5d1d59523129 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2662,9 +2662,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", "hashbrown 0.14.1", @@ -4947,7 +4947,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "itoa", "ryu", "serde", @@ -5768,7 +5768,7 @@ dependencies = [ "dashmap", "futures 0.3.29", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "quinn", @@ -5849,7 +5849,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "rand 0.8.5", @@ -6188,7 +6188,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.0.2", + "indexmap 2.1.0", "itertools", "log", "lru", @@ -7254,7 +7254,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.0.2", + "indexmap 2.1.0", "itertools", "libc", "log", @@ -7345,7 +7345,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "pickledb", "serde", @@ -7374,7 +7374,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "rayon", @@ -8547,7 +8547,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "serde", "serde_spanned", "toml_datetime", diff --git a/Cargo.toml b/Cargo.toml index 8dbfaac7dbb8ea..4cc82b9e2694aa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -217,7 +217,7 @@ hyper = "0.14.27" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.7" -indexmap = "2.0.2" +indexmap = "2.1.0" indicatif = "0.17.7" itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c86ac2c782862f..478b5f86aeea99 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2296,9 +2296,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", "hashbrown 0.14.1", @@ -4378,7 +4378,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "itoa", "ryu", "serde", @@ -4875,7 +4875,7 
@@ dependencies = [ "dashmap", "futures 0.3.29", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "quinn", @@ -4925,7 +4925,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "log", "rand 0.8.5", "rayon", @@ -5176,7 +5176,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.0.2", + "indexmap 2.1.0", "itertools", "log", "lru", @@ -6331,7 +6331,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.0.2", + "indexmap 2.1.0", "itertools", "libc", "log", @@ -6414,7 +6414,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "rayon", From fdab44f76899e9157cb840b9c388aa8e568caff9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:52:49 -0700 Subject: [PATCH 61/98] build(deps): bump openssl from 0.10.57 to 0.10.59 (#33952) * build(deps): bump openssl from 0.10.57 to 0.10.59 Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.57 to 0.10.59. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.57...openssl-v0.10.59) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 12 ++++++------ programs/sbf/Cargo.lock | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d5d1d59523129..d027457241e2c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3549,9 +3549,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" dependencies = [ "bitflags 2.4.1", "cfg-if 1.0.0", @@ -3581,18 +3581,18 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-src" -version = "111.25.0+1.1.1t" +version = "300.1.6+3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3173cd3626c43e3854b1b727422a276e568d9ec5fe8cec197822cf52cfb743d6" +checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.92" +version = "0.9.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db7e971c2c2bba161b2d2fdf37080177eff520b3bc044787c7f1f5f9e78d869b" +checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" dependencies = [ "cc", "libc", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 478b5f86aeea99..69de98de6b55d3 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3174,9 +3174,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" dependencies = [ "bitflags 2.4.1", "cfg-if 1.0.0", @@ -3206,18 +3206,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.0+1.1.1t" +version = "300.1.6+3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3173cd3626c43e3854b1b727422a276e568d9ec5fe8cec197822cf52cfb743d6" +checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.92" +version = "0.9.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db7e971c2c2bba161b2d2fdf37080177eff520b3bc044787c7f1f5f9e78d869b" +checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" dependencies = [ "cc", "libc", From 6f213c38aa9a9667909dcae41015951144e5d8ad Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 7 Nov 2023 15:58:52 -0500 Subject: [PATCH 62/98] Removes Default from RollingBitField (#33969) --- accounts-db/src/rolling_bit_field.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/rolling_bit_field.rs b/accounts-db/src/rolling_bit_field.rs index 73a71d7a084110..5a710d936963be 100644 --- a/accounts-db/src/rolling_bit_field.rs +++ b/accounts-db/src/rolling_bit_field.rs @@ -8,7 +8,7 @@ use { solana_sdk::clock::Slot, }; -#[derive(Debug, Default, AbiExample, Clone)] +#[derive(Debug, AbiExample, Clone)] pub struct RollingBitField { max_width: u64, min: u64, From 29b21253ecd2f11a2c8103953a23c7fc3838544f Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 7 Nov 2023 16:00:09 -0500 Subject: [PATCH 63/98] changelog: Uses fastboot by default (#33900) --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a99a5ffe0045a1..4fe1b4fc2ae902 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Release channels have their own copy of this changelog: ## [1.18.0] - Unreleased * Changes * Added a github check to support `changelog` label + * The default for `--use-snapshot-archives-at-startup` is now `when-newest` (#33883) * Upgrade Notes ## [1.17.0] From 5efba1fb3a9fc6653c7c17142df1283747688889 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 8 Nov 2023 11:03:35 +0800 Subject: [PATCH 64/98] ci: add the ability to use GCS backend for sccache (#33967) --- .buildkite/hooks/pre-command | 8 +++++++- ci/docker-run.sh | 30 ++++++++++++++++++++++++------ 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 025b228f8579de..4c798a83d36889 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -28,5 +28,11 @@ fi export SBF_TOOLS_VERSION -SCCACHE_S3_KEY_PREFIX="${rust_stable}_${rust_nightly}_${SBF_TOOLS_VERSION}" +SCCACHE_KEY_PREFIX="${rust_stable}_${rust_nightly}_${SBF_TOOLS_VERSION}" +export SCCACHE_KEY_PREFIX + +SCCACHE_S3_KEY_PREFIX="$SCCACHE_KEY_PREFIX" export SCCACHE_S3_KEY_PREFIX + +SCCACHE_GCS_KEY_PREFIX="$SCCACHE_KEY_PREFIX" +export SCCACHE_GCS_KEY_PREFIX diff --git a/ci/docker-run.sh b/ci/docker-run.sh index 52d3807394c10e..8e43bcad55ce5f 100755 --- a/ci/docker-run.sh +++ b/ci/docker-run.sh @@ -51,16 +51,34 @@ if [[ -n $CI ]]; then # sccache-related bugs echo "--- $0 ... (with sccache being DISABLED due to many (${BUILDKITE_RETRY_COUNT}) retries)" else - echo "--- $0 ... 
(with sccache enabled with prefix: $SCCACHE_S3_KEY_PREFIX)" + echo "--- $0 ... (with sccache enabled with prefix: $SCCACHE_KEY_PREFIX)" + # sccache ARGS+=( --env "RUSTC_WRAPPER=/usr/local/cargo/bin/sccache" - --env AWS_ACCESS_KEY_ID - --env AWS_SECRET_ACCESS_KEY - --env SCCACHE_BUCKET - --env SCCACHE_REGION - --env SCCACHE_S3_KEY_PREFIX ) + + # s3 + if [ -n "$AWS_ACCESS_KEY_ID" ]; then + ARGS+=( + --env AWS_ACCESS_KEY_ID + --env AWS_SECRET_ACCESS_KEY + --env SCCACHE_BUCKET + --env SCCACHE_REGION + --env SCCACHE_S3_KEY_PREFIX + ) + fi + + # gcs + if [ -n "$SCCACHE_GCS_KEY_PATH" ]; then + ARGS+=( + --env SCCACHE_GCS_KEY_PATH + --volume "$SCCACHE_GCS_KEY_PATH:$SCCACHE_GCS_KEY_PATH" + --env SCCACHE_GCS_BUCKET + --env SCCACHE_GCS_RW_MODE + --env SCCACHE_GCS_KEY_PREFIX + ) + fi fi fi fi From 8c5b5f18be77737f0913355f17ddba81f14d5824 Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Wed, 8 Nov 2023 02:50:38 -0800 Subject: [PATCH 65/98] scripts/cargo-clippy.sh: Extract our non-trivial `cargo clippy` command (#33982) CI uses a number of parameters when running `cargo clippy`. Repeating the same command manually requires some copy/pasting. It is easier when there is a common script that can be run by everyone. --- ci/test-checks.sh | 30 +------------------- scripts/cargo-clippy.sh | 61 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 29 deletions(-) create mode 100755 scripts/cargo-clippy.sh diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 85375d6bbeec4b..3a4f15ec23d81f 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -69,35 +69,7 @@ fi _ ci/order-crates-for-publishing.py -nightly_clippy_allows=(--allow=clippy::redundant_clone) - -# Use nightly clippy, as frozen-abi proc-macro generates a lot of code across -# various crates in this whole monorepo (frozen-abi is enabled only under nightly -# due to the use of unstable rust feature). Likewise, frozen-abi(-macro) crates' -# unit tests are only compiled under nightly. -# Similarly, nightly is desired to run clippy over all of bench files because -# the bench itself isn't stabilized yet... -# ref: https://github.com/rust-lang/rust/issues/66287 -_ scripts/cargo-for-all-lock-files.sh -- "+${rust_nightly}" clippy --workspace --all-targets --features dummy-for-ci-check -- \ - --deny=warnings \ - --deny=clippy::default_trait_access \ - --deny=clippy::arithmetic_side_effects \ - --deny=clippy::manual_let_else \ - --deny=clippy::used_underscore_binding \ - "${nightly_clippy_allows[@]}" - -# temporarily run stable clippy as well to scan the codebase for -# `redundant_clone`s, which is disabled as nightly clippy is buggy: -# https://github.com/solana-labs/solana/issues/31834 -# -# can't use --all-targets: -# error[E0554]: `#![feature]` may not be used on the stable release channel -_ scripts/cargo-for-all-lock-files.sh -- clippy --workspace --tests --bins --examples --features dummy-for-ci-check -- \ - --deny=warnings \ - --deny=clippy::default_trait_access \ - --deny=clippy::arithmetic_side_effects \ - --deny=clippy::manual_let_else \ - --deny=clippy::used_underscore_binding +_ scripts/cargo-clippy.sh if [[ -n $CI ]]; then # exclude from printing "Checking xxx ..." diff --git a/scripts/cargo-clippy.sh b/scripts/cargo-clippy.sh new file mode 100755 index 00000000000000..16419cb2cc944d --- /dev/null +++ b/scripts/cargo-clippy.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# Runs `cargo clippy` in all individual workspaces in the repository. 
+#
+# We have a number of clippy parameters that we want to enforce across the
+# code base. They are defined here.
+#
+# This script is run by the CI, so if you want to replicate what the CI is
+# doing, it is better to run this script than to call `cargo clippy` manually.
+#
+# TODO It would be nice to provide arguments to narrow clippy checks to a single
+# workspace and/or package, to speed up the interactive workflow.
+
+set -o errexit
+
+here="$(dirname "$0")"
+cargo="$(readlink -f "${here}/../cargo")"
+
+if [[ -z $cargo ]]; then
+  >&2 echo "Failed to find cargo. Mac readlink doesn't support -f. Consider switching
+  to gnu readlink with 'brew install coreutils' and then symlink greadlink as
+  /usr/local/bin/readlink."
+  exit 1
+fi
+
+# shellcheck source=ci/rust-version.sh
+source "$here/../ci/rust-version.sh"
+
+nightly_clippy_allows=(--allow=clippy::redundant_clone)
+
+# Use nightly clippy, as frozen-abi proc-macro generates a lot of code across
+# various crates in this whole monorepo (frozen-abi is enabled only under nightly
+# due to the use of unstable rust feature). Likewise, frozen-abi(-macro) crates'
+# unit tests are only compiled under nightly.
+# Similarly, nightly is desired to run clippy over all of bench files because
+# the bench itself isn't stabilized yet...
+# ref: https://github.com/rust-lang/rust/issues/66287
+"$here/cargo-for-all-lock-files.sh" -- \
+  "+${rust_nightly}" clippy \
+  --workspace --all-targets --features dummy-for-ci-check -- \
+  --deny=warnings \
+  --deny=clippy::default_trait_access \
+  --deny=clippy::arithmetic_side_effects \
+  --deny=clippy::manual_let_else \
+  --deny=clippy::used_underscore_binding \
+  "${nightly_clippy_allows[@]}"
+
+# temporarily run stable clippy as well to scan the codebase for
+# `redundant_clone`s, which is disabled as nightly clippy is buggy:
+# https://github.com/solana-labs/solana/issues/31834
+#
+# can't use --all-targets:
+# error[E0554]: `#![feature]` may not be used on the stable release channel
+"$here/cargo-for-all-lock-files.sh" -- \
+  clippy \
+  --workspace --tests --bins --examples --features dummy-for-ci-check -- \
+  --deny=warnings \
+  --deny=clippy::default_trait_access \
+  --deny=clippy::arithmetic_side_effects \
+  --deny=clippy::manual_let_else \
+  --deny=clippy::used_underscore_binding

From 73815aee512b9869bcd9851c8fe6ccd5366f3aec Mon Sep 17 00:00:00 2001
From: steviez
Date: Wed, 8 Nov 2023 11:58:31 -0600
Subject: [PATCH 66/98] Move and rename ledger services from core to ledger
 (#33947)

These services currently live in core/; however, they operate on the
ledger. More so, these two services operate only on the blockstore, and
not necessarily the entire ledger, so it makes sense to move them out of
core and into ledger. We've recently been making similar changes,
breaking things out into individual crates to reduce the scope of core.
This change moves the services from core/ to ledger/, and replaces
`ledger` with `blockstore` in their names.
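For downstream code, the visible effect is an import-path change. Below is a
minimal sketch of the new paths (module, type, and argument names are taken
from the diff that follows; the constructor shape is unchanged by the move):

    use solana_ledger::{
        blockstore_cleanup_service::BlockstoreCleanupService,
        blockstore_metric_report_service::BlockstoreMetricReportService,
    };

    // Mirrors the call site in core/src/tvu.rs after this patch.
    let blockstore_cleanup_service = BlockstoreCleanupService::new(
        blockstore_cleanup_slot_receiver, // receiver of newly rooted slots
        blockstore.clone(),               // shared Arc<Blockstore>
        max_ledger_shreds,                // retention limit, counted in shreds
        exit.clone(),                     // Arc<AtomicBool> shutdown flag
    );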
--- core/src/lib.rs | 2 - core/src/tvu.rs | 24 +- core/src/validator.rs | 12 +- core/tests/ledger_cleanup.rs | 613 ------------------ ledger/src/blockstore.rs | 10 +- .../src/blockstore_cleanup_service.rs | 55 +- .../src/blockstore_metric_report_service.rs | 12 +- ledger/src/lib.rs | 2 + local-cluster/tests/local_cluster.rs | 43 -- validator/src/main.rs | 2 +- 10 files changed, 67 insertions(+), 708 deletions(-) delete mode 100644 core/tests/ledger_cleanup.rs rename core/src/ledger_cleanup_service.rs => ledger/src/blockstore_cleanup_service.rs (93%) rename core/src/ledger_metric_report_service.rs => ledger/src/blockstore_metric_report_service.rs (75%) diff --git a/core/src/lib.rs b/core/src/lib.rs index 99ac98b5d422cc..44e7a8ab89aa4f 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -22,8 +22,6 @@ pub mod cost_update_service; pub mod drop_bank_service; pub mod fetch_stage; pub mod gen_keys; -pub mod ledger_cleanup_service; -pub mod ledger_metric_report_service; pub mod next_leader; pub mod optimistic_confirmation_verifier; pub mod poh_timing_report_service; diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 639670479f38df..be39b5f9d5810e 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -14,7 +14,6 @@ use { consensus::{tower_storage::TowerStorage, Tower}, cost_update_service::CostUpdateService, drop_bank_service::DropBankService, - ledger_cleanup_service::LedgerCleanupService, repair::{quic_endpoint::LocalRequest, repair_service::RepairInfo}, replay_stage::{ReplayStage, ReplayStageConfig}, rewards_recorder_service::RewardsRecorderSender, @@ -32,8 +31,9 @@ use { duplicate_shred_listener::DuplicateShredListener, }, solana_ledger::{ - blockstore::Blockstore, blockstore_processor::TransactionStatusSender, - entry_notifier_service::EntryNotifierSender, leader_schedule_cache::LeaderScheduleCache, + blockstore::Blockstore, blockstore_cleanup_service::BlockstoreCleanupService, + blockstore_processor::TransactionStatusSender, entry_notifier_service::EntryNotifierSender, + leader_schedule_cache::LeaderScheduleCache, }, solana_poh::poh_recorder::PohRecorder, solana_rpc::{ @@ -63,7 +63,7 @@ pub struct Tvu { window_service: WindowService, cluster_slots_service: ClusterSlotsService, replay_stage: ReplayStage, - ledger_cleanup_service: Option, + blockstore_cleanup_service: Option, cost_update_service: CostUpdateService, voting_service: VotingService, warm_quic_cache_service: Option, @@ -236,14 +236,14 @@ impl Tvu { exit.clone(), ); - let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = unbounded(); + let (blockstore_cleanup_slot_sender, blockstore_cleanup_slot_receiver) = unbounded(); let replay_stage_config = ReplayStageConfig { vote_account: *vote_account, authorized_voter_keypairs, exit: exit.clone(), rpc_subscriptions: rpc_subscriptions.clone(), leader_schedule_cache: leader_schedule_cache.clone(), - latest_root_senders: vec![ledger_cleanup_slot_sender], + latest_root_senders: vec![blockstore_cleanup_slot_sender], accounts_background_request_sender, block_commitment_cache, transaction_status_sender, @@ -311,9 +311,9 @@ impl Tvu { popular_pruned_forks_receiver, )?; - let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { - LedgerCleanupService::new( - ledger_cleanup_slot_receiver, + let blockstore_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { + BlockstoreCleanupService::new( + blockstore_cleanup_slot_receiver, blockstore.clone(), max_ledger_shreds, exit.clone(), @@ -337,7 +337,7 @@ impl Tvu { window_service, 
cluster_slots_service, replay_stage, - ledger_cleanup_service, + blockstore_cleanup_service, cost_update_service, voting_service, warm_quic_cache_service, @@ -352,8 +352,8 @@ impl Tvu { self.cluster_slots_service.join()?; self.fetch_stage.join()?; self.shred_sigverify.join()?; - if self.ledger_cleanup_service.is_some() { - self.ledger_cleanup_service.unwrap().join()?; + if self.blockstore_cleanup_service.is_some() { + self.blockstore_cleanup_service.unwrap().join()?; } self.replay_stage.join()?; self.cost_update_service.join()?; diff --git a/core/src/validator.rs b/core/src/validator.rs index 4aa6fb992a0a42..241105e28ccdf0 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -14,7 +14,6 @@ use { tower_storage::{NullTowerStorage, TowerStorage}, ExternalRootSource, Tower, }, - ledger_metric_report_service::LedgerMetricReportService, poh_timing_report_service::PohTimingReportService, repair::{self, serve_repair::ServeRepair, serve_repair_service::ServeRepairService}, rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService}, @@ -56,6 +55,7 @@ use { blockstore::{ Blockstore, BlockstoreError, BlockstoreSignals, CompletedSlotsReceiver, PurgeType, }, + blockstore_metric_report_service::BlockstoreMetricReportService, blockstore_options::{BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions}, blockstore_processor::{self, TransactionStatusSender}, entry_notifier_interface::EntryNotifierArc, @@ -465,7 +465,7 @@ pub struct Validator { pub bank_forks: Arc>, pub blockstore: Arc, geyser_plugin_service: Option, - ledger_metric_report_service: LedgerMetricReportService, + blockstore_metric_report_service: BlockstoreMetricReportService, accounts_background_service: AccountsBackgroundService, accounts_hash_verifier: AccountsHashVerifier, turbine_quic_endpoint: Endpoint, @@ -1102,8 +1102,8 @@ impl Validator { ) .map_err(|err| format!("wait_for_supermajority failed: {err:?}"))?; - let ledger_metric_report_service = - LedgerMetricReportService::new(blockstore.clone(), exit.clone()); + let blockstore_metric_report_service = + BlockstoreMetricReportService::new(blockstore.clone(), exit.clone()); let wait_for_vote_to_start_leader = !waited_for_supermajority && !config.no_wait_for_vote_to_start_leader; @@ -1378,7 +1378,7 @@ impl Validator { bank_forks, blockstore, geyser_plugin_service, - ledger_metric_report_service, + blockstore_metric_report_service, accounts_background_service, accounts_hash_verifier, turbine_quic_endpoint, @@ -1507,7 +1507,7 @@ impl Validator { self.stats_reporter_service .join() .expect("stats_reporter_service"); - self.ledger_metric_report_service + self.blockstore_metric_report_service .join() .expect("ledger_metric_report_service"); self.accounts_background_service diff --git a/core/tests/ledger_cleanup.rs b/core/tests/ledger_cleanup.rs deleted file mode 100644 index 1a096c738bf8ff..00000000000000 --- a/core/tests/ledger_cleanup.rs +++ /dev/null @@ -1,613 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] -// Long-running ledger_cleanup tests - -#[cfg(test)] -mod tests { - use { - crossbeam_channel::unbounded, - log::*, - solana_core::ledger_cleanup_service::LedgerCleanupService, - solana_ledger::{ - blockstore::{make_many_slot_shreds, Blockstore}, - blockstore_options::{ - BlockstoreOptions, BlockstoreRocksFifoOptions, LedgerColumnOptions, - ShredStorageType, - }, - get_tmp_ledger_path, - }, - solana_measure::measure::Measure, - std::{ - collections::VecDeque, - str::FromStr, - sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, Mutex, 
RwLock, - }, - thread::{self, Builder, JoinHandle}, - time::{Duration, Instant}, - }, - systemstat::{CPULoad, Platform, System}, - }; - - const DEFAULT_BENCHMARK_SLOTS: u64 = 50; - const DEFAULT_BATCH_SIZE_SLOTS: u64 = 1; - const DEFAULT_MAX_LEDGER_SHREDS: u64 = 50; - const DEFAULT_SHREDS_PER_SLOT: u64 = 25; - const DEFAULT_STOP_SIZE_BYTES: u64 = 0; - const DEFAULT_STOP_SIZE_ITERATIONS: u64 = 0; - const DEFAULT_STOP_SIZE_CF_DATA_BYTES: u64 = 0; - const DEFAULT_SHRED_DATA_CF_SIZE_BYTES: u64 = 125 * 1024 * 1024 * 1024; - - #[derive(Debug)] - struct BenchmarkConfig { - benchmark_slots: u64, - batch_size_slots: u64, - max_ledger_shreds: u64, - shreds_per_slot: u64, - stop_size_bytes: u64, - stop_size_iterations: u64, - stop_size_cf_data_bytes: u64, - pre_generate_data: bool, - cleanup_blockstore: bool, - num_writers: u64, - cleanup_service: bool, - fifo_compaction: bool, - shred_data_cf_size: u64, - } - - #[derive(Clone, Copy, Debug)] - struct CpuStatsInner { - cpu_user: f32, - cpu_system: f32, - cpu_idle: f32, - } - - impl From for CpuStatsInner { - fn from(cpu: CPULoad) -> Self { - Self { - cpu_user: cpu.user * 100.0, - cpu_system: cpu.system * 100.0, - cpu_idle: cpu.idle * 100.0, - } - } - } - - impl Default for CpuStatsInner { - fn default() -> Self { - Self { - cpu_user: 0.0, - cpu_system: 0.0, - cpu_idle: 0.0, - } - } - } - - struct CpuStats { - stats: RwLock, - sys: System, - } - - impl Default for CpuStats { - fn default() -> Self { - Self { - stats: RwLock::new(CpuStatsInner::default()), - sys: System::new(), - } - } - } - - impl CpuStats { - fn update(&self) { - if let Ok(cpu) = self.sys.cpu_load_aggregate() { - std::thread::sleep(Duration::from_millis(400)); - let cpu_new = CpuStatsInner::from(cpu.done().unwrap()); - *self.stats.write().unwrap() = cpu_new; - } - } - - fn get_stats(&self) -> CpuStatsInner { - *self.stats.read().unwrap() - } - } - - struct CpuStatsUpdater { - cpu_stats: Arc, - t_cleanup: JoinHandle<()>, - } - - impl CpuStatsUpdater { - pub fn new(exit: Arc) -> Self { - let cpu_stats = Arc::new(CpuStats::default()); - let cpu_stats_clone = cpu_stats.clone(); - - let t_cleanup = Builder::new() - .name("cpu_info".to_string()) - .spawn(move || loop { - if exit.load(Ordering::Relaxed) { - break; - } - cpu_stats_clone.update(); - }) - .unwrap(); - - Self { - cpu_stats, - t_cleanup, - } - } - - pub fn get_stats(&self) -> CpuStatsInner { - self.cpu_stats.get_stats() - } - - pub fn join(self) -> std::thread::Result<()> { - self.t_cleanup.join() - } - } - - fn read_env(key: &str, default: T) -> T - where - T: FromStr, - { - match std::env::var(key) { - Ok(val) => val.parse().unwrap_or(default), - Err(_e) => default, - } - } - - /// Obtains the benchmark config from the following environmental arguments: - /// - /// Basic benchmark settings: - /// - `BENCHMARK_SLOTS`: the number of slots in the benchmark. - /// - `BATCH_SIZE`: the number of slots in each write batch. - /// - `SHREDS_PER_SLOT`: the number of shreds in each slot. Together with - /// the `BATCH_SIZE` and `BENCHMARK_SLOTS`, it means: - /// - the number of shreds in one write batch is `BATCH_SIZE` * `SHREDS_PER_SLOT`. - /// - the total number of batches is `BENCHMARK_SLOTS` / `BATCH_SIZE`. - /// - the total number of shreds is `BENCHMARK_SLOTS` * `SHREDS_PER_SLOT`. - /// - `NUM_WRITERS`: controls the number of concurrent threads performing - /// shred insertion. Default: 1. 
- /// - /// Advanced benchmark settings: - /// - `STOP_SIZE_BYTES`: if specified, the benchmark will count how - /// many times the ledger store size exceeds the specified threshold. - /// - `STOP_SIZE_CF_DATA_BYTES`: if specified, the benchmark will count how - /// many times the storage size of `cf::ShredData` which stores data shred - /// exceeds the specified threshold. - /// - `STOP_SIZE_ITERATIONS`: when any of the stop size is specified, the - /// benchmark will stop immediately when the number of consecutive times - /// where the ledger store size exceeds the configured `STOP_SIZE_BYTES`. - /// These configs are used to make sure the benchmark runs successfully - /// under the storage limitation. - /// - `CLEANUP_BLOCKSTORE`: if true, the ledger store created in the current - /// benchmark run will be deleted. Default: true. - /// - /// Cleanup-service related settings: - /// - `MAX_LEDGER_SHREDS`: when the clean-up service is on, the service will - /// clean up the ledger store when the number of shreds exceeds this value. - /// - `CLEANUP_SERVICE`: whether to enable the background cleanup service. - /// If set to false, the ledger store in the benchmark will be purely relied - /// on RocksDB's compaction. Default: true. - /// - /// Fifo-compaction settings: - /// - `FIFO_COMPACTION`: if true, then RocksDB's Fifo compaction will be - /// used for storing data shreds. Default: false. - /// - `SHRED_DATA_CF_SIZE_BYTES`: the maximum size of the data-shred column family. - /// Default: 125 * 1024 * 1024 * 1024. - fn get_benchmark_config() -> BenchmarkConfig { - let benchmark_slots = read_env("BENCHMARK_SLOTS", DEFAULT_BENCHMARK_SLOTS); - let batch_size_slots = read_env("BATCH_SIZE", DEFAULT_BATCH_SIZE_SLOTS); - let max_ledger_shreds = read_env("MAX_LEDGER_SHREDS", DEFAULT_MAX_LEDGER_SHREDS); - let shreds_per_slot = read_env("SHREDS_PER_SLOT", DEFAULT_SHREDS_PER_SLOT); - let stop_size_bytes = read_env("STOP_SIZE_BYTES", DEFAULT_STOP_SIZE_BYTES); - let stop_size_iterations = read_env("STOP_SIZE_ITERATIONS", DEFAULT_STOP_SIZE_ITERATIONS); - let stop_size_cf_data_bytes = - read_env("STOP_SIZE_CF_DATA_BYTES", DEFAULT_STOP_SIZE_CF_DATA_BYTES); - let pre_generate_data = read_env("PRE_GENERATE_DATA", false); - let cleanup_blockstore = read_env("CLEANUP_BLOCKSTORE", true); - let num_writers = read_env("NUM_WRITERS", 1); - // A flag indicating whether to have a background clean-up service. - // If set to false, the ledger store will purely rely on RocksDB's - // compaction to perform the clean-up. 
- let cleanup_service = read_env("CLEANUP_SERVICE", true); - let fifo_compaction = read_env("FIFO_COMPACTION", false); - let shred_data_cf_size = - read_env("SHRED_DATA_CF_SIZE_BYTES", DEFAULT_SHRED_DATA_CF_SIZE_BYTES); - - BenchmarkConfig { - benchmark_slots, - batch_size_slots, - max_ledger_shreds, - shreds_per_slot, - stop_size_bytes, - stop_size_iterations, - stop_size_cf_data_bytes, - pre_generate_data, - cleanup_blockstore, - num_writers, - cleanup_service, - fifo_compaction, - shred_data_cf_size, - } - } - - fn emit_header() { - println!("TIME_MS,DELTA_MS,START_SLOT,BATCH_SIZE,SHREDS,MAX,SIZE,DELTA_SIZE,DATA_SHRED_SIZE,DATA_SHRED_SIZE_DELTA,CPU_USER,CPU_SYSTEM,CPU_IDLE"); - } - - #[allow(clippy::too_many_arguments)] - fn emit_stats( - time_initial: Instant, - time_previous: &mut Instant, - storage_previous: &mut u64, - data_shred_storage_previous: &mut u64, - start_slot: u64, - batch_size: u64, - num_shreds: u64, - max_shreds: i64, - blockstore: &Blockstore, - cpu: &CpuStatsInner, - ) { - let time_now = Instant::now(); - let storage_now = blockstore.storage_size().unwrap_or(0); - let data_shred_storage_now = blockstore.total_data_shred_storage_size().unwrap(); - let (cpu_user, cpu_system, cpu_idle) = (cpu.cpu_user, cpu.cpu_system, cpu.cpu_idle); - - info!( - "{},{},{},{},{},{},{},{},{},{},{:.2},{:.2},{:.2}", - time_now.duration_since(time_initial).as_millis(), - time_now.duration_since(*time_previous).as_millis(), - start_slot, - batch_size, - num_shreds, - max_shreds, - storage_now, - storage_now as i64 - *storage_previous as i64, - data_shred_storage_now, - data_shred_storage_now - *data_shred_storage_previous as i64, - cpu_user, - cpu_system, - cpu_idle, - ); - - *time_previous = time_now; - *storage_previous = storage_now; - *data_shred_storage_previous = data_shred_storage_now.try_into().unwrap(); - } - - /// Helper function of the benchmark `test_ledger_cleanup_compaction` which - /// returns true if the benchmark fails the size limitation check. - fn is_exceeded_stop_size_iterations( - storage_size: u64, - stop_size: u64, - exceeded_iterations: &mut u64, - iteration_limit: u64, - storage_desc: &str, - ) -> bool { - if stop_size > 0 { - if storage_size >= stop_size { - *exceeded_iterations += 1; - warn!( - "{} size {} exceeds the stop size {} for {} times!", - storage_desc, storage_size, stop_size, exceeded_iterations - ); - } else { - *exceeded_iterations = 0; - } - - if *exceeded_iterations >= iteration_limit { - error!( - "{} size exceeds the configured limit {} for {} times", - storage_desc, stop_size, exceeded_iterations, - ); - return true; - } - } - false - } - - /// The ledger cleanup test which can also be used as a benchmark - /// measuring shred insertion performance of the blockstore. - /// - /// The benchmark is controlled by several environmental arguments. - /// Check [`get_benchmark_config`] for the full list of arguments. 
- /// - /// Example command: - /// BENCHMARK_SLOTS=1000000 BATCH_SIZE=1 SHREDS_PER_SLOT=25 NUM_WRITERS=8 \ - /// PRE_GENERATE_DATA=false cargo test --release tests::test_ledger_cleanup \ - /// -- --exact --nocapture - #[test] - fn test_ledger_cleanup() { - solana_logger::setup_with("error,ledger_cleanup::tests=info"); - - let ledger_path = get_tmp_ledger_path!(); - let config = get_benchmark_config(); - let blockstore = Blockstore::open_with_options( - &ledger_path, - if config.fifo_compaction { - BlockstoreOptions { - column_options: LedgerColumnOptions { - shred_storage_type: ShredStorageType::RocksFifo( - BlockstoreRocksFifoOptions { - shred_data_cf_size: config.shred_data_cf_size, - shred_code_cf_size: config.shred_data_cf_size, - }, - ), - ..LedgerColumnOptions::default() - }, - ..BlockstoreOptions::default() - } - } else { - BlockstoreOptions::default() - }, - ) - .unwrap(); - let blockstore = Arc::new(blockstore); - - info!("Benchmark configuration: {:#?}", config); - info!("Ledger path: {:?}", &ledger_path); - - let benchmark_slots = config.benchmark_slots; - let batch_size_slots = config.batch_size_slots; - let max_ledger_shreds = config.max_ledger_shreds; - let shreds_per_slot = config.shreds_per_slot; - let stop_size_bytes = config.stop_size_bytes; - let stop_size_iterations = config.stop_size_iterations; - let stop_size_cf_data_bytes = config.stop_size_cf_data_bytes; - let pre_generate_data = config.pre_generate_data; - let num_writers = config.num_writers; - let cleanup_service = config.cleanup_service; - - let num_batches = benchmark_slots / batch_size_slots; - let num_shreds_total = benchmark_slots * shreds_per_slot; - - let (sender, receiver) = unbounded(); - let exit = Arc::new(AtomicBool::new(false)); - - let cleaner = if cleanup_service { - Some(LedgerCleanupService::new( - receiver, - blockstore.clone(), - max_ledger_shreds, - exit.clone(), - )) - } else { - None - }; - - let exit_cpu = Arc::new(AtomicBool::new(false)); - let sys = CpuStatsUpdater::new(exit_cpu.clone()); - - let mut shreds = VecDeque::new(); - - if pre_generate_data { - let mut pre_generate_data_timer = Measure::start("Pre-generate data"); - info!("Pre-generate data ... 
this may take a while"); - for i in 0..num_batches { - let start_slot = i * batch_size_slots; - let (new_shreds, _) = - make_many_slot_shreds(start_slot, batch_size_slots, shreds_per_slot); - shreds.push_back(new_shreds); - } - pre_generate_data_timer.stop(); - info!("{}", pre_generate_data_timer); - } - let shreds = Arc::new(Mutex::new(shreds)); - - info!( - "Bench info num_batches: {}, batch size (slots): {}, shreds_per_slot: {}, num_shreds_total: {}", - num_batches, - batch_size_slots, - shreds_per_slot, - num_shreds_total - ); - - let time_initial = Instant::now(); - let mut time_previous = time_initial; - let mut storage_previous = 0; - let mut data_shred_storage_previous = 0; - let mut stop_size_bytes_exceeded_iterations = 0; - let mut stop_size_cf_data_exceeded_iterations = 0; - - emit_header(); - emit_stats( - time_initial, - &mut time_previous, - &mut storage_previous, - &mut data_shred_storage_previous, - 0, - 0, - 0, - 0, - &blockstore, - &sys.get_stats(), - ); - - let mut insert_threads = vec![]; - let insert_exit = Arc::new(AtomicBool::new(false)); - - info!("Begin inserting shreds ..."); - let mut insert_timer = Measure::start("Shred insertion"); - let current_batch_id = Arc::new(AtomicU64::new(0)); - let finished_batch_count = Arc::new(AtomicU64::new(0)); - - for i in 0..num_writers { - let cloned_insert_exit = insert_exit.clone(); - let cloned_blockstore = blockstore.clone(); - let cloned_shreds = shreds.clone(); - let shared_batch_id = current_batch_id.clone(); - let shared_finished_count = finished_batch_count.clone(); - let insert_thread = Builder::new() - .name(format!("insert_shreds-{i}")) - .spawn(move || { - let start = Instant::now(); - let mut now = Instant::now(); - let mut total = 0; - let mut total_batches = 0; - let mut total_inserted_shreds = 0; - let mut num_shreds = 0; - let mut max_speed = 0f32; - let mut min_speed = f32::MAX; - let (first_shreds, _) = make_many_slot_shreds( - 0, batch_size_slots, shreds_per_slot); - loop { - let batch_id = shared_batch_id.fetch_add(1, Ordering::Relaxed); - let start_slot = batch_id * batch_size_slots; - if start_slot >= benchmark_slots { - break; - } - let len = batch_id; - - // No duplicates being generated, so all shreds - // being passed to insert() are getting inserted - let num_shred_inserted = if pre_generate_data { - let mut sl = cloned_shreds.lock().unwrap(); - if let Some(shreds_from_queue) = sl.pop_front() { - let num_shreds = shreds_from_queue.len(); - total += num_shreds; - cloned_blockstore.insert_shreds( - shreds_from_queue, None, false).unwrap(); - num_shreds - } else { - // If the queue is empty, we're done! 
- break; - } - } else { - let slot_id = start_slot; - if slot_id > 0 { - let (shreds_with_parent, _) = make_many_slot_shreds( - slot_id, batch_size_slots, shreds_per_slot); - let num_shreds = shreds_with_parent.len(); - total += num_shreds; - cloned_blockstore.insert_shreds( - shreds_with_parent.clone(), None, false).unwrap(); - num_shreds - } else { - let num_shreds = first_shreds.len(); - total += num_shreds; - cloned_blockstore.insert_shreds( - first_shreds.clone(), None, false).unwrap(); - num_shreds - } - }; - - total_batches += 1; - total_inserted_shreds += num_shred_inserted; - num_shreds += num_shred_inserted; - shared_finished_count.fetch_add(1, Ordering::Relaxed); - - // as_secs() returns whole number of seconds, so this runs every second - if now.elapsed().as_secs() > 0 { - let shreds_per_second = num_shreds as f32 / now.elapsed().as_secs() as f32; - warn!( - "insert-{} tried: {} inserted: {} batches: {} len: {} shreds_per_second: {}", - i, total, total_inserted_shreds, total_batches, len, shreds_per_second, - ); - let average_speed = - total_inserted_shreds as f32 / start.elapsed().as_secs() as f32; - max_speed = max_speed.max(shreds_per_second); - min_speed = min_speed.min(shreds_per_second); - warn!( - "highest: {} lowest: {} avg: {}", - max_speed, min_speed, average_speed - ); - now = Instant::now(); - num_shreds = 0; - } - - if cloned_insert_exit.load(Ordering::Relaxed) { - if max_speed > 0.0 { - info!( - "insert-{} exiting highest shreds/s: {}, lowest shreds/s: {}", - i, max_speed, min_speed - ); - } else { - // Not enough time elapsed to sample - info!( - "insert-{} exiting", - i - ); - } - break; - } - } - }) - .unwrap(); - insert_threads.push(insert_thread); - } - - loop { - let finished_batch = finished_batch_count.load(Ordering::Relaxed); - let finished_slot = (finished_batch + 1) * batch_size_slots - 1; - - if cleanup_service { - sender.send(finished_slot).unwrap(); - } - - emit_stats( - time_initial, - &mut time_previous, - &mut storage_previous, - &mut data_shred_storage_previous, - finished_slot, - batch_size_slots, - shreds_per_slot, - max_ledger_shreds as i64, - &blockstore, - &sys.get_stats(), - ); - - if is_exceeded_stop_size_iterations( - storage_previous, - stop_size_bytes, - &mut stop_size_bytes_exceeded_iterations, - stop_size_iterations, - "Storage", - ) { - break; - } - - if is_exceeded_stop_size_iterations( - data_shred_storage_previous, - stop_size_cf_data_bytes, - &mut stop_size_cf_data_exceeded_iterations, - stop_size_iterations, - "cf::ShredData", - ) { - break; - } - - if finished_batch >= num_batches { - break; - } else { - thread::sleep(Duration::from_millis(500)); - } - } - // Send exit signal to stop all the writer threads. 
- insert_exit.store(true, Ordering::Relaxed); - - while let Some(thread) = insert_threads.pop() { - thread.join().unwrap(); - } - insert_timer.stop(); - - info!( - "Done inserting shreds: {}, {} shreds/s", - insert_timer, - num_shreds_total as f32 / insert_timer.as_s(), - ); - - exit.store(true, Ordering::SeqCst); - if cleanup_service { - cleaner.unwrap().join().unwrap(); - } - - exit_cpu.store(true, Ordering::SeqCst); - sys.join().unwrap(); - - if config.cleanup_blockstore { - drop(blockstore); - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); - } - } -} diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index b7a592151e65ab..3ea9525fcb194f 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -7821,7 +7821,7 @@ pub mod tests { assert_eq!(counter, 1); } - fn do_test_lowest_cleanup_slot_and_special_cfs(simulate_ledger_cleanup_service: bool) { + fn do_test_lowest_cleanup_slot_and_special_cfs(simulate_blockstore_cleanup_service: bool) { solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -7929,13 +7929,13 @@ pub mod tests { assert_eq!(are_missing, (false, false)); assert_existing_always(); - if simulate_ledger_cleanup_service { + if simulate_blockstore_cleanup_service { *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot; blockstore.purge_slots(0, lowest_cleanup_slot, PurgeType::CompactionFilter); } let are_missing = check_for_missing(); - if simulate_ledger_cleanup_service { + if simulate_blockstore_cleanup_service { // ... when either simulation (or both) is effective, we should observe to be missing // consistently assert_eq!(are_missing, (true, true)); @@ -7947,12 +7947,12 @@ pub mod tests { } #[test] - fn test_lowest_cleanup_slot_and_special_cfs_with_ledger_cleanup_service_simulation() { + fn test_lowest_cleanup_slot_and_special_cfs_with_blockstore_cleanup_service_simulation() { do_test_lowest_cleanup_slot_and_special_cfs(true); } #[test] - fn test_lowest_cleanup_slot_and_special_cfs_without_ledger_cleanup_service_simulation() { + fn test_lowest_cleanup_slot_and_special_cfs_without_blockstore_cleanup_service_simulation() { do_test_lowest_cleanup_slot_and_special_cfs(false); } diff --git a/core/src/ledger_cleanup_service.rs b/ledger/src/blockstore_cleanup_service.rs similarity index 93% rename from core/src/ledger_cleanup_service.rs rename to ledger/src/blockstore_cleanup_service.rs index 80924bf7628564..dbd8e64e612186 100644 --- a/core/src/ledger_cleanup_service.rs +++ b/ledger/src/blockstore_cleanup_service.rs @@ -1,15 +1,15 @@ -//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage. +//! The `blockstore_cleanup_service` drops older ledger data to limit disk space usage. //! The service works by counting the number of live data shreds in the ledger; this //! can be done quickly and should have a fairly stable correlation to actual bytes. //! Once the shred count (and thus roughly the byte count) reaches a threshold, //! the services begins removing data in FIFO order. use { - crossbeam_channel::{Receiver, RecvTimeoutError}, - solana_ledger::{ + crate::{ blockstore::{Blockstore, PurgeType}, blockstore_db::{Result as BlockstoreResult, DATA_SHRED_CF}, }, + crossbeam_channel::{Receiver, RecvTimeoutError}, solana_measure::measure::Measure, solana_sdk::clock::Slot, std::{ @@ -40,11 +40,11 @@ pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000; // and starve other blockstore users. 
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512; -pub struct LedgerCleanupService { +pub struct BlockstoreCleanupService { t_cleanup: JoinHandle<()>, } -impl LedgerCleanupService { +impl BlockstoreCleanupService { pub fn new( new_root_receiver: Receiver, blockstore: Arc, @@ -54,12 +54,12 @@ impl LedgerCleanupService { let mut last_purge_slot = 0; info!( - "LedgerCleanupService active. max ledger shreds={}", + "BlockstoreCleanupService active. max ledger shreds={}", max_ledger_shreds ); let t_cleanup = Builder::new() - .name("solLedgerClean".to_string()) + .name("solBstoreClean".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; @@ -296,8 +296,8 @@ impl LedgerCleanupService { mod tests { use { super::*, + crate::{blockstore::make_many_slot_entries, get_tmp_ledger_path_auto_delete}, crossbeam_channel::unbounded, - solana_ledger::{blockstore::make_many_slot_entries, get_tmp_ledger_path_auto_delete}, }; fn flush_blockstore_contents_to_disk(blockstore: Blockstore) -> Blockstore { @@ -314,7 +314,7 @@ mod tests { #[test] fn test_find_slots_to_clean() { - // LedgerCleanupService::find_slots_to_clean() does not modify the + // BlockstoreCleanupService::find_slots_to_clean() does not modify the // Blockstore, so we can make repeated calls on the same slots solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -334,22 +334,31 @@ mod tests { // Ensure no cleaning of slots > last_root let last_root = 0; let max_ledger_shreds = 0; - let (should_clean, lowest_purged, _) = - LedgerCleanupService::find_slots_to_clean(&blockstore, last_root, max_ledger_shreds); + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( + &blockstore, + last_root, + max_ledger_shreds, + ); // Slot 0 will exist in blockstore with zero shreds since it is slot // 1's parent. Thus, slot 0 will be identified for clean. assert!(should_clean && lowest_purged == 0); // Now, set max_ledger_shreds to 1, slot 0 still eligible for clean let max_ledger_shreds = 1; - let (should_clean, lowest_purged, _) = - LedgerCleanupService::find_slots_to_clean(&blockstore, last_root, max_ledger_shreds); + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( + &blockstore, + last_root, + max_ledger_shreds, + ); assert!(should_clean && lowest_purged == 0); // Ensure no cleaning if blockstore contains fewer than max_ledger_shreds let last_root = num_slots; let max_ledger_shreds = (shreds_per_slot * num_slots) + 1; - let (should_clean, lowest_purged, _) = - LedgerCleanupService::find_slots_to_clean(&blockstore, last_root, max_ledger_shreds); + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( + &blockstore, + last_root, + max_ledger_shreds, + ); assert!(!should_clean && lowest_purged == 0); for slot in 1..=num_slots { @@ -357,7 +366,7 @@ mod tests { let last_root = slot; // Set max_ledger_shreds to 0 so that all eligible slots are cleaned let max_ledger_shreds = 0; - let (should_clean, lowest_purged, _) = LedgerCleanupService::find_slots_to_clean( + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( &blockstore, last_root, max_ledger_shreds, @@ -369,7 +378,7 @@ mod tests { // Set max_ledger_shreds to the number of shreds in slots > slot. 
// This will make it so that slots [1, slot] are cleaned let max_ledger_shreds = shreds_per_slot * (num_slots - slot); - let (should_clean, lowest_purged, _) = LedgerCleanupService::find_slots_to_clean( + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( &blockstore, last_root, max_ledger_shreds, @@ -393,8 +402,14 @@ mod tests { //send a signal to kill all but 5 shreds, which will be in the newest slots let mut last_purge_slot = 0; sender.send(50).unwrap(); - LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10) - .unwrap(); + BlockstoreCleanupService::cleanup_ledger( + &receiver, + &blockstore, + 5, + &mut last_purge_slot, + 10, + ) + .unwrap(); assert_eq!(last_purge_slot, 50); //check that 0-40 don't exist @@ -437,7 +452,7 @@ mod tests { let mut time = Measure::start("purge time"); sender.send(slot + num_slots).unwrap(); - LedgerCleanupService::cleanup_ledger( + BlockstoreCleanupService::cleanup_ledger( &receiver, &blockstore, initial_slots, diff --git a/core/src/ledger_metric_report_service.rs b/ledger/src/blockstore_metric_report_service.rs similarity index 75% rename from core/src/ledger_metric_report_service.rs rename to ledger/src/blockstore_metric_report_service.rs index 2e91013eb991b8..393442a3e25aca 100644 --- a/core/src/ledger_metric_report_service.rs +++ b/ledger/src/blockstore_metric_report_service.rs @@ -1,7 +1,7 @@ -//! The `ledger_metric_report_service` periodically reports ledger store metrics. +//! The `blockstore_metric_report_service` periodically reports ledger store metrics. use { - solana_ledger::blockstore::Blockstore, + crate::blockstore::Blockstore, std::{ string::ToString, sync::{ @@ -14,15 +14,15 @@ use { }; // Determines how often we report blockstore metrics under -// LedgerMetricReportService. Note that there're other blockstore -// metrics that are reported outside LedgerMetricReportService. +// BlockstoreMetricReportService. Note that there are other blockstore +// metrics that are reported outside BlockstoreMetricReportService. 
const BLOCKSTORE_METRICS_REPORT_PERIOD_MILLIS: u64 = 10000; -pub struct LedgerMetricReportService { +pub struct BlockstoreMetricReportService { t_cf_metric: JoinHandle<()>, } -impl LedgerMetricReportService { +impl BlockstoreMetricReportService { pub fn new(blockstore: Arc, exit: Arc) -> Self { let t_cf_metric = Builder::new() .name("solRocksCfMtrcs".to_string()) diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index 0f311ca1216ec4..10dd5182717841 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -9,8 +9,10 @@ pub mod block_error; #[macro_use] pub mod blockstore; pub mod ancestor_iterator; +pub mod blockstore_cleanup_service; pub mod blockstore_db; pub mod blockstore_meta; +pub mod blockstore_metric_report_service; pub mod blockstore_metrics; pub mod blockstore_options; pub mod blockstore_processor; diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index d675feda0a06f8..f6791307dd9453 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -4274,49 +4274,6 @@ fn test_leader_failure_4() { ); } -#[test] -#[serial] -fn test_ledger_cleanup_service() { - solana_logger::setup_with_default(RUST_LOG_FILTER); - error!("test_ledger_cleanup_service"); - let num_nodes = 3; - let validator_config = ValidatorConfig { - max_ledger_shreds: Some(100), - ..ValidatorConfig::default_for_test() - }; - let mut config = ClusterConfig { - cluster_lamports: DEFAULT_CLUSTER_LAMPORTS, - poh_config: PohConfig::new_sleep(Duration::from_millis(50)), - node_stakes: vec![DEFAULT_NODE_STAKE; num_nodes], - validator_configs: make_identical_validator_configs(&validator_config, num_nodes), - ..ClusterConfig::default() - }; - let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); - // 200ms/per * 100 = 20 seconds, so sleep a little longer than that. - sleep(Duration::from_secs(60)); - - cluster_tests::spend_and_verify_all_nodes( - &cluster.entry_point_info, - &cluster.funding_keypair, - num_nodes, - HashSet::new(), - SocketAddrSpace::Unspecified, - &cluster.connection_cache, - ); - cluster.close_preserve_ledgers(); - //check everyone's ledgers and make sure only ~100 slots are stored - for info in cluster.validators.values() { - let mut slots = 0; - let blockstore = Blockstore::open(&info.info.ledger_path).unwrap(); - blockstore - .slot_meta_iterator(0) - .unwrap() - .for_each(|_| slots += 1); - // with 3 nodes up to 3 slots can be in progress and not complete so max slots in blockstore should be up to 103 - assert!(slots <= 103, "got {slots}"); - } -} - // This test verifies that even if votes from a validator end up taking too long to land, and thus // some of the referenced slots are slots are no longer present in the slot hashes sysvar, // consensus can still be attained. 
diff --git a/validator/src/main.rs b/validator/src/main.rs index 4c247c9a9977a2..bb8fa537b8ecdb 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -22,7 +22,6 @@ use { solana_core::{ banking_trace::DISABLED_BAKING_TRACE_DIR, consensus::tower_storage, - ledger_cleanup_service::{DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS}, system_monitor_service::SystemMonitorService, tpu::DEFAULT_TPU_COALESCE, validator::{ @@ -32,6 +31,7 @@ use { }, solana_gossip::{cluster_info::Node, legacy_contact_info::LegacyContactInfo as ContactInfo}, solana_ledger::{ + blockstore_cleanup_service::{DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS}, blockstore_options::{ BlockstoreCompressionType, BlockstoreRecoveryMode, LedgerColumnOptions, ShredStorageType, From 03a456e7bbdd4038a9546139203eff902c9308b3 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 8 Nov 2023 12:09:10 -0600 Subject: [PATCH 67/98] Remove redundant bounds check from getBlock and getBlockTime (#33901) JsonRpcRequestProcessor::check_blockstore_root() contained some logic that performed duplicate sanity checking on a Blockstore fetch result. The checking involved creating rocksdb iterators, which has non-trivial overhead. This PR removes the duplicate checking, and also adds comments to help reason about how JsonRpcRequestProcessor interprets the Blockstore result. --- ledger/src/blockstore.rs | 31 ----------------------------- rpc/src/rpc.rs | 43 +++++++++++++++++++++++++--------------- 2 files changed, 27 insertions(+), 47 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 3ea9525fcb194f..f96d4daeb6a56f 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -3104,21 +3104,6 @@ impl Blockstore { matches!(self.db.get::<cf::Root>(slot), Ok(Some(true))) } - /// Returns true if a slot is between the rooted slot bounds of the ledger, but has not itself - /// been rooted. This is either because the slot was skipped, or due to a gap in ledger data, - /// as when booting from a newer snapshot. - pub fn is_skipped(&self, slot: Slot) -> bool { - let lowest_root = self - .rooted_slot_iterator(0) - .ok() - .and_then(|mut iter| iter.next()) - .unwrap_or_default(); - match self.db.get::<cf::Root>(slot).ok().flatten() { - Some(_) => false, - None => slot < self.max_root() && slot > lowest_root, - } - } - pub fn insert_bank_hash(&self, slot: Slot, frozen_hash: Hash, is_duplicate_confirmed: bool) { if let Some(prev_value) = self.bank_hash_cf.get(slot).unwrap() { if prev_value.frozen_hash() == frozen_hash && prev_value.is_duplicate_confirmed() { @@ -6868,22 +6853,6 @@ pub mod tests { } } - #[test] - fn test_is_skipped() { - let ledger_path = get_tmp_ledger_path_auto_delete!(); - let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let roots = [2, 4, 7, 12, 15]; - blockstore.set_roots(roots.iter()).unwrap(); - - for i in 0..20 { - if i < 2 || roots.contains(&i) || i > 15 { - assert!(!blockstore.is_skipped(i)); - } else { - assert!(blockstore.is_skipped(i)); - } - } - } - #[test] fn test_iter_bounds() { let ledger_path = get_tmp_ledger_path_auto_delete!(); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 5e62dff9ce55d3..26316f6af05826 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1002,26 +1002,37 @@ impl JsonRpcRequestProcessor { }) } - fn check_blockstore_root<T>( + // Check if the given `slot` is within the blockstore bounds.
This function assumes that + // `result` is from a blockstore fetch, and that the fetch: + // 1) Checked if `slot` is above the lowest cleanup slot (and errored if not) + // 2) Checked if `slot` is a root + fn check_blockstore_bounds<T>( &self, result: &std::result::Result<T, BlockstoreError>, slot: Slot, ) -> Result<()> { - if let Err(err) = result { - debug!( - "check_blockstore_root, slot: {:?}, max root: {:?}, err: {:?}", - slot, - self.blockstore.max_root(), - err - ); - if slot >= self.blockstore.max_root() { - return Err(RpcCustomError::BlockNotAvailable { slot }.into()); - } - if self.blockstore.is_skipped(slot) { - return Err(RpcCustomError::SlotSkipped { slot }.into()); + match result { + // The slot was found, all good + Ok(_) => Ok(()), + // The slot was cleaned up, return Ok() for now to allow fallback to bigtable + Err(BlockstoreError::SlotCleanedUp) => Ok(()), + // The slot was not cleaned up but also not found + Err(BlockstoreError::SlotNotRooted) => { + let max_root = self.blockstore.max_root(); + debug!("check_blockstore_bounds, slot: {slot}, max root: {max_root}"); + // Our node hasn't seen this slot yet, error out + if slot >= max_root { + return Err(RpcCustomError::BlockNotAvailable { slot }.into()); + } + // The slot is within the bounds of the blockstore as the lookup that yielded + // `result` checked that `slot` was greater than the blockstore's lowest + // cleanup slot and we just checked that `slot` was less than the blockstore's + // largest root. Thus, the slot must have been skipped and we can error out. + Err(RpcCustomError::SlotSkipped { slot }.into()) } + // Some other Blockstore error, ignore for now + _ => Ok(()), } - Ok(()) } fn check_slot_cleaned_up( @@ -1098,7 +1109,7 @@ impl JsonRpcRequestProcessor { { self.check_blockstore_writes_complete(slot)?; let result = self.blockstore.get_rooted_block(slot, true); - self.check_blockstore_root(&result, slot)?; + self.check_blockstore_bounds(&result, slot)?; let encode_block = |confirmed_block: ConfirmedBlock| -> Result<UiConfirmedBlock> { let mut encoded_block = confirmed_block .encode_with_options(encoding, encoding_options) @@ -1322,7 +1333,7 @@ impl JsonRpcRequestProcessor { .highest_super_majority_root() { let result = self.blockstore.get_rooted_block_time(slot); - self.check_blockstore_root(&result, slot)?; + self.check_blockstore_bounds(&result, slot)?; if result.is_err() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_result = bigtable_ledger_storage.get_confirmed_block(slot).await; From 7cdfba925976322a85100f0fd6e331f6aef07beb Mon Sep 17 00:00:00 2001 From: galactus <96341601+godmodegalactus@users.noreply.github.com> Date: Wed, 8 Nov 2023 19:16:42 +0100 Subject: [PATCH 68/98] Display error message while loading geyser plugins (#33990) --- geyser-plugin-manager/src/geyser_plugin_manager.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 92180d1991b56e..20729146767c0a 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -260,13 +260,13 @@ pub enum GeyserPluginManagerError { #[error("Invalid plugin path")] InvalidPluginPath, - #[error("Cannot load plugin shared library")] + #[error("Cannot load plugin shared library (error: {0})")] PluginLoadError(String), #[error("The geyser plugin {0} is already loaded shared library")] PluginAlreadyLoaded(String), - #[error("The GeyserPlugin on_load method failed")] +
#[error("The GeyserPlugin on_load method failed (error: {0})")] PluginStartError(String), } From 69cec7e7b74f29d3aa725899784c8f5b23d87d33 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Wed, 8 Nov 2023 10:27:50 -0800 Subject: [PATCH 69/98] Remove RwLock on BlockNotifier (#33981) --- core/src/replay_stage.rs | 9 ++++----- core/src/tvu.rs | 4 ++-- .../src/block_metadata_notifier_interface.rs | 2 +- geyser-plugin-manager/src/geyser_plugin_service.rs | 12 ++++++------ 4 files changed, 13 insertions(+), 14 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 082e0245f88319..3c2c7d39d06610 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -36,7 +36,7 @@ use { lazy_static::lazy_static, rayon::{prelude::*, ThreadPool}, solana_entry::entry::VerifyRecyclers, - solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock, + solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierArc, solana_gossip::cluster_info::ClusterInfo, solana_ledger::{ block_error::BlockError, @@ -492,7 +492,7 @@ impl ReplayStage { cost_update_sender: Sender, voting_sender: Sender, drop_bank_sender: Sender>>, - block_metadata_notifier: Option, + block_metadata_notifier: Option, log_messages_bytes_limit: Option, prioritization_fee_cache: Arc, dumped_slots_sender: DumpedSlotsSender, @@ -2760,7 +2760,7 @@ impl ReplayStage { cost_update_sender: &Sender, duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, - block_metadata_notifier: Option, + block_metadata_notifier: Option, replay_result_vec: &[ReplaySlotFromBlockstore], purge_repair_slot_counter: &mut PurgeRepairSlotCounter, ) -> bool { @@ -2951,7 +2951,6 @@ impl ReplayStage { } Self::record_rewards(bank, rewards_recorder_sender); if let Some(ref block_metadata_notifier) = block_metadata_notifier { - let block_metadata_notifier = block_metadata_notifier.read().unwrap(); let parent_blockhash = bank .parent() .map(|bank| bank.last_blockhash()) @@ -3016,7 +3015,7 @@ impl ReplayStage { cost_update_sender: &Sender, duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, - block_metadata_notifier: Option, + block_metadata_notifier: Option, replay_timing: &mut ReplayTiming, log_messages_bytes_limit: Option, replay_slots_concurrently: bool, diff --git a/core/src/tvu.rs b/core/src/tvu.rs index be39b5f9d5810e..8e479aa92b792d 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -25,7 +25,7 @@ use { bytes::Bytes, crossbeam_channel::{unbounded, Receiver, Sender}, solana_client::connection_cache::ConnectionCache, - solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock, + solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierArc, solana_gossip::{ cluster_info::ClusterInfo, duplicate_shred_handler::DuplicateShredHandler, duplicate_shred_listener::DuplicateShredListener, @@ -128,7 +128,7 @@ impl Tvu { gossip_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver, tvu_config: TvuConfig, max_slots: &Arc, - block_metadata_notifier: Option, + block_metadata_notifier: Option, wait_to_vote_slot: Option, accounts_background_request_sender: AbsRequestSender, log_messages_bytes_limit: Option, diff --git a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs 
b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs index f48df55d8d0ce5..465f700efe3275 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs +++ b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs @@ -22,4 +22,4 @@ pub trait BlockMetadataNotifier { ); } -pub type BlockMetadataNotifierLock = Arc<RwLock<dyn BlockMetadataNotifier + Sync + Send>>; +pub type BlockMetadataNotifierArc = Arc<dyn BlockMetadataNotifier + Sync + Send>; diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index 83ab9284cecb2a..ff3e050dc4b391 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -2,7 +2,7 @@ use { crate::{ accounts_update_notifier::AccountsUpdateNotifierImpl, block_metadata_notifier::BlockMetadataNotifierImpl, - block_metadata_notifier_interface::BlockMetadataNotifierLock, + block_metadata_notifier_interface::BlockMetadataNotifierArc, entry_notifier::EntryNotifierImpl, geyser_plugin_manager::{GeyserPluginManager, GeyserPluginManagerRequest}, slot_status_notifier::SlotStatusNotifierImpl, @@ -36,7 +36,7 @@ pub struct GeyserPluginService { accounts_update_notifier: Option<AccountsUpdateNotifier>, transaction_notifier: Option<TransactionNotifierLock>, entry_notifier: Option<EntryNotifierLock>, - block_metadata_notifier: Option<BlockMetadataNotifierLock>, + block_metadata_notifier: Option<BlockMetadataNotifierArc>, } impl GeyserPluginService { @@ -109,7 +109,7 @@ impl GeyserPluginService { let (slot_status_observer, block_metadata_notifier): ( Option<SlotStatusObserver>, - Option<BlockMetadataNotifierLock>, + Option<BlockMetadataNotifierArc>, ) = if account_data_notifications_enabled || transaction_notifications_enabled || entry_notifications_enabled @@ -121,9 +121,9 @@ impl GeyserPluginService { confirmed_bank_receiver, slot_status_notifier, )), - Some(Arc::new(RwLock::new(BlockMetadataNotifierImpl::new( + Some(Arc::new(BlockMetadataNotifierImpl::new( plugin_manager.clone(), - )))), + ))), ) } else { (None, None) @@ -168,7 +168,7 @@ impl GeyserPluginService { self.entry_notifier.clone() } - pub fn get_block_metadata_notifier(&self) -> Option<BlockMetadataNotifierLock> { + pub fn get_block_metadata_notifier(&self) -> Option<BlockMetadataNotifierArc> { self.block_metadata_notifier.clone() } From 4f1d9e00fdb4681e16bd20ad46fd97418bad8c23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 18:46:09 +0000 Subject: [PATCH 70/98] build(deps): bump index_list from 0.2.7 to 0.2.11 (#33985) * build(deps): bump index_list from 0.2.7 to 0.2.11 Bumps [index_list](https://github.com/Fairglow/index-list) from 0.2.7 to 0.2.11. - [Release notes](https://github.com/Fairglow/index-list/releases) - [Commits](https://github.com/Fairglow/index-list/compare/v0.2.7...v0.2.11) --- updated-dependencies: - dependency-name: index_list dependency-type: direct:production update-type: version-update:semver-patch ...
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d027457241e2c6..4d54e74cac4c91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2646,9 +2646,9 @@ dependencies = [ [[package]] name = "index_list" -version = "0.2.7" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9d968042a4902e08810946fc7cd5851eb75e80301342305af755ca06cb82ce" +checksum = "70891286cb8e844fdfcf1178b47569699f9e20b5ecc4b45a6240a64771444638" [[package]] name = "indexmap" diff --git a/Cargo.toml b/Cargo.toml index 4cc82b9e2694aa..f00f03be90ae54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -216,7 +216,7 @@ humantime = "2.0.1" hyper = "0.14.27" hyper-proxy = "0.9.1" im = "15.1.0" -index_list = "0.2.7" +index_list = "0.2.11" indexmap = "2.1.0" indicatif = "0.17.7" itertools = "0.10.5" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 69de98de6b55d3..995066a1bc557c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2280,9 +2280,9 @@ dependencies = [ [[package]] name = "index_list" -version = "0.2.7" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9d968042a4902e08810946fc7cd5851eb75e80301342305af755ca06cb82ce" +checksum = "70891286cb8e844fdfcf1178b47569699f9e20b5ecc4b45a6240a64771444638" [[package]] name = "indexmap" From e5d3dbe2faeede9565891b0499d2a24e8999012c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 18:46:28 +0000 Subject: [PATCH 71/98] build(deps): bump serde from 1.0.190 to 1.0.192 (#33986) * build(deps): bump serde from 1.0.190 to 1.0.192 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.190 to 1.0.192. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.190...v1.0.192) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d54e74cac4c91..6407374cd3a480 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4848,9 +4848,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.190" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -4866,9 +4866,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.190" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index f00f03be90ae54..a37a8fe811aba8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -290,7 +290,7 @@ rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.20" seqlock = "0.2.0" -serde = "1.0.190" +serde = "1.0.192" serde_bytes = "0.11.12" serde_derive = "1.0.103" serde_json = "1.0.107" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 995066a1bc557c..b79a5e3d254b79 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4300,9 +4300,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.190" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -4318,9 +4318,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.190" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", From 32792d82599709df8855de14e2aba78846aba922 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 18:46:52 +0000 Subject: [PATCH 72/98] build(deps): bump syn from 2.0.38 to 2.0.39 (#33987) * build(deps): bump syn from 2.0.38 to 2.0.39 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.38 to 2.0.39. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.38...2.0.39) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 60 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 58 +++++++++++++++++++-------------------- 2 files changed, 59 insertions(+), 59 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6407374cd3a480..0d60db0a71dd0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -450,7 +450,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -604,7 +604,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1509,7 +1509,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1520,7 +1520,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1704,7 +1704,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1810,7 +1810,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -2091,7 +2091,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3371,7 +3371,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3475,7 +3475,7 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3487,7 +3487,7 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3994,7 +3994,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4163,7 +4163,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4872,7 +4872,7 @@ checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4926,7 +4926,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4976,7 +4976,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6104,7 +6104,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7133,7 +7133,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7808,7 +7808,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7820,7 
+7820,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.38", + "syn 2.0.39", "thiserror", ] @@ -7878,7 +7878,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -8051,9 +8051,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -8229,7 +8229,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -8241,7 +8241,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "test-case-core", ] @@ -8277,7 +8277,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -8416,7 +8416,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -8947,7 +8947,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-shared", ] @@ -8981,7 +8981,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9284,7 +9284,7 @@ checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -9304,7 +9304,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b79a5e3d254b79..f96e4016f6a789 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -425,7 +425,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -579,7 +579,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1221,7 +1221,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1232,7 +1232,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1416,7 +1416,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1525,7 +1525,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1789,7 +1789,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3024,7 +3024,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3106,7 +3106,7 @@ dependencies = [ 
"proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3118,7 +3118,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3584,7 +3584,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3725,7 +3725,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4324,7 +4324,7 @@ checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4369,7 +4369,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5117,7 +5117,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6245,7 +6245,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6747,7 +6747,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6759,7 +6759,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.38", + "syn 2.0.39", "thiserror", ] @@ -6807,7 +6807,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6980,9 +6980,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -7144,7 +7144,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7156,7 +7156,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "test-case-core", ] @@ -7192,7 +7192,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7315,7 +7315,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7798,7 +7798,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-shared", ] @@ -7832,7 +7832,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8117,7 +8117,7 @@ checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -8137,7 +8137,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] From 06884a044d743b7bea508e50cb011e704a5851cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: 
Wed, 8 Nov 2023 18:47:12 +0000 Subject: [PATCH 73/98] build(deps): bump light-poseidon from 0.1.1 to 0.1.2 (#33988) * build(deps): bump light-poseidon from 0.1.1 to 0.1.2 Bumps [light-poseidon](https://github.com/Lightprotocol/light-poseidon) from 0.1.1 to 0.1.2. - [Release notes](https://github.com/Lightprotocol/light-poseidon/releases) - [Commits](https://github.com/Lightprotocol/light-poseidon/compare/v0.1.1...v0.1.2) --- updated-dependencies: - dependency-name: light-poseidon dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d60db0a71dd0a..519388ed1be9eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3000,9 +3000,9 @@ dependencies = [ [[package]] name = "light-poseidon" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "949bdd22e4ed93481d45e9a6badb34b99132bcad0c8a8d4f05c42f7dcc7b90bc" +checksum = "a5b439809cdfc0d86ecc7317f1724df13dfa665df48991b79e90e689411451f7" dependencies = [ "ark-bn254", "ark-ff", diff --git a/Cargo.toml b/Cargo.toml index a37a8fe811aba8..778b37f477db1d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -236,7 +236,7 @@ lazy_static = "1.4.0" libc = "0.2.149" libloading = "0.7.4" libsecp256k1 = "0.6.0" -light-poseidon = "0.1.1" +light-poseidon = "0.1.2" log = "0.4.20" lru = "0.7.7" lz4 = "1.24.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f96e4016f6a789..fe1623388c4188 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2668,9 +2668,9 @@ dependencies = [ [[package]] name = "light-poseidon" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "949bdd22e4ed93481d45e9a6badb34b99132bcad0c8a8d4f05c42f7dcc7b90bc" +checksum = "a5b439809cdfc0d86ecc7317f1724df13dfa665df48991b79e90e689411451f7" dependencies = [ "ark-bn254", "ark-ff", From 7cb83bc4919f75be16572e3f9c8be9ee77aebaa4 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 8 Nov 2023 15:08:33 -0500 Subject: [PATCH 74/98] Adds `S` to HashMap/HashSet impls of Contains (#33973) --- accounts-db/src/contains.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/contains.rs b/accounts-db/src/contains.rs index 622ddee7ab51ba..24ceff8d1ac38a 100644 --- a/accounts-db/src/contains.rs +++ b/accounts-db/src/contains.rs @@ -2,7 +2,7 @@ use std::{ borrow::Borrow, cmp::Eq, collections::{HashMap, HashSet}, - hash::Hash, + hash::{BuildHasher, Hash}, }; pub trait Contains<'a, T: Eq + Hash> { @@ -12,24 +12,24 @@ pub trait Contains<'a, T: Eq + Hash> { fn contains_iter(&'a self) -> Self::Iter; } -impl<'a, T: 'a + Eq + Hash, U: 'a> Contains<'a, T> for HashMap<T, U> { +impl<'a, T: 'a + Eq + Hash, U: 'a, S: BuildHasher> Contains<'a, T> for HashMap<T, U, S> { type Item = &'a T; type Iter = std::collections::hash_map::Keys<'a, T, U>; fn contains(&self, key: &T) -> bool { - <HashMap<T, U>>::contains_key(self, key) + <HashMap<T, U, S>>::contains_key(self, key) } fn contains_iter(&'a self) -> Self::Iter { self.keys() } } -impl<'a, T: 'a + Eq + Hash> Contains<'a, T> for HashSet<T> { +impl<'a, T: 'a + Eq + Hash, S: BuildHasher> Contains<'a, T> for HashSet<T, S> { type Item = &'a
T; type Iter = std::collections::hash_set::Iter<'a, T>; fn contains(&self, key: &T) -> bool { - <HashSet<T>>::contains(self, key) + <HashSet<T, S>>::contains(self, key) } fn contains_iter(&'a self) -> Self::Iter { self.iter() From 3ac2507d36715579401270bbcec246cf494c6d44 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 8 Nov 2023 20:09:23 +0000 Subject: [PATCH 75/98] adds keep-alive-interval to repair QUIC transport config (#33866) QUIC connections may time out due to infrequent repair requests. The commit configures keep_alive_interval and max_idle_timeout to avoid timeouts. --- core/src/repair/quic_endpoint.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index 2c5e954a626c74..c6f2e00df53a26 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -6,8 +6,8 @@ use { log::error, quinn::{ ClientConfig, ConnectError, Connecting, Connection, ConnectionError, Endpoint, - EndpointConfig, ReadError, ReadToEndError, RecvStream, SendStream, ServerConfig, - TokioRuntime, TransportConfig, VarInt, WriteError, + EndpointConfig, IdleTimeout, ReadError, ReadToEndError, RecvStream, SendStream, + ServerConfig, TokioRuntime, TransportConfig, VarInt, WriteError, }, rcgen::RcgenError, rustls::{Certificate, PrivateKey}, @@ -46,7 +46,13 @@ const CONNECT_SERVER_NAME: &str = "solana-repair"; const CLIENT_CHANNEL_BUFFER: usize = 1 << 14; const ROUTER_CHANNEL_BUFFER: usize = 64; const CONNECTION_CACHE_CAPACITY: usize = 3072; + +// Transport config. +// Repair randomly samples peers, uses bi-directional streams and generally has +// low to moderate load and so is configured separately from other protocols. +const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(4); const MAX_CONCURRENT_BIDI_STREAMS: VarInt = VarInt::from_u32(512); +const MAX_IDLE_TIMEOUT: Duration = Duration::from_secs(10); const CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(1); const CONNECTION_CLOSE_ERROR_CODE_DROPPED: VarInt = VarInt::from_u32(2); @@ -195,11 +201,15 @@ fn new_client_config(cert: Certificate, key: PrivateKey) -> Result<ClientConfig fn new_transport_config() -> TransportConfig { + let max_idle_timeout = IdleTimeout::try_from(MAX_IDLE_TIMEOUT).unwrap(); let mut config = TransportConfig::default(); + // Disable datagrams and uni streams.
config + .datagram_receive_buffer_size(None) + .keep_alive_interval(Some(KEEP_ALIVE_INTERVAL)) .max_concurrent_bidi_streams(MAX_CONCURRENT_BIDI_STREAMS) .max_concurrent_uni_streams(VarInt::from(0u8)) - .datagram_receive_buffer_size(None); + .max_idle_timeout(Some(max_idle_timeout)); config } From 783f13621412536c4445e34b4820fd4b4d05059e Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 8 Nov 2023 20:28:19 +0000 Subject: [PATCH 76/98] expands transport configs in turbine QUIC endpoint (#33864) --- turbine/src/quic_endpoint.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index 326f409ae32405..e8a316420b42d8 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -5,7 +5,8 @@ use { log::error, quinn::{ ClientConfig, ConnectError, Connecting, Connection, ConnectionError, Endpoint, - EndpointConfig, SendDatagramError, ServerConfig, TokioRuntime, TransportConfig, VarInt, + EndpointConfig, IdleTimeout, SendDatagramError, ServerConfig, TokioRuntime, + TransportConfig, VarInt, }, rcgen::RcgenError, rustls::{Certificate, PrivateKey}, @@ -39,10 +40,17 @@ use { const CLIENT_CHANNEL_BUFFER: usize = 1 << 14; const ROUTER_CHANNEL_BUFFER: usize = 64; const CONNECTION_CACHE_CAPACITY: usize = 3072; -const INITIAL_MAXIMUM_TRANSMISSION_UNIT: u16 = 1280; const ALPN_TURBINE_PROTOCOL_ID: &[u8] = b"solana-turbine"; const CONNECT_SERVER_NAME: &str = "solana-turbine"; +// Transport config. +const DATAGRAM_RECEIVE_BUFFER_SIZE: usize = 256 * 1024 * 1024; +const DATAGRAM_SEND_BUFFER_SIZE: usize = 128 * 1024 * 1024; +const INITIAL_MAXIMUM_TRANSMISSION_UNIT: u16 = MINIMUM_MAXIMUM_TRANSMISSION_UNIT; +const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(4); +const MAX_IDLE_TIMEOUT: Duration = Duration::from_secs(10); +const MINIMUM_MAXIMUM_TRANSMISSION_UNIT: u16 = 1280; + const CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(1); const CONNECTION_CLOSE_ERROR_CODE_DROPPED: VarInt = VarInt::from_u32(2); const CONNECTION_CLOSE_ERROR_CODE_INVALID_IDENTITY: VarInt = VarInt::from_u32(3); @@ -173,11 +181,18 @@ fn new_client_config(cert: Certificate, key: PrivateKey) -> Result TransportConfig { + let max_idle_timeout = IdleTimeout::try_from(MAX_IDLE_TIMEOUT).unwrap(); let mut config = TransportConfig::default(); config + .datagram_receive_buffer_size(Some(DATAGRAM_RECEIVE_BUFFER_SIZE)) + .datagram_send_buffer_size(DATAGRAM_SEND_BUFFER_SIZE) + .initial_mtu(INITIAL_MAXIMUM_TRANSMISSION_UNIT) + .keep_alive_interval(Some(KEEP_ALIVE_INTERVAL)) .max_concurrent_bidi_streams(VarInt::from(0u8)) .max_concurrent_uni_streams(VarInt::from(0u8)) - .initial_mtu(INITIAL_MAXIMUM_TRANSMISSION_UNIT); + .max_idle_timeout(Some(max_idle_timeout)) + .min_mtu(MINIMUM_MAXIMUM_TRANSMISSION_UNIT) + .mtu_discovery_config(None); config } From bba6ea2d6989f8cc59ecc3a7f5ef02cb915792d9 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 8 Nov 2023 16:15:51 -0500 Subject: [PATCH 77/98] Returns IntMap from select_candidates_by_total_usage() (#33976) --- accounts-db/src/accounts_db.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 65c6a9a52cb23e..ed0f3e551c66cf 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -81,7 +81,7 @@ use { serde::{Deserialize, Serialize}, smallvec::SmallVec, solana_measure::{measure::Measure, measure_us}, - solana_nohash_hasher::IntSet, + 
solana_nohash_hasher::{IntMap, IntSet}, solana_rayon_threadlimit::get_thread_count, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, @@ -4328,7 +4328,7 @@ impl AccountsDb { shrink_slots: &ShrinkCandidates, shrink_ratio: f64, oldest_non_ancient_slot: Option<Slot>, - ) -> (HashMap<Slot, Arc<AccountStorageEntry>>, ShrinkCandidates) { + ) -> (IntMap<Slot, Arc<AccountStorageEntry>>, ShrinkCandidates) { struct StoreUsageInfo { slot: Slot, alive_ratio: f64, @@ -4371,7 +4371,7 @@ impl AccountsDb { // Working from the beginning of store_usage which are the most sparse and see when we can stop // shrinking while still achieving the overall goals. - let mut shrink_slots = HashMap::new(); + let mut shrink_slots = IntMap::default(); let mut shrink_slots_next_batch = ShrinkCandidates::default(); for usage in &store_usage { let store = &usage.store; From 230779d4590ac625e6710117987ef505c0cf71c6 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 8 Nov 2023 18:16:51 -0600 Subject: [PATCH 78/98] =?UTF-8?q?Revert=20"=20Remove=20redundant=20bounds?= =?UTF-8?q?=20check=20from=20getBlock=20and=20getBlockTime=E2=80=A6=20(#33?= =?UTF-8?q?996)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Revert " Remove redundant bounds check from getBlock and getBlockTime (#33901)" This reverts commit 03a456e7bbdd4038a9546139203eff902c9308b3. --- ledger/src/blockstore.rs | 31 +++++++++++++++++++++++++++++ rpc/src/rpc.rs | 43 +++++++++++++++------------------------- 2 files changed, 47 insertions(+), 27 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index f96d4daeb6a56f..3ea9525fcb194f 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -3104,6 +3104,21 @@ impl Blockstore { matches!(self.db.get::<cf::Root>(slot), Ok(Some(true))) } + /// Returns true if a slot is between the rooted slot bounds of the ledger, but has not itself + /// been rooted. This is either because the slot was skipped, or due to a gap in ledger data, + /// as when booting from a newer snapshot. + pub fn is_skipped(&self, slot: Slot) -> bool { + let lowest_root = self + .rooted_slot_iterator(0) + .ok() + .and_then(|mut iter| iter.next()) + .unwrap_or_default(); + match self.db.get::<cf::Root>(slot).ok().flatten() { + Some(_) => false, + None => slot < self.max_root() && slot > lowest_root, + } + } + pub fn insert_bank_hash(&self, slot: Slot, frozen_hash: Hash, is_duplicate_confirmed: bool) { if let Some(prev_value) = self.bank_hash_cf.get(slot).unwrap() { if prev_value.frozen_hash() == frozen_hash && prev_value.is_duplicate_confirmed() { @@ -6853,6 +6868,22 @@ pub mod tests { } } + #[test] + fn test_is_skipped() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + let roots = [2, 4, 7, 12, 15]; + blockstore.set_roots(roots.iter()).unwrap(); + + for i in 0..20 { + if i < 2 || roots.contains(&i) || i > 15 { + assert!(!blockstore.is_skipped(i)); + } else { + assert!(blockstore.is_skipped(i)); + } + } + } + #[test] fn test_iter_bounds() { let ledger_path = get_tmp_ledger_path_auto_delete!(); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 26316f6af05826..5e62dff9ce55d3 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1002,37 +1002,26 @@ impl JsonRpcRequestProcessor { }) } - // Check if the given `slot` is within the blockstore bounds.
This function assumes that - // `result` is from a blockstore fetch, and that the fetch: - // 1) Checked if `slot` is above the lowest cleanup slot (and errored if not) - // 2) Checked if `slot` is a root - fn check_blockstore_bounds<T>( + fn check_blockstore_root<T>( &self, result: &std::result::Result<T, BlockstoreError>, slot: Slot, ) -> Result<()> { - match result { - // The slot was found, all good - Ok(_) => Ok(()), - // The slot was cleaned up, return Ok() for now to allow fallback to bigtable - Err(BlockstoreError::SlotCleanedUp) => Ok(()), - // The slot was not cleaned up but also not found - Err(BlockstoreError::SlotNotRooted) => { - let max_root = self.blockstore.max_root(); - debug!("check_blockstore_bounds, slot: {slot}, max root: {max_root}"); - // Our node hasn't seen this slot yet, error out - if slot >= max_root { - return Err(RpcCustomError::BlockNotAvailable { slot }.into()); - } - // The slot is within the bounds of the blockstore as the lookup that yielded - // `result` checked that `slot` was greater than the blockstore's lowest - // cleanup slot and we just checked that `slot` was less than the blockstore's - // largest root. Thus, the slot must have been skipped and we can error out. - Err(RpcCustomError::SlotSkipped { slot }.into()) + if let Err(err) = result { + debug!( + "check_blockstore_root, slot: {:?}, max root: {:?}, err: {:?}", + slot, + self.blockstore.max_root(), + err + ); + if slot >= self.blockstore.max_root() { + return Err(RpcCustomError::BlockNotAvailable { slot }.into()); + } + if self.blockstore.is_skipped(slot) { + return Err(RpcCustomError::SlotSkipped { slot }.into()); } - // Some other Blockstore error, ignore for now - _ => Ok(()), } + Ok(()) } fn check_slot_cleaned_up( @@ -1109,7 +1098,7 @@ impl JsonRpcRequestProcessor { { self.check_blockstore_writes_complete(slot)?; let result = self.blockstore.get_rooted_block(slot, true); - self.check_blockstore_bounds(&result, slot)?; + self.check_blockstore_root(&result, slot)?; let encode_block = |confirmed_block: ConfirmedBlock| -> Result<UiConfirmedBlock> { let mut encoded_block = confirmed_block .encode_with_options(encoding, encoding_options) @@ -1333,7 +1322,7 @@ impl JsonRpcRequestProcessor { .highest_super_majority_root() { let result = self.blockstore.get_rooted_block_time(slot); - self.check_blockstore_bounds(&result, slot)?; + self.check_blockstore_root(&result, slot)?; if result.is_err() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_result = bigtable_ledger_storage.get_confirmed_block(slot).await; From 874fae507f87606a4a6c5bbdf3ce68fa522a6b52 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Wed, 8 Nov 2023 21:58:44 -0800 Subject: [PATCH 79/98] [TieredStorage] Make HotStorageReader use AccountOffset type (#33964) #### Problem #33927 introduced a new type AccountOffset, but HotStorageReader still uses `usize` to access accounts. #### Summary of Changes This PR makes HotStorageReader use the new AccountOffset type.
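As a side note, a minimal sketch of the newtype pattern this change applies (illustrative only: `Reader`, `read_u64_at`, and the byte-buffer layout below are stand-ins, not the tiered-storage crate's actual API):

    // A typed offset makes it a compile error to pass an arbitrary `usize`
    // where an account position is expected.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct AccountOffset {
        pub block: usize,
    }

    // Stand-in for the mmap-backed storage reader.
    pub struct Reader<'a> {
        bytes: &'a [u8],
    }

    impl<'a> Reader<'a> {
        // Accepts only a typed offset, mirroring the shape of
        // get_account_meta_from_offset() in the hunk below.
        pub fn read_u64_at(&self, offset: AccountOffset) -> Option<u64> {
            let end = offset.block.checked_add(8)?;
            let bytes = self.bytes.get(offset.block..end)?;
            Some(u64::from_le_bytes(bytes.try_into().ok()?))
        }
    }

    fn main() {
        let data = 42u64.to_le_bytes();
        let reader = Reader { bytes: &data };
        assert_eq!(reader.read_u64_at(AccountOffset { block: 0 }), Some(42));
    }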
--- accounts-db/src/tiered_storage/hot.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 9e987f886de101..f2efc1a966ca11 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -9,7 +9,7 @@ use { footer::{ AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter, }, - index::IndexBlockFormat, + index::{AccountOffset, IndexBlockFormat}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, mmap_utils::get_type, TieredStorageFormat, TieredStorageResult, @@ -223,8 +223,11 @@ impl HotStorageReader { } /// Returns the account meta located at the specified offset. - fn get_account_meta_from_offset(&self, offset: usize) -> TieredStorageResult<&HotAccountMeta> { - let (meta, _) = get_type::(&self.mmap, offset)?; + fn get_account_meta_from_offset( + &self, + account_offset: AccountOffset, + ) -> TieredStorageResult<&HotAccountMeta> { + let (meta, _) = get_type::(&self.mmap, account_offset.block)?; Ok(meta) } } @@ -241,7 +244,7 @@ pub mod tests { FOOTER_SIZE, }, hot::{HotAccountMeta, HotStorageReader}, - index::IndexBlockFormat, + index::{AccountOffset, IndexBlockFormat}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, }, memoffset::offset_of, @@ -444,7 +447,7 @@ pub mod tests { .map(|meta| { let prev_offset = current_offset; current_offset += file.write_type(meta).unwrap(); - prev_offset + AccountOffset { block: prev_offset } }) .collect(); // while the test only focuses on account metas, writing a footer From a9509f56b7897b08bcdd16d3a056257a7d396681 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 9 Nov 2023 13:10:59 +0100 Subject: [PATCH 80/98] Feature - Epoch boundary recompilation phase (#33477) * Adds LoadedPrograms::upcoming_environments. * Moves LoadedPrograms::prune_feature_set_transition() into LoadedPrograms::prune(). * Adds parameter recompile to Bank::load_program(). * Sets latest_root_slot/epoch and environments in Bank::finish_init(). * Removes FEATURES_AFFECTING_RBPF list. * Adjusts test_feature_activation_loaded_programs_recompilation_phase(). 
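Before the diff, a rough sketch of the scheduling arithmetic behind the recompilation phase (illustrative only: the constant below is a stand-in for MAX_LOADED_ENTRY_COUNT and the types are simplified). The phase opens once the remaining slots in the epoch fit inside half the program-cache capacity, leaving roughly one slot per cached program to recompile before the new environments take effect:

    // Stand-in for solana_program_runtime::loaded_programs::MAX_LOADED_ENTRY_COUNT.
    const MAX_LOADED_ENTRY_COUNT: u64 = 256;

    // True when `slot_index` falls inside the pre-boundary recompilation
    // window, mirroring the check added to Bank::new_from_parent below.
    fn in_recompilation_phase(slot_index: u64, slots_in_epoch: u64) -> bool {
        let window = MAX_LOADED_ENTRY_COUNT.min(slots_in_epoch) / 2;
        slot_index.saturating_add(window) >= slots_in_epoch
    }

    fn main() {
        let slots_in_epoch = 432_000;
        assert!(!in_recompilation_phase(0, slots_in_epoch));
        // Only the final `window` slots of the epoch are inside the phase.
        assert!(in_recompilation_phase(slots_in_epoch - 1, slots_in_epoch));
    }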
--- ledger-tool/src/program.rs | 2 +- program-runtime/src/loaded_programs.rs | 54 +++++---- runtime/src/bank.rs | 160 +++++++++++++++++-------- runtime/src/bank/metrics.rs | 2 + runtime/src/bank/tests.rs | 24 +--- 5 files changed, 151 insertions(+), 91 deletions(-) diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 7420a1f7a10b4b..c1a65170a239fa 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -552,7 +552,7 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { .clone(), ); for key in cached_account_keys { - loaded_programs.replenish(key, bank.load_program(&key, false)); + loaded_programs.replenish(key, bank.load_program(&key, false, None)); debug!("Loaded program {}", key); } invoke_context.programs_loaded_for_tx_batch = &loaded_programs; diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 84902f31b14f34..ac16578acf6183 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -459,6 +459,14 @@ pub struct LoadedPrograms { pub latest_root_epoch: Epoch, /// Environments of the current epoch pub environments: ProgramRuntimeEnvironments, + /// Anticipated replacement for `environments` at the next epoch + /// + /// This is `None` during most of an epoch, and only `Some` around the boundaries (at the end and beginning of an epoch). + /// More precisely, it starts with the recompilation phase a few hundred slots before the epoch boundary, + /// and it ends with the first rerooting after the epoch boundary. + pub upcoming_environments: Option, + /// List of loaded programs which should be recompiled before the next epoch (but don't have to). + pub programs_to_recompile: Vec<(Pubkey, Arc)>, pub stats: Stats, pub fork_graph: Option>>, } @@ -481,6 +489,8 @@ impl Default for LoadedPrograms { latest_root_slot: 0, latest_root_epoch: 0, environments: ProgramRuntimeEnvironments::default(), + upcoming_environments: None, + programs_to_recompile: Vec::default(), stats: Stats::default(), fork_graph: None, } @@ -567,7 +577,12 @@ impl LoadedPrograms { } /// Returns the current environments depending on the given epoch - pub fn get_environments_for_epoch(&self, _epoch: Epoch) -> &ProgramRuntimeEnvironments { + pub fn get_environments_for_epoch(&self, epoch: Epoch) -> &ProgramRuntimeEnvironments { + if epoch != self.latest_root_epoch { + if let Some(upcoming_environments) = self.upcoming_environments.as_ref() { + return upcoming_environments; + } + } &self.environments } @@ -630,22 +645,6 @@ impl LoadedPrograms { entry } - /// On the epoch boundary this removes all programs of the outdated feature set - pub fn prune_feature_set_transition(&mut self) { - for second_level in self.entries.values_mut() { - second_level.retain(|entry| { - if Self::matches_environment(entry, &self.environments) { - return true; - } - self.stats - .prunes_environment - .fetch_add(1, Ordering::Relaxed); - false - }); - } - self.remove_programs_with_no_entries(); - } - pub fn prune_by_deployment_slot(&mut self, slot: Slot) { self.entries.retain(|_key, second_level| { *second_level = second_level @@ -668,6 +667,15 @@ impl LoadedPrograms { error!("Failed to lock fork graph for reading."); return; }; + let mut recompilation_phase_ends = false; + if self.latest_root_epoch != new_root_epoch { + self.latest_root_epoch = new_root_epoch; + if let Some(upcoming_environments) = self.upcoming_environments.take() { + recompilation_phase_ends = true; + self.environments = upcoming_environments; + 
self.programs_to_recompile.clear(); + } + } for second_level in self.entries.values_mut() { // Remove entries un/re/deployed on orphan forks let mut first_ancestor_found = false; @@ -697,6 +705,15 @@ impl LoadedPrograms { return false; } } + // Remove outdated environment of previous feature set + if recompilation_phase_ends + && !Self::matches_environment(entry, &self.environments) + { + self.stats + .prunes_environment + .fetch_add(1, Ordering::Relaxed); + return false; + } true }) .cloned() @@ -706,9 +723,6 @@ impl LoadedPrograms { self.remove_programs_with_no_entries(); debug_assert!(self.latest_root_slot <= new_root_slot); self.latest_root_slot = new_root_slot; - if self.latest_root_epoch < new_root_epoch { - self.latest_root_epoch = new_root_epoch; - } } fn matches_environment( diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 9f3636e653c41f..2b8cbe34926100 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -103,6 +103,7 @@ use { }, solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, solana_cost_model::cost_tracker::CostTracker, + solana_loader_v4_program::create_program_runtime_environment_v2, solana_measure::{measure, measure::Measure, measure_us}, solana_perf::perf_libs, solana_program_runtime::{ @@ -1442,11 +1443,10 @@ impl Bank { }); // Following code may touch AccountsDb, requiring proper ancestors - let parent_epoch = parent.epoch(); let (_, update_epoch_time_us) = measure_us!({ - if parent_epoch < new.epoch() { + if parent.epoch() < new.epoch() { new.process_new_epoch( - parent_epoch, + parent.epoch(), parent.slot(), parent.block_height(), reward_calc_tracer, @@ -1461,11 +1461,71 @@ impl Bank { } }); + let (_, recompilation_time_us) = measure_us!({ + // Recompile loaded programs one at a time before the next epoch hits + let (_epoch, slot_index) = new.get_epoch_and_slot_index(new.slot()); + let slots_in_epoch = new.get_slots_in_epoch(new.epoch()); + let slots_in_recompilation_phase = + (solana_program_runtime::loaded_programs::MAX_LOADED_ENTRY_COUNT as u64) + .min(slots_in_epoch) + .checked_div(2) + .unwrap(); + let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); + if loaded_programs_cache.upcoming_environments.is_some() { + if let Some((key, program_to_recompile)) = + loaded_programs_cache.programs_to_recompile.pop() + { + drop(loaded_programs_cache); + let recompiled = new.load_program(&key, false, Some(program_to_recompile)); + let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); + loaded_programs_cache.replenish(key, recompiled); + } + } else if new.epoch() != loaded_programs_cache.latest_root_epoch + || slot_index.saturating_add(slots_in_recompilation_phase) >= slots_in_epoch + { + // Anticipate the upcoming program runtime environment for the next epoch, + // so we can try to recompile loaded programs before the feature transition hits. 
+ drop(loaded_programs_cache); + let (feature_set, _new_feature_activations) = new.compute_active_feature_set(true); + let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); + let program_runtime_environment_v1 = create_program_runtime_environment_v1( + &feature_set, + &new.runtime_config.compute_budget.unwrap_or_default(), + false, /* deployment */ + false, /* debugging_features */ + ) + .unwrap(); + let program_runtime_environment_v2 = create_program_runtime_environment_v2( + &new.runtime_config.compute_budget.unwrap_or_default(), + false, /* debugging_features */ + ); + let mut upcoming_environments = loaded_programs_cache.environments.clone(); + let changed_program_runtime_v1 = + *upcoming_environments.program_runtime_v1 != program_runtime_environment_v1; + let changed_program_runtime_v2 = + *upcoming_environments.program_runtime_v2 != program_runtime_environment_v2; + if changed_program_runtime_v1 { + upcoming_environments.program_runtime_v1 = + Arc::new(program_runtime_environment_v1); + } + if changed_program_runtime_v2 { + upcoming_environments.program_runtime_v2 = + Arc::new(program_runtime_environment_v2); + } + loaded_programs_cache.upcoming_environments = Some(upcoming_environments); + loaded_programs_cache.programs_to_recompile = loaded_programs_cache + .get_entries_sorted_by_tx_usage( + changed_program_runtime_v1, + changed_program_runtime_v2, + ); + } + }); + // Update sysvars before processing transactions let (_, update_sysvars_time_us) = measure_us!({ new.update_slot_hashes(); - new.update_stake_history(Some(parent_epoch)); - new.update_clock(Some(parent_epoch)); + new.update_stake_history(Some(parent.epoch())); + new.update_clock(Some(parent.epoch())); new.update_fees(); new.update_last_restart_slot() }); @@ -1493,6 +1553,7 @@ impl Bank { feature_set_time_us, ancestors_time_us, update_epoch_time_us, + recompilation_time_us, update_sysvars_time_us, fill_sysvar_cache_time_us, }, @@ -4642,16 +4703,25 @@ impl Bank { ProgramAccountLoadResult::InvalidAccountData } - pub fn load_program(&self, pubkey: &Pubkey, reload: bool) -> Arc { + pub fn load_program( + &self, + pubkey: &Pubkey, + reload: bool, + recompile: Option>, + ) -> Arc { let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - let environments = loaded_programs_cache.get_environments_for_epoch(self.epoch); - + let effective_epoch = if recompile.is_some() { + loaded_programs_cache.latest_root_epoch.saturating_add(1) + } else { + self.epoch + }; + let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); let mut load_program_metrics = LoadProgramMetrics { program_id: pubkey.to_string(), ..LoadProgramMetrics::default() }; - let loaded_program = match self.load_program_accounts(pubkey) { + let mut loaded_program = match self.load_program_accounts(pubkey) { ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( self.slot, LoadedProgramType::Closed, @@ -4758,6 +4828,16 @@ impl Bank { let mut timings = ExecuteDetailsTimings::default(); load_program_metrics.submit_datapoint(&mut timings); + if let Some(recompile) = recompile { + loaded_program.effective_slot = loaded_program.effective_slot.max( + self.epoch_schedule() + .get_first_slot_in_epoch(effective_epoch), + ); + loaded_program.tx_usage_counter = + AtomicU64::new(recompile.tx_usage_counter.load(Ordering::Relaxed)); + loaded_program.ix_usage_counter = + AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); + } Arc::new(loaded_program) } @@ -5004,7 +5084,7 @@ impl Bank 
{ let missing_programs: Vec<(Pubkey, Arc)> = missing .iter() .map(|(key, count)| { - let program = self.load_program(key, false); + let program = self.load_program(key, false, None); program.tx_usage_counter.store(*count, Ordering::Relaxed); (*key, program) }) @@ -5014,7 +5094,7 @@ impl Bank { let unloaded_programs: Vec<(Pubkey, Arc)> = unloaded .iter() .map(|(key, count)| { - let program = self.load_program(key, true); + let program = self.load_program(key, true, None); program.tx_usage_counter.store(*count, Ordering::Relaxed); (*key, program) }) @@ -6559,6 +6639,24 @@ impl Bank { } } + let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); + loaded_programs_cache.latest_root_slot = self.slot(); + loaded_programs_cache.latest_root_epoch = self.epoch(); + loaded_programs_cache.environments.program_runtime_v1 = Arc::new( + create_program_runtime_environment_v1( + &self.feature_set, + &self.runtime_config.compute_budget.unwrap_or_default(), + false, /* deployment */ + false, /* debugging_features */ + ) + .unwrap(), + ); + loaded_programs_cache.environments.program_runtime_v2 = + Arc::new(create_program_runtime_environment_v2( + &self.runtime_config.compute_budget.unwrap_or_default(), + false, /* debugging_features */ + )); + if self .feature_set .is_active(&feature_set::cap_accounts_data_len::id()) @@ -7924,46 +8022,6 @@ impl Bank { only_apply_transitions_for_new_features: bool, new_feature_activations: &HashSet, ) { - const FEATURES_AFFECTING_RBPF: &[Pubkey] = &[ - feature_set::error_on_syscall_bpf_function_hash_collisions::id(), - feature_set::reject_callx_r10::id(), - feature_set::switch_to_new_elf_parser::id(), - feature_set::bpf_account_data_direct_mapping::id(), - feature_set::enable_alt_bn128_syscall::id(), - feature_set::enable_alt_bn128_compression_syscall::id(), - feature_set::enable_big_mod_exp_syscall::id(), - feature_set::blake3_syscall_enabled::id(), - feature_set::curve25519_syscall_enabled::id(), - feature_set::disable_fees_sysvar::id(), - feature_set::enable_partitioned_epoch_reward::id(), - feature_set::disable_deploy_of_alloc_free_syscall::id(), - feature_set::last_restart_slot_sysvar::id(), - feature_set::remaining_compute_units_syscall_enabled::id(), - ]; - if !only_apply_transitions_for_new_features - || FEATURES_AFFECTING_RBPF - .iter() - .any(|key| new_feature_activations.contains(key)) - { - let program_runtime_environment_v1 = create_program_runtime_environment_v1( - &self.feature_set, - &self.runtime_config.compute_budget.unwrap_or_default(), - false, /* deployment */ - false, /* debugging_features */ - ) - .unwrap(); - let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); - loaded_programs_cache.environments.program_runtime_v1 = - Arc::new(program_runtime_environment_v1); - let program_runtime_environment_v2 = - solana_loader_v4_program::create_program_runtime_environment_v2( - &self.runtime_config.compute_budget.unwrap_or_default(), - false, /* debugging_features */ - ); - loaded_programs_cache.environments.program_runtime_v2 = - Arc::new(program_runtime_environment_v2); - loaded_programs_cache.prune_feature_set_transition(); - } for builtin in BUILTINS.iter() { if let Some(feature_id) = builtin.feature_id { let should_apply_action_for_feature_transition = diff --git a/runtime/src/bank/metrics.rs b/runtime/src/bank/metrics.rs index 1fa33b2e7f92ee..ccf8c4837761db 100644 --- a/runtime/src/bank/metrics.rs +++ b/runtime/src/bank/metrics.rs @@ -39,6 +39,7 @@ pub(crate) struct NewBankTimings { pub(crate) 
feature_set_time_us: u64, pub(crate) ancestors_time_us: u64, pub(crate) update_epoch_time_us: u64, + pub(crate) recompilation_time_us: u64, pub(crate) update_sysvars_time_us: u64, pub(crate) fill_sysvar_cache_time_us: u64, } @@ -144,6 +145,7 @@ pub(crate) fn report_new_bank_metrics( ("feature_set_us", timings.feature_set_time_us, i64), ("ancestors_us", timings.ancestors_time_us, i64), ("update_epoch_us", timings.update_epoch_time_us, i64), + ("recompilation_time_us", timings.recompilation_time_us, i64), ("update_sysvars_us", timings.update_sysvars_time_us, i64), ( "fill_sysvar_cache_us", diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 1f4ed9d8bcc3e7..cddac40fe3761f 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -7,9 +7,7 @@ use { *, }, crate::{ - accounts_background_service::{ - AbsRequestSender, PrunedBanksRequestHandler, SendDroppedBankCallback, - }, + accounts_background_service::{PrunedBanksRequestHandler, SendDroppedBankCallback}, bank_client::BankClient, bank_forks::BankForks, epoch_rewards_hasher::hash_rewards_into_partitions, @@ -6990,7 +6988,7 @@ fn test_bank_load_program() { programdata_account.set_rent_epoch(1); bank.store_account_and_update_capitalization(&key1, &program_account); bank.store_account_and_update_capitalization(&programdata_key, &programdata_account); - let program = bank.load_program(&key1, false); + let program = bank.load_program(&key1, false, None); assert_matches!(program.program, LoadedProgramType::LegacyV1(_)); assert_eq!( program.account_size, @@ -7145,7 +7143,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { assert_eq!(*elf.get(i).unwrap(), *byte); } - let loaded_program = bank.load_program(&program_keypair.pubkey(), false); + let loaded_program = bank.load_program(&program_keypair.pubkey(), false, None); // Invoke deployed program mock_process_instruction( @@ -11903,7 +11901,7 @@ fn test_is_in_slot_hashes_history() { } #[test] -fn test_runtime_feature_enable_with_program_cache() { +fn test_feature_activation_loaded_programs_recompilation_phase() { solana_logger::setup(); // Bank Setup @@ -11969,20 +11967,8 @@ fn test_runtime_feature_enable_with_program_cache() { &feature::create_account(&Feature { activated_at: None }, feature_account_balance), ); - // Reroot to call LoadedPrograms::prune() and end the current recompilation phase goto_end_of_slot(bank.clone()); - bank_forks - .write() - .unwrap() - .insert(Arc::into_inner(bank).unwrap()); - let bank = bank_forks.read().unwrap().working_bank(); - bank_forks.read().unwrap().prune_program_cache(bank.slot); - bank_forks - .write() - .unwrap() - .set_root(bank.slot, &AbsRequestSender::default(), None); - - // Advance to next epoch, which starts the next recompilation phase + // Advance to next epoch, which starts the recompilation phase let bank = new_from_parent_next_epoch(bank, 1); // Execute after feature is enabled to check it was filtered out and reverified. 
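Taken together, the hunks above implement a recompilation phase: when an rBPF-affecting feature activates, programs are reloaded against the upcoming environment (latest_root_epoch + 1) rather than the bank's own epoch, and the fresh cache entry inherits the old entry's usage statistics. The carry-over step is easiest to see in isolation; below is a minimal sketch using simplified stand-in types rather than the actual runtime structs (the first slot of the upcoming epoch is passed in as a plain number):

    use std::sync::{
        atomic::{AtomicU64, Ordering},
        Arc,
    };

    // Stand-in for the cache entry; only the fields the recompile branch touches.
    struct LoadedProgram {
        effective_slot: u64,
        tx_usage_counter: AtomicU64,
        ix_usage_counter: AtomicU64,
    }

    // Mirrors the `if let Some(recompile) = recompile` block: delay the new
    // entry until the first slot of the upcoming epoch and carry over the usage
    // counters so eviction ordering survives the recompilation.
    fn finish_recompile(
        mut new_entry: LoadedProgram,
        old_entry: &LoadedProgram,
        first_slot_in_upcoming_epoch: u64,
    ) -> Arc<LoadedProgram> {
        new_entry.effective_slot = new_entry.effective_slot.max(first_slot_in_upcoming_epoch);
        new_entry.tx_usage_counter =
            AtomicU64::new(old_entry.tx_usage_counter.load(Ordering::Relaxed));
        new_entry.ix_usage_counter =
            AtomicU64::new(old_entry.ix_usage_counter.load(Ordering::Relaxed));
        Arc::new(new_entry)
    }

    fn main() {
        let old = LoadedProgram {
            effective_slot: 100,
            tx_usage_counter: AtomicU64::new(42),
            ix_usage_counter: AtomicU64::new(7),
        };
        let fresh = LoadedProgram {
            effective_slot: 101,
            tx_usage_counter: AtomicU64::new(0),
            ix_usage_counter: AtomicU64::new(0),
        };
        // 432_000 is a hypothetical first slot of the next epoch.
        let recompiled = finish_recompile(fresh, &old, 432_000);
        assert_eq!(recompiled.effective_slot, 432_000);
        assert_eq!(recompiled.tx_usage_counter.load(Ordering::Relaxed), 42);
    }

Raising effective_slot to the epoch's first slot keeps the recompiled entry from being used before the new environment takes effect, while the copied counters preserve the cache's usage-based eviction order.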
From 28e08ac1410dde841fa149cfffc48fc7c77c8ea4 Mon Sep 17 00:00:00 2001
From: Tyera
Date: Thu, 9 Nov 2023 10:03:56 -0700
Subject: [PATCH 81/98] Add Blockstore::get_rooted_block_with_entries method
 (#33995)

* Add helper structs to hold block and entry summaries

* Add Blockstore::get_rooted_block_with_entries and dedupe innards

* Review comments
---
 ledger/src/blockstore.rs      | 50 ++++++++++++++++++++++++++++++++---
 transaction-status/src/lib.rs | 18 +++++++++++++
 2 files changed, 65 insertions(+), 3 deletions(-)

diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 3ea9525fcb194f..bf9a4096b77e6d 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -59,7 +59,7 @@ use {
     solana_transaction_status::{
         ConfirmedTransactionStatusWithSignature, ConfirmedTransactionWithStatusMeta, Rewards,
         TransactionStatusMeta, TransactionWithStatusMeta, VersionedConfirmedBlock,
-        VersionedTransactionWithStatusMeta,
+        VersionedConfirmedBlockWithEntries, VersionedTransactionWithStatusMeta,
     },
     std::{
         borrow::Cow,
@@ -2031,6 +2031,33 @@
         slot: Slot,
         require_previous_blockhash: bool,
     ) -> Result<VersionedConfirmedBlock> {
+        self.get_complete_block_with_entries(slot, require_previous_blockhash, false)
+            .map(|result| result.block)
+    }
+
+    pub fn get_rooted_block_with_entries(
+        &self,
+        slot: Slot,
+        require_previous_blockhash: bool,
+    ) -> Result<VersionedConfirmedBlockWithEntries> {
+        datapoint_info!(
+            "blockstore-rpc-api",
+            ("method", "get_rooted_block_with_entries", String)
+        );
+        let _lock = self.check_lowest_cleanup_slot(slot)?;
+
+        if self.is_root(slot) {
+            return self.get_complete_block_with_entries(slot, require_previous_blockhash, true);
+        }
+        Err(BlockstoreError::SlotNotRooted)
+    }
+
+    fn get_complete_block_with_entries(
+        &self,
+        slot: Slot,
+        require_previous_blockhash: bool,
+        populate_entries: bool,
+    ) -> Result<VersionedConfirmedBlockWithEntries> {
         let Some(slot_meta) = self.meta_cf.get(slot)?
 else {
             info!("SlotMeta not found for slot {}", slot);
             return Err(BlockstoreError::SlotUnavailable);
         };
@@ -2042,9 +2069,26 @@ impl Blockstore {
             .last()
             .map(|entry| entry.hash)
             .unwrap_or_else(|| panic!("Rooted slot {slot:?} must have blockhash"));
+        let mut starting_transaction_index = 0;
+        let mut entries = if populate_entries {
+            Vec::with_capacity(slot_entries.len())
+        } else {
+            Vec::new()
+        };
         let slot_transaction_iterator = slot_entries
             .into_iter()
-            .flat_map(|entry| entry.transactions)
+            .flat_map(|entry| {
+                if populate_entries {
+                    entries.push(solana_transaction_status::EntrySummary {
+                        num_hashes: entry.num_hashes,
+                        hash: entry.hash,
+                        num_transactions: entry.transactions.len() as u64,
+                        starting_transaction_index,
+                    });
+                    starting_transaction_index += entry.transactions.len();
+                }
+                entry.transactions
+            })
             .map(|transaction| {
                 if let Err(err) = transaction.sanitize() {
                     warn!(
@@ -2096,7 +2140,7 @@ impl Blockstore {
                 block_time,
                 block_height,
             };
-            return Ok(block);
+            return Ok(VersionedConfirmedBlockWithEntries { block, entries });
         }
     }
     Err(BlockstoreError::SlotUnavailable)

diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs
index 84654a564c4c33..fac20d9859cdbd 100644
--- a/transaction-status/src/lib.rs
+++ b/transaction-status/src/lib.rs
@@ -12,6 +12,7 @@ use {
     solana_sdk::{
         clock::{Slot, UnixTimestamp},
         commitment_config::CommitmentConfig,
+        hash::Hash,
         instruction::CompiledInstruction,
         message::{
             v0::{self, LoadedAddresses, LoadedMessage, MessageAddressTableLookup},
@@ -793,6 +794,23 @@ pub struct UiConfirmedBlock {
     pub block_height: Option<u64>,
 }

+// Confirmed block with type guarantees that transaction metadata is always
+// present, as well as a list of the entry data needed to cryptographically
+// verify the block. Used for uploading to BigTable.
+pub struct VersionedConfirmedBlockWithEntries {
+    pub block: VersionedConfirmedBlock,
+    pub entries: Vec<EntrySummary>,
+}
+
+// Data needed to reconstruct an Entry, given an ordered list of transactions in
+// a block. Used for uploading to BigTable.
+pub struct EntrySummary {
+    pub num_hashes: u64,
+    pub hash: Hash,
+    pub num_transactions: u64,
+    pub starting_transaction_index: usize,
+}
+
 #[derive(Clone, Debug, PartialEq)]
 #[allow(clippy::large_enum_variant)]
 pub enum TransactionWithStatusMeta {
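The starting_transaction_index bookkeeping above is worth spelling out: each EntrySummary records where its entry's transactions begin in the block's flattened transaction list, so a consumer holding the ordered transactions can slice each entry back out. A minimal sketch, with Entry reduced to just its transactions and hashing omitted:

    // Entry reduced to what the index bookkeeping needs.
    struct Entry {
        transactions: Vec<String>,
    }

    struct EntrySummary {
        num_transactions: u64,
        starting_transaction_index: usize,
    }

    // The same accumulation get_complete_block_with_entries performs while it
    // flat-maps entry transactions into the block's transaction list.
    fn summarize(entries: &[Entry]) -> Vec<EntrySummary> {
        let mut starting_transaction_index = 0;
        entries
            .iter()
            .map(|entry| {
                let summary = EntrySummary {
                    num_transactions: entry.transactions.len() as u64,
                    starting_transaction_index,
                };
                starting_transaction_index += entry.transactions.len();
                summary
            })
            .collect()
    }

    fn main() {
        let entries = vec![
            Entry { transactions: vec!["a".into(), "b".into()] },
            Entry { transactions: vec![] }, // tick entries carry no transactions
            Entry { transactions: vec!["c".into()] },
        ];
        let summaries = summarize(&entries);
        assert_eq!(summaries[1].starting_transaction_index, 2);
        assert_eq!(summaries[2].starting_transaction_index, 2);
    }

Given the block's ordered transactions, a consumer (for example the BigTable uploader the comments mention) can rebuild each entry as transactions[start..start + num_transactions].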
From a96be5d2f03ae6eefd93ed2734bd158041ee3dae Mon Sep 17 00:00:00 2001
From: Brooks
Date: Thu, 9 Nov 2023 15:04:19 -0500
Subject: [PATCH 82/98] Uses IntSet for uncleaned roots during index generation
 (#34008)

---
 accounts-db/src/accounts_db.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index ed0f3e551c66cf..9b1690f8721621 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -9408,7 +9408,7 @@ impl AccountsDb {
         // subtract data.len() from accounts_data_len for all old accounts that are in the index twice
         let mut accounts_data_len_dedup_timer =
             Measure::start("handle accounts data len duplicates");
-        let uncleaned_roots = Mutex::new(HashSet::<Slot>::default());
+        let uncleaned_roots = Mutex::new(IntSet::default());
         if pass == 0 {
             let accounts_data_len_from_duplicates = unique_pubkeys_by_bin
                 .par_iter()

From 9f25f67e6016dc8bd2ff7a7455eacf4cecbbc318 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Thu, 9 Nov 2023 16:06:15 -0500
Subject: [PATCH 83/98] Simplifies adding uncleaned pubkeys during index
 generation (#34007)

---
 accounts-db/src/accounts_db.rs | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 9b1690f8721621..445bc6d10347de 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -71,10 +71,7 @@ use {
     },
     blake3::traits::digest::Digest,
     crossbeam_channel::{unbounded, Receiver, Sender},
-    dashmap::{
-        mapref::entry::Entry::{Occupied, Vacant},
-        DashMap, DashSet,
-    },
+    dashmap::{DashMap, DashSet},
     log::*,
     rand::{thread_rng, Rng},
     rayon::{prelude::*, ThreadPool},
@@ -9368,12 +9365,7 @@ impl AccountsDb {
             let unique_keys =
                 HashSet::<Pubkey>::from_iter(slot_keys.iter().map(|(_, key)| *key));
             for (slot, key) in slot_keys {
-                match self.uncleaned_pubkeys.entry(slot) {
-                    Occupied(mut occupied) => occupied.get_mut().push(key),
-                    Vacant(vacant) => {
-                        vacant.insert(vec![key]);
-                    }
-                }
+                self.uncleaned_pubkeys.entry(slot).or_default().push(key);
             }
             let unique_pubkeys_by_bin_inner =
                 unique_keys.into_iter().collect::<Vec<_>>();

From 59eb55990c2ea95c1455203e768ddf1ade3c098c Mon Sep 17 00:00:00 2001
From: Pankaj Garg
Date: Thu, 9 Nov 2023 14:40:04 -0800
Subject: [PATCH 84/98] Move filter_executable_program_accounts to bank.rs
 (#34004)

---
 accounts-db/src/accounts.rs | 272 +-----------------------------------
 runtime/src/bank.rs         |  58 +++++++-
 runtime/src/bank/tests.rs   | 194 +++++++++++++++++++++++++
 3 files changed, 251 insertions(+), 273 deletions(-)

diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs
index 0ac199e6633522..b11763b1dd5048 100644
--- a/accounts-db/src/accounts.rs
+++ b/accounts-db/src/accounts.rs
@@ -59,10 +59,7 @@ use {
     solana_system_program::{get_system_account_kind, SystemAccountKind},
     std::{
         cmp::Reverse,
-        collections::{
-            hash_map::{self, Entry},
-            BinaryHeap, HashMap, HashSet,
-        },
+        collections::{hash_map, BinaryHeap, HashMap, HashSet},
         num::NonZeroUsize,
         ops::RangeBounds,
         path::PathBuf,
@@ -638,59 +635,6 @@ impl Accounts {
         )
     }

-    /// Returns a hash map of executable program accounts (program accounts that are not writable
-    /// in the given transactions), and their owners, for the transactions with
a valid - /// blockhash or nonce. - pub fn filter_executable_program_accounts<'a>( - &self, - ancestors: &Ancestors, - txs: &[SanitizedTransaction], - lock_results: &mut [TransactionCheckResult], - program_owners: &'a [Pubkey], - hash_queue: &BlockhashQueue, - ) -> HashMap { - let mut result: HashMap = HashMap::new(); - lock_results.iter_mut().zip(txs).for_each(|etx| { - if let ((Ok(()), nonce), tx) = etx { - if nonce - .as_ref() - .map(|nonce| nonce.lamports_per_signature()) - .unwrap_or_else(|| { - hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()) - }) - .is_some() - { - tx.message() - .account_keys() - .iter() - .for_each(|key| match result.entry(*key) { - Entry::Occupied(mut entry) => { - let (_, count) = entry.get_mut(); - saturating_add_assign!(*count, 1); - } - Entry::Vacant(entry) => { - if let Ok(index) = self.accounts_db.account_matches_owners( - ancestors, - key, - program_owners, - ) { - program_owners - .get(index) - .map(|owner| entry.insert((owner, 1))); - } - } - }); - } else { - // If the transaction's nonce account was not valid, and blockhash is not found, - // the transaction will fail to process. Let's not load any programs from the - // transaction, and update the status of the transaction. - *etx.0 = (Err(TransactionError::BlockhashNotFound), None); - } - } - }); - result - } - #[allow(clippy::too_many_arguments)] pub fn load_accounts( &self, @@ -2000,220 +1944,6 @@ mod tests { ); } - #[test] - fn test_filter_executable_program_accounts() { - let mut tx_accounts: Vec = Vec::new(); - - let keypair1 = Keypair::new(); - let keypair2 = Keypair::new(); - - let non_program_pubkey1 = Pubkey::new_unique(); - let non_program_pubkey2 = Pubkey::new_unique(); - let program1_pubkey = Pubkey::new_unique(); - let program2_pubkey = Pubkey::new_unique(); - let account1_pubkey = Pubkey::new_unique(); - let account2_pubkey = Pubkey::new_unique(); - let account3_pubkey = Pubkey::new_unique(); - let account4_pubkey = Pubkey::new_unique(); - - let account5_pubkey = Pubkey::new_unique(); - - tx_accounts.push(( - non_program_pubkey1, - AccountSharedData::new(1, 10, &account5_pubkey), - )); - tx_accounts.push(( - non_program_pubkey2, - AccountSharedData::new(1, 10, &account5_pubkey), - )); - tx_accounts.push(( - program1_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - )); - tx_accounts.push(( - program2_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - )); - tx_accounts.push(( - account1_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey1), - )); - tx_accounts.push(( - account2_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey2), - )); - tx_accounts.push(( - account3_pubkey, - AccountSharedData::new(40, 1, &program1_pubkey), - )); - tx_accounts.push(( - account4_pubkey, - AccountSharedData::new(40, 1, &program2_pubkey), - )); - - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); - for tx_account in tx_accounts.iter() { - accounts.store_for_tests(0, &tx_account.0, &tx_account.1); - } - - let mut hash_queue = BlockhashQueue::new(100); - - let tx1 = Transaction::new_with_compiled_instructions( - &[&keypair1], - &[non_program_pubkey1], - Hash::new_unique(), - vec![account1_pubkey, account2_pubkey, account3_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - hash_queue.register_hash(&tx1.message().recent_blockhash, 0); - let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); - 
- let tx2 = Transaction::new_with_compiled_instructions( - &[&keypair2], - &[non_program_pubkey2], - Hash::new_unique(), - vec![account4_pubkey, account3_pubkey, account2_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - hash_queue.register_hash(&tx2.message().recent_blockhash, 0); - let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - - let ancestors = vec![(0, 0)].into_iter().collect(); - let owners = &[program1_pubkey, program2_pubkey]; - let programs = accounts.filter_executable_program_accounts( - &ancestors, - &[sanitized_tx1, sanitized_tx2], - &mut [(Ok(()), None), (Ok(()), None)], - owners, - &hash_queue, - ); - - // The result should contain only account3_pubkey, and account4_pubkey as the program accounts - assert_eq!(programs.len(), 2); - assert_eq!( - programs - .get(&account3_pubkey) - .expect("failed to find the program account"), - &(&program1_pubkey, 2) - ); - assert_eq!( - programs - .get(&account4_pubkey) - .expect("failed to find the program account"), - &(&program2_pubkey, 1) - ); - } - - #[test] - fn test_filter_executable_program_accounts_invalid_blockhash() { - let mut tx_accounts: Vec = Vec::new(); - - let keypair1 = Keypair::new(); - let keypair2 = Keypair::new(); - - let non_program_pubkey1 = Pubkey::new_unique(); - let non_program_pubkey2 = Pubkey::new_unique(); - let program1_pubkey = Pubkey::new_unique(); - let program2_pubkey = Pubkey::new_unique(); - let account1_pubkey = Pubkey::new_unique(); - let account2_pubkey = Pubkey::new_unique(); - let account3_pubkey = Pubkey::new_unique(); - let account4_pubkey = Pubkey::new_unique(); - - let account5_pubkey = Pubkey::new_unique(); - - tx_accounts.push(( - non_program_pubkey1, - AccountSharedData::new(1, 10, &account5_pubkey), - )); - tx_accounts.push(( - non_program_pubkey2, - AccountSharedData::new(1, 10, &account5_pubkey), - )); - tx_accounts.push(( - program1_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - )); - tx_accounts.push(( - program2_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - )); - tx_accounts.push(( - account1_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey1), - )); - tx_accounts.push(( - account2_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey2), - )); - tx_accounts.push(( - account3_pubkey, - AccountSharedData::new(40, 1, &program1_pubkey), - )); - tx_accounts.push(( - account4_pubkey, - AccountSharedData::new(40, 1, &program2_pubkey), - )); - - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); - for tx_account in tx_accounts.iter() { - accounts.store_for_tests(0, &tx_account.0, &tx_account.1); - } - - let mut hash_queue = BlockhashQueue::new(100); - - let tx1 = Transaction::new_with_compiled_instructions( - &[&keypair1], - &[non_program_pubkey1], - Hash::new_unique(), - vec![account1_pubkey, account2_pubkey, account3_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - hash_queue.register_hash(&tx1.message().recent_blockhash, 0); - let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); - - let tx2 = Transaction::new_with_compiled_instructions( - &[&keypair2], - &[non_program_pubkey2], - Hash::new_unique(), - vec![account4_pubkey, account3_pubkey, account2_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], - ); - // Let's not register blockhash from tx2. 
This should cause the tx2 to fail - let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - - let ancestors = vec![(0, 0)].into_iter().collect(); - let owners = &[program1_pubkey, program2_pubkey]; - let mut lock_results = vec![(Ok(()), None), (Ok(()), None)]; - let programs = accounts.filter_executable_program_accounts( - &ancestors, - &[sanitized_tx1, sanitized_tx2], - &mut lock_results, - owners, - &hash_queue, - ); - - // The result should contain only account3_pubkey as the program accounts - assert_eq!(programs.len(), 1); - assert_eq!( - programs - .get(&account3_pubkey) - .expect("failed to find the program account"), - &(&program1_pubkey, 1) - ); - assert_eq!(lock_results[1].0, Err(TransactionError::BlockhashNotFound)); - } - #[test] fn test_load_accounts_multiple_loaders() { let mut accounts: Vec = Vec::new(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 2b8cbe34926100..3bf2d720933443 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -184,7 +184,7 @@ use { std::{ borrow::Cow, cell::RefCell, - collections::{HashMap, HashSet}, + collections::{hash_map::Entry, HashMap, HashSet}, convert::TryFrom, fmt, mem, ops::{AddAssign, RangeInclusive}, @@ -5116,6 +5116,60 @@ impl Bank { loaded_programs_for_txs } + /// Returns a hash map of executable program accounts (program accounts that are not writable + /// in the given transactions), and their owners, for the transactions with a valid + /// blockhash or nonce. + fn filter_executable_program_accounts<'a>( + &self, + ancestors: &Ancestors, + txs: &[SanitizedTransaction], + lock_results: &mut [TransactionCheckResult], + program_owners: &'a [Pubkey], + hash_queue: &BlockhashQueue, + ) -> HashMap { + let mut result: HashMap = HashMap::new(); + lock_results.iter_mut().zip(txs).for_each(|etx| { + if let ((Ok(()), nonce), tx) = etx { + if nonce + .as_ref() + .map(|nonce| nonce.lamports_per_signature()) + .unwrap_or_else(|| { + hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()) + }) + .is_some() + { + tx.message() + .account_keys() + .iter() + .for_each(|key| match result.entry(*key) { + Entry::Occupied(mut entry) => { + let (_, count) = entry.get_mut(); + saturating_add_assign!(*count, 1); + } + Entry::Vacant(entry) => { + if let Ok(index) = self + .rc + .accounts + .accounts_db + .account_matches_owners(ancestors, key, program_owners) + { + program_owners + .get(index) + .map(|owner| entry.insert((owner, 1))); + } + } + }); + } else { + // If the transaction's nonce account was not valid, and blockhash is not found, + // the transaction will fail to process. Let's not load any programs from the + // transaction, and update the status of the transaction. 
+ *etx.0 = (Err(TransactionError::BlockhashNotFound), None); + } + } + }); + result + } + #[allow(clippy::type_complexity)] pub fn load_and_execute_transactions( &self, @@ -5183,7 +5237,7 @@ impl Bank { bpf_loader_deprecated::id(), loader_v4::id(), ]; - let mut program_accounts_map = self.rc.accounts.filter_executable_program_accounts( + let mut program_accounts_map = self.filter_executable_program_accounts( &self.ancestors, sanitized_txs, &mut check_results, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index cddac40fe3761f..50d68c6cd82288 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -13536,3 +13536,197 @@ fn test_last_restart_slot() { assert!(!last_restart_slot_dirty(&bank7)); assert_eq!(get_last_restart_slot(&bank7), Some(6)); } + +#[test] +fn test_filter_executable_program_accounts() { + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + + let non_program_pubkey1 = Pubkey::new_unique(); + let non_program_pubkey2 = Pubkey::new_unique(); + let program1_pubkey = Pubkey::new_unique(); + let program2_pubkey = Pubkey::new_unique(); + let account1_pubkey = Pubkey::new_unique(); + let account2_pubkey = Pubkey::new_unique(); + let account3_pubkey = Pubkey::new_unique(); + let account4_pubkey = Pubkey::new_unique(); + + let account5_pubkey = Pubkey::new_unique(); + + let (genesis_config, _mint_keypair) = create_genesis_config(10); + let bank = Bank::new_for_tests(&genesis_config); + bank.store_account( + &non_program_pubkey1, + &AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.store_account( + &non_program_pubkey2, + &AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.store_account( + &program1_pubkey, + &AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.store_account( + &program2_pubkey, + &AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.store_account( + &account1_pubkey, + &AccountSharedData::new(1, 10, &non_program_pubkey1), + ); + bank.store_account( + &account2_pubkey, + &AccountSharedData::new(1, 10, &non_program_pubkey2), + ); + bank.store_account( + &account3_pubkey, + &AccountSharedData::new(40, 1, &program1_pubkey), + ); + bank.store_account( + &account4_pubkey, + &AccountSharedData::new(40, 1, &program2_pubkey), + ); + + let mut hash_queue = BlockhashQueue::new(100); + + let tx1 = Transaction::new_with_compiled_instructions( + &[&keypair1], + &[non_program_pubkey1], + Hash::new_unique(), + vec![account1_pubkey, account2_pubkey, account3_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + hash_queue.register_hash(&tx1.message().recent_blockhash, 0); + let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); + + let tx2 = Transaction::new_with_compiled_instructions( + &[&keypair2], + &[non_program_pubkey2], + Hash::new_unique(), + vec![account4_pubkey, account3_pubkey, account2_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + hash_queue.register_hash(&tx2.message().recent_blockhash, 0); + let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); + + let ancestors = vec![(0, 0)].into_iter().collect(); + let owners = &[program1_pubkey, program2_pubkey]; + let programs = bank.filter_executable_program_accounts( + &ancestors, + &[sanitized_tx1, sanitized_tx2], + &mut [(Ok(()), None), (Ok(()), None)], + owners, + &hash_queue, + ); + + // The result should contain only account3_pubkey, and account4_pubkey as the program accounts + assert_eq!(programs.len(), 2); + assert_eq!( + programs + .get(&account3_pubkey) + 
.expect("failed to find the program account"), + &(&program1_pubkey, 2) + ); + assert_eq!( + programs + .get(&account4_pubkey) + .expect("failed to find the program account"), + &(&program2_pubkey, 1) + ); +} + +#[test] +fn test_filter_executable_program_accounts_invalid_blockhash() { + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + + let non_program_pubkey1 = Pubkey::new_unique(); + let non_program_pubkey2 = Pubkey::new_unique(); + let program1_pubkey = Pubkey::new_unique(); + let program2_pubkey = Pubkey::new_unique(); + let account1_pubkey = Pubkey::new_unique(); + let account2_pubkey = Pubkey::new_unique(); + let account3_pubkey = Pubkey::new_unique(); + let account4_pubkey = Pubkey::new_unique(); + + let account5_pubkey = Pubkey::new_unique(); + + let (genesis_config, _mint_keypair) = create_genesis_config(10); + let bank = Bank::new_for_tests(&genesis_config); + bank.store_account( + &non_program_pubkey1, + &AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.store_account( + &non_program_pubkey2, + &AccountSharedData::new(1, 10, &account5_pubkey), + ); + bank.store_account( + &program1_pubkey, + &AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.store_account( + &program2_pubkey, + &AccountSharedData::new(40, 1, &account5_pubkey), + ); + bank.store_account( + &account1_pubkey, + &AccountSharedData::new(1, 10, &non_program_pubkey1), + ); + bank.store_account( + &account2_pubkey, + &AccountSharedData::new(1, 10, &non_program_pubkey2), + ); + bank.store_account( + &account3_pubkey, + &AccountSharedData::new(40, 1, &program1_pubkey), + ); + bank.store_account( + &account4_pubkey, + &AccountSharedData::new(40, 1, &program2_pubkey), + ); + + let mut hash_queue = BlockhashQueue::new(100); + + let tx1 = Transaction::new_with_compiled_instructions( + &[&keypair1], + &[non_program_pubkey1], + Hash::new_unique(), + vec![account1_pubkey, account2_pubkey, account3_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + hash_queue.register_hash(&tx1.message().recent_blockhash, 0); + let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); + + let tx2 = Transaction::new_with_compiled_instructions( + &[&keypair2], + &[non_program_pubkey2], + Hash::new_unique(), + vec![account4_pubkey, account3_pubkey, account2_pubkey], + vec![CompiledInstruction::new(1, &(), vec![0])], + ); + // Let's not register blockhash from tx2. This should cause the tx2 to fail + let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); + + let ancestors = vec![(0, 0)].into_iter().collect(); + let owners = &[program1_pubkey, program2_pubkey]; + let mut lock_results = vec![(Ok(()), None), (Ok(()), None)]; + let programs = bank.filter_executable_program_accounts( + &ancestors, + &[sanitized_tx1, sanitized_tx2], + &mut lock_results, + owners, + &hash_queue, + ); + + // The result should contain only account3_pubkey as the program accounts + assert_eq!(programs.len(), 1); + assert_eq!( + programs + .get(&account3_pubkey) + .expect("failed to find the program account"), + &(&program1_pubkey, 1) + ); + assert_eq!(lock_results[1].0, Err(TransactionError::BlockhashNotFound)); +} From 1057ba8406c90c016a7c40aca5d3468944e1967b Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 9 Nov 2023 22:56:48 -0600 Subject: [PATCH 85/98] Use is_trusted bool in insert_shreds() instead manually adjusting root (#34010) The test_duplicate_with_pruned_ancestor test needs to get around a limitation where the shreds with a parent older than the latest root are discarded. 
From 1057ba8406c90c016a7c40aca5d3468944e1967b Mon Sep 17 00:00:00 2001
From: steviez
Date: Thu, 9 Nov 2023 22:56:48 -0600
Subject: [PATCH 85/98] Use is_trusted bool in insert_shreds() instead of
 manually adjusting root (#34010)

The test_duplicate_with_pruned_ancestor test needs to get around a
limitation where shreds with a parent older than the latest root are
discarded. The previous approach manually adjusted the root value in the
blockstore; this is not ideal in that it fiddles with the inner workings
of Blockstore.

So, use the is_trusted argument in Blockstore::insert_shreds(); setting
is_trusted=true bypasses the sanity checks (including the parent >=
latest root check).
---
 ledger/src/blockstore.rs               |  5 -----
 local-cluster/src/integration_tests.rs |  6 ++++--
 local-cluster/tests/local_cluster.rs   | 26 +++++++++++++++-----------
 3 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index bf9a4096b77e6d..7f596b0556885d 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -3256,11 +3256,6 @@ impl Blockstore {
         Ok(())
     }

-    /// For tests
-    pub fn set_last_root(&mut self, root: Slot) {
-        *self.last_root.write().unwrap() = root;
-    }
-
     pub fn mark_slots_as_if_rooted_normally_at_startup(
         &self,
         slots: Vec<(Slot, Option<Hash>)>,

diff --git a/local-cluster/src/integration_tests.rs b/local-cluster/src/integration_tests.rs
index 41e803799fcd52..26d87d0d39ad85 100644
--- a/local-cluster/src/integration_tests.rs
+++ b/local-cluster/src/integration_tests.rs
@@ -171,13 +171,15 @@ pub fn wait_for_duplicate_proof(ledger_path: &Path, dup_slot: Slot) -> Option<DuplicateSlotProof>
+    // is_trusted=true bypasses the parent >= latest root check;
+    // this check would otherwise prevent the pruned fork from being inserted
     let minority_blockstore = open_blockstore(&minority_validator_info.info.ledger_path);
-    let mut our_blockstore = open_blockstore(&our_node_info.info.ledger_path);
-    our_blockstore.set_last_root(fork_slot - 1);
-    copy_blocks(last_minority_vote, &minority_blockstore, &our_blockstore);
+    let our_blockstore = open_blockstore(&our_node_info.info.ledger_path);
+    copy_blocks(
+        last_minority_vote,
+        &minority_blockstore,
+        &our_blockstore,
+        true,
+    );

     // Change last block parent to chain off of (purged) minority fork
     info!("For our node, changing parent of {last_majority_vote} to {last_minority_vote}");
@@ -4737,9 +4744,6 @@ fn test_duplicate_with_pruned_ancestor() {
         true, // merkle_variant
     );
     our_blockstore.insert_shreds(shreds, None, false).unwrap();
-
-    // Update the root to set minority fork back as pruned
-    our_blockstore.set_last_root(fork_slot + fork_length);
 }

 // Actual test, `our_node` will replay the minority fork, then the majority fork which will
@@ -5320,7 +5324,7 @@ fn test_duplicate_shreds_switch_failure() {
     {
         let blockstore1 = open_blockstore(&duplicate_leader_ledger_path);
         let blockstore2 = open_blockstore(&target_switch_fork_validator_ledger_path);
-        copy_blocks(dup_slot, &blockstore1, &blockstore2);
+        copy_blocks(dup_slot, &blockstore1, &blockstore2, false);
     }
     clear_ledger_and_tower(
         &target_switch_fork_validator_ledger_path,
@@ -5353,7 +5357,7 @@ fn test_duplicate_shreds_switch_failure() {
     {
         let blockstore1 = open_blockstore(&duplicate_fork_validator1_ledger_path);
         let blockstore2 = open_blockstore(&duplicate_fork_validator2_ledger_path);
-        copy_blocks(dup_slot, &blockstore1, &blockstore2);
+        copy_blocks(dup_slot, &blockstore1, &blockstore2, false);
     }

     // Set entrypoint to `target_switch_fork_validator_pubkey` so we can run discovery in gossip even without the
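The mechanism this patch leans on reduces to a single predicate. A sketch with Blockstore collapsed to its latest root; the real insert_shreds runs this gate alongside several other per-shred sanity checks:

    struct Blockstore {
        latest_root: u64,
    }

    impl Blockstore {
        // The parent >= latest-root gate that is_trusted bypasses; shreds from
        // a pruned fork have parents older than the root and would be discarded.
        fn accepts(&self, parent_slot: u64, is_trusted: bool) -> bool {
            is_trusted || parent_slot >= self.latest_root
        }
    }

    fn main() {
        let blockstore = Blockstore { latest_root: 100 };
        assert!(!blockstore.accepts(50, false)); // pruned-fork shred rejected
        assert!(blockstore.accepts(50, true)); // trusted insertion skips the check
        assert!(blockstore.accepts(150, false)); // normal shreds are unaffected
    }

Compared with temporarily rewriting last_root, this keeps the test from depending on Blockstore internals and leaves the root value untouched for the rest of the test.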
From 67f8daf6e966d9bb6ac5007ee082e9fa32ed3345 Mon Sep 17 00:00:00 2001
From: vadorovsky
Date: Fri, 10 Nov 2023 08:00:10 +0100
Subject: [PATCH 86/98] chore: Update light-poseidon to 0.2.0 (#33923)

That new release contains an important change which prevents a potential
DDoS:

* Lightprotocol/light-poseidon#32

When invoking the `from_bytes_be` function, light-poseidon 0.1.1 inverts
all the inputs before checking whether their length exceeds the modulus
of the prime field. It was therefore prone to an attack where a malicious
user could submit long byte slices just to DDoS the validator, leaving it
stuck inverting large byte sequences. The update and the mentioned change
fix the same issue that #33363 aims to address.

The new release also contains a few other, less important changes:

* Lightprotocol/light-poseidon#37
* Lightprotocol/light-poseidon#38
* Lightprotocol/light-poseidon#39
---
 Cargo.lock                  |  5 ++--
 Cargo.toml                  |  2 +-
 programs/sbf/Cargo.lock     |  5 ++--
 sdk/program/src/poseidon.rs | 52 +++++++++++++++++++++----------------
 4 files changed, 37 insertions(+), 27 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 519388ed1be9eb..4e1d37e3901eb7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3000,12 +3000,13 @@ dependencies = [

 [[package]]
 name = "light-poseidon"
-version = "0.1.2"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5b439809cdfc0d86ecc7317f1724df13dfa665df48991b79e90e689411451f7"
+checksum = "3c9a85a9752c549ceb7578064b4ed891179d20acd85f27318573b64d2d7ee7ee"
 dependencies = [
  "ark-bn254",
  "ark-ff",
+ "num-bigint 0.4.4",
  "thiserror",
 ]

diff --git a/Cargo.toml b/Cargo.toml
index 778b37f477db1d..00d03de5856eb8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -236,7 +236,7 @@ lazy_static = "1.4.0"
 libc = "0.2.149"
 libloading = "0.7.4"
 libsecp256k1 = "0.6.0"
-light-poseidon = "0.1.2"
+light-poseidon = "0.2.0"
 log = "0.4.20"
 lru = "0.7.7"
 lz4 = "1.24.0"

diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index fe1623388c4188..e60a929c183e81 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -2668,12 +2668,13 @@ dependencies = [

 [[package]]
 name = "light-poseidon"
-version = "0.1.2"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5b439809cdfc0d86ecc7317f1724df13dfa665df48991b79e90e689411451f7"
+checksum = "3c9a85a9752c549ceb7578064b4ed891179d20acd85f27318573b64d2d7ee7ee"
 dependencies = [
  "ark-bn254",
  "ark-ff",
+ "num-bigint 0.4.4",
  "thiserror",
 ]

diff --git a/sdk/program/src/poseidon.rs b/sdk/program/src/poseidon.rs
index c23cded6db9310..9c02fe90bc8b50 100644
--- a/sdk/program/src/poseidon.rs
+++ b/sdk/program/src/poseidon.rs
@@ -21,12 +21,16 @@ pub enum PoseidonSyscallError {
         "Invalid length of the input. The length matching the modulus of the prime field is 32."
     )]
     InvalidInputLength,
+    #[error("Failed to convert bytes into a prime field element.")]
+    BytesToPrimeFieldElement,
     #[error("Input is larger than the modulus of the prime field.")]
     InputLargerThanModulus,
     #[error("Failed to convert a vector of bytes into an array.")]
     VecToArray,
     #[error("Failed to convert the number of inputs from u64 to u8.")]
     U64Tou8,
+    #[error("Failed to convert bytes to BigInt")]
+    BytesToBigInt,
Choose a width between 2 and 16 for 1 to 15 inputs.")] InvalidWidthCircom, #[error("Unexpected error")] @@ -41,10 +45,12 @@ impl From for PoseidonSyscallError { 3 => PoseidonSyscallError::InvalidNumberOfInputs, 4 => PoseidonSyscallError::EmptyInput, 5 => PoseidonSyscallError::InvalidInputLength, - 6 => PoseidonSyscallError::InputLargerThanModulus, - 7 => PoseidonSyscallError::VecToArray, - 8 => PoseidonSyscallError::U64Tou8, - 9 => PoseidonSyscallError::InvalidWidthCircom, + 6 => PoseidonSyscallError::BytesToPrimeFieldElement, + 7 => PoseidonSyscallError::InputLargerThanModulus, + 8 => PoseidonSyscallError::VecToArray, + 9 => PoseidonSyscallError::U64Tou8, + 10 => PoseidonSyscallError::BytesToBigInt, + 11 => PoseidonSyscallError::InvalidWidthCircom, _ => PoseidonSyscallError::Unexpected, } } @@ -58,11 +64,13 @@ impl From for u64 { PoseidonSyscallError::InvalidNumberOfInputs => 3, PoseidonSyscallError::EmptyInput => 4, PoseidonSyscallError::InvalidInputLength => 5, - PoseidonSyscallError::InputLargerThanModulus => 6, - PoseidonSyscallError::VecToArray => 7, - PoseidonSyscallError::U64Tou8 => 8, - PoseidonSyscallError::InvalidWidthCircom => 9, - PoseidonSyscallError::Unexpected => 10, + PoseidonSyscallError::BytesToPrimeFieldElement => 6, + PoseidonSyscallError::InputLargerThanModulus => 7, + PoseidonSyscallError::VecToArray => 8, + PoseidonSyscallError::U64Tou8 => 9, + PoseidonSyscallError::BytesToBigInt => 10, + PoseidonSyscallError::InvalidWidthCircom => 11, + PoseidonSyscallError::Unexpected => 12, } } } @@ -210,25 +218,25 @@ pub fn hashv( impl From for PoseidonSyscallError { fn from(error: PoseidonError) -> Self { match error { - PoseidonError::InvalidNumberOfInputs { - inputs: _, - max_limit: _, - width: _, - } => PoseidonSyscallError::InvalidNumberOfInputs, + PoseidonError::InvalidNumberOfInputs { .. } => { + PoseidonSyscallError::InvalidNumberOfInputs + } PoseidonError::EmptyInput => PoseidonSyscallError::EmptyInput, - PoseidonError::InvalidInputLength { - len: _, - modulus_bytes_len: _, - } => PoseidonSyscallError::InvalidInputLength, + PoseidonError::InvalidInputLength { .. } => { + PoseidonSyscallError::InvalidInputLength + } + PoseidonError::BytesToPrimeFieldElement { .. } => { + PoseidonSyscallError::BytesToPrimeFieldElement + } PoseidonError::InputLargerThanModulus => { PoseidonSyscallError::InputLargerThanModulus } PoseidonError::VecToArray => PoseidonSyscallError::VecToArray, PoseidonError::U64Tou8 => PoseidonSyscallError::U64Tou8, - PoseidonError::InvalidWidthCircom { - width: _, - max_limit: _, - } => PoseidonSyscallError::InvalidWidthCircom, + PoseidonError::BytesToBigInt => PoseidonSyscallError::BytesToBigInt, + PoseidonError::InvalidWidthCircom { .. } => { + PoseidonSyscallError::InvalidWidthCircom + } } } } From 985ff402fb55f1632a72f0db6ca1d68ae29f2dfe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Nov 2023 13:28:59 +0000 Subject: [PATCH 87/98] build(deps): bump quinn-proto from 0.10.5 to 0.10.6 (#34002) * build(deps): bump quinn-proto from 0.10.5 to 0.10.6 Bumps [quinn-proto](https://github.com/quinn-rs/quinn) from 0.10.5 to 0.10.6. - [Release notes](https://github.com/quinn-rs/quinn/releases) - [Commits](https://github.com/quinn-rs/quinn/commits) --- updated-dependencies: - dependency-name: quinn-proto dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e1d37e3901eb7..ae1eb4a1871074 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4192,9 +4192,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" dependencies = [ "bytes", "rand 0.8.5", diff --git a/Cargo.toml b/Cargo.toml index 00d03de5856eb8..686c63c671e6e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -272,7 +272,7 @@ protobuf-src = "1.1.0" qstring = "0.7.2" qualifier_attr = { version = "0.2.2", default-features = false } quinn = "0.10.2" -quinn-proto = "0.10.5" +quinn-proto = "0.10.6" quote = "1.0" rand = "0.8.5" rand_chacha = "0.3.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index e60a929c183e81..b80f2c3b34fb32 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3748,9 +3748,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" dependencies = [ "bytes", "rand 0.8.5", From 6d9ed2c5c7982f7ce018753084fd94d81cb08dc5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Nov 2023 13:29:23 +0000 Subject: [PATCH 88/98] build(deps): bump serde_json from 1.0.107 to 1.0.108 (#34001) * build(deps): bump serde_json from 1.0.107 to 1.0.108 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.107 to 1.0.108. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.107...v1.0.108) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae1eb4a1871074..cd98c2f252e449 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4878,9 +4878,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index 686c63c671e6e2..4331b89ae06a35 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,7 +293,7 @@ seqlock = "0.2.0" serde = "1.0.192" serde_bytes = "0.11.12" serde_derive = "1.0.103" -serde_json = "1.0.107" +serde_json = "1.0.108" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.25" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b80f2c3b34fb32..edbca546f83970 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4330,9 +4330,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", From 661aa303d2430708412386855bdcd92817248147 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Nov 2023 13:29:42 +0000 Subject: [PATCH 89/98] build(deps): bump js-sys from 0.3.64 to 0.3.65 (#34000) * build(deps): bump js-sys from 0.3.64 to 0.3.65 Bumps [js-sys](https://github.com/rustwasm/wasm-bindgen) from 0.3.64 to 0.3.65. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/commits) --- updated-dependencies: - dependency-name: js-sys dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 24 ++++++++++++------------ 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd98c2f252e449..9828f3b4fbe6c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2736,9 +2736,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" dependencies = [ "wasm-bindgen", ] @@ -8929,9 +8929,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -8939,9 +8939,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" dependencies = [ "bumpalo", "log", @@ -8966,9 +8966,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8976,9 +8976,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", @@ -8989,9 +8989,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" [[package]] name = "web-sys" diff --git a/Cargo.toml b/Cargo.toml index 4331b89ae06a35..2af7f29d258461 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -223,7 +223,7 @@ itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ "unprefixed_malloc_on_supported_platforms", ] } -js-sys = "0.3.64" +js-sys = "0.3.65" json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index edbca546f83970..dba8a46be23942 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2359,9 +2359,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.65" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" dependencies = [ "wasm-bindgen", ] @@ -7780,9 +7780,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -7790,9 +7790,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" dependencies = [ "bumpalo", "log", @@ -7817,9 +7817,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7827,9 +7827,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", @@ -7840,9 +7840,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" [[package]] name = "web-sys" From ab0cf45a1bde5a73ac4d604236fd7ac9d7fadee2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Nov 2023 13:29:57 +0000 Subject: [PATCH 90/98] build(deps): bump wasm-bindgen from 0.2.87 to 0.2.88 (#33999) * build(deps): bump wasm-bindgen from 0.2.87 to 0.2.88 Bumps [wasm-bindgen](https://github.com/rustwasm/wasm-bindgen) from 0.2.87 to 0.2.88. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/compare/0.2.87...0.2.88) --- updated-dependencies: - dependency-name: wasm-bindgen dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite From 69ab8a82340d4e6021fa75850bc0878fac20a98a Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 10 Nov 2023 11:29:13 -0500 Subject: [PATCH 91/98] Uses IntSet for uncleaned slots from visit_duplicate_pubkeys_during_startup() (#34009) --- accounts-db/src/accounts_db.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 445bc6d10347de..6942f9adad19c7 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9493,9 +9493,9 @@ impl AccountsDb { pubkeys: &[Pubkey], rent_collector: &RentCollector, timings: &GenerateIndexTimings, - ) -> (u64, HashSet) { + ) -> (u64, IntSet) { let mut accounts_data_len_from_duplicates = 0; - let mut uncleaned_slots = HashSet::::default(); + let mut uncleaned_slots = IntSet::default(); let mut removed_rent_paying = 0; let mut removed_top_off = 0; self.accounts_index.scan( From 3c71f859e18c50bfdd997871a157b5cf97d45d1f Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 10 Nov 2023 14:32:24 -0500 Subject: [PATCH 92/98] Uses fold+reduce for handling duplicate pubkeys during index generation (#34011) --- accounts-db/src/accounts_db.rs | 95 +++++++++++++++++++++++----------- 1 file changed, 64 insertions(+), 31 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 6942f9adad19c7..e8435ff2218edb 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9397,51 +9397,84 @@ impl AccountsDb { ..GenerateIndexTimings::default() }; - // subtract data.len() from accounts_data_len for all old accounts that are in the index twice - let mut accounts_data_len_dedup_timer = - Measure::start("handle accounts data len duplicates"); - let uncleaned_roots = Mutex::new(IntSet::default()); if pass == 0 { - let accounts_data_len_from_duplicates = unique_pubkeys_by_bin + #[derive(Debug, Default)] + struct DuplicatePubkeysVisitedInfo { + accounts_data_len_from_duplicates: u64, + uncleaned_roots: IntSet, + } + impl DuplicatePubkeysVisitedInfo { + fn reduce(mut a: Self, mut b: Self) -> Self { + if a.uncleaned_roots.len() >= b.uncleaned_roots.len() { + a.merge(b); + a + } else { + b.merge(a); + b + } + } + fn merge(&mut self, other: Self) { + self.accounts_data_len_from_duplicates += + other.accounts_data_len_from_duplicates; + self.uncleaned_roots.extend(other.uncleaned_roots); + } + } + + // subtract data.len() from accounts_data_len for all old accounts that are in the index twice + let mut accounts_data_len_dedup_timer = + Measure::start("handle accounts data len duplicates"); + let DuplicatePubkeysVisitedInfo { + accounts_data_len_from_duplicates, + uncleaned_roots, + } = unique_pubkeys_by_bin .par_iter() - .map(|unique_keys| { - unique_keys - .par_chunks(4096) - .map(|pubkeys| { - let (count, uncleaned_roots_this_group) = self - .visit_duplicate_pubkeys_during_startup( - pubkeys, - &rent_collector, - &timings, - ); - let mut uncleaned_roots = uncleaned_roots.lock().unwrap(); - uncleaned_roots_this_group.into_iter().for_each(|slot| { - uncleaned_roots.insert(slot); - }); - count - }) - .sum::() - }) - .sum(); + .fold( + DuplicatePubkeysVisitedInfo::default, + |accum, pubkeys_by_bin| { + let intermediate = pubkeys_by_bin + .par_chunks(4096) + 
From b5256997f8d86c9bfbfa7467ba8a1f72140d4bd8 Mon Sep 17 00:00:00 2001
From: Ashwin Sekar
Date: Fri, 10 Nov 2023 14:47:42 -0500
Subject: [PATCH 93/98] refactor: GossipDuplicateConfirmed/cluster_confirmed ->
 DuplicateConf… (#34012)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

refactor: GossipDuplicateConfirmed/cluster_confirmed -> DuplicateConfirmed
---
 core/src/cluster_info_vote_listener.rs        |  26 ++--
 .../src/repair/cluster_slot_state_verifier.rs |  83 ++++++-----
 core/src/replay_stage.rs                      | 136 +++++++++---------
 core/src/tpu.rs                               |   8 +-
 core/src/tvu.rs                               |   8 +-
 core/src/validator.rs                         |   6 +-
 core/src/vote_simulator.rs                    |   4 +-
 7 files changed, 134 insertions(+), 137 deletions(-)

diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs
index 782f10d976b60e..3af93fecf963dc 100644
--- a/core/src/cluster_info_vote_listener.rs
+++ b/core/src/cluster_info_vote_listener.rs
@@ -65,8 +65,8 @@
 pub type VerifiedVoteSender = Sender<(Pubkey, Vec<Slot>)>;
 pub type VerifiedVoteReceiver = Receiver<(Pubkey, Vec<Slot>)>;
 pub type GossipVerifiedVoteHashSender = Sender<(Pubkey, Slot, Hash)>;
 pub type GossipVerifiedVoteHashReceiver = Receiver<(Pubkey, Slot, Hash)>;
-pub type GossipDuplicateConfirmedSlotsSender = Sender<ThresholdConfirmedSlots>;
-pub type GossipDuplicateConfirmedSlotsReceiver = Receiver<ThresholdConfirmedSlots>;
+pub type DuplicateConfirmedSlotsSender = Sender<ThresholdConfirmedSlots>;
+pub type DuplicateConfirmedSlotsReceiver = Receiver<ThresholdConfirmedSlots>;

 const THRESHOLDS_TO_CHECK: [f64; 2] = [DUPLICATE_THRESHOLD, VOTE_THRESHOLD_SIZE];
 const BANK_SEND_VOTES_LOOP_SLEEP_MS: u128 = 10;
@@ -243,7 +243,7 @@ impl ClusterInfoVoteListener {
         replay_votes_receiver: ReplayVoteReceiver,
         blockstore: Arc<Blockstore>,
         bank_notification_sender: Option<BankNotificationSenderConfig>,
-        cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender,
+        duplicate_confirmed_slot_sender: DuplicateConfirmedSlotsSender,
     ) -> Self {
         let (verified_vote_label_packets_sender,
verified_vote_label_packets_receiver) = unbounded(); @@ -293,7 +293,7 @@ impl ClusterInfoVoteListener { replay_votes_receiver, blockstore, bank_notification_sender, - cluster_confirmed_slot_sender, + duplicate_confirmed_slot_sender, ); }) .unwrap(); @@ -495,12 +495,12 @@ impl ClusterInfoVoteListener { replay_votes_receiver: ReplayVoteReceiver, blockstore: Arc, bank_notification_sender: Option, - cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender, + duplicate_confirmed_slot_sender: DuplicateConfirmedSlotsSender, ) -> Result<()> { let mut confirmation_verifier = OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root()); let mut last_process_root = Instant::now(); - let cluster_confirmed_slot_sender = Some(cluster_confirmed_slot_sender); + let duplicate_confirmed_slot_sender = Some(duplicate_confirmed_slot_sender); let mut vote_processing_time = Some(VoteProcessingTiming::default()); loop { if exit.load(Ordering::Relaxed) { @@ -531,7 +531,7 @@ impl ClusterInfoVoteListener { &verified_vote_sender, &replay_votes_receiver, &bank_notification_sender, - &cluster_confirmed_slot_sender, + &duplicate_confirmed_slot_sender, &mut vote_processing_time, ); match confirmed_slots { @@ -586,7 +586,7 @@ impl ClusterInfoVoteListener { verified_vote_sender: &VerifiedVoteSender, replay_votes_receiver: &ReplayVoteReceiver, bank_notification_sender: &Option, - cluster_confirmed_slot_sender: &Option, + duplicate_confirmed_slot_sender: &Option, vote_processing_time: &mut Option, ) -> Result { let mut sel = Select::new(); @@ -615,7 +615,7 @@ impl ClusterInfoVoteListener { gossip_verified_vote_hash_sender, verified_vote_sender, bank_notification_sender, - cluster_confirmed_slot_sender, + duplicate_confirmed_slot_sender, vote_processing_time, )); } @@ -638,7 +638,7 @@ impl ClusterInfoVoteListener { new_optimistic_confirmed_slots: &mut ThresholdConfirmedSlots, is_gossip_vote: bool, bank_notification_sender: &Option, - cluster_confirmed_slot_sender: &Option, + duplicate_confirmed_slot_sender: &Option, ) { if vote.is_empty() { return; @@ -692,7 +692,7 @@ impl ClusterInfoVoteListener { } if reached_threshold_results[0] { - if let Some(sender) = cluster_confirmed_slot_sender { + if let Some(sender) = duplicate_confirmed_slot_sender { let _ = sender.send(vec![(last_vote_slot, last_vote_hash)]); } } @@ -749,7 +749,7 @@ impl ClusterInfoVoteListener { gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender, verified_vote_sender: &VerifiedVoteSender, bank_notification_sender: &Option, - cluster_confirmed_slot_sender: &Option, + duplicate_confirmed_slot_sender: &Option, vote_processing_time: &mut Option, ) -> ThresholdConfirmedSlots { let mut diff: HashMap> = HashMap::new(); @@ -776,7 +776,7 @@ impl ClusterInfoVoteListener { &mut new_optimistic_confirmed_slots, is_gossip, bank_notification_sender, - cluster_confirmed_slot_sender, + duplicate_confirmed_slot_sender, ); } gossip_vote_txn_processing_time.stop(); diff --git a/core/src/repair/cluster_slot_state_verifier.rs b/core/src/repair/cluster_slot_state_verifier.rs index d57d0f777f9b1e..bc711a1475e705 100644 --- a/core/src/repair/cluster_slot_state_verifier.rs +++ b/core/src/repair/cluster_slot_state_verifier.rs @@ -16,7 +16,7 @@ pub(crate) type DuplicateSlotsTracker = BTreeSet; pub(crate) type DuplicateSlotsToRepair = HashMap; pub(crate) type PurgeRepairSlotCounter = BTreeMap; pub(crate) type EpochSlotsFrozenSlots = BTreeMap; -pub(crate) type GossipDuplicateConfirmedSlots = BTreeMap; +pub(crate) type DuplicateConfirmedSlots = BTreeMap; 
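+pub(crate) type DuplicateConfirmedSlots = BTreeMap;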
#[derive(PartialEq, Eq, Clone, Debug)] pub enum ClusterConfirmedHash { @@ -95,13 +95,13 @@ impl DeadState { pub fn new_from_state( slot: Slot, duplicate_slots_tracker: &DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, fork_choice: &HeaviestSubtreeForkChoice, epoch_slots_frozen_slots: &EpochSlotsFrozenSlots, ) -> Self { let cluster_confirmed_hash = get_cluster_confirmed_hash_from_state( slot, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, epoch_slots_frozen_slots, fork_choice, None, @@ -132,13 +132,13 @@ impl BankFrozenState { slot: Slot, frozen_hash: Hash, duplicate_slots_tracker: &DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, fork_choice: &HeaviestSubtreeForkChoice, epoch_slots_frozen_slots: &EpochSlotsFrozenSlots, ) -> Self { let cluster_confirmed_hash = get_cluster_confirmed_hash_from_state( slot, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, epoch_slots_frozen_slots, fork_choice, Some(frozen_hash), @@ -196,7 +196,7 @@ pub struct DuplicateState { impl DuplicateState { pub fn new_from_state( slot: Slot, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, fork_choice: &HeaviestSubtreeForkChoice, is_dead: impl Fn() -> bool, get_hash: impl Fn() -> Option, @@ -208,7 +208,7 @@ impl DuplicateState { // to skip marking the slot as duplicate. let duplicate_confirmed_hash = get_duplicate_confirmed_hash_from_state( slot, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, fork_choice, bank_status.bank_hash(), ); @@ -236,7 +236,7 @@ impl EpochSlotsFrozenState { pub fn new_from_state( slot: Slot, epoch_slots_frozen_hash: Hash, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, fork_choice: &HeaviestSubtreeForkChoice, is_dead: impl Fn() -> bool, get_hash: impl Fn() -> Option, @@ -245,7 +245,7 @@ impl EpochSlotsFrozenState { let bank_status = BankStatus::new(is_dead, get_hash); let duplicate_confirmed_hash = get_duplicate_confirmed_hash_from_state( slot, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, fork_choice, bank_status.bank_hash(), ); @@ -689,12 +689,12 @@ fn on_popular_pruned_fork(slot: Slot) -> Vec { /// aggregated through hashes sent in response to requests from `ancestor_hashes_service` fn get_cluster_confirmed_hash_from_state( slot: Slot, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, epoch_slots_frozen_slots: &EpochSlotsFrozenSlots, fork_choice: &HeaviestSubtreeForkChoice, bank_frozen_hash: Option, ) -> Option { - let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot).cloned(); + let duplicate_confirmed_hash = duplicate_confirmed_slots.get(&slot).cloned(); // If the bank hasn't been frozen yet, then we haven't duplicate confirmed a local version // this slot through replay yet. 
let is_local_replay_duplicate_confirmed = if let Some(bank_frozen_hash) = bank_frozen_hash { @@ -707,7 +707,7 @@ fn get_cluster_confirmed_hash_from_state( get_duplicate_confirmed_hash( slot, - gossip_duplicate_confirmed_hash, + duplicate_confirmed_hash, bank_frozen_hash, is_local_replay_duplicate_confirmed, ) @@ -721,11 +721,11 @@ fn get_cluster_confirmed_hash_from_state( fn get_duplicate_confirmed_hash_from_state( slot: Slot, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, fork_choice: &HeaviestSubtreeForkChoice, bank_frozen_hash: Option, ) -> Option { - let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot).cloned(); + let duplicate_confirmed_hash = duplicate_confirmed_slots.get(&slot).cloned(); // If the bank hasn't been frozen yet, then we haven't duplicate confirmed a local version // this slot through replay yet. let is_local_replay_duplicate_confirmed = if let Some(bank_frozen_hash) = bank_frozen_hash { @@ -738,7 +738,7 @@ fn get_duplicate_confirmed_hash_from_state( get_duplicate_confirmed_hash( slot, - gossip_duplicate_confirmed_hash, + duplicate_confirmed_hash, bank_frozen_hash, is_local_replay_duplicate_confirmed, ) @@ -747,13 +747,13 @@ fn get_duplicate_confirmed_hash_from_state( /// Finds the duplicate confirmed hash for a slot. /// /// 1) If `is_local_replay_duplicate_confirmed`, return Some(local frozen hash) -/// 2) If we have a `gossip_duplicate_confirmed_hash`, return Some(gossip_hash) +/// 2) If we have a `duplicate_confirmed_hash`, return Some(duplicate_confirmed_hash) /// 3) Else return None /// /// Assumes that if `is_local_replay_duplicate_confirmed`, `bank_frozen_hash` is not None fn get_duplicate_confirmed_hash( slot: Slot, - gossip_duplicate_confirmed_hash: Option, + duplicate_confirmed_hash: Option, bank_frozen_hash: Option, is_local_replay_duplicate_confirmed: bool, ) -> Option { @@ -767,22 +767,19 @@ fn get_duplicate_confirmed_hash( None }; - match ( - local_duplicate_confirmed_hash, - gossip_duplicate_confirmed_hash, - ) { - (Some(local_duplicate_confirmed_hash), Some(gossip_duplicate_confirmed_hash)) => { - if local_duplicate_confirmed_hash != gossip_duplicate_confirmed_hash { + match (local_duplicate_confirmed_hash, duplicate_confirmed_hash) { + (Some(local_duplicate_confirmed_hash), Some(duplicate_confirmed_hash)) => { + if local_duplicate_confirmed_hash != duplicate_confirmed_hash { error!( "For slot {}, the gossip duplicate confirmed hash {}, is not equal to the confirmed hash we replayed: {}", - slot, gossip_duplicate_confirmed_hash, local_duplicate_confirmed_hash + slot, duplicate_confirmed_hash, local_duplicate_confirmed_hash ); } Some(local_duplicate_confirmed_hash) } (Some(bank_frozen_hash), None) => Some(bank_frozen_hash), - _ => gossip_duplicate_confirmed_hash, + _ => duplicate_confirmed_hash, } } @@ -1841,14 +1838,14 @@ mod test { // 2) None (a slot that hasn't even started replay yet). 
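The precedence that get_duplicate_confirmed_hash documents can be sketched standalone. In this illustrative snippet (not the validator code), Hash is simplified to u64: local replay's frozen hash wins, the hash confirmed by votes is the fallback, and a mismatch is only logged.

    // Illustrative sketch of the precedence rule; Hash simplified to u64.
    fn duplicate_confirmed_hash_sketch(
        duplicate_confirmed_hash: Option<u64>,
        bank_frozen_hash: Option<u64>,
        is_local_replay_duplicate_confirmed: bool,
    ) -> Option<u64> {
        let local = if is_local_replay_duplicate_confirmed {
            // Caller guarantees a frozen hash exists in this case.
            assert!(bank_frozen_hash.is_some());
            bank_frozen_hash
        } else {
            None
        };
        match (local, duplicate_confirmed_hash) {
            (Some(local), Some(remote)) => {
                if local != remote {
                    // Mismatch is logged but the local hash still wins.
                    eprintln!("duplicate confirmed hash {remote} != replayed hash {local}");
                }
                Some(local)
            }
            (Some(local), None) => Some(local),
            _ => duplicate_confirmed_hash,
        }
    }

    fn main() {
        // Both sources agree -> local hash returned.
        assert_eq!(duplicate_confirmed_hash_sketch(Some(7), Some(7), true), Some(7));
        // Only votes confirmed it -> fall back to that hash.
        assert_eq!(duplicate_confirmed_hash_sketch(Some(7), None, false), Some(7));
        // Nothing confirmed -> None, even if the bank is frozen.
        assert_eq!(duplicate_confirmed_hash_sketch(None, Some(9), false), None);
    }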
let root = 0; let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); - let gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default(); let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default(); let duplicate_slot = 2; let duplicate_state = DuplicateState::new_from_state( duplicate_slot, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &heaviest_subtree_fork_choice, || progress.is_dead(duplicate_slot).unwrap_or(false), || initial_bank_hash, @@ -1887,7 +1884,7 @@ mod test { duplicate_slot, frozen_duplicate_slot_hash, &duplicate_slots_tracker, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &heaviest_subtree_fork_choice, &epoch_slots_frozen_slots, ); @@ -1951,11 +1948,11 @@ mod test { let root = 0; let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default(); - let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); // Mark slot 2 as duplicate confirmed let slot2_hash = bank_forks.read().unwrap().get(2).unwrap().hash(); - gossip_duplicate_confirmed_slots.insert(2, slot2_hash); + duplicate_confirmed_slots.insert(2, slot2_hash); let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state( slot2_hash, || progress.is_dead(2).unwrap_or(false), @@ -1996,7 +1993,7 @@ mod test { // fork choice let duplicate_state = DuplicateState::new_from_state( 3, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &heaviest_subtree_fork_choice, || progress.is_dead(3).unwrap_or(false), || Some(slot3_hash), @@ -2059,14 +2056,14 @@ mod test { ); let root = 0; let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); - let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default(); // Mark 2 as duplicate let slot2_hash = bank_forks.read().unwrap().get(2).unwrap().hash(); let duplicate_state = DuplicateState::new_from_state( 2, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &heaviest_subtree_fork_choice, || progress.is_dead(2).unwrap_or(false), || Some(slot2_hash), @@ -2103,7 +2100,7 @@ mod test { ); // Mark slot 3 as duplicate confirmed, should mark slot 2 as duplicate confirmed as well - gossip_duplicate_confirmed_slots.insert(3, slot3_hash); + duplicate_confirmed_slots.insert(3, slot3_hash); let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state( slot3_hash, || progress.is_dead(3).unwrap_or(false), @@ -2177,13 +2174,13 @@ mod test { ); let root = 0; let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); - let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default(); let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default(); // Mark 3 as duplicate confirmed - gossip_duplicate_confirmed_slots.insert(3, slot3_hash); + duplicate_confirmed_slots.insert(3, slot3_hash); 
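+ duplicate_confirmed_slots.insert(3, slot3_hash);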
let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state( slot3_hash, || progress.is_dead(3).unwrap_or(false), @@ -2215,7 +2212,7 @@ mod test { let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash(); let duplicate_state = DuplicateState::new_from_state( 1, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &heaviest_subtree_fork_choice, || progress.is_dead(1).unwrap_or(false), || Some(slot1_hash), @@ -2258,7 +2255,7 @@ mod test { ); let root = 0; let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); - let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default(); let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default(); @@ -2269,7 +2266,7 @@ mod test { let epoch_slots_frozen_state = EpochSlotsFrozenState::new_from_state( 3, slot3_hash, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &heaviest_subtree_fork_choice, || progress.is_dead(3).unwrap_or(false), || Some(slot3_hash), @@ -2298,7 +2295,7 @@ mod test { // Mark 3 as duplicate confirmed and epoch slots frozen with the same hash. Should // duplicate confirm all descendants of 3 - gossip_duplicate_confirmed_slots.insert(3, slot3_hash); + duplicate_confirmed_slots.insert(3, slot3_hash); expected_is_duplicate_confirmed = true; let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state( slot3_hash, @@ -2350,7 +2347,7 @@ mod test { ); let root = 0; let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); - let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default(); let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default(); @@ -2363,7 +2360,7 @@ mod test { let epoch_slots_frozen_state = EpochSlotsFrozenState::new_from_state( 3, mismatched_hash, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &heaviest_subtree_fork_choice, || progress.is_dead(3).unwrap_or(false), || Some(slot3_hash), @@ -2395,7 +2392,7 @@ mod test { // the epoch slots frozen hash above. 
Should duplicate confirm all descendants of // 3 and remove the mismatched hash from `duplicate_slots_to_repair`, since we // have the right version now, no need to repair - gossip_duplicate_confirmed_slots.insert(3, slot3_hash); + duplicate_confirmed_slots.insert(3, slot3_hash); expected_is_duplicate_confirmed = true; let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state( slot3_hash, diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 3c2c7d39d06610..2bfb72da52d4c0 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -5,7 +5,7 @@ use { banking_trace::BankingTracer, cache_block_meta_service::CacheBlockMetaSender, cluster_info_vote_listener::{ - GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VoteTracker, + DuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VoteTracker, }, cluster_slots_service::{cluster_slots::ClusterSlots, ClusterSlotsUpdateSender}, commitment_service::{AggregateCommitmentService, CommitmentAggregationData}, @@ -270,7 +270,7 @@ pub struct ReplayTiming { heaviest_fork_failures_elapsed: u64, bank_count: u64, process_ancestor_hashes_duplicate_slots_elapsed: u64, - process_gossip_duplicate_confirmed_slots_elapsed: u64, + process_duplicate_confirmed_slots_elapsed: u64, process_duplicate_slots_elapsed: u64, process_unfrozen_gossip_verified_vote_hashes_elapsed: u64, process_popular_pruned_forks_elapsed: u64, @@ -300,7 +300,7 @@ impl ReplayTiming { heaviest_fork_failures_elapsed: u64, bank_count: u64, process_ancestor_hashes_duplicate_slots_elapsed: u64, - process_gossip_duplicate_confirmed_slots_elapsed: u64, + process_duplicate_confirmed_slots_elapsed: u64, process_unfrozen_gossip_verified_vote_hashes_elapsed: u64, process_popular_pruned_forks_elapsed: u64, process_duplicate_slots_elapsed: u64, @@ -322,8 +322,7 @@ impl ReplayTiming { self.bank_count += bank_count; self.process_ancestor_hashes_duplicate_slots_elapsed += process_ancestor_hashes_duplicate_slots_elapsed; - self.process_gossip_duplicate_confirmed_slots_elapsed += - process_gossip_duplicate_confirmed_slots_elapsed; + self.process_duplicate_confirmed_slots_elapsed += process_duplicate_confirmed_slots_elapsed; self.process_unfrozen_gossip_verified_vote_hashes_elapsed += process_unfrozen_gossip_verified_vote_hashes_elapsed; self.process_popular_pruned_forks_elapsed += process_popular_pruned_forks_elapsed; @@ -393,8 +392,8 @@ impl ReplayTiming { i64 ), ( - "process_gossip_duplicate_confirmed_slots_elapsed", - self.process_gossip_duplicate_confirmed_slots_elapsed as i64, + "process_duplicate_confirmed_slots_elapsed", + self.process_duplicate_confirmed_slots_elapsed as i64, i64 ), ( @@ -486,7 +485,7 @@ impl ReplayStage { retransmit_slots_sender: Sender, ancestor_duplicate_slots_receiver: AncestorDuplicateSlotsReceiver, replay_vote_sender: ReplayVoteSender, - gossip_duplicate_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver, + duplicate_confirmed_slots_receiver: DuplicateConfirmedSlotsReceiver, gossip_verified_vote_hash_receiver: GossipVerifiedVoteHashReceiver, cluster_slots_update_sender: ClusterSlotsUpdateSender, cost_update_sender: Sender, @@ -545,8 +544,8 @@ impl ReplayStage { let mut skipped_slots_info = SkippedSlotsInfo::default(); let mut replay_timing = ReplayTiming::default(); let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); - let mut gossip_duplicate_confirmed_slots: GossipDuplicateConfirmedSlots = - GossipDuplicateConfirmedSlots::default(); + let mut duplicate_confirmed_slots: 
DuplicateConfirmedSlots = + DuplicateConfirmedSlots::default(); let mut epoch_slots_frozen_slots: EpochSlotsFrozenSlots = EpochSlotsFrozenSlots::default(); let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); @@ -616,7 +615,7 @@ impl ReplayStage { &rewards_recorder_sender, &rpc_subscriptions, &mut duplicate_slots_tracker, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &mut epoch_slots_frozen_slots, &mut unfrozen_gossip_verified_vote_hashes, &mut latest_validator_votes_for_frozen_banks, @@ -645,7 +644,7 @@ impl ReplayStage { &blockstore, &ancestor_duplicate_slots_receiver, &mut duplicate_slots_tracker, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &mut epoch_slots_frozen_slots, &progress, &mut heaviest_subtree_fork_choice, @@ -656,14 +655,16 @@ impl ReplayStage { ); process_ancestor_hashes_duplicate_slots_time.stop(); - // Check for any newly confirmed slots detected from gossip. - let mut process_gossip_duplicate_confirmed_slots_time = - Measure::start("process_gossip_duplicate_confirmed_slots"); - Self::process_gossip_duplicate_confirmed_slots( - &gossip_duplicate_confirmed_slots_receiver, + // Check for any newly duplicate confirmed slots detected from gossip / replay + // Note: since this is tracked using both gossip & replay votes, stake is not + // rolled up from descendants. + let mut process_duplicate_confirmed_slots_time = + Measure::start("process_duplicate_confirmed_slots"); + Self::process_duplicate_confirmed_slots( + &duplicate_confirmed_slots_receiver, &blockstore, &mut duplicate_slots_tracker, - &mut gossip_duplicate_confirmed_slots, + &mut duplicate_confirmed_slots, &mut epoch_slots_frozen_slots, &bank_forks, &progress, @@ -672,7 +673,7 @@ impl ReplayStage { &ancestor_hashes_replay_update_sender, &mut purge_repair_slot_counter, ); - process_gossip_duplicate_confirmed_slots_time.stop(); + process_duplicate_confirmed_slots_time.stop(); // Ingest any new verified votes from gossip. 
Important for fork choice // and switching proofs because these may be votes that haven't yet been @@ -714,7 +715,7 @@ impl ReplayStage { &blockstore, &duplicate_slots_receiver, &mut duplicate_slots_tracker, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &mut epoch_slots_frozen_slots, &bank_forks, &progress, @@ -891,7 +892,7 @@ impl ReplayStage { &mut heaviest_subtree_fork_choice, &bank_notification_sender, &mut duplicate_slots_tracker, - &mut gossip_duplicate_confirmed_slots, + &mut duplicate_confirmed_slots, &mut unfrozen_gossip_verified_vote_hashes, &mut voted_signatures, &mut has_new_vote_been_rooted, @@ -1085,7 +1086,7 @@ impl ReplayStage { heaviest_fork_failures_time.as_us(), u64::from(did_complete_bank), process_ancestor_hashes_duplicate_slots_time.as_us(), - process_gossip_duplicate_confirmed_slots_time.as_us(), + process_duplicate_confirmed_slots_time.as_us(), process_unfrozen_gossip_verified_vote_hashes_time.as_us(), process_popular_pruned_forks_time.as_us(), process_duplicate_slots_time.as_us(), @@ -1412,7 +1413,7 @@ impl ReplayStage { blockstore: &Blockstore, ancestor_duplicate_slots_receiver: &AncestorDuplicateSlotsReceiver, duplicate_slots_tracker: &mut DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, progress: &ProgressMap, fork_choice: &mut HeaviestSubtreeForkChoice, @@ -1434,7 +1435,7 @@ impl ReplayStage { let epoch_slots_frozen_state = EpochSlotsFrozenState::new_from_state( epoch_slots_frozen_slot, epoch_slots_frozen_hash, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, fork_choice, || progress.is_dead(epoch_slots_frozen_slot).unwrap_or(false), || { @@ -1642,16 +1643,16 @@ impl ReplayStage { } } - // Check for any newly confirmed slots by the cluster. This is only detects - // optimistic and in the future, duplicate slot confirmations on the exact + // Check for any newly duplicate confirmed slots by the cluster. + // This only tracks duplicate slot confirmations on the exact // single slots and does not account for votes on their descendants. Used solely // for duplicate slot recovery. 
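A minimal, self-contained sketch of the bookkeeping this function performs on the DuplicateConfirmedSlots map (Slot and Hash below are stand-ins, not the real sdk types): signals at or below the root are skipped, a repeated signal must carry the same hash, and the map is pruned when the root advances.

    use std::collections::BTreeMap;

    type Slot = u64;
    type Hash = [u8; 32]; // stand-in for solana_sdk::hash::Hash

    fn main() {
        let root: Slot = 5;
        let mut duplicate_confirmed_slots: BTreeMap<Slot, Hash> = BTreeMap::new();

        for (confirmed_slot, hash) in [(4, [1u8; 32]), (7, [2u8; 32]), (7, [2u8; 32])] {
            if confirmed_slot <= root {
                continue; // stale signal, already at or below the root
            }
            if let Some(prev_hash) = duplicate_confirmed_slots.insert(confirmed_slot, hash) {
                // A slot can only be duplicate confirmed for a single hash.
                assert_eq!(prev_hash, hash);
                continue; // already processed this signal
            }
            // First signal for this slot: the real code now checks
            // frozen/dead status and kicks off repair of the confirmed version.
        }

        // When the root advances, handle_new_root-style pruning keeps
        // only entries >= the new root.
        let new_root: Slot = 6;
        duplicate_confirmed_slots = duplicate_confirmed_slots.split_off(&new_root);
        assert!(duplicate_confirmed_slots.contains_key(&7));
    }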
#[allow(clippy::too_many_arguments)] - fn process_gossip_duplicate_confirmed_slots( - gossip_duplicate_confirmed_slots_receiver: &GossipDuplicateConfirmedSlotsReceiver, + fn process_duplicate_confirmed_slots( + duplicate_confirmed_slots_receiver: &DuplicateConfirmedSlotsReceiver, blockstore: &Blockstore, duplicate_slots_tracker: &mut DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, bank_forks: &RwLock, progress: &ProgressMap, @@ -1661,12 +1662,12 @@ impl ReplayStage { purge_repair_slot_counter: &mut PurgeRepairSlotCounter, ) { let root = bank_forks.read().unwrap().root(); - for new_confirmed_slots in gossip_duplicate_confirmed_slots_receiver.try_iter() { - for (confirmed_slot, duplicate_confirmed_hash) in new_confirmed_slots { + for new_duplicate_confirmed_slots in duplicate_confirmed_slots_receiver.try_iter() { + for (confirmed_slot, duplicate_confirmed_hash) in new_duplicate_confirmed_slots { if confirmed_slot <= root { continue; - } else if let Some(prev_hash) = gossip_duplicate_confirmed_slots - .insert(confirmed_slot, duplicate_confirmed_hash) + } else if let Some(prev_hash) = + duplicate_confirmed_slots.insert(confirmed_slot, duplicate_confirmed_hash) { assert_eq!(prev_hash, duplicate_confirmed_hash); // Already processed this signal @@ -1719,7 +1720,7 @@ impl ReplayStage { blockstore: &Blockstore, duplicate_slots_receiver: &DuplicateSlotReceiver, duplicate_slots_tracker: &mut DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, bank_forks: &RwLock, progress: &ProgressMap, @@ -1744,7 +1745,7 @@ impl ReplayStage { // WindowService should only send the signal once per slot let duplicate_state = DuplicateState::new_from_state( duplicate_slot, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, fork_choice, || progress.is_dead(duplicate_slot).unwrap_or(false), || bank_hash, @@ -2030,7 +2031,7 @@ impl ReplayStage { err: &BlockstoreProcessorError, rpc_subscriptions: &Arc, duplicate_slots_tracker: &mut DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, progress: &mut ProgressMap, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, @@ -2039,7 +2040,7 @@ impl ReplayStage { purge_repair_slot_counter: &mut PurgeRepairSlotCounter, ) { // Do not remove from progress map when marking dead! Needed by - // `process_gossip_duplicate_confirmed_slots()` + // `process_duplicate_confirmed_slots()` // Block producer can abandon the block if it detects a better one // while producing. 
Somewhat common and expected in a @@ -2077,7 +2078,7 @@ impl ReplayStage { let dead_state = DeadState::new_from_state( slot, duplicate_slots_tracker, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, heaviest_subtree_fork_choice, epoch_slots_frozen_slots, ); @@ -2099,7 +2100,7 @@ impl ReplayStage { { let duplicate_state = DuplicateState::new_from_state( slot, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, heaviest_subtree_fork_choice, || true, || None, @@ -2139,7 +2140,7 @@ impl ReplayStage { heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, bank_notification_sender: &Option, duplicate_slots_tracker: &mut DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, vote_signatures: &mut Vec, has_new_vote_been_rooted: &mut bool, @@ -2198,7 +2199,7 @@ impl ReplayStage { highest_super_majority_root, heaviest_subtree_fork_choice, duplicate_slots_tracker, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, unfrozen_gossip_verified_vote_hashes, has_new_vote_been_rooted, vote_signatures, @@ -2752,7 +2753,7 @@ impl ReplayStage { rewards_recorder_sender: &Option, rpc_subscriptions: &Arc, duplicate_slots_tracker: &mut DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, @@ -2790,7 +2791,7 @@ impl ReplayStage { err, rpc_subscriptions, duplicate_slots_tracker, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, epoch_slots_frozen_slots, progress, heaviest_subtree_fork_choice, @@ -2833,7 +2834,7 @@ impl ReplayStage { &BlockstoreProcessorError::InvalidTransaction(err), rpc_subscriptions, duplicate_slots_tracker, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, epoch_slots_frozen_slots, progress, heaviest_subtree_fork_choice, @@ -2888,7 +2889,7 @@ impl ReplayStage { bank.slot(), bank.hash(), duplicate_slots_tracker, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, heaviest_subtree_fork_choice, epoch_slots_frozen_slots, ); @@ -2910,7 +2911,7 @@ impl ReplayStage { { let duplicate_state = DuplicateState::new_from_state( bank.slot(), - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, heaviest_subtree_fork_choice, || false, || Some(bank.hash()), @@ -3007,7 +3008,7 @@ impl ReplayStage { rewards_recorder_sender: &Option, rpc_subscriptions: &Arc, duplicate_slots_tracker: &mut DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &DuplicateConfirmedSlots, epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, @@ -3080,7 +3081,7 @@ impl ReplayStage { rewards_recorder_sender, rpc_subscriptions, duplicate_slots_tracker, - gossip_duplicate_confirmed_slots, + duplicate_confirmed_slots, epoch_slots_frozen_slots, unfrozen_gossip_verified_vote_hashes, latest_validator_votes_for_frozen_banks, @@ -3899,7 +3900,7 @@ impl ReplayStage { highest_super_majority_root: Option, heaviest_subtree_fork_choice: &mut 
HeaviestSubtreeForkChoice, duplicate_slots_tracker: &mut DuplicateSlotsTracker, - gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots, + duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, has_new_vote_been_rooted: &mut bool, voted_signatures: &mut Vec, @@ -3937,7 +3938,7 @@ impl ReplayStage { *duplicate_slots_tracker = duplicate_slots_tracker.split_off(&new_root); // duplicate_slots_tracker now only contains entries >= `new_root` - *gossip_duplicate_confirmed_slots = gossip_duplicate_confirmed_slots.split_off(&new_root); + *duplicate_confirmed_slots = duplicate_confirmed_slots.split_off(&new_root); // gossip_confirmed_slots now only contains entries >= `new_root` unfrozen_gossip_verified_vote_hashes.set_root(new_root); @@ -4418,11 +4419,10 @@ pub(crate) mod tests { let mut duplicate_slots_tracker: DuplicateSlotsTracker = vec![root - 1, root, root + 1].into_iter().collect(); - let mut gossip_duplicate_confirmed_slots: GossipDuplicateConfirmedSlots = - vec![root - 1, root, root + 1] - .into_iter() - .map(|s| (s, Hash::default())) - .collect(); + let mut duplicate_confirmed_slots: DuplicateConfirmedSlots = vec![root - 1, root, root + 1] + .into_iter() + .map(|s| (s, Hash::default())) + .collect(); let mut unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes = UnfrozenGossipVerifiedVoteHashes { votes_per_slot: vec![root - 1, root, root + 1] @@ -4443,7 +4443,7 @@ pub(crate) mod tests { None, &mut heaviest_subtree_fork_choice, &mut duplicate_slots_tracker, - &mut gossip_duplicate_confirmed_slots, + &mut duplicate_confirmed_slots, &mut unfrozen_gossip_verified_vote_hashes, &mut true, &mut Vec::new(), @@ -4459,7 +4459,7 @@ pub(crate) mod tests { vec![root, root + 1] ); assert_eq!( - gossip_duplicate_confirmed_slots + duplicate_confirmed_slots .keys() .cloned() .collect::>(), @@ -4521,7 +4521,7 @@ pub(crate) mod tests { Some(confirmed_root), &mut heaviest_subtree_fork_choice, &mut DuplicateSlotsTracker::default(), - &mut GossipDuplicateConfirmedSlots::default(), + &mut DuplicateConfirmedSlots::default(), &mut UnfrozenGossipVerifiedVoteHashes::default(), &mut true, &mut Vec::new(), @@ -4837,7 +4837,7 @@ pub(crate) mod tests { err, &rpc_subscriptions, &mut DuplicateSlotsTracker::default(), - &GossipDuplicateConfirmedSlots::new(), + &DuplicateConfirmedSlots::new(), &mut EpochSlotsFrozenSlots::default(), &mut progress, &mut heaviest_subtree_fork_choice, @@ -6485,13 +6485,13 @@ pub(crate) mod tests { blockstore.store_duplicate_slot(5, vec![], vec![]).unwrap(); let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default(); - let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default(); let bank5_hash = bank_forks.read().unwrap().bank_hash(5).unwrap(); assert_ne!(bank5_hash, Hash::default()); let duplicate_state = DuplicateState::new_from_state( 5, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &vote_simulator.heaviest_subtree_fork_choice, || progress.is_dead(5).unwrap_or(false), || Some(bank5_hash), @@ -6526,7 +6526,7 @@ pub(crate) mod tests { // If slot 5 is marked as confirmed, it becomes the heaviest bank on same slot again let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); - 
gossip_duplicate_confirmed_slots.insert(5, bank5_hash); + duplicate_confirmed_slots.insert(5, bank5_hash); let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state( bank5_hash, || progress.is_dead(5).unwrap_or(false), @@ -6620,13 +6620,13 @@ pub(crate) mod tests { // because of lockout blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap(); let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); - let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default(); let bank4_hash = bank_forks.read().unwrap().bank_hash(4).unwrap(); assert_ne!(bank4_hash, Hash::default()); let duplicate_state = DuplicateState::new_from_state( 4, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &vote_simulator.heaviest_subtree_fork_choice, || progress.is_dead(4).unwrap_or(false), || Some(bank4_hash), @@ -6663,7 +6663,7 @@ pub(crate) mod tests { assert_ne!(bank2_hash, Hash::default()); let duplicate_state = DuplicateState::new_from_state( 2, - &gossip_duplicate_confirmed_slots, + &duplicate_confirmed_slots, &vote_simulator.heaviest_subtree_fork_choice, || progress.is_dead(2).unwrap_or(false), || Some(bank2_hash), @@ -6698,7 +6698,7 @@ pub(crate) mod tests { // If slot 4 is marked as confirmed, then this confirms slot 2 and 4, and // then slot 4 is now the heaviest bank again let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); - gossip_duplicate_confirmed_slots.insert(4, bank4_hash); + duplicate_confirmed_slots.insert(4, bank4_hash); let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state( bank4_hash, || progress.is_dead(4).unwrap_or(false), @@ -6853,8 +6853,8 @@ pub(crate) mod tests { // Simulate another version of slot 2 was duplicate confirmed let our_bank2_hash = bank_forks.read().unwrap().bank_hash(2).unwrap(); let duplicate_confirmed_bank2_hash = Hash::new_unique(); - let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); - gossip_duplicate_confirmed_slots.insert(2, duplicate_confirmed_bank2_hash); + let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); + duplicate_confirmed_slots.insert(2, duplicate_confirmed_bank2_hash); let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default(); diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 028a88f416e1fe..e6db8dc60db9e2 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -7,8 +7,8 @@ use { banking_stage::BankingStage, banking_trace::{BankingTracer, TracerThread}, cluster_info_vote_listener::{ - ClusterInfoVoteListener, GossipDuplicateConfirmedSlotsSender, - GossipVerifiedVoteHashSender, VerifiedVoteSender, VoteTracker, + ClusterInfoVoteListener, DuplicateConfirmedSlotsSender, GossipVerifiedVoteHashSender, + VerifiedVoteSender, VoteTracker, }, fetch_stage::FetchStage, sigverify::TransactionSigVerifier, @@ -98,7 +98,7 @@ impl Tpu { replay_vote_sender: ReplayVoteSender, bank_notification_sender: Option, tpu_coalesce: Duration, - cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender, + duplicate_confirmed_slot_sender: DuplicateConfirmedSlotsSender, connection_cache: &Arc, turbine_quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>, keypair: &Keypair, @@ -215,7 +215,7 @@ impl Tpu { replay_vote_receiver, 
blockstore.clone(), bank_notification_sender, - cluster_confirmed_slot_sender, + duplicate_confirmed_slot_sender, ); let banking_stage = BankingStage::new( diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 8e479aa92b792d..214fae3dceac0f 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -6,8 +6,8 @@ use { banking_trace::BankingTracer, cache_block_meta_service::CacheBlockMetaSender, cluster_info_vote_listener::{ - GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, - VerifiedVoteReceiver, VoteTracker, + DuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VerifiedVoteReceiver, + VoteTracker, }, cluster_slots_service::{cluster_slots::ClusterSlots, ClusterSlotsService}, completed_data_sets_service::CompletedDataSetsSender, @@ -125,7 +125,7 @@ impl Tvu { replay_vote_sender: ReplayVoteSender, completed_data_sets_sender: CompletedDataSetsSender, bank_notification_sender: Option, - gossip_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver, + duplicate_confirmed_slots_receiver: DuplicateConfirmedSlotsReceiver, tvu_config: TvuConfig, max_slots: &Arc, block_metadata_notifier: Option, @@ -297,7 +297,7 @@ impl Tvu { retransmit_slots_sender, ancestor_duplicate_slots_receiver, replay_vote_sender, - gossip_confirmed_slots_receiver, + duplicate_confirmed_slots_receiver, gossip_verified_vote_hash_receiver, cluster_slots_update_sender, cost_update_sender, diff --git a/core/src/validator.rs b/core/src/validator.rs index 241105e28ccdf0..27ee18deee5c36 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1128,7 +1128,7 @@ impl Validator { let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded(); let (verified_vote_sender, verified_vote_receiver) = unbounded(); let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded(); - let (cluster_confirmed_slot_sender, cluster_confirmed_slot_receiver) = unbounded(); + let (duplicate_confirmed_slot_sender, duplicate_confirmed_slots_receiver) = unbounded(); let rpc_completed_slots_service = RpcCompletedSlotsService::spawn( completed_slots_receiver, @@ -1263,7 +1263,7 @@ impl Validator { replay_vote_sender.clone(), completed_data_sets_sender, bank_notification_sender.clone(), - cluster_confirmed_slot_receiver, + duplicate_confirmed_slots_receiver, TvuConfig { max_ledger_shreds: config.max_ledger_shreds, shred_version: node.info.shred_version(), @@ -1328,7 +1328,7 @@ impl Validator { replay_vote_sender, bank_notification_sender.map(|sender| sender.sender), config.tpu_coalesce, - cluster_confirmed_slot_sender, + duplicate_confirmed_slot_sender, &connection_cache, turbine_quic_endpoint_sender, &identity_keypair, diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index 3948feab5614f3..d8986d90e5db76 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -11,7 +11,7 @@ use { Tower, }, repair::cluster_slot_state_verifier::{ - DuplicateSlotsTracker, EpochSlotsFrozenSlots, GossipDuplicateConfirmedSlots, + DuplicateConfirmedSlots, DuplicateSlotsTracker, EpochSlotsFrozenSlots, }, replay_stage::{HeaviestForkFailures, ReplayStage}, unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes, @@ -214,7 +214,7 @@ impl VoteSimulator { None, &mut self.heaviest_subtree_fork_choice, &mut DuplicateSlotsTracker::default(), - &mut GossipDuplicateConfirmedSlots::default(), + &mut DuplicateConfirmedSlots::default(), &mut UnfrozenGossipVerifiedVoteHashes::default(), &mut true, &mut Vec::new(), From 60d267a548705954ac5cbfafa63f6a3b77590e17 Mon 
Sep 17 00:00:00 2001 From: Brooks Date: Fri, 10 Nov 2023 16:10:51 -0500 Subject: [PATCH 94/98] Adds documentation to verify_and_unarchive_snapshots() (#34020) --- runtime/src/snapshot_utils.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 0cf1aab09daea2..38dd13d78850e4 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1230,6 +1230,7 @@ pub struct BankFromDirTimings { // From testing, 4 seems to be a sweet spot for ranges of 60M-360M accounts and 16-64 cores. This may need to be tuned later. const PARALLEL_UNTAR_READERS_DEFAULT: usize = 4; +/// Unarchives the given full and incremental snapshot archives, as long as they are compatible. pub fn verify_and_unarchive_snapshots( bank_snapshots_dir: impl AsRef, full_snapshot_archive_info: &FullSnapshotArchiveInfo, From b91da2242dca483f59bae1b6c701365894a07cd9 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 10 Nov 2023 17:27:43 -0600 Subject: [PATCH 95/98] Change Blockstore max_root from RwLock to AtomicU64 (#33998) The Blockstore currently maintains a RwLock of the maximum root it has seen inserted. The value is initialized during Blockstore::open() and updated during calls to Blockstore::set_roots(). The max root is queried fairly often for several use cases, and caching the value is cheaper than constructing an iterator to look it up every time. However, the access patterns of this RwLock match that of an atomic. That is, there is no critical section of code that is run while the lock is held. Rather, read/write locks are acquired in order to read/update, respectively. So, change the RwLock to an AtomicU64. --- core/src/consensus.rs | 14 ++-- core/src/validator.rs | 2 +- gossip/src/duplicate_shred_handler.rs | 4 +- ledger-tool/src/bigtable.rs | 2 +- ledger/src/blockstore.rs | 94 +++++++++++++-------------- 5 files changed, 55 insertions(+), 61 deletions(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 08b72ebf18b327..72a0c39bc35730 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1452,7 +1452,7 @@ impl ExternalRootSource { pub fn reconcile_blockstore_roots_with_external_source( external_source: ExternalRootSource, blockstore: &Blockstore, - // blockstore.last_root() might have been updated already. + // blockstore.max_root() might have been updated already. // so take a &mut param both to input (and output iff we update root) last_blockstore_root: &mut Slot, ) -> blockstore_db::Result<()> { @@ -1489,7 +1489,7 @@ pub fn reconcile_blockstore_roots_with_external_source( // Update the caller-managed state of last root in blockstore. // Repeated calls of this function should result in a no-op for // the range of `new_roots`. - *last_blockstore_root = blockstore.last_root(); + *last_blockstore_root = blockstore.max_root(); } else { // This indicates we're in bad state; but still don't panic here.
// That's because we might have a chance of recovering properly with @@ -2947,7 +2947,7 @@ pub mod test { reconcile_blockstore_roots_with_external_source( ExternalRootSource::Tower(tower.root()), &blockstore, - &mut blockstore.last_root(), + &mut blockstore.max_root(), ) .unwrap(); @@ -2983,7 +2983,7 @@ pub mod test { reconcile_blockstore_roots_with_external_source( ExternalRootSource::Tower(tower.root()), &blockstore, - &mut blockstore.last_root(), + &mut blockstore.max_root(), ) .unwrap(); } @@ -3004,14 +3004,14 @@ pub mod test { let mut tower = Tower::default(); tower.vote_state.root_slot = Some(4); - assert_eq!(blockstore.last_root(), 0); + assert_eq!(blockstore.max_root(), 0); reconcile_blockstore_roots_with_external_source( ExternalRootSource::Tower(tower.root()), &blockstore, - &mut blockstore.last_root(), + &mut blockstore.max_root(), ) .unwrap(); - assert_eq!(blockstore.last_root(), 0); + assert_eq!(blockstore.max_root(), 0); } #[test] diff --git a/core/src/validator.rs b/core/src/validator.rs index 27ee18deee5c36..d73bf58e868697 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1747,7 +1747,7 @@ fn load_blockstore( blockstore.shred_timing_point_sender = poh_timing_point_sender; // following boot sequence (esp BankForks) could set root. so stash the original value // of blockstore root away here as soon as possible. - let original_blockstore_root = blockstore.last_root(); + let original_blockstore_root = blockstore.max_root(); let blockstore = Arc::new(blockstore); let blockstore_root_scan = BlockstoreRootScan::new(config, blockstore.clone(), exit.clone()); diff --git a/gossip/src/duplicate_shred_handler.rs b/gossip/src/duplicate_shred_handler.rs index ba95178bc88441..1410e8262f027d 100644 --- a/gossip/src/duplicate_shred_handler.rs +++ b/gossip/src/duplicate_shred_handler.rs @@ -78,7 +78,7 @@ impl DuplicateShredHandler { } fn cache_root_info(&mut self) { - let last_root = self.blockstore.last_root(); + let last_root = self.blockstore.max_root(); if last_root == self.last_root && !self.cached_staked_nodes.is_empty() { return; } @@ -361,7 +361,7 @@ mod tests { // This proof will be rejected because the slot is too far away in the future. 
let future_slot = - blockstore.last_root() + duplicate_shred_handler.cached_slots_in_epoch + start_slot; + blockstore.max_root() + duplicate_shred_handler.cached_slots_in_epoch + start_slot; let chunks = create_duplicate_proof( my_keypair.clone(), None, diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index 6f0f3e8829ed7a..c4d5c77f302669 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -63,7 +63,7 @@ async fn upload( None => blockstore.get_first_available_block()?, }; - let ending_slot = ending_slot.unwrap_or_else(|| blockstore.last_root()); + let ending_slot = ending_slot.unwrap_or_else(|| blockstore.max_root()); while starting_slot <= ending_slot { let current_ending_slot = min( diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 7f596b0556885d..28c463646c43c7 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -73,7 +73,7 @@ use { path::{Path, PathBuf}, rc::Rc, sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, Mutex, RwLock, }, }, @@ -214,7 +214,7 @@ pub struct Blockstore { program_costs_cf: LedgerColumn, bank_hash_cf: LedgerColumn, optimistic_slots_cf: LedgerColumn, - last_root: RwLock, + max_root: AtomicU64, insert_shreds_lock: Mutex<()>, new_shreds_signals: Mutex>>, completed_slots_senders: Mutex>, @@ -324,7 +324,7 @@ impl Blockstore { .next() .map(|(slot, _)| slot) .unwrap_or(0); - let last_root = RwLock::new(max_root); + let max_root = AtomicU64::new(max_root); measure.stop(); info!("{:?} {}", blockstore_path, measure); @@ -356,7 +356,7 @@ impl Blockstore { completed_slots_senders: Mutex::default(), shred_timing_point_sender: None, insert_shreds_lock: Mutex::<()>::default(), - last_root, + max_root, lowest_cleanup_slot: RwLock::::default(), slots_stats: SlotsStats::default(), }; @@ -471,16 +471,6 @@ impl Blockstore { self.orphans_cf.get(slot) } - /// Returns the max root or 0 if it does not exist. - pub fn max_root(&self) -> Slot { - self.db - .iter::(IteratorMode::End) - .expect("Couldn't get rooted iterator for max_root()") - .next() - .map(|(slot, _)| slot) - .unwrap_or(0) - } - pub fn slot_meta_iterator( &self, slot: Slot, @@ -1192,7 +1182,7 @@ impl Blockstore { return false; } - if !Blockstore::should_insert_coding_shred(&shred, &self.last_root) { + if !Blockstore::should_insert_coding_shred(&shred, self.max_root()) { metrics.num_coding_shreds_invalid += 1; return false; } @@ -1391,7 +1381,7 @@ impl Blockstore { &shred, slot_meta, just_inserted_shreds, - &self.last_root, + self.max_root(), leader_schedule, shred_source, duplicate_shreds, @@ -1419,9 +1409,9 @@ impl Blockstore { Ok(newly_completed_data_sets) } - fn should_insert_coding_shred(shred: &Shred, last_root: &RwLock) -> bool { + fn should_insert_coding_shred(shred: &Shred, max_root: Slot) -> bool { debug_assert_matches!(shred.sanitize(), Ok(())); - shred.is_code() && shred.slot() > *last_root.read().unwrap() + shred.is_code() && shred.slot() > max_root } fn insert_coding_shred( @@ -1473,7 +1463,7 @@ impl Blockstore { shred: &Shred, slot_meta: &SlotMeta, just_inserted_shreds: &HashMap, - last_root: &RwLock, + max_root: Slot, leader_schedule: Option<&LeaderScheduleCache>, shred_source: ShredSource, duplicate_shreds: &mut Vec, @@ -1568,12 +1558,11 @@ impl Blockstore { return false; } - let last_root = *last_root.read().unwrap(); // TODO Shouldn't this use shred.parent() instead and update // slot_meta.parent_slot accordingly? 
slot_meta .parent_slot - .map(|parent_slot| verify_shred_slots(slot, parent_slot, last_root)) + .map(|parent_slot| verify_shred_slots(slot, parent_slot, max_root)) .unwrap_or_default() } @@ -1584,7 +1573,7 @@ impl Blockstore { sender, SlotPohTimingInfo::new_slot_full_poh_time_point( slot, - Some(self.last_root()), + Some(self.max_root()), solana_sdk::timing::timestamp(), ), ); @@ -2491,10 +2480,10 @@ impl Blockstore { "blockstore-rpc-api", ("method", "get_complete_transaction", String) ); - let last_root = self.last_root(); + let max_root = self.max_root(); let confirmed_unrooted_slots: HashSet<_> = AncestorIterator::new_inclusive(highest_confirmed_slot, self) - .take_while(|&slot| slot > last_root) + .take_while(|&slot| slot > max_root) .collect(); self.get_transaction_with_status(signature, &confirmed_unrooted_slots) } @@ -2642,10 +2631,10 @@ impl Blockstore { "blockstore-rpc-api", ("method", "get_confirmed_signatures_for_address2", String) ); - let last_root = self.last_root(); + let max_root = self.max_root(); let confirmed_unrooted_slots: HashSet<_> = AncestorIterator::new_inclusive(highest_slot, self) - .take_while(|&slot| slot > last_root) + .take_while(|&slot| slot > max_root) .collect(); // Figure the `slot` to start listing signatures at, based on the ledger location of the @@ -3247,12 +3236,8 @@ impl Blockstore { } self.db.write(write_batch)?; - - let mut last_root = self.last_root.write().unwrap(); - if *last_root == std::u64::MAX { - *last_root = 0; - } - *last_root = cmp::max(max_new_rooted_slot, *last_root); + self.max_root + .fetch_max(max_new_rooted_slot, Ordering::Relaxed); Ok(()) } @@ -3355,8 +3340,17 @@ impl Blockstore { Ok(duplicate_slots_iterator.map(|(slot, _)| slot)) } + /// Returns the max root or 0 if it does not exist + pub fn max_root(&self) -> Slot { + self.max_root.load(Ordering::Relaxed) + } + + #[deprecated( + since = "1.18.0", + note = "Please use `solana_ledger::blockstore::Blockstore::max_root()` instead" + )] pub fn last_root(&self) -> Slot { - *self.last_root.read().unwrap() + self.max_root() } // find the first available slot in blockstore that has some data in it @@ -3370,7 +3364,7 @@ impl Blockstore { } } // This means blockstore is empty, should never get here aside from right at boot. - self.last_root() + self.max_root() } fn lowest_slot_with_genesis(&self) -> Slot { @@ -3383,7 +3377,7 @@ impl Blockstore { } } // This means blockstore is empty, should never get here aside from right at boot. 
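The before/after access pattern described in the commit message, reduced to a sketch; MaxRootHolder is a hypothetical stand-in for the Blockstore field, not code from this patch. No critical section spans the read and the update, so fetch_max replaces the write-lock-then-max dance and load replaces the read lock.

    use std::sync::atomic::{AtomicU64, Ordering};

    // Hypothetical stand-in for the Blockstore's max_root field.
    struct MaxRootHolder {
        max_root: AtomicU64,
    }

    impl MaxRootHolder {
        // Before: *self.last_root.read().unwrap()
        fn max_root(&self) -> u64 {
            self.max_root.load(Ordering::Relaxed)
        }

        // Before: acquire a write lock, then *last_root = cmp::max(new, *last_root);
        // fetch_max performs the same read-modify-write atomically.
        fn on_set_roots(&self, max_new_rooted_slot: u64) {
            self.max_root
                .fetch_max(max_new_rooted_slot, Ordering::Relaxed);
        }
    }

    fn main() {
        let holder = MaxRootHolder { max_root: AtomicU64::new(0) };
        holder.on_set_roots(15);
        holder.on_set_roots(7); // a lower root does not regress the max
        assert_eq!(holder.max_root(), 15);
    }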
- self.last_root() + self.max_root() } /// Returns the highest available slot in the blockstore @@ -3458,7 +3452,7 @@ impl Blockstore { } slot } else { - self.last_root() + self.max_root() }; let end_slot = end_slot.unwrap_or(*lowest_cleanup_slot); let ancestor_iterator = @@ -6590,7 +6584,7 @@ pub mod tests { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let last_root = RwLock::new(0); + let max_root = 0; // Insert the first 5 shreds, we don't have a "is_last" shred yet blockstore @@ -6620,7 +6614,7 @@ pub mod tests { &empty_shred, &slot_meta, &HashMap::new(), - &last_root, + max_root, None, ShredSource::Repaired, &mut Vec::new(), @@ -6645,7 +6639,7 @@ pub mod tests { &shred7, &slot_meta, &HashMap::new(), - &last_root, + max_root, None, ShredSource::Repaired, &mut duplicate_shreds, @@ -6676,7 +6670,7 @@ pub mod tests { &shred8, &slot_meta, &HashMap::new(), - &last_root, + max_root, None, ShredSource::Repaired, &mut duplicate_shreds, @@ -6784,7 +6778,7 @@ pub mod tests { fn test_should_insert_coding_shred() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let last_root = RwLock::new(0); + let max_root = 0; let slot = 1; let mut coding_shred = Shred::new_from_parity_shard( @@ -6801,7 +6795,7 @@ pub mod tests { // Insert a good coding shred assert!(Blockstore::should_insert_coding_shred( &coding_shred, - &last_root + max_root )); // Insertion should succeed @@ -6814,7 +6808,7 @@ pub mod tests { { assert!(Blockstore::should_insert_coding_shred( &coding_shred, - &last_root + max_root )); } @@ -6822,16 +6816,16 @@ pub mod tests { coding_shred.set_index(coding_shred.index() + 1); assert!(Blockstore::should_insert_coding_shred( &coding_shred, - &last_root + max_root )); // Trying to insert value into slot <= than last root should fail { let mut coding_shred = coding_shred.clone(); - coding_shred.set_slot(*last_root.read().unwrap()); + coding_shred.set_slot(max_root); assert!(!Blockstore::should_insert_coding_shred( &coding_shred, - &last_root + max_root )); } } @@ -6896,11 +6890,11 @@ pub mod tests { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let chained_slots = vec![0, 2, 4, 7, 12, 15]; - assert_eq!(blockstore.last_root(), 0); + assert_eq!(blockstore.max_root(), 0); blockstore.set_roots(chained_slots.iter()).unwrap(); - assert_eq!(blockstore.last_root(), 15); + assert_eq!(blockstore.max_root(), 15); for i in chained_slots { assert!(blockstore.is_root(i)); @@ -7071,9 +7065,9 @@ pub mod tests { // Make shred for slot 1 let (shreds1, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true); - let last_root = 100; + let max_root = 100; - blockstore.set_roots(std::iter::once(&last_root)).unwrap(); + blockstore.set_roots(std::iter::once(&max_root)).unwrap(); // Insert will fail, slot < root blockstore From 04e4efd8ae88005572c00fa60d329101c5a3e1de Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 10 Nov 2023 19:44:23 -0500 Subject: [PATCH 96/98] Puts create_tmp_accounts_dir_for_tests() behind DCOU (#34022) --- runtime/src/snapshot_utils.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 38dd13d78850e4..9daaa0f6831fa8 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -19,9 +19,7 @@ use { regex::Regex, solana_accounts_db::{ 
account_storage::AccountStorageMap, - accounts_db::{ - self, create_accounts_run_and_snapshot_dirs, AccountStorageEntry, AtomicAppendVecId, - }, + accounts_db::{self, AccountStorageEntry, AtomicAppendVecId}, accounts_file::AccountsFileError, append_vec::AppendVec, hardened_unpack::{self, ParallelSelector, UnpackError}, @@ -1999,7 +1997,9 @@ pub fn verify_snapshot_archive( ) { let temp_dir = tempfile::TempDir::new().unwrap(); let unpack_dir = temp_dir.path(); - let unpack_account_dir = create_accounts_run_and_snapshot_dirs(unpack_dir).unwrap().0; + let unpack_account_dir = accounts_db::create_accounts_run_and_snapshot_dirs(unpack_dir) + .unwrap() + .0; untar_snapshot_in( snapshot_archive, unpack_dir, @@ -2170,9 +2170,16 @@ pub fn should_take_incremental_snapshot( && last_full_snapshot_slot.is_some() } +/// Creates an "accounts path" directory for tests +/// +/// This temporary directory will contain the "run" and "snapshot" +/// sub-directories required by a validator. +#[cfg(feature = "dev-context-only-utils")] pub fn create_tmp_accounts_dir_for_tests() -> (TempDir, PathBuf) { let tmp_dir = tempfile::TempDir::new().unwrap(); - let account_dir = create_accounts_run_and_snapshot_dirs(&tmp_dir).unwrap().0; + let account_dir = accounts_db::create_accounts_run_and_snapshot_dirs(&tmp_dir) + .unwrap() + .0; (tmp_dir, account_dir) } From e457c0287906f313ad0284e0f317c5d00a42d467 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Sat, 11 Nov 2023 21:14:18 -0500 Subject: [PATCH 97/98] add merkle root meta column to blockstore (#33979) * add merkle root meta column to blockstore * pr feedback: remove write/reads to column * pr feedback: u64 -> u32 + revert * pr feedback: fec_set_index u32, use Self::Index * pr feedback: key size 16 -> 12 --- ledger/src/blockstore.rs | 4 ++ ledger/src/blockstore/blockstore_purge.rs | 8 ++++ ledger/src/blockstore_db.rs | 51 +++++++++++++++++++++++ ledger/src/blockstore_meta.rs | 10 +++++ 4 files changed, 73 insertions(+) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 28c463646c43c7..3010a65be7f90c 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -215,6 +215,7 @@ pub struct Blockstore { bank_hash_cf: LedgerColumn, optimistic_slots_cf: LedgerColumn, max_root: AtomicU64, + merkle_root_meta_cf: LedgerColumn, insert_shreds_lock: Mutex<()>, new_shreds_signals: Mutex>>, completed_slots_senders: Mutex>, @@ -315,6 +316,7 @@ impl Blockstore { let program_costs_cf = db.column(); let bank_hash_cf = db.column(); let optimistic_slots_cf = db.column(); + let merkle_root_meta_cf = db.column(); let db = Arc::new(db); @@ -352,6 +354,7 @@ impl Blockstore { program_costs_cf, bank_hash_cf, optimistic_slots_cf, + merkle_root_meta_cf, new_shreds_signals: Mutex::default(), completed_slots_senders: Mutex::default(), shred_timing_point_sender: None, @@ -711,6 +714,7 @@ impl Blockstore { self.program_costs_cf.submit_rocksdb_cf_metrics(); self.bank_hash_cf.submit_rocksdb_cf_metrics(); self.optimistic_slots_cf.submit_rocksdb_cf_metrics(); + self.merkle_root_meta_cf.submit_rocksdb_cf_metrics(); } fn try_shred_recovery( diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 9669f8bd305a00..f6b3662ed19e28 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -220,6 +220,10 @@ impl Blockstore { & self .db .delete_range_cf::(&mut write_batch, from_slot, to_slot) + .is_ok() + & self + .db + .delete_range_cf::(&mut write_batch, from_slot, to_slot) .is_ok(); 
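The Column impl in the blockstore_db.rs hunk below packs the (slot, fec_set_index) index into a 12-byte big-endian key. A standalone sketch of that layout (assuming the byteorder crate, which the real code also uses):

    use byteorder::{BigEndian, ByteOrder};

    // 8 big-endian bytes of slot followed by 4 big-endian bytes of
    // fec_set_index, so keys iterate in slot order.
    fn key(slot: u64, fec_set_index: u32) -> Vec<u8> {
        let mut key = vec![0; 12];
        BigEndian::write_u64(&mut key[..8], slot);
        BigEndian::write_u32(&mut key[8..], fec_set_index);
        key
    }

    fn index(key: &[u8]) -> (u64, u32) {
        (BigEndian::read_u64(&key[..8]), BigEndian::read_u32(&key[8..]))
    }

    fn main() {
        let k = key(42, 7);
        assert_eq!(k.len(), 12);
        assert_eq!(index(&k), (42, 7));
        // Big-endian keeps lexicographic (rocksdb) order equal to numeric slot order.
        assert!(key(1, u32::MAX) < key(2, 0));
    }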
---
 ledger/src/blockstore.rs                  |  4 ++
 ledger/src/blockstore/blockstore_purge.rs |  8 ++++
 ledger/src/blockstore_db.rs               | 51 +++++++++++++++++++++++
 ledger/src/blockstore_meta.rs             | 10 +++++
 4 files changed, 73 insertions(+)

diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 28c463646c43c7..3010a65be7f90c 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -215,6 +215,7 @@ pub struct Blockstore {
     bank_hash_cf: LedgerColumn<cf::BankHash>,
     optimistic_slots_cf: LedgerColumn<cf::OptimisticSlots>,
     max_root: AtomicU64,
+    merkle_root_meta_cf: LedgerColumn<cf::MerkleRootMeta>,
     insert_shreds_lock: Mutex<()>,
     new_shreds_signals: Mutex<Vec<Sender<bool>>>,
     completed_slots_senders: Mutex<Vec<CompletedSlotsSender>>,
@@ -315,6 +316,7 @@ impl Blockstore {
         let program_costs_cf = db.column();
         let bank_hash_cf = db.column();
         let optimistic_slots_cf = db.column();
+        let merkle_root_meta_cf = db.column();
 
         let db = Arc::new(db);
 
@@ -352,6 +354,7 @@ impl Blockstore {
             program_costs_cf,
             bank_hash_cf,
             optimistic_slots_cf,
+            merkle_root_meta_cf,
             new_shreds_signals: Mutex::default(),
             completed_slots_senders: Mutex::default(),
             shred_timing_point_sender: None,
@@ -711,6 +714,7 @@ impl Blockstore {
         self.program_costs_cf.submit_rocksdb_cf_metrics();
         self.bank_hash_cf.submit_rocksdb_cf_metrics();
         self.optimistic_slots_cf.submit_rocksdb_cf_metrics();
+        self.merkle_root_meta_cf.submit_rocksdb_cf_metrics();
     }
 
     fn try_shred_recovery(
diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs
index 9669f8bd305a00..f6b3662ed19e28 100644
--- a/ledger/src/blockstore/blockstore_purge.rs
+++ b/ledger/src/blockstore/blockstore_purge.rs
@@ -220,6 +220,10 @@ impl Blockstore {
             & self
                 .db
                 .delete_range_cf::<cf::OptimisticSlots>(&mut write_batch, from_slot, to_slot)
+                .is_ok()
+            & self
+                .db
+                .delete_range_cf::<cf::MerkleRootMeta>(&mut write_batch, from_slot, to_slot)
                 .is_ok();
 
         match purge_type {
             PurgeType::Exact => {
@@ -329,6 +333,10 @@ impl Blockstore {
             .db
             .delete_file_in_range_cf::<cf::OptimisticSlots>(from_slot, to_slot)
             .is_ok()
+            & self
+            .db
+            .delete_file_in_range_cf::<cf::MerkleRootMeta>(from_slot, to_slot)
+            .is_ok()
     }
 
     /// Returns true if the special columns, TransactionStatus and
diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs
index b65df82ee00c9e..0b2b14445539d6 100644
--- a/ledger/src/blockstore_db.rs
+++ b/ledger/src/blockstore_db.rs
@@ -2,6 +2,7 @@ pub use rocksdb::Direction as IteratorDirection;
 use {
     crate::{
         blockstore_meta,
+        blockstore_meta::MerkleRootMeta,
         blockstore_metrics::{
             maybe_enable_rocksdb_perf, report_rocksdb_read_perf, report_rocksdb_write_perf,
             BlockstoreRocksDbColumnFamilyMetrics, PerfSamplingStatus, PERF_METRIC_OP_NAME_GET,
@@ -103,6 +104,8 @@ const BLOCK_HEIGHT_CF: &str = "block_height";
 const PROGRAM_COSTS_CF: &str = "program_costs";
 /// Column family for optimistic slots
 const OPTIMISTIC_SLOTS_CF: &str = "optimistic_slots";
+/// Column family for merkle roots
+const MERKLE_ROOT_META_CF: &str = "merkle_root_meta";
 
 #[derive(Error, Debug)]
 pub enum BlockstoreError {
@@ -339,6 +342,19 @@ pub mod columns {
     /// * value type: [`blockstore_meta::OptimisticSlotMetaVersioned`]
     pub struct OptimisticSlots;
 
+    #[derive(Debug)]
+    /// The merkle root meta column
+    ///
+    /// Each merkle shred is part of a merkle tree for
+    /// its FEC set. This column stores that merkle root and associated
+    /// meta information about the first shred received.
+    ///
+    /// Its index type is (Slot, fec_set_index).
+    ///
+    /// * index type: `crate::shred::ErasureSetId` `(Slot, fec_set_index: u32)`
+    /// * value type: [`blockstore_meta::MerkleRootMeta`]
+    pub struct MerkleRootMeta;
+
     // When adding a new column ...
     // - Add struct below and implement `Column` and `ColumnName` traits
     // - Add descriptor in Rocks::cf_descriptors() and name in Rocks::columns()
@@ -474,6 +490,7 @@ impl Rocks {
             new_cf_descriptor::<BlockHeight>(options, oldest_slot),
             new_cf_descriptor::<ProgramCosts>(options, oldest_slot),
             new_cf_descriptor::<OptimisticSlots>(options, oldest_slot),
+            new_cf_descriptor::<MerkleRootMeta>(options, oldest_slot),
         ]
     }
 
@@ -501,6 +518,7 @@ impl Rocks {
             BlockHeight::NAME,
             ProgramCosts::NAME,
             OptimisticSlots::NAME,
+            MerkleRootMeta::NAME,
         ]
     }
 
@@ -1227,6 +1245,39 @@ impl TypedColumn for columns::OptimisticSlots {
     type Type = blockstore_meta::OptimisticSlotMetaVersioned;
 }
 
+impl Column for columns::MerkleRootMeta {
+    type Index = (Slot, /*fec_set_index:*/ u32);
+
+    fn index(key: &[u8]) -> Self::Index {
+        let slot = BigEndian::read_u64(&key[..8]);
+        let fec_set_index = BigEndian::read_u32(&key[8..]);
+
+        (slot, fec_set_index)
+    }
+
+    fn key((slot, fec_set_index): Self::Index) -> Vec<u8> {
+        let mut key = vec![0; 12];
+        BigEndian::write_u64(&mut key[..8], slot);
+        BigEndian::write_u32(&mut key[8..], fec_set_index);
+        key
+    }
+
+    fn slot((slot, _fec_set_index): Self::Index) -> Slot {
+        slot
+    }
+
+    fn as_index(slot: Slot) -> Self::Index {
+        (slot, 0)
+    }
+}
+
+impl ColumnName for columns::MerkleRootMeta {
+    const NAME: &'static str = MERKLE_ROOT_META_CF;
+}
+impl TypedColumn for columns::MerkleRootMeta {
+    type Type = MerkleRootMeta;
+}
+
 #[derive(Debug)]
 pub struct Database {
     backend: Arc<Rocks>,
diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs
index 79954ee96b6d04..41a16c9ae3fee3 100644
--- a/ledger/src/blockstore_meta.rs
+++ b/ledger/src/blockstore_meta.rs
@@ -138,6 +138,16 @@ pub(crate) struct ErasureConfig {
     num_coding: usize,
 }
 
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
+pub struct MerkleRootMeta {
+    /// The merkle root
+    merkle_root: Hash,
+    /// The first received shred index
+    first_received_shred_index: u32,
+    /// The shred type of the first received shred
+    first_received_shred_type: ShredType,
+}
+
 #[derive(Deserialize, Serialize)]
 pub struct DuplicateSlotProof {
     #[serde(with = "serde_bytes")]

From ae3057258543e6d0a4d7c27b300bc08acbd7812f Mon Sep 17 00:00:00 2001
From: Brooks
Date: Sun, 12 Nov 2023 00:01:17 -0500
Subject: [PATCH 98/98] Use AtomicAppendVecId type alias in verify_and_unarchive_snapshot() (#34019)

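Spelling out the intent: snapshot code should not hard-code the width of
AppendVec ids. A minimal sketch of the pattern, with simplified stand-ins
for the accounts-db definitions (the aliases and the counter helper below
are illustrative, not the actual crate internals):

    use std::sync::atomic::{AtomicU32, Ordering};

    // Stand-ins: if the id type ever widens (say u32 -> u64), only these
    // two aliases change, and signatures written against them follow along.
    type AppendVecId = u32;
    type AtomicAppendVecId = AtomicU32;

    fn next_append_vec_id(next_id: &AtomicAppendVecId) -> AppendVecId {
        // A relaxed fetch_add is enough for a monotonically increasing id.
        next_id.fetch_add(1, Ordering::Relaxed)
    }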
---
 runtime/src/snapshot_utils.rs | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index 9daaa0f6831fa8..c890c3196f936d 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -36,7 +36,7 @@ use {
         path::{Path, PathBuf},
         process::ExitStatus,
         str::FromStr,
-        sync::{atomic::AtomicU32, Arc, Mutex},
+        sync::{Arc, Mutex},
         thread::{Builder, JoinHandle},
     },
     tar::{self, Archive},
@@ -1234,7 +1234,11 @@ pub fn verify_and_unarchive_snapshots(
     full_snapshot_archive_info: &FullSnapshotArchiveInfo,
     incremental_snapshot_archive_info: Option<&IncrementalSnapshotArchiveInfo>,
     account_paths: &[PathBuf],
-) -> Result<(UnarchivedSnapshot, Option<UnarchivedSnapshot>, AtomicU32)> {
+) -> Result<(
+    UnarchivedSnapshot,
+    Option<UnarchivedSnapshot>,
+    AtomicAppendVecId,
+)> {
     check_are_snapshots_compatible(
         full_snapshot_archive_info,
         incremental_snapshot_archive_info,