From adee97fe38b175ae185d34a7aa71b2e7bfd570e8 Mon Sep 17 00:00:00 2001 From: Brooks Date: Sat, 2 Sep 2023 16:17:03 -0400 Subject: [PATCH 001/407] Ledger-tool CLI can specify accounts hash cache path (#33118) --- ledger-tool/src/args.rs | 19 ++++++++++++++++++- ledger-tool/src/main.rs | 10 ++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 41d9c292de1e53..c11954a56780ab 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -2,10 +2,11 @@ use { crate::LEDGER_TOOL_DIRECTORY, clap::{value_t, values_t_or_exit, ArgMatches}, solana_accounts_db::{ - accounts_db::{AccountsDbConfig, FillerAccountsConfig}, + accounts_db::{AccountsDb, AccountsDbConfig, FillerAccountsConfig}, accounts_index::{AccountsIndexConfig, IndexLimitMb}, partitioned_rewards::TestPartitionedEpochRewards, }, + solana_runtime::snapshot_utils, solana_sdk::clock::Slot, std::path::{Path, PathBuf}, }; @@ -57,9 +58,25 @@ pub fn get_accounts_db_config( size: value_t!(arg_matches, "accounts_filler_size", usize).unwrap_or(0), }; + let accounts_hash_cache_path = arg_matches + .value_of("accounts_hash_cache_path") + .map(Into::into) + .unwrap_or_else(|| { + ledger_tool_ledger_path.join(AccountsDb::DEFAULT_ACCOUNTS_HASH_CACHE_DIR) + }); + let accounts_hash_cache_path = + snapshot_utils::create_and_canonicalize_directories(&[accounts_hash_cache_path]) + .unwrap_or_else(|err| { + eprintln!("Unable to access accounts hash cache path: {err}"); + std::process::exit(1); + }) + .pop() + .unwrap(); + AccountsDbConfig { index: Some(accounts_index_config), base_working_path: Some(ledger_tool_ledger_path), + accounts_hash_cache_path: Some(accounts_hash_cache_path), filler_accounts_config, ancient_append_vec_offset: value_t!(arg_matches, "accounts_db_ancient_append_vecs", i64) .ok(), diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 49ed077fc334f2..0db9ae21eb5d0f 100644 --- a/ledger-tool/src/main.rs +++ 
b/ledger-tool/src/main.rs @@ -1146,6 +1146,11 @@ fn main() { .value_name("PATHS") .takes_value(true) .help("Comma separated persistent accounts location"); + let accounts_hash_cache_path_arg = Arg::with_name("accounts_hash_cache_path") + .long("accounts-hash-cache-path") + .value_name("PATH") + .takes_value(true) + .help("Use PATH as accounts hash cache location"); let accounts_index_path_arg = Arg::with_name("accounts_index_path") .long("accounts-index-path") .value_name("PATH") @@ -1593,6 +1598,7 @@ fn main() { .about("Verify the ledger") .arg(&no_snapshot_arg) .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) .arg(&accounts_index_path_arg) .arg(&halt_at_slot_arg) .arg(&limit_load_slot_count_from_snapshot_arg) @@ -1676,6 +1682,7 @@ fn main() { .about("Create a Graphviz rendering of the ledger") .arg(&no_snapshot_arg) .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) .arg(&accounts_index_bins) .arg(&accounts_index_limit) .arg(&disable_disk_index) @@ -1711,6 +1718,7 @@ fn main() { .about("Create a new ledger snapshot") .arg(&no_snapshot_arg) .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) .arg(&accounts_index_bins) .arg(&accounts_index_limit) .arg(&disable_disk_index) @@ -1904,6 +1912,7 @@ fn main() { .about("Print account stats and contents after processing the ledger") .arg(&no_snapshot_arg) .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) .arg(&accounts_index_bins) .arg(&accounts_index_limit) .arg(&disable_disk_index) @@ -1937,6 +1946,7 @@ fn main() { .about("Print capitalization (aka, total supply) while checksumming it") .arg(&no_snapshot_arg) .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) .arg(&accounts_index_bins) .arg(&accounts_index_limit) .arg(&disable_disk_index) From 6679153ca17f8e96f7e3b57bc4c8e565774f421f Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Tue, 5 Sep 2023 14:27:26 +0700 Subject: [PATCH 002/407] CPI: improve test coverage (#31986) * programs/sbf: add 
TEST_[FORBID|ALLOW]_WRITE_AFTER_OWNERSHIP_CHANGE* * programs/sbf: add tests for the AccessViolation -> InstructionError mapping * cpi: add more tests * programs/sbf: add tests for immutable AccountInfo pointers * programs/sbf: add tests for verification of SolAccountInfo pointers too * programs/sbf: add tests for ref_to_len_in_vm handling in CPI Add TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE_MOVING_DATA_POINTER and TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE that exercise the new logic. * cpi: tweak tests Remove some copy pasta and rename two tests to better describe what they're doing * cpi: add tests that check that CPI updates all accounts at once * direct mapping: test that writes to executable accounts trigger ExecutableDataModified * programs/sbf: add explicit tests for when an account's data allocation changes --- programs/sbf/Cargo.lock | 1 + programs/sbf/c/src/invoke/invoke.c | 73 ++ .../sbf/rust/deprecated_loader/src/lib.rs | 152 ++++- programs/sbf/rust/invoke/Cargo.toml | 1 + programs/sbf/rust/invoke/src/instructions.rs | 18 + programs/sbf/rust/invoke/src/processor.rs | 614 ++++++++++++++++- programs/sbf/rust/invoked/src/instructions.rs | 1 + programs/sbf/rust/invoked/src/processor.rs | 8 + programs/sbf/rust/realloc/src/instructions.rs | 1 + programs/sbf/rust/realloc/src/processor.rs | 8 + programs/sbf/tests/programs.rs | 645 +++++++++++++++++- 11 files changed, 1487 insertions(+), 35 deletions(-) diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 22a0e9f5c8bbe0..18a669098b13d7 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5755,6 +5755,7 @@ version = "1.17.0" dependencies = [ "solana-program", "solana-sbf-rust-invoked", + "solana-sbf-rust-realloc", ] [[package]] diff --git a/programs/sbf/c/src/invoke/invoke.c b/programs/sbf/c/src/invoke/invoke.c index 4cb038cc7f6950..1ff4e6b69a096c 100644 --- a/programs/sbf/c/src/invoke/invoke.c +++ b/programs/sbf/c/src/invoke/invoke.c @@ -31,6 +31,12 @@ static const 
uint8_t TEST_RETURN_DATA_TOO_LARGE = 18; static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER = 19; static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE = 20; static const uint8_t TEST_MAX_ACCOUNT_INFOS_EXCEEDED = 21; +// TEST_CPI_INVALID_* must match the definitions in +// https://github.com/solana-labs/solana/blob/master/programs/sbf/rust/invoke/src/instructions.rs +static const uint8_t TEST_CPI_INVALID_KEY_POINTER = 34; +static const uint8_t TEST_CPI_INVALID_OWNER_POINTER = 35; +static const uint8_t TEST_CPI_INVALID_LAMPORTS_POINTER = 36; +static const uint8_t TEST_CPI_INVALID_DATA_POINTER = 37; static const int MINT_INDEX = 0; static const int ARGUMENT_INDEX = 1; @@ -663,6 +669,73 @@ extern uint64_t entrypoint(const uint8_t *input) { sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); break; } + case TEST_CPI_INVALID_KEY_POINTER: + { + sol_log("Test TEST_CPI_INVALID_KEY_POINTER"); + SolAccountMeta arguments[] = { + {accounts[ARGUMENT_INDEX].key, false, false}, + {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}, + }; + uint8_t data[] = {}; + SolPubkey key = *accounts[ARGUMENT_INDEX].key; + accounts[ARGUMENT_INDEX].key = &key; + + const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + sol_invoke(&instruction, accounts, 4); + break; + } + case TEST_CPI_INVALID_LAMPORTS_POINTER: + { + sol_log("Test TEST_CPI_INVALID_LAMPORTS_POINTER"); + SolAccountMeta arguments[] = { + {accounts[ARGUMENT_INDEX].key, false, false}, + {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}, + }; + uint8_t data[] = {}; + uint64_t lamports = *accounts[ARGUMENT_INDEX].lamports; + accounts[ARGUMENT_INDEX].lamports = &lamports; + + const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + sol_invoke(&instruction, accounts, 4); + break; + } + case TEST_CPI_INVALID_OWNER_POINTER: 
+ { + sol_log("Test TEST_CPI_INVALID_OWNER_POINTER"); + SolAccountMeta arguments[] = { + {accounts[ARGUMENT_INDEX].key, false, false}, + {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}, + }; + uint8_t data[] = {}; + SolPubkey owner = *accounts[ARGUMENT_INDEX].owner; + accounts[ARGUMENT_INDEX].owner = &owner; + + const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + sol_invoke(&instruction, accounts, 4); + break; + } + case TEST_CPI_INVALID_DATA_POINTER: + { + sol_log("Test TEST_CPI_INVALID_DATA_POINTER"); + SolAccountMeta arguments[] = { + {accounts[ARGUMENT_INDEX].key, false, false}, + {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}, + }; + uint8_t data[] = {}; + accounts[ARGUMENT_INDEX].data = data; + + const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + sol_invoke(&instruction, accounts, 4); + break; + } default: sol_panic(); diff --git a/programs/sbf/rust/deprecated_loader/src/lib.rs b/programs/sbf/rust/deprecated_loader/src/lib.rs index 772e0c0f594cd2..a9b801b5e43eea 100644 --- a/programs/sbf/rust/deprecated_loader/src/lib.rs +++ b/programs/sbf/rust/deprecated_loader/src/lib.rs @@ -5,10 +5,21 @@ extern crate solana_program; use solana_program::{ - account_info::AccountInfo, bpf_loader, entrypoint_deprecated::ProgramResult, log::*, msg, + account_info::AccountInfo, + bpf_loader, + entrypoint_deprecated::ProgramResult, + instruction::{AccountMeta, Instruction}, + log::*, + msg, + program::invoke, pubkey::Pubkey, }; +pub const REALLOC: u8 = 1; +pub const REALLOC_EXTEND_FROM_SLICE: u8 = 12; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS: u8 = 28; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED: u8 = 29; + #[derive(Debug, PartialEq)] struct SStruct { x: u64, @@ -39,37 +50,122 @@ fn process_instruction( assert!(!bpf_loader::check_id(program_id)); - // Log the 
provided account keys and instruction input data. In the case of - // the no-op program, no account keys or input data are expected but real - // programs will have specific requirements so they can do their work. - msg!("Account keys and instruction input data:"); - sol_log_params(accounts, instruction_data); - - { - // Test - use std methods, unwrap - - // valid bytes, in a stack-allocated array - let sparkle_heart = [240, 159, 146, 150]; - let result_str = std::str::from_utf8(&sparkle_heart).unwrap(); - assert_eq!(4, result_str.len()); - assert_eq!("💖", result_str); - msg!(result_str); + // test_sol_alloc_free_no_longer_deployable calls this program with + // bpf_loader instead of bpf_loader_deprecated, so instruction_data isn't + // deserialized correctly and is empty. + match instruction_data.first() { + Some(&REALLOC) => { + let (bytes, _) = instruction_data[2..].split_at(std::mem::size_of::()); + let new_len = usize::from_le_bytes(bytes.try_into().unwrap()); + msg!("realloc to {}", new_len); + let account = &accounts[0]; + account.realloc(new_len, false)?; + assert_eq!(new_len, account.data_len()); + } + Some(&REALLOC_EXTEND_FROM_SLICE) => { + msg!("realloc extend from slice deprecated"); + let data = &instruction_data[1..]; + let account = &accounts[0]; + let prev_len = account.data_len(); + account.realloc(prev_len + data.len(), false)?; + account.data.borrow_mut()[prev_len..].copy_from_slice(data); + } + Some(&TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS) => { + msg!("DEPRECATED TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS"); + const ARGUMENT_INDEX: usize = 1; + const CALLEE_PROGRAM_INDEX: usize = 3; + let account = &accounts[ARGUMENT_INDEX]; + let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; + + let expected = { + let data = &instruction_data[1..]; + let prev_len = account.data_len(); + // when direct mapping is off, this will accidentally clobber + // whatever comes after the data slice (owner, executable, rent + // epoch etc). 
When direct mapping is on, you get an + // InvalidRealloc error. + account.realloc(prev_len + data.len(), false)?; + account.data.borrow_mut()[prev_len..].copy_from_slice(data); + account.data.borrow().to_vec() + }; + + let mut instruction_data = vec![TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED]; + instruction_data.extend_from_slice(&expected); + invoke( + &create_instruction( + *callee_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (callee_program_id, false, false), + ], + instruction_data, + ), + accounts, + ) + .unwrap(); + } + Some(&TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED) => { + msg!("DEPRECATED LOADER TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED"); + const ARGUMENT_INDEX: usize = 0; + let account = &accounts[ARGUMENT_INDEX]; + assert_eq!(*account.data.borrow(), &instruction_data[1..]); + } + _ => { + { + // Log the provided account keys and instruction input data. In the case of + // the no-op program, no account keys or input data are expected but real + // programs will have specific requirements so they can do their work. 
+ msg!("Account keys and instruction input data:"); + sol_log_params(accounts, instruction_data); + + // Test - use std methods, unwrap + + // valid bytes, in a stack-allocated array + let sparkle_heart = [240, 159, 146, 150]; + let result_str = std::str::from_utf8(&sparkle_heart).unwrap(); + assert_eq!(4, result_str.len()); + assert_eq!("💖", result_str); + msg!(result_str); + } + + { + // Test - struct return + + let s = return_sstruct(); + assert_eq!(s.x + s.y + s.z, 6); + } + + { + // Test - arch config + #[cfg(not(target_os = "solana"))] + panic!(); + } + } } - { - // Test - struct return - - let s = return_sstruct(); - assert_eq!(s.x + s.y + s.z, 6); - } + Ok(()) +} - { - // Test - arch config - #[cfg(not(target_os = "solana"))] - panic!(); +pub fn create_instruction( + program_id: Pubkey, + arguments: &[(&Pubkey, bool, bool)], + data: Vec, +) -> Instruction { + let accounts = arguments + .iter() + .map(|(key, is_writable, is_signer)| { + if *is_writable { + AccountMeta::new(**key, *is_signer) + } else { + AccountMeta::new_readonly(**key, *is_signer) + } + }) + .collect(); + Instruction { + program_id, + accounts, + data, } - - Ok(()) } #[cfg(test)] diff --git a/programs/sbf/rust/invoke/Cargo.toml b/programs/sbf/rust/invoke/Cargo.toml index 616beab7a49b30..66b07e500d897f 100644 --- a/programs/sbf/rust/invoke/Cargo.toml +++ b/programs/sbf/rust/invoke/Cargo.toml @@ -16,6 +16,7 @@ program = [] [dependencies] solana-program = { workspace = true } solana-sbf-rust-invoked = { workspace = true } +solana-sbf-rust-realloc = { workspace = true } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/sbf/rust/invoke/src/instructions.rs b/programs/sbf/rust/invoke/src/instructions.rs index db8be12dea5619..b335fb52f5b6b1 100644 --- a/programs/sbf/rust/invoke/src/instructions.rs +++ b/programs/sbf/rust/invoke/src/instructions.rs @@ -21,6 +21,24 @@ pub const TEST_RETURN_DATA_TOO_LARGE: u8 = 18; pub const TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER: u8 = 19; pub const 
TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE: u8 = 20; pub const TEST_MAX_ACCOUNT_INFOS_EXCEEDED: u8 = 21; +pub const TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE: u8 = 22; +pub const TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE_NESTED: u8 = 23; +pub const TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLER: u8 = 24; +pub const TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE_MOVING_DATA_POINTER: u8 = 25; +pub const TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE: u8 = 26; +pub const TEST_ALLOW_WRITE_AFTER_OWNERSHIP_CHANGE_TO_CALLER: u8 = 27; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS: u8 = 28; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED: u8 = 29; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLEE_GROWS: u8 = 30; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN: u8 = 31; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS: u8 = 32; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS_NESTED: u8 = 33; +pub const TEST_CPI_INVALID_KEY_POINTER: u8 = 34; +pub const TEST_CPI_INVALID_OWNER_POINTER: u8 = 35; +pub const TEST_CPI_INVALID_LAMPORTS_POINTER: u8 = 36; +pub const TEST_CPI_INVALID_DATA_POINTER: u8 = 37; +pub const TEST_CPI_CHANGE_ACCOUNT_DATA_MEMORY_ALLOCATION: u8 = 38; +pub const TEST_WRITE_ACCOUNT: u8 = 39; pub const MINT_INDEX: usize = 0; pub const ARGUMENT_INDEX: usize = 1; diff --git a/programs/sbf/rust/invoke/src/processor.rs b/programs/sbf/rust/invoke/src/processor.rs index dce6492ecefff0..2e1ee8cac9cc42 100644 --- a/programs/sbf/rust/invoke/src/processor.rs +++ b/programs/sbf/rust/invoke/src/processor.rs @@ -2,11 +2,13 @@ #![cfg(feature = "program")] #![allow(unreachable_code)] +#![allow(clippy::integer_arithmetic)] use { crate::instructions::*, solana_program::{ account_info::AccountInfo, + bpf_loader_deprecated, entrypoint::{ProgramResult, MAX_PERMITTED_DATA_INCREASE}, instruction::Instruction, msg, @@ -16,9 +18,11 @@ use { syscalls::{ MAX_CPI_ACCOUNT_INFOS, MAX_CPI_INSTRUCTION_ACCOUNTS, 
MAX_CPI_INSTRUCTION_DATA_LEN, }, - system_instruction, + system_instruction, system_program, }, solana_sbf_rust_invoked::instructions::*, + solana_sbf_rust_realloc::instructions::*, + std::{cell::RefCell, mem, rc::Rc, slice}, }; fn do_nested_invokes(num_nested_invokes: u64, accounts: &[AccountInfo]) -> ProgramResult { @@ -688,8 +692,614 @@ fn process_instruction( invoked_instruction.accounts[1].is_writable = true; invoke(&invoked_instruction, accounts)?; } - _ => panic!(), + TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE => { + msg!("TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE"); + invoke( + &create_instruction( + *program_id, + &[ + (program_id, false, false), + (accounts[ARGUMENT_INDEX].key, true, false), + ], + vec![ + TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE_NESTED, + 42, + 42, + 42, + ], + ), + accounts, + ) + .unwrap(); + let account = &accounts[ARGUMENT_INDEX]; + // this should cause the tx to fail since the callee changed ownership + unsafe { + *account + .data + .borrow_mut() + .get_unchecked_mut(instruction_data[1] as usize) = 42 + }; + } + TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE_NESTED => { + msg!("TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE_NESTED"); + let account = &accounts[ARGUMENT_INDEX]; + account.data.borrow_mut().fill(0); + account.assign(&system_program::id()); + } + TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLER => { + msg!("TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLER"); + let account = &accounts[ARGUMENT_INDEX]; + let invoked_program_id = accounts[INVOKED_PROGRAM_INDEX].key; + account.data.borrow_mut().fill(0); + account.assign(invoked_program_id); + invoke( + &create_instruction( + *invoked_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (invoked_program_id, false, false), + ], + vec![RETURN_OK], + ), + accounts, + ) + .unwrap(); + // this should cause the tx to failsince invoked_program_id now owns + // the account + unsafe { + *account + .data + .borrow_mut() + 
.get_unchecked_mut(instruction_data[1] as usize) = 42 + }; + } + TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE_MOVING_DATA_POINTER => { + msg!("TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE_MOVING_DATA_POINTER"); + const INVOKE_PROGRAM_INDEX: usize = 3; + const REALLOC_PROGRAM_INDEX: usize = 4; + let account = &accounts[ARGUMENT_INDEX]; + let realloc_program_id = accounts[REALLOC_PROGRAM_INDEX].key; + let invoke_program_id = accounts[INVOKE_PROGRAM_INDEX].key; + account.realloc(0, false).unwrap(); + account.assign(realloc_program_id); + + // Place a RcBox> in the account data. This + // allows us to test having CallerAccount::ref_to_len_in_vm in an + // account region. + let rc_box_addr = + account.data.borrow_mut().as_mut_ptr() as *mut RcBox>; + let rc_box_size = mem::size_of::>>(); + unsafe { + std::ptr::write( + rc_box_addr, + RcBox { + strong: 1, + weak: 0, + // We're testing what happens if we make CPI update the + // slice length after we put the slice in the account + // address range. To do so, we need to move the data + // pointer past the RcBox or CPI will clobber the length + // change when it copies the callee's account data back + // into the caller's account data + // https://github.com/solana-labs/solana/blob/fa28958bd69054d1c2348e0a731011e93d44d7af/programs/bpf_loader/src/syscalls/cpi.rs#L1487 + value: RefCell::new(slice::from_raw_parts_mut( + account.data.borrow_mut().as_mut_ptr().add(rc_box_size), + 0, + )), + }, + ); + } + + // CPI now will update the serialized length in the wrong place, + // since we moved the account data slice. 
To hit the corner case we + // want to hit, we'll need to update the serialized length manually + // or during deserialize_parameters() we'll get + // AccountDataSizeChanged + let serialized_len_ptr = + unsafe { account.data.borrow_mut().as_mut_ptr().offset(-8) as *mut u64 }; + unsafe { + std::ptr::write( + &account.data as *const _ as usize as *mut Rc>, + Rc::from_raw(((rc_box_addr as usize) + mem::size_of::() * 2) as *mut _), + ); + } + + let mut instruction_data = vec![REALLOC, 0]; + instruction_data.extend_from_slice(&rc_box_size.to_le_bytes()); + + // check that the account is empty before we CPI + assert_eq!(account.data_len(), 0); + + invoke( + &create_instruction( + *realloc_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (realloc_program_id, false, false), + (invoke_program_id, false, false), + ], + instruction_data.to_vec(), + ), + accounts, + ) + .unwrap(); + + // verify that CPI did update `ref_to_len_in_vm` + assert_eq!(account.data_len(), rc_box_size); + + // update the serialized length so we don't error out early with AccountDataSizeChanged + unsafe { *serialized_len_ptr = rc_box_size as u64 }; + + // hack to avoid dropping the RcBox, which is supposed to be on the + // heap but we put it into account data. If we don't do this, + // dropping the Rc will cause + // global_deallocator.dealloc(rc_box_addr) which is invalid and + // happens to write a poison value into the account. 
+ unsafe { + std::ptr::write( + &account.data as *const _ as usize as *mut Rc>, + Rc::new(RefCell::new(&mut [])), + ); + } + } + TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE => { + msg!("TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE"); + const INVOKE_PROGRAM_INDEX: usize = 3; + const REALLOC_PROGRAM_INDEX: usize = 4; + let account = &accounts[ARGUMENT_INDEX]; + let target_account_index = instruction_data[1] as usize; + let target_account = &accounts[target_account_index]; + let realloc_program_id = accounts[REALLOC_PROGRAM_INDEX].key; + let invoke_program_id = accounts[INVOKE_PROGRAM_INDEX].key; + account.realloc(0, false).unwrap(); + account.assign(realloc_program_id); + target_account.realloc(0, false).unwrap(); + target_account.assign(realloc_program_id); + + let rc_box_addr = + target_account.data.borrow_mut().as_mut_ptr() as *mut RcBox>; + let rc_box_size = mem::size_of::>>(); + unsafe { + std::ptr::write( + rc_box_addr, + RcBox { + strong: 1, + weak: 0, + // The difference with + // TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE_MOVING_DATA_POINTER + // is that we don't move the data pointer past the + // RcBox. This is needed to avoid the "Invalid account + // info pointer" check when direct mapping is enabled. + // This also means we don't need to update the + // serialized len like we do in the other test. + value: RefCell::new(slice::from_raw_parts_mut( + account.data.borrow_mut().as_mut_ptr(), + 0, + )), + }, + ); + } + + let serialized_len_ptr = + unsafe { account.data.borrow_mut().as_mut_ptr().offset(-8) as *mut u64 }; + // Place a RcBox> in the account data. This + // allows us to test having CallerAccount::ref_to_len_in_vm in an + // account region. 
+ unsafe { + std::ptr::write( + &account.data as *const _ as usize as *mut Rc>, + Rc::from_raw(((rc_box_addr as usize) + mem::size_of::() * 2) as *mut _), + ); + } + + let mut instruction_data = vec![REALLOC, 0]; + instruction_data.extend_from_slice(&rc_box_size.to_le_bytes()); + + // check that the account is empty before we CPI + assert_eq!(account.data_len(), 0); + + invoke( + &create_instruction( + *realloc_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (target_account.key, true, false), + (realloc_program_id, false, false), + (invoke_program_id, false, false), + ], + instruction_data.to_vec(), + ), + accounts, + ) + .unwrap(); + + unsafe { *serialized_len_ptr = rc_box_size as u64 }; + // hack to avoid dropping the RcBox, which is supposed to be on the + // heap but we put it into account data. If we don't do this, + // dropping the Rc will cause + // global_deallocator.dealloc(rc_box_addr) which is invalid and + // happens to write a poison value into the account. 
+ unsafe { + std::ptr::write( + &account.data as *const _ as usize as *mut Rc>, + Rc::new(RefCell::new(&mut [])), + ); + } + } + TEST_ALLOW_WRITE_AFTER_OWNERSHIP_CHANGE_TO_CALLER => { + msg!("TEST_ALLOW_WRITE_AFTER_OWNERSHIP_CHANGE_TO_CALLER"); + const INVOKE_PROGRAM_INDEX: usize = 3; + let account = &accounts[ARGUMENT_INDEX]; + let invoked_program_id = accounts[INVOKED_PROGRAM_INDEX].key; + let invoke_program_id = accounts[INVOKE_PROGRAM_INDEX].key; + invoke( + &create_instruction( + *invoked_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (invoked_program_id, false, false), + (invoke_program_id, false, false), + ], + vec![ASSIGN_ACCOUNT_TO_CALLER], + ), + accounts, + ) + .unwrap(); + // this should succeed since the callee gave us ownership of the + // account + unsafe { + *account + .data + .borrow_mut() + .get_unchecked_mut(instruction_data[1] as usize) = 42 + }; + } + TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS => { + msg!("TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS"); + const CALLEE_PROGRAM_INDEX: usize = 3; + let account = &accounts[ARGUMENT_INDEX]; + let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; + + let expected = { + let data = &instruction_data[1..]; + let prev_len = account.data_len(); + account.realloc(prev_len + data.len(), false)?; + account.data.borrow_mut()[prev_len..].copy_from_slice(data); + account.data.borrow().to_vec() + }; + + let mut instruction_data = vec![TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED]; + instruction_data.extend_from_slice(&expected); + invoke( + &create_instruction( + *callee_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (callee_program_id, false, false), + ], + instruction_data, + ), + accounts, + ) + .unwrap(); + } + TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED => { + msg!("TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED"); + const ARGUMENT_INDEX: usize = 0; + let account = &accounts[ARGUMENT_INDEX]; + assert_eq!(*account.data.borrow(), &instruction_data[1..]); + } + 
TEST_CPI_ACCOUNT_UPDATE_CALLEE_GROWS => { + msg!("TEST_CPI_ACCOUNT_UPDATE_CALLEE_GROWS"); + const REALLOC_PROGRAM_INDEX: usize = 2; + const INVOKE_PROGRAM_INDEX: usize = 3; + let account = &accounts[ARGUMENT_INDEX]; + let realloc_program_id = accounts[REALLOC_PROGRAM_INDEX].key; + let realloc_program_owner = accounts[REALLOC_PROGRAM_INDEX].owner; + let invoke_program_id = accounts[INVOKE_PROGRAM_INDEX].key; + let mut instruction_data = instruction_data.to_vec(); + let mut expected = account.data.borrow().to_vec(); + expected.extend_from_slice(&instruction_data[1..]); + instruction_data[0] = REALLOC_EXTEND_FROM_SLICE; + invoke( + &create_instruction( + *realloc_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (realloc_program_id, false, false), + (invoke_program_id, false, false), + ], + instruction_data.to_vec(), + ), + accounts, + ) + .unwrap(); + + if !bpf_loader_deprecated::check_id(realloc_program_owner) { + assert_eq!(&*account.data.borrow(), &expected); + } + } + TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN => { + msg!("TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN"); + const REALLOC_PROGRAM_INDEX: usize = 2; + const INVOKE_PROGRAM_INDEX: usize = 3; + let account = &accounts[ARGUMENT_INDEX]; + let realloc_program_id = accounts[REALLOC_PROGRAM_INDEX].key; + let realloc_program_owner = accounts[REALLOC_PROGRAM_INDEX].owner; + let invoke_program_id = accounts[INVOKE_PROGRAM_INDEX].key; + let new_len = usize::from_le_bytes(instruction_data[1..9].try_into().unwrap()); + let prev_len = account.data_len(); + let expected = account.data.borrow()[..new_len].to_vec(); + let mut instruction_data = vec![REALLOC, 0]; + instruction_data.extend_from_slice(&new_len.to_le_bytes()); + invoke( + &create_instruction( + *realloc_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (realloc_program_id, false, false), + (invoke_program_id, false, false), + ], + instruction_data, + ), + accounts, + ) + .unwrap(); + 
+ // deserialize_parameters_unaligned predates realloc support, and + // hardcodes the account data length to the original length. + if !bpf_loader_deprecated::check_id(realloc_program_owner) { + assert_eq!(&*account.data.borrow(), &expected); + assert_eq!( + unsafe { + slice::from_raw_parts( + account.data.borrow().as_ptr().add(new_len), + prev_len - new_len, + ) + }, + &vec![0; prev_len - new_len] + ); + } + } + TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS => { + msg!("TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS"); + const INVOKE_PROGRAM_INDEX: usize = 3; + const SENTINEL: u8 = 42; + let account = &accounts[ARGUMENT_INDEX]; + let invoke_program_id = accounts[INVOKE_PROGRAM_INDEX].key; + + let prev_data = { + let data = &instruction_data[9..]; + let prev_len = account.data_len(); + account.realloc(prev_len + data.len(), false)?; + account.data.borrow_mut()[prev_len..].copy_from_slice(data); + unsafe { + // write a sentinel value just outside the account data to + // check that when CPI zeroes the realloc region it doesn't + // zero too much + *account + .data + .borrow_mut() + .as_mut_ptr() + .add(prev_len + data.len()) = SENTINEL; + }; + account.data.borrow().to_vec() + }; + + let mut expected = account.data.borrow().to_vec(); + let new_len = usize::from_le_bytes(instruction_data[1..9].try_into().unwrap()); + expected.extend_from_slice(&instruction_data[9..]); + let mut instruction_data = + vec![TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS_NESTED]; + instruction_data.extend_from_slice(&new_len.to_le_bytes()); + invoke( + &create_instruction( + *invoke_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (invoke_program_id, false, false), + ], + instruction_data, + ), + accounts, + ) + .unwrap(); + + assert_eq!(*account.data.borrow(), &prev_data[..new_len]); + assert_eq!( + unsafe { + slice::from_raw_parts( + account.data.borrow().as_ptr().add(new_len), + prev_data.len() - new_len, + ) + }, + &vec![0; prev_data.len() - new_len] + 
); + assert_eq!( + unsafe { *account.data.borrow().as_ptr().add(prev_data.len()) }, + SENTINEL + ); + } + TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS_NESTED => { + msg!("TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS_NESTED"); + const ARGUMENT_INDEX: usize = 0; + let account = &accounts[ARGUMENT_INDEX]; + let new_len = usize::from_le_bytes(instruction_data[1..9].try_into().unwrap()); + account.realloc(new_len, false).unwrap(); + } + TEST_CPI_INVALID_KEY_POINTER => { + msg!("TEST_CPI_INVALID_KEY_POINTER"); + const CALLEE_PROGRAM_INDEX: usize = 2; + let account = &accounts[ARGUMENT_INDEX]; + let key = *account.key; + let key = &key as *const _ as usize; + unsafe { + *mem::transmute::<_, *mut *const Pubkey>(&account.key) = key as *const Pubkey; + } + let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; + + invoke( + &create_instruction( + *callee_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (callee_program_id, false, false), + ], + vec![], + ), + accounts, + ) + .unwrap(); + } + TEST_CPI_INVALID_LAMPORTS_POINTER => { + msg!("TEST_CPI_INVALID_LAMPORTS_POINTER"); + const CALLEE_PROGRAM_INDEX: usize = 2; + let account = &accounts[ARGUMENT_INDEX]; + let mut lamports = account.lamports(); + account + .lamports + .replace(unsafe { mem::transmute(&mut lamports) }); + let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; + + invoke( + &create_instruction( + *callee_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (callee_program_id, false, false), + ], + vec![], + ), + accounts, + ) + .unwrap(); + } + TEST_CPI_INVALID_OWNER_POINTER => { + msg!("TEST_CPI_INVALID_OWNER_POINTER"); + const CALLEE_PROGRAM_INDEX: usize = 2; + let account = &accounts[ARGUMENT_INDEX]; + let owner = account.owner as *const _ as usize + 1; + unsafe { + *mem::transmute::<_, *mut *const Pubkey>(&account.owner) = owner as *const Pubkey; + } + let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; + + invoke( + &create_instruction( + 
*callee_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (callee_program_id, false, false), + ], + vec![], + ), + accounts, + ) + .unwrap(); + } + TEST_CPI_INVALID_DATA_POINTER => { + msg!("TEST_CPI_INVALID_DATA_POINTER"); + const CALLEE_PROGRAM_INDEX: usize = 2; + let account = &accounts[ARGUMENT_INDEX]; + let data = unsafe { + slice::from_raw_parts_mut(account.data.borrow_mut()[1..].as_mut_ptr(), 0) + }; + account.data.replace(data); + let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; + + invoke( + &create_instruction( + *callee_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (callee_program_id, false, false), + ], + vec![], + ), + accounts, + ) + .unwrap(); + } + TEST_CPI_CHANGE_ACCOUNT_DATA_MEMORY_ALLOCATION => { + msg!("TEST_CPI_CHANGE_ACCOUNT_DATA_MEMORY_ALLOCATION"); + const CALLEE_PROGRAM_INDEX: usize = 2; + let account = &accounts[ARGUMENT_INDEX]; + let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; + let original_data_len = account.data_len(); + + // Initial data is all [0xFF; 20] + assert_eq!(&*account.data.borrow(), &[0xFF; 20]); + + // Realloc to [0xFE; 10] + invoke( + &create_instruction( + *callee_program_id, + &[ + (account.key, true, false), + (callee_program_id, false, false), + ], + vec![0xFE; 10], + ), + accounts, + ) + .unwrap(); + + // Check that [10..20] is zeroed + let new_len = account.data_len(); + assert_eq!(&*account.data.borrow(), &[0xFE; 10]); + assert_eq!( + unsafe { + slice::from_raw_parts( + account.data.borrow().as_ptr().add(new_len), + original_data_len - new_len, + ) + }, + &vec![0; original_data_len - new_len] + ); + + // Realloc to [0xFD; 5] + invoke( + &create_instruction( + *callee_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (callee_program_id, false, false), + ], + vec![0xFD; 5], + ), + accounts, + ) + .unwrap(); + + // Check that [5..20] is zeroed + let new_len = account.data_len(); + assert_eq!(&*account.data.borrow(), &[0xFD; 5]); + assert_eq!( + 
unsafe { + slice::from_raw_parts( + account.data.borrow().as_ptr().add(new_len), + original_data_len - new_len, + ) + }, + &vec![0; original_data_len - new_len] + ); + } + TEST_WRITE_ACCOUNT => { + msg!("TEST_WRITE_ACCOUNT"); + let target_account_index = instruction_data[1] as usize; + let target_account = &accounts[target_account_index]; + let byte_index = usize::from_le_bytes(instruction_data[2..10].try_into().unwrap()); + target_account.data.borrow_mut()[byte_index] = instruction_data[10]; + } + _ => panic!("unexpected program data"), } Ok(()) } + +#[repr(C)] +struct RcBox { + strong: usize, + weak: usize, + value: T, +} diff --git a/programs/sbf/rust/invoked/src/instructions.rs b/programs/sbf/rust/invoked/src/instructions.rs index 9f98da7d92aa52..46a4f1d9f2f45c 100644 --- a/programs/sbf/rust/invoked/src/instructions.rs +++ b/programs/sbf/rust/invoked/src/instructions.rs @@ -19,6 +19,7 @@ pub const VERIFY_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE: u8 = 10; pub const WRITE_ACCOUNT: u8 = 11; pub const CREATE_AND_INIT: u8 = 12; pub const SET_RETURN_DATA: u8 = 13; +pub const ASSIGN_ACCOUNT_TO_CALLER: u8 = 14; pub fn create_instruction( program_id: Pubkey, diff --git a/programs/sbf/rust/invoked/src/processor.rs b/programs/sbf/rust/invoked/src/processor.rs index 73bf25cac79bfc..52d02dc99a6c21 100644 --- a/programs/sbf/rust/invoked/src/processor.rs +++ b/programs/sbf/rust/invoked/src/processor.rs @@ -297,6 +297,14 @@ fn process_instruction( set_return_data(b"Set by invoked"); } + ASSIGN_ACCOUNT_TO_CALLER => { + msg!("Assigning account to caller"); + const ARGUMENT_INDEX: usize = 0; + const CALLER_PROGRAM_ID: usize = 2; + let account = &accounts[ARGUMENT_INDEX]; + let caller_program_id = accounts[CALLER_PROGRAM_ID].key; + account.assign(caller_program_id); + } _ => panic!(), } diff --git a/programs/sbf/rust/realloc/src/instructions.rs b/programs/sbf/rust/realloc/src/instructions.rs index 831a69029ed166..e15ba5d48c5e21 100644 --- 
a/programs/sbf/rust/realloc/src/instructions.rs +++ b/programs/sbf/rust/realloc/src/instructions.rs @@ -16,6 +16,7 @@ pub const CHECK: u8 = 8; pub const ZERO_INIT: u8 = 9; pub const REALLOC_EXTEND_AND_UNDO: u8 = 10; pub const EXTEND_AND_WRITE_U64: u8 = 11; +pub const REALLOC_EXTEND_FROM_SLICE: u8 = 12; pub fn realloc(program_id: &Pubkey, address: &Pubkey, size: usize, bump: &mut u8) -> Instruction { let mut instruction_data = vec![REALLOC, *bump]; diff --git a/programs/sbf/rust/realloc/src/processor.rs b/programs/sbf/rust/realloc/src/processor.rs index 782eb1cd505519..172ed7498f88c8 100644 --- a/programs/sbf/rust/realloc/src/processor.rs +++ b/programs/sbf/rust/realloc/src/processor.rs @@ -1,6 +1,7 @@ //! Example Rust-based SBF realloc test program #![cfg(feature = "program")] +#![allow(clippy::integer_arithmetic)] extern crate solana_program; use { @@ -184,6 +185,13 @@ fn process_instruction( } } } + REALLOC_EXTEND_FROM_SLICE => { + msg!("realloc extend from slice"); + let data = &instruction_data[1..]; + let prev_len = account.data_len(); + account.realloc(prev_len.saturating_add(data.len()), false)?; + account.data.borrow_mut()[prev_len..].copy_from_slice(data); + } _ => panic!(), } diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 6acb18e4793c6e..b690ea2ffef434 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -347,9 +347,9 @@ fn test_program_sbf_sanity() { let instruction = Instruction::new_with_bytes(program_id, &[1], account_metas); let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); if program.1 { - assert!(result.is_ok()); + assert!(result.is_ok(), "{result:?}"); } else { - assert!(result.is_err()); + assert!(result.is_err(), "{result:?}"); } } } @@ -389,7 +389,7 @@ fn test_program_sbf_loader_deprecated() { .advance_slot(1, &Pubkey::default()) .expect("Failed to advance the slot"); let account_metas = vec![AccountMeta::new(mint_keypair.pubkey(), true)]; - let 
instruction = Instruction::new_with_bytes(program_id, &[1], account_metas); + let instruction = Instruction::new_with_bytes(program_id, &[255], account_metas); let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); assert!(result.is_ok()); } @@ -437,7 +437,7 @@ fn test_sol_alloc_free_no_longer_deployable() { Message::new( &[Instruction::new_with_bytes( program_address, - &[1], + &[255], vec![AccountMeta::new(mint_keypair.pubkey(), true)], )], Some(&mint_keypair.pubkey()), @@ -3939,5 +3939,640 @@ fn test_program_sbf_inner_instruction_alignment_checks() { instruction.data[0] += 1; let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction.clone()); - assert!(result.is_ok()); + assert!(result.is_ok(), "{result:?}"); +} + +#[test] +#[cfg(feature = "sbf_rust")] +fn test_cpi_account_ownership_writability() { + solana_logger::setup(); + + for direct_mapping in [false, true] { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(100_123_456_789); + let mut bank = Bank::new_for_tests(&genesis_config); + let mut feature_set = FeatureSet::all_enabled(); + if !direct_mapping { + feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); + } + bank.feature_set = Arc::new(feature_set); + let bank = Arc::new(bank); + let mut bank_client = BankClient::new_shared(bank); + + let invoke_program_id = load_program( + &bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_sbf_rust_invoke", + ); + + let invoked_program_id = load_program( + &bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_sbf_rust_invoked", + ); + + let (bank, realloc_program_id) = load_program_and_advance_slot( + &mut bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_sbf_rust_realloc", + ); + + let account_keypair = Keypair::new(); + + let mint_pubkey = mint_keypair.pubkey(); + let account_metas = vec![ + AccountMeta::new(mint_pubkey, true), + 
AccountMeta::new(account_keypair.pubkey(), false), + AccountMeta::new_readonly(invoked_program_id, false), + AccountMeta::new_readonly(invoke_program_id, false), + AccountMeta::new_readonly(realloc_program_id, false), + ]; + + for (account_size, byte_index) in [ + (0, 0), // first realloc byte + (0, MAX_PERMITTED_DATA_INCREASE as u8), // last realloc byte + (2, 0), // first data byte + (2, 1), // last data byte + (2, 3), // first realloc byte + (2, 2 + MAX_PERMITTED_DATA_INCREASE as u8), // last realloc byte + ] { + for instruction_id in [ + TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE, + TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLER, + ] { + bank.register_recent_blockhash(&Hash::new_unique()); + let account = AccountSharedData::new(42, account_size, &invoke_program_id); + bank.store_account(&account_keypair.pubkey(), &account); + + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &[instruction_id, byte_index, 42, 42], + account_metas.clone(), + ); + + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + + if (byte_index as usize) < account_size || direct_mapping { + assert_eq!( + result.unwrap_err().unwrap(), + TransactionError::InstructionError( + 0, + InstructionError::ExternalAccountDataModified + ) + ); + } else { + // without direct mapping, changes to the realloc padding + // outside the account length are ignored + assert!(result.is_ok(), "{result:?}"); + } + } + } + // Test that the CPI code that updates `ref_to_len_in_vm` fails if we + // make it write to an invalid location. This is the first variant which + // correctly triggers ExternalAccountDataModified when direct mapping is + // disabled. When direct mapping is enabled this tests fails early + // because we move the account data pointer. + // TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE is able to make more + // progress when direct mapping is on. 
+ let account = AccountSharedData::new(42, 0, &invoke_program_id); + bank.store_account(&account_keypair.pubkey(), &account); + let instruction_data = vec![ + TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE_MOVING_DATA_POINTER, + 42, + 42, + 42, + ]; + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert_eq!( + result.unwrap_err().unwrap(), + if direct_mapping { + // We move the data pointer, direct mapping doesn't allow it + // anymore so it errors out earlier. See + // test_cpi_invalid_account_info_pointers. + TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete) + } else { + // We managed to make CPI write into the account data, but the + // usual checks still apply and we get an error. + TransactionError::InstructionError(0, InstructionError::ExternalAccountDataModified) + } + ); + + // We're going to try and make CPI write ref_to_len_in_vm into a 2nd + // account, so we add an extra one here. + let account2_keypair = Keypair::new(); + let mut account_metas = account_metas.clone(); + account_metas.push(AccountMeta::new(account2_keypair.pubkey(), false)); + + for target_account in [1, account_metas.len() as u8 - 1] { + // Similar to the test above where we try to make CPI write into account + // data. This variant is for when direct mapping is enabled. 
+ let account = AccountSharedData::new(42, 0, &invoke_program_id); + bank.store_account(&account_keypair.pubkey(), &account); + let account = AccountSharedData::new(42, 0, &invoke_program_id); + bank.store_account(&account2_keypair.pubkey(), &account); + let instruction_data = vec![ + TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE, + target_account, + 42, + 42, + ]; + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let message = Message::new(&[instruction], Some(&mint_pubkey)); + let tx = Transaction::new(&[&mint_keypair], message.clone(), bank.last_blockhash()); + let (result, _, logs) = process_transaction_and_record_inner(&bank, tx); + if direct_mapping { + assert_eq!( + result.unwrap_err(), + TransactionError::InstructionError( + 0, + InstructionError::ProgramFailedToComplete + ) + ); + // We haven't moved the data pointer, but ref_to_len_vm _is_ in + // the account data vm range and that's not allowed either. + assert!( + logs.iter().any(|log| log.contains("Invalid pointer")), + "{logs:?}" + ); + } else { + // we expect this to succeed as after updating `ref_to_len_in_vm`, + // CPI will sync the actual account data between the callee and the + // caller, _always_ writing over the location pointed by + // `ref_to_len_in_vm`. To verify this, we check that the account + // data is in fact all zeroes like it is in the callee. + result.unwrap(); + let account = bank.get_account(&account_keypair.pubkey()).unwrap(); + assert_eq!(account.data(), vec![0; 40]); + } + } + } +} + +#[test] +#[cfg(feature = "sbf_rust")] +fn test_cpi_account_data_updates() { + solana_logger::setup(); + + for direct_mapping in [false, true] { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(100_123_456_789); + let mut bank = Bank::new_for_tests(&genesis_config); + let mut feature_set = FeatureSet::all_enabled(); + if !direct_mapping { + feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); + } + bank.feature_set = Arc::new(feature_set); + let bank = Arc::new(bank); + let mut bank_client = BankClient::new_shared(bank); + + let invoke_program_id = load_program( + &bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_sbf_rust_invoke", + ); + + let (bank, realloc_program_id) = load_program_and_advance_slot( + &mut bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_sbf_rust_realloc", + ); + + let account_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let account_metas = vec![ + AccountMeta::new(mint_pubkey, true), + AccountMeta::new(account_keypair.pubkey(), false), + AccountMeta::new_readonly(realloc_program_id, false), + AccountMeta::new_readonly(invoke_program_id, false), + ]; + + // This tests the case where a caller extends an account beyond the original + // data length. The callee should see the extended data (asserted in the + // callee program, not here). + let mut account = AccountSharedData::new(42, 0, &invoke_program_id); + account.set_data(b"foo".to_vec()); + bank.store_account(&account_keypair.pubkey(), &account); + let mut instruction_data = vec![TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS]; + instruction_data.extend_from_slice(b"bar"); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok(), "{result:?}"); + let account = bank.get_account(&account_keypair.pubkey()).unwrap(); + // "bar" here was copied from the realloc region + assert_eq!(account.data(), b"foobar"); + + // This tests the case where a callee extends an account beyond the original + // data length. 
The caller should see the extended data where the realloc + // region contains the new data. In this test the callee owns the account, + // the caller can't write but the CPI glue still updates correctly. + let mut account = AccountSharedData::new(42, 0, &realloc_program_id); + account.set_data(b"foo".to_vec()); + bank.store_account(&account_keypair.pubkey(), &account); + let mut instruction_data = vec![TEST_CPI_ACCOUNT_UPDATE_CALLEE_GROWS]; + instruction_data.extend_from_slice(b"bar"); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + result.unwrap(); + let account = bank.get_account(&account_keypair.pubkey()).unwrap(); + // "bar" here was copied from the realloc region + assert_eq!(account.data(), b"foobar"); + + // This tests the case where a callee shrinks an account, the caller data + // slice must be truncated accordingly and post_len..original_data_len must + // be zeroed (zeroing is checked in the invoked program not here). Same as + // above, the callee owns the account but the changes are still reflected in + // the caller even if things are readonly from the caller's POV. 
+ let mut account = AccountSharedData::new(42, 0, &realloc_program_id); + account.set_data(b"foobar".to_vec()); + bank.store_account(&account_keypair.pubkey(), &account); + let mut instruction_data = + vec![TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN]; + instruction_data.extend_from_slice(4usize.to_le_bytes().as_ref()); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok(), "{result:?}"); + let account = bank.get_account(&account_keypair.pubkey()).unwrap(); + assert_eq!(account.data(), b"foob"); + + // This tests the case where the program extends an account, then calls + // itself and in the inner call it shrinks the account to a size that is + // still larger than the original size. The account data must be set to the + // correct value in the caller frame, and the realloc region must be zeroed + // (again tested in the invoked program). + let mut account = AccountSharedData::new(42, 0, &invoke_program_id); + account.set_data(b"foo".to_vec()); + bank.store_account(&account_keypair.pubkey(), &account); + let mut instruction_data = vec![TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS]; + // realloc to "foobazbad" then shrink to "foobazb" + instruction_data.extend_from_slice(7usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(b"bazbad"); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok(), "{result:?}"); + let account = bank.get_account(&account_keypair.pubkey()).unwrap(); + assert_eq!(account.data(), b"foobazb"); + + // Similar to the test above, but this time the nested invocation shrinks to + // _below_ the original data length. 
Both the spare capacity in the account + // data _end_ the realloc region must be zeroed. + let mut account = AccountSharedData::new(42, 0, &invoke_program_id); + account.set_data(b"foo".to_vec()); + bank.store_account(&account_keypair.pubkey(), &account); + let mut instruction_data = vec![TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS]; + // realloc to "foobazbad" then shrink to "f" + instruction_data.extend_from_slice(1usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(b"bazbad"); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok(), "{result:?}"); + let account = bank.get_account(&account_keypair.pubkey()).unwrap(); + assert_eq!(account.data(), b"f"); + } +} + +#[test] +#[cfg(feature = "sbf_rust")] +fn test_cpi_deprecated_loader_realloc() { + solana_logger::setup(); + + for direct_mapping in [false, true] { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(100_123_456_789); + let mut bank = Bank::new_for_tests(&genesis_config); + let mut feature_set = FeatureSet::all_enabled(); + if !direct_mapping { + feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); + } + bank.feature_set = Arc::new(feature_set); + let bank = Arc::new(bank); + + let deprecated_program_id = create_program( + &bank, + &bpf_loader_deprecated::id(), + "solana_sbf_rust_deprecated_loader", + ); + + let mut bank_client = BankClient::new_shared(bank); + + let (bank, invoke_program_id) = load_program_and_advance_slot( + &mut bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_sbf_rust_invoke", + ); + + let account_keypair = Keypair::new(); + + let mint_pubkey = mint_keypair.pubkey(); + let account_metas = vec![ + AccountMeta::new(mint_pubkey, true), + AccountMeta::new(account_keypair.pubkey(), false), + AccountMeta::new_readonly(deprecated_program_id, false), + AccountMeta::new_readonly(deprecated_program_id, false), + AccountMeta::new_readonly(invoke_program_id, false), + ]; + + // If a bpf_loader_deprecated program extends an account, the callee + // accidentally sees the extended data when direct mapping is off, but + // direct mapping fixes the issue + let mut account = AccountSharedData::new(42, 0, &deprecated_program_id); + account.set_data(b"foo".to_vec()); + bank.store_account(&account_keypair.pubkey(), &account); + let mut instruction_data = vec![TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS]; + instruction_data.extend_from_slice(b"bar"); + let instruction = Instruction::new_with_bytes( + deprecated_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + // when direct mapping is off, the realloc will accidentally clobber + // whatever comes after the data slice (owner, executable, rent epoch + // etc). When direct mapping is on, you get an InvalidRealloc error. 
+ if direct_mapping { + assert_eq!( + result.unwrap_err().unwrap(), + TransactionError::InstructionError(0, InstructionError::InvalidRealloc) + ); + } else { + assert_eq!( + result.unwrap_err().unwrap(), + TransactionError::InstructionError(0, InstructionError::ModifiedProgramId) + ); + } + + // check that if a bpf_loader_deprecated program extends an account, the + // extended data is ignored + let mut account = AccountSharedData::new(42, 0, &deprecated_program_id); + account.set_data(b"foo".to_vec()); + bank.store_account(&account_keypair.pubkey(), &account); + let mut instruction_data = vec![TEST_CPI_ACCOUNT_UPDATE_CALLEE_GROWS]; + instruction_data.extend_from_slice(b"bar"); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok(), "{result:?}"); + let account = bank.get_account(&account_keypair.pubkey()).unwrap(); + assert_eq!(account.data(), b"foo"); + + // check that if a bpf_loader_deprecated program truncates an account, + // the caller doesn't see the truncation + let mut account = AccountSharedData::new(42, 0, &deprecated_program_id); + account.set_data(b"foobar".to_vec()); + bank.store_account(&account_keypair.pubkey(), &account); + let mut instruction_data = + vec![TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN]; + instruction_data.extend_from_slice(4usize.to_le_bytes().as_ref()); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok(), "{result:?}"); + let account = bank.get_account(&account_keypair.pubkey()).unwrap(); + assert_eq!(account.data(), b"foobar"); + } +} + +#[test] +#[cfg(feature = "sbf_rust")] +fn test_cpi_change_account_data_memory_allocation() { + use 
solana_program_runtime::{declare_process_instruction, loaded_programs::LoadedProgram}; + + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(100_123_456_789); + let mut bank = Bank::new_for_tests(&genesis_config); + let feature_set = FeatureSet::all_enabled(); + bank.feature_set = Arc::new(feature_set); + + declare_process_instruction!(process_instruction, 42, |invoke_context| { + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + let instruction_data = instruction_context.get_instruction_data(); + + let index_in_transaction = + instruction_context.get_index_of_instruction_account_in_transaction(0)?; + + let mut account = transaction_context + .accounts() + .get(index_in_transaction) + .unwrap() + .borrow_mut(); + + // Test changing the account data both in place and by changing the + // underlying vector. CPI will have to detect the vector change and + // update the corresponding memory region. In both cases CPI will have + // to zero the spare bytes correctly. 
+ if instruction_data[0] == 0xFE { + account.set_data(instruction_data.to_vec()); + } else { + account.set_data_from_slice(instruction_data); + } + + Ok(()) + }); + + let builtin_program_id = Pubkey::new_unique(); + bank.add_builtin( + builtin_program_id, + "test_cpi_change_account_data_memory_allocation_builtin".to_string(), + LoadedProgram::new_builtin(0, 42, process_instruction), + ); + + let bank = Arc::new(bank); + let mut bank_client = BankClient::new_shared(bank); + + let (bank, invoke_program_id) = load_program_and_advance_slot( + &mut bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_sbf_rust_invoke", + ); + + let account_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let account_metas = vec![ + AccountMeta::new(mint_pubkey, true), + AccountMeta::new(account_keypair.pubkey(), false), + AccountMeta::new_readonly(builtin_program_id, false), + AccountMeta::new_readonly(invoke_program_id, false), + ]; + + let mut account = AccountSharedData::new(42, 20, &builtin_program_id); + account.set_data(vec![0xFF; 20]); + bank.store_account(&account_keypair.pubkey(), &account); + let mut instruction_data = vec![TEST_CPI_CHANGE_ACCOUNT_DATA_MEMORY_ALLOCATION]; + instruction_data.extend_from_slice(builtin_program_id.as_ref()); + let instruction = + Instruction::new_with_bytes(invoke_program_id, &instruction_data, account_metas.clone()); + + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert!(result.is_ok(), "{result:?}"); +} + +#[test] +#[cfg(feature = "sbf_rust")] +fn test_cpi_invalid_account_info_pointers() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(100_123_456_789); + let mut bank = Bank::new_for_tests(&genesis_config); + let feature_set = FeatureSet::all_enabled(); + bank.feature_set = Arc::new(feature_set); + let bank = Arc::new(bank); + let mut bank_client = BankClient::new_shared(bank); + + let c_invoke_program_id = + load_program(&bank_client, &bpf_loader::id(), &mint_keypair, "invoke"); + + let (bank, invoke_program_id) = load_program_and_advance_slot( + &mut bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_sbf_rust_invoke", + ); + + let account_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let account_metas = vec![ + AccountMeta::new(mint_pubkey, true), + AccountMeta::new(account_keypair.pubkey(), false), + AccountMeta::new_readonly(invoke_program_id, false), + AccountMeta::new_readonly(c_invoke_program_id, false), + ]; + + for invoke_program_id in [invoke_program_id, c_invoke_program_id] { + for ix in [ + TEST_CPI_INVALID_KEY_POINTER, + TEST_CPI_INVALID_LAMPORTS_POINTER, + TEST_CPI_INVALID_OWNER_POINTER, + TEST_CPI_INVALID_DATA_POINTER, + ] { + let account = AccountSharedData::new(42, 5, &invoke_program_id); + bank.store_account(&account_keypair.pubkey(), &account); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &[ix, 42, 42, 42], + account_metas.clone(), + ); + + let message = Message::new(&[instruction], Some(&mint_pubkey)); + let tx = Transaction::new(&[&mint_keypair], message.clone(), bank.last_blockhash()); + let (result, _, logs) = process_transaction_and_record_inner(&bank, tx); + assert!(result.is_err(), "{result:?}"); + assert!( + logs.iter().any(|log| log.contains("Invalid pointer")), + "{logs:?}" + ); + } + } +} + +#[test] +#[cfg(feature = "sbf_rust")] +fn test_deny_executable_write() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(100_123_456_789); + + for direct_mapping in [false, true] { + let mut bank = Bank::new_for_tests(&genesis_config); + let feature_set = Arc::make_mut(&mut bank.feature_set); + // by default test banks have all features enabled, so we only need to + // disable when needed + if !direct_mapping { + feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); + } + let bank = Arc::new(bank); + let mut bank_client = BankClient::new_shared(bank); + + let (_bank, invoke_program_id) = load_program_and_advance_slot( + &mut bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_sbf_rust_invoke", + ); + + let account_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let account_metas = vec![ + AccountMeta::new(mint_pubkey, true), + AccountMeta::new(account_keypair.pubkey(), false), + AccountMeta::new_readonly(invoke_program_id, false), + ]; + + let mut instruction_data = vec![TEST_WRITE_ACCOUNT, 2]; + instruction_data.extend_from_slice(4usize.to_le_bytes().as_ref()); + instruction_data.push(42); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + assert_eq!( + result.unwrap_err().unwrap(), + TransactionError::InstructionError(0, InstructionError::ExecutableDataModified) + ); + } } From 552dc8351173032396089ef59ebc6c4f000550f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 12:25:39 +0000 Subject: [PATCH 003/407] build(deps): bump syn from 2.0.29 to 2.0.31 (#33135) * build(deps): bump syn from 2.0.29 to 2.0.31 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.29 to 2.0.31. 
- [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.29...2.0.31) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 44 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 42 +++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4a8e56a1e1b54c..322cba4f80b63a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,7 +436,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -590,7 +590,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1497,7 +1497,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1508,7 +1508,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1700,7 +1700,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1800,7 +1800,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -2075,7 +2075,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -3381,7 +3381,7 @@ dependencies 
= [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -3888,7 +3888,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4051,7 +4051,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4739,7 +4739,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4784,7 +4784,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4834,7 +4834,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -5946,7 +5946,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -6957,7 +6957,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -7701,9 +7701,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.29" +version = "2.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" +checksum = "718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398" dependencies = [ "proc-macro2", "quote", @@ -7906,7 +7906,7 @@ checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -8055,7 +8055,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 
2.0.31", ] [[package]] @@ -8546,7 +8546,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", "wasm-bindgen-shared", ] @@ -8580,7 +8580,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8926,7 +8926,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 18a669098b13d7..9c4acdf5a85d27 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -410,7 +410,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -564,7 +564,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1207,7 +1207,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1218,7 +1218,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1393,7 +1393,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1496,7 +1496,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1745,7 +1745,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -3010,7 +3010,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - 
"syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -3446,7 +3446,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -3581,7 +3581,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4159,7 +4159,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4204,7 +4204,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4962,7 +4962,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -6057,7 +6057,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -6623,9 +6623,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.29" +version = "2.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" +checksum = "718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398" dependencies = [ "proc-macro2", "quote", @@ -6773,7 +6773,7 @@ checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -6906,7 +6906,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -7383,7 +7383,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", "wasm-bindgen-shared", ] @@ -7417,7 +7417,7 @@ checksum = 
"54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7754,7 +7754,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] From 170ad08cc835314fc2c1d6796fa3da5d9033028c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 12:28:00 +0000 Subject: [PATCH 004/407] build(deps): bump regex from 1.9.4 to 1.9.5 (#33133) * build(deps): bump regex from 1.9.4 to 1.9.5 Bumps [regex](https://github.com/rust-lang/regex) from 1.9.4 to 1.9.5. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.9.4...1.9.5) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 12 ++++++------ 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 322cba4f80b63a..f4c0608c91a6c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3056,9 +3056,9 @@ checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memmap2" @@ -4332,13 +4332,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-automata 0.3.7", + "regex-automata 0.3.8", "regex-syntax 0.7.5", ] @@ -4350,9 +4350,9 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick 1.0.1", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 59751edc7d1eca..ad103ee1b16803 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -271,7 +271,7 @@ raptorq = "1.7.0" rayon = "1.7.0" rcgen = 
"0.10.0" reed-solomon-erasure = "6.0.0" -regex = "1.9.4" +regex = "1.9.5" rolling-file = "0.2.0" reqwest = { version = "0.11.20", default-features = false } rpassword = "7.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9c4acdf5a85d27..dc3fc7ca822746 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2684,9 +2684,9 @@ checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memmap2" @@ -3806,9 +3806,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -3818,9 +3818,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick 1.0.1", "memchr", From bd61ff72aa27a30d1e9f94311fa38aff0e1c0430 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 12:30:06 +0000 Subject: [PATCH 005/407] build(deps): bump ctrlc from 3.4.0 to 3.4.1 (#33131) Bumps [ctrlc](https://github.com/Detegr/rust-ctrlc) from 3.4.0 to 3.4.1. 
- [Release notes](https://github.com/Detegr/rust-ctrlc/releases) - [Commits](https://github.com/Detegr/rust-ctrlc/compare/3.4.0...3.4.1) --- updated-dependencies: - dependency-name: ctrlc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 25 ++++++++++++++++++------- Cargo.toml | 2 +- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f4c0608c91a6c5..6721cbc55158aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1454,11 +1454,11 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" +checksum = "82e95fbd621905b854affdc67943b043a0fbb6ed7385fd5a25650d19a8a6cfdf" dependencies = [ - "nix", + "nix 0.27.1", "windows-sys 0.48.0", ] @@ -3215,6 +3215,17 @@ dependencies = [ "pin-utils", ] +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.3.3", + "cfg-if 1.0.0", + "libc", +] + [[package]] name = "nom" version = "7.0.0" @@ -6085,7 +6096,7 @@ dependencies = [ "dirs-next", "indicatif", "lazy_static", - "nix", + "nix 0.26.4", "reqwest", "scopeguard", "semver 1.0.18", @@ -6380,7 +6391,7 @@ dependencies = [ "clap 3.2.23", "crossbeam-channel", "log", - "nix", + "nix 0.26.4", "rand 0.8.5", "serde", "serde_derive", @@ -6417,7 +6428,7 @@ dependencies = [ "libc", "log", "matches", - "nix", + "nix 0.26.4", "rand 0.8.5", "rand_chacha 0.3.1", "rayon", @@ -7082,7 +7093,7 @@ dependencies = [ "itertools", "libc", "log", - "nix", + "nix 0.26.4", "pem", "percentage", "pkcs8", diff --git a/Cargo.toml b/Cargo.toml index ad103ee1b16803..f97ddb12cde328 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -170,7 +170,7 @@ criterion = "0.5.1" criterion-stats = "0.3.0" crossbeam-channel = "0.5.8" csv = "1.2.2" -ctrlc = "3.4.0" +ctrlc = "3.4.1" curve25519-dalek = "3.2.1" dashmap = "4.0.2" derivation-path = { version = "0.2.0", default-features = false } From 9ab5c34543ba3e7b1c0b312c6cee11d3f179f01a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 12:44:50 +0000 Subject: [PATCH 006/407] build(deps): bump thiserror from 1.0.47 to 1.0.48 (#33134) * build(deps): bump thiserror from 1.0.47 to 1.0.48 Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.47 to 1.0.48. - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.47...1.0.48) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6721cbc55158aa..d14603617b4a97 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7902,18 +7902,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" +checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.47" +version = "1.0.48" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" +checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index f97ddb12cde328..8be6e0ef6f97c2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -387,7 +387,7 @@ tar = "0.4.40" tarpc = "0.29.0" tempfile = "3.8.0" test-case = "3.1.0" -thiserror = "1.0.47" +thiserror = "1.0.48" tiny-bip39 = "0.8.2" tokio = "1.29.1" tokio-serde = "0.8" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index dc3fc7ca822746..2ab0a3e3d82dae 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6758,18 +6758,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" +checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" +checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", From e9542200e81840001bd38cb122a7ff313aaf5d1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Tue, 5 Sep 2023 15:01:20 +0200 Subject: [PATCH 007/407] Fix - Superfluous `if` condition for overwriting `environments` in feature activation code (#33138) Removes the `if` block around the overwriting of the `environments`. 
--- runtime/src/bank.rs | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 22e33a3f5500a0..5a6f2923b4f1fb 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -8086,23 +8086,15 @@ impl Bank { ) .unwrap(); let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); - if *loaded_programs_cache.environments.program_runtime_v1 - != program_runtime_environment_v1 - { - loaded_programs_cache.environments.program_runtime_v1 = - Arc::new(program_runtime_environment_v1); - } + loaded_programs_cache.environments.program_runtime_v1 = + Arc::new(program_runtime_environment_v1); let program_runtime_environment_v2 = solana_loader_v4_program::create_program_runtime_environment_v2( &self.runtime_config.compute_budget.unwrap_or_default(), false, /* debugging_features */ ); - if *loaded_programs_cache.environments.program_runtime_v2 - != program_runtime_environment_v2 - { - loaded_programs_cache.environments.program_runtime_v2 = - Arc::new(program_runtime_environment_v2); - } + loaded_programs_cache.environments.program_runtime_v2 = + Arc::new(program_runtime_environment_v2); loaded_programs_cache.prune_feature_set_transition(); } for builtin in BUILTINS.iter() { From 19306bac74f62bf87325bb8e6e709a924a98c367 Mon Sep 17 00:00:00 2001 From: Callum McIntyre Date: Tue, 5 Sep 2023 15:35:48 +0200 Subject: [PATCH 008/407] Correct docs for getMultipleAccounts RPC (#32684) --- docs/src/api/methods/_getMultipleAccounts.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/api/methods/_getMultipleAccounts.mdx b/docs/src/api/methods/_getMultipleAccounts.mdx index feace5e86c0174..b9c73a2a4d7090 100644 --- a/docs/src/api/methods/_getMultipleAccounts.mdx +++ b/docs/src/api/methods/_getMultipleAccounts.mdx @@ -19,7 +19,7 @@ Returns the account information for a list of Pubkeys. 
### Parameters: - + An array of Pubkeys to query, as base-58 encoded strings (up to a maximum of 100) @@ -50,7 +50,7 @@ Data slicing is only available for base58, base64, or ::: - + encoding format for the returned Account data From 3dee9cb489b88d7e0168259399822ea15c6b1475 Mon Sep 17 00:00:00 2001 From: Proph3t Date: Tue, 5 Sep 2023 15:05:34 +0000 Subject: [PATCH 009/407] Fix a typo & phrasing of `bpf_loader_upgadeable` docs (#33141) * Fix a typo & phrasing of `bpf_loader_upgradeable` docs Originally, I was just going to fix the typo, replacing 'the' with 'that.' But I thought that the sentence was phrased awkwardly anyway, so I tried to correct that as well. * Fix second typo Co-authored-by: ripatel-fd --------- Co-authored-by: ripatel-fd --- sdk/program/src/bpf_loader_upgradeable.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sdk/program/src/bpf_loader_upgradeable.rs b/sdk/program/src/bpf_loader_upgradeable.rs index f68131334823ce..870b0006d8d891 100644 --- a/sdk/program/src/bpf_loader_upgradeable.rs +++ b/sdk/program/src/bpf_loader_upgradeable.rs @@ -2,11 +2,11 @@ //! //! The upgradeable BPF loader is responsible for deploying, upgrading, and //! executing BPF programs. The upgradeable loader allows a program's authority -//! to update the program at any time. This ability break's the "code is law" -//! contract the usually enforces the policy that once a program is on-chain it -//! becomes immutable. Because of this, care should be taken before executing -//! upgradeable programs which still have a functioning authority. For more -//! information refer to the [`loader_upgradeable_instruction`] module. +//! to update the program at any time. This ability breaks the "code is law" +//! contract that once a program is on-chain it is immutable. Because of this, +//! care should be taken before executing upgradeable programs which still have +//! a functioning authority. For more information refer to the +//! 
[`loader_upgradeable_instruction`] module. //! //! The `solana program deploy` CLI command uses the //! upgradeable BPF loader. Calling `solana program deploy --final` deploys a From d36ded20fc55d29658ab1c3c824ecbf4c50693b8 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 5 Sep 2023 08:54:32 -0700 Subject: [PATCH 010/407] Make TimedTracedEvent accessible outside banking_trace.rs (#32985) --- core/src/banking_trace.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/banking_trace.rs b/core/src/banking_trace.rs index 7a5c8d42da45ae..760121dc7c557d 100644 --- a/core/src/banking_trace.rs +++ b/core/src/banking_trace.rs @@ -63,10 +63,10 @@ pub struct BankingTracer { } #[derive(Serialize, Deserialize, Debug)] -pub struct TimedTracedEvent(std::time::SystemTime, TracedEvent); +pub struct TimedTracedEvent(pub std::time::SystemTime, pub TracedEvent); #[derive(Serialize, Deserialize, Debug)] -enum TracedEvent { +pub enum TracedEvent { PacketBatch(ChannelLabel, BankingPacketBatch), BlockAndBankHash(Slot, Hash, Hash), } From 3a91d3cc6a2e11ca77904c77e6b281e0c3aef4c0 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 6 Sep 2023 01:20:42 +0800 Subject: [PATCH 011/407] ci: remove -f from cargo install rustfilt (#33143) --- sdk/cargo-build-sbf/tests/crates.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/cargo-build-sbf/tests/crates.rs b/sdk/cargo-build-sbf/tests/crates.rs index 8da5e7c46fd56b..c63308124b0130 100644 --- a/sdk/cargo-build-sbf/tests/crates.rs +++ b/sdk/cargo-build-sbf/tests/crates.rs @@ -61,7 +61,7 @@ fn test_build() { fn test_dump() { // This test requires rustfilt. 
assert_cmd::Command::new("cargo") - .args(["install", "-f", "rustfilt"]) + .args(["install", "rustfilt"]) .assert() .success(); run_cargo_build("noop", &["--dump"], false); From 25d3db0c188aaf43c531e1e067b3511dec9c2ed3 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Wed, 6 Sep 2023 01:01:20 +0700 Subject: [PATCH 012/407] Fix CI (#33139) * programs/sbf: fix invalid_reference_casting errors in tests * programs/sbf: enable dev-context-only-utils on solana-sdk * programs/sbf: switch to clippy::arithmetic_side_effects * solana-program: fix formatting --- programs/sbf/Cargo.lock | 1 + programs/sbf/Cargo.toml | 2 + programs/sbf/rust/invoke/Cargo.toml | 1 + programs/sbf/rust/invoke/src/lib.rs | 1 - programs/sbf/rust/invoke/src/processor.rs | 45 ++++++++++++++-------- programs/sbf/rust/realloc/src/processor.rs | 2 +- sdk/program/src/bpf_loader_upgradeable.rs | 6 +-- 7 files changed, 36 insertions(+), 22 deletions(-) diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2ab0a3e3d82dae..f1c049055da630 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5753,6 +5753,7 @@ dependencies = [ name = "solana-sbf-rust-invoke" version = "1.17.0" dependencies = [ + "rustversion", "solana-program", "solana-sbf-rust-invoked", "solana-sbf-rust-realloc", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index d2f3022616e609..04418970594d05 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -22,6 +22,7 @@ net2 = "0.2.37" num-derive = "0.3" num-traits = "0.2" rand = "0.8" +rustversion = "1.0.14" serde = "1.0.112" serde_json = "1.0.56" solana_rbpf = "=0.6.0" @@ -101,6 +102,7 @@ solana_rbpf = { workspace = true } [dev-dependencies] solana-ledger = { workspace = true } +solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } [[bench]] name = "bpf_loader" diff --git a/programs/sbf/rust/invoke/Cargo.toml b/programs/sbf/rust/invoke/Cargo.toml index 66b07e500d897f..3c20ba6cb78f6e 100644 --- 
a/programs/sbf/rust/invoke/Cargo.toml +++ b/programs/sbf/rust/invoke/Cargo.toml @@ -14,6 +14,7 @@ default = ["program"] program = [] [dependencies] +rustversion = { workspace = true } solana-program = { workspace = true } solana-sbf-rust-invoked = { workspace = true } solana-sbf-rust-realloc = { workspace = true } diff --git a/programs/sbf/rust/invoke/src/lib.rs b/programs/sbf/rust/invoke/src/lib.rs index eb0dde96b85804..f7b0b7bb400e93 100644 --- a/programs/sbf/rust/invoke/src/lib.rs +++ b/programs/sbf/rust/invoke/src/lib.rs @@ -1,4 +1,3 @@ //! Example Rust-based SBF program that issues a cross-program-invocation - pub mod instructions; pub mod processor; diff --git a/programs/sbf/rust/invoke/src/processor.rs b/programs/sbf/rust/invoke/src/processor.rs index 2e1ee8cac9cc42..7c689cfcf860ae 100644 --- a/programs/sbf/rust/invoke/src/processor.rs +++ b/programs/sbf/rust/invoke/src/processor.rs @@ -2,7 +2,7 @@ #![cfg(feature = "program")] #![allow(unreachable_code)] -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] use { crate::instructions::*, @@ -797,9 +797,10 @@ fn process_instruction( // AccountDataSizeChanged let serialized_len_ptr = unsafe { account.data.borrow_mut().as_mut_ptr().offset(-8) as *mut u64 }; + unsafe { - std::ptr::write( - &account.data as *const _ as usize as *mut Rc>, + overwrite_account_data( + account, Rc::from_raw(((rc_box_addr as usize) + mem::size_of::() * 2) as *mut _), ); } @@ -836,10 +837,7 @@ fn process_instruction( // global_deallocator.dealloc(rc_box_addr) which is invalid and // happens to write a poison value into the account. unsafe { - std::ptr::write( - &account.data as *const _ as usize as *mut Rc>, - Rc::new(RefCell::new(&mut [])), - ); + overwrite_account_data(account, Rc::new(RefCell::new(&mut []))); } } TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE => { @@ -886,8 +884,8 @@ fn process_instruction( // allows us to test having CallerAccount::ref_to_len_in_vm in an // account region. 
unsafe { - std::ptr::write( - &account.data as *const _ as usize as *mut Rc>, + overwrite_account_data( + account, Rc::from_raw(((rc_box_addr as usize) + mem::size_of::() * 2) as *mut _), ); } @@ -920,10 +918,7 @@ fn process_instruction( // global_deallocator.dealloc(rc_box_addr) which is invalid and // happens to write a poison value into the account. unsafe { - std::ptr::write( - &account.data as *const _ as usize as *mut Rc>, - Rc::new(RefCell::new(&mut [])), - ); + overwrite_account_data(account, Rc::new(RefCell::new(&mut []))); } } TEST_ALLOW_WRITE_AFTER_OWNERSHIP_CHANGE_TO_CALLER => { @@ -1133,9 +1128,13 @@ fn process_instruction( let account = &accounts[ARGUMENT_INDEX]; let key = *account.key; let key = &key as *const _ as usize; - unsafe { - *mem::transmute::<_, *mut *const Pubkey>(&account.key) = key as *const Pubkey; + #[rustversion::attr(since(1.72), allow(invalid_reference_casting))] + fn overwrite_account_key(account: &AccountInfo, key: *const Pubkey) { + unsafe { + *mem::transmute::<_, *mut *const Pubkey>(&account.key) = key; + } } + overwrite_account_key(account, key as *const Pubkey); let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; invoke( @@ -1179,9 +1178,13 @@ fn process_instruction( const CALLEE_PROGRAM_INDEX: usize = 2; let account = &accounts[ARGUMENT_INDEX]; let owner = account.owner as *const _ as usize + 1; - unsafe { - *mem::transmute::<_, *mut *const Pubkey>(&account.owner) = owner as *const Pubkey; + #[rustversion::attr(since(1.72), allow(invalid_reference_casting))] + fn overwrite_account_owner(account: &AccountInfo, owner: *const Pubkey) { + unsafe { + *mem::transmute::<_, *mut *const Pubkey>(&account.owner) = owner; + } } + overwrite_account_owner(account, owner as *const Pubkey); let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; invoke( @@ -1303,3 +1306,11 @@ struct RcBox { weak: usize, value: T, } + +#[rustversion::attr(since(1.72), allow(invalid_reference_casting))] +unsafe fn overwrite_account_data(account: 
&AccountInfo, data: Rc>) { + std::ptr::write( + &account.data as *const _ as usize as *mut Rc>, + data, + ); +} diff --git a/programs/sbf/rust/realloc/src/processor.rs b/programs/sbf/rust/realloc/src/processor.rs index 172ed7498f88c8..2fb0ca70076d14 100644 --- a/programs/sbf/rust/realloc/src/processor.rs +++ b/programs/sbf/rust/realloc/src/processor.rs @@ -1,7 +1,7 @@ //! Example Rust-based SBF realloc test program #![cfg(feature = "program")] -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] extern crate solana_program; use { diff --git a/sdk/program/src/bpf_loader_upgradeable.rs b/sdk/program/src/bpf_loader_upgradeable.rs index 870b0006d8d891..907a953d026706 100644 --- a/sdk/program/src/bpf_loader_upgradeable.rs +++ b/sdk/program/src/bpf_loader_upgradeable.rs @@ -3,9 +3,9 @@ //! The upgradeable BPF loader is responsible for deploying, upgrading, and //! executing BPF programs. The upgradeable loader allows a program's authority //! to update the program at any time. This ability breaks the "code is law" -//! contract that once a program is on-chain it is immutable. Because of this, -//! care should be taken before executing upgradeable programs which still have -//! a functioning authority. For more information refer to the +//! contract that once a program is on-chain it is immutable. Because of this, +//! care should be taken before executing upgradeable programs which still have +//! a functioning authority. For more information refer to the //! [`loader_upgradeable_instruction`] module. //! //! 
The `solana program deploy` CLI command uses the From 3b108564f9f6ce2bfeeea2a7cf948df4d52666fe Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 5 Sep 2023 21:04:39 +0200 Subject: [PATCH 013/407] Demote Arc parameter to &Bank (#33130) --- core/src/cache_block_meta_service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/cache_block_meta_service.rs b/core/src/cache_block_meta_service.rs index 689bb80052c62d..5d0dbefa7802a2 100644 --- a/core/src/cache_block_meta_service.rs +++ b/core/src/cache_block_meta_service.rs @@ -41,7 +41,7 @@ impl CacheBlockMetaService { } Ok(bank) => { let mut cache_block_meta_timer = Measure::start("cache_block_meta_timer"); - Self::cache_block_meta(bank, &blockstore); + Self::cache_block_meta(&bank, &blockstore); cache_block_meta_timer.stop(); if cache_block_meta_timer.as_ms() > CACHE_BLOCK_TIME_WARNING_MS { warn!( @@ -57,7 +57,7 @@ impl CacheBlockMetaService { Self { thread_hdl } } - fn cache_block_meta(bank: Arc, blockstore: &Blockstore) { + fn cache_block_meta(bank: &Bank, blockstore: &Blockstore) { if let Err(e) = blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp) { error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e); } From 2cf53b6293917d71a20e5ae336a80b2cf6539ff0 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 5 Sep 2023 17:06:35 -0400 Subject: [PATCH 014/407] Purge banks, with the same slot, sequentially (#33149) --- runtime/src/accounts_background_service.rs | 162 +++++++++++++++++++-- 1 file changed, 149 insertions(+), 13 deletions(-) diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 365913502bc0ff..0e7c158375cb41 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -502,21 +502,28 @@ pub struct PrunedBanksRequestHandler { impl PrunedBanksRequestHandler { pub fn handle_request(&self, bank: &Bank, is_serialized_with_abs: bool) -> usize { - let slots = 
self.pruned_banks_receiver.try_iter().collect::>(); - let count = slots.len(); - bank.rc.accounts.accounts_db.thread_pool_clean.install(|| { - slots - .into_par_iter() - .for_each(|(pruned_slot, pruned_bank_id)| { - bank.rc.accounts.accounts_db.purge_slot( - pruned_slot, - pruned_bank_id, - is_serialized_with_abs, - ); - }); + let mut banks_to_purge: Vec<_> = self.pruned_banks_receiver.try_iter().collect(); + // We need a stable sort to ensure we purge banks—with the same slot—in the same order + // they were sent into the channel. + banks_to_purge.sort_by_key(|(slot, _id)| *slot); + let num_banks_to_purge = banks_to_purge.len(); + + // Group the banks into slices with the same slot + let grouped_banks_to_purge: Vec<_> = + GroupBy::new(banks_to_purge.as_slice(), |a, b| a.0 == b.0).collect(); + + // Purge all the slots in parallel + // Banks for the same slot are purged sequentially + let accounts_db = bank.rc.accounts.accounts_db.as_ref(); + accounts_db.thread_pool_clean.install(|| { + grouped_banks_to_purge.into_par_iter().for_each(|group| { + group.iter().for_each(|(slot, bank_id)| { + accounts_db.purge_slot(*slot, *bank_id, is_serialized_with_abs); + }) + }); }); - count + num_banks_to_purge } fn remove_dead_slots( @@ -790,6 +797,56 @@ fn cmp_requests_by_priority( .then(slot_a.cmp(&slot_b)) } +/// An iterator over a slice producing non-overlapping runs +/// of elements using a predicate to separate them. +/// +/// This can be used to extract sorted subslices. +/// +/// (`Vec::group_by()`](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.group_by) +/// is currently a nightly-only experimental API. Once the API is stablized, use it instead. 
+/// +/// tracking issue: https://github.com/rust-lang/rust/issues/80552 +/// rust-lang PR: https://github.com/rust-lang/rust/pull/79895/ +/// implementation permalink: https://github.com/Kerollmops/rust/blob/8b53be660444d736bb6a6e1c6ba42c8180c968e7/library/core/src/slice/iter.rs#L2972-L3023 +struct GroupBy<'a, T: 'a, P> { + slice: &'a [T], + predicate: P, +} +impl<'a, T: 'a, P> GroupBy<'a, T, P> +where + P: FnMut(&T, &T) -> bool, +{ + fn new(slice: &'a [T], predicate: P) -> Self { + GroupBy { slice, predicate } + } +} +impl<'a, T: 'a, P> Iterator for GroupBy<'a, T, P> +where + P: FnMut(&T, &T) -> bool, +{ + type Item = &'a [T]; + + #[inline] + fn next(&mut self) -> Option { + if self.slice.is_empty() { + None + } else { + let mut len = 1; + let mut iter = self.slice.windows(2); + while let Some([l, r]) = iter.next() { + if (self.predicate)(l, r) { + len += 1 + } else { + break; + } + } + let (head, tail) = self.slice.split_at(len); + self.slice = tail; + Some(head) + } + } +} + #[cfg(test)] mod test { use { @@ -1036,4 +1093,83 @@ mod test { .get_next_snapshot_request(Some(480)) .is_none()); } + + /// Ensure that we can prune banks with the same slot (if they were on different forks) + #[test] + fn test_pruned_banks_request_handler_handle_request() { + let (pruned_banks_sender, pruned_banks_receiver) = crossbeam_channel::unbounded(); + let pruned_banks_request_handler = PrunedBanksRequestHandler { + pruned_banks_receiver, + }; + let genesis_config_info = create_genesis_config(10); + let bank = Bank::new_for_tests(&genesis_config_info.genesis_config); + bank.set_startup_verification_complete(); + bank.rc.accounts.accounts_db.enable_bank_drop_callback(); + bank.set_callback(Some(Box::new(SendDroppedBankCallback::new( + pruned_banks_sender, + )))); + + let fork0_bank0 = Arc::new(bank); + let fork0_bank1 = Arc::new(Bank::new_from_parent( + fork0_bank0.clone(), + &Pubkey::new_unique(), + fork0_bank0.slot() + 1, + )); + let fork1_bank1 = Arc::new(Bank::new_from_parent( + 
fork0_bank0.clone(), + &Pubkey::new_unique(), + fork0_bank0.slot() + 1, + )); + let fork2_bank1 = Arc::new(Bank::new_from_parent( + fork0_bank0.clone(), + &Pubkey::new_unique(), + fork0_bank0.slot() + 1, + )); + let fork0_bank2 = Arc::new(Bank::new_from_parent( + fork0_bank1.clone(), + &Pubkey::new_unique(), + fork0_bank1.slot() + 1, + )); + let fork1_bank2 = Arc::new(Bank::new_from_parent( + fork1_bank1.clone(), + &Pubkey::new_unique(), + fork1_bank1.slot() + 1, + )); + let fork0_bank3 = Arc::new(Bank::new_from_parent( + fork0_bank2.clone(), + &Pubkey::new_unique(), + fork0_bank2.slot() + 1, + )); + let fork3_bank3 = Arc::new(Bank::new_from_parent( + fork0_bank2.clone(), + &Pubkey::new_unique(), + fork0_bank2.slot() + 1, + )); + fork0_bank3.squash(); + + drop(fork3_bank3); + drop(fork1_bank2); + drop(fork0_bank2); + drop(fork1_bank1); + drop(fork2_bank1); + drop(fork0_bank1); + drop(fork0_bank0); + let num_banks_purged = pruned_banks_request_handler.handle_request(&fork0_bank3, true); + assert_eq!(num_banks_purged, 7); + } + + // This test is for our copied impl of GroupBy, above. + // When it is removed, this test can be removed. 
+ #[test] + fn test_group_by() { + let slice = &[1, 1, 1, 3, 3, 2, 2, 2, 1, 0]; + + let mut iter = GroupBy::new(slice, |a, b| a == b); + assert_eq!(iter.next(), Some(&[1, 1, 1][..])); + assert_eq!(iter.next(), Some(&[3, 3][..])); + assert_eq!(iter.next(), Some(&[2, 2, 2][..])); + assert_eq!(iter.next(), Some(&[1][..])); + assert_eq!(iter.next(), Some(&[0][..])); + assert_eq!(iter.next(), None); + } } From efb6846fe71d161fa67f13b3f62ddfe792914141 Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 5 Sep 2023 15:24:17 -0600 Subject: [PATCH 015/407] Boot solana-test-validator without enable_partitioned_epoch_reward feature (#33146) --- validator/src/bin/solana-test-validator.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 69f749cfcbeb2d..39f206116ecaf7 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -19,6 +19,7 @@ use { account::AccountSharedData, clock::Slot, epoch_schedule::EpochSchedule, + feature_set, native_token::sol_to_lamports, pubkey::Pubkey, rent::Rent, @@ -348,7 +349,9 @@ fn main() { exit(1); }); - let features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); + let mut features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); + // Remove this when client support is ready for the enable_partitioned_epoch_reward feature + features_to_deactivate.push(feature_set::enable_partitioned_epoch_reward::id()); if TestValidatorGenesis::ledger_exists(&ledger_path) { for (name, long) in &[ From 05622c17daa293645e42512ca460e419c9f2367f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Tue, 5 Sep 2023 23:57:25 +0200 Subject: [PATCH 016/407] Refactor - Minor fixes in the error handling of executing tombstones (#33145) Minor fixes in the error handling of executing tombstones. 
--- programs/bpf_loader/src/lib.rs | 11 +++++------ programs/loader-v4/src/lib.rs | 10 +++++----- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 69151d41e2b423..ad4445a4d5f46b 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -498,12 +498,10 @@ fn process_instruction_inner( let mut get_or_create_executor_time = Measure::start("get_or_create_executor_time"); let executor = invoke_context .find_program_in_cache(program_account.get_key()) - .ok_or(InstructionError::InvalidAccountData)?; - - if executor.is_tombstone() { - return Err(Box::new(InstructionError::InvalidAccountData)); - } - + .ok_or_else(|| { + ic_logger_msg!(log_collector, "Program is not cached"); + InstructionError::InvalidAccountData + })?; drop(program_account); get_or_create_executor_time.stop(); saturating_add_assign!( @@ -516,6 +514,7 @@ fn process_instruction_inner( LoadedProgramType::FailedVerification(_) | LoadedProgramType::Closed | LoadedProgramType::DelayVisibility => { + ic_logger_msg!(log_collector, "Program is not deployed"); Err(Box::new(InstructionError::InvalidAccountData) as Box) } LoadedProgramType::LegacyV0(executable) => execute(executable, invoke_context), diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index a7bac5d89ff4a2..5ebab07677b7e8 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -595,11 +595,10 @@ pub fn process_instruction_inner( let mut get_or_create_executor_time = Measure::start("get_or_create_executor_time"); let loaded_program = invoke_context .find_program_in_cache(program.get_key()) - .ok_or(InstructionError::InvalidAccountData)?; - - if loaded_program.is_tombstone() { - return Err(Box::new(InstructionError::InvalidAccountData)); - } + .ok_or_else(|| { + ic_logger_msg!(log_collector, "Program is not cached"); + InstructionError::InvalidAccountData + })?; 
get_or_create_executor_time.stop(); saturating_add_assign!( invoke_context.timings.get_or_create_executor_us, @@ -613,6 +612,7 @@ pub fn process_instruction_inner( LoadedProgramType::FailedVerification(_) | LoadedProgramType::Closed | LoadedProgramType::DelayVisibility => { + ic_logger_msg!(log_collector, "Program is not deployed"); Err(Box::new(InstructionError::InvalidAccountData) as Box) } LoadedProgramType::Typed(executable) => execute(invoke_context, executable), From 17b1b5646ddee9763649dc7ec93d7988f322acd4 Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 5 Sep 2023 16:52:49 -0600 Subject: [PATCH 017/407] rpc: turn off getStakeActivation epoch parameter (#33156) * Return error on past-epoch parameter * Update docs --- docs/src/api/methods/_getStakeActivation.mdx | 1 + rpc/src/rpc.rs | 10 +++------- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/docs/src/api/methods/_getStakeActivation.mdx b/docs/src/api/methods/_getStakeActivation.mdx index abd1d0bafb5966..28b2d8a81438b2 100644 --- a/docs/src/api/methods/_getStakeActivation.mdx +++ b/docs/src/api/methods/_getStakeActivation.mdx @@ -41,6 +41,7 @@ Configuration object containing the following fields: epoch for which to calculate activation details. If parameter not provided, defaults to current epoch. + **DEPRECATED**, inputs other than the current epoch return an error. 
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 393c53b6a84084..3e284b1f942574 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1719,14 +1719,10 @@ impl JsonRpcRequestProcessor { min_context_slot: config.min_context_slot, })?; let epoch = config.epoch.unwrap_or_else(|| bank.epoch()); - if bank.epoch().saturating_sub(epoch) > solana_sdk::stake_history::MAX_ENTRIES as u64 { + if epoch != bank.epoch() { return Err(Error::invalid_params(format!( - "Invalid param: epoch {epoch:?} is too far in the past" - ))); - } - if epoch > bank.epoch() { - return Err(Error::invalid_params(format!( - "Invalid param: epoch {epoch:?} has not yet started" + "Invalid param: epoch {epoch:?}. Only the current epoch ({:?}) is supported", + bank.epoch() ))); } From f8d304c6108b13e5cbdfc27180f007892afaa7d4 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 5 Sep 2023 16:08:48 -0700 Subject: [PATCH 018/407] Drop poh_service to avoid unwanted ticking (#33150) --- core/src/banking_stage/decision_maker.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/src/banking_stage/decision_maker.rs b/core/src/banking_stage/decision_maker.rs index edf67960d6ce58..6d26a9d0fcc02a 100644 --- a/core/src/banking_stage/decision_maker.rs +++ b/core/src/banking_stage/decision_maker.rs @@ -152,6 +152,9 @@ mod tests { let blockstore = Arc::new(Blockstore::open(ledger_path.as_path()).unwrap()); let (exit, poh_recorder, poh_service, _entry_receiver) = create_test_recorder(bank.clone(), blockstore, None, None); + // Drop the poh service immediately to avoid potential ticking + exit.store(true, Ordering::Relaxed); + poh_service.join().unwrap(); let my_pubkey = Pubkey::new_unique(); let decision_maker = DecisionMaker::new(my_pubkey, poh_recorder.clone()); @@ -206,9 +209,6 @@ mod tests { let decision = decision_maker.make_consume_or_forward_decision(); assert_matches!(decision, BufferedPacketsDecision::Forward); } - - exit.store(true, Ordering::Relaxed); - 
poh_service.join().unwrap(); } #[test] From a3399d0164970f6c196633ac4b42a6fa7a6e90fc Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 5 Sep 2023 18:01:18 -0700 Subject: [PATCH 019/407] Add loader-v4 instruction constructors (#33151) * Add loader-v4 instruction constructors * address review comments * clippy fixes --- sdk/program/src/loader_v4.rs | 304 ++++++++++++++++++++++++++++++++++- 1 file changed, 302 insertions(+), 2 deletions(-) diff --git a/sdk/program/src/loader_v4.rs b/sdk/program/src/loader_v4.rs index c1728cf1e078c7..9180c00a718243 100644 --- a/sdk/program/src/loader_v4.rs +++ b/sdk/program/src/loader_v4.rs @@ -2,7 +2,12 @@ //! //! This is the loader of the program runtime v2. -use crate::pubkey::Pubkey; +use crate::{ + instruction::{AccountMeta, Instruction}, + loader_v4_instruction::LoaderV4Instruction, + pubkey::Pubkey, + system_instruction, +}; crate::declare_id!("LoaderV411111111111111111111111111111111111"); @@ -41,9 +46,147 @@ impl LoaderV4State { } } +pub fn is_write_instruction(instruction_data: &[u8]) -> bool { + !instruction_data.is_empty() && 0 == instruction_data[0] +} + +pub fn is_truncate_instruction(instruction_data: &[u8]) -> bool { + !instruction_data.is_empty() && 1 == instruction_data[0] +} + +pub fn is_deploy_instruction(instruction_data: &[u8]) -> bool { + !instruction_data.is_empty() && 2 == instruction_data[0] +} + +pub fn is_retract_instruction(instruction_data: &[u8]) -> bool { + !instruction_data.is_empty() && 3 == instruction_data[0] +} + +pub fn is_transfer_authority_instruction(instruction_data: &[u8]) -> bool { + !instruction_data.is_empty() && 4 == instruction_data[0] +} + +/// Returns the instructions required to initialize a program/buffer account. 
+pub fn create_buffer( + payer_address: &Pubkey, + buffer_address: &Pubkey, + lamports: u64, + authority: &Pubkey, + new_size: u32, + recipient_address: &Pubkey, +) -> Vec { + vec![ + system_instruction::create_account(payer_address, buffer_address, lamports, 0, &id()), + Instruction::new_with_bincode( + id(), + &LoaderV4Instruction::Truncate { new_size }, + vec![ + AccountMeta::new(*buffer_address, true), + AccountMeta::new_readonly(*authority, true), + AccountMeta::new(*recipient_address, false), + ], + ), + ] +} + +/// Returns the instructions required to set the length of the program account. +pub fn truncate( + program_address: &Pubkey, + authority: &Pubkey, + new_size: u32, + recipient_address: &Pubkey, +) -> Instruction { + Instruction::new_with_bincode( + id(), + &LoaderV4Instruction::Truncate { new_size }, + vec![ + AccountMeta::new(*program_address, false), + AccountMeta::new_readonly(*authority, true), + AccountMeta::new(*recipient_address, false), + ], + ) +} + +/// Returns the instructions required to write a chunk of program data to a +/// buffer account. +pub fn write( + program_address: &Pubkey, + authority: &Pubkey, + offset: u32, + bytes: Vec, +) -> Instruction { + Instruction::new_with_bincode( + id(), + &LoaderV4Instruction::Write { offset, bytes }, + vec![ + AccountMeta::new(*program_address, false), + AccountMeta::new_readonly(*authority, true), + ], + ) +} + +/// Returns the instructions required to deploy a program. +pub fn deploy(program_address: &Pubkey, authority: &Pubkey) -> Instruction { + Instruction::new_with_bincode( + id(), + &LoaderV4Instruction::Deploy, + vec![ + AccountMeta::new(*program_address, false), + AccountMeta::new_readonly(*authority, true), + ], + ) +} + +/// Returns the instructions required to deploy a program using a buffer. 
+pub fn deploy_from_source( + program_address: &Pubkey, + authority: &Pubkey, + source_address: &Pubkey, +) -> Instruction { + Instruction::new_with_bincode( + id(), + &LoaderV4Instruction::Deploy, + vec![ + AccountMeta::new(*program_address, false), + AccountMeta::new_readonly(*authority, true), + AccountMeta::new(*source_address, false), + ], + ) +} + +/// Returns the instructions required to retract a program. +pub fn retract(program_address: &Pubkey, authority: &Pubkey) -> Instruction { + Instruction::new_with_bincode( + id(), + &LoaderV4Instruction::Retract, + vec![ + AccountMeta::new(*program_address, false), + AccountMeta::new_readonly(*authority, true), + ], + ) +} + +/// Returns the instructions required to transfer authority over a program. +pub fn transfer_authority( + program_address: &Pubkey, + authority: &Pubkey, + new_authority: Option<&Pubkey>, +) -> Instruction { + let mut accounts = vec![ + AccountMeta::new(*program_address, false), + AccountMeta::new_readonly(*authority, true), + ]; + + if let Some(new_auth) = new_authority { + accounts.push(AccountMeta::new_readonly(*new_auth, true)); + } + + Instruction::new_with_bincode(id(), &LoaderV4Instruction::TransferAuthority, accounts) +} + #[cfg(test)] mod tests { - use {super::*, memoffset::offset_of}; + use {super::*, crate::system_program, memoffset::offset_of}; #[test] fn test_layout() { @@ -52,4 +195,161 @@ mod tests { assert_eq!(offset_of!(LoaderV4State, status), 0x28); assert_eq!(LoaderV4State::program_data_offset(), 0x30); } + + #[test] + fn test_create_buffer_instruction() { + let payer = Pubkey::new_unique(); + let program = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let recipient = Pubkey::new_unique(); + let instructions = create_buffer(&payer, &program, 123, &authority, 10, &recipient); + assert_eq!(instructions.len(), 2); + let instruction0 = &instructions[0]; + assert_eq!(instruction0.program_id, system_program::id()); + assert_eq!(instruction0.accounts.len(), 2); + 
assert_eq!(instruction0.accounts[0].pubkey, payer); + assert!(instruction0.accounts[0].is_writable); + assert!(instruction0.accounts[0].is_signer); + assert_eq!(instruction0.accounts[1].pubkey, program); + assert!(instruction0.accounts[1].is_writable); + assert!(instruction0.accounts[1].is_signer); + + let instruction1 = &instructions[1]; + assert!(is_truncate_instruction(&instruction1.data)); + assert_eq!(instruction1.program_id, id()); + assert_eq!(instruction1.accounts.len(), 3); + assert_eq!(instruction1.accounts[0].pubkey, program); + assert!(instruction1.accounts[0].is_writable); + assert!(instruction1.accounts[0].is_signer); + assert_eq!(instruction1.accounts[1].pubkey, authority); + assert!(!instruction1.accounts[1].is_writable); + assert!(instruction1.accounts[1].is_signer); + assert_eq!(instruction1.accounts[2].pubkey, recipient); + assert!(instruction1.accounts[2].is_writable); + assert!(!instruction1.accounts[2].is_signer); + } + + #[test] + fn test_write_instruction() { + let program = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let instruction = write(&program, &authority, 123, vec![1, 2, 3, 4]); + assert!(is_write_instruction(&instruction.data)); + assert_eq!(instruction.program_id, id()); + assert_eq!(instruction.accounts.len(), 2); + assert_eq!(instruction.accounts[0].pubkey, program); + assert!(instruction.accounts[0].is_writable); + assert!(!instruction.accounts[0].is_signer); + assert_eq!(instruction.accounts[1].pubkey, authority); + assert!(!instruction.accounts[1].is_writable); + assert!(instruction.accounts[1].is_signer); + } + + #[test] + fn test_truncate_instruction() { + let program = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let recipient = Pubkey::new_unique(); + let instruction = truncate(&program, &authority, 10, &recipient); + assert!(is_truncate_instruction(&instruction.data)); + assert_eq!(instruction.program_id, id()); + assert_eq!(instruction.accounts.len(), 3); + 
assert_eq!(instruction.accounts[0].pubkey, program); + assert!(instruction.accounts[0].is_writable); + assert!(!instruction.accounts[0].is_signer); + assert_eq!(instruction.accounts[1].pubkey, authority); + assert!(!instruction.accounts[1].is_writable); + assert!(instruction.accounts[1].is_signer); + assert_eq!(instruction.accounts[2].pubkey, recipient); + assert!(instruction.accounts[2].is_writable); + assert!(!instruction.accounts[2].is_signer); + } + + #[test] + fn test_deploy_instruction() { + let program = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let instruction = deploy(&program, &authority); + assert!(is_deploy_instruction(&instruction.data)); + assert_eq!(instruction.program_id, id()); + assert_eq!(instruction.accounts.len(), 2); + assert_eq!(instruction.accounts[0].pubkey, program); + assert!(instruction.accounts[0].is_writable); + assert!(!instruction.accounts[0].is_signer); + assert_eq!(instruction.accounts[1].pubkey, authority); + assert!(!instruction.accounts[1].is_writable); + assert!(instruction.accounts[1].is_signer); + } + + #[test] + fn test_deploy_from_source_instruction() { + let program = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let source = Pubkey::new_unique(); + let instruction = deploy_from_source(&program, &authority, &source); + assert!(is_deploy_instruction(&instruction.data)); + assert_eq!(instruction.program_id, id()); + assert_eq!(instruction.accounts.len(), 3); + assert_eq!(instruction.accounts[0].pubkey, program); + assert!(instruction.accounts[0].is_writable); + assert!(!instruction.accounts[0].is_signer); + assert_eq!(instruction.accounts[1].pubkey, authority); + assert!(!instruction.accounts[1].is_writable); + assert!(instruction.accounts[1].is_signer); + assert_eq!(instruction.accounts[2].pubkey, source); + assert!(instruction.accounts[2].is_writable); + assert!(!instruction.accounts[2].is_signer); + } + + #[test] + fn test_retract_instruction() { + let program = 
Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let instruction = retract(&program, &authority); + assert!(is_retract_instruction(&instruction.data)); + assert_eq!(instruction.program_id, id()); + assert_eq!(instruction.accounts.len(), 2); + assert_eq!(instruction.accounts[0].pubkey, program); + assert!(instruction.accounts[0].is_writable); + assert!(!instruction.accounts[0].is_signer); + assert_eq!(instruction.accounts[1].pubkey, authority); + assert!(!instruction.accounts[1].is_writable); + assert!(instruction.accounts[1].is_signer); + } + + #[test] + fn test_transfer_authority_instruction() { + let program = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let new_authority = Pubkey::new_unique(); + let instruction = transfer_authority(&program, &authority, Some(&new_authority)); + assert!(is_transfer_authority_instruction(&instruction.data)); + assert_eq!(instruction.program_id, id()); + assert_eq!(instruction.accounts.len(), 3); + assert_eq!(instruction.accounts[0].pubkey, program); + assert!(instruction.accounts[0].is_writable); + assert!(!instruction.accounts[0].is_signer); + assert_eq!(instruction.accounts[1].pubkey, authority); + assert!(!instruction.accounts[1].is_writable); + assert!(instruction.accounts[1].is_signer); + assert_eq!(instruction.accounts[2].pubkey, new_authority); + assert!(!instruction.accounts[2].is_writable); + assert!(instruction.accounts[2].is_signer); + } + + #[test] + fn test_transfer_authority_finalize_instruction() { + let program = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let instruction = transfer_authority(&program, &authority, None); + assert!(is_transfer_authority_instruction(&instruction.data)); + assert_eq!(instruction.program_id, id()); + assert_eq!(instruction.accounts.len(), 2); + assert_eq!(instruction.accounts[0].pubkey, program); + assert!(instruction.accounts[0].is_writable); + assert!(!instruction.accounts[0].is_signer); + 
assert_eq!(instruction.accounts[1].pubkey, authority); + assert!(!instruction.accounts[1].is_writable); + assert!(instruction.accounts[1].is_signer); + } } From d077b13efa6288aad100d3b08a97021720333508 Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Tue, 5 Sep 2023 20:30:17 -0700 Subject: [PATCH 020/407] accounts-db: test_hash_stored_account: Avoid UB. (#33083) unsafe { transmute }` in the test is generating undefined behavior, as it assigns a value into `bool` that is neither 0 nor 1. We see discrepancy between release and debug builds due to this. It is better to avoid `unsafe` blocks if there alternatives that let the compiler check everything. --- accounts-db/src/accounts_db.rs | 67 +++++++++++++++------------------- 1 file changed, 30 insertions(+), 37 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 7b49d4a25461fd..85c32eb057342f 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -12549,40 +12549,36 @@ pub mod tests { #[test] fn test_hash_stored_account() { - // This test uses some UNSAFE tricks to detect most of hashing code changes, resulting from - // account's field additions and deletions of StoredAccountMeta and AccountSharedData and - // hashing-order changes. - + // Number are just sequential. 
+ let slot: Slot = 0x01_02_03_04_05_06_07_08; + let meta = StoredMeta { + write_version_obsolete: 0x09_0a_0b_0c_0d_0e_0f_10, + data_len: 0x11_12_13_14_15_16_17_18, + pubkey: Pubkey::from([ + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, + 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, + 0x35, 0x36, 0x37, 0x38, + ]), + }; + let account_meta = AccountMeta { + lamports: 0x39_3a_3b_3c_3d_3e_3f_40, + rent_epoch: 0x41_42_43_44_45_46_47_48, + owner: Pubkey::from([ + 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, + 0x65, 0x66, 0x67, 0x68, + ]), + executable: false, + }; const ACCOUNT_DATA_LEN: usize = 3; - // the type of InputFields elements must not contain references; - // they should be simple scalars or data blobs - // repr(C) is needed for abi-stability in the dirtiest variant of std::mem::transmute(). - #[repr(C)] - struct InputFields( - Slot, - StoredMeta, - AccountMeta, - [u8; ACCOUNT_DATA_LEN], - usize, // for StoredAccountMeta::offset - Hash, - ); - const INPUT_LEN: usize = std::mem::size_of::(); - type InputBlob = [u8; INPUT_LEN]; - let mut blob: InputBlob = [0u8; INPUT_LEN]; - - // spray memory with decreasing integers so that, data layout change and/or hashing - // reordering can be detected. note that just zeroed blob can't detect field reordering. - for (i, byte) in blob.iter_mut().enumerate() { - *byte = (INPUT_LEN - i) as u8; - } + let data: [u8; ACCOUNT_DATA_LEN] = [0x69, 0x6a, 0x6b]; + let offset: usize = 0x6c_6d_6e_6f_70_71_72_73; + let hash = Hash::from([ + 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, + 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, + ]); - //UNSAFE: forcibly cast the special byte pattern to actual account fields. 
- let InputFields(slot, meta, account_meta, data, offset, hash) = - unsafe { std::mem::transmute::(blob) }; - - // When adding a field to the following constructor, make sure this is sourced from - // InputFields as well after adding new corresponding one to it. Needless to say, but note - // that the hashing code itself must be adjusted let stored_account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { meta: &meta, account_meta: &account_meta, @@ -12593,11 +12589,8 @@ pub mod tests { }); let account = stored_account.to_account_shared_data(); - let expected_account_hash = if cfg!(debug_assertions) { - Hash::from_str("8GiQSN2VvWASKPUuZgFkH4v66ihEanrDVXAkMFvLwEa8").unwrap() - } else { - Hash::from_str("9MYASra3mm8oXzMapYUonB6TcRsKFPtjhNXVgY3MPPUX").unwrap() - }; + let expected_account_hash = + Hash::from_str("6VeAL4x4PVkECKL1hD1avwPE1uMCRoWiZJzVMvVNYhTq").unwrap(); assert_eq!( AccountsDb::hash_account( From a8e83c872006c28e83a21c7f716335d25e940bae Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 5 Sep 2023 21:29:53 -0700 Subject: [PATCH 021/407] replay: send duplicate proofs from blockstore to state machine (#32962) * replay: send duplicate proofs from blockstore to state machine * pr feedback: bank.slot() -> slot * pr feedback --- core/src/consensus.rs | 1 + core/src/replay_stage.rs | 73 +++++++++- local-cluster/src/integration_tests.rs | 18 +++ local-cluster/tests/local_cluster.rs | 176 ++++++++++++++++++++++--- 4 files changed, 245 insertions(+), 23 deletions(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 9223b494452ae3..0e204dbe562343 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -293,6 +293,7 @@ impl Tower { bank_forks.frozen_banks().values().cloned().collect(), node_pubkey, vote_account, + vec![], ); let root = root_bank.slot(); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 58e57cacf2b18c..55e9b7ad21aab1 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ 
-549,6 +549,7 @@ impl ReplayStage { &bank_forks, &my_pubkey, &vote_account, + &blockstore, ); let mut current_leader = None; let mut last_reset = Hash::default(); @@ -1230,16 +1231,29 @@ impl ReplayStage { bank_forks: &RwLock, my_pubkey: &Pubkey, vote_account: &Pubkey, + blockstore: &Blockstore, ) -> (ProgressMap, HeaviestSubtreeForkChoice) { - let (root_bank, frozen_banks) = { + let (root_bank, frozen_banks, duplicate_slot_hashes) = { let bank_forks = bank_forks.read().unwrap(); + let duplicate_slots = blockstore + .duplicate_slots_iterator(bank_forks.root_bank().slot()) + .unwrap(); + let duplicate_slot_hashes = duplicate_slots + .filter_map(|slot| bank_forks.bank_hash(slot).map(|hash| (slot, hash))); ( bank_forks.root_bank(), bank_forks.frozen_banks().values().cloned().collect(), + duplicate_slot_hashes.collect::>(), ) }; - Self::initialize_progress_and_fork_choice(&root_bank, frozen_banks, my_pubkey, vote_account) + Self::initialize_progress_and_fork_choice( + &root_bank, + frozen_banks, + my_pubkey, + vote_account, + duplicate_slot_hashes, + ) } pub fn initialize_progress_and_fork_choice( @@ -1247,6 +1261,7 @@ impl ReplayStage { mut frozen_banks: Vec>, my_pubkey: &Pubkey, vote_account: &Pubkey, + duplicate_slot_hashes: Vec<(Slot, Hash)>, ) -> (ProgressMap, HeaviestSubtreeForkChoice) { let mut progress = ProgressMap::default(); @@ -1261,11 +1276,15 @@ impl ReplayStage { ); } let root = root_bank.slot(); - let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_frozen_banks( + let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_frozen_banks( (root, root_bank.hash()), &frozen_banks, ); + for slot_hash in duplicate_slot_hashes { + heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&slot_hash); + } + (progress, heaviest_subtree_fork_choice) } @@ -2086,6 +2105,30 @@ impl ReplayStage { purge_repair_slot_counter, SlotStateUpdate::Dead(dead_state), ); + + // If we previously marked this slot as duplicate in blockstore, let 
the state machine know + if !duplicate_slots_tracker.contains(&slot) && blockstore.get_duplicate_slot(slot).is_some() + { + let duplicate_state = DuplicateState::new_from_state( + slot, + gossip_duplicate_confirmed_slots, + heaviest_subtree_fork_choice, + || true, + || None, + ); + check_slot_agrees_with_cluster( + slot, + root, + blockstore, + duplicate_slots_tracker, + epoch_slots_frozen_slots, + heaviest_subtree_fork_choice, + duplicate_slots_to_repair, + ancestor_hashes_replay_update_sender, + purge_repair_slot_counter, + SlotStateUpdate::Duplicate(duplicate_state), + ); + } } #[allow(clippy::too_many_arguments)] @@ -2827,6 +2870,30 @@ impl ReplayStage { purge_repair_slot_counter, SlotStateUpdate::BankFrozen(bank_frozen_state), ); + // If we previously marked this slot as duplicate in blockstore, let the state machine know + if !duplicate_slots_tracker.contains(&bank.slot()) + && blockstore.get_duplicate_slot(bank.slot()).is_some() + { + let duplicate_state = DuplicateState::new_from_state( + bank.slot(), + gossip_duplicate_confirmed_slots, + heaviest_subtree_fork_choice, + || false, + || Some(bank.hash()), + ); + check_slot_agrees_with_cluster( + bank.slot(), + bank_forks.read().unwrap().root(), + blockstore, + duplicate_slots_tracker, + epoch_slots_frozen_slots, + heaviest_subtree_fork_choice, + duplicate_slots_to_repair, + ancestor_hashes_replay_update_sender, + purge_repair_slot_counter, + SlotStateUpdate::Duplicate(duplicate_state), + ); + } if let Some(sender) = bank_notification_sender { sender .sender diff --git a/local-cluster/src/integration_tests.rs b/local-cluster/src/integration_tests.rs index e7691bc1c63edb..41e803799fcd52 100644 --- a/local-cluster/src/integration_tests.rs +++ b/local-cluster/src/integration_tests.rs @@ -26,6 +26,7 @@ use { solana_ledger::{ ancestor_iterator::AncestorIterator, blockstore::{Blockstore, PurgeType}, + blockstore_meta::DuplicateSlotProof, blockstore_options::{AccessType, BlockstoreOptions}, 
leader_schedule::{FixedSchedule, LeaderSchedule}, }, @@ -153,6 +154,23 @@ pub fn wait_for_last_vote_in_tower_to_land_in_ledger( }) } +/// Waits roughly 10 seconds for duplicate proof to appear in blockstore at `dup_slot`. Returns proof if found. +pub fn wait_for_duplicate_proof(ledger_path: &Path, dup_slot: Slot) -> Option { + for _ in 0..10 { + let duplicate_fork_validator_blockstore = open_blockstore(ledger_path); + if let Some((found_dup_slot, found_duplicate_proof)) = + duplicate_fork_validator_blockstore.get_first_duplicate_proof() + { + if found_dup_slot == dup_slot { + return Some(found_duplicate_proof); + }; + } + + sleep(Duration::from_millis(1000)); + } + None +} + pub fn copy_blocks(end_slot: Slot, source: &Blockstore, dest: &Blockstore) { for slot in std::iter::once(end_slot).chain(AncestorIterator::new(end_slot, source)) { let source_meta = source.meta(slot).unwrap().unwrap(); diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 118332ed384049..2280bd98f48cdc 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -24,10 +24,9 @@ use { ancestor_iterator::AncestorIterator, bank_forks_utils, blockstore::{entries_to_test_shreds, Blockstore}, - blockstore_meta::DuplicateSlotProof, blockstore_processor::ProcessOptions, leader_schedule::FixedSchedule, - shred::Shred, + shred::{ProcessShredsStats, ReedSolomonCache, Shred, Shredder}, use_snapshot_archives_at_startup::UseSnapshotArchivesAtStartup, }, solana_local_cluster::{ @@ -39,7 +38,7 @@ use { last_root_in_tower, last_vote_in_tower, ms_for_n_slots, open_blockstore, purge_slots_with_count, remove_tower, remove_tower_if_exists, restore_tower, run_cluster_partition, run_kill_partition_switch_threshold, save_tower, - setup_snapshot_validator_config, test_faulty_node, + setup_snapshot_validator_config, test_faulty_node, wait_for_duplicate_proof, wait_for_last_vote_in_tower_to_land_in_ledger, SnapshotValidatorConfig, 
ValidatorTestConfig, DEFAULT_CLUSTER_LAMPORTS, DEFAULT_NODE_STAKE, RUST_LOG_FILTER, }, @@ -69,7 +68,7 @@ use { client::{AsyncClient, SyncClient}, clock::{self, Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE}, commitment_config::CommitmentConfig, - epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, + epoch_schedule::{DEFAULT_SLOTS_PER_EPOCH, MINIMUM_SLOTS_PER_EPOCH}, genesis_config::ClusterType, hard_forks::HardForks, hash::Hash, @@ -5145,22 +5144,6 @@ fn test_duplicate_shreds_switch_failure() { } } - fn wait_for_duplicate_proof(ledger_path: &Path, dup_slot: Slot) -> Option { - for _ in 0..10 { - let duplicate_fork_validator_blockstore = open_blockstore(ledger_path); - if let Some((found_dup_slot, found_duplicate_proof)) = - duplicate_fork_validator_blockstore.get_first_duplicate_proof() - { - if found_dup_slot == dup_slot { - return Some(found_duplicate_proof); - }; - } - - sleep(Duration::from_millis(1000)); - } - None - } - solana_logger::setup_with_default(RUST_LOG_FILTER); let validator_keypairs = [ "28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4", @@ -5506,3 +5489,156 @@ fn test_duplicate_shreds_switch_failure() { SocketAddrSpace::Unspecified, ); } + +/// Forks previous marked invalid should be marked as such in fork choice on restart +#[test] +#[serial] +fn test_invalid_forks_persisted_on_restart() { + solana_logger::setup_with("info,solana_metrics=off,solana_ledger=off"); + + let dup_slot = 10; + let validator_keypairs = [ + "28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4", + "2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8", + ] + .iter() + .map(|s| (Arc::new(Keypair::from_base58_string(s)), true)) + .collect::>(); + let majority_keypair = validator_keypairs[1].0.clone(); + + let validators = validator_keypairs + .iter() + .map(|(kp, _)| kp.pubkey()) + .collect::>(); + + let node_stakes = vec![DEFAULT_NODE_STAKE, 100 * DEFAULT_NODE_STAKE]; + 
let (target_pubkey, majority_pubkey) = (validators[0], validators[1]); + // Need majority validator to make the dup_slot + let validator_to_slots = vec![ + (majority_pubkey, dup_slot as usize + 5), + (target_pubkey, DEFAULT_SLOTS_PER_EPOCH as usize), + ]; + let leader_schedule = create_custom_leader_schedule(validator_to_slots.into_iter()); + let mut default_config = ValidatorConfig::default_for_test(); + default_config.fixed_leader_schedule = Some(FixedSchedule { + leader_schedule: Arc::new(leader_schedule), + }); + let mut validator_configs = make_identical_validator_configs(&default_config, 2); + // Majority shouldn't duplicate confirm anything + validator_configs[1].voting_disabled = true; + + let mut cluster = LocalCluster::new( + &mut ClusterConfig { + cluster_lamports: DEFAULT_CLUSTER_LAMPORTS + node_stakes.iter().sum::(), + validator_configs, + node_stakes, + validator_keys: Some(validator_keypairs), + skip_warmup_slots: true, + ..ClusterConfig::default() + }, + SocketAddrSpace::Unspecified, + ); + + let target_ledger_path = cluster.ledger_path(&target_pubkey); + + // Wait for us to vote past duplicate slot + let timer = Instant::now(); + loop { + if let Some(slot) = + wait_for_last_vote_in_tower_to_land_in_ledger(&target_ledger_path, &target_pubkey) + { + if slot > dup_slot { + break; + } + } + + assert!( + timer.elapsed() < Duration::from_secs(30), + "Did not make more than 10 blocks in 30 seconds" + ); + sleep(Duration::from_millis(100)); + } + + // Send duplicate + let parent = { + let blockstore = open_blockstore(&target_ledger_path); + let parent = blockstore + .meta(dup_slot) + .unwrap() + .unwrap() + .parent_slot + .unwrap(); + + let entries = create_ticks( + 64 * (std::cmp::max(1, dup_slot - parent)), + 0, + cluster.genesis_config.hash(), + ); + let last_hash = entries.last().unwrap().hash; + let version = solana_sdk::shred_version::version_from_hash(&last_hash); + let dup_shreds = Shredder::new(dup_slot, parent, 0, version) + .unwrap() + 
.entries_to_shreds( + &majority_keypair, + &entries, + true, // is_full_slot + 0, // next_shred_index, + 0, // next_code_index + false, // merkle_variant + &ReedSolomonCache::default(), + &mut ProcessShredsStats::default(), + ) + .0; + + info!("Sending duplicate shreds for {dup_slot}"); + cluster.send_shreds_to_validator(dup_shreds.iter().collect(), &target_pubkey); + wait_for_duplicate_proof(&target_ledger_path, dup_slot) + .expect("Duplicate proof for {dup_slot} not found"); + parent + }; + + info!("Duplicate proof for {dup_slot} has landed, restarting node"); + let info = cluster.exit_node(&target_pubkey); + + { + let blockstore = open_blockstore(&target_ledger_path); + purge_slots_with_count(&blockstore, dup_slot + 5, 100); + } + + // Restart, should create an entirely new fork + cluster.restart_node(&target_pubkey, info, SocketAddrSpace::Unspecified); + + info!("Waiting for fork built off {parent}"); + let timer = Instant::now(); + let mut checked_children: HashSet = HashSet::default(); + let mut done = false; + while !done { + let blockstore = open_blockstore(&target_ledger_path); + let parent_meta = blockstore.meta(parent).unwrap().expect("Meta must exist"); + for child in parent_meta.next_slots { + if checked_children.contains(&child) { + continue; + } + + if blockstore.is_full(child) { + let shreds = blockstore + .get_data_shreds_for_slot(child, 0) + .expect("Child is full"); + let mut is_our_block = true; + for shred in shreds { + is_our_block &= shred.verify(&target_pubkey); + } + if is_our_block { + done = true; + } + checked_children.insert(child); + } + } + + assert!( + timer.elapsed() < Duration::from_secs(30), + "Did not create a new fork off parent {parent} in 30 seconds after restart" + ); + sleep(Duration::from_millis(100)); + } +} From 903c615559fda3dbebe11e6f74a33d1007dc5139 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 6 Sep 2023 16:49:08 +0800 Subject: [PATCH 022/407] ci: use codecov command line tool (#33132) * ci: install codecov to 
nigthly image * ci: use codecov command line tool --- ci/docker-rust-nightly/Dockerfile | 6 +++++- ci/test-coverage.sh | 5 +---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index fbd0903a075351..6a0b5523ff847e 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -10,4 +10,8 @@ RUN set -x \ && cargo --version \ && cargo install grcov \ && rustc +nightly-$date --version \ - && cargo +nightly-$date --version + && cargo +nightly-$date --version \ + # codecov + && curl -Os https://uploader.codecov.io/latest/linux/codecov \ + && chmod +x codecov \ + && mv codecov /usr/bin diff --git a/ci/test-coverage.sh b/ci/test-coverage.sh index 1235af81ae88b1..44231cd338a13e 100755 --- a/ci/test-coverage.sh +++ b/ci/test-coverage.sh @@ -29,10 +29,7 @@ if [[ -z "$CODECOV_TOKEN" ]]; then echo "^^^ +++" echo CODECOV_TOKEN undefined, codecov.io upload skipped else - # We normalize CI to `1`; but codecov expects it to be `true` to detect Buildkite... - # Unfortunately, codecov.io fails sometimes: - # curl: (7) Failed to connect to codecov.io port 443: Connection timed out - CI=true bash <(while ! curl -sS --retry 5 --retry-delay 2 --retry-connrefused --fail https://codecov.io/bash; do sleep 10; done) -Z -X gcov -f target/cov/lcov.info + codecov -t "${CODECOV_TOKEN}" annotate --style success --context codecov.io \ "CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}" From 424666e3415d0a9184e9a12e48346fb1212feaa8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Wed, 6 Sep 2023 10:54:15 +0200 Subject: [PATCH 023/407] Bump solana_rbpf to v0.7.0 (#33104) * Upgrades RBPF in Cargo.toml * Adjustments to updated interfaces. 
--- Cargo.lock | 5 +- Cargo.toml | 2 +- cli/src/program.rs | 16 ++- ledger-tool/src/program.rs | 25 +++-- program-runtime/src/invoke_context.rs | 3 +- program-runtime/src/loaded_programs.rs | 83 ++++++++------- programs/bpf_loader/Cargo.toml | 2 +- programs/bpf_loader/src/lib.rs | 53 +++------- programs/bpf_loader/src/syscalls/mod.rs | 97 +++++++++--------- programs/loader-v4/Cargo.toml | 1 - programs/loader-v4/src/lib.rs | 85 +++++++++------ programs/loader-v4/test_elfs/out/noop.so | Bin 1768 -> 0 bytes .../loader-v4/test_elfs/out/relative_call.so | Bin 0 -> 5384 bytes programs/loader-v4/test_elfs/out/rodata.so | Bin 1904 -> 0 bytes .../loader-v4/test_elfs/out/rodata_section.so | Bin 0 -> 5424 bytes programs/sbf/Cargo.lock | 6 +- programs/sbf/Cargo.toml | 2 +- programs/sbf/benches/bpf_loader.rs | 68 +++++------- 18 files changed, 214 insertions(+), 234 deletions(-) delete mode 100755 programs/loader-v4/test_elfs/out/noop.so create mode 100644 programs/loader-v4/test_elfs/out/relative_call.so delete mode 100755 programs/loader-v4/test_elfs/out/rodata.so create mode 100644 programs/loader-v4/test_elfs/out/rodata_section.so diff --git a/Cargo.lock b/Cargo.lock index d14603617b4a97..551b056c5ee9a5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6255,7 +6255,6 @@ version = "1.17.0" dependencies = [ "bincode", "log", - "rand 0.8.5", "solana-measure", "solana-program-runtime", "solana-sdk", @@ -7528,9 +7527,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3082ec3a1d4ef7879eb5b84916d5acde057abd59733eec3647e0ab8885283ef" +checksum = "339e8963a8e2721227e46cf7a8488957db94cde0f35d3a769e292baaebdbeb44" dependencies = [ "byteorder", "combine", diff --git a/Cargo.toml b/Cargo.toml index 8be6e0ef6f97c2..0c7669ea09bb98 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -294,7 +294,7 @@ siphasher = "0.3.11" smpl_jwt = "0.7.1" socket2 = "0.5.3" soketto = "0.7" 
-solana_rbpf = "=0.6.0" +solana_rbpf = "=0.7.0" solana-account-decoder = { path = "account-decoder", version = "=1.17.0" } solana-accounts-db = { path = "accounts-db", version = "=1.17.0" } solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=1.17.0" } diff --git a/cli/src/program.rs b/cli/src/program.rs index 61a470c9133cdb..81f6b3a3f57f00 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -27,10 +27,7 @@ use { tpu_client::{TpuClient, TpuClientConfig}, }, solana_program_runtime::{compute_budget::ComputeBudget, invoke_context::InvokeContext}, - solana_rbpf::{ - elf::Executable, - verifier::{RequisiteVerifier, TautologyVerifier}, - }, + solana_rbpf::{elf::Executable, verifier::RequisiteVerifier}, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::{ @@ -2033,13 +2030,12 @@ fn read_and_verify_elf(program_location: &str) -> Result, Box::from_elf( - &program_data, - Arc::new(program_runtime_environment), - ) - .map_err(|err| format!("ELF error: {err}"))?; + let executable = + Executable::::from_elf(&program_data, Arc::new(program_runtime_environment)) + .map_err(|err| format!("ELF error: {err}"))?; - let _ = Executable::::verified(executable) + executable + .verify::() .map_err(|err| format!("ELF error: {err}"))?; Ok(program_data) diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 7ed1ee06f7d14e..4349190955546d 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -283,11 +283,11 @@ impl Debug for Output { // https://github.com/rust-lang/rust/issues/74465 struct LazyAnalysis<'a, 'b> { analysis: Option>, - executable: &'a Executable>, + executable: &'a Executable>, } impl<'a, 'b> LazyAnalysis<'a, 'b> { - fn new(executable: &'a Executable>) -> Self { + fn new(executable: &'a Executable>) -> Self { Self { analysis: None, executable, @@ -330,7 +330,7 @@ fn load_program<'a>( filename: &Path, program_id: Pubkey, 
invoke_context: &InvokeContext<'a>, -) -> Executable> { +) -> Executable> { let mut file = File::open(filename).unwrap(); let mut magic = [0u8; 4]; file.read_exact(&mut magic).unwrap(); @@ -374,22 +374,25 @@ fn load_program<'a>( Err(err) => Err(format!("Loading executable failed: {err:?}")), } } else { - let executable = assemble::( + assemble::( std::str::from_utf8(contents.as_slice()).unwrap(), Arc::new(program_runtime_environment), ) - .unwrap(); - Executable::::verified(executable) - .map_err(|err| format!("Assembling executable failed: {err:?}")) + .map_err(|err| format!("Assembling executable failed: {err:?}")) + .and_then(|executable| { + executable + .verify::() + .map_err(|err| format!("Verifying executable failed: {err:?}"))?; + Ok(executable) + }) } .unwrap(); #[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] verified_executable.jit_compile().unwrap(); unsafe { - std::mem::transmute::< - Executable>, - Executable>, - >(verified_executable) + std::mem::transmute::>, Executable>>( + verified_executable, + ) } } diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index a105048ac3388c..12f82300d78521 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -748,7 +748,8 @@ impl<'a> InvokeContext<'a> { .ok_or(InstructionError::UnsupportedProgramId)?; let process_instruction = match &entry.program { LoadedProgramType::Builtin(program) => program - .lookup_function(ENTRYPOINT_KEY) + .get_function_registry() + .lookup_by_key(ENTRYPOINT_KEY) .map(|(_name, process_instruction)| process_instruction), _ => None, } diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 73b5b8660abb9b..77246479ed782f 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -1,5 +1,3 @@ -#[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] -use solana_rbpf::error::EbpfError; use { crate::{ 
invoke_context::{InvokeContext, ProcessInstructionWithContext}, @@ -9,7 +7,11 @@ use { log::{debug, log_enabled, trace}, percentage::PercentageInteger, solana_measure::measure::Measure, - solana_rbpf::{elf::Executable, verifier::RequisiteVerifier, vm::BuiltinProgram}, + solana_rbpf::{ + elf::{Executable, FunctionRegistry}, + verifier::RequisiteVerifier, + vm::{BuiltinProgram, Config}, + }, solana_sdk::{ bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock::Slot, loader_v4, pubkey::Pubkey, saturating_add_assign, @@ -66,9 +68,9 @@ pub enum LoadedProgramType { DelayVisibility, /// Successfully verified but not currently compiled, used to track usage statistics when a compiled program is evicted from memory. Unloaded(Arc>>), - LegacyV0(Executable>), - LegacyV1(Executable>), - Typed(Executable>), + LegacyV0(Executable>), + LegacyV1(Executable>), + Typed(Executable>), #[cfg(test)] TestLoaded(Arc>>), Builtin(BuiltinProgram>), @@ -228,39 +230,35 @@ impl LoadedProgram { metrics: &mut LoadProgramMetrics, ) -> Result> { let mut load_elf_time = Measure::start("load_elf_time"); - let executable = Executable::load(elf_bytes, program_runtime_environment.clone())?; + let mut executable = Executable::load(elf_bytes, program_runtime_environment.clone())?; load_elf_time.stop(); metrics.load_elf_us = load_elf_time.as_us(); let mut verify_code_time = Measure::start("verify_code_time"); - - // Allowing mut here, since it may be needed for jit compile, which is under a config flag - #[allow(unused_mut)] - let mut program = if bpf_loader_deprecated::check_id(loader_key) { - LoadedProgramType::LegacyV0(Executable::verified(executable)?) - } else if bpf_loader::check_id(loader_key) || bpf_loader_upgradeable::check_id(loader_key) { - LoadedProgramType::LegacyV1(Executable::verified(executable)?) - } else if loader_v4::check_id(loader_key) { - LoadedProgramType::Typed(Executable::verified(executable)?) 
- } else { - panic!(); - }; + executable.verify::()?; verify_code_time.stop(); metrics.verify_code_us = verify_code_time.as_us(); #[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] { let mut jit_compile_time = Measure::start("jit_compile_time"); - match &mut program { - LoadedProgramType::LegacyV0(executable) => executable.jit_compile(), - LoadedProgramType::LegacyV1(executable) => executable.jit_compile(), - LoadedProgramType::Typed(executable) => executable.jit_compile(), - _ => Err(EbpfError::JitNotCompiled), - }?; + executable.jit_compile()?; jit_compile_time.stop(); metrics.jit_compile_us = jit_compile_time.as_us(); } + // Allowing mut here, since it may be needed for jit compile, which is under a config flag + #[allow(unused_mut)] + let mut program = if bpf_loader_deprecated::check_id(loader_key) { + LoadedProgramType::LegacyV0(executable) + } else if bpf_loader::check_id(loader_key) || bpf_loader_upgradeable::check_id(loader_key) { + LoadedProgramType::LegacyV1(executable) + } else if loader_v4::check_id(loader_key) { + LoadedProgramType::Typed(executable) + } else { + panic!(); + }; + Ok(Self { deployment_slot, account_size, @@ -298,9 +296,9 @@ impl LoadedProgram { account_size: usize, entrypoint: ProcessInstructionWithContext, ) -> Self { - let mut program = BuiltinProgram::default(); - program - .register_function(b"entrypoint", entrypoint) + let mut function_registry = FunctionRegistry::default(); + function_registry + .register_function_hashed(*b"entrypoint", entrypoint) .unwrap(); Self { deployment_slot, @@ -308,7 +306,7 @@ impl LoadedProgram { effective_slot: deployment_slot, maybe_expiration_slot: None, tx_usage_counter: AtomicU64::new(0), - program: LoadedProgramType::Builtin(program), + program: LoadedProgramType::Builtin(BuiltinProgram::new_builtin(function_registry)), ix_usage_counter: AtomicU64::new(0), } } @@ -347,7 +345,7 @@ impl LoadedProgram { } } -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug)] pub struct 
ProgramRuntimeEnvironments { /// Globally shared RBPF config and syscall registry pub program_runtime_v1: Arc>>, @@ -355,6 +353,19 @@ pub struct ProgramRuntimeEnvironments { pub program_runtime_v2: Arc>>, } +impl Default for ProgramRuntimeEnvironments { + fn default() -> Self { + let empty_loader = Arc::new(BuiltinProgram::new_loader( + Config::default(), + FunctionRegistry::default(), + )); + Self { + program_runtime_v1: empty_loader.clone(), + program_runtime_v2: empty_loader, + } + } +} + #[derive(Debug, Default)] pub struct LoadedPrograms { /// A two level index: @@ -832,7 +843,7 @@ mod tests { }, assert_matches::assert_matches, percentage::Percentage, - solana_rbpf::vm::{BuiltinProgram, Config}, + solana_rbpf::vm::BuiltinProgram, solana_sdk::{clock::Slot, pubkey::Pubkey}, std::{ ops::ControlFlow, @@ -845,7 +856,7 @@ mod tests { fn new_test_builtin_program(deployment_slot: Slot, effective_slot: Slot) -> Arc { Arc::new(LoadedProgram { - program: LoadedProgramType::Builtin(BuiltinProgram::default()), + program: LoadedProgramType::Builtin(BuiltinProgram::new_mock()), account_size: 0, deployment_slot, effective_slot, @@ -920,7 +931,7 @@ mod tests { programs.push((program1, *deployment_slot, usage_counter)); }); - let env = Arc::new(BuiltinProgram::new_loader(Config::default())); + let env = Arc::new(BuiltinProgram::new_mock()); for slot in 21..31 { set_tombstone( &mut cache, @@ -1118,7 +1129,7 @@ mod tests { fn test_replace_tombstones() { let mut cache = LoadedPrograms::default(); let program1 = Pubkey::new_unique(); - let env = Arc::new(BuiltinProgram::new_loader(Config::default())); + let env = Arc::new(BuiltinProgram::new_mock()); set_tombstone( &mut cache, program1, @@ -1134,7 +1145,7 @@ mod tests { #[test] fn test_tombstone() { - let env = Arc::new(BuiltinProgram::new_loader(Config::default())); + let env = Arc::new(BuiltinProgram::new_mock()); let tombstone = LoadedProgram::new_tombstone(0, LoadedProgramType::FailedVerification(env.clone())); 
assert_matches!(tombstone.program, LoadedProgramType::FailedVerification(_)); @@ -1359,7 +1370,7 @@ mod tests { usage_counter: AtomicU64, expiry: Option, ) -> Arc { - let env = Arc::new(BuiltinProgram::new_loader(Config::default())); + let env = Arc::new(BuiltinProgram::new_mock()); Arc::new(LoadedProgram { program: LoadedProgramType::TestLoaded(env), account_size: 0, diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index f979b73688b5e0..16a52c07928620 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -14,7 +14,6 @@ bincode = { workspace = true } byteorder = { workspace = true } libsecp256k1 = { workspace = true } log = { workspace = true } -rand = { workspace = true } scopeguard = { workspace = true } solana-measure = { workspace = true } solana-program-runtime = { workspace = true } @@ -26,6 +25,7 @@ thiserror = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } memoffset = { workspace = true } +rand = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } [lib] diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index ad4445a4d5f46b..342d3836321b5c 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -22,7 +22,6 @@ use { elf::Executable, error::EbpfError, memory_region::{AccessType, MemoryCowCallback, MemoryMapping, MemoryRegion}, - verifier::RequisiteVerifier, vm::{BuiltinProgram, ContextObject, EbpfVm, ProgramResult}, }, solana_sdk::{ @@ -191,7 +190,7 @@ pub fn calculate_heap_cost(heap_size: u64, heap_cost: u64, enable_rounding_fix: /// Only used in macro, do not use directly! pub fn create_vm<'a, 'b>( - program: &'a Executable>, + program: &'a Executable>, regions: Vec, accounts_metadata: Vec, invoke_context: &'a mut InvokeContext<'b>, @@ -285,24 +284,21 @@ macro_rules! create_vm { #[macro_export] macro_rules! 
mock_create_vm { ($vm:ident, $additional_regions:expr, $accounts_metadata:expr, $invoke_context:expr $(,)?) => { - let loader = std::sync::Arc::new(BuiltinProgram::new_loader( - solana_rbpf::vm::Config::default(), - )); - let function_registry = solana_rbpf::vm::FunctionRegistry::default(); - let executable = solana_rbpf::elf::Executable::< - solana_rbpf::verifier::TautologyVerifier, - InvokeContext, - >::from_text_bytes( + let loader = std::sync::Arc::new(BuiltinProgram::new_mock()); + let function_registry = solana_rbpf::elf::FunctionRegistry::default(); + let executable = solana_rbpf::elf::Executable::::from_text_bytes( &[0x95, 0, 0, 0, 0, 0, 0, 0], loader, SBPFVersion::V2, function_registry, ) .unwrap(); - let verified_executable = solana_rbpf::elf::Executable::verified(executable).unwrap(); + executable + .verify::() + .unwrap(); $crate::create_vm!( $vm, - &verified_executable, + &executable, $additional_regions, $accounts_metadata, $invoke_context, @@ -311,7 +307,7 @@ macro_rules! mock_create_vm { } fn create_memory_mapping<'a, 'b, C: ContextObject>( - executable: &'a Executable, + executable: &'a Executable, stack: &'b mut AlignedMemory<{ HOST_ALIGN }>, heap: &'b mut AlignedMemory<{ HOST_ALIGN }>, additional_regions: Vec, @@ -1483,14 +1479,12 @@ fn process_loader_instruction(invoke_context: &mut InvokeContext) -> Result<(), } fn execute<'a, 'b: 'a>( - executable: &'a Executable>, + executable: &'a Executable>, invoke_context: &'a mut InvokeContext<'b>, ) -> Result<(), Box> { // We dropped the lifetime tracking in the Executor by setting it to 'static, // thus we need to reintroduce the correct lifetime of InvokeContext here again. 
- let executable = unsafe { - mem::transmute::<_, &'a Executable>>(executable) - }; + let executable = unsafe { mem::transmute::<_, &'a Executable>>(executable) }; let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; @@ -1728,11 +1722,7 @@ mod tests { solana_program_runtime::{ invoke_context::mock_process_instruction, with_mock_invoke_context, }, - solana_rbpf::{ - elf::SBPFVersion, - verifier::Verifier, - vm::{Config, ContextObject, FunctionRegistry}, - }, + solana_rbpf::vm::ContextObject, solana_sdk::{ account::{ create_account_shared_data_for_test as create_account_for_test, AccountSharedData, @@ -1796,21 +1786,6 @@ mod tests { program_account } - #[test] - #[should_panic(expected = "LDDWCannotBeLast")] - fn test_bpf_loader_check_load_dw() { - let prog = &[ - 0x18, 0x00, 0x00, 0x00, 0x88, 0x77, 0x66, 0x55, // first half of lddw - ]; - RequisiteVerifier::verify( - prog, - &Config::default(), - &SBPFVersion::V2, - &FunctionRegistry::default(), - ) - .unwrap(); - } - #[test] fn test_bpf_loader_write() { let loader_id = bpf_loader::id(); @@ -4103,7 +4078,7 @@ mod tests { let transaction_accounts = vec![]; with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); let program_id = Pubkey::new_unique(); - let env = Arc::new(BuiltinProgram::new_loader(Config::default())); + let env = Arc::new(BuiltinProgram::new_mock()); let program = LoadedProgram { program: LoadedProgramType::Unloaded(env), account_size: 0, @@ -4143,7 +4118,7 @@ mod tests { let transaction_accounts = vec![]; with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); let program_id = Pubkey::new_unique(); - let env = Arc::new(BuiltinProgram::new_loader(Config::default())); + let env = Arc::new(BuiltinProgram::new_mock()); let program = LoadedProgram { program: LoadedProgramType::Unloaded(env), 
account_size: 0, diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 2a52bd9aad915a..eb31edf3cda0c3 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -16,8 +16,9 @@ use { stable_log, timings::ExecuteTimings, }, solana_rbpf::{ + elf::FunctionRegistry, memory_region::{AccessType, MemoryMapping}, - vm::{BuiltinProgram, Config, ProgramResult, PROGRAM_ENVIRONMENT_KEY_SHIFT}, + vm::{BuiltinFunction, BuiltinProgram, Config, ProgramResult}, }, solana_sdk::{ account::{ReadableAccount, WritableAccount}, @@ -139,9 +140,9 @@ fn consume_compute_meter(invoke_context: &InvokeContext, amount: u64) -> Result< macro_rules! register_feature_gated_function { ($result:expr, $is_feature_active:expr, $name:expr, $call:expr $(,)?) => { if $is_feature_active { - $result.register_function($name, $call) + $result.register_function_hashed($name, $call) } else { - Ok(()) + Ok(0) } }; } @@ -167,7 +168,6 @@ pub fn create_program_runtime_environment_v1<'a>( // When adding new features for RBPF here, // also add them to `Bank::apply_builtin_program_feature_transitions()`. 
- use rand::Rng; let config = Config { max_call_depth: compute_budget.max_call_depth, stack_frame_size: compute_budget.stack_frame_size, @@ -180,10 +180,7 @@ pub fn create_program_runtime_environment_v1<'a>( reject_broken_elfs: reject_deployment_of_broken_elfs, noop_instruction_rate: 256, sanitize_user_provided_values: true, - runtime_environment_key: rand::thread_rng() - .gen::() - .checked_shr(PROGRAM_ENVIRONMENT_KEY_SHIFT) - .unwrap_or(0), + encrypt_runtime_environment: true, external_internal_function_hash_collision: feature_set .is_active(&error_on_syscall_bpf_function_hash_collisions::id()), reject_callx_r10: feature_set.is_active(&reject_callx_r10::id()), @@ -194,44 +191,44 @@ pub fn create_program_runtime_environment_v1<'a>( aligned_memory_mapping: !feature_set.is_active(&bpf_account_data_direct_mapping::id()), // Warning, do not use `Config::default()` so that configuration here is explicit. }; - let mut result = BuiltinProgram::new_loader(config); + let mut result = FunctionRegistry::>::default(); // Abort - result.register_function(b"abort", SyscallAbort::call)?; + result.register_function_hashed(*b"abort", SyscallAbort::call)?; // Panic - result.register_function(b"sol_panic_", SyscallPanic::call)?; + result.register_function_hashed(*b"sol_panic_", SyscallPanic::call)?; // Logging - result.register_function(b"sol_log_", SyscallLog::call)?; - result.register_function(b"sol_log_64_", SyscallLogU64::call)?; - result.register_function(b"sol_log_compute_units_", SyscallLogBpfComputeUnits::call)?; - result.register_function(b"sol_log_pubkey", SyscallLogPubkey::call)?; + result.register_function_hashed(*b"sol_log_", SyscallLog::call)?; + result.register_function_hashed(*b"sol_log_64_", SyscallLogU64::call)?; + result.register_function_hashed(*b"sol_log_compute_units_", SyscallLogBpfComputeUnits::call)?; + result.register_function_hashed(*b"sol_log_pubkey", SyscallLogPubkey::call)?; // Program defined addresses (PDA) - result.register_function( - 
b"sol_create_program_address", + result.register_function_hashed( + *b"sol_create_program_address", SyscallCreateProgramAddress::call, )?; - result.register_function( - b"sol_try_find_program_address", + result.register_function_hashed( + *b"sol_try_find_program_address", SyscallTryFindProgramAddress::call, )?; // Sha256 - result.register_function(b"sol_sha256", SyscallSha256::call)?; + result.register_function_hashed(*b"sol_sha256", SyscallSha256::call)?; // Keccak256 - result.register_function(b"sol_keccak256", SyscallKeccak256::call)?; + result.register_function_hashed(*b"sol_keccak256", SyscallKeccak256::call)?; // Secp256k1 Recover - result.register_function(b"sol_secp256k1_recover", SyscallSecp256k1Recover::call)?; + result.register_function_hashed(*b"sol_secp256k1_recover", SyscallSecp256k1Recover::call)?; // Blake3 register_feature_gated_function!( result, blake3_syscall_enabled, - b"sol_blake3", + *b"sol_blake3", SyscallBlake3::call, )?; @@ -239,78 +236,78 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, curve25519_syscall_enabled, - b"sol_curve_validate_point", + *b"sol_curve_validate_point", SyscallCurvePointValidation::call, )?; register_feature_gated_function!( result, curve25519_syscall_enabled, - b"sol_curve_group_op", + *b"sol_curve_group_op", SyscallCurveGroupOps::call, )?; register_feature_gated_function!( result, curve25519_syscall_enabled, - b"sol_curve_multiscalar_mul", + *b"sol_curve_multiscalar_mul", SyscallCurveMultiscalarMultiplication::call, )?; // Sysvars - result.register_function(b"sol_get_clock_sysvar", SyscallGetClockSysvar::call)?; - result.register_function( - b"sol_get_epoch_schedule_sysvar", + result.register_function_hashed(*b"sol_get_clock_sysvar", SyscallGetClockSysvar::call)?; + result.register_function_hashed( + *b"sol_get_epoch_schedule_sysvar", SyscallGetEpochScheduleSysvar::call, )?; register_feature_gated_function!( result, !disable_fees_sysvar, - b"sol_get_fees_sysvar", + 
*b"sol_get_fees_sysvar", SyscallGetFeesSysvar::call, )?; - result.register_function(b"sol_get_rent_sysvar", SyscallGetRentSysvar::call)?; + result.register_function_hashed(*b"sol_get_rent_sysvar", SyscallGetRentSysvar::call)?; register_feature_gated_function!( result, last_restart_slot_syscall_enabled, - b"sol_get_last_restart_slot", + *b"sol_get_last_restart_slot", SyscallGetLastRestartSlotSysvar::call, )?; register_feature_gated_function!( result, epoch_rewards_syscall_enabled, - b"sol_get_epoch_rewards_sysvar", + *b"sol_get_epoch_rewards_sysvar", SyscallGetEpochRewardsSysvar::call, )?; // Memory ops - result.register_function(b"sol_memcpy_", SyscallMemcpy::call)?; - result.register_function(b"sol_memmove_", SyscallMemmove::call)?; - result.register_function(b"sol_memcmp_", SyscallMemcmp::call)?; - result.register_function(b"sol_memset_", SyscallMemset::call)?; + result.register_function_hashed(*b"sol_memcpy_", SyscallMemcpy::call)?; + result.register_function_hashed(*b"sol_memmove_", SyscallMemmove::call)?; + result.register_function_hashed(*b"sol_memcmp_", SyscallMemcmp::call)?; + result.register_function_hashed(*b"sol_memset_", SyscallMemset::call)?; // Processed sibling instructions - result.register_function( - b"sol_get_processed_sibling_instruction", + result.register_function_hashed( + *b"sol_get_processed_sibling_instruction", SyscallGetProcessedSiblingInstruction::call, )?; // Stack height - result.register_function(b"sol_get_stack_height", SyscallGetStackHeight::call)?; + result.register_function_hashed(*b"sol_get_stack_height", SyscallGetStackHeight::call)?; // Return data - result.register_function(b"sol_set_return_data", SyscallSetReturnData::call)?; - result.register_function(b"sol_get_return_data", SyscallGetReturnData::call)?; + result.register_function_hashed(*b"sol_set_return_data", SyscallSetReturnData::call)?; + result.register_function_hashed(*b"sol_get_return_data", SyscallGetReturnData::call)?; // Cross-program invocation - 
result.register_function(b"sol_invoke_signed_c", SyscallInvokeSignedC::call)?; - result.register_function(b"sol_invoke_signed_rust", SyscallInvokeSignedRust::call)?; + result.register_function_hashed(*b"sol_invoke_signed_c", SyscallInvokeSignedC::call)?; + result.register_function_hashed(*b"sol_invoke_signed_rust", SyscallInvokeSignedRust::call)?; // Memory allocator register_feature_gated_function!( result, !disable_deploy_of_alloc_free_syscall, - b"sol_alloc_free_", + *b"sol_alloc_free_", SyscallAllocFree::call, )?; @@ -318,7 +315,7 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, enable_alt_bn128_syscall, - b"sol_alt_bn128_group_op", + *b"sol_alt_bn128_group_op", SyscallAltBn128::call, )?; @@ -326,7 +323,7 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, enable_big_mod_exp_syscall, - b"sol_big_mod_exp", + *b"sol_big_mod_exp", SyscallBigModExp::call, )?; @@ -334,14 +331,14 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, enable_poseidon_syscall, - b"sol_poseidon", + *b"sol_poseidon", SyscallPoseidon::call, )?; // Log data - result.register_function(b"sol_log_data", SyscallLogData::call)?; + result.register_function_hashed(*b"sol_log_data", SyscallLogData::call)?; - Ok(result) + Ok(BuiltinProgram::new_loader(config, result)) } fn address_is_aligned(address: u64) -> bool { diff --git a/programs/loader-v4/Cargo.toml b/programs/loader-v4/Cargo.toml index 0ffdd87f6de05a..a9f91dc8d479cc 100644 --- a/programs/loader-v4/Cargo.toml +++ b/programs/loader-v4/Cargo.toml @@ -10,7 +10,6 @@ edition = { workspace = true } [dependencies] log = { workspace = true } -rand = { workspace = true } solana-measure = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 5ebab07677b7e8..312c14f7acf888 100644 --- 
a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -1,5 +1,4 @@ use { - rand::Rng, solana_measure::measure::Measure, solana_program_runtime::{ compute_budget::ComputeBudget, @@ -14,13 +13,9 @@ use { solana_rbpf::{ aligned_memory::AlignedMemory, ebpf, - elf::Executable, + elf::{Executable, FunctionRegistry}, memory_region::{MemoryMapping, MemoryRegion}, - verifier::RequisiteVerifier, - vm::{ - BuiltinProgram, Config, ContextObject, EbpfVm, ProgramResult, - PROGRAM_ENVIRONMENT_KEY_SHIFT, - }, + vm::{BuiltinProgram, Config, ContextObject, EbpfVm, ProgramResult}, }, solana_sdk::{ entrypoint::{HEAP_LENGTH, SUCCESS}, @@ -86,10 +81,7 @@ pub fn create_program_runtime_environment_v2<'a>( reject_broken_elfs: true, noop_instruction_rate: 256, sanitize_user_provided_values: true, - runtime_environment_key: rand::thread_rng() - .gen::() - .checked_shr(PROGRAM_ENVIRONMENT_KEY_SHIFT) - .unwrap_or(0), + encrypt_runtime_environment: true, external_internal_function_hash_collision: true, reject_callx_r10: true, enable_sbpf_v1: false, @@ -99,7 +91,7 @@ pub fn create_program_runtime_environment_v2<'a>( aligned_memory_mapping: true, // Warning, do not use `Config::default()` so that configuration here is explicit. 
}; - BuiltinProgram::new_loader(config) + BuiltinProgram::new_loader(config, FunctionRegistry::default()) } fn calculate_heap_cost(heap_size: u64, heap_cost: u64) -> u64 { @@ -116,7 +108,7 @@ fn calculate_heap_cost(heap_size: u64, heap_cost: u64) -> u64 { /// Create the SBF virtual machine pub fn create_vm<'a, 'b>( invoke_context: &'a mut InvokeContext<'b>, - program: &'a Executable>, + program: &'a Executable>, ) -> Result>, Box> { let config = program.get_config(); let sbpf_version = program.get_sbpf_version(); @@ -152,13 +144,12 @@ pub fn create_vm<'a, 'b>( fn execute<'a, 'b: 'a>( invoke_context: &'a mut InvokeContext<'b>, - executable: &'a Executable>, + executable: &'a Executable>, ) -> Result<(), Box> { // We dropped the lifetime tracking in the Executor by setting it to 'static, // thus we need to reintroduce the correct lifetime of InvokeContext here again. - let executable = unsafe { - std::mem::transmute::<_, &'a Executable>>(executable) - }; + let executable = + unsafe { std::mem::transmute::<_, &'a Executable>>(executable) }; let log_collector = invoke_context.get_log_collector(); let stack_height = invoke_context.get_stack_height(); let transaction_context = &invoke_context.transaction_context; @@ -763,7 +754,11 @@ mod tests { let transaction_accounts = vec![ ( Pubkey::new_unique(), - load_program_account_from_elf(authority_address, LoaderV4Status::Deployed, "noop"), + load_program_account_from_elf( + authority_address, + LoaderV4Status::Deployed, + "relative_call", + ), ), ( authority_address, @@ -771,7 +766,11 @@ mod tests { ), ( Pubkey::new_unique(), - load_program_account_from_elf(authority_address, LoaderV4Status::Finalized, "noop"), + load_program_account_from_elf( + authority_address, + LoaderV4Status::Finalized, + "relative_call", + ), ), ( clock::id(), @@ -853,7 +852,11 @@ mod tests { let transaction_accounts = vec![ ( Pubkey::new_unique(), - load_program_account_from_elf(authority_address, LoaderV4Status::Retracted, "noop"), + 
load_program_account_from_elf( + authority_address, + LoaderV4Status::Retracted, + "relative_call", + ), ), ( authority_address, @@ -861,7 +864,11 @@ mod tests { ), ( Pubkey::new_unique(), - load_program_account_from_elf(authority_address, LoaderV4Status::Deployed, "noop"), + load_program_account_from_elf( + authority_address, + LoaderV4Status::Deployed, + "relative_call", + ), ), ( clock::id(), @@ -942,7 +949,11 @@ mod tests { let mut transaction_accounts = vec![ ( Pubkey::new_unique(), - load_program_account_from_elf(authority_address, LoaderV4Status::Retracted, "noop"), + load_program_account_from_elf( + authority_address, + LoaderV4Status::Retracted, + "relative_call", + ), ), ( authority_address, @@ -954,19 +965,23 @@ mod tests { ), ( Pubkey::new_unique(), - AccountSharedData::new(20000000, 0, &loader_v4::id()), + AccountSharedData::new(40000000, 0, &loader_v4::id()), ), ( Pubkey::new_unique(), load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata", + "rodata_section", ), ), ( Pubkey::new_unique(), - load_program_account_from_elf(authority_address, LoaderV4Status::Deployed, "noop"), + load_program_account_from_elf( + authority_address, + LoaderV4Status::Deployed, + "relative_call", + ), ), ( clock::id(), @@ -1194,7 +1209,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata", + "rodata_section", ), ), ( @@ -1203,7 +1218,11 @@ mod tests { ), ( Pubkey::new_unique(), - load_program_account_from_elf(authority_address, LoaderV4Status::Retracted, "noop"), + load_program_account_from_elf( + authority_address, + LoaderV4Status::Retracted, + "relative_call", + ), ), ( Pubkey::new_unique(), @@ -1338,7 +1357,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "rodata", + "rodata_section", ), ), ( @@ -1354,7 +1373,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata", + "rodata_section", ), ), 
(clock::id(), clock(1000)), @@ -1418,7 +1437,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "rodata", + "rodata_section", ), ), ( @@ -1426,7 +1445,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata", + "rodata_section", ), ), ( @@ -1519,7 +1538,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Finalized, - "rodata", + "rodata_section", ), ), ( @@ -1535,7 +1554,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata", + "rodata_section", ), ), ( diff --git a/programs/loader-v4/test_elfs/out/noop.so b/programs/loader-v4/test_elfs/out/noop.so deleted file mode 100755 index 7e74e2d7a78cde3e2a2a7dda703855bccd0eb505..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1768 zcmbVM!D6L3Gn%6f4n1R}}ayXW0^-@KdG_d53*OG{4HbSvfTyAhj) zADi9@0GsZXYvT8eTXj?(!%%!3BdpP)@)tknyhGhG#sf=T$5kJO3}&WlV0s*5*K|kW z36rlQUO7k=*ob?-#h86Q+$*roEy*)SfqGeTfkX0~1&U;yjNiW69v48`dk?Ok&-Y)5*1FCK$~>s9Lvlf8a+ zJLq)U!L=LVMtHNj`nOO0vimWwuax)QXfEE7)|vmG^@Mr?=#HVw407bxD*#6Jdd9P2a`PWo=2CY={W3G-_o=8lgC?+ zw(shA|BbqP_6|+=EsRR`&yT)?m=!qO?vBkZ@+nZ@s5?> z+TQVW=^%b4#y25vorRQku@Kc?-@@^rZCa8)0C9>LcZsp7(YV zL;3@AgsQWy7k>v@_PF%wQ=#gVzK&bavL@XV+|oYThWh^zulj`#v7p?FA$~D3GLrYm mdsh8A*N*WPwt$vBh~9$nj`E6r3P*289Pyozm#ZZFy8bs<*>lGL diff --git a/programs/loader-v4/test_elfs/out/relative_call.so b/programs/loader-v4/test_elfs/out/relative_call.so new file mode 100644 index 0000000000000000000000000000000000000000..9f24730e209597b7bb901191dc47dca8bd3a6887 GIT binary patch literal 5384 zcmeHLzi-n}5PnSx=9fe%5@JG@m>96^rb!HRD560^3Kb(|i5$nFp|KMj2Q})z#EP;o zFv35Ql|O@jfrT~P_ugFzrbtMPypvwOdw2KUJzx9=-`iIEab;~yiBy%NcP<%V-K7Qj zvT4vI*j1sb((gs(=$LbGZQ#dCfbD?ofbD?ofbD?ofbD?ofbD?ofbGD4-GSMLd3=We-_a-fBV4Hu z+!g0CDa+;Z^$lYxWBN#2EP{8zcY*l!uL znGKMRZ+$JqMQR)ea?^m)vj@_B_FrQA6-GWb@BSW8Ym+LvgwUVU(voCFJ{yk)S)tm4 
z>?q0ayY2R#8`M4D+m@i;5job?H{H8cFP^ueE0{ViI}g}?ObD(#???D6mD70#wvhL< z&OEPIB*%sFdb2D_2eDQrv(#XbD=!|UDLs?&$XY~Q!ox|C8_l4~lVMa0-Xxtk8V`_n8g2BWMqDWdqOlPCS4c^HKCFziKJ{d%)etA$DH zEb7)z>NF)%t-0Os{a)nvlX@cxqLy0ee`oJt|KZaok8}h6@drJ|I(^@GFLS4PDSvsc zg!=<-mE#bPd@`pRvt7@;NEbf8{<;j_ZQKe`Paq3sLw{a;2qMX_G;)e=Fi!PbK}mI&mWLYddHy}zB0f3 VCre$4eeehVZJt|xmO#cb{|AeCfu;Ze literal 0 HcmV?d00001 diff --git a/programs/loader-v4/test_elfs/out/rodata.so b/programs/loader-v4/test_elfs/out/rodata.so deleted file mode 100755 index 9b8a8b8ed028086865511620166e94704ff3d223..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1904 zcmb_cO=}cE5Uu^tM5CAwl%ODU^OIo{4TxS6jDoUy$jN{p)TB!)FCUZsayAhj)jkz3z2?qIKMzVogtmkqBN z5`G>}6Ziv*=f1vLpK<>}Y-d?2<-G+axS+TsRavzCGJlthFYn3XaelgzjpxN9G*6xRmaD$_>5t_REnEInd9aL*&-EkcEH7k#}zgc-iJ%ji2c+L2~@)%~F z{Y=6%X-8QkVLOkbey>$H!@Tb>OC{WkKZMpKmm#5sXVF{5?l4Q8 z=TO|_VQc24z1iM`9GLZ@-ITe!O8mw~6VA`=|66qMrK-lw-{zl<%I%caJfcP47DTy{YZj zbeeE|{s)M6Et2nn>vMv2b&!q^I;T{6z$x};yL!`dMStlL3v KoF>)x^?v|FRc+S* diff --git a/programs/loader-v4/test_elfs/out/rodata_section.so b/programs/loader-v4/test_elfs/out/rodata_section.so new file mode 100644 index 0000000000000000000000000000000000000000..8868f7e63d7e85b0963ee207dc796f3c0b2fdd51 GIT binary patch literal 5424 zcmeHL&ubGw6rTLhrnaEAAUWwAgW!_IrVTx%wH1VI@gSmJ!e+DDpxKno3eBm1h5v^K z5B@3rH#`Ym;`?^rx5OnE@g&TH+3(Gp@4Ywkm|2*)J`JANYc)e@n2oXr>p%xCcM8P{ z3{2lN<-27z4LG+jbPu0G7!7FEMkEhoO=(vBFit+OB@IHX`rWACjc~vxzJ!wqk$!ct z-Imr-zmDkeBYqVfLM-zZX7_a!a20SBa20SBa20SBa20SBa20SBa20SB_^&JQO)kM! 
zR`630_u+-!kH6@u9xFan#GM^-zW~!#KYbqHb=&OfxP~*omdhDT$N|A!9Kw71R?#(W z#r#b{^p?iIqxNmZ2a10S$iJhrfNmKePO@}yIh!OIEjf$t0Kp{r7^jCe2u`fu^Sa(% zNn5KRcGLV=RtF!6p=RoNUT_%y4T$wd`5u8Td6pJhfu;yHoX1f%nI&G>?~SAP`}<_xNBw9tj>mo+`=@55p2L&3=SOERp5+Vr#}8_T#{GkE z=yT&)=qtkd(zGE8_;?S3SZU~6-KbT5wSQN*$lZ_z=}3b=-<9}&RPJApM0gIczp>Yu zpZlNhSzVAt;E%Y>&vRkLhTQb$`Dp2Z::from_elf( - &elf, - program_runtime_environment.clone(), - ) - .unwrap(); + let _ = Executable::::from_elf(&elf, program_runtime_environment.clone()) + .unwrap(); }); } @@ -124,19 +118,16 @@ fn bench_program_alu(bencher: &mut Bencher) { true, false, ); - let executable = Executable::::from_elf( - &elf, - Arc::new(program_runtime_environment.unwrap()), - ) - .unwrap(); + let mut executable = + Executable::::from_elf(&elf, Arc::new(program_runtime_environment.unwrap())) + .unwrap(); - let mut verified_executable = - Executable::::verified(executable).unwrap(); + executable.verify::().unwrap(); - verified_executable.jit_compile().unwrap(); + executable.jit_compile().unwrap(); create_vm!( vm, - &verified_executable, + &executable, vec![MemoryRegion::new_writable(&mut inner_iter, MM_INPUT_START)], vec![], &mut invoke_context, @@ -146,7 +137,7 @@ fn bench_program_alu(bencher: &mut Bencher) { println!("Interpreted:"); vm.context_object_pointer .mock_set_remaining(std::i64::MAX as u64); - let (instructions, result) = vm.execute_program(&verified_executable, true); + let (instructions, result) = vm.execute_program(&executable, true); assert_eq!(SUCCESS, result.unwrap()); assert_eq!(ARMSTRONG_LIMIT, LittleEndian::read_u64(&inner_iter)); assert_eq!( @@ -157,7 +148,7 @@ fn bench_program_alu(bencher: &mut Bencher) { bencher.iter(|| { vm.context_object_pointer .mock_set_remaining(std::i64::MAX as u64); - vm.execute_program(&verified_executable, true).1.unwrap(); + vm.execute_program(&executable, true).1.unwrap(); }); let summary = bencher.bench(|_bencher| Ok(())).unwrap().unwrap(); println!(" {:?} instructions", instructions); @@ -168,10 +159,7 
@@ fn bench_program_alu(bencher: &mut Bencher) { println!("{{ \"type\": \"bench\", \"name\": \"bench_program_alu_interpreted_mips\", \"median\": {:?}, \"deviation\": 0 }}", mips); println!("JIT to native:"); - assert_eq!( - SUCCESS, - vm.execute_program(&verified_executable, false).1.unwrap() - ); + assert_eq!(SUCCESS, vm.execute_program(&executable, false).1.unwrap()); assert_eq!(ARMSTRONG_LIMIT, LittleEndian::read_u64(&inner_iter)); assert_eq!( ARMSTRONG_EXPECTED, @@ -181,7 +169,7 @@ fn bench_program_alu(bencher: &mut Bencher) { bencher.iter(|| { vm.context_object_pointer .mock_set_remaining(std::i64::MAX as u64); - vm.execute_program(&verified_executable, false).1.unwrap(); + vm.execute_program(&executable, false).1.unwrap(); }); let summary = bencher.bench(|_bencher| Ok(())).unwrap().unwrap(); println!(" {:?} instructions", instructions); @@ -243,14 +231,11 @@ fn bench_create_vm(bencher: &mut Bencher) { true, false, ); - let executable = Executable::::from_elf( - &elf, - Arc::new(program_runtime_environment.unwrap()), - ) - .unwrap(); + let executable = + Executable::::from_elf(&elf, Arc::new(program_runtime_environment.unwrap())) + .unwrap(); - let verified_executable = - Executable::::verified(executable).unwrap(); + executable.verify::().unwrap(); // Serialize account data let (_serialized, regions, account_lengths) = serialize_parameters( @@ -267,7 +252,7 @@ fn bench_create_vm(bencher: &mut Bencher) { bencher.iter(|| { create_vm!( vm, - &verified_executable, + &executable, clone_regions(®ions), account_lengths.clone(), &mut invoke_context, @@ -305,18 +290,15 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { true, false, ); - let executable = Executable::::from_elf( - &elf, - Arc::new(program_runtime_environment.unwrap()), - ) - .unwrap(); + let executable = + Executable::::from_elf(&elf, Arc::new(program_runtime_environment.unwrap())) + .unwrap(); - let verified_executable = - Executable::::verified(executable).unwrap(); + 
executable.verify::().unwrap(); create_vm!( vm, - &verified_executable, + &executable, regions, account_lengths, &mut invoke_context, @@ -324,7 +306,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { let mut vm = vm.unwrap(); let mut measure = Measure::start("tune"); - let (instructions, _result) = vm.execute_program(&verified_executable, true); + let (instructions, _result) = vm.execute_program(&executable, true); measure.stop(); assert_eq!( From 51b2b2d745dc88d7dc79ee83f2a8150bbbc7445c Mon Sep 17 00:00:00 2001 From: Kevin Ji <1146876+kevinji@users.noreply.github.com> Date: Wed, 6 Sep 2023 08:29:48 -0400 Subject: [PATCH 024/407] docs(contributing): fix syntax highlighting (#32928) --- CONTRIBUTING.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1051568ec0f61d..5894203afc291a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -250,7 +250,7 @@ before the PR can be merged. Here are the steps: * Under the newly-created directory, create a Cargo.toml file. Below is an example template: -``` +```toml [package] name = "solana-" version = "0.0.1" @@ -285,7 +285,9 @@ edition = "2021" * All Rust code is linted with Clippy. If you'd prefer to ignore its advice, do so explicitly: - ```rust #[allow(clippy::too_many_arguments)] ``` + ```rust + #[allow(clippy::too_many_arguments)] + ``` Note: Clippy defaults can be overridden in the top-level file `.clippy.toml`. From 47d828cd7166ba90ff3d03e0d9633cf54b5e5185 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Sep 2023 12:33:00 +0000 Subject: [PATCH 025/407] build(deps): bump chrono from 0.4.28 to 0.4.29 (#33160) * build(deps): bump chrono from 0.4.28 to 0.4.29 Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.28 to 0.4.29. 
- [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.28...v0.4.29) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 551b056c5ee9a5..1c39cf4e23c328 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1017,9 +1017,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ed24df0632f708f5f6d8082675bef2596f7084dee3dd55f632290bf35bfe0f" +checksum = "d87d9d13be47a5b7c3907137f1290b0459a7f80efb26be8c52afb11963bccb02" dependencies = [ "android-tzdata", "iana-time-zone", diff --git a/Cargo.toml b/Cargo.toml index 0c7669ea09bb98..fe99fa0ad75593 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -158,7 +158,7 @@ bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" cc = "1.0.83" -chrono = { version = "0.4.28", default-features = false } +chrono = { version = "0.4.29", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" console = "0.15.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a1e5efda06447d..84cf0ac517c4ca 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -901,9 +901,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.28" +version = "0.4.29" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ed24df0632f708f5f6d8082675bef2596f7084dee3dd55f632290bf35bfe0f" +checksum = "d87d9d13be47a5b7c3907137f1290b0459a7f80efb26be8c52afb11963bccb02" dependencies = [ "android-tzdata", "iana-time-zone", From 865276f0edc3cd7ad87eec6742554fffc9749425 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Sep 2023 12:33:55 +0000 Subject: [PATCH 026/407] build(deps): bump bytemuck from 1.13.1 to 1.14.0 (#33161) * build(deps): bump bytemuck from 1.13.1 to 1.14.0 Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.13.1 to 1.14.0. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/v1.13.1...v1.14.0) --- updated-dependencies: - dependency-name: bytemuck dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c39cf4e23c328..27c50c07475220 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -870,9 +870,9 @@ checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" [[package]] name = "bytemuck" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" +checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" dependencies = [ "bytemuck_derive", ] diff --git a/Cargo.toml b/Cargo.toml index fe99fa0ad75593..f1ee5fd826412b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -151,7 +151,7 @@ bs58 = 
"0.4.0" bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.3" -bytemuck = "1.13.1" +bytemuck = "1.14.0" byteorder = "1.4.3" bytes = "1.2" bzip2 = "0.4.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 84cf0ac517c4ca..895200bce4c5e1 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -801,9 +801,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" +checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" dependencies = [ "bytemuck_derive", ] From 224eea42d5711b327354dc2d537cb6b5f7947eb6 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 6 Sep 2023 09:07:09 -0500 Subject: [PATCH 027/407] Add unit test for Bank::register_hard_fork() (#32902) --- runtime/src/bank/tests.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 0a7dbdb2a2cb34..c7bc57b60c7a01 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -13627,6 +13627,30 @@ fn test_calculate_stake_vote_rewards() { ); } +#[test] +fn test_register_hard_fork() { + fn get_hard_forks(bank: &Bank) -> Vec { + bank.hard_forks().iter().map(|(slot, _)| *slot).collect() + } + + let (genesis_config, _mint_keypair) = create_genesis_config(10); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + + let bank7 = Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 7); + bank7.register_hard_fork(6); + bank7.register_hard_fork(7); + bank7.register_hard_fork(8); + // Bank7 will reject slot 6 since it is older, but allow the other two hard forks + assert_eq!(get_hard_forks(&bank7), vec![7, 8]); + + let bank9 = Bank::new_from_parent(bank0, &Pubkey::default(), 9); + bank9.freeze(); + bank9.register_hard_fork(9); + 
bank9.register_hard_fork(10); + // Bank9 will reject slot 9 since it has already been frozen + assert_eq!(get_hard_forks(&bank9), vec![7, 8, 10]); +} + #[test] fn test_last_restart_slot() { fn last_restart_slot_dirty(bank: &Bank) -> bool { From 904b2a76726cd250b08cb35a1bfb247d03936f8a Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Wed, 6 Sep 2023 09:58:58 -0500 Subject: [PATCH 028/407] finalize prioritization_fee_cache after oc-ed bank is frozen (#33100) --- rpc/src/optimistically_confirmed_bank_tracker.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs index 272dbc6b657cb8..3d3643b44a1050 100644 --- a/rpc/src/optimistically_confirmed_bank_tracker.rs +++ b/rpc/src/optimistically_confirmed_bank_tracker.rs @@ -181,6 +181,7 @@ impl OptimisticallyConfirmedBankTracker { last_notified_confirmed_slot: &mut Slot, pending_optimistically_confirmed_banks: &mut HashSet, slot_notification_subscribers: &Option>>>, + prioritization_fee_cache: &PrioritizationFeeCache, ) { if bank.is_frozen() { if bank.slot() > *last_notified_confirmed_slot { @@ -194,6 +195,9 @@ impl OptimisticallyConfirmedBankTracker { slot_notification_subscribers, SlotNotification::OptimisticallyConfirmed(bank.slot()), ); + + // finalize block's minimum prioritization fee cache for this bank + prioritization_fee_cache.finalize_priority_fee(bank.slot()); } } else if bank.slot() > bank_forks.read().unwrap().root() { pending_optimistically_confirmed_banks.insert(bank.slot()); @@ -209,6 +213,7 @@ impl OptimisticallyConfirmedBankTracker { last_notified_confirmed_slot: &mut Slot, pending_optimistically_confirmed_banks: &mut HashSet, slot_notification_subscribers: &Option>>>, + prioritization_fee_cache: &PrioritizationFeeCache, ) { for confirmed_bank in bank.parents_inclusive().iter().rev() { if confirmed_bank.slot() > slot_threshold { @@ 
-223,6 +228,7 @@ impl OptimisticallyConfirmedBankTracker { last_notified_confirmed_slot, pending_optimistically_confirmed_banks, slot_notification_subscribers, + prioritization_fee_cache, ); } } @@ -291,6 +297,7 @@ impl OptimisticallyConfirmedBankTracker { last_notified_confirmed_slot, pending_optimistically_confirmed_banks, slot_notification_subscribers, + prioritization_fee_cache, ); *highest_confirmed_slot = slot; @@ -307,9 +314,6 @@ impl OptimisticallyConfirmedBankTracker { slot, timestamp: timestamp(), }); - - // finalize block's minimum prioritization fee cache for this bank - prioritization_fee_cache.finalize_priority_fee(slot); } BankNotification::Frozen(bank) => { let frozen_slot = bank.slot(); @@ -348,6 +352,7 @@ impl OptimisticallyConfirmedBankTracker { last_notified_confirmed_slot, pending_optimistically_confirmed_banks, slot_notification_subscribers, + prioritization_fee_cache, ); let mut w_optimistically_confirmed_bank = From d921b9a44e263f5e30af44ae636ab5efb653ab45 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 6 Sep 2023 11:38:30 -0400 Subject: [PATCH 029/407] Adds metrics when purging banks with the same slot (#33153) --- runtime/src/accounts_background_service.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 0e7c158375cb41..2b6b9fef2bb6f2 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -512,6 +512,18 @@ impl PrunedBanksRequestHandler { let grouped_banks_to_purge: Vec<_> = GroupBy::new(banks_to_purge.as_slice(), |a, b| a.0 == b.0).collect(); + // Log whenever we need to handle banks with the same slot. Purposely do this *before* we + // call `purge_slot()` to ensure we get the datapoint (in case there's an assert/panic). 
+ let num_banks_with_same_slot = + num_banks_to_purge.saturating_sub(grouped_banks_to_purge.len()); + if num_banks_with_same_slot > 0 { + datapoint_info!( + "pruned_banks_request_handler", + ("num_pruned_banks", num_banks_to_purge, i64), + ("num_banks_with_same_slot", num_banks_with_same_slot, i64), + ); + } + // Purge all the slots in parallel // Banks for the same slot are purged sequentially let accounts_db = bank.rc.accounts.accounts_db.as_ref(); From 9e156f88f4dd7b240cf31efd9ca42cf3794ae9e0 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 6 Sep 2023 12:00:18 -0400 Subject: [PATCH 030/407] Removes invariant `is_serialized_with_abs` param (#33154) --- runtime/src/accounts_background_service.rs | 8 ++++---- runtime/src/bank/tests.rs | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 2b6b9fef2bb6f2..dc635f7e67c8fe 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -501,7 +501,7 @@ pub struct PrunedBanksRequestHandler { } impl PrunedBanksRequestHandler { - pub fn handle_request(&self, bank: &Bank, is_serialized_with_abs: bool) -> usize { + pub fn handle_request(&self, bank: &Bank) -> usize { let mut banks_to_purge: Vec<_> = self.pruned_banks_receiver.try_iter().collect(); // We need a stable sort to ensure we purge banks—with the same slot—in the same order // they were sent into the channel. 
@@ -530,7 +530,7 @@ impl PrunedBanksRequestHandler { accounts_db.thread_pool_clean.install(|| { grouped_banks_to_purge.into_par_iter().for_each(|group| { group.iter().for_each(|(slot, bank_id)| { - accounts_db.purge_slot(*slot, *bank_id, is_serialized_with_abs); + accounts_db.purge_slot(*slot, *bank_id, true); }) }); }); @@ -545,7 +545,7 @@ impl PrunedBanksRequestHandler { total_remove_slots_time: &mut u64, ) { let mut remove_slots_time = Measure::start("remove_slots_time"); - *removed_slots_count += self.handle_request(bank, true); + *removed_slots_count += self.handle_request(bank); remove_slots_time.stop(); *total_remove_slots_time += remove_slots_time.as_us(); @@ -1166,7 +1166,7 @@ mod test { drop(fork2_bank1); drop(fork0_bank1); drop(fork0_bank0); - let num_banks_purged = pruned_banks_request_handler.handle_request(&fork0_bank3, true); + let num_banks_purged = pruned_banks_request_handler.handle_request(&fork0_bank3); assert_eq!(num_banks_purged, 7); } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index c7bc57b60c7a01..b0f758a25f82c1 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -8562,7 +8562,7 @@ fn test_store_scan_consistency_unrooted() { current_major_fork_bank.clean_accounts_for_tests(); // Move purge here so that Bank::drop()->purge_slots() doesn't race // with clean. 
Simulates the call from AccountsBackgroundService - pruned_banks_request_handler.handle_request(¤t_major_fork_bank, true); + pruned_banks_request_handler.handle_request(¤t_major_fork_bank); } }, Some(Box::new(SendDroppedBankCallback::new(pruned_banks_sender))), From a8bc6ebe38dfd1c20ea71f98fa67eefd3475e0e1 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Wed, 6 Sep 2023 11:03:05 -0500 Subject: [PATCH 031/407] Use `write_all` for binned account hash file writes (#33095) write_all Co-authored-by: HaoranYi --- accounts-db/src/accounts_hash.rs | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 155597ea38a5ae..ac4134cf80a936 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -97,18 +97,16 @@ impl AccountHashesFile { )); } let count_and_writer = self.count_and_writer.as_mut().unwrap(); - assert_eq!( - std::mem::size_of::(), - count_and_writer - .1 - .write(hash.as_ref()) - .unwrap_or_else(|err| { - panic!( - "Unable to write file within {}: {err}", - self.dir_for_temp_cache_files.display() - ) - }) - ); + count_and_writer + .1 + .write_all(hash.as_ref()) + .unwrap_or_else(|err| { + panic!( + "Unable to write file within {}: {err}", + self.dir_for_temp_cache_files.display() + ) + }); + count_and_writer.0 += 1; } } From 0c896c60764e0835f1ec0d2aad1f0f3cb4866d94 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 7 Sep 2023 00:19:00 +0800 Subject: [PATCH 032/407] ci: trigger client_targets pipeline when the cargo file changes (#33163) --- .github/workflows/client-targets.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml index 3b3d1779a1be29..97118918ef8442 100644 --- a/.github/workflows/client-targets.yml +++ b/.github/workflows/client-targets.yml @@ -12,6 +12,8 @@ on: - "sdk/**" - ".github/workflows/client-targets.yml" - "ci/rust-version.sh" + - 
"**/Cargo.toml" + - "**/Cargo.lock" env: CARGO_TERM_COLOR: always From 88ee8f58207a7b5f3fd08c17ae98ac0c3bce99a2 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 6 Sep 2023 16:46:51 +0000 Subject: [PATCH 033/407] replaces once_cell::sync::OnceCell with std::sync::OnceLock (#33140) std::sync::OnceLock has become stable since rust 1.70.0 and there is no longer a need for an external crate dependency. --- Cargo.lock | 3 --- Cargo.toml | 1 - accounts-db/Cargo.toml | 1 - accounts-db/src/accounts_index.rs | 7 +++--- accounts-db/src/tiered_storage.rs | 8 +++---- frozen-abi/Cargo.toml | 2 -- frozen-abi/src/abi_example.rs | 6 ----- programs/sbf/Cargo.lock | 3 --- runtime/Cargo.toml | 1 - runtime/src/vote_account.rs | 39 ++++++++++++++++++++++++------- 10 files changed, 37 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 27c50c07475220..2f46b23e538d3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5173,7 +5173,6 @@ dependencies = [ "num-traits", "num_cpus", "num_enum 0.6.1", - "once_cell", "ouroboros", "percentage", "qualifier_attr", @@ -5937,7 +5936,6 @@ dependencies = [ "lazy_static", "log", "memmap2", - "once_cell", "rustc_version 0.4.0", "serde", "serde_bytes", @@ -6854,7 +6852,6 @@ dependencies = [ "num-traits", "num_cpus", "num_enum 0.6.1", - "once_cell", "ouroboros", "percentage", "rand 0.8.5", diff --git a/Cargo.toml b/Cargo.toml index f1ee5fd826412b..1971c42658cfc3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -243,7 +243,6 @@ num_cpus = "1.16.0" num_enum = "0.6.1" num-derive = "0.3" num-traits = "0.2" -once_cell = "1.18.0" openssl = "0.10" ouroboros = "0.15.6" parking_lot = "0.12" diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index b53e1d15efe72a..2e2685b901bb5c 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -36,7 +36,6 @@ num-derive = { workspace = true } num-traits = { workspace = true } num_cpus = { workspace = true } num_enum = { workspace = true } -once_cell = { workspace = true } ouroboros = { 
workspace = true } percentage = { workspace = true } qualifier_attr = { workspace = true } diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 916a641723f79f..b9038441d12515 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -13,7 +13,6 @@ use { secondary_index::*, }, log::*, - once_cell::sync::OnceCell, ouroboros::self_referencing, rand::{thread_rng, Rng}, rayon::{ @@ -37,7 +36,7 @@ use { path::PathBuf, sync::{ atomic::{AtomicBool, AtomicU64, AtomicU8, AtomicUsize, Ordering}, - Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, + Arc, Mutex, OnceLock, RwLock, RwLockReadGuard, RwLockWriteGuard, }, }, thiserror::Error, @@ -703,7 +702,7 @@ pub struct AccountsIndex + Into> { pub max_distance_to_min_scan_slot: AtomicU64, /// populated at generate_index time - accounts that could possibly be rent paying - pub rent_paying_accounts_by_partition: OnceCell, + pub rent_paying_accounts_by_partition: OnceLock, } impl + Into> AccountsIndex { @@ -737,7 +736,7 @@ impl + Into> AccountsIndex { roots_removed: AtomicUsize::default(), active_scans: AtomicUsize::default(), max_distance_to_min_scan_slot: AtomicU64::default(), - rent_paying_accounts_by_partition: OnceCell::default(), + rent_paying_accounts_by_partition: OnceLock::default(), } } diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 5ced4d537b981d..65d3485dccb064 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -19,13 +19,13 @@ use { error::TieredStorageError, footer::{AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat}, index::AccountIndexFormat, - once_cell::sync::OnceCell, readable::TieredStorageReader, solana_sdk::{account::ReadableAccount, hash::Hash}, std::{ borrow::Borrow, fs::OpenOptions, path::{Path, PathBuf}, + sync::OnceLock, }, writer::TieredStorageWriter, }; @@ -45,7 +45,7 @@ pub struct TieredStorageFormat { #[derive(Debug)] pub struct TieredStorage { 
- reader: OnceCell, + reader: OnceLock, format: Option, path: PathBuf, } @@ -66,7 +66,7 @@ impl TieredStorage { /// is called. pub fn new_writable(path: impl Into, format: TieredStorageFormat) -> Self { Self { - reader: OnceCell::::new(), + reader: OnceLock::::new(), format: Some(format), path: path.into(), } @@ -77,7 +77,7 @@ impl TieredStorage { pub fn new_readonly(path: impl Into) -> TieredStorageResult { let path = path.into(); Ok(Self { - reader: OnceCell::with_value(TieredStorageReader::new_from_path(&path)?), + reader: TieredStorageReader::new_from_path(&path).map(OnceLock::from)?, format: None, path, }) diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 3cd9ef73447fd3..3121b6968ebdf5 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -14,7 +14,6 @@ bs58 = { workspace = true } bv = { workspace = true, features = ["serde"] } lazy_static = { workspace = true } log = { workspace = true, features = ["std"] } -once_cell = { workspace = true } serde = { workspace = true, features = ["derive", "rc"] } serde_bytes = { workspace = true } serde_derive = { workspace = true } @@ -32,7 +31,6 @@ either = { workspace = true, features = ["use_std"] } generic-array = { workspace = true, features = ["serde", "more_lengths"] } im = { workspace = true, features = ["rayon", "serde"] } memmap2 = { workspace = true } -once_cell = { workspace = true, features = ["alloc", "race"] } subtle = { workspace = true } [target.'cfg(any(unix, windows))'.dependencies] diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index 6ac6804861fd67..c7765c4a573544 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -555,9 +555,3 @@ impl AbiEnumVisitor for Result { digester.create_child() } } - -impl AbiExample for once_cell::sync::OnceCell { - fn example() -> Self { - Self::with_value(T::example()) - } -} diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 895200bce4c5e1..bc743d1bf0aa77 100644 --- 
a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4496,7 +4496,6 @@ dependencies = [ "num-traits", "num_cpus", "num_enum 0.6.1", - "once_cell", "ouroboros", "percentage", "qualifier_attr", @@ -4942,7 +4941,6 @@ dependencies = [ "lazy_static", "log", "memmap2", - "once_cell", "rustc_version", "serde", "serde_bytes", @@ -5548,7 +5546,6 @@ dependencies = [ "num-traits", "num_cpus", "num_enum 0.6.1", - "once_cell", "ouroboros", "percentage", "rand 0.8.5", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index cfc8b65496a79a..238d5dfb36653d 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -37,7 +37,6 @@ num-derive = { workspace = true } num-traits = { workspace = true } num_cpus = { workspace = true } num_enum = { workspace = true } -once_cell = { workspace = true } ouroboros = { workspace = true } percentage = { workspace = true } rand = { workspace = true } diff --git a/runtime/src/vote_account.rs b/runtime/src/vote_account.rs index efbddb40ef3b08..93789e3eed87af 100644 --- a/runtime/src/vote_account.rs +++ b/runtime/src/vote_account.rs @@ -1,6 +1,7 @@ +#[cfg(RUSTC_WITH_SPECIALIZATION)] +use solana_frozen_abi::abi_example::AbiExample; use { itertools::Itertools, - once_cell::sync::OnceCell, serde::ser::{Serialize, Serializer}, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, @@ -12,7 +13,7 @@ use { cmp::Ordering, collections::{hash_map::Entry, HashMap}, iter::FromIterator, - sync::Arc, + sync::{Arc, OnceLock}, }, thiserror::Error, }; @@ -29,20 +30,20 @@ pub enum Error { InvalidOwner(/*owner:*/ Pubkey), } -#[derive(Debug, AbiExample)] +#[derive(Debug)] struct VoteAccountInner { account: AccountSharedData, - vote_state: OnceCell>, + vote_state: OnceLock>, } pub type VoteAccountsHashMap = HashMap; -#[derive(Clone, Debug, AbiExample, Deserialize)] +#[derive(Clone, Debug, Deserialize)] #[serde(from = "Arc")] pub struct VoteAccounts { vote_accounts: Arc, // Inner Arc is meant to implement copy-on-write semantics. 
- staked_nodes: OnceCell< + staked_nodes: OnceLock< Arc< HashMap< Pubkey, // VoteAccount.vote_state.node_pubkey. @@ -243,7 +244,7 @@ impl TryFrom for VoteAccountInner { } Ok(Self { account, - vote_state: OnceCell::new(), + vote_state: OnceLock::new(), }) } } @@ -262,7 +263,7 @@ impl Default for VoteAccounts { fn default() -> Self { Self { vote_accounts: Arc::default(), - staked_nodes: OnceCell::new(), + staked_nodes: OnceLock::new(), } } } @@ -281,7 +282,7 @@ impl From> for VoteAccounts { fn from(vote_accounts: Arc) -> Self { Self { vote_accounts, - staked_nodes: OnceCell::new(), + staked_nodes: OnceLock::new(), } } } @@ -316,6 +317,26 @@ impl Serialize for VoteAccounts { } } +#[cfg(RUSTC_WITH_SPECIALIZATION)] +impl AbiExample for VoteAccountInner { + fn example() -> Self { + Self { + account: AccountSharedData::example(), + vote_state: OnceLock::from(Result::::example()), + } + } +} + +#[cfg(RUSTC_WITH_SPECIALIZATION)] +impl AbiExample for VoteAccounts { + fn example() -> Self { + Self { + vote_accounts: Arc::::example(), + staked_nodes: OnceLock::from(Arc::>::example()), + } + } +} + #[cfg(test)] mod tests { use { From a80819b0296ff8888050c293688c4d2188ad575c Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 6 Sep 2023 13:02:52 -0400 Subject: [PATCH 034/407] PrunedBanksRequestHandler::handle_request() is only pub with dcou (#33155) --- Cargo.lock | 1 + programs/sbf/Cargo.lock | 1 + runtime/Cargo.toml | 1 + runtime/src/accounts_background_service.rs | 5 ++++- 4 files changed, 7 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 2f46b23e538d3e..0ba7bfa59c281e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6854,6 +6854,7 @@ dependencies = [ "num_enum 0.6.1", "ouroboros", "percentage", + "qualifier_attr", "rand 0.8.5", "rand_chacha 0.3.1", "rayon", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index bc743d1bf0aa77..23158b856071b1 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5548,6 +5548,7 @@ dependencies = 
[ "num_enum 0.6.1", "ouroboros", "percentage", + "qualifier_attr", "rand 0.8.5", "rayon", "regex", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 238d5dfb36653d..319e6f4d7f0f38 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -39,6 +39,7 @@ num_cpus = { workspace = true } num_enum = { workspace = true } ouroboros = { workspace = true } percentage = { workspace = true } +qualifier_attr = { workspace = true } rand = { workspace = true } rayon = { workspace = true } regex = { workspace = true } diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index dc635f7e67c8fe..627ccbf76adaa5 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -3,6 +3,8 @@ //! This can be expensive since we have to walk the append vecs being cleaned up. mod stats; +#[cfg(feature = "dev-context-only-utils")] +use qualifier_attr::qualifiers; use { crate::{ bank::{Bank, BankSlotDelta, DropCallback}, @@ -501,7 +503,8 @@ pub struct PrunedBanksRequestHandler { } impl PrunedBanksRequestHandler { - pub fn handle_request(&self, bank: &Bank) -> usize { + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn handle_request(&self, bank: &Bank) -> usize { let mut banks_to_purge: Vec<_> = self.pruned_banks_receiver.try_iter().collect(); // We need a stable sort to ensure we purge banks—with the same slot—in the same order // they were sent into the channel. 
From a3dc3eb37c18db16f381d7cfb70f5c65892c87eb Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 6 Sep 2023 14:19:01 -0400 Subject: [PATCH 035/407] Takes inner HashSet when dropping CacheHashData (#33169) --- accounts-db/src/cache_hash_data.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index fb50e2113d86ca..a86822eb85633d 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -226,12 +226,12 @@ impl CacheHashData { result } fn delete_old_cache_files(&self) { - let pre_existing_cache_files = self.pre_existing_cache_files.lock().unwrap(); - if !pre_existing_cache_files.is_empty() { + let old_cache_files = std::mem::take(&mut *self.pre_existing_cache_files.lock().unwrap()); + if !old_cache_files.is_empty() { self.stats .unused_cache_files - .fetch_add(pre_existing_cache_files.len(), Ordering::Relaxed); - for file_name in pre_existing_cache_files.iter() { + .fetch_add(old_cache_files.len(), Ordering::Relaxed); + for file_name in old_cache_files.iter() { let result = self.cache_dir.join(file_name); let _ = fs::remove_file(result); } From 5d1538013206c1afe6f9d3c8a1a870cb0bfa9dcd Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 7 Sep 2023 02:47:36 +0800 Subject: [PATCH 036/407] Bump solana_rbpf to v0.7.1 (#33168) bump solana_rbpf to 0.7.1 --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- programs/sbf/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ba7bfa59c281e..5a6781019d15c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7525,9 +7525,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "339e8963a8e2721227e46cf7a8488957db94cde0f35d3a769e292baaebdbeb44" +checksum = "d08e812351a5c726e51fa6aaae8687c661acfeb9a8b651bd58fc413a58701a58" 
dependencies = [ "byteorder", "combine", diff --git a/Cargo.toml b/Cargo.toml index 1971c42658cfc3..1e08f9173f4d11 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,7 +293,7 @@ siphasher = "0.3.11" smpl_jwt = "0.7.1" socket2 = "0.5.3" soketto = "0.7" -solana_rbpf = "=0.7.0" +solana_rbpf = "=0.7.1" solana-account-decoder = { path = "account-decoder", version = "=1.17.0" } solana-accounts-db = { path = "accounts-db", version = "=1.17.0" } solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=1.17.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 23158b856071b1..03a54b3056b984 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6447,9 +6447,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "339e8963a8e2721227e46cf7a8488957db94cde0f35d3a769e292baaebdbeb44" +checksum = "d08e812351a5c726e51fa6aaae8687c661acfeb9a8b651bd58fc413a58701a58" dependencies = [ "byteorder 1.4.3", "combine", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index f68d633c1b75de..cd5ed1f479c2f2 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -25,7 +25,7 @@ rand = "0.8" rustversion = "1.0.14" serde = "1.0.112" serde_json = "1.0.56" -solana_rbpf = "=0.7.0" +solana_rbpf = "=0.7.1" solana-account-decoder = { path = "../../account-decoder", version = "=1.17.0" } solana-accounts-db = { path = "../../accounts-db", version = "=1.17.0" } solana-address-lookup-table-program = { path = "../../programs/address-lookup-table", version = "=1.17.0" } From 377b6003a888880b79ea2f43a0bfaad661cdd4a3 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Wed, 6 Sep 2023 11:54:18 -0700 Subject: [PATCH 037/407] Check loader-v4::id() as a valid program owner (#33167) --- runtime/src/bank.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 
5a6f2923b4f1fb..daa6bfff186f41 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5079,6 +5079,7 @@ impl Bank { bpf_loader_upgradeable::id(), bpf_loader::id(), bpf_loader_deprecated::id(), + loader_v4::id(), ]; let program_owners_refs: Vec<&Pubkey> = program_owners.iter().collect(); let mut program_accounts_map = self.rc.accounts.filter_executable_program_accounts( From f4816dc0cf141ec73edc0810d9cceb104735ce74 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 6 Sep 2023 15:25:58 -0400 Subject: [PATCH 038/407] Moves CacheHashData test-only methods into tests module (#33170) --- accounts-db/src/cache_hash_data.rs | 53 ++++++++++++++++-------------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index a86822eb85633d..196474f49c13dd 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -254,23 +254,6 @@ impl CacheHashData { } } - #[cfg(test)] - /// load from 'file_name' into 'accumulator' - pub(crate) fn load( - &self, - file_name: impl AsRef, - accumulator: &mut SavedType, - start_bin_index: usize, - bin_calculator: &PubkeyBinCalculator24, - ) -> Result<(), std::io::Error> { - let mut m = Measure::start("overall"); - let cache_file = self.load_map(file_name)?; - cache_file.load_all(accumulator, start_bin_index, bin_calculator); - m.stop(); - self.stats.load_us.fetch_add(m.as_us(), Ordering::Relaxed); - Ok(()) - } - /// open a cache hash file, but don't map it. /// This allows callers to know a file exists, but preserves the # mmapped files. 
pub(crate) fn get_file_reference_to_map_later( @@ -298,13 +281,6 @@ impl CacheHashData { }) } - #[cfg(test)] - /// map 'file_name' into memory - fn load_map(&self, file_name: impl AsRef) -> Result { - let reference = self.get_file_reference_to_map_later(file_name)?; - reference.map() - } - pub(crate) fn pre_existing_cache_file_will_be_used(&self, file_name: impl AsRef) { self.pre_existing_cache_files .lock() @@ -382,9 +358,36 @@ impl CacheHashData { } #[cfg(test)] -pub mod tests { +mod tests { use {super::*, rand::Rng}; + impl CacheHashData { + /// load from 'file_name' into 'accumulator' + fn load( + &self, + file_name: impl AsRef, + accumulator: &mut SavedType, + start_bin_index: usize, + bin_calculator: &PubkeyBinCalculator24, + ) -> Result<(), std::io::Error> { + let mut m = Measure::start("overall"); + let cache_file = self.load_map(file_name)?; + cache_file.load_all(accumulator, start_bin_index, bin_calculator); + m.stop(); + self.stats.load_us.fetch_add(m.as_us(), Ordering::Relaxed); + Ok(()) + } + + /// map 'file_name' into memory + fn load_map( + &self, + file_name: impl AsRef, + ) -> Result { + let reference = self.get_file_reference_to_map_later(file_name)?; + reference.map() + } + } + #[test] fn test_read_write() { // generate sample data From 528a03f32af9f93187d38b1fadde923b55568321 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 7 Sep 2023 12:52:57 +0000 Subject: [PATCH 039/407] removes outdated matches crate from dependencies (#33172) removes outdated matches crate from the dependencies std::matches has been stable since rust 1.42.0. Other use-cases are covered by assert_matches crate. 
--- Cargo.lock | 14 ++++++-------- Cargo.toml | 1 - core/Cargo.toml | 2 +- core/src/lib.rs | 2 +- entry/Cargo.toml | 2 +- entry/src/poh.rs | 2 +- gossip/Cargo.toml | 2 +- gossip/src/contact_info.rs | 2 +- gossip/src/crds.rs | 2 +- gossip/src/lib.rs | 2 +- ledger/Cargo.toml | 1 - ledger/src/blockstore_processor.rs | 2 +- ledger/src/shred.rs | 2 +- ledger/src/shred/legacy.rs | 2 +- ledger/src/shred/merkle.rs | 2 +- ledger/src/shredder.rs | 2 +- ledger/src/sigverify_shreds.rs | 2 +- merkle-tree/Cargo.toml | 4 ---- merkle-tree/src/lib.rs | 4 ---- perf/Cargo.toml | 2 +- perf/src/lib.rs | 2 +- poh/Cargo.toml | 2 +- poh/src/lib.rs | 2 +- programs/sbf/Cargo.lock | 3 +-- turbine/Cargo.toml | 2 +- turbine/src/lib.rs | 2 +- 26 files changed, 27 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a6781019d15c7..35e158490e47e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5722,6 +5722,7 @@ dependencies = [ name = "solana-core" version = "1.17.0" dependencies = [ + "assert_matches", "base64 0.21.3", "bincode", "bs58", @@ -5737,7 +5738,6 @@ dependencies = [ "lazy_static", "log", "lru", - "matches", "min-max-heap", "num_enum 0.6.1", "quinn", @@ -5879,12 +5879,12 @@ dependencies = [ name = "solana-entry" version = "1.17.0" dependencies = [ + "assert_matches", "bincode", "crossbeam-channel", "dlopen2", "lazy_static", "log", - "matches", "rand 0.8.5", "rayon", "serde", @@ -6033,6 +6033,7 @@ dependencies = [ name = "solana-gossip" version = "1.17.0" dependencies = [ + "assert_matches", "bincode", "bv", "clap 2.33.3", @@ -6042,7 +6043,6 @@ dependencies = [ "itertools", "log", "lru", - "matches", "num-traits", "num_cpus", "rand 0.8.5", @@ -6151,7 +6151,6 @@ dependencies = [ "libc", "log", "lru", - "matches", "num_cpus", "num_enum 0.6.1", "prost", @@ -6349,7 +6348,6 @@ version = "1.17.0" dependencies = [ "fast-math", "hex", - "matches", "solana-program", ] @@ -6415,6 +6413,7 @@ name = "solana-perf" version = "1.17.0" dependencies = [ "ahash 0.8.3", + 
"assert_matches", "bincode", "bv", "caps", @@ -6424,7 +6423,6 @@ dependencies = [ "lazy_static", "libc", "log", - "matches", "nix 0.26.4", "rand 0.8.5", "rand_chacha 0.3.1", @@ -6442,11 +6440,11 @@ dependencies = [ name = "solana-poh" version = "1.17.0" dependencies = [ + "assert_matches", "bincode", "core_affinity", "crossbeam-channel", "log", - "matches", "rand 0.8.5", "solana-entry", "solana-ledger", @@ -7276,6 +7274,7 @@ dependencies = [ name = "solana-turbine" version = "1.17.0" dependencies = [ + "assert_matches", "bincode", "bytes", "crossbeam-channel", @@ -7283,7 +7282,6 @@ dependencies = [ "itertools", "log", "lru", - "matches", "quinn", "rand 0.8.5", "rand_chacha 0.3.1", diff --git a/Cargo.toml b/Cargo.toml index 1e08f9173f4d11..4a4c95baed54f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -231,7 +231,6 @@ light-poseidon = "0.1.1" log = "0.4.20" lru = "0.7.7" lz4 = "1.24.0" -matches = "0.1.10" memmap2 = "0.5.10" memoffset = "0.9" merlin = "3" diff --git a/core/Cargo.toml b/core/Cargo.toml index a78ad1af40e8c5..555e72a7ffecc6 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -76,8 +76,8 @@ tokio = { workspace = true, features = ["full"] } trees = { workspace = true } [dev-dependencies] +assert_matches = { workspace = true } fs_extra = { workspace = true } -matches = { workspace = true } raptorq = { workspace = true } serde_json = { workspace = true } serial_test = { workspace = true } diff --git a/core/src/lib.rs b/core/src/lib.rs index c1ee7dda1be957..99ac98b5d422cc 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -69,4 +69,4 @@ extern crate solana_frozen_abi_macro; #[cfg(test)] #[macro_use] -extern crate matches; +extern crate assert_matches; diff --git a/entry/Cargo.toml b/entry/Cargo.toml index 73fc1b322ecf24..a9bde85d833e7c 100644 --- a/entry/Cargo.toml +++ b/entry/Cargo.toml @@ -26,7 +26,7 @@ solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } [dev-dependencies] -matches = { workspace = true } +assert_matches = { 
workspace = true } solana-logger = { workspace = true } [lib] diff --git a/entry/src/poh.rs b/entry/src/poh.rs index 500ebaae9eb832..31dd1abbb6af1b 100644 --- a/entry/src/poh.rs +++ b/entry/src/poh.rs @@ -128,7 +128,7 @@ pub fn compute_hashes_per_tick(duration: Duration, hashes_sample_size: u64) -> u mod tests { use { crate::poh::{Poh, PohEntry}, - matches::assert_matches, + assert_matches::assert_matches, solana_sdk::hash::{hash, hashv, Hash}, std::time::Duration, }; diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index c3736c2b8b0883..fcbcf9c9a9f1b4 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -10,6 +10,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +assert_matches = { workspace = true } bincode = { workspace = true } bv = { workspace = true, features = ["serde"] } clap = { workspace = true } @@ -19,7 +20,6 @@ indexmap = { workspace = true, features = ["rayon"] } itertools = { workspace = true } log = { workspace = true } lru = { workspace = true } -matches = { workspace = true } num-traits = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index e3e51f59f5136c..b09957f2ce5db7 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -1,6 +1,6 @@ use { crate::crds_value::MAX_WALLCLOCK, - matches::{assert_matches, debug_assert_matches}, + assert_matches::{assert_matches, debug_assert_matches}, serde::{Deserialize, Deserializer, Serialize}, solana_sdk::{ pubkey::Pubkey, diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs index a89cc8b3736944..d8ab6e45b3d593 100644 --- a/gossip/src/crds.rs +++ b/gossip/src/crds.rs @@ -32,13 +32,13 @@ use { crds_value::{CrdsData, CrdsValue, CrdsValueLabel}, legacy_contact_info::LegacyContactInfo as ContactInfo, }, + assert_matches::debug_assert_matches, bincode::serialize, indexmap::{ map::{rayon::ParValues, Entry, IndexMap}, set::IndexSet, }, lru::LruCache, - 
matches::debug_assert_matches, rayon::{prelude::*, ThreadPool}, solana_sdk::{ clock::Slot, diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs index 459d16babcfc95..11b609f3a37f52 100644 --- a/gossip/src/lib.rs +++ b/gossip/src/lib.rs @@ -31,7 +31,7 @@ extern crate log; #[cfg(test)] #[macro_use] -extern crate matches; +extern crate assert_matches; #[macro_use] extern crate serde_derive; diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index e94f936d5156f3..fa77b944d300da 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -75,7 +75,6 @@ features = ["lz4"] [dev-dependencies] bs58 = { workspace = true } -matches = { workspace = true } solana-account-decoder = { workspace = true } solana-logger = { workspace = true } test-case = { workspace = true } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index c9221377dae34d..9c988f1fcbc4f2 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1859,7 +1859,7 @@ pub mod tests { create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo, }, }, - matches::assert_matches, + assert_matches::assert_matches, rand::{thread_rng, Rng}, solana_entry::entry::{create_ticks, next_entry, next_entry_mut}, solana_program_runtime::declare_process_instruction, diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 05414ddc297725..5fda160e29b976 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -1031,8 +1031,8 @@ pub fn verify_test_data_shred( mod tests { use { super::*, + assert_matches::assert_matches, bincode::serialized_size, - matches::assert_matches, rand::Rng, rand_chacha::{rand_core::SeedableRng, ChaChaRng}, solana_sdk::{shred_version, signature::Signer, signer::keypair::keypair_from_seed}, diff --git a/ledger/src/shred/legacy.rs b/ledger/src/shred/legacy.rs index 18fe942523e91d..ad2e59fc4dd7e3 100644 --- a/ledger/src/shred/legacy.rs +++ b/ledger/src/shred/legacy.rs @@ -330,7 +330,7 @@ mod test { use { super::*, 
crate::shred::{shred_code::MAX_CODE_SHREDS_PER_SLOT, ShredType, MAX_DATA_SHREDS_PER_SLOT}, - matches::assert_matches, + assert_matches::assert_matches, }; #[test] diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index 133788c36a3e73..4f1cd22111e07f 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -1052,8 +1052,8 @@ mod test { use { super::*, crate::shred::{ShredFlags, ShredId, SignedData}, + assert_matches::assert_matches, itertools::Itertools, - matches::assert_matches, rand::{seq::SliceRandom, CryptoRng, Rng}, rayon::ThreadPoolBuilder, solana_sdk::signature::{Keypair, Signer}, diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index 3a4b17515ced13..1a597c41f984d4 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -472,8 +472,8 @@ mod tests { ShredType, MAX_CODE_SHREDS_PER_SLOT, }, }, + assert_matches::assert_matches, bincode::serialized_size, - matches::assert_matches, rand::{seq::SliceRandom, Rng}, solana_sdk::{ hash::{hash, Hash}, diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index 0d5575ced2e3d6..238ab42f9c93e1 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -465,7 +465,7 @@ mod tests { shred::{ProcessShredsStats, Shred, ShredFlags, LEGACY_SHRED_DATA_CAPACITY}, shredder::{ReedSolomonCache, Shredder}, }, - matches::assert_matches, + assert_matches::assert_matches, rand::{seq::SliceRandom, Rng}, rayon::ThreadPoolBuilder, solana_entry::entry::Entry, diff --git a/merkle-tree/Cargo.toml b/merkle-tree/Cargo.toml index d71dc7fcc4f22b..9b9f566ade7406 100644 --- a/merkle-tree/Cargo.toml +++ b/merkle-tree/Cargo.toml @@ -13,10 +13,6 @@ edition = { workspace = true } fast-math = { workspace = true } solana-program = { workspace = true } -# This can go once the SBF toolchain target Rust 1.42.0+ -[target.bpfel-unknown-unknown.dependencies] -matches = { workspace = true } - [dev-dependencies] hex = { workspace = true } diff --git 
a/merkle-tree/src/lib.rs b/merkle-tree/src/lib.rs index 8d544444ab3110..606c487dcf84cb 100644 --- a/merkle-tree/src/lib.rs +++ b/merkle-tree/src/lib.rs @@ -1,8 +1,4 @@ #![allow(clippy::arithmetic_side_effects)] -#[cfg(target_os = "solana")] -#[macro_use] -extern crate matches; - pub mod merkle_tree; pub use merkle_tree::MerkleTree; diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 100c52df510b05..aea478da078c35 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -35,7 +35,7 @@ nix = { workspace = true } name = "solana_perf" [dev-dependencies] -matches = { workspace = true } +assert_matches = { workspace = true } rand_chacha = { workspace = true } solana-logger = { workspace = true } test-case = { workspace = true } diff --git a/perf/src/lib.rs b/perf/src/lib.rs index 6cffe0afbf5f28..8d277d7ad69778 100644 --- a/perf/src/lib.rs +++ b/perf/src/lib.rs @@ -18,7 +18,7 @@ extern crate log; #[cfg(test)] #[macro_use] -extern crate matches; +extern crate assert_matches; #[macro_use] extern crate solana_metrics; diff --git a/poh/Cargo.toml b/poh/Cargo.toml index e4109dfe7262e3..4df76178d61841 100644 --- a/poh/Cargo.toml +++ b/poh/Cargo.toml @@ -22,8 +22,8 @@ solana-sdk = { workspace = true } thiserror = { workspace = true } [dev-dependencies] +assert_matches = { workspace = true } bincode = { workspace = true } -matches = { workspace = true } rand = { workspace = true } solana-logger = { workspace = true } solana-perf = { workspace = true } diff --git a/poh/src/lib.rs b/poh/src/lib.rs index 80a02148f3fc94..ff60171d7a8d89 100644 --- a/poh/src/lib.rs +++ b/poh/src/lib.rs @@ -8,4 +8,4 @@ extern crate solana_metrics; #[cfg(test)] #[macro_use] -extern crate matches; +extern crate assert_matches; diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 03a54b3056b984..929ef4225464ad 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5012,6 +5012,7 @@ dependencies = [ name = "solana-gossip" version = "1.17.0" dependencies = [ + "assert_matches", 
"bincode", "bv", "clap 2.33.3", @@ -5021,7 +5022,6 @@ dependencies = [ "itertools", "log", "lru", - "matches", "num-traits", "rand 0.8.5", "rand_chacha 0.3.1", @@ -5151,7 +5151,6 @@ name = "solana-merkle-tree" version = "1.17.0" dependencies = [ "fast-math", - "matches", "solana-program", ] diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml index 1cddfcef4854fb..8562ab6525a069 100644 --- a/turbine/Cargo.toml +++ b/turbine/Cargo.toml @@ -41,7 +41,7 @@ thiserror = { workspace = true } tokio = { workspace = true } [dev-dependencies] -matches = { workspace = true } +assert_matches = { workspace = true } solana-logger = { workspace = true } [[bench]] diff --git a/turbine/src/lib.rs b/turbine/src/lib.rs index 0f7d538542be46..2c7f3c9368f416 100644 --- a/turbine/src/lib.rs +++ b/turbine/src/lib.rs @@ -14,4 +14,4 @@ extern crate solana_metrics; #[cfg(test)] #[macro_use] -extern crate matches; +extern crate assert_matches; From e331275178f4a32885a9c80b06a01444df29b400 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 7 Sep 2023 09:07:37 -0700 Subject: [PATCH 040/407] remove coupling of ShrinkCandidates to HashMap (#33176) --- accounts-db/src/accounts_db.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 85c32eb057342f..9fec6783a3edd7 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4311,8 +4311,8 @@ impl AccountsDb { // Working from the beginning of store_usage which are the most sparse and see when we can stop // shrinking while still achieving the overall goals. 
- let mut shrink_slots: ShrinkCandidates = HashMap::new(); - let mut shrink_slots_next_batch: ShrinkCandidates = HashMap::new(); + let mut shrink_slots = ShrinkCandidates::new(); + let mut shrink_slots_next_batch = ShrinkCandidates::new(); for usage in &store_usage { let store = &usage.store; let alive_ratio = (total_alive_bytes as f64) / (total_bytes as f64); @@ -8161,7 +8161,7 @@ impl AccountsDb { assert!(self.storage.no_shrink_in_progress()); let mut dead_slots = HashSet::new(); - let mut new_shrink_candidates: ShrinkCandidates = HashMap::new(); + let mut new_shrink_candidates = ShrinkCandidates::new(); let mut measure = Measure::start("remove"); for (slot, account_info) in reclaims { // No cached accounts should make it here @@ -13250,7 +13250,7 @@ pub mod tests { fn test_select_candidates_by_total_usage_no_candidates() { // no input candidates -- none should be selected solana_logger::setup(); - let candidates: ShrinkCandidates = HashMap::new(); + let candidates = ShrinkCandidates::new(); let (selected_candidates, next_candidates) = AccountsDb::select_candidates_by_total_usage( &candidates, @@ -13266,7 +13266,7 @@ pub mod tests { fn test_select_candidates_by_total_usage_3_way_split_condition() { // three candidates, one selected for shrink, one is put back to the candidate list and one is ignored solana_logger::setup(); - let mut candidates: ShrinkCandidates = HashMap::new(); + let mut candidates = ShrinkCandidates::new(); let common_store_path = Path::new(""); let slot_id_1 = 12; @@ -13340,7 +13340,7 @@ pub mod tests { fn test_select_candidates_by_total_usage_2_way_split_condition() { // three candidates, 2 are selected for shrink, one is ignored solana_logger::setup(); - let mut candidates: ShrinkCandidates = HashMap::new(); + let mut candidates = ShrinkCandidates::new(); let common_store_path = Path::new(""); let slot_id_1 = 12; @@ -13410,7 +13410,7 @@ pub mod tests { fn test_select_candidates_by_total_usage_all_clean() { // 2 candidates, they must be 
selected to achieve the target alive ratio solana_logger::setup(); - let mut candidates: ShrinkCandidates = HashMap::new(); + let mut candidates = ShrinkCandidates::new(); let slot1 = 12; let common_store_path = Path::new(""); From 9ff0b35f298a73163b7de06da379a049789e364c Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 7 Sep 2023 18:00:25 +0000 Subject: [PATCH 041/407] adds QUIC endpoint for repair service (#33057) Working towards using QUIC protocol for repair, the commit adds a QUIC endpoint for repair service. Outgoing local requests are sent as struct LocalRequest { remote_address: SocketAddr, bytes: Vec, num_expected_responses: usize, response_sender: Sender<(SocketAddr, Vec)>, } to the client-side of the endpoint. The client opens a bidirectional stream with the LocalRequest.remote_address and once received the response, sends it down the LocalRequest.response_sender channel. Incoming requests from remote nodes are received from bidirectional streams and sent as struct RemoteRequest { remote_pubkey: Option, remote_address: SocketAddr, bytes: Vec, response_sender: Option>>>, } to the repair-service. The response is received from the receiver end of RemoteRequest.response_sender channel and send back to the remote node using the send side of the bidirectional stream. 
--- Cargo.lock | 4 + core/Cargo.toml | 4 + core/src/repair/mod.rs | 1 + core/src/repair/quic_endpoint.rs | 654 +++++++++++++++++++++++++++++++ programs/sbf/Cargo.lock | 4 + 5 files changed, 667 insertions(+) create mode 100644 core/src/repair/quic_endpoint.rs diff --git a/Cargo.lock b/Cargo.lock index 35e158490e47e8..6b68897a0f360a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5733,6 +5733,7 @@ dependencies = [ "eager", "etcd-client", "fs_extra", + "futures 0.3.28", "histogram", "itertools", "lazy_static", @@ -5745,9 +5746,12 @@ dependencies = [ "rand_chacha 0.3.1", "raptorq", "rayon", + "rcgen", "rolling-file", "rustc_version 0.4.0", + "rustls", "serde", + "serde_bytes", "serde_derive", "serde_json", "serial_test", diff --git a/core/Cargo.toml b/core/Cargo.toml index 555e72a7ffecc6..ce32e45d70deaf 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -23,6 +23,7 @@ crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } eager = { workspace = true } etcd-client = { workspace = true, features = ["tls"] } +futures = { workspace = true } histogram = { workspace = true } itertools = { workspace = true } lazy_static = { workspace = true } @@ -34,8 +35,11 @@ quinn = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } rayon = { workspace = true } +rcgen = { workspace = true } rolling-file = { workspace = true } +rustls = { workspace = true } serde = { workspace = true } +serde_bytes = { workspace = true } serde_derive = { workspace = true } solana-accounts-db = { workspace = true } solana-address-lookup-table-program = { workspace = true } diff --git a/core/src/repair/mod.rs b/core/src/repair/mod.rs index a363fccbc500df..8514527480eb6f 100644 --- a/core/src/repair/mod.rs +++ b/core/src/repair/mod.rs @@ -3,6 +3,7 @@ pub mod cluster_slot_state_verifier; pub mod duplicate_repair_status; pub mod outstanding_requests; pub mod packet_threshold; +pub(crate) mod quic_endpoint; pub mod 
repair_generic_traversal; pub mod repair_response; pub mod repair_service; diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs new file mode 100644 index 00000000000000..ec2c2db07a70c5 --- /dev/null +++ b/core/src/repair/quic_endpoint.rs @@ -0,0 +1,654 @@ +#![allow(dead_code)] +use { + bincode::Options, + crossbeam_channel::Sender, + futures::future::TryJoin, + itertools::Itertools, + log::error, + quinn::{ + ClientConfig, ConnectError, Connecting, Connection, ConnectionError, Endpoint, + EndpointConfig, ReadToEndError, RecvStream, SendStream, ServerConfig, TokioRuntime, + TransportConfig, VarInt, WriteError, + }, + rcgen::RcgenError, + rustls::{Certificate, PrivateKey}, + serde_bytes::ByteBuf, + solana_quic_client::nonblocking::quic_client::SkipServerVerification, + solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Keypair}, + solana_streamer::{ + quic::SkipClientVerification, tls_certificates::new_self_signed_tls_certificate, + }, + std::{ + collections::{hash_map::Entry, HashMap}, + io::{Cursor, Error as IoError}, + net::{IpAddr, SocketAddr, UdpSocket}, + ops::Deref, + sync::Arc, + time::Duration, + }, + thiserror::Error, + tokio::{ + sync::{ + mpsc::{Receiver as AsyncReceiver, Sender as AsyncSender}, + oneshot::Sender as OneShotSender, + RwLock, + }, + task::JoinHandle, + }, +}; + +const ALPN_REPAIR_PROTOCOL_ID: &[u8] = b"solana-repair"; +const CONNECT_SERVER_NAME: &str = "solana-repair"; + +const CLIENT_CHANNEL_CAPACITY: usize = 1 << 14; +const MAX_CONCURRENT_BIDI_STREAMS: VarInt = VarInt::from_u32(512); + +const CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(1); +const CONNECTION_CLOSE_ERROR_CODE_DROPPED: VarInt = VarInt::from_u32(2); +const CONNECTION_CLOSE_ERROR_CODE_INVALID_IDENTITY: VarInt = VarInt::from_u32(3); +const CONNECTION_CLOSE_ERROR_CODE_REPLACED: VarInt = VarInt::from_u32(4); + +const CONNECTION_CLOSE_REASON_SHUTDOWN: &[u8] = b"SHUTDOWN"; +const CONNECTION_CLOSE_REASON_DROPPED: &[u8] 
= b"DROPPED"; +const CONNECTION_CLOSE_REASON_INVALID_IDENTITY: &[u8] = b"INVALID_IDENTITY"; +const CONNECTION_CLOSE_REASON_REPLACED: &[u8] = b"REPLACED"; + +pub(crate) type AsyncTryJoinHandle = TryJoin, JoinHandle<()>>; +type ConnectionCache = HashMap<(SocketAddr, Option), Arc>>>; + +// Outgoing local requests. +pub struct LocalRequest { + pub(crate) remote_address: SocketAddr, + pub(crate) bytes: Vec, + pub(crate) num_expected_responses: usize, + pub(crate) response_sender: Sender<(SocketAddr, Vec)>, +} + +// Incomming requests from remote nodes. +// remote_pubkey and response_sender are None only when adapting UDP packets. +pub struct RemoteRequest { + pub(crate) remote_pubkey: Option, + pub(crate) remote_address: SocketAddr, + pub(crate) bytes: Vec, + pub(crate) response_sender: Option>>>, +} + +#[derive(Error, Debug)] +#[allow(clippy::enum_variant_names)] +pub(crate) enum Error { + #[error(transparent)] + BincodeError(#[from] bincode::Error), + #[error(transparent)] + CertificateError(#[from] RcgenError), + #[error(transparent)] + ConnectError(#[from] ConnectError), + #[error(transparent)] + ConnectionError(#[from] ConnectionError), + #[error("Channel Send Error")] + ChannelSendError, + #[error("Invalid Identity: {0:?}")] + InvalidIdentity(SocketAddr), + #[error(transparent)] + IoError(#[from] IoError), + #[error("No Response Received")] + NoResponseReceived, + #[error(transparent)] + ReadToEndError(#[from] ReadToEndError), + #[error("read_to_end Timeout")] + ReadToEndTimeout, + #[error(transparent)] + WriteError(#[from] WriteError), + #[error(transparent)] + TlsError(#[from] rustls::Error), +} + +#[allow(clippy::type_complexity)] +pub(crate) fn new_quic_endpoint( + runtime: &tokio::runtime::Handle, + keypair: &Keypair, + socket: UdpSocket, + address: IpAddr, + remote_request_sender: Sender, +) -> Result<(Endpoint, AsyncSender, AsyncTryJoinHandle), Error> { + let (cert, key) = new_self_signed_tls_certificate(keypair, address)?; + let server_config = 
new_server_config(cert.clone(), key.clone())?; + let client_config = new_client_config(cert, key)?; + let mut endpoint = { + // Endpoint::new requires entering the runtime context, + // otherwise the code below will panic. + let _guard = runtime.enter(); + Endpoint::new( + EndpointConfig::default(), + Some(server_config), + socket, + Arc::new(TokioRuntime), + )? + }; + endpoint.set_default_client_config(client_config); + let cache = Arc::>::default(); + let (client_sender, client_receiver) = tokio::sync::mpsc::channel(CLIENT_CHANNEL_CAPACITY); + let server_task = runtime.spawn(run_server( + endpoint.clone(), + remote_request_sender.clone(), + cache.clone(), + )); + let client_task = runtime.spawn(run_client( + endpoint.clone(), + client_receiver, + remote_request_sender, + cache, + )); + let task = futures::future::try_join(server_task, client_task); + Ok((endpoint, client_sender, task)) +} + +pub(crate) fn close_quic_endpoint(endpoint: &Endpoint) { + endpoint.close( + CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN, + CONNECTION_CLOSE_REASON_SHUTDOWN, + ); +} + +fn new_server_config(cert: Certificate, key: PrivateKey) -> Result { + let mut config = rustls::ServerConfig::builder() + .with_safe_defaults() + .with_client_cert_verifier(Arc::new(SkipClientVerification {})) + .with_single_cert(vec![cert], key)?; + config.alpn_protocols = vec![ALPN_REPAIR_PROTOCOL_ID.to_vec()]; + let mut config = ServerConfig::with_crypto(Arc::new(config)); + config + .transport_config(Arc::new(new_transport_config())) + .use_retry(true) + .migration(false); + Ok(config) +} + +fn new_client_config(cert: Certificate, key: PrivateKey) -> Result { + let mut config = rustls::ClientConfig::builder() + .with_safe_defaults() + .with_custom_certificate_verifier(Arc::new(SkipServerVerification {})) + .with_client_auth_cert(vec![cert], key)?; + config.enable_early_data = true; + config.alpn_protocols = vec![ALPN_REPAIR_PROTOCOL_ID.to_vec()]; + let mut config = ClientConfig::new(Arc::new(config)); + 
config.transport_config(Arc::new(new_transport_config())); + Ok(config) +} + +fn new_transport_config() -> TransportConfig { + let mut config = TransportConfig::default(); + config + .max_concurrent_bidi_streams(MAX_CONCURRENT_BIDI_STREAMS) + .max_concurrent_uni_streams(VarInt::from(0u8)) + .datagram_receive_buffer_size(None); + config +} + +async fn run_server( + endpoint: Endpoint, + remote_request_sender: Sender, + cache: Arc>, +) { + while let Some(connecting) = endpoint.accept().await { + tokio::task::spawn(handle_connecting_error( + endpoint.clone(), + connecting, + remote_request_sender.clone(), + cache.clone(), + )); + } +} + +async fn run_client( + endpoint: Endpoint, + mut receiver: AsyncReceiver, + remote_request_sender: Sender, + cache: Arc>, +) { + while let Some(request) = receiver.recv().await { + tokio::task::spawn(send_request_task( + endpoint.clone(), + request, + remote_request_sender.clone(), + cache.clone(), + )); + } + close_quic_endpoint(&endpoint); +} + +async fn handle_connecting_error( + endpoint: Endpoint, + connecting: Connecting, + remote_request_sender: Sender, + cache: Arc>, +) { + if let Err(err) = handle_connecting(endpoint, connecting, remote_request_sender, cache).await { + error!("handle_connecting: {err:?}"); + } +} + +async fn handle_connecting( + endpoint: Endpoint, + connecting: Connecting, + remote_request_sender: Sender, + cache: Arc>, +) -> Result<(), Error> { + let connection = connecting.await?; + let remote_address = connection.remote_address(); + let remote_pubkey = get_remote_pubkey(&connection)?; + handle_connection_error( + endpoint, + remote_address, + remote_pubkey, + connection, + remote_request_sender, + cache, + ) + .await; + Ok(()) +} + +async fn handle_connection_error( + endpoint: Endpoint, + remote_address: SocketAddr, + remote_pubkey: Pubkey, + connection: Connection, + remote_request_sender: Sender, + cache: Arc>, +) { + cache_connection(remote_address, remote_pubkey, connection.clone(), &cache).await; + 
if let Err(err) = handle_connection( + &endpoint, + remote_address, + remote_pubkey, + &connection, + &remote_request_sender, + ) + .await + { + drop_connection(remote_address, remote_pubkey, &connection, &cache).await; + error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"); + } +} + +async fn handle_connection( + endpoint: &Endpoint, + remote_address: SocketAddr, + remote_pubkey: Pubkey, + connection: &Connection, + remote_request_sender: &Sender, +) -> Result<(), Error> { + loop { + let (send_stream, recv_stream) = connection.accept_bi().await?; + tokio::task::spawn(handle_streams_task( + endpoint.clone(), + remote_address, + remote_pubkey, + send_stream, + recv_stream, + remote_request_sender.clone(), + )); + } +} + +async fn handle_streams_task( + endpoint: Endpoint, + remote_address: SocketAddr, + remote_pubkey: Pubkey, + send_stream: SendStream, + recv_stream: RecvStream, + remote_request_sender: Sender, +) { + if let Err(err) = handle_streams( + &endpoint, + remote_address, + remote_pubkey, + send_stream, + recv_stream, + &remote_request_sender, + ) + .await + { + error!("handle_stream: {remote_address}, {remote_pubkey}, {err:?}"); + } +} + +async fn handle_streams( + endpoint: &Endpoint, + remote_address: SocketAddr, + remote_pubkey: Pubkey, + mut send_stream: SendStream, + mut recv_stream: RecvStream, + remote_request_sender: &Sender, +) -> Result<(), Error> { + // Assert that send won't block. 
+ debug_assert_eq!(remote_request_sender.capacity(), None); + const READ_TIMEOUT_DURATION: Duration = Duration::from_secs(2); + let bytes = tokio::time::timeout( + READ_TIMEOUT_DURATION, + recv_stream.read_to_end(PACKET_DATA_SIZE), + ) + .await + .map_err(|_| Error::ReadToEndTimeout)??; + let (response_sender, response_receiver) = tokio::sync::oneshot::channel(); + let remote_request = RemoteRequest { + remote_pubkey: Some(remote_pubkey), + remote_address, + bytes, + response_sender: Some(response_sender), + }; + if let Err(err) = remote_request_sender.send(remote_request) { + close_quic_endpoint(endpoint); + return Err(Error::from(err)); + } + let Ok(response) = response_receiver.await else { + return Err(Error::NoResponseReceived); + }; + for chunk in response { + let size = chunk.len() as u64; + send_stream.write_all(&size.to_le_bytes()).await?; + send_stream.write_all(&chunk).await?; + } + send_stream.finish().await.map_err(Error::from) +} + +async fn send_request_task( + endpoint: Endpoint, + request: LocalRequest, + remote_request_sender: Sender, + cache: Arc>, +) { + if let Err(err) = send_request(&endpoint, request, remote_request_sender, cache).await { + error!("send_request_task: {err:?}"); + } +} + +async fn send_request( + endpoint: &Endpoint, + LocalRequest { + remote_address, + bytes, + num_expected_responses, + response_sender, + }: LocalRequest, + remote_request_sender: Sender, + cache: Arc>, +) -> Result<(), Error> { + // Assert that send won't block. + debug_assert_eq!(response_sender.capacity(), None); + const READ_TIMEOUT_DURATION: Duration = Duration::from_secs(10); + let connection = get_connection(endpoint, remote_address, remote_request_sender, cache).await?; + let (mut send_stream, mut recv_stream) = connection.open_bi().await?; + send_stream.write_all(&bytes).await?; + send_stream.finish().await?; + // Each response is at most PACKET_DATA_SIZE bytes and requires + // an additional 8 bytes to encode its length. 
+ let size = PACKET_DATA_SIZE + .saturating_add(8) + .saturating_mul(num_expected_responses); + let response = tokio::time::timeout(READ_TIMEOUT_DURATION, recv_stream.read_to_end(size)) + .await + .map_err(|_| Error::ReadToEndTimeout)??; + let remote_address = connection.remote_address(); + let mut cursor = Cursor::new(&response[..]); + std::iter::repeat_with(|| { + bincode::options() + .with_limit(response.len() as u64) + .with_fixint_encoding() + .allow_trailing_bytes() + .deserialize_from::<_, ByteBuf>(&mut cursor) + .map(ByteBuf::into_vec) + .ok() + }) + .while_some() + .try_for_each(|chunk| { + response_sender + .send((remote_address, chunk)) + .map_err(|err| { + close_quic_endpoint(endpoint); + Error::from(err) + }) + }) +} + +async fn get_connection( + endpoint: &Endpoint, + remote_address: SocketAddr, + remote_request_sender: Sender, + cache: Arc>, +) -> Result { + let entry = get_cache_entry(remote_address, &cache).await; + { + let connection: Option = entry.read().await.clone(); + if let Some(connection) = connection { + if connection.close_reason().is_none() { + return Ok(connection); + } + } + } + let connection = { + // Need to write lock here so that only one task initiates + // a new connection to the same remote_address. + let mut entry = entry.write().await; + if let Some(connection) = entry.deref() { + if connection.close_reason().is_none() { + return Ok(connection.clone()); + } + } + let connection = endpoint + .connect(remote_address, CONNECT_SERVER_NAME)? 
+ .await?; + entry.insert(connection).clone() + }; + tokio::task::spawn(handle_connection_error( + endpoint.clone(), + connection.remote_address(), + get_remote_pubkey(&connection)?, + connection.clone(), + remote_request_sender, + cache, + )); + Ok(connection) +} + +fn get_remote_pubkey(connection: &Connection) -> Result { + match solana_streamer::nonblocking::quic::get_remote_pubkey(connection) { + Some(remote_pubkey) => Ok(remote_pubkey), + None => { + connection.close( + CONNECTION_CLOSE_ERROR_CODE_INVALID_IDENTITY, + CONNECTION_CLOSE_REASON_INVALID_IDENTITY, + ); + Err(Error::InvalidIdentity(connection.remote_address())) + } + } +} + +async fn get_cache_entry( + remote_address: SocketAddr, + cache: &RwLock, +) -> Arc>> { + let key = (remote_address, /*remote_pubkey:*/ None); + if let Some(entry) = cache.read().await.get(&key) { + return entry.clone(); + } + cache.write().await.entry(key).or_default().clone() +} + +async fn cache_connection( + remote_address: SocketAddr, + remote_pubkey: Pubkey, + connection: Connection, + cache: &RwLock, +) { + // The 2nd cache entry with remote_pubkey == None allows to lookup an entry + // only by SocketAddr when establishing outgoing connections. 
+ let entries: [Arc>>; 2] = { + let mut cache = cache.write().await; + [Some(remote_pubkey), None].map(|remote_pubkey| { + let key = (remote_address, remote_pubkey); + cache.entry(key).or_default().clone() + }) + }; + let mut entry = entries[0].write().await; + *entries[1].write().await = Some(connection.clone()); + if let Some(old) = entry.replace(connection) { + drop(entry); + old.close( + CONNECTION_CLOSE_ERROR_CODE_REPLACED, + CONNECTION_CLOSE_REASON_REPLACED, + ); + } +} + +async fn drop_connection( + remote_address: SocketAddr, + remote_pubkey: Pubkey, + connection: &Connection, + cache: &RwLock, +) { + if connection.close_reason().is_none() { + connection.close( + CONNECTION_CLOSE_ERROR_CODE_DROPPED, + CONNECTION_CLOSE_REASON_DROPPED, + ); + } + let key = (remote_address, Some(remote_pubkey)); + if let Entry::Occupied(entry) = cache.write().await.entry(key) { + if matches!(entry.get().read().await.deref(), + Some(entry) if entry.stable_id() == connection.stable_id()) + { + entry.remove(); + } + } + // Cache entry for (remote_address, None) will be lazily evicted. 
+} + +impl From> for Error { + fn from(_: crossbeam_channel::SendError) -> Self { + Error::ChannelSendError + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + itertools::{izip, multiunzip}, + solana_sdk::signature::Signer, + std::{iter::repeat_with, net::Ipv4Addr, time::Duration}, + }; + + #[test] + fn test_quic_endpoint() { + const NUM_ENDPOINTS: usize = 3; + const RECV_TIMEOUT: Duration = Duration::from_secs(30); + let runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(8) + .enable_all() + .build() + .unwrap(); + let keypairs: Vec = repeat_with(Keypair::new).take(NUM_ENDPOINTS).collect(); + let sockets: Vec = repeat_with(|| UdpSocket::bind((Ipv4Addr::LOCALHOST, 0))) + .take(NUM_ENDPOINTS) + .collect::>() + .unwrap(); + let addresses: Vec = sockets + .iter() + .map(UdpSocket::local_addr) + .collect::>() + .unwrap(); + let (remote_request_senders, remote_request_receivers): (Vec<_>, Vec<_>) = + repeat_with(crossbeam_channel::unbounded::) + .take(NUM_ENDPOINTS) + .unzip(); + let (endpoints, senders, tasks): (Vec<_>, Vec<_>, Vec<_>) = multiunzip( + keypairs + .iter() + .zip(sockets) + .zip(remote_request_senders) + .map(|((keypair, socket), remote_request_sender)| { + new_quic_endpoint( + runtime.handle(), + keypair, + socket, + IpAddr::V4(Ipv4Addr::LOCALHOST), + remote_request_sender, + ) + .unwrap() + }), + ); + let (response_senders, response_receivers): (Vec<_>, Vec<_>) = + repeat_with(crossbeam_channel::unbounded::<(SocketAddr, Vec)>) + .take(NUM_ENDPOINTS) + .unzip(); + // Send a unique request from each endpoint to every other endpoint. 
+ for (i, (keypair, &address, sender)) in izip!(&keypairs, &addresses, &senders).enumerate() { + for (j, (&remote_address, response_sender)) in + addresses.iter().zip(&response_senders).enumerate() + { + if i != j { + let mut bytes: Vec = format!("{i}=>{j}").into_bytes(); + bytes.resize(PACKET_DATA_SIZE, 0xa5); + let request = LocalRequest { + remote_address, + bytes, + num_expected_responses: j + 1, + response_sender: response_sender.clone(), + }; + sender.blocking_send(request).unwrap(); + } + } + // Verify all requests are received and respond to each. + for (j, remote_request_receiver) in remote_request_receivers.iter().enumerate() { + if i != j { + let RemoteRequest { + remote_pubkey, + remote_address, + bytes, + response_sender, + } = remote_request_receiver.recv_timeout(RECV_TIMEOUT).unwrap(); + assert_eq!(remote_pubkey, Some(keypair.pubkey())); + assert_eq!(remote_address, address); + assert_eq!(bytes, { + let mut bytes = format!("{i}=>{j}").into_bytes(); + bytes.resize(PACKET_DATA_SIZE, 0xa5); + bytes + }); + let response: Vec> = (0..=j) + .map(|k| { + let mut bytes = format!("{j}=>{i}({k})").into_bytes(); + bytes.resize(PACKET_DATA_SIZE, 0xd5); + bytes + }) + .collect(); + response_sender.unwrap().send(response).unwrap(); + } + } + // Verify responses. 
+ for (j, (&remote_address, response_receiver)) in + addresses.iter().zip(&response_receivers).enumerate() + { + if i != j { + for k in 0..=j { + let (address, response) = + response_receiver.recv_timeout(RECV_TIMEOUT).unwrap(); + assert_eq!(address, remote_address); + assert_eq!(response, { + let mut bytes = format!("{j}=>{i}({k})").into_bytes(); + bytes.resize(PACKET_DATA_SIZE, 0xd5); + bytes + }); + } + } + } + } + drop(senders); + for endpoint in endpoints { + close_quic_endpoint(&endpoint); + } + for task in tasks { + runtime.block_on(task).unwrap(); + } + } +} diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 929ef4225464ad..b23da9afdf759c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4793,6 +4793,7 @@ dependencies = [ "dashmap", "eager", "etcd-client", + "futures 0.3.28", "histogram", "itertools", "lazy_static", @@ -4804,9 +4805,12 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", + "rcgen", "rolling-file", "rustc_version", + "rustls", "serde", + "serde_bytes", "serde_derive", "solana-accounts-db", "solana-address-lookup-table-program", From 350caaeec0dd3812ef96f6de5f6c8d76e42fd159 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Thu, 7 Sep 2023 13:16:43 -0500 Subject: [PATCH 042/407] Add release-with-debug profile (#33096) --- Cargo.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 4a4c95baed54f9..4957b088240fa1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,3 +1,8 @@ +[profile.release-with-debug] +inherits = "release" +debug = true +split-debuginfo = "packed" + [workspace] members = [ "account-decoder", From d54ee351031c81bb00d132d5f6cf61623518f3a9 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Thu, 7 Sep 2023 11:31:23 -0700 Subject: [PATCH 043/407] Improve plugin load/reload unit tests (#33126) * Improve plugin load/reload unit tests * check expectations after reload --- .../src/geyser_plugin_manager.rs | 46 
+++++++++++++++---- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 2bacfbe51871b4..0698cf1a656363 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -324,6 +324,11 @@ pub(crate) fn load_plugin_from_config( Ok((plugin, lib, config_file)) } +#[cfg(test)] +const TESTPLUGIN_CONFIG: &str = "TESTPLUGIN_CONFIG"; +#[cfg(test)] +const TESTPLUGIN2_CONFIG: &str = "TESTPLUGIN2_CONFIG"; + // This is mocked for tests to avoid having to do IO with a dynamically linked library // across different architectures at test time // @@ -331,15 +336,31 @@ pub(crate) fn load_plugin_from_config( /// (The geyser plugin interface requires a &str for the on_load method). #[cfg(test)] pub(crate) fn load_plugin_from_config( - _geyser_plugin_config_file: &Path, + geyser_plugin_config_file: &Path, ) -> Result<(Box, Library, &str), GeyserPluginManagerError> { - Ok(tests::dummy_plugin_and_library(tests::TestPlugin)) + if geyser_plugin_config_file.ends_with(TESTPLUGIN_CONFIG) { + Ok(tests::dummy_plugin_and_library( + tests::TestPlugin, + TESTPLUGIN_CONFIG, + )) + } else if geyser_plugin_config_file.ends_with(TESTPLUGIN2_CONFIG) { + Ok(tests::dummy_plugin_and_library( + tests::TestPlugin2, + TESTPLUGIN2_CONFIG, + )) + } else { + Err(GeyserPluginManagerError::CannotOpenConfigFile( + geyser_plugin_config_file.to_str().unwrap().to_string(), + )) + } } #[cfg(test)] mod tests { use { - crate::geyser_plugin_manager::GeyserPluginManager, + crate::geyser_plugin_manager::{ + GeyserPluginManager, TESTPLUGIN2_CONFIG, TESTPLUGIN_CONFIG, + }, libloading::Library, solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, std::sync::{Arc, RwLock}, @@ -347,11 +368,12 @@ mod tests { pub(super) fn dummy_plugin_and_library( plugin: P, + config_path: &'static str, ) -> (Box, Library, &'static str) { ( 
Box::new(plugin), Library::from(libloading::os::unix::Library::this()), - DUMMY_CONFIG, + config_path, ) } @@ -391,7 +413,7 @@ mod tests { ); // Mock having loaded plugin (TestPlugin) - let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin); + let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin, DUMMY_CONFIG); plugin.on_load(config).unwrap(); plugin_manager_lock.plugins.push(plugin); plugin_manager_lock.libs.push(lib); @@ -408,8 +430,14 @@ mod tests { ); // Now try a (dummy) reload, replacing TestPlugin with TestPlugin2 - let reload_result = plugin_manager_lock.reload_plugin(DUMMY_NAME, DUMMY_CONFIG); + let reload_result = plugin_manager_lock.reload_plugin(DUMMY_NAME, TESTPLUGIN2_CONFIG); assert!(reload_result.is_ok()); + + // The plugin is now replaced with ANOTHER_DUMMY_NAME + let plugins = plugin_manager_lock.list_plugins().unwrap(); + assert!(plugins.iter().any(|name| name.eq(ANOTHER_DUMMY_NAME))); + // DUMMY_NAME should no longer be present. + assert!(!plugins.iter().any(|name| name.eq(DUMMY_NAME))); } #[test] @@ -420,12 +448,12 @@ mod tests { // Load two plugins // First - let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin); + let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin, TESTPLUGIN_CONFIG); plugin.on_load(config).unwrap(); plugin_manager_lock.plugins.push(plugin); plugin_manager_lock.libs.push(lib); // Second - let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin2); + let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin2, TESTPLUGIN2_CONFIG); plugin.on_load(config).unwrap(); plugin_manager_lock.plugins.push(plugin); plugin_manager_lock.libs.push(lib); @@ -443,7 +471,7 @@ mod tests { let mut plugin_manager_lock = plugin_manager.write().unwrap(); // Load rpc call - let load_result = plugin_manager_lock.load_plugin(DUMMY_CONFIG); + let load_result = plugin_manager_lock.load_plugin(TESTPLUGIN_CONFIG); assert!(load_result.is_ok()); 
assert_eq!(plugin_manager_lock.plugins.len(), 1); From 0083e426b211389b26180401362cbf482582d966 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 7 Sep 2023 12:28:05 -0700 Subject: [PATCH 044/407] ShrinkCandidates only holds slot (#33173) --- accounts-db/src/accounts_db.rs | 145 ++++++++++--------------- accounts-db/src/ancient_append_vecs.rs | 11 +- 2 files changed, 61 insertions(+), 95 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 9fec6783a3edd7..dded2a35304b14 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -772,7 +772,7 @@ type AccountSlots = HashMap>; type SlotOffsets = HashMap>; type ReclaimResult = (AccountSlots, SlotOffsets); type PubkeysRemovedFromAccountsIndex = HashSet; -type ShrinkCandidates = HashMap>; +type ShrinkCandidates = HashSet; trait Versioned { fn version(&self) -> u64; @@ -2481,7 +2481,7 @@ impl AccountsDb { recycle_stores: RwLock::new(RecycleStores::default()), uncleaned_pubkeys: DashMap::new(), next_id: AtomicAppendVecId::new(0), - shrink_candidate_slots: Mutex::new(HashMap::new()), + shrink_candidate_slots: Mutex::new(ShrinkCandidates::new()), write_cache_limit_bytes: None, write_version: AtomicU64::new(0), paths: vec![], @@ -4268,6 +4268,7 @@ impl AccountsDb { /// achieved, it will stop and return the filtered-down candidates and the candidates which /// are skipped in this round and might be eligible for the future shrink. 
fn select_candidates_by_total_usage( + &self, shrink_slots: &ShrinkCandidates, shrink_ratio: f64, oldest_non_ancient_slot: Option, @@ -4283,7 +4284,7 @@ impl AccountsDb { let mut candidates_count: usize = 0; let mut total_bytes: u64 = 0; let mut total_candidate_stores: usize = 0; - for (slot, store) in shrink_slots { + for slot in shrink_slots { if oldest_non_ancient_slot .map(|oldest_non_ancient_slot| slot < &oldest_non_ancient_slot) .unwrap_or_default() @@ -4291,6 +4292,9 @@ impl AccountsDb { // this slot will be 'shrunk' by ancient code continue; } + let Some(store) = self.storage.get_slot_storage_entry(*slot) else { + continue; + }; candidates_count += 1; total_alive_bytes += Self::page_align(store.alive_bytes() as u64); total_bytes += store.capacity(); @@ -4326,7 +4330,7 @@ impl AccountsDb { usage.slot, total_alive_bytes, total_bytes, alive_ratio, shrink_ratio ); if usage.alive_ratio < shrink_ratio { - shrink_slots_next_batch.insert(usage.slot, store.clone()); + shrink_slots_next_batch.insert(usage.slot); } else { break; } @@ -4335,7 +4339,7 @@ impl AccountsDb { let after_shrink_size = Self::page_align(store.alive_bytes() as u64); let bytes_saved = current_store_size.saturating_sub(after_shrink_size); total_bytes -= bytes_saved; - shrink_slots.insert(usage.slot, store.clone()); + shrink_slots.insert(usage.slot); } } measure.stop(); @@ -4743,8 +4747,8 @@ impl AccountsDb { let (shrink_slots, shrink_slots_next_batch) = { if let AccountShrinkThreshold::TotalSpace { shrink_ratio } = self.shrink_ratio { - let (shrink_slots, shrink_slots_next_batch) = - Self::select_candidates_by_total_usage( + let (shrink_slots, shrink_slots_next_batch) = self + .select_candidates_by_total_usage( &shrink_candidates_slots, shrink_ratio, self.ancient_append_vec_offset @@ -4771,14 +4775,14 @@ impl AccountsDb { let num_candidates = shrink_slots.len(); let shrink_candidates_count = shrink_slots.len(); self.thread_pool_clean.install(|| { - shrink_slots - .into_par_iter() - 
.for_each(|(slot, slot_shrink_candidate)| { - let mut measure = Measure::start("shrink_candidate_slots-ms"); + shrink_slots.into_par_iter().for_each(|slot| { + let mut measure = Measure::start("shrink_candidate_slots-ms"); + if let Some(slot_shrink_candidate) = self.storage.get_slot_storage_entry(slot) { self.do_shrink_slot_store(slot, &slot_shrink_candidate); - measure.stop(); - inc_new_counter_info!("shrink_candidate_slots-ms", measure.as_ms() as usize); - }); + } + measure.stop(); + inc_new_counter_info!("shrink_candidate_slots-ms", measure.as_ms() as usize); + }); }); measure_shrink_all_candidates.stop(); inc_new_counter_info!( @@ -4790,8 +4794,8 @@ impl AccountsDb { if let Some(shrink_slots_next_batch) = shrink_slots_next_batch { let mut shrink_slots = self.shrink_candidate_slots.lock().unwrap(); pended_counts += shrink_slots_next_batch.len(); - for (slot, store) in shrink_slots_next_batch { - shrink_slots.insert(slot, store); + for slot in shrink_slots_next_batch { + shrink_slots.insert(slot); } } inc_new_counter_info!("shrink_pended_stores-count", pended_counts); @@ -8199,7 +8203,7 @@ impl AccountsDb { // because slots should only have one storage entry, namely the one that was // created by `flush_slot_cache()`. 
{ - new_shrink_candidates.insert(*slot, store); + new_shrink_candidates.insert(*slot); } } } @@ -8211,18 +8215,8 @@ impl AccountsDb { let mut measure = Measure::start("shrink"); let mut shrink_candidate_slots = self.shrink_candidate_slots.lock().unwrap(); - for (slot, store) in new_shrink_candidates { - debug!( - "adding: {} {} to shrink candidates: count: {}/{} bytes: {}/{}", - store.append_vec_id(), - slot, - store.approx_stored_count(), - store.count(), - store.alive_bytes(), - store.capacity() - ); - - shrink_candidate_slots.insert(slot, store); + for slot in new_shrink_candidates { + shrink_candidate_slots.insert(slot); measure.stop(); self.clean_accounts_stats .remove_dead_accounts_shrink_us @@ -13250,13 +13244,11 @@ pub mod tests { fn test_select_candidates_by_total_usage_no_candidates() { // no input candidates -- none should be selected solana_logger::setup(); - let candidates = ShrinkCandidates::new(); + let candidates: ShrinkCandidates = ShrinkCandidates::new(); + let db = AccountsDb::new_single_for_tests(); - let (selected_candidates, next_candidates) = AccountsDb::select_candidates_by_total_usage( - &candidates, - DEFAULT_ACCOUNTS_SHRINK_RATIO, - None, - ); + let (selected_candidates, next_candidates) = + db.select_candidates_by_total_usage(&candidates, DEFAULT_ACCOUNTS_SHRINK_RATIO, None); assert_eq!(0, selected_candidates.len()); assert_eq!(0, next_candidates.len()); @@ -13267,6 +13259,7 @@ pub mod tests { // three candidates, one selected for shrink, one is put back to the candidate list and one is ignored solana_logger::setup(); let mut candidates = ShrinkCandidates::new(); + let db = AccountsDb::new_single_for_tests(); let common_store_path = Path::new(""); let slot_id_1 = 12; @@ -13281,7 +13274,7 @@ pub mod tests { )); store1.alive_bytes.store(0, Ordering::Release); - candidates.insert(slot_id_1, store1.clone()); + candidates.insert(slot_id_1); let slot_id_2 = 13; @@ -13298,7 +13291,7 @@ pub mod tests { store2 .alive_bytes 
.store(store2_alive_bytes, Ordering::Release); - candidates.insert(slot_id_2, store2.clone()); + candidates.insert(slot_id_2); let slot_id_3 = 14; let store3_id = 55; @@ -13309,13 +13302,17 @@ pub mod tests { store_file_size, )); + db.storage.insert(slot_id_1, Arc::clone(&store1)); + db.storage.insert(slot_id_2, Arc::clone(&store2)); + db.storage.insert(slot_id_3, Arc::clone(&entry3)); + // The store3's alive ratio is 1.0 as its page-aligned alive size is 2 pages let store3_alive_bytes = (PAGE_SIZE + 1) as usize; entry3 .alive_bytes .store(store3_alive_bytes, Ordering::Release); - candidates.insert(slot_id_3, entry3); + candidates.insert(slot_id_3); // Set the target alive ratio to 0.6 so that we can just get rid of store1, the remaining two stores // alive ratio can be > the target ratio: the actual ratio is 0.75 because of 3 alive pages / 4 total pages. @@ -13323,23 +13320,18 @@ pub mod tests { // to the candidates list for next round. let target_alive_ratio = 0.6; let (selected_candidates, next_candidates) = - AccountsDb::select_candidates_by_total_usage(&candidates, target_alive_ratio, None); + db.select_candidates_by_total_usage(&candidates, target_alive_ratio, None); assert_eq!(1, selected_candidates.len()); - assert_eq!( - selected_candidates[&slot_id_1].append_vec_id(), - store1.append_vec_id() - ); + assert!(selected_candidates.contains(&slot_id_1)); assert_eq!(1, next_candidates.len()); - assert_eq!( - next_candidates[&slot_id_2].append_vec_id(), - store2.append_vec_id() - ); + assert!(next_candidates.contains(&slot_id_2)); } #[test] fn test_select_candidates_by_total_usage_2_way_split_condition() { // three candidates, 2 are selected for shrink, one is ignored solana_logger::setup(); + let db = AccountsDb::new_single_for_tests(); let mut candidates = ShrinkCandidates::new(); let common_store_path = Path::new(""); @@ -13354,8 +13346,8 @@ pub mod tests { store_file_size, )); store1.alive_bytes.store(0, Ordering::Release); - - candidates.insert(slot_id_1, 
store1.clone()); + db.storage.insert(slot_id_1, Arc::clone(&store1)); + candidates.insert(slot_id_1); let slot_id_2 = 13; let store2_id = 44; @@ -13365,13 +13357,14 @@ pub mod tests { store2_id, store_file_size, )); + db.storage.insert(slot_id_2, Arc::clone(&store2)); // The store2's alive_ratio is 0.5: as its page aligned alive size is 1 page. let store2_alive_bytes = (PAGE_SIZE - 1) as usize; store2 .alive_bytes .store(store2_alive_bytes, Ordering::Release); - candidates.insert(slot_id_2, store2.clone()); + candidates.insert(slot_id_2); let slot_id_3 = 14; let store3_id = 55; @@ -13388,21 +13381,15 @@ pub mod tests { .alive_bytes .store(store3_alive_bytes, Ordering::Release); - candidates.insert(slot_id_3, entry3); + candidates.insert(slot_id_3); // Set the target ratio to default (0.8), both store1 and store2 must be selected and store3 is ignored. let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO; let (selected_candidates, next_candidates) = - AccountsDb::select_candidates_by_total_usage(&candidates, target_alive_ratio, None); + db.select_candidates_by_total_usage(&candidates, target_alive_ratio, None); assert_eq!(2, selected_candidates.len()); - assert_eq!( - selected_candidates[&slot_id_1].append_vec_id(), - store1.append_vec_id() - ); - assert_eq!( - selected_candidates[&slot_id_2].append_vec_id(), - store2.append_vec_id() - ); + assert!(selected_candidates.contains(&slot_id_1)); + assert!(selected_candidates.contains(&slot_id_2)); assert_eq!(0, next_candidates.len()); } @@ -13410,6 +13397,7 @@ pub mod tests { fn test_select_candidates_by_total_usage_all_clean() { // 2 candidates, they must be selected to achieve the target alive ratio solana_logger::setup(); + let db = AccountsDb::new_single_for_tests(); let mut candidates = ShrinkCandidates::new(); let slot1 = 12; @@ -13430,7 +13418,8 @@ pub mod tests { .alive_bytes .store(store1_alive_bytes, Ordering::Release); - candidates.insert(slot1, store1.clone()); + candidates.insert(slot1); + 
db.storage.insert(slot1, Arc::clone(&store1)); let store2_id = 44; let slot2 = 44; @@ -13447,17 +13436,17 @@ pub mod tests { .alive_bytes .store(store2_alive_bytes, Ordering::Release); - candidates.insert(slot2, store2.clone()); + candidates.insert(slot2); + db.storage.insert(slot2, Arc::clone(&store2)); for newest_ancient_slot in [None, Some(slot1), Some(slot2)] { // Set the target ratio to default (0.8), both stores from the two different slots must be selected. let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO; - let (selected_candidates, next_candidates) = - AccountsDb::select_candidates_by_total_usage( - &candidates, - target_alive_ratio, - newest_ancient_slot.map(|newest_ancient_slot| newest_ancient_slot + 1), - ); + let (selected_candidates, next_candidates) = db.select_candidates_by_total_usage( + &candidates, + target_alive_ratio, + newest_ancient_slot.map(|newest_ancient_slot| newest_ancient_slot + 1), + ); assert_eq!( if newest_ancient_slot == Some(slot1) { 1 @@ -13473,19 +13462,8 @@ pub mod tests { selected_candidates.contains(&slot1) ); - if newest_ancient_slot.is_none() { - assert_eq!( - selected_candidates[&slot1].append_vec_id(), - store1.append_vec_id() - ); - } if newest_ancient_slot != Some(slot2) { assert!(selected_candidates.contains(&slot2)); - - assert_eq!( - selected_candidates[&slot2].append_vec_id(), - store2.append_vec_id() - ); } assert_eq!(0, next_candidates.len()); } @@ -14989,10 +14967,9 @@ pub mod tests { db.clean_accounts(Some(1), false, None, &EpochSchedule::default()); // Shrink Slot 0 - let slot0_store = db.get_and_assert_single_storage(0); { let mut shrink_candidate_slots = db.shrink_candidate_slots.lock().unwrap(); - shrink_candidate_slots.insert(0, slot0_store); + shrink_candidate_slots.insert(0); } db.shrink_candidate_slots(&epoch_schedule); @@ -15323,11 +15300,7 @@ pub mod tests { return; } // Simulate adding shrink candidates from clean_accounts() - let store = db.get_and_assert_single_storage(slot); - 
db.shrink_candidate_slots - .lock() - .unwrap() - .insert(slot, store.clone()); + db.shrink_candidate_slots.lock().unwrap().insert(slot); db.shrink_candidate_slots(&epoch_schedule); }) .unwrap() diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 27f0eaee0dbc1b..3f4b7bb71f9a47 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -1347,10 +1347,7 @@ pub mod tests { let storage = db.storage.get_slot_storage_entry(slot); assert!(storage.is_some()); if in_shrink_candidate_slots { - db.shrink_candidate_slots - .lock() - .unwrap() - .insert(slot, storage.unwrap()); + db.shrink_candidate_slots.lock().unwrap().insert(slot); } }); @@ -1379,11 +1376,7 @@ pub mod tests { ); slots.clone().for_each(|slot| { - assert!(!db - .shrink_candidate_slots - .lock() - .unwrap() - .contains_key(&slot)); + assert!(!db.shrink_candidate_slots.lock().unwrap().contains(&slot)); }); let roots_after = db From ece376fdec26a871c9ff0c51a21571aff76512ca Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Thu, 7 Sep 2023 20:55:56 -0700 Subject: [PATCH 045/407] [zk-token-proof] Add public compute unit variables for instructions (#33129) add public compute unit variables --- programs/zk-token-proof/src/lib.rs | 46 ++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index e52f6520d3cff6..6ed1fb1f33e17f 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -16,6 +16,22 @@ use { std::result::Result, }; +pub const CLOSE_CONTEXT_STATE_COMPUTE_UNITS: u64 = 3_300; +pub const VERIFY_ZERO_BALANCE_COMPUTE_UNITS: u64 = 6012; +pub const VERIFY_WITHDRAW_COMPUTE_UNITS: u64 = 112_454; +pub const VERIFY_CIPHERTEXT_CIPHERTEXT_EQUALITY_COMPUTE_UNITS: u64 = 7_943; +pub const VERIFY_TRANSFER_COMPUTE_UNITS: u64 = 219_290; +pub const VERIFY_TRANSFER_WITH_FEE_COMPUTE_UNITS: u64 = 
407_121; +pub const VERIFY_PUBKEY_VALIDITY_COMPUTE_UNITS: u64 = 2_619; +pub const VERIFY_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 105_066; +pub const VERIFY_BATCHED_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 111_478; +pub const VERIFY_BATCHED_RANGE_PROOF_U128_COMPUTE_UNITS: u64 = 204_512; +pub const VERIFY_BATCHED_RANGE_PROOF_U256_COMPUTE_UNITS: u64 = 368_000; +pub const VERIFY_CIPHERTEXT_COMMITMENT_EQUALITY_COMPUTE_UNITS: u64 = 6_424; +pub const VERIFY_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 6_440; +pub const VERIFY_BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 12_575; +pub const VERIFY_FEE_SIGMA_COMPUTE_UNITS: u64 = 6_547; + fn process_verify_proof(invoke_context: &mut InvokeContext) -> Result<(), InstructionError> where T: Pod + ZkProofData, @@ -136,7 +152,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::CloseContextState => { if native_programs_consume_cu { invoke_context - .consume_checked(3_300) + .consume_checked(CLOSE_CONTEXT_STATE_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "CloseContextState"); @@ -145,7 +161,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::VerifyZeroBalance => { if native_programs_consume_cu { invoke_context - .consume_checked(6_012) + .consume_checked(VERIFY_ZERO_BALANCE_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyZeroBalance"); @@ -154,7 +170,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::VerifyWithdraw => { if native_programs_consume_cu { invoke_context - .consume_checked(112_454) + .consume_checked(VERIFY_WITHDRAW_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyWithdraw"); @@ -163,7 +179,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { 
ProofInstruction::VerifyCiphertextCiphertextEquality => { if native_programs_consume_cu { invoke_context - .consume_checked(7_943) + .consume_checked(VERIFY_CIPHERTEXT_CIPHERTEXT_EQUALITY_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyCiphertextCiphertextEquality"); @@ -175,7 +191,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::VerifyTransfer => { if native_programs_consume_cu { invoke_context - .consume_checked(219_290) + .consume_checked(VERIFY_TRANSFER_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyTransfer"); @@ -184,7 +200,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::VerifyTransferWithFee => { if native_programs_consume_cu { invoke_context - .consume_checked(407_121) + .consume_checked(VERIFY_TRANSFER_WITH_FEE_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyTransferWithFee"); @@ -193,7 +209,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::VerifyPubkeyValidity => { if native_programs_consume_cu { invoke_context - .consume_checked(2_619) + .consume_checked(VERIFY_PUBKEY_VALIDITY_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyPubkeyValidity"); @@ -202,7 +218,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::VerifyRangeProofU64 => { if native_programs_consume_cu { invoke_context - .consume_checked(105_066) + .consume_checked(VERIFY_RANGE_PROOF_U64_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyRangeProof"); @@ -211,7 +227,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::VerifyBatchedRangeProofU64 => { if 
native_programs_consume_cu { invoke_context - .consume_checked(111_478) + .consume_checked(VERIFY_BATCHED_RANGE_PROOF_U64_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyBatchedRangeProof64"); @@ -222,7 +238,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::VerifyBatchedRangeProofU128 => { if native_programs_consume_cu { invoke_context - .consume_checked(204_512) + .consume_checked(VERIFY_BATCHED_RANGE_PROOF_U128_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyBatchedRangeProof128"); @@ -233,7 +249,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { ProofInstruction::VerifyBatchedRangeProofU256 => { if native_programs_consume_cu { invoke_context - .consume_checked(368_000) + .consume_checked(VERIFY_BATCHED_RANGE_PROOF_U256_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; } ic_msg!(invoke_context, "VerifyBatchedRangeProof256"); @@ -243,7 +259,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { } ProofInstruction::VerifyCiphertextCommitmentEquality => { invoke_context - .consume_checked(6_424) + .consume_checked(VERIFY_CIPHERTEXT_COMMITMENT_EQUALITY_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; ic_msg!(invoke_context, "VerifyCiphertextCommitmentEquality"); process_verify_proof::< @@ -253,7 +269,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { } ProofInstruction::VerifyGroupedCiphertext2HandlesValidity => { invoke_context - .consume_checked(6_440) + .consume_checked(VERIFY_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; ic_msg!(invoke_context, "VerifyGroupedCiphertext2HandlesValidity"); process_verify_proof::< @@ -263,7 +279,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { } 
ProofInstruction::VerifyBatchedGroupedCiphertext2HandlesValidity => { invoke_context - .consume_checked(12_575) + .consume_checked(VERIFY_BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; ic_msg!( invoke_context, @@ -276,7 +292,7 @@ declare_process_instruction!(process_instruction, 0, |invoke_context| { } ProofInstruction::VerifyFeeSigma => { invoke_context - .consume_checked(6_547) + .consume_checked(VERIFY_FEE_SIGMA_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; ic_msg!(invoke_context, "VerifyFeeSigma"); process_verify_proof::(invoke_context) From 70f17ceb9f06ee09f1c1192b485dd5a35fde3068 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Sep 2023 12:25:25 +0000 Subject: [PATCH 046/407] build(deps): bump bytes from 1.4.0 to 1.5.0 (#33190) * build(deps): bump bytes from 1.4.0 to 1.5.0 Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.4.0 to 1.5.0. - [Release notes](https://github.com/tokio-rs/bytes/releases) - [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/bytes/compare/v1.4.0...v1.5.0) --- updated-dependencies: - dependency-name: bytes dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b68897a0f360a..4680af74154117 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -896,9 +896,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "bytesize" diff --git a/Cargo.toml b/Cargo.toml index 4957b088240fa1..a5cb1b895a6cff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -158,7 +158,7 @@ byte-unit = "4.0.19" bytecount = "0.6.3" bytemuck = "1.14.0" byteorder = "1.4.3" -bytes = "1.2" +bytes = "1.5" bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b23da9afdf759c..86c6e0b1e433d4 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -833,9 +833,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "bzip2" From b588bebce001e8148c17f05bad228b7aedcee50c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Sep 2023 12:25:44 +0000 Subject: [PATCH 047/407] build(deps): 
bump chrono from 0.4.29 to 0.4.30 (#33191) * build(deps): bump chrono from 0.4.29 to 0.4.30 Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.29 to 0.4.30. - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.29...v0.4.30) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 29 +++++++++-------------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 27 ++++++++------------------- 3 files changed, 18 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4680af74154117..8dced6cf77be65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -327,7 +327,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.9", + "time", ] [[package]] @@ -1017,16 +1017,15 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87d9d13be47a5b7c3907137f1290b0459a7f80efb26be8c52afb11963bccb02" +checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "serde", - "time 0.1.43", "wasm-bindgen", "windows-targets 0.48.0", ] @@ -2244,7 +2243,7 @@ dependencies = [ "serde_json", "simpl", "smpl_jwt", - "time 0.3.9", + "time", "tokio", ] @@ -4285,7 +4284,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", - "time 0.3.9", + "time", "yasna", ] @@ -5034,7 
+5033,7 @@ dependencies = [ "serde_derive", "serde_json", "simpl", - "time 0.3.9", + "time", ] [[package]] @@ -7771,7 +7770,7 @@ dependencies = [ "lazy_static", "libc", "nom", - "time 0.3.9", + "time", "winapi 0.3.9", ] @@ -7955,16 +7954,6 @@ dependencies = [ "tikv-jemalloc-sys", ] -[[package]] -name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi 0.3.9", -] - [[package]] name = "time" version = "0.3.9" @@ -8889,7 +8878,7 @@ dependencies = [ "oid-registry", "rusticata-macros", "thiserror", - "time 0.3.9", + "time", ] [[package]] @@ -8916,7 +8905,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346d34a236c9d3e5f3b9b74563f238f955bbd05fa0b8b4efa53c130c43982f4c" dependencies = [ - "time 0.3.9", + "time", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a5cb1b895a6cff..da2390c2520046 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -163,7 +163,7 @@ bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" cc = "1.0.83" -chrono = { version = "0.4.29", default-features = false } +chrono = { version = "0.4.30", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" console = "0.15.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 86c6e0b1e433d4..54ffec2adb1f09 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -315,7 +315,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.9", + "time", ] [[package]] @@ -901,16 +901,15 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87d9d13be47a5b7c3907137f1290b0459a7f80efb26be8c52afb11963bccb02" +checksum = 
"defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "serde", - "time 0.1.43", "wasm-bindgen", "windows-targets 0.48.0", ] @@ -1874,7 +1873,7 @@ dependencies = [ "serde_json", "simpl", "smpl_jwt", - "time 0.3.9", + "time", "tokio", ] @@ -3751,7 +3750,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", - "time 0.3.9", + "time", "yasna", ] @@ -4406,7 +4405,7 @@ dependencies = [ "serde_derive", "serde_json", "simpl", - "time 0.3.9", + "time", ] [[package]] @@ -6806,16 +6805,6 @@ dependencies = [ "tikv-jemalloc-sys", ] -[[package]] -name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi 0.3.9", -] - [[package]] name = "time" version = "0.3.9" @@ -7716,7 +7705,7 @@ dependencies = [ "oid-registry", "rusticata-macros", "thiserror", - "time 0.3.9", + "time", ] [[package]] @@ -7734,7 +7723,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346d34a236c9d3e5f3b9b74563f238f955bbd05fa0b8b4efa53c130c43982f4c" dependencies = [ - "time 0.3.9", + "time", ] [[package]] From dc6b1eb6531a9010a3199b84447a004b885eb4e0 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 8 Sep 2023 08:45:32 -0700 Subject: [PATCH 048/407] in shrink, get stores prior to shrink starting (#33194) --- accounts-db/src/accounts_db.rs | 37 +++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index dded2a35304b14..7ca0e7ce5a5dd4 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4265,14 +4265,16 @@ impl AccountsDb { /// Given the input `ShrinkCandidates`, this function sorts the stores by their alive 
ratio /// in increasing order with the most sparse entries in the front. It will then simulate the /// shrinking by working on the most sparse entries first and if the overall alive ratio is - /// achieved, it will stop and return the filtered-down candidates and the candidates which + /// achieved, it will stop and return: + /// first tuple element: the filtered-down candidates and + /// second duple element: the candidates which /// are skipped in this round and might be eligible for the future shrink. fn select_candidates_by_total_usage( &self, shrink_slots: &ShrinkCandidates, shrink_ratio: f64, oldest_non_ancient_slot: Option, - ) -> (ShrinkCandidates, ShrinkCandidates) { + ) -> (HashMap>, ShrinkCandidates) { struct StoreUsageInfo { slot: Slot, alive_ratio: f64, @@ -4315,7 +4317,7 @@ impl AccountsDb { // Working from the beginning of store_usage which are the most sparse and see when we can stop // shrinking while still achieving the overall goals. - let mut shrink_slots = ShrinkCandidates::new(); + let mut shrink_slots = HashMap::new(); let mut shrink_slots_next_batch = ShrinkCandidates::new(); for usage in &store_usage { let store = &usage.store; @@ -4339,7 +4341,7 @@ impl AccountsDb { let after_shrink_size = Self::page_align(store.alive_bytes() as u64); let bytes_saved = current_store_size.saturating_sub(after_shrink_size); total_bytes -= bytes_saved; - shrink_slots.insert(usage.slot); + shrink_slots.insert(usage.slot, Arc::clone(store)); } } measure.stop(); @@ -4756,7 +4758,18 @@ impl AccountsDb { ); (shrink_slots, Some(shrink_slots_next_batch)) } else { - (shrink_candidates_slots, None) + ( + // lookup storage for each slot + shrink_candidates_slots + .into_iter() + .filter_map(|slot| { + self.storage + .get_slot_storage_entry(slot) + .map(|storage| (slot, storage)) + }) + .collect(), + None, + ) } }; @@ -4775,14 +4788,14 @@ impl AccountsDb { let num_candidates = shrink_slots.len(); let shrink_candidates_count = shrink_slots.len(); 
self.thread_pool_clean.install(|| { - shrink_slots.into_par_iter().for_each(|slot| { - let mut measure = Measure::start("shrink_candidate_slots-ms"); - if let Some(slot_shrink_candidate) = self.storage.get_slot_storage_entry(slot) { + shrink_slots + .into_par_iter() + .for_each(|(slot, slot_shrink_candidate)| { + let mut measure = Measure::start("shrink_candidate_slots-ms"); self.do_shrink_slot_store(slot, &slot_shrink_candidate); - } - measure.stop(); - inc_new_counter_info!("shrink_candidate_slots-ms", measure.as_ms() as usize); - }); + measure.stop(); + inc_new_counter_info!("shrink_candidate_slots-ms", measure.as_ms() as usize); + }); }); measure_shrink_all_candidates.stop(); inc_new_counter_info!( From a145ade564c5181b264247adb4a3347eb883aed6 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 8 Sep 2023 12:55:54 -0700 Subject: [PATCH 049/407] stat for time spent copying generate index contents (#33187) * stat for time spent copying generate index contents * rework to move stat to generate_index * fix fmt --- accounts-db/src/accounts_db.rs | 10 ++++++++-- accounts-db/src/accounts_index.rs | 7 ++++++- accounts-db/src/bucket_map_holder.rs | 5 ++++- accounts-db/src/in_mem_accounts_index.rs | 15 +++++++++++++-- 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 7ca0e7ce5a5dd4..48798260ed1d03 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -60,6 +60,7 @@ use { cache_hash_data::{CacheHashData, CacheHashDataFileReference}, contains::Contains, epoch_accounts_hash::EpochAccountsHashManager, + in_mem_accounts_index::StartupStats, partitioned_rewards::{PartitionedEpochRewardsConfig, TestPartitionedEpochRewards}, pubkey_bins::PubkeyBinCalculator24, read_only_accounts_cache::ReadOnlyAccountsCache, @@ -636,7 +637,7 @@ struct StorageSizeAndCount { type StorageSizeAndCountMap = DashMap; impl GenerateIndexTimings { - pub fn report(&self) { + pub 
fn report(&self, startup_stats: &StartupStats) { datapoint_info!( "generate_index", ("overall_us", self.total_time_us, i64), @@ -695,6 +696,11 @@ impl GenerateIndexTimings { ), ("total_slots", self.total_slots, i64), ("slots_to_clean", self.slots_to_clean, i64), + ( + "copy_data_us", + startup_stats.copy_data_us.swap(0, Ordering::Relaxed), + i64 + ), ); } } @@ -9369,7 +9375,7 @@ impl AccountsDb { } total_time.stop(); timings.total_time_us = total_time.as_us(); - timings.report(); + timings.report(self.accounts_index.get_startup_stats()); } self.accounts_index.log_secondary_indexes(); diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index b9038441d12515..45ecc13b851133 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -5,7 +5,7 @@ use { ancestors::Ancestors, bucket_map_holder::{Age, BucketMapHolder}, contains::Contains, - in_mem_accounts_index::{InMemAccountsIndex, InsertNewEntryResults}, + in_mem_accounts_index::{InMemAccountsIndex, InsertNewEntryResults, StartupStats}, inline_spl_token::{self, GenericTokenAccount}, inline_spl_token_2022, pubkey_bins::PubkeyBinCalculator24, @@ -1336,6 +1336,11 @@ impl + Into> AccountsIndex { iter.hold_range_in_memory(range, start_holding, thread_pool); } + /// get stats related to startup + pub(crate) fn get_startup_stats(&self) -> &StartupStats { + &self.storage.storage.startup_stats + } + pub fn set_startup(&self, value: Startup) { self.storage.set_startup(value); } diff --git a/accounts-db/src/bucket_map_holder.rs b/accounts-db/src/bucket_map_holder.rs index dfc77671ed43a9..77ae98bdfe9574 100644 --- a/accounts-db/src/bucket_map_holder.rs +++ b/accounts-db/src/bucket_map_holder.rs @@ -2,7 +2,7 @@ use { crate::{ accounts_index::{AccountsIndexConfig, DiskIndexValue, IndexLimitMb, IndexValue}, bucket_map_holder_stats::BucketMapHolderStats, - in_mem_accounts_index::InMemAccountsIndex, + in_mem_accounts_index::{InMemAccountsIndex, StartupStats}, 
waitable_condvar::WaitableCondvar, }, solana_bucket_map::bucket_map::{BucketMap, BucketMapConfig}, @@ -68,6 +68,8 @@ pub struct BucketMapHolder + Into> /// Note startup is an optimization and is not required for correctness. startup: AtomicBool, _phantom: PhantomData, + + pub(crate) startup_stats: Arc, } impl + Into> Debug for BucketMapHolder { @@ -259,6 +261,7 @@ impl + Into> BucketMapHolder mem_budget_mb, threads, _phantom: PhantomData, + startup_stats: Arc::default(), } } diff --git a/accounts-db/src/in_mem_accounts_index.rs b/accounts-db/src/in_mem_accounts_index.rs index 067ab65d6ca051..3d943956cab23d 100644 --- a/accounts-db/src/in_mem_accounts_index.rs +++ b/accounts-db/src/in_mem_accounts_index.rs @@ -27,6 +27,11 @@ type CacheRangesHeld = RwLock>>; type InMemMap = HashMap>; +#[derive(Debug, Default)] +pub struct StartupStats { + pub copy_data_us: AtomicU64, +} + #[derive(Debug)] pub struct PossibleEvictions { /// vec per age in the future, up to size 'ages_to_stay_in_cache' @@ -116,6 +121,9 @@ pub struct InMemAccountsIndex + Into< /// Higher numbers mean we flush less buckets/s /// Lower numbers mean we flush more buckets/s num_ages_to_distribute_flushes: Age, + + /// stats related to starting up + pub(crate) startup_stats: Arc, } impl + Into> Debug for InMemAccountsIndex { @@ -182,6 +190,7 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex Date: Sat, 9 Sep 2023 09:51:23 -0700 Subject: [PATCH 050/407] TransactionScheduler: TransactionStateContainer (#33002) Co-authored-by: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> --- .../transaction_scheduler/mod.rs | 6 + .../transaction_priority_id.rs | 27 ++ .../transaction_state.rs | 323 ++++++++++++++++++ .../transaction_state_container.rs | 311 +++++++++++++++++ 4 files changed, 667 insertions(+) create mode 100644 core/src/banking_stage/transaction_scheduler/transaction_priority_id.rs create mode 100644 core/src/banking_stage/transaction_scheduler/transaction_state.rs create mode 100644 
core/src/banking_stage/transaction_scheduler/transaction_state_container.rs diff --git a/core/src/banking_stage/transaction_scheduler/mod.rs b/core/src/banking_stage/transaction_scheduler/mod.rs index c884c20aa142e0..c723f3af9a0da4 100644 --- a/core/src/banking_stage/transaction_scheduler/mod.rs +++ b/core/src/banking_stage/transaction_scheduler/mod.rs @@ -1,2 +1,8 @@ #[allow(dead_code)] mod thread_aware_account_locks; + +mod transaction_priority_id; +#[allow(dead_code)] +mod transaction_state; +#[allow(dead_code)] +mod transaction_state_container; diff --git a/core/src/banking_stage/transaction_scheduler/transaction_priority_id.rs b/core/src/banking_stage/transaction_scheduler/transaction_priority_id.rs new file mode 100644 index 00000000000000..178a9cdf582d5f --- /dev/null +++ b/core/src/banking_stage/transaction_scheduler/transaction_priority_id.rs @@ -0,0 +1,27 @@ +use crate::banking_stage::scheduler_messages::TransactionId; + +/// A unique identifier tied with priority ordering for a transaction/packet: +/// - `id` has no effect on ordering +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub(crate) struct TransactionPriorityId { + pub(crate) priority: u64, + pub(crate) id: TransactionId, +} + +impl TransactionPriorityId { + pub(crate) fn new(priority: u64, id: TransactionId) -> Self { + Self { priority, id } + } +} + +impl Ord for TransactionPriorityId { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.priority.cmp(&other.priority) + } +} + +impl PartialOrd for TransactionPriorityId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state.rs b/core/src/banking_stage/transaction_scheduler/transaction_state.rs new file mode 100644 index 00000000000000..da3916cd20ec1c --- /dev/null +++ b/core/src/banking_stage/transaction_scheduler/transaction_state.rs @@ -0,0 +1,323 @@ +use { + solana_runtime::transaction_priority_details::TransactionPriorityDetails, 
+ solana_sdk::{slot_history::Slot, transaction::SanitizedTransaction}, +}; + +/// Simple wrapper type to tie a sanitized transaction to max age slot. +pub(crate) struct SanitizedTransactionTTL { + pub(crate) transaction: SanitizedTransaction, + pub(crate) max_age_slot: Slot, +} + +/// TransactionState is used to track the state of a transaction in the transaction scheduler +/// and banking stage as a whole. +/// +/// There are two states a transaction can be in: +/// 1. `Unprocessed` - The transaction is available for scheduling. +/// 2. `Pending` - The transaction is currently scheduled or being processed. +/// +/// Newly received transactions are initially in the `Unprocessed` state. +/// When a transaction is scheduled, it is transitioned to the `Pending` state, +/// using the `transition_to_pending` method. +/// When a transaction finishes processing it may be retryable. If it is retryable, +/// the transaction is transitioned back to the `Unprocessed` state using the +/// `transition_to_unprocessed` method. If it is not retryable, the state should +/// be dropped. +/// +/// For performance, when a transaction is transitioned to the `Pending` state, the +/// internal `SanitizedTransaction` is moved out of the `TransactionState` and sent +/// to the appropriate thread for processing. This is done to avoid cloning the +/// `SanitizedTransaction`. +#[allow(clippy::large_enum_variant)] +pub(crate) enum TransactionState { + /// The transaction is available for scheduling. + Unprocessed { + transaction_ttl: SanitizedTransactionTTL, + transaction_priority_details: TransactionPriorityDetails, + forwarded: bool, + }, + /// The transaction is currently scheduled or being processed. + Pending { + transaction_priority_details: TransactionPriorityDetails, + forwarded: bool, + }, +} + +impl TransactionState { + /// Creates a new `TransactionState` in the `Unprocessed` state. 
+ pub(crate) fn new( + transaction_ttl: SanitizedTransactionTTL, + transaction_priority_details: TransactionPriorityDetails, + ) -> Self { + Self::Unprocessed { + transaction_ttl, + transaction_priority_details, + forwarded: false, + } + } + + /// Returns a reference to the priority details of the transaction. + pub(crate) fn transaction_priority_details(&self) -> &TransactionPriorityDetails { + match self { + Self::Unprocessed { + transaction_priority_details, + .. + } => transaction_priority_details, + Self::Pending { + transaction_priority_details, + .. + } => transaction_priority_details, + } + } + + /// Returns the priority of the transaction. + pub(crate) fn priority(&self) -> u64 { + self.transaction_priority_details().priority + } + + /// Returns whether or not the transaction has already been forwarded. + pub(crate) fn forwarded(&self) -> bool { + match self { + Self::Unprocessed { forwarded, .. } => *forwarded, + Self::Pending { forwarded, .. } => *forwarded, + } + } + + /// Sets the transaction as forwarded. + pub(crate) fn set_forwarded(&self) { + match self { + Self::Unprocessed { forwarded, .. } => *forwarded = true, + Self::Pending { forwarded, .. } => *forwarded = true, + } + } + + /// Intended to be called when a transaction is scheduled. This method will + /// transition the transaction from `Unprocessed` to `Pending` and return the + /// `SanitizedTransactionTTL` for processing. + /// + /// # Panics + /// This method will panic if the transaction is already in the `Pending` state, + /// as this is an invalid state transition. + pub(crate) fn transition_to_pending(&mut self) -> SanitizedTransactionTTL { + match self.take() { + TransactionState::Unprocessed { + transaction_ttl, + transaction_priority_details, + forwarded, + } => { + *self = TransactionState::Pending { + transaction_priority_details, + forwarded, + }; + transaction_ttl + } + TransactionState::Pending { .. 
} => { + panic!("transaction already pending"); + } + } + } + + /// Intended to be called when a transaction is retried. This method will + /// transition the transaction from `Pending` to `Unprocessed`. + /// + /// # Panics + /// This method will panic if the transaction is already in the `Unprocessed` + /// state, as this is an invalid state transition. + pub(crate) fn transition_to_unprocessed(&mut self, transaction_ttl: SanitizedTransactionTTL) { + match self.take() { + TransactionState::Unprocessed { .. } => panic!("already unprocessed"), + TransactionState::Pending { + transaction_priority_details, + forwarded, + } => { + *self = Self::Unprocessed { + transaction_ttl, + transaction_priority_details, + forwarded, + } + } + } + } + + /// Get a reference to the `SanitizedTransactionTTL` for the transaction. + /// + /// # Panics + /// This method will panic if the transaction is in the `Pending` state. + pub(crate) fn transaction_ttl(&self) -> &SanitizedTransactionTTL { + match self { + Self::Unprocessed { + transaction_ttl, .. + } => transaction_ttl, + Self::Pending { .. } => panic!("transaction is pending"), + } + } + + /// Internal helper to transitioning between states. + /// Replaces `self` with a dummy state that will immediately be overwritten in transition. 
+ fn take(&mut self) -> Self { + core::mem::replace( + self, + Self::Pending { + transaction_priority_details: TransactionPriorityDetails { + priority: 0, + compute_unit_limit: 0, + }, + forwarded: false, + }, + ) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, + signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, + }, + }; + + fn create_transaction_state(priority: u64) -> TransactionState { + let from_keypair = Keypair::new(); + let ixs = vec![ + system_instruction::transfer( + &from_keypair.pubkey(), + &solana_sdk::pubkey::new_rand(), + 1, + ), + ComputeBudgetInstruction::set_compute_unit_price(priority), + ]; + let message = Message::new(&ixs, Some(&from_keypair.pubkey())); + let tx = Transaction::new(&[&from_keypair], message, Hash::default()); + + let transaction_ttl = SanitizedTransactionTTL { + transaction: SanitizedTransaction::from_transaction_for_tests(tx), + max_age_slot: Slot::MAX, + }; + + TransactionState::new( + transaction_ttl, + TransactionPriorityDetails { + priority, + compute_unit_limit: 0, + }, + ) + } + + #[test] + #[should_panic(expected = "already pending")] + fn test_transition_to_pending_panic() { + let mut transaction_state = create_transaction_state(0); + transaction_state.transition_to_pending(); + transaction_state.transition_to_pending(); // invalid transition + } + + #[test] + fn test_transition_to_pending() { + let mut transaction_state = create_transaction_state(0); + assert!(matches!( + transaction_state, + TransactionState::Unprocessed { .. } + )); + let _ = transaction_state.transition_to_pending(); + assert!(matches!( + transaction_state, + TransactionState::Pending { .. 
} + )); + } + + #[test] + #[should_panic(expected = "already unprocessed")] + fn test_transition_to_unprocessed_panic() { + let mut transaction_state = create_transaction_state(0); + + // Manually clone `SanitizedTransactionTTL` + let SanitizedTransactionTTL { + transaction, + max_age_slot, + } = transaction_state.transaction_ttl(); + let transaction_ttl = SanitizedTransactionTTL { + transaction: transaction.clone(), + max_age_slot: *max_age_slot, + }; + transaction_state.transition_to_unprocessed(transaction_ttl); // invalid transition + } + + #[test] + fn test_transition_to_unprocessed() { + let mut transaction_state = create_transaction_state(0); + assert!(matches!( + transaction_state, + TransactionState::Unprocessed { .. } + )); + let transaction_ttl = transaction_state.transition_to_pending(); + assert!(matches!( + transaction_state, + TransactionState::Pending { .. } + )); + transaction_state.transition_to_unprocessed(transaction_ttl); + assert!(matches!( + transaction_state, + TransactionState::Unprocessed { .. } + )); + } + + #[test] + fn test_transaction_priority_details() { + let priority = 15; + let mut transaction_state = create_transaction_state(priority); + assert_eq!(transaction_state.priority(), priority); + + // ensure priority is not lost through state transitions + let transaction_ttl = transaction_state.transition_to_pending(); + assert_eq!(transaction_state.priority(), priority); + transaction_state.transition_to_unprocessed(transaction_ttl); + assert_eq!(transaction_state.priority(), priority); + } + + #[test] + #[should_panic(expected = "transaction is pending")] + fn test_transaction_ttl_panic() { + let mut transaction_state = create_transaction_state(0); + let transaction_ttl = transaction_state.transaction_ttl(); + assert!(matches!( + transaction_state, + TransactionState::Unprocessed { .. 
} + )); + assert_eq!(transaction_ttl.max_age_slot, Slot::MAX); + + let _ = transaction_state.transition_to_pending(); + assert!(matches!( + transaction_state, + TransactionState::Pending { .. } + )); + let _ = transaction_state.transaction_ttl(); // pending state, the transaction ttl is not available + } + + #[test] + fn test_transaction_ttl() { + let mut transaction_state = create_transaction_state(0); + let transaction_ttl = transaction_state.transaction_ttl(); + assert!(matches!( + transaction_state, + TransactionState::Unprocessed { .. } + )); + assert_eq!(transaction_ttl.max_age_slot, Slot::MAX); + + // ensure transaction_ttl is not lost through state transitions + let transaction_ttl = transaction_state.transition_to_pending(); + assert!(matches!( + transaction_state, + TransactionState::Pending { .. } + )); + + transaction_state.transition_to_unprocessed(transaction_ttl); + let transaction_ttl = transaction_state.transaction_ttl(); + assert!(matches!( + transaction_state, + TransactionState::Unprocessed { .. } + )); + assert_eq!(transaction_ttl.max_age_slot, Slot::MAX); + } +} diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs new file mode 100644 index 00000000000000..f5f80f30aceb40 --- /dev/null +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -0,0 +1,311 @@ +use { + super::{ + transaction_priority_id::TransactionPriorityId, + transaction_state::{SanitizedTransactionTTL, TransactionState}, + }, + crate::banking_stage::scheduler_messages::TransactionId, + min_max_heap::MinMaxHeap, + solana_runtime::transaction_priority_details::TransactionPriorityDetails, + std::collections::HashMap, +}; + +/// This structure will hold `TransactionState` for the entirety of a +/// transaction's lifetime in the scheduler and BankingStage as a whole. +/// +/// Transaction Lifetime: +/// 1. 
Received from `SigVerify` by `BankingStage` +/// 2. Inserted into `TransactionStateContainer` by `BankingStage` +/// 3. Popped in priority-order by scheduler, and transitioned to `Pending` state +/// 4. Processed by `ConsumeWorker` +/// a. If consumed, remove `Pending` state from the `TransactionStateContainer` +/// b. If retryable, transition back to `Unprocessed` state. +/// Re-insert to the queue, and return to step 3. +/// +/// The structure is composed of two main components: +/// 1. A priority queue of wrapped `TransactionId`s, which are used to +/// order transactions by priority for selection by the scheduler. +/// 2. A map of `TransactionId` to `TransactionState`, which is used to +/// track the state of each transaction. +/// +/// When `Pending`, the associated `TransactionId` is not in the queue, but +/// is still in the map. +/// The entry in the map should exist before insertion into the queue, and be +/// be removed only after the id is removed from the queue. +/// +/// The container maintains a fixed capacity. If the queue is full when pushing +/// a new transaction, the lowest priority transaction will be dropped. +pub(crate) struct TransactionStateContainer { + priority_queue: MinMaxHeap, + id_to_transaction_state: HashMap, +} + +impl TransactionStateContainer { + pub(crate) fn with_capacity(capacity: usize) -> Self { + Self { + priority_queue: MinMaxHeap::with_capacity(capacity), + id_to_transaction_state: HashMap::with_capacity(capacity), + } + } + + /// Returns true if the queue is empty. + pub(crate) fn is_empty(&self) -> bool { + self.priority_queue.is_empty() + } + + /// Returns the remaining capacity of the queue + pub(crate) fn remaining_queue_capacity(&self) -> usize { + self.priority_queue.capacity() - self.priority_queue.len() + } + + /// Get an iterator of the top `n` transaction ids in the priority queue. + /// This will remove the ids from the queue, but not drain the remainder + /// of the queue. 
+ pub(crate) fn take_top_n( + &mut self, + n: usize, + ) -> impl Iterator + '_ { + (0..n).map_while(|_| self.priority_queue.pop_max()) + } + + /// Serialize entire priority queue. `hold` indicates whether the priority queue should + /// be drained or not. + /// If `hold` is true, these ids should not be removed from the map while processing. + pub(crate) fn priority_ordered_ids(&mut self, hold: bool) -> Vec { + let priority_queue = if hold { + self.priority_queue.clone() + } else { + let capacity = self.priority_queue.capacity(); + core::mem::replace( + &mut self.priority_queue, + MinMaxHeap::with_capacity(capacity), + ) + }; + + priority_queue.into_vec_desc() + } + + /// Get mutable transaction state by id. + pub(crate) fn get_mut_transaction_state( + &mut self, + id: &TransactionId, + ) -> Option<&mut TransactionState> { + self.id_to_transaction_state.get_mut(id) + } + + /// Get reference to `SanitizedTransactionTTL` by id. + /// Panics if the transaction does not exist. + pub(crate) fn get_transaction_ttl( + &self, + id: &TransactionId, + ) -> Option<&SanitizedTransactionTTL> { + self.id_to_transaction_state + .get(id) + .map(|state| state.transaction_ttl()) + } + + /// Take `SanitizedTransactionTTL` by id. + /// This transitions the transaction to `Pending` state. + /// Panics if the transaction does not exist. + pub(crate) fn take_transaction(&mut self, id: &TransactionId) -> SanitizedTransactionTTL { + self.id_to_transaction_state + .get_mut(id) + .expect("transaction must exist") + .transition_to_pending() + } + + /// Insert a new transaction into the container's queues and maps. 
+ pub(crate) fn insert_new_transaction( + &mut self, + transaction_id: TransactionId, + transaction_ttl: SanitizedTransactionTTL, + transaction_priority_details: TransactionPriorityDetails, + ) { + let priority_id = + TransactionPriorityId::new(transaction_priority_details.priority, transaction_id); + self.id_to_transaction_state.insert( + transaction_id, + TransactionState::new(transaction_ttl, transaction_priority_details), + ); + self.push_id_into_queue(priority_id) + } + + /// Retries a transaction - inserts transaction back into map (but not packet). + /// This transitions the transaction to `Unprocessed` state. + pub(crate) fn retry_transaction( + &mut self, + transaction_id: TransactionId, + transaction_ttl: SanitizedTransactionTTL, + ) { + let transaction_state = self + .get_mut_transaction_state(&transaction_id) + .expect("transaction must exist"); + let priority_id = TransactionPriorityId::new(transaction_state.priority(), transaction_id); + transaction_state.transition_to_unprocessed(transaction_ttl); + self.push_id_into_queue(priority_id); + } + + /// Pushes a transaction id into the priority queue. If the queue is full, the lowest priority + /// transaction will be dropped (removed from the queue and map). + pub(crate) fn push_id_into_queue(&mut self, priority_id: TransactionPriorityId) { + if self.remaining_queue_capacity() == 0 { + let popped_id = self.priority_queue.push_pop_min(priority_id); + self.remove_by_id(&popped_id.id); + } else { + self.priority_queue.push(priority_id); + } + } + + /// Remove transaction by id. 
+ pub(crate) fn remove_by_id(&mut self, id: &TransactionId) { + self.id_to_transaction_state + .remove(id) + .expect("transaction must exist"); + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + compute_budget::ComputeBudgetInstruction, + hash::Hash, + message::Message, + signature::Keypair, + signer::Signer, + slot_history::Slot, + system_instruction, + transaction::{SanitizedTransaction, Transaction}, + }, + }; + + fn test_transaction(priority: u64) -> (SanitizedTransactionTTL, TransactionPriorityDetails) { + let from_keypair = Keypair::new(); + let ixs = vec![ + system_instruction::transfer( + &from_keypair.pubkey(), + &solana_sdk::pubkey::new_rand(), + 1, + ), + ComputeBudgetInstruction::set_compute_unit_price(priority), + ]; + let message = Message::new(&ixs, Some(&from_keypair.pubkey())); + let tx = Transaction::new(&[&from_keypair], message, Hash::default()); + + let transaction_ttl = SanitizedTransactionTTL { + transaction: SanitizedTransaction::from_transaction_for_tests(tx), + max_age_slot: Slot::MAX, + }; + ( + transaction_ttl, + TransactionPriorityDetails { + priority, + compute_unit_limit: 0, + }, + ) + } + + fn push_to_container(container: &mut TransactionStateContainer, num: usize) { + for id in 0..num as u64 { + let priority = id; + let (transaction_ttl, transaction_priority_details) = test_transaction(priority); + container.insert_new_transaction( + TransactionId::new(id), + transaction_ttl, + transaction_priority_details, + ); + } + } + + #[test] + fn test_is_empty() { + let mut container = TransactionStateContainer::with_capacity(1); + assert!(container.is_empty()); + + push_to_container(&mut container, 1); + assert!(!container.is_empty()); + } + + #[test] + fn test_priority_queue_capacity() { + let mut container = TransactionStateContainer::with_capacity(1); + push_to_container(&mut container, 5); + + assert_eq!(container.priority_queue.len(), 1); + assert_eq!(container.id_to_transaction_state.len(), 1); + assert_eq!( + 
container + .id_to_transaction_state + .iter() + .map(|ts| ts.1.priority()) + .next() + .unwrap(), + 4 + ); + } + + #[test] + fn test_take_top_n() { + let mut container = TransactionStateContainer::with_capacity(5); + push_to_container(&mut container, 5); + + let taken = container.take_top_n(3).collect::>(); + assert_eq!( + taken, + vec![ + TransactionPriorityId::new(4, TransactionId::new(4)), + TransactionPriorityId::new(3, TransactionId::new(3)), + TransactionPriorityId::new(2, TransactionId::new(2)), + ] + ); + // The remainder of the queue should not be empty + assert_eq!(container.priority_queue.len(), 2); + } + + #[test] + fn test_priority_ordered_ids() { + let mut container = TransactionStateContainer::with_capacity(5); + push_to_container(&mut container, 5); + + let ordered = container.priority_ordered_ids(false); + assert_eq!( + ordered, + vec![ + TransactionPriorityId::new(4, TransactionId::new(4)), + TransactionPriorityId::new(3, TransactionId::new(3)), + TransactionPriorityId::new(2, TransactionId::new(2)), + TransactionPriorityId::new(1, TransactionId::new(1)), + TransactionPriorityId::new(0, TransactionId::new(0)), + ] + ); + assert!(container.priority_queue.is_empty()); + + push_to_container(&mut container, 5); + let ordered = container.priority_ordered_ids(true); + assert_eq!( + ordered, + vec![ + TransactionPriorityId::new(4, TransactionId::new(4)), + TransactionPriorityId::new(3, TransactionId::new(3)), + TransactionPriorityId::new(2, TransactionId::new(2)), + TransactionPriorityId::new(1, TransactionId::new(1)), + TransactionPriorityId::new(0, TransactionId::new(0)), + ] + ); + assert_eq!(container.priority_queue.len(), 5); + } + + #[test] + fn test_get_mut_transaction_state() { + let mut container = TransactionStateContainer::with_capacity(5); + push_to_container(&mut container, 5); + + let existing_id = TransactionId::new(3); + let non_existing_id = TransactionId::new(7); + assert!(container.get_mut_transaction_state(&existing_id).is_some()); + 
assert!(container.get_mut_transaction_state(&existing_id).is_some()); + assert!(container + .get_mut_transaction_state(&non_existing_id) + .is_none()); + } +} From 4f4ce69f5f7cd25183cca771034e190148dc39f6 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Mon, 11 Sep 2023 10:08:55 -0500 Subject: [PATCH 051/407] purge duplicated bank prioritization fee from cache (#33062) * purge duplicated bank prioritization fee from cache * add test for purge dup bank * Added metrics counts to monitor anomalies * fix a flaky test --- .../optimistically_confirmed_bank_tracker.rs | 2 +- rpc/src/rpc.rs | 6 +- runtime/src/prioritization_fee.rs | 48 ++- runtime/src/prioritization_fee_cache.rs | 330 +++++++++++++----- 4 files changed, 278 insertions(+), 108 deletions(-) diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs index 3d3643b44a1050..3179e570920383 100644 --- a/rpc/src/optimistically_confirmed_bank_tracker.rs +++ b/rpc/src/optimistically_confirmed_bank_tracker.rs @@ -197,7 +197,7 @@ impl OptimisticallyConfirmedBankTracker { ); // finalize block's minimum prioritization fee cache for this bank - prioritization_fee_cache.finalize_priority_fee(bank.slot()); + prioritization_fee_cache.finalize_priority_fee(bank.slot(), bank.bank_id()); } } else if bank.slot() > bank_forks.read().unwrap().root() { pending_optimistically_confirmed_banks.insert(bank.slot()); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 3e284b1f942574..90fd6a2a214162 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -8658,6 +8658,7 @@ pub mod tests { 0 ); let slot0 = rpc.working_bank().slot(); + let bank0_id = rpc.working_bank().bank_id(); let account0 = Pubkey::new_unique(); let account1 = Pubkey::new_unique(); let account2 = Pubkey::new_unique(); @@ -8677,7 +8678,7 @@ pub mod tests { ]; rpc.update_prioritization_fee_cache(transactions); let cache = rpc.get_prioritization_fee_cache(); - 
cache.finalize_priority_fee(slot0); + cache.finalize_priority_fee(slot0, bank0_id); wait_for_cache_blocks(cache, 1); let request = create_test_request("getRecentPrioritizationFees", None); @@ -8721,6 +8722,7 @@ pub mod tests { rpc.advance_bank_to_confirmed_slot(1); let slot1 = rpc.working_bank().slot(); + let bank1_id = rpc.working_bank().bank_id(); let price1 = 11; let transactions = vec![ Transaction::new_unsigned(Message::new( @@ -8737,7 +8739,7 @@ pub mod tests { ]; rpc.update_prioritization_fee_cache(transactions); let cache = rpc.get_prioritization_fee_cache(); - cache.finalize_priority_fee(slot1); + cache.finalize_priority_fee(slot1, bank1_id); wait_for_cache_blocks(cache, 2); let request = create_test_request("getRecentPrioritizationFees", None); diff --git a/runtime/src/prioritization_fee.rs b/runtime/src/prioritization_fee.rs index a7d28f11fc77ea..bb5f7632c97e01 100644 --- a/runtime/src/prioritization_fee.rs +++ b/runtime/src/prioritization_fee.rs @@ -19,6 +19,9 @@ struct PrioritizationFeeMetrics { // Count of transactions that have zero prioritization fee. non_prioritized_transactions_count: u64, + // Count of attempted update on finalized PrioritizationFee + attempted_update_on_finalized_fee_count: u64, + // Total prioritization fees included in this slot. 
total_prioritization_fee: u64, @@ -41,6 +44,10 @@ impl PrioritizationFeeMetrics { saturating_add_assign!(self.total_update_elapsed_us, val); } + fn increment_attempted_update_on_finalized_fee_count(&mut self, val: u64) { + saturating_add_assign!(self.attempted_update_on_finalized_fee_count, val); + } + fn update_prioritization_fee(&mut self, fee: u64) { if fee == 0 { saturating_add_assign!(self.non_prioritized_transactions_count, 1); @@ -82,6 +89,11 @@ impl PrioritizationFeeMetrics { self.non_prioritized_transactions_count as i64, i64 ), + ( + "attempted_update_on_finalized_fee_count", + self.attempted_update_on_finalized_fee_count as i64, + i64 + ), ( "total_prioritization_fee", self.total_prioritization_fee as i64, @@ -106,6 +118,7 @@ impl PrioritizationFeeMetrics { } } +#[derive(Debug)] pub enum PrioritizationFeeError { // Not able to get account locks from sanitized transaction, which is required to update block // minimum fees. @@ -159,22 +172,27 @@ impl PrioritizationFee { ) -> Result<(), PrioritizationFeeError> { let (_, update_time) = measure!( { - if transaction_fee < self.min_transaction_fee { - self.min_transaction_fee = transaction_fee; + if !self.is_finalized { + if transaction_fee < self.min_transaction_fee { + self.min_transaction_fee = transaction_fee; + } + + for write_account in writable_accounts.iter() { + self.min_writable_account_fees + .entry(*write_account) + .and_modify(|write_lock_fee| { + *write_lock_fee = std::cmp::min(*write_lock_fee, transaction_fee) + }) + .or_insert(transaction_fee); + } + + self.metrics + .accumulate_total_prioritization_fee(transaction_fee); + self.metrics.update_prioritization_fee(transaction_fee); + } else { + self.metrics + .increment_attempted_update_on_finalized_fee_count(1); } - - for write_account in writable_accounts.iter() { - self.min_writable_account_fees - .entry(*write_account) - .and_modify(|write_lock_fee| { - *write_lock_fee = std::cmp::min(*write_lock_fee, transaction_fee) - }) - 
.or_insert(transaction_fee); - } - - self.metrics - .accumulate_total_prioritization_fee(transaction_fee); - self.metrics.update_prioritization_fee(transaction_fee); }, "update_time", ); diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index 78f6214bb499a9..e1005bd634de0b 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -4,17 +4,20 @@ use { transaction_priority_details::GetTransactionPriorityDetails, }, crossbeam_channel::{unbounded, Receiver, Sender}, + dashmap::DashMap, log::*, lru::LruCache, solana_measure::measure, solana_sdk::{ - clock::Slot, pubkey::Pubkey, saturating_add_assign, transaction::SanitizedTransaction, + clock::{BankId, Slot}, + pubkey::Pubkey, + transaction::SanitizedTransaction, }, std::{ collections::HashMap, sync::{ atomic::{AtomicU64, Ordering}, - Arc, Mutex, RwLock, + Arc, RwLock, }, thread::{Builder, JoinHandle}, }, @@ -30,15 +33,15 @@ struct PrioritizationFeeCacheMetrics { // Count of transactions that successfully updated each slot's prioritization fee cache. successful_transaction_update_count: AtomicU64, + // Count of duplicated banks being purged + purged_duplicated_bank_count: AtomicU64, + // Accumulated time spent on tracking prioritization fee for each slot. total_update_elapsed_us: AtomicU64, // Accumulated time spent on acquiring cache write lock. total_cache_lock_elapsed_us: AtomicU64, - // Accumulated time spent on acquiring each block entry's lock.. - total_entry_lock_elapsed_us: AtomicU64, - // Accumulated time spent on updating block prioritization fees. 
total_entry_update_elapsed_us: AtomicU64, @@ -52,6 +55,11 @@ impl PrioritizationFeeCacheMetrics { .fetch_add(val, Ordering::Relaxed); } + fn accumulate_total_purged_duplicated_bank_count(&self, val: u64) { + self.purged_duplicated_bank_count + .fetch_add(val, Ordering::Relaxed); + } + fn accumulate_total_update_elapsed_us(&self, val: u64) { self.total_update_elapsed_us .fetch_add(val, Ordering::Relaxed); @@ -62,11 +70,6 @@ impl PrioritizationFeeCacheMetrics { .fetch_add(val, Ordering::Relaxed); } - fn accumulate_total_entry_lock_elapsed_us(&self, val: u64) { - self.total_entry_lock_elapsed_us - .fetch_add(val, Ordering::Relaxed); - } - fn accumulate_total_entry_update_elapsed_us(&self, val: u64) { self.total_entry_update_elapsed_us .fetch_add(val, Ordering::Relaxed); @@ -87,6 +90,11 @@ impl PrioritizationFeeCacheMetrics { .swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "purged_duplicated_bank_count", + self.purged_duplicated_bank_count.swap(0, Ordering::Relaxed) as i64, + i64 + ), ( "total_update_elapsed_us", self.total_update_elapsed_us.swap(0, Ordering::Relaxed) as i64, @@ -97,11 +105,6 @@ impl PrioritizationFeeCacheMetrics { self.total_cache_lock_elapsed_us.swap(0, Ordering::Relaxed) as i64, i64 ), - ( - "total_entry_lock_elapsed_us", - self.total_entry_lock_elapsed_us.swap(0, Ordering::Relaxed) as i64, - i64 - ), ( "total_entry_update_elapsed_us", self.total_entry_update_elapsed_us @@ -121,20 +124,26 @@ impl PrioritizationFeeCacheMetrics { enum CacheServiceUpdate { TransactionUpdate { slot: Slot, + bank_id: BankId, transaction_fee: u64, writable_accounts: Arc>, }, - BankFrozen { + BankFinalized { slot: Slot, + bank_id: BankId, }, Exit, } +/// Potentially there are more than one bank that updates Prioritization Fee +/// for a slot. The updates are tracked and finalized by bank_id. 
+type SlotPrioritizationFee = DashMap; + /// Stores up to MAX_NUM_RECENT_BLOCKS recent block's prioritization fee, /// A separate internal thread `service_thread` handles additional tasks when a bank is frozen, /// and collecting stats and reporting metrics. pub struct PrioritizationFeeCache { - cache: Arc>>>>, + cache: Arc>>>, service_thread: Option>, sender: Sender, metrics: Arc, @@ -184,14 +193,14 @@ impl PrioritizationFeeCache { /// Get prioritization fee entry, create new entry if necessary fn get_prioritization_fee( - cache: Arc>>>>, + cache: Arc>>>, slot: &Slot, - ) -> Arc> { + ) -> Arc { let mut cache = cache.write().unwrap(); match cache.get(slot) { Some(entry) => Arc::clone(entry), None => { - let entry = Arc::new(Mutex::new(PrioritizationFee::default())); + let entry = Arc::new(SlotPrioritizationFee::default()); cache.put(*slot, Arc::clone(&entry)); entry } @@ -202,7 +211,6 @@ impl PrioritizationFeeCache { /// transactions have both valid priority_detail and account_locks will be used to update /// fee_cache asynchronously. 
pub fn update<'a>(&self, bank: &Bank, txs: impl Iterator) { - let mut successful_transaction_update_count: u64 = 0; let (_, send_updates_time) = measure!( { for sanitized_transaction in txs { @@ -241,6 +249,7 @@ impl PrioritizationFeeCache { self.sender .send(CacheServiceUpdate::TransactionUpdate { slot: bank.slot(), + bank_id: bank.bank_id(), transaction_fee: priority_details.priority, writable_accounts, }) @@ -250,7 +259,6 @@ impl PrioritizationFeeCache { err ); }); - saturating_add_assign!(successful_transaction_update_count, 1) } }, "send_updates", @@ -258,15 +266,13 @@ impl PrioritizationFeeCache { self.metrics .accumulate_total_update_elapsed_us(send_updates_time.as_us()); - self.metrics - .accumulate_successful_transaction_update_count(successful_transaction_update_count); } /// Finalize prioritization fee when it's bank is completely replayed from blockstore, /// by pruning irrelevant accounts to save space, and marking its availability for queries. - pub fn finalize_priority_fee(&self, slot: Slot) { + pub fn finalize_priority_fee(&self, slot: Slot, bank_id: BankId) { self.sender - .send(CacheServiceUpdate::BankFrozen { slot }) + .send(CacheServiceUpdate::BankFinalized { slot, bank_id }) .unwrap_or_else(|err| { warn!( "prioritization fee cache signalling bank frozen failed: {:?}", @@ -278,53 +284,76 @@ impl PrioritizationFeeCache { /// Internal function is invoked by worker thread to update slot's minimum prioritization fee, /// Cache lock contends here. 
fn update_cache( - cache: Arc>>>>, + cache: Arc>>>, slot: &Slot, + bank_id: &BankId, transaction_fee: u64, writable_accounts: Arc>, metrics: Arc, ) { - let (block_prioritization_fee, cache_lock_time) = + let (slot_prioritization_fee, cache_lock_time) = measure!(Self::get_prioritization_fee(cache, slot), "cache_lock_time"); - let (mut block_prioritization_fee, entry_lock_time) = - measure!(block_prioritization_fee.lock().unwrap(), "entry_lock_time"); - let (_, entry_update_time) = measure!( - block_prioritization_fee.update(transaction_fee, &writable_accounts), + { + let mut block_prioritization_fee = slot_prioritization_fee + .entry(*bank_id) + .or_insert(PrioritizationFee::default()); + block_prioritization_fee.update(transaction_fee, &writable_accounts) + }, "entry_update_time" ); metrics.accumulate_total_cache_lock_elapsed_us(cache_lock_time.as_us()); - metrics.accumulate_total_entry_lock_elapsed_us(entry_lock_time.as_us()); metrics.accumulate_total_entry_update_elapsed_us(entry_update_time.as_us()); + metrics.accumulate_successful_transaction_update_count(1); } fn finalize_slot( - cache: Arc>>>>, + cache: Arc>>>, slot: &Slot, + bank_id: &BankId, metrics: Arc, ) { - let (block_prioritization_fee, cache_lock_time) = + let (slot_prioritization_fee, cache_lock_time) = measure!(Self::get_prioritization_fee(cache, slot), "cache_lock_time"); - let (mut block_prioritization_fee, entry_lock_time) = - measure!(block_prioritization_fee.lock().unwrap(), "entry_lock_time"); - // prune cache by evicting write account entry from prioritization fee if its fee is less // or equal to block's minimum transaction fee, because they are irrelevant in calculating // block minimum fee. 
- let (_, slot_finalize_time) = measure!( - block_prioritization_fee.mark_block_completed(), + let (result, slot_finalize_time) = measure!( + { + let pre_purge_bank_count = slot_prioritization_fee.len() as u64; + slot_prioritization_fee.retain(|id, _| id == bank_id); + let post_purge_bank_count = slot_prioritization_fee.len() as u64; + metrics.accumulate_total_purged_duplicated_bank_count( + pre_purge_bank_count.saturating_sub(post_purge_bank_count), + ); + if post_purge_bank_count == 0 { + warn!("Prioritization fee cache unexpected finalized on non-existing bank. slot {slot} bank id {bank_id}"); + } + + let mut block_prioritization_fee = slot_prioritization_fee + .entry(*bank_id) + .or_insert(PrioritizationFee::default()); + let result = block_prioritization_fee.mark_block_completed(); + block_prioritization_fee.report_metrics(*slot); + result + }, "slot_finalize_time" ); - block_prioritization_fee.report_metrics(*slot); metrics.accumulate_total_cache_lock_elapsed_us(cache_lock_time.as_us()); - metrics.accumulate_total_entry_lock_elapsed_us(entry_lock_time.as_us()); metrics.accumulate_total_block_finalize_elapsed_us(slot_finalize_time.as_us()); + + if let Err(err) = result { + error!( + "Unsuccessful finalizing slot {slot}, bank ID {bank_id}: {:?}", + err + ); + } } fn service_loop( - cache: Arc>>>>, + cache: Arc>>>, receiver: Receiver, metrics: Arc, ) { @@ -332,17 +361,19 @@ impl PrioritizationFeeCache { match update { CacheServiceUpdate::TransactionUpdate { slot, + bank_id, transaction_fee, writable_accounts, } => Self::update_cache( cache.clone(), &slot, + &bank_id, transaction_fee, writable_accounts, metrics.clone(), ), - CacheServiceUpdate::BankFrozen { slot } => { - Self::finalize_slot(cache.clone(), &slot, metrics.clone()); + CacheServiceUpdate::BankFinalized { slot, bank_id } => { + Self::finalize_slot(cache.clone(), &slot, &bank_id, metrics.clone()); metrics.report(slot); } @@ -359,7 +390,11 @@ impl PrioritizationFeeCache { .read() .unwrap() .iter() - 
.filter(|(_slot, prioritization_fee)| prioritization_fee.lock().unwrap().is_finalized()) + .filter(|(_slot, slot_prioritization_fee)| { + slot_prioritization_fee + .iter() + .any(|prioritization_fee| prioritization_fee.is_finalized()) + }) .count() } @@ -368,21 +403,24 @@ impl PrioritizationFeeCache { .read() .unwrap() .iter() - .filter_map(|(slot, prioritization_fee)| { - let prioritization_fee_read = prioritization_fee.lock().unwrap(); - prioritization_fee_read.is_finalized().then(|| { - let mut fee = prioritization_fee_read - .get_min_transaction_fee() - .unwrap_or_default(); - for account_key in account_keys { - if let Some(account_fee) = - prioritization_fee_read.get_writable_account_fee(account_key) - { - fee = std::cmp::max(fee, account_fee); - } - } - Some((*slot, fee)) - }) + .filter_map(|(slot, slot_prioritization_fee)| { + slot_prioritization_fee + .iter() + .find_map(|prioritization_fee| { + prioritization_fee.is_finalized().then(|| { + let mut fee = prioritization_fee + .get_min_transaction_fee() + .unwrap_or_default(); + for account_key in account_keys { + if let Some(account_fee) = + prioritization_fee.get_writable_account_fee(account_key) + { + fee = std::cmp::max(fee, account_fee); + } + } + Some((*slot, fee)) + }) + }) }) .flatten() .collect() @@ -427,21 +465,22 @@ mod tests { fn sync_update<'a>( prioritization_fee_cache: &PrioritizationFeeCache, bank: Arc, - txs: impl Iterator, + txs: impl Iterator + ExactSizeIterator, ) { - prioritization_fee_cache.update(&bank, txs); + let expected_update_count = prioritization_fee_cache + .metrics + .successful_transaction_update_count + .load(Ordering::Relaxed) + .saturating_add(txs.len() as u64); - let block_fee = PrioritizationFeeCache::get_prioritization_fee( - prioritization_fee_cache.cache.clone(), - &bank.slot(), - ); + prioritization_fee_cache.update(&bank, txs); - // wait till update is done - while block_fee - .lock() - .unwrap() - .get_min_transaction_fee() - .is_none() + // wait till expected 
number of transaction updates have occurred... + while prioritization_fee_cache + .metrics + .successful_transaction_update_count + .load(Ordering::Relaxed) + != expected_update_count { std::thread::sleep(std::time::Duration::from_millis(100)); } @@ -451,15 +490,19 @@ mod tests { fn sync_finalize_priority_fee_for_test( prioritization_fee_cache: &PrioritizationFeeCache, slot: Slot, + bank_id: BankId, ) { - prioritization_fee_cache.finalize_priority_fee(slot); + prioritization_fee_cache.finalize_priority_fee(slot, bank_id); let fee = PrioritizationFeeCache::get_prioritization_fee( prioritization_fee_cache.cache.clone(), &slot, ); // wait till finalization is done - while !fee.lock().unwrap().is_finalized() { + while !fee + .get(&bank_id) + .map_or(false, |block_fee| block_fee.is_finalized()) + { std::thread::sleep(std::time::Duration::from_millis(100)); } } @@ -490,7 +533,7 @@ mod tests { let slot = bank.slot(); let prioritization_fee_cache = PrioritizationFeeCache::default(); - sync_update(&prioritization_fee_cache, bank, txs.iter()); + sync_update(&prioritization_fee_cache, bank.clone(), txs.iter()); // assert block minimum fee and account a, b, c fee accordingly { @@ -498,7 +541,7 @@ mod tests { prioritization_fee_cache.cache.clone(), &slot, ); - let fee = fee.lock().unwrap(); + let fee = fee.get(&bank.bank_id()).unwrap(); assert_eq!(2, fee.get_min_transaction_fee().unwrap()); assert_eq!(2, fee.get_writable_account_fee(&write_account_a).unwrap()); assert_eq!(5, fee.get_writable_account_fee(&write_account_b).unwrap()); @@ -511,12 +554,12 @@ mod tests { // assert after prune, account a and c should be removed from cache to save space { - sync_finalize_priority_fee_for_test(&prioritization_fee_cache, slot); + sync_finalize_priority_fee_for_test(&prioritization_fee_cache, slot, bank.bank_id()); let fee = PrioritizationFeeCache::get_prioritization_fee( prioritization_fee_cache.cache.clone(), &slot, ); - let fee = fee.lock().unwrap(); + let fee = 
fee.get(&bank.bank_id()).unwrap(); assert_eq!(2, fee.get_min_transaction_fee().unwrap()); assert!(fee.get_writable_account_fee(&write_account_a).is_none()); assert_eq!(5, fee.get_writable_account_fee(&write_account_b).unwrap()); @@ -532,20 +575,22 @@ mod tests { prioritization_fee_cache.cache.clone(), &1 ) - .lock() - .unwrap() + .entry(1) + .or_default() .mark_block_completed() .is_ok()); assert!(PrioritizationFeeCache::get_prioritization_fee( prioritization_fee_cache.cache.clone(), &2 ) - .lock() - .unwrap() + .entry(2) + .or_default() .mark_block_completed() .is_ok()); // add slot 3 entry to cache, but not finalize it - PrioritizationFeeCache::get_prioritization_fee(prioritization_fee_cache.cache.clone(), &3); + PrioritizationFeeCache::get_prioritization_fee(prioritization_fee_cache.cache.clone(), &3) + .entry(3) + .or_default(); // assert available block count should be 2 finalized blocks assert_eq!(2, prioritization_fee_cache.available_block_count()); @@ -603,7 +648,7 @@ mod tests { &Pubkey::new_unique(), ), ]; - sync_update(&prioritization_fee_cache, bank1, txs.iter()); + sync_update(&prioritization_fee_cache, bank1.clone(), txs.iter()); // before block is marked as completed assert!(prioritization_fee_cache .get_prioritization_fees(&[]) @@ -624,7 +669,7 @@ mod tests { .get_prioritization_fees(&[write_account_a, write_account_b, write_account_c]) .is_empty()); // after block is completed - sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 1); + sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 1, bank1.bank_id()); assert_eq!( hashmap_of(vec![(1, 1)]), prioritization_fee_cache.get_prioritization_fees(&[]) @@ -666,7 +711,7 @@ mod tests { &Pubkey::new_unique(), ), ]; - sync_update(&prioritization_fee_cache, bank2, txs.iter()); + sync_update(&prioritization_fee_cache, bank2.clone(), txs.iter()); // before block is marked as completed assert_eq!( hashmap_of(vec![(1, 1)]), @@ -698,7 +743,7 @@ mod tests { ]) ); // after block is completed - 
sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 2); + sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 2, bank2.bank_id()); assert_eq!( hashmap_of(vec![(2, 3), (1, 1)]), prioritization_fee_cache.get_prioritization_fees(&[]), @@ -740,7 +785,7 @@ mod tests { &Pubkey::new_unique(), ), ]; - sync_update(&prioritization_fee_cache, bank3, txs.iter()); + sync_update(&prioritization_fee_cache, bank3.clone(), txs.iter()); // before block is marked as completed assert_eq!( hashmap_of(vec![(2, 3), (1, 1)]), @@ -772,7 +817,7 @@ mod tests { ]), ); // after block is completed - sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 3); + sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 3, bank3.bank_id()); assert_eq!( hashmap_of(vec![(3, 5), (2, 3), (1, 1)]), prioritization_fee_cache.get_prioritization_fees(&[]), @@ -804,4 +849,109 @@ mod tests { ); } } + + #[test] + fn test_purge_duplicated_bank() { + // duplicated bank can exists for same slot before OC. + // prioritization_fee_cache should only have data from OC-ed bank + solana_logger::setup(); + let write_account_a = Pubkey::new_unique(); + let write_account_b = Pubkey::new_unique(); + let write_account_c = Pubkey::new_unique(); + + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank0 = Bank::new_for_benches(&genesis_config); + let bank_forks = BankForks::new(bank0); + let bank = bank_forks.working_bank(); + let collector = solana_sdk::pubkey::new_rand(); + let slot: Slot = 999; + let bank1 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, slot)); + let bank2 = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + + let prioritization_fee_cache = PrioritizationFeeCache::default(); + + // Assert after add transactions for bank1 of slot 1 + { + let txs = vec![ + build_sanitized_transaction_for_test(2, &write_account_a, &write_account_b), + build_sanitized_transaction_for_test( + 1, + &Pubkey::new_unique(), + &Pubkey::new_unique(), + ), + ]; + sync_update(&prioritization_fee_cache, bank1.clone(), txs.iter()); + + let slot_prioritization_fee = PrioritizationFeeCache::get_prioritization_fee( + prioritization_fee_cache.cache.clone(), + &slot, + ); + assert_eq!(1, slot_prioritization_fee.len()); + assert!(slot_prioritization_fee.contains_key(&bank1.bank_id())); + } + + // Assert after add transactions for bank2 of slot 1 + { + let txs = vec![ + build_sanitized_transaction_for_test(4, &write_account_b, &write_account_c), + build_sanitized_transaction_for_test( + 3, + &Pubkey::new_unique(), + &Pubkey::new_unique(), + ), + ]; + sync_update(&prioritization_fee_cache, bank2.clone(), txs.iter()); + + let slot_prioritization_fee = PrioritizationFeeCache::get_prioritization_fee( + prioritization_fee_cache.cache.clone(), + &slot, + ); + assert_eq!(2, slot_prioritization_fee.len()); + assert!(slot_prioritization_fee.contains_key(&bank1.bank_id())); + assert!(slot_prioritization_fee.contains_key(&bank2.bank_id())); + } + + // Assert after finalize with bank1 of slot 1, + { + sync_finalize_priority_fee_for_test(&prioritization_fee_cache, slot, bank1.bank_id()); + + let slot_prioritization_fee = PrioritizationFeeCache::get_prioritization_fee( + prioritization_fee_cache.cache.clone(), + &slot, + ); + 
assert_eq!(1, slot_prioritization_fee.len()); + assert!(slot_prioritization_fee.contains_key(&bank1.bank_id())); + + // and data available for query are from bank1 + assert_eq!( + hashmap_of(vec![(slot, 1)]), + prioritization_fee_cache.get_prioritization_fees(&[]) + ); + assert_eq!( + hashmap_of(vec![(slot, 2)]), + prioritization_fee_cache.get_prioritization_fees(&[write_account_a]) + ); + assert_eq!( + hashmap_of(vec![(slot, 2)]), + prioritization_fee_cache.get_prioritization_fees(&[write_account_b]) + ); + assert_eq!( + hashmap_of(vec![(slot, 1)]), + prioritization_fee_cache.get_prioritization_fees(&[write_account_c]) + ); + assert_eq!( + hashmap_of(vec![(slot, 2)]), + prioritization_fee_cache + .get_prioritization_fees(&[write_account_a, write_account_b]) + ); + assert_eq!( + hashmap_of(vec![(slot, 2)]), + prioritization_fee_cache.get_prioritization_fees(&[ + write_account_a, + write_account_b, + write_account_c + ]) + ); + } + } } From 297ffad7977ac42cf5d08abec62c181e7f3d31e1 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 11 Sep 2023 09:34:48 -0700 Subject: [PATCH 052/407] set_forwarded needs mut ref (#33203) --- .../banking_stage/transaction_scheduler/transaction_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state.rs b/core/src/banking_stage/transaction_scheduler/transaction_state.rs index da3916cd20ec1c..c3ea1df03d036d 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state.rs @@ -84,7 +84,7 @@ impl TransactionState { } /// Sets the transaction as forwarded. - pub(crate) fn set_forwarded(&self) { + pub(crate) fn set_forwarded(&mut self) { match self { Self::Unprocessed { forwarded, .. } => *forwarded = true, Self::Pending { forwarded, .. 
} => *forwarded = true, From 7fc6fea8d86bd0a3c97d820718998dd77872d339 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Mon, 11 Sep 2023 16:57:10 +0000 Subject: [PATCH 053/407] serves remote repair requests from QUIC endpoint (#33069) The commit implements server-side of repair using QUIC protocol. UDP repair requests are adapted as RemoteRequest and sent down the same channel as remote requests arriving over QUIC, and the rest of the server code is update to process over RemoteRequest type. --- core/src/repair/ancestor_hashes_service.rs | 24 ++- core/src/repair/quic_endpoint.rs | 8 + core/src/repair/serve_repair.rs | 184 ++++++++++++++------- core/src/repair/serve_repair_service.rs | 46 +++++- core/src/validator.rs | 5 + 5 files changed, 184 insertions(+), 83 deletions(-) diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 0be84e2f74f8bb..29f08862580d5d 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -124,16 +124,7 @@ impl AncestorRepairRequestsStats { .ancestor_requests .slot_pubkeys .iter() - .map(|(slot, slot_repairs)| { - ( - slot, - slot_repairs - .pubkey_repairs() - .iter() - .map(|(_key, count)| count) - .sum::(), - ) - }) + .map(|(slot, slot_repairs)| (slot, slot_repairs.pubkey_repairs().values().sum::())) .collect(); let repair_total = self.ancestor_requests.count; @@ -161,8 +152,7 @@ impl AncestorHashesService { repair_info: RepairInfo, ancestor_hashes_replay_update_receiver: AncestorHashesReplayUpdateReceiver, ) -> Self { - let outstanding_requests: Arc> = - Arc::new(RwLock::new(OutstandingAncestorHashesRepairs::default())); + let outstanding_requests = Arc::>::default(); let (response_sender, response_receiver) = unbounded(); let t_receiver = streamer::receiver( ancestor_hashes_request_socket.clone(), @@ -864,6 +854,7 @@ mod test { cluster_slot_state_verifier::{DuplicateSlotsToRepair, PurgeRepairSlotCounter}, 
duplicate_repair_status::DuplicateAncestorDecision, serve_repair::MAX_ANCESTOR_RESPONSES, + serve_repair_service::adapt_repair_requests_packets, }, replay_stage::{ tests::{replay_blockstore_components, ReplayBlockstoreComponents}, @@ -1189,6 +1180,7 @@ mod test { struct ResponderThreads { t_request_receiver: JoinHandle<()>, t_listen: JoinHandle<()>, + t_packet_adapter: JoinHandle<()>, exit: Arc, responder_info: ContactInfo, response_receiver: PacketBatchReceiver, @@ -1200,6 +1192,7 @@ mod test { self.exit.store(true, Ordering::Relaxed); self.t_request_receiver.join().unwrap(); self.t_listen.join().unwrap(); + self.t_packet_adapter.join().unwrap(); } fn new(slot_to_query: Slot) -> Self { @@ -1255,9 +1248,13 @@ mod test { false, None, ); + let (remote_request_sender, remote_request_receiver) = unbounded(); + let t_packet_adapter = Builder::new() + .spawn(|| adapt_repair_requests_packets(requests_receiver, remote_request_sender)) + .unwrap(); let t_listen = responder_serve_repair.listen( blockstore, - requests_receiver, + remote_request_receiver, response_sender, exit.clone(), ); @@ -1265,6 +1262,7 @@ mod test { Self { t_request_receiver, t_listen, + t_packet_adapter, exit, responder_info: responder_node.info, response_receiver, diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index ec2c2db07a70c5..03dfa42bd2b05e 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -41,6 +41,7 @@ const ALPN_REPAIR_PROTOCOL_ID: &[u8] = b"solana-repair"; const CONNECT_SERVER_NAME: &str = "solana-repair"; const CLIENT_CHANNEL_CAPACITY: usize = 1 << 14; +const CONNECTION_CACHE_CAPACITY: usize = 4096; const MAX_CONCURRENT_BIDI_STREAMS: VarInt = VarInt::from_u32(512); const CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(1); @@ -485,6 +486,13 @@ async fn cache_connection( // only by SocketAddr when establishing outgoing connections. 
let entries: [Arc>>; 2] = { let mut cache = cache.write().await; + if cache.len() >= CONNECTION_CACHE_CAPACITY { + connection.close( + CONNECTION_CLOSE_ERROR_CODE_DROPPED, + CONNECTION_CLOSE_REASON_DROPPED, + ); + return; + } [Some(remote_pubkey), None].map(|remote_pubkey| { let key = (remote_address, remote_pubkey); cache.entry(key).or_default().clone() diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 024b18088f7532..0610e2ea7ae763 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -3,14 +3,15 @@ use { cluster_slots_service::cluster_slots::ClusterSlots, repair::{ duplicate_repair_status::get_ancestor_hash_repair_sample_size, + quic_endpoint::RemoteRequest, repair_response, repair_service::{OutstandingShredRepairs, RepairStats, REPAIR_MS}, request_response::RequestResponse, result::{Error, RepairVerifyError, Result}, }, }, - bincode::serialize, - crossbeam_channel::RecvTimeoutError, + bincode::{serialize, Options}, + crossbeam_channel::{Receiver, RecvTimeoutError}, lru::LruCache, rand::{ distributions::{Distribution, WeightedError, WeightedIndex}, @@ -45,7 +46,7 @@ use { solana_streamer::{ sendmmsg::{batch_send, SendPktsError}, socket::SocketAddrSpace, - streamer::{PacketBatchReceiver, PacketBatchSender}, + streamer::PacketBatchSender, }, std::{ cmp::Reverse, @@ -58,6 +59,7 @@ use { thread::{Builder, JoinHandle}, time::{Duration, Instant}, }, + tokio::sync::oneshot::Sender as OneShotSender, }; /// the number of slots to respond with when responding to `Orphan` requests @@ -248,19 +250,13 @@ const REPAIR_REQUEST_PONG_SERIALIZED_BYTES: usize = PUBKEY_BYTES + HASH_BYTES + const REPAIR_REQUEST_MIN_BYTES: usize = REPAIR_REQUEST_PONG_SERIALIZED_BYTES; fn discard_malformed_repair_requests( - batch: &mut PacketBatch, + requests: &mut Vec, stats: &mut ServeRepairStats, ) -> usize { - let mut well_formed_requests = 0; - for packet in batch.iter_mut() { - if packet.meta().size < REPAIR_REQUEST_MIN_BYTES { 
- stats.err_malformed += 1; - packet.meta_mut().set_discard(true); - } else { - well_formed_requests += 1; - } - } - well_formed_requests + let num_requests = requests.len(); + requests.retain(|request| request.bytes.len() >= REPAIR_REQUEST_MIN_BYTES); + stats.err_malformed += num_requests - requests.len(); + requests.len() } #[derive(Debug, AbiEnumVisitor, AbiExample, Deserialize, Serialize)] @@ -386,6 +382,7 @@ struct RepairRequestWithMeta { from_addr: SocketAddr, stake: u64, whitelisted: bool, + response_sender: Option>>>, } impl ServeRepair { @@ -514,20 +511,28 @@ impl ServeRepair { } fn decode_request( - packet: &Packet, + remote_request: RemoteRequest, epoch_staked_nodes: &Option>>, whitelist: &HashSet, my_id: &Pubkey, socket_addr_space: &SocketAddrSpace, ) -> Result { - let Ok(request) = packet.deserialize_slice(..) else { + let Ok(request) = deserialize_request::(&remote_request) else { return Err(Error::from(RepairVerifyError::Malformed)); }; - let from_addr = packet.meta().socket_addr(); + let from_addr = remote_request.remote_address; if !ContactInfo::is_valid_address(&from_addr, socket_addr_space) { return Err(Error::from(RepairVerifyError::Malformed)); } - Self::verify_signed_packet(my_id, packet, &request)?; + Self::verify_signed_packet(my_id, &remote_request.bytes, &request)?; + if let Some(remote_pubkey) = remote_request.remote_pubkey { + if &remote_pubkey != request.sender() { + error!( + "remote pubkey {remote_pubkey} != request sender {}", + request.sender() + ); + } + } if request.sender() == my_id { error!("self repair: from_addr={from_addr} my_id={my_id} request={request:?}"); return Err(Error::from(RepairVerifyError::SelfRepair)); @@ -544,6 +549,7 @@ impl ServeRepair { from_addr, stake, whitelisted, + response_sender: remote_request.response_sender, }) } @@ -574,16 +580,16 @@ impl ServeRepair { } fn decode_requests( - reqs_v: Vec, + requests: Vec, epoch_staked_nodes: &Option>>, whitelist: &HashSet, my_id: &Pubkey, socket_addr_space: 
&SocketAddrSpace, stats: &mut ServeRepairStats, ) -> Vec { - let decode_packet = |packet| { + let decode_request = |request| { let result = Self::decode_request( - packet, + request, epoch_staked_nodes, whitelist, my_id, @@ -603,12 +609,7 @@ impl ServeRepair { } result.ok() }; - reqs_v - .iter() - .flatten() - .filter(|packet| !packet.meta().discard()) - .filter_map(decode_packet) - .collect() + requests.into_iter().filter_map(decode_request).collect() } /// Process messages from the network @@ -617,16 +618,15 @@ impl ServeRepair { ping_cache: &mut PingCache, recycler: &PacketBatchRecycler, blockstore: &Blockstore, - requests_receiver: &PacketBatchReceiver, + requests_receiver: &Receiver, response_sender: &PacketBatchSender, stats: &mut ServeRepairStats, data_budget: &DataBudget, ) -> std::result::Result<(), RecvTimeoutError> { - //TODO cache connections - let timeout = Duration::new(1, 0); - let mut reqs_v = vec![requests_receiver.recv_timeout(timeout)?]; + const TIMEOUT: Duration = Duration::from_secs(1); + let mut requests = vec![requests_receiver.recv_timeout(TIMEOUT)?]; const MAX_REQUESTS_PER_ITERATION: usize = 1024; - let mut total_requests = reqs_v[0].len(); + let mut total_requests = requests.len(); let socket_addr_space = *self.cluster_info.socket_addr_space(); let root_bank = self.bank_forks.read().unwrap().root_bank(); @@ -641,8 +641,12 @@ impl ServeRepair { }; let mut dropped_requests = 0; - let mut well_formed_requests = discard_malformed_repair_requests(&mut reqs_v[0], stats); - for mut more in requests_receiver.try_iter() { + let mut well_formed_requests = discard_malformed_repair_requests(&mut requests, stats); + loop { + let mut more: Vec<_> = requests_receiver.try_iter().collect(); + if more.is_empty() { + break; + } total_requests += more.len(); if well_formed_requests > max_buffered_packets { // Already exceeded max. 
Don't waste time discarding @@ -652,7 +656,7 @@ impl ServeRepair { let retained = discard_malformed_repair_requests(&mut more, stats); well_formed_requests += retained; if retained > 0 && well_formed_requests <= max_buffered_packets { - reqs_v.push(more); + requests.extend(more); } else { dropped_requests += more.len(); } @@ -665,7 +669,7 @@ impl ServeRepair { let mut decoded_requests = { let whitelist = self.repair_whitelist.read().unwrap(); Self::decode_requests( - reqs_v, + requests, &epoch_staked_nodes, &whitelist, &my_id, @@ -789,7 +793,7 @@ impl ServeRepair { pub fn listen( self, blockstore: Arc, - requests_receiver: PacketBatchReceiver, + requests_receiver: Receiver, response_sender: PacketBatchSender, exit: Arc, ) -> JoinHandle<()> { @@ -840,11 +844,7 @@ impl ServeRepair { .unwrap() } - fn verify_signed_packet( - my_id: &Pubkey, - packet: &Packet, - request: &RepairProtocol, - ) -> Result<()> { + fn verify_signed_packet(my_id: &Pubkey, bytes: &[u8], request: &RepairProtocol) -> Result<()> { match request { RepairProtocol::LegacyWindowIndex(_, _, _) | RepairProtocol::LegacyHighestWindowIndex(_, _, _) @@ -871,14 +871,14 @@ impl ServeRepair { if u128::from(time_diff_ms) > SIGNED_REPAIR_TIME_WINDOW.as_millis() { return Err(Error::from(RepairVerifyError::TimeSkew)); } - let Some(leading_buf) = packet.data(..4) else { + let Some(leading_buf) = bytes.get(..4) else { debug_assert!( false, "request should have failed deserialization: {request:?}", ); return Err(Error::from(RepairVerifyError::Malformed)); }; - let Some(trailing_buf) = packet.data(4 + SIGNATURE_BYTES..) else { + let Some(trailing_buf) = bytes.get(4 + SIGNATURE_BYTES..) 
else { debug_assert!( false, "request should have failed deserialization: {request:?}", @@ -946,7 +946,7 @@ impl ServeRepair { recycler: &PacketBatchRecycler, blockstore: &Blockstore, requests: Vec, - response_sender: &PacketBatchSender, + packet_batch_sender: &PacketBatchSender, stats: &mut ServeRepairStats, data_budget: &DataBudget, ) { @@ -957,14 +957,16 @@ impl ServeRepair { request, from_addr, stake, - .. + whitelisted: _, + response_sender, } in requests.into_iter() { if !data_budget.check(request.max_response_bytes()) { stats.dropped_requests_outbound_bandwidth += 1; continue; } - if !matches!(&request, RepairProtocol::Pong(_)) { + // Bypass ping/pong check for requests comming from QUIC endpoint. + if !matches!(&request, RepairProtocol::Pong(_)) && response_sender.is_none() { let (check, ping_pkt) = Self::check_ping_cache(ping_cache, &request, &from_addr, &identity_keypair); if let Some(ping_pkt) = ping_pkt { @@ -983,7 +985,9 @@ impl ServeRepair { }; let num_response_packets = rsp.len(); let num_response_bytes = rsp.iter().map(|p| p.meta().size).sum(); - if data_budget.take(num_response_bytes) && response_sender.send(rsp).is_ok() { + if data_budget.take(num_response_bytes) + && send_response(rsp, packet_batch_sender, response_sender) + { stats.total_response_packets += num_response_packets; match stake > 0 { true => stats.total_response_bytes_staked += num_response_bytes, @@ -998,7 +1002,7 @@ impl ServeRepair { if !pending_pings.is_empty() { stats.pings_sent += pending_pings.len(); let batch = PacketBatch::new(pending_pings); - let _ignore = response_sender.send(batch); + let _ = packet_batch_sender.send(batch); } } @@ -1363,6 +1367,36 @@ pub(crate) fn get_repair_protocol(_: ClusterType) -> Protocol { Protocol::UDP } +fn deserialize_request(request: &RemoteRequest) -> std::result::Result +where + T: serde::de::DeserializeOwned, +{ + bincode::options() + .with_limit(request.bytes.len() as u64) + .with_fixint_encoding() + .reject_trailing_bytes() + 
.deserialize(&request.bytes) +} + +// Returns true on success. +fn send_response( + packets: PacketBatch, + packet_batch_sender: &PacketBatchSender, + response_sender: Option>>>, +) -> bool { + match response_sender { + None => packet_batch_sender.send(packets).is_ok(), + Some(response_sender) => { + let response = packets + .iter() + .filter_map(|packet| packet.data(..)) + .map(Vec::from) + .collect(); + response_sender.send(response).is_ok() + } + } +} + #[cfg(test)] mod tests { use { @@ -1432,6 +1466,15 @@ mod tests { } } + fn make_remote_request(packet: &Packet) -> RemoteRequest { + RemoteRequest { + remote_pubkey: None, + remote_address: packet.meta().socket_addr(), + bytes: packet.data(..).map(Vec::from).unwrap(), + response_sender: None, + } + } + #[test] fn test_check_well_formed_repair_request() { let mut rng = rand::thread_rng(); @@ -1440,12 +1483,12 @@ mod tests { let pong = Pong::new(&ping, &keypair).unwrap(); let request = RepairProtocol::Pong(pong); let mut pkt = Packet::from_data(None, request).unwrap(); - let mut batch = PacketBatch::new(vec![pkt.clone()]); + let mut batch = vec![make_remote_request(&pkt)]; let mut stats = ServeRepairStats::default(); let num_well_formed = discard_malformed_repair_requests(&mut batch, &mut stats); assert_eq!(num_well_formed, 1); pkt.meta_mut().size = 5; - let mut batch = PacketBatch::new(vec![pkt]); + let mut batch = vec![make_remote_request(&pkt)]; let mut stats = ServeRepairStats::default(); let num_well_formed = discard_malformed_repair_requests(&mut batch, &mut stats); assert_eq!(num_well_formed, 0); @@ -1457,12 +1500,12 @@ mod tests { shred_index: 456, }; let mut pkt = Packet::from_data(None, request).unwrap(); - let mut batch = PacketBatch::new(vec![pkt.clone()]); + let mut batch = vec![make_remote_request(&pkt)]; let mut stats = ServeRepairStats::default(); let num_well_formed = discard_malformed_repair_requests(&mut batch, &mut stats); assert_eq!(num_well_formed, 1); pkt.meta_mut().size = 8; - let mut batch 
= PacketBatch::new(vec![pkt]); + let mut batch = vec![make_remote_request(&pkt)]; let mut stats = ServeRepairStats::default(); let num_well_formed = discard_malformed_repair_requests(&mut batch, &mut stats); assert_eq!(num_well_formed, 0); @@ -1473,12 +1516,12 @@ mod tests { slot: 123, }; let mut pkt = Packet::from_data(None, request).unwrap(); - let mut batch = PacketBatch::new(vec![pkt.clone()]); + let mut batch = vec![make_remote_request(&pkt)]; let mut stats = ServeRepairStats::default(); let num_well_formed = discard_malformed_repair_requests(&mut batch, &mut stats); assert_eq!(num_well_formed, 1); pkt.meta_mut().size = 1; - let mut batch = PacketBatch::new(vec![pkt]); + let mut batch = vec![make_remote_request(&pkt)]; let mut stats = ServeRepairStats::default(); let num_well_formed = discard_malformed_repair_requests(&mut batch, &mut stats); assert_eq!(num_well_formed, 0); @@ -1486,12 +1529,12 @@ mod tests { let request = RepairProtocol::LegacyOrphan(LegacyContactInfo::default(), 123); let mut pkt = Packet::from_data(None, request).unwrap(); - let mut batch = PacketBatch::new(vec![pkt.clone()]); + let mut batch = vec![make_remote_request(&pkt)]; let mut stats = ServeRepairStats::default(); let num_well_formed = discard_malformed_repair_requests(&mut batch, &mut stats); assert_eq!(num_well_formed, 1); pkt.meta_mut().size = 3; - let mut batch = PacketBatch::new(vec![pkt]); + let mut batch = vec![make_remote_request(&pkt)]; let mut stats = ServeRepairStats::default(); let num_well_formed = discard_malformed_repair_requests(&mut batch, &mut stats); assert_eq!(num_well_formed, 0); @@ -1701,8 +1744,13 @@ mod tests { packet }; let request: RepairProtocol = packet.deserialize_slice(..).unwrap(); - assert!( - ServeRepair::verify_signed_packet(&other_keypair.pubkey(), &packet, &request).is_ok() + assert_matches!( + ServeRepair::verify_signed_packet( + &other_keypair.pubkey(), + packet.data(..).unwrap(), + &request + ), + Ok(()) ); // recipient mismatch @@ -1721,7 
+1769,11 @@ mod tests { }; let request: RepairProtocol = packet.deserialize_slice(..).unwrap(); assert_matches!( - ServeRepair::verify_signed_packet(&my_keypair.pubkey(), &packet, &request), + ServeRepair::verify_signed_packet( + &my_keypair.pubkey(), + packet.data(..).unwrap(), + &request + ), Err(Error::RepairVerify(RepairVerifyError::IdMismatch)) ); @@ -1743,7 +1795,11 @@ mod tests { }; let request: RepairProtocol = packet.deserialize_slice(..).unwrap(); assert_matches!( - ServeRepair::verify_signed_packet(&other_keypair.pubkey(), &packet, &request), + ServeRepair::verify_signed_packet( + &other_keypair.pubkey(), + packet.data(..).unwrap(), + &request + ), Err(Error::RepairVerify(RepairVerifyError::TimeSkew)) ); @@ -1763,7 +1819,11 @@ mod tests { }; let request: RepairProtocol = packet.deserialize_slice(..).unwrap(); assert_matches!( - ServeRepair::verify_signed_packet(&other_keypair.pubkey(), &packet, &request), + ServeRepair::verify_signed_packet( + &other_keypair.pubkey(), + packet.data(..).unwrap(), + &request + ), Err(Error::RepairVerify(RepairVerifyError::SigVerify)) ); } diff --git a/core/src/repair/serve_repair_service.rs b/core/src/repair/serve_repair_service.rs index bd0298aacb9ddc..9819d0ea43855d 100644 --- a/core/src/repair/serve_repair_service.rs +++ b/core/src/repair/serve_repair_service.rs @@ -1,8 +1,8 @@ use { - crate::repair::serve_repair::ServeRepair, - crossbeam_channel::{unbounded, Sender}, + crate::repair::{quic_endpoint::RemoteRequest, serve_repair::ServeRepair}, + crossbeam_channel::{unbounded, Receiver, Sender}, solana_ledger::blockstore::Blockstore, - solana_perf::recycler::Recycler, + solana_perf::{packet::PacketBatch, recycler::Recycler}, solana_streamer::{ socket::SocketAddrSpace, streamer::{self, StreamerReceiveStats}, @@ -10,7 +10,7 @@ use { std::{ net::UdpSocket, sync::{atomic::AtomicBool, Arc}, - thread::{self, JoinHandle}, + thread::{self, Builder, JoinHandle}, time::Duration, }, }; @@ -22,6 +22,8 @@ pub struct ServeRepairService 
{ impl ServeRepairService { pub fn new( serve_repair: ServeRepair, + remote_request_sender: Sender, + remote_request_receiver: Receiver, blockstore: Arc, serve_repair_socket: UdpSocket, socket_addr_space: SocketAddrSpace, @@ -42,9 +44,13 @@ impl ServeRepairService { Recycler::default(), Arc::new(StreamerReceiveStats::new("serve_repair_receiver")), Duration::from_millis(1), // coalesce - false, - None, + false, // use_pinned_memory + None, // in_vote_only_mode ); + let t_packet_adapter = Builder::new() + .name(String::from("solServRAdapt")) + .spawn(|| adapt_repair_requests_packets(request_receiver, remote_request_sender)) + .unwrap(); let (response_sender, response_receiver) = unbounded(); let t_responder = streamer::responder( "Repair", @@ -53,9 +59,10 @@ impl ServeRepairService { socket_addr_space, Some(stats_reporter_sender), ); - let t_listen = serve_repair.listen(blockstore, request_receiver, response_sender, exit); + let t_listen = + serve_repair.listen(blockstore, remote_request_receiver, response_sender, exit); - let thread_hdls = vec![t_receiver, t_responder, t_listen]; + let thread_hdls = vec![t_receiver, t_packet_adapter, t_responder, t_listen]; Self { thread_hdls } } @@ -63,3 +70,26 @@ impl ServeRepairService { self.thread_hdls.into_iter().try_for_each(JoinHandle::join) } } + +// Adapts incoming UDP repair requests into RemoteRequest struct. +pub(crate) fn adapt_repair_requests_packets( + packets_receiver: Receiver, + remote_request_sender: Sender, +) { + for packets in packets_receiver { + for packet in &packets { + let Some(bytes) = packet.data(..).map(Vec::from) else { + continue; + }; + let request = RemoteRequest { + remote_pubkey: None, + remote_address: packet.meta().socket_addr(), + bytes, + response_sender: None, + }; + if remote_request_sender.send(request).is_err() { + return; // The receiver end of the channel is disconnected. 
+ } + } + } +} diff --git a/core/src/validator.rs b/core/src/validator.rs index f892dfae3d3065..ec77b58612ba52 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1043,8 +1043,13 @@ impl Validator { bank_forks.clone(), config.repair_whitelist.clone(), ); + let (repair_quic_endpoint_sender, repair_quic_endpoint_receiver) = unbounded(); let serve_repair_service = ServeRepairService::new( serve_repair, + // Incoming UDP repair requests are adapted into RemoteRequest + // and also sent through the same channel. + repair_quic_endpoint_sender, + repair_quic_endpoint_receiver, blockstore.clone(), node.sockets.serve_repair, socket_addr_space, From 4dfe62a2f0a21c5e5c5bd67faa2e7386ea138cdd Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 11 Sep 2023 10:57:21 -0700 Subject: [PATCH 054/407] rework accounts hash calc dedup to avoid kernel file error (#33195) * in hash calc, calculate max_inclusive_num_pubkeys * in hash calc, dedup uses mmap files to avoid os panic * as_mut_ptr * remove unsafe code * refactor count in hash files --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_hash.rs | 136 +++++++++++++++++++------------ 1 file changed, 84 insertions(+), 52 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index ac4134cf80a936..124e5b06903dd7 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -19,8 +19,7 @@ use { std::{ borrow::Borrow, convert::TryInto, - fs::File, - io::{BufWriter, Write}, + io::{Seek, SeekFrom, Write}, path::PathBuf, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, @@ -33,81 +32,96 @@ pub const MERKLE_FANOUT: usize = 16; /// 1 file containing account hashes sorted by pubkey, mapped into memory struct MmapAccountHashesFile { + /// raw slice of `Hash` values. 
Can be a larger slice than `count` mmap: MmapMut, + /// # of valid Hash entries in `mmap` + count: usize, } impl MmapAccountHashesFile { /// return a slice of account hashes starting at 'index' fn read(&self, index: usize) -> &[Hash] { let start = std::mem::size_of::() * index; - let item_slice: &[u8] = &self.mmap[start..]; + let item_slice: &[u8] = &self.mmap[start..self.count * std::mem::size_of::()]; let remaining_elements = item_slice.len() / std::mem::size_of::(); unsafe { let item = item_slice.as_ptr() as *const Hash; std::slice::from_raw_parts(item, remaining_elements) } } + + /// write a hash to the end of mmap file. + fn write(&mut self, hash: &Hash) { + let start = self.count * std::mem::size_of::(); + let end = start + std::mem::size_of::(); + self.mmap[start..end].copy_from_slice(hash.as_ref()); + self.count += 1; + } } /// 1 file containing account hashes sorted by pubkey pub struct AccountHashesFile { /// # hashes and an open file that will be deleted on drop. None if there are zero hashes to represent, and thus, no file. 
- count_and_writer: Option<(usize, BufWriter)>, + writer: Option, /// The directory where temporary cache files are put dir_for_temp_cache_files: PathBuf, + /// # bytes allocated + capacity: usize, } impl AccountHashesFile { - /// map the file into memory and return a reader that can access it by slice - fn get_reader(&mut self) -> Option<(usize, MmapAccountHashesFile)> { - std::mem::take(&mut self.count_and_writer).map(|(count, writer)| { - let file = Some(writer.into_inner().unwrap()); - ( - count, - MmapAccountHashesFile { - mmap: unsafe { MmapMut::map_mut(file.as_ref().unwrap()).unwrap() }, - }, - ) - }) + /// return a mmap reader that can be accessed by slice + fn get_reader(&mut self) -> Option { + std::mem::take(&mut self.writer) } /// # hashes stored in this file pub fn count(&self) -> usize { - self.count_and_writer + self.writer .as_ref() - .map(|(count, _)| *count) + .map(|writer| writer.count) .unwrap_or_default() } /// write 'hash' to the file /// If the file isn't open, create it first. 
pub fn write(&mut self, hash: &Hash) { - if self.count_and_writer.is_none() { + if self.writer.is_none() { // we have hashes to write but no file yet, so create a file that will auto-delete on drop - self.count_and_writer = Some(( - 0, - BufWriter::new( - tempfile_in(&self.dir_for_temp_cache_files).unwrap_or_else(|err| { - panic!( - "Unable to create file within {}: {err}", - self.dir_for_temp_cache_files.display() - ) - }), - ), - )); - } - let count_and_writer = self.count_and_writer.as_mut().unwrap(); - count_and_writer - .1 - .write_all(hash.as_ref()) - .unwrap_or_else(|err| { + + let mut data = tempfile_in(&self.dir_for_temp_cache_files).unwrap_or_else(|err| { panic!( - "Unable to write file within {}: {err}", + "Unable to create file within {}: {err}", self.dir_for_temp_cache_files.display() ) }); - count_and_writer.0 += 1; + // Theoretical performance optimization: write a zero to the end of + // the file so that we won't have to resize it later, which may be + // expensive. + data.seek(SeekFrom::Start((self.capacity - 1) as u64)) + .unwrap(); + data.write_all(&[0]).unwrap(); + data.rewind().unwrap(); + data.flush().unwrap(); + + //UNSAFE: Required to create a Mmap + let map = unsafe { MmapMut::map_mut(&data) }; + let map = map.unwrap_or_else(|e| { + error!( + "Failed to map the data file (size: {}): {}.\n + Please increase sysctl vm.max_map_count or equivalent for your platform.", + self.capacity, e + ); + std::process::exit(1); + }); + + self.writer = Some(MmapAccountHashesFile { + mmap: map, + count: 0, + }); + } + self.writer.as_mut().unwrap().write(hash); } } @@ -338,7 +352,8 @@ impl CumulativeHashesFromFiles { let mut readers = Vec::with_capacity(hashes.len()); let cumulative = CumulativeOffsets::new(hashes.into_iter().filter_map(|mut hash_file| { // ignores all hashfiles that have zero entries - hash_file.get_reader().map(|(count, reader)| { + hash_file.get_reader().map(|reader| { + let count = reader.count; readers.push(reader); count }) @@ -985,15 
+1000,12 @@ impl<'a> AccountsHasher<'a> { // map from index of an item in first_items[] to index of the corresponding item in sorted_data_by_pubkey[] // this will change as items in sorted_data_by_pubkey[] are exhausted let mut first_item_to_pubkey_division = Vec::with_capacity(len); - let mut hashes = AccountHashesFile { - count_and_writer: None, - dir_for_temp_cache_files: self.dir_for_temp_cache_files.clone(), - }; + // initialize 'first_items', which holds the current lowest item in each slot group - sorted_data_by_pubkey + let max_inclusive_num_pubkeys = sorted_data_by_pubkey .iter() .enumerate() - .for_each(|(i, hash_data)| { + .map(|(i, hash_data)| { let first_pubkey_in_bin = Self::find_first_pubkey_in_bin(hash_data, pubkey_bin, bins, &binner, stats); if let Some(first_pubkey_in_bin) = first_pubkey_in_bin { @@ -1001,8 +1013,27 @@ impl<'a> AccountsHasher<'a> { first_items.push(k); first_item_to_pubkey_division.push(i); indexes.push(first_pubkey_in_bin); + let mut first_pubkey_in_next_bin = first_pubkey_in_bin + 1; + while first_pubkey_in_next_bin < hash_data.len() { + if binner.bin_from_pubkey(&hash_data[first_pubkey_in_next_bin].pubkey) + != pubkey_bin + { + break; + } + first_pubkey_in_next_bin += 1; + } + first_pubkey_in_next_bin - first_pubkey_in_bin + } else { + 0 } - }); + }) + .sum::(); + let mut hashes = AccountHashesFile { + writer: None, + dir_for_temp_cache_files: self.dir_for_temp_cache_files.clone(), + capacity: max_inclusive_num_pubkeys * std::mem::size_of::(), + }; + let mut overall_sum = 0; let mut duplicate_pubkey_indexes = Vec::with_capacity(len); let filler_accounts_enabled = self.filler_accounts_enabled(); @@ -1238,8 +1269,9 @@ pub mod tests { impl AccountHashesFile { fn new(dir_for_temp_cache_files: PathBuf) -> Self { Self { - count_and_writer: None, + writer: None, dir_for_temp_cache_files, + capacity: 1024, /* default 1k for tests */ } } } @@ -1308,16 +1340,16 @@ pub mod tests { // 1 hash file.write(&hashes[0]); let reader = 
file.get_reader().unwrap(); - assert_eq!(&[hashes[0]][..], reader.1.read(0)); - assert!(reader.1.read(1).is_empty()); + assert_eq!(&[hashes[0]][..], reader.read(0)); + assert!(reader.read(1).is_empty()); // multiple hashes let mut file = AccountHashesFile::new(dir_for_temp_cache_files.path().to_path_buf()); assert!(file.get_reader().is_none()); hashes.iter().for_each(|hash| file.write(hash)); let reader = file.get_reader().unwrap(); - (0..2).for_each(|i| assert_eq!(&hashes[i..], reader.1.read(i))); - assert!(reader.1.read(2).is_empty()); + (0..2).for_each(|i| assert_eq!(&hashes[i..], reader.read(i))); + assert!(reader.read(2).is_empty()); } #[test] @@ -1476,7 +1508,7 @@ pub mod tests { let accounts_hasher = AccountsHasher::new(dir_for_temp_cache_files.path().to_path_buf()); let (mut hashes, lamports) = accounts_hasher.de_dup_accounts_in_parallel(&slice, 0, 1, &HashStats::default()); - assert_eq!(&[Hash::default()], hashes.get_reader().unwrap().1.read(0)); + assert_eq!(&[Hash::default()], hashes.get_reader().unwrap().read(0)); assert_eq!(lamports, 1); } @@ -1486,7 +1518,7 @@ pub mod tests { fn get_vec(mut hashes: AccountHashesFile) -> Vec { hashes .get_reader() - .map(|r| r.1.read(0).to_vec()) + .map(|r| r.read(0).to_vec()) .unwrap_or_default() } From 18f321525a8ddb300f0fe53c00f9758090203cff Mon Sep 17 00:00:00 2001 From: drebaglioni <57418452+drebaglioni@users.noreply.github.com> Date: Mon, 11 Sep 2023 11:33:11 -0700 Subject: [PATCH 055/407] Update SECURITY.md (#33198) * Update SECURITY.md Updated language to reflect new wording around Solana Foundation's responsibilities in awarding a bounty * Update SECURITY.md wrapped long line around 80characters --- SECURITY.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index 48326f1497ab0c..905316c2dc3da4 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -90,10 +90,10 @@ We currently do not use the Github workflow to publish security advisories. 
Once ## Security Bug Bounties -The Solana Foundation offer bounties for critical Solana security issues. Please -see below for more details. Either a demonstration or a valid bug report is all -that's necessary to submit a bug bounty. A patch to fix the issue isn't -required. +At its sole discretion, the Solana Foundation may offer a bounty for +[valid reports](#reporting) of critical Solana vulnerabilities. Please see below +for more details. The submitter is not required to provide a +mitigation to qualify. #### Loss of Funds: $2,000,000 USD in locked SOL tokens (locked for 12 months) From bbb57be0a54f025beed23e549b9678faf637053e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 11 Sep 2023 21:10:40 +0200 Subject: [PATCH 056/407] Refactor - Move interfaces of address-lookup-table into the program SDK (#33165) * Adds a module `address_lookup_table` to the SDK. * Adds a module `address_lookup_table::instruction` to the SDK. * Adds a module `address_lookup_table::error` to the SDK. * Adds a module `address_lookup_table::state` to the SDK. * Moves AddressLookupTable into SDK as well. * Moves AddressLookupTableAccount into address_lookup_table. * Adds deprecation messages. * Disentangles dependencies across cargo files. 
--- Cargo.lock | 6 -- account-decoder/Cargo.toml | 1 - account-decoder/src/parse_account_data.rs | 5 +- .../src/parse_address_lookup_table.rs | 9 +-- accounts-db/Cargo.toml | 1 - accounts-db/src/accounts.rs | 10 ++-- cli/Cargo.toml | 1 - cli/src/address_lookup_table.rs | 35 ++++++----- core/Cargo.toml | 1 - core/src/banking_stage/consumer.rs | 7 ++- .../banking_stage/read_write_account_set.rs | 7 ++- cost-model/src/block_cost_limits.rs | 6 +- .../developing/runtime-facilities/programs.md | 5 ++ .../tests/close_lookup_table_ix.rs | 2 +- .../tests/common.rs | 16 ++--- .../tests/create_lookup_table_ix.rs | 22 ++++--- .../tests/deactivate_lookup_table_ix.rs | 4 +- .../tests/extend_lookup_table_ix.rs | 8 +-- .../tests/freeze_lookup_table_ix.rs | 4 +- programs/address-lookup-table/src/lib.rs | 16 ++--- .../address-lookup-table/src/processor.rs | 29 ++++----- programs/sbf/Cargo.lock | 5 -- programs/sbf/Cargo.toml | 1 - programs/sbf/rust/dep_crate/Cargo.toml | 1 - rpc/Cargo.toml | 1 - rpc/src/rpc.rs | 7 ++- runtime/src/bank/address_lookup_table.rs | 2 +- runtime/src/builtins.rs | 2 +- .../src/address_lookup_table}/error.rs | 0 .../src/address_lookup_table}/instruction.rs | 60 +++++++++---------- sdk/program/src/address_lookup_table/mod.rs | 20 +++++++ .../src/address_lookup_table}/state.rs | 24 ++++---- .../src/address_lookup_table_account.rs | 13 ---- sdk/program/src/example_mocks.rs | 14 ++++- sdk/program/src/lib.rs | 10 +++- sdk/program/src/message/versions/v0/mod.rs | 9 ++- sdk/src/lib.rs | 23 +++---- transaction-status/Cargo.toml | 1 - .../src/parse_address_lookup_table.rs | 11 ++-- transaction-status/src/parse_instruction.rs | 6 +- 40 files changed, 219 insertions(+), 186 deletions(-) rename {programs/address-lookup-table/src => sdk/program/src/address_lookup_table}/error.rs (100%) rename {programs/address-lookup-table/src => sdk/program/src/address_lookup_table}/instruction.rs (99%) create mode 100644 sdk/program/src/address_lookup_table/mod.rs rename 
{programs/address-lookup-table/src => sdk/program/src/address_lookup_table}/state.rs (99%) delete mode 100644 sdk/program/src/address_lookup_table_account.rs diff --git a/Cargo.lock b/Cargo.lock index 8dced6cf77be65..a53a445676469b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5085,7 +5085,6 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "solana-address-lookup-table-program", "solana-config-program", "solana-sdk", "spl-token", @@ -5184,7 +5183,6 @@ dependencies = [ "serde_derive", "siphasher", "solana-accounts-db", - "solana-address-lookup-table-program", "solana-bpf-loader-program", "solana-bucket-map", "solana-compute-budget-program", @@ -5542,7 +5540,6 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", - "solana-address-lookup-table-program", "solana-bpf-loader-program", "solana-clap-utils", "solana-cli-config", @@ -5755,7 +5752,6 @@ dependencies = [ "serde_json", "serial_test", "solana-accounts-db", - "solana-address-lookup-table-program", "solana-bloom", "solana-client", "solana-core", @@ -6693,7 +6689,6 @@ dependencies = [ "soketto", "solana-account-decoder", "solana-accounts-db", - "solana-address-lookup-table-program", "solana-client", "solana-entry", "solana-faucet", @@ -7264,7 +7259,6 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", - "solana-address-lookup-table-program", "solana-sdk", "spl-associated-token-account", "spl-memo", diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index 4e6fce49eb88f4..bb82b077dcf911 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -19,7 +19,6 @@ lazy_static = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } -solana-address-lookup-table-program = { workspace = true } solana-config-program = { workspace = true } solana-sdk = { workspace = true } spl-token = { workspace = true, features = ["no-entrypoint"] } diff --git 
a/account-decoder/src/parse_account_data.rs b/account-decoder/src/parse_account_data.rs index a9ec74b8723cca..81886e94986632 100644 --- a/account-decoder/src/parse_account_data.rs +++ b/account-decoder/src/parse_account_data.rs @@ -8,14 +8,15 @@ use { inflector::Inflector, serde_json::Value, solana_sdk::{ - instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar, vote, + address_lookup_table, instruction::InstructionError, pubkey::Pubkey, stake, system_program, + sysvar, vote, }, std::collections::HashMap, thiserror::Error, }; lazy_static! { - static ref ADDRESS_LOOKUP_PROGRAM_ID: Pubkey = solana_address_lookup_table_program::id(); + static ref ADDRESS_LOOKUP_PROGRAM_ID: Pubkey = address_lookup_table::program::id(); static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id(); static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id(); static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id(); diff --git a/account-decoder/src/parse_address_lookup_table.rs b/account-decoder/src/parse_address_lookup_table.rs index 03dc395d543953..39c5d44f37b32c 100644 --- a/account-decoder/src/parse_address_lookup_table.rs +++ b/account-decoder/src/parse_address_lookup_table.rs @@ -1,7 +1,6 @@ use { crate::parse_account_data::{ParsableAccount, ParseAccountError}, - solana_address_lookup_table_program::state::AddressLookupTable, - solana_sdk::instruction::InstructionError, + solana_sdk::{address_lookup_table::state::AddressLookupTable, instruction::InstructionError}, }; pub fn parse_address_lookup_table( @@ -62,8 +61,10 @@ impl<'a> From> for UiLookupTable { mod test { use { super::*, - solana_address_lookup_table_program::state::{LookupTableMeta, LOOKUP_TABLE_META_SIZE}, - solana_sdk::pubkey::Pubkey, + solana_sdk::{ + address_lookup_table::state::{LookupTableMeta, LOOKUP_TABLE_META_SIZE}, + pubkey::Pubkey, + }, std::borrow::Cow, }; diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 
2e2685b901bb5c..680412fcbaf98d 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -45,7 +45,6 @@ regex = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } siphasher = { workspace = true } -solana-address-lookup-table-program = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-bucket-map = { workspace = true } solana-compute-budget-program = { workspace = true } diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 6c6c6a5d180462..aa16edd94f163d 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -24,7 +24,6 @@ use { dashmap::DashMap, itertools::Itertools, log::*, - solana_address_lookup_table_program::{error::AddressLookupError, state::AddressLookupTable}, solana_program_runtime::{ compute_budget::{self, ComputeBudget}, loaded_programs::LoadedProgramsForTxBatch, @@ -32,6 +31,7 @@ use { solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, account_utils::StateMut, + address_lookup_table::{self, error::AddressLookupError, state::AddressLookupTable}, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{BankId, Slot}, feature_set::{ @@ -781,7 +781,7 @@ impl Accounts { .map(|(account, _rent)| account) .ok_or(AddressLookupError::LookupTableAccountNotFound)?; - if table_account.owner() == &solana_address_lookup_table_program::id() { + if table_account.owner() == &address_lookup_table::program::id() { let current_slot = ancestors.max_slot(); let lookup_table = AddressLookupTable::deserialize(table_account.data()) .map_err(|_ix_err| AddressLookupError::InvalidAccountData)?; @@ -1475,12 +1475,12 @@ mod tests { transaction_results::{DurableNonceFee, TransactionExecutionDetails}, }, assert_matches::assert_matches, - solana_address_lookup_table_program::state::LookupTableMeta, solana_program_runtime::prioritization_fee::{ PrioritizationFeeDetails, PrioritizationFeeType, }, solana_sdk::{ 
account::{AccountSharedData, WritableAccount}, + address_lookup_table::state::LookupTableMeta, compute_budget::ComputeBudgetInstruction, epoch_schedule::EpochSchedule, genesis_config::ClusterType, @@ -2356,7 +2356,7 @@ mod tests { let invalid_table_key = Pubkey::new_unique(); let invalid_table_account = - AccountSharedData::new(1, 0, &solana_address_lookup_table_program::id()); + AccountSharedData::new(1, 0, &address_lookup_table::program::id()); accounts.store_slow_uncached(0, &invalid_table_key, &invalid_table_account); let address_table_lookup = MessageAddressTableLookup { @@ -2395,7 +2395,7 @@ mod tests { AccountSharedData::create( 1, table_state.serialize_for_tests().unwrap(), - solana_address_lookup_table_program::id(), + address_lookup_table::program::id(), false, 0, ) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 4c10112f40ff89..9879b06218c61a 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -29,7 +29,6 @@ serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } solana-account-decoder = { workspace = true } -solana-address-lookup-table-program = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } diff --git a/cli/src/address_lookup_table.rs b/cli/src/address_lookup_table.rs index d9f6dd073576c6..a1be08a577c07f 100644 --- a/cli/src/address_lookup_table.rs +++ b/cli/src/address_lookup_table.rs @@ -1,21 +1,28 @@ use { crate::cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult}, clap::{App, AppSettings, Arg, ArgMatches, SubCommand}, - solana_address_lookup_table_program::{ - instruction::{ - close_lookup_table, create_lookup_table, create_lookup_table_signed, - deactivate_lookup_table, extend_lookup_table, freeze_lookup_table, - }, - state::AddressLookupTable, - }, solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*}, solana_cli_output::{CliAddressLookupTable, 
CliAddressLookupTableCreated, CliSignature}, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::config::RpcSendTransactionConfig, solana_sdk::{ - account::from_account, clock::Clock, commitment_config::CommitmentConfig, message::Message, - pubkey::Pubkey, signer::Signer, sysvar, transaction::Transaction, + account::from_account, + address_lookup_table::{ + self, + instruction::{ + close_lookup_table, create_lookup_table, create_lookup_table_signed, + deactivate_lookup_table, extend_lookup_table, freeze_lookup_table, + }, + state::AddressLookupTable, + }, + clock::Clock, + commitment_config::CommitmentConfig, + message::Message, + pubkey::Pubkey, + signer::Signer, + sysvar, + transaction::Transaction, }, std::{rc::Rc, sync::Arc}, }; @@ -604,7 +611,7 @@ fn process_freeze_lookup_table( let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") })?; - if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", ) @@ -662,7 +669,7 @@ fn process_extend_lookup_table( let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") })?; - if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", ) @@ -721,7 +728,7 @@ fn process_deactivate_lookup_table( let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { format!("Lookup table account {lookup_table_pubkey} not found, was 
it already closed?") })?; - if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", ) @@ -774,7 +781,7 @@ fn process_close_lookup_table( let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") })?; - if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", ) @@ -827,7 +834,7 @@ fn process_show_lookup_table( let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") })?; - if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", ) diff --git a/core/Cargo.toml b/core/Cargo.toml index ce32e45d70deaf..df083bdf0508c2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -42,7 +42,6 @@ serde = { workspace = true } serde_bytes = { workspace = true } serde_derive = { workspace = true } solana-accounts-db = { workspace = true } -solana-address-lookup-table-program = { workspace = true } solana-bloom = { workspace = true } solana-client = { workspace = true } solana-cost-model = { workspace = true } diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index acac38db43b9d1..0104792ccd4d4b 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -737,7 +737,6 @@ mod tests 
{ unprocessed_transaction_storage::ThreadType, }, crossbeam_channel::{unbounded, Receiver}, - solana_address_lookup_table_program::state::{AddressLookupTable, LookupTableMeta}, solana_cost_model::cost_model::CostModel, solana_entry::entry::{next_entry, next_versioned_entry}, solana_ledger::{ @@ -754,6 +753,10 @@ mod tests { solana_runtime::prioritization_fee_cache::PrioritizationFeeCache, solana_sdk::{ account::AccountSharedData, + address_lookup_table::{ + self, + state::{AddressLookupTable, LookupTableMeta}, + }, instruction::InstructionError, message::{v0, v0::MessageAddressTableLookup, MessageHeader, VersionedMessage}, poh_config::PohConfig, @@ -844,7 +847,7 @@ mod tests { ) -> AccountSharedData { let data = address_lookup_table.serialize_for_tests().unwrap(); let mut account = - AccountSharedData::new(1, data.len(), &solana_address_lookup_table_program::id()); + AccountSharedData::new(1, data.len(), &address_lookup_table::program::id()); account.set_data(data); bank.store_account(&account_address, &account); diff --git a/core/src/banking_stage/read_write_account_set.rs b/core/src/banking_stage/read_write_account_set.rs index 3ab159c470670c..691f81d0f58f67 100644 --- a/core/src/banking_stage/read_write_account_set.rs +++ b/core/src/banking_stage/read_write_account_set.rs @@ -105,11 +105,14 @@ impl ReadWriteAccountSet { mod tests { use { super::ReadWriteAccountSet, - solana_address_lookup_table_program::state::{AddressLookupTable, LookupTableMeta}, solana_ledger::genesis_utils::GenesisConfigInfo, solana_runtime::{bank::Bank, genesis_utils::create_genesis_config}, solana_sdk::{ account::AccountSharedData, + address_lookup_table::{ + self, + state::{AddressLookupTable, LookupTableMeta}, + }, hash::Hash, message::{ v0::{self, MessageAddressTableLookup}, @@ -178,7 +181,7 @@ mod tests { let address_table_key = Pubkey::new_unique(); let data = address_lookup_table.serialize_for_tests().unwrap(); let mut account = - AccountSharedData::new(1, data.len(), 
&solana_address_lookup_table_program::id()); + AccountSharedData::new(1, data.len(), &address_lookup_table::program::id()); account.set_data(data); bank.store_account(&address_table_key, &account); diff --git a/cost-model/src/block_cost_limits.rs b/cost-model/src/block_cost_limits.rs index af751bdf01baed..328d89cd04198b 100644 --- a/cost-model/src/block_cost_limits.rs +++ b/cost-model/src/block_cost_limits.rs @@ -3,8 +3,8 @@ use { lazy_static::lazy_static, solana_sdk::{ - bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, compute_budget, ed25519_program, - loader_v4, pubkey::Pubkey, secp256k1_program, + address_lookup_table, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, + compute_budget, ed25519_program, loader_v4, pubkey::Pubkey, secp256k1_program, }, std::collections::HashMap, }; @@ -37,7 +37,7 @@ lazy_static! { (solana_vote_program::id(), solana_vote_program::vote_processor::DEFAULT_COMPUTE_UNITS), (solana_system_program::id(), solana_system_program::system_processor::DEFAULT_COMPUTE_UNITS), (compute_budget::id(), solana_compute_budget_program::DEFAULT_COMPUTE_UNITS), - (solana_address_lookup_table_program::id(), solana_address_lookup_table_program::processor::DEFAULT_COMPUTE_UNITS), + (address_lookup_table::program::id(), solana_address_lookup_table_program::processor::DEFAULT_COMPUTE_UNITS), (bpf_loader_upgradeable::id(), solana_bpf_loader_program::UPGRADEABLE_LOADER_COMPUTE_UNITS), (bpf_loader_deprecated::id(), solana_bpf_loader_program::DEPRECATED_LOADER_COMPUTE_UNITS), (bpf_loader::id(), solana_bpf_loader_program::DEFAULT_LOADER_COMPUTE_UNITS), diff --git a/docs/src/developing/runtime-facilities/programs.md b/docs/src/developing/runtime-facilities/programs.md index fee20f57ae2335..787b6444efc3fe 100644 --- a/docs/src/developing/runtime-facilities/programs.md +++ b/docs/src/developing/runtime-facilities/programs.md @@ -50,6 +50,11 @@ Create and manage accounts that track validator voting state and rewards. 
- Program id: `Vote111111111111111111111111111111111111111` - Instructions: [VoteInstruction](https://docs.rs/solana-vote-program/VERSION_FOR_DOCS_RS/solana_vote_program/vote_instruction/enum.VoteInstruction.html) +## Address Lookup Table Program + +- Program id: `AddressLookupTab1e1111111111111111111111111` +- Instructions: [AddressLookupTableInstruction](https://docs.rs/solana-sdk/VERSION_FOR_DOCS_RS/solana_sdk/address_lookup_table/instruction/enum.ProgramInstruction.html) + ## BPF Loader Deploys, upgrades, and executes programs on the chain. diff --git a/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs index 415f2a11fe3059..231d82473fd5cc 100644 --- a/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs +++ b/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs @@ -4,9 +4,9 @@ use { add_lookup_table_account, assert_ix_error, new_address_lookup_table, overwrite_slot_hashes_with_slots, setup_test_context, }, - solana_address_lookup_table_program::instruction::close_lookup_table, solana_program_test::*, solana_sdk::{ + address_lookup_table::instruction::close_lookup_table, clock::Clock, instruction::InstructionError, pubkey::Pubkey, diff --git a/programs/address-lookup-table-tests/tests/common.rs b/programs/address-lookup-table-tests/tests/common.rs index 0a4104300e2325..48b80199312a14 100644 --- a/programs/address-lookup-table-tests/tests/common.rs +++ b/programs/address-lookup-table-tests/tests/common.rs @@ -1,13 +1,13 @@ #![allow(dead_code)] use { - solana_address_lookup_table_program::{ - id, - processor::process_instruction, - state::{AddressLookupTable, LookupTableMeta}, - }, + solana_address_lookup_table_program::processor::process_instruction, solana_program_test::*, solana_sdk::{ account::AccountSharedData, + address_lookup_table::{ + program::id, + state::{AddressLookupTable, LookupTableMeta}, + }, clock::Slot, hash::Hash, 
instruction::{Instruction, InstructionError}, @@ -80,11 +80,7 @@ pub async fn add_lookup_table_account( let rent = context.banks_client.get_rent().await.unwrap(); let rent_exempt_balance = rent.minimum_balance(data.len()); - let mut account = AccountSharedData::new( - rent_exempt_balance, - data.len(), - &solana_address_lookup_table_program::id(), - ); + let mut account = AccountSharedData::new(rent_exempt_balance, data.len(), &id()); account.set_data_from_slice(&data); context.set_account(&account_address, &account); diff --git a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs index 6330e02c385b02..183de53e31382a 100644 --- a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs +++ b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs @@ -1,16 +1,22 @@ use { assert_matches::assert_matches, common::{assert_ix_error, overwrite_slot_hashes_with_slots, setup_test_context}, - solana_address_lookup_table_program::{ - id, - instruction::{create_lookup_table, create_lookup_table_signed}, - processor::process_instruction, - state::{AddressLookupTable, LOOKUP_TABLE_META_SIZE}, - }, + solana_address_lookup_table_program::processor::process_instruction, solana_program_test::*, solana_sdk::{ - clock::Slot, feature_set, instruction::InstructionError, pubkey::Pubkey, rent::Rent, - signature::Signer, signer::keypair::Keypair, transaction::Transaction, + address_lookup_table::{ + instruction::{create_lookup_table, create_lookup_table_signed}, + program::id, + state::{AddressLookupTable, LOOKUP_TABLE_META_SIZE}, + }, + clock::Slot, + feature_set, + instruction::InstructionError, + pubkey::Pubkey, + rent::Rent, + signature::Signer, + signer::keypair::Keypair, + transaction::Transaction, }, }; diff --git a/programs/address-lookup-table-tests/tests/deactivate_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/deactivate_lookup_table_ix.rs index 
81050aca123150..664f38dad4d6f4 100644 --- a/programs/address-lookup-table-tests/tests/deactivate_lookup_table_ix.rs +++ b/programs/address-lookup-table-tests/tests/deactivate_lookup_table_ix.rs @@ -3,11 +3,9 @@ use { common::{ add_lookup_table_account, assert_ix_error, new_address_lookup_table, setup_test_context, }, - solana_address_lookup_table_program::{ - instruction::deactivate_lookup_table, state::AddressLookupTable, - }, solana_program_test::*, solana_sdk::{ + address_lookup_table::{instruction::deactivate_lookup_table, state::AddressLookupTable}, instruction::InstructionError, pubkey::Pubkey, signature::{Keypair, Signer}, diff --git a/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs index 1bbc973c24f0cb..e0e56daca92155 100644 --- a/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs +++ b/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs @@ -3,13 +3,13 @@ use { common::{ add_lookup_table_account, assert_ix_error, new_address_lookup_table, setup_test_context, }, - solana_address_lookup_table_program::{ - instruction::extend_lookup_table, - state::{AddressLookupTable, LookupTableMeta}, - }, solana_program_test::*, solana_sdk::{ account::{ReadableAccount, WritableAccount}, + address_lookup_table::{ + instruction::extend_lookup_table, + state::{AddressLookupTable, LookupTableMeta}, + }, clock::Clock, instruction::{Instruction, InstructionError}, pubkey::{Pubkey, PUBKEY_BYTES}, diff --git a/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs index bb169fda293b0f..45583db1e0f3fb 100644 --- a/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs +++ b/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs @@ -3,11 +3,9 @@ use { common::{ add_lookup_table_account, assert_ix_error, new_address_lookup_table, setup_test_context, }, - 
solana_address_lookup_table_program::{ - instruction::freeze_lookup_table, state::AddressLookupTable, - }, solana_program_test::*, solana_sdk::{ + address_lookup_table::{instruction::freeze_lookup_table, state::AddressLookupTable}, instruction::InstructionError, pubkey::Pubkey, signature::{Keypair, Signer}, diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs index c26a815ebaa610..11d9b4b0dd34e4 100644 --- a/programs/address-lookup-table/src/lib.rs +++ b/programs/address-lookup-table/src/lib.rs @@ -2,12 +2,14 @@ #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] #![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] -use solana_program::declare_id; - -pub mod error; -pub mod instruction; -#[cfg(not(target_os = "solana"))] pub mod processor; -pub mod state; -declare_id!("AddressLookupTab1e1111111111111111111111111"); +#[deprecated( + since = "1.17.0", + note = "Please use `solana_sdk::address_lookup_table` instead" +)] +pub use solana_sdk::address_lookup_table::{ + error, instruction, + program::{check_id, id, ID}, + state, +}; diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs index 68416015dc5a8d..6f71b293d03a4c 100644 --- a/programs/address-lookup-table/src/processor.rs +++ b/programs/address-lookup-table/src/processor.rs @@ -1,13 +1,14 @@ use { - crate::{ - instruction::ProgramInstruction, - state::{ - AddressLookupTable, LookupTableMeta, LookupTableStatus, ProgramState, - LOOKUP_TABLE_MAX_ADDRESSES, LOOKUP_TABLE_META_SIZE, - }, - }, solana_program_runtime::{declare_process_instruction, ic_msg, invoke_context::InvokeContext}, solana_sdk::{ + address_lookup_table::{ + instruction::ProgramInstruction, + program::{check_id, id}, + state::{ + AddressLookupTable, LookupTableMeta, LookupTableStatus, ProgramState, + LOOKUP_TABLE_MAX_ADDRESSES, LOOKUP_TABLE_META_SIZE, + }, + }, clock::Slot, feature_set, instruction::InstructionError, @@ 
-117,7 +118,7 @@ impl Processor { &derivation_slot.to_le_bytes(), &[bump_seed], ], - &crate::id(), + &id(), )?; if table_key != derived_table_key { @@ -132,7 +133,7 @@ impl Processor { if invoke_context .feature_set .is_active(&feature_set::relax_authority_signer_check_for_lookup_table_creation::id()) - && crate::check_id(&lookup_table_owner) + && check_id(&lookup_table_owner) { return Ok(()); } @@ -157,7 +158,7 @@ impl Processor { )?; invoke_context.native_invoke( - system_instruction::assign(&table_key, &crate::id()).into(), + system_instruction::assign(&table_key, &id()).into(), &[table_key], )?; @@ -178,7 +179,7 @@ impl Processor { let lookup_table_account = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - if *lookup_table_account.get_owner() != crate::id() { + if *lookup_table_account.get_owner() != id() { return Err(InstructionError::InvalidAccountOwner); } drop(lookup_table_account); @@ -233,7 +234,7 @@ impl Processor { let lookup_table_account = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; let table_key = *lookup_table_account.get_key(); - if *lookup_table_account.get_owner() != crate::id() { + if *lookup_table_account.get_owner() != id() { return Err(InstructionError::InvalidAccountOwner); } drop(lookup_table_account); @@ -348,7 +349,7 @@ impl Processor { let lookup_table_account = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - if *lookup_table_account.get_owner() != crate::id() { + if *lookup_table_account.get_owner() != id() { return Err(InstructionError::InvalidAccountOwner); } drop(lookup_table_account); @@ -397,7 +398,7 @@ impl Processor { let lookup_table_account = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - if *lookup_table_account.get_owner() != crate::id() { + if *lookup_table_account.get_owner() != id() { return Err(InstructionError::InvalidAccountOwner); } drop(lookup_table_account); diff --git 
a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 54ffec2adb1f09..f3a5a0987edb5b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4456,7 +4456,6 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "solana-address-lookup-table-program", "solana-config-program", "solana-sdk", "spl-token", @@ -4505,7 +4504,6 @@ dependencies = [ "serde", "serde_derive", "siphasher", - "solana-address-lookup-table-program", "solana-bpf-loader-program", "solana-bucket-map", "solana-compute-budget-program", @@ -4812,7 +4810,6 @@ dependencies = [ "serde_bytes", "serde_derive", "solana-accounts-db", - "solana-address-lookup-table-program", "solana-bloom", "solana-client", "solana-cost-model", @@ -5684,7 +5681,6 @@ name = "solana-sbf-rust-dep-crate" version = "1.17.0" dependencies = [ "byteorder 1.4.3", - "solana-address-lookup-table-program", "solana-program", ] @@ -6255,7 +6251,6 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", - "solana-address-lookup-table-program", "solana-sdk", "spl-associated-token-account", "spl-memo", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index cd5ed1f479c2f2..2ef02e54b8e49d 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -28,7 +28,6 @@ serde_json = "1.0.56" solana_rbpf = "=0.7.1" solana-account-decoder = { path = "../../account-decoder", version = "=1.17.0" } solana-accounts-db = { path = "../../accounts-db", version = "=1.17.0" } -solana-address-lookup-table-program = { path = "../../programs/address-lookup-table", version = "=1.17.0" } solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.17.0" } solana-cli-output = { path = "../../cli-output", version = "=1.17.0" } solana-ledger = { path = "../../ledger", version = "=1.17.0" } diff --git a/programs/sbf/rust/dep_crate/Cargo.toml b/programs/sbf/rust/dep_crate/Cargo.toml index 56ee7bcb34b067..afeec6766e7736 100644 --- a/programs/sbf/rust/dep_crate/Cargo.toml +++ 
b/programs/sbf/rust/dep_crate/Cargo.toml @@ -12,7 +12,6 @@ edition = { workspace = true } [dependencies] byteorder = { workspace = true } # list of crates which must be buildable for bpf programs -solana-address-lookup-table-program = { workspace = true } solana-program = { workspace = true } [lib] diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 92bc7710c28d1e..62ae098cb6cc34 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -61,7 +61,6 @@ tokio-util = { workspace = true, features = ["codec", "compat"] } [dev-dependencies] serial_test = { workspace = true } -solana-address-lookup-table-program = { workspace = true } solana-net-utils = { workspace = true } solana-stake-program = { workspace = true } symlink = { workspace = true } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 90fd6a2a214162..ff70bdee116263 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -4631,7 +4631,6 @@ pub mod tests { jsonrpc_core_client::transports::local, serde::de::DeserializeOwned, solana_accounts_db::{inline_spl_token, inline_spl_token_2022}, - solana_address_lookup_table_program::state::{AddressLookupTable, LookupTableMeta}, solana_entry::entry::next_versioned_entry, solana_gossip::socketaddr, solana_ledger::{ @@ -4653,6 +4652,10 @@ pub mod tests { }, solana_sdk::{ account::{Account, WritableAccount}, + address_lookup_table::{ + self, + state::{AddressLookupTable, LookupTableMeta}, + }, clock::MAX_RECENT_BLOCKHASHES, compute_budget::ComputeBudgetInstruction, fee_calculator::{FeeRateGovernor, DEFAULT_BURN_PERCENT}, @@ -4934,7 +4937,7 @@ pub mod tests { AccountSharedData::create( min_balance_lamports, address_table_data, - solana_address_lookup_table_program::id(), + address_lookup_table::program::id(), false, 0, ) diff --git a/runtime/src/bank/address_lookup_table.rs b/runtime/src/bank/address_lookup_table.rs index 02debe6c1c57f8..07c82acf6da8b1 100644 --- a/runtime/src/bank/address_lookup_table.rs +++ b/runtime/src/bank/address_lookup_table.rs @@ -1,7 +1,7 @@ use { super::Bank, 
- solana_address_lookup_table_program::error::AddressLookupError, solana_sdk::{ + address_lookup_table::error::AddressLookupError, message::{ v0::{LoadedAddresses, MessageAddressTableLookup}, AddressLoaderError, diff --git a/runtime/src/builtins.rs b/runtime/src/builtins.rs index d692101aaab42f..5a21424cc35a81 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/builtins.rs @@ -91,7 +91,7 @@ pub static BUILTINS: &[BuiltinPrototype] = &[ }, BuiltinPrototype { feature_id: None, - program_id: solana_address_lookup_table_program::id(), + program_id: solana_sdk::address_lookup_table::program::id(), name: "address_lookup_table_program", entrypoint: solana_address_lookup_table_program::processor::process_instruction, }, diff --git a/programs/address-lookup-table/src/error.rs b/sdk/program/src/address_lookup_table/error.rs similarity index 100% rename from programs/address-lookup-table/src/error.rs rename to sdk/program/src/address_lookup_table/error.rs diff --git a/programs/address-lookup-table/src/instruction.rs b/sdk/program/src/address_lookup_table/instruction.rs similarity index 99% rename from programs/address-lookup-table/src/instruction.rs rename to sdk/program/src/address_lookup_table/instruction.rs index 573dbe561a87e6..ccf4bbe3a19022 100644 --- a/programs/address-lookup-table/src/instruction.rs +++ b/sdk/program/src/address_lookup_table/instruction.rs @@ -1,12 +1,12 @@ use { - crate::id, - serde::{Deserialize, Serialize}, - solana_program::{ + crate::{ + address_lookup_table::program::id, clock::Slot, instruction::{AccountMeta, Instruction}, pubkey::Pubkey, system_program, }, + serde::{Deserialize, Serialize}, }; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] @@ -78,6 +78,33 @@ pub fn derive_lookup_table_address( ) } +/// Constructs an instruction to create a table account and returns +/// the instruction and the table account's derived address. 
+fn create_lookup_table_common( + authority_address: Pubkey, + payer_address: Pubkey, + recent_slot: Slot, + authority_is_signer: bool, +) -> (Instruction, Pubkey) { + let (lookup_table_address, bump_seed) = + derive_lookup_table_address(&authority_address, recent_slot); + let instruction = Instruction::new_with_bincode( + id(), + &ProgramInstruction::CreateLookupTable { + recent_slot, + bump_seed, + }, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, authority_is_signer), + AccountMeta::new(payer_address, true), + AccountMeta::new_readonly(system_program::id(), false), + ], + ); + + (instruction, lookup_table_address) +} + /// Constructs an instruction to create a table account and returns /// the instruction and the table account's derived address. /// @@ -110,33 +137,6 @@ pub fn create_lookup_table( create_lookup_table_common(authority_address, payer_address, recent_slot, false) } -/// Constructs an instruction to create a table account and returns -/// the instruction and the table account's derived address. -fn create_lookup_table_common( - authority_address: Pubkey, - payer_address: Pubkey, - recent_slot: Slot, - authority_is_signer: bool, -) -> (Instruction, Pubkey) { - let (lookup_table_address, bump_seed) = - derive_lookup_table_address(&authority_address, recent_slot); - let instruction = Instruction::new_with_bincode( - id(), - &ProgramInstruction::CreateLookupTable { - recent_slot, - bump_seed, - }, - vec![ - AccountMeta::new(lookup_table_address, false), - AccountMeta::new_readonly(authority_address, authority_is_signer), - AccountMeta::new(payer_address, true), - AccountMeta::new_readonly(system_program::id(), false), - ], - ); - - (instruction, lookup_table_address) -} - /// Constructs an instruction that freezes an address lookup /// table so that it can never be closed or extended again. Empty /// lookup tables cannot be frozen. 
diff --git a/sdk/program/src/address_lookup_table/mod.rs b/sdk/program/src/address_lookup_table/mod.rs new file mode 100644 index 00000000000000..c7a712e4592df3 --- /dev/null +++ b/sdk/program/src/address_lookup_table/mod.rs @@ -0,0 +1,20 @@ +//! The [address lookup table program][np]. +//! +//! [np]: https://docs.solana.com/developing/runtime-facilities/programs#address-lookup-table-program + +pub mod error; +pub mod instruction; +pub mod state; + +pub mod program { + crate::declare_id!("AddressLookupTab1e1111111111111111111111111"); +} + +/// The definition of address lookup table accounts. +/// +/// As used by the `crate::message::v0` message format. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct AddressLookupTableAccount { + pub key: crate::pubkey::Pubkey, + pub addresses: Vec, +} diff --git a/programs/address-lookup-table/src/state.rs b/sdk/program/src/address_lookup_table/state.rs similarity index 99% rename from programs/address-lookup-table/src/state.rs rename to sdk/program/src/address_lookup_table/state.rs index 610fe6d20c2105..b5f6640ad4457e 100644 --- a/programs/address-lookup-table/src/state.rs +++ b/sdk/program/src/address_lookup_table/state.rs @@ -1,8 +1,8 @@ use { - crate::error::AddressLookupError, serde::{Deserialize, Serialize}, solana_frozen_abi_macro::{AbiEnumVisitor, AbiExample}, solana_program::{ + address_lookup_table::error::AddressLookupError, clock::Slot, instruction::InstructionError, pubkey::Pubkey, @@ -17,16 +17,6 @@ pub const LOOKUP_TABLE_MAX_ADDRESSES: usize = 256; /// The serialized size of lookup table metadata pub const LOOKUP_TABLE_META_SIZE: usize = 56; -/// Program account states -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample, AbiEnumVisitor)] -#[allow(clippy::large_enum_variant)] -pub enum ProgramState { - /// Account is not initialized. - Uninitialized, - /// Initialized `LookupTable` account. 
- LookupTable(LookupTableMeta), -} - /// Activation status of a lookup table #[derive(Debug, PartialEq, Eq, Clone)] pub enum LookupTableStatus { @@ -112,6 +102,16 @@ impl LookupTableMeta { } } +/// Program account states +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample, AbiEnumVisitor)] +#[allow(clippy::large_enum_variant)] +pub enum ProgramState { + /// Account is not initialized. + Uninitialized, + /// Initialized `LookupTable` account. + LookupTable(LookupTableMeta), +} + #[derive(Debug, PartialEq, Eq, Clone, AbiExample)] pub struct AddressLookupTable<'a> { pub meta: LookupTableMeta, @@ -218,7 +218,7 @@ impl<'a> AddressLookupTable<'a> { #[cfg(test)] mod tests { - use {super::*, solana_sdk::hash::Hash}; + use {super::*, crate::hash::Hash}; impl AddressLookupTable<'_> { fn new_for_tests(meta: LookupTableMeta, num_addresses: usize) -> Self { diff --git a/sdk/program/src/address_lookup_table_account.rs b/sdk/program/src/address_lookup_table_account.rs deleted file mode 100644 index bbc04259ea27fa..00000000000000 --- a/sdk/program/src/address_lookup_table_account.rs +++ /dev/null @@ -1,13 +0,0 @@ -//! The definition of address lookup table accounts. -//! -//! As used by the [`v0` message format][v0]. -//! -//! [v0]: crate::message::v0 - -use solana_program::pubkey::Pubkey; - -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct AddressLookupTableAccount { - pub key: Pubkey, - pub addresses: Vec, -} diff --git a/sdk/program/src/example_mocks.rs b/sdk/program/src/example_mocks.rs index f3873573e0a32a..48f3355710e1f2 100644 --- a/sdk/program/src/example_mocks.rs +++ b/sdk/program/src/example_mocks.rs @@ -113,7 +113,7 @@ pub mod solana_rpc_client_nonce_utils { /// programs. 
pub mod solana_sdk { pub use crate::{ - address_lookup_table_account, hash, instruction, keccak, message, nonce, + hash, instruction, keccak, message, nonce, pubkey::{self, Pubkey}, system_instruction, system_program, sysvar::{ @@ -273,10 +273,20 @@ pub mod solana_sdk { } } } + + #[deprecated( + since = "1.17.0", + note = "Please use `solana_sdk::address_lookup_table` instead" + )] + pub use crate::address_lookup_table as address_lookup_table_account; } +#[deprecated( + since = "1.17.0", + note = "Please use `solana_sdk::address_lookup_table` instead" +)] pub mod solana_address_lookup_table_program { - crate::declare_id!("AddressLookupTab1e1111111111111111111111111"); + pub use crate::address_lookup_table::program::{check_id, id, ID}; pub mod state { use { diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 76d86e1bc86fe1..63da3fa7f1e09f 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -471,7 +471,7 @@ extern crate self as solana_program; pub mod account_info; -pub mod address_lookup_table_account; +pub mod address_lookup_table; pub mod alt_bn128; pub(crate) mod atomic_u64; pub mod big_mod_exp; @@ -534,6 +534,14 @@ pub mod sysvar; pub mod vote; pub mod wasm; +#[deprecated( + since = "1.17.0", + note = "Please use `solana_sdk::address_lookup_table::AddressLookupTableAccount` instead" +)] +pub mod address_lookup_table_account { + pub use crate::address_lookup_table::AddressLookupTableAccount; +} + #[cfg(target_os = "solana")] pub use solana_sdk_macro::wasm_bindgen_stub as wasm_bindgen; /// Re-export of [wasm-bindgen]. 
diff --git a/sdk/program/src/message/versions/v0/mod.rs b/sdk/program/src/message/versions/v0/mod.rs index c2f87cf508c3f0..eb4b4590b5be22 100644 --- a/sdk/program/src/message/versions/v0/mod.rs +++ b/sdk/program/src/message/versions/v0/mod.rs @@ -179,24 +179,22 @@ impl Message { /// /// # Examples /// - /// This example uses the [`solana_address_lookup_table_program`], [`solana_rpc_client`], [`solana_sdk`], and [`anyhow`] crates. + /// This example uses the [`solana_rpc_client`], [`solana_sdk`], and [`anyhow`] crates. /// - /// [`solana_address_lookup_table_program`]: https://docs.rs/solana-address-lookup-table-program /// [`solana_rpc_client`]: https://docs.rs/solana-rpc-client /// [`solana_sdk`]: https://docs.rs/solana-sdk /// [`anyhow`]: https://docs.rs/anyhow /// /// ``` /// # use solana_program::example_mocks::{ - /// # solana_address_lookup_table_program, /// # solana_rpc_client, /// # solana_sdk, /// # }; /// # use std::borrow::Cow; /// # use solana_sdk::account::Account; /// use anyhow::Result; - /// use solana_address_lookup_table_program::state::AddressLookupTable; /// use solana_rpc_client::rpc_client::RpcClient; + /// use solana_program::address_lookup_table::{self, state::{AddressLookupTable, LookupTableMeta}}; /// use solana_sdk::{ /// address_lookup_table_account::AddressLookupTableAccount, /// instruction::{AccountMeta, Instruction}, @@ -215,9 +213,10 @@ impl Message { /// # client.set_get_account_response(address_lookup_table_key, Account { /// # lamports: 1, /// # data: AddressLookupTable { + /// # meta: LookupTableMeta::default(), /// # addresses: Cow::Owned(instruction.accounts.iter().map(|meta| meta.pubkey).collect()), /// # }.serialize_for_tests().unwrap(), - /// # owner: solana_address_lookup_table_program::ID, + /// # owner: address_lookup_table::program::id(), /// # executable: false, /// # rent_epoch: 1, /// # }); diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 98b7694a41dfc3..825c09e2c31a01 100644 --- a/sdk/src/lib.rs +++ 
b/sdk/src/lib.rs @@ -40,20 +40,21 @@ extern crate self as solana_sdk; pub use signer::signers; // These solana_program imports could be *-imported, but that causes a bunch of // confusing duplication in the docs due to a rustdoc bug. #26211 +#[allow(deprecated)] +pub use solana_program::address_lookup_table_account; #[cfg(not(target_os = "solana"))] pub use solana_program::program_stubs; pub use solana_program::{ - account_info, address_lookup_table_account, alt_bn128, big_mod_exp, blake3, borsh, borsh0_10, - borsh0_9, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock, config, - custom_heap_default, custom_panic_default, debug_account_data, declare_deprecated_sysvar_id, - declare_sysvar_id, decode_error, ed25519_program, epoch_rewards, epoch_schedule, - fee_calculator, impl_sysvar_get, incinerator, instruction, keccak, lamports, - loader_instruction, loader_upgradeable_instruction, loader_v4, loader_v4_instruction, message, - msg, native_token, nonce, poseidon, program, program_error, program_memory, program_option, - program_pack, rent, sanitize, sdk_ids, secp256k1_program, secp256k1_recover, serde_varint, - serialize_utils, short_vec, slot_hashes, slot_history, stable_layout, stake, stake_history, - syscalls, system_instruction, system_program, sysvar, unchecked_div_by_const, vote, - wasm_bindgen, + account_info, address_lookup_table, alt_bn128, big_mod_exp, blake3, borsh, borsh0_10, borsh0_9, + bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock, config, custom_heap_default, + custom_panic_default, debug_account_data, declare_deprecated_sysvar_id, declare_sysvar_id, + decode_error, ed25519_program, epoch_rewards, epoch_schedule, fee_calculator, impl_sysvar_get, + incinerator, instruction, keccak, lamports, loader_instruction, loader_upgradeable_instruction, + loader_v4, loader_v4_instruction, message, msg, native_token, nonce, poseidon, program, + program_error, program_memory, program_option, program_pack, rent, sanitize, sdk_ids, + 
secp256k1_program, secp256k1_recover, serde_varint, serialize_utils, short_vec, slot_hashes, + slot_history, stable_layout, stake, stake_history, syscalls, system_instruction, + system_program, sysvar, unchecked_div_by_const, vote, wasm_bindgen, }; pub mod account; diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 066fd513421673..bbdbc6b0bd6926 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -22,7 +22,6 @@ serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } solana-account-decoder = { workspace = true } -solana-address-lookup-table-program = { workspace = true } solana-sdk = { workspace = true } spl-associated-token-account = { workspace = true, features = ["no-entrypoint"] } spl-memo = { workspace = true, features = ["no-entrypoint"] } diff --git a/transaction-status/src/parse_address_lookup_table.rs b/transaction-status/src/parse_address_lookup_table.rs index f30b61ad7fe8b5..94127c8e06e203 100644 --- a/transaction-status/src/parse_address_lookup_table.rs +++ b/transaction-status/src/parse_address_lookup_table.rs @@ -4,8 +4,10 @@ use { }, bincode::deserialize, serde_json::json, - solana_address_lookup_table_program::instruction::ProgramInstruction, - solana_sdk::{instruction::CompiledInstruction, message::AccountKeys}, + solana_sdk::{ + address_lookup_table::instruction::ProgramInstruction, instruction::CompiledInstruction, + message::AccountKeys, + }, }; pub fn parse_address_lookup_table( @@ -115,8 +117,9 @@ fn check_num_address_lookup_table_accounts( mod test { use { super::*, - solana_address_lookup_table_program::instruction, - solana_sdk::{message::Message, pubkey::Pubkey, system_program}, + solana_sdk::{ + address_lookup_table::instruction, message::Message, pubkey::Pubkey, system_program, + }, std::str::FromStr, }; diff --git a/transaction-status/src/parse_instruction.rs b/transaction-status/src/parse_instruction.rs index 
9cab2802d149cf..0f53c79b57df33 100644 --- a/transaction-status/src/parse_instruction.rs +++ b/transaction-status/src/parse_instruction.rs @@ -13,8 +13,8 @@ use { serde_json::Value, solana_account_decoder::parse_token::spl_token_ids, solana_sdk::{ - instruction::CompiledInstruction, message::AccountKeys, pubkey::Pubkey, stake, - system_program, vote, + address_lookup_table, instruction::CompiledInstruction, message::AccountKeys, + pubkey::Pubkey, stake, system_program, vote, }, std::{ collections::HashMap, @@ -24,7 +24,7 @@ use { }; lazy_static! { - static ref ADDRESS_LOOKUP_PROGRAM_ID: Pubkey = solana_address_lookup_table_program::id(); + static ref ADDRESS_LOOKUP_PROGRAM_ID: Pubkey = address_lookup_table::program::id(); static ref ASSOCIATED_TOKEN_PROGRAM_ID: Pubkey = spl_associated_token_id(); static ref BPF_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader::id(); static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id(); From 6298c6c31e6f5cf9306b54cc62960da80afd3882 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 11 Sep 2023 15:55:48 -0400 Subject: [PATCH 057/407] Shares accounts hash cache data between full and incremental (#33164) --- accounts-db/src/accounts_db.rs | 38 +++++++++++++++++------------- accounts-db/src/cache_hash_data.rs | 17 +++++++------ 2 files changed, 32 insertions(+), 23 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 48798260ed1d03..2468fe82385496 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1493,8 +1493,7 @@ pub struct AccountsDb { #[allow(dead_code)] base_working_temp_dir: Option, - full_accounts_hash_cache_path: PathBuf, - incremental_accounts_hash_cache_path: PathBuf, + accounts_hash_cache_path: PathBuf, transient_accounts_hash_cache_path: PathBuf, pub shrink_paths: RwLock>>, @@ -2458,8 +2457,15 @@ impl AccountsDb { (base_working_path, Some(base_working_temp_dir)) }; - let accounts_hash_cache_path = 
accounts_hash_cache_path - .unwrap_or_else(|| base_working_path.join(Self::DEFAULT_ACCOUNTS_HASH_CACHE_DIR)); + let accounts_hash_cache_path = accounts_hash_cache_path.unwrap_or_else(|| { + let accounts_hash_cache_path = + base_working_path.join(Self::DEFAULT_ACCOUNTS_HASH_CACHE_DIR); + if !accounts_hash_cache_path.exists() { + fs_err::create_dir(&accounts_hash_cache_path) + .expect("create accounts hash cache dir"); + } + accounts_hash_cache_path + }); let mut bank_hash_stats = HashMap::new(); bank_hash_stats.insert(0, BankHashStats::default()); @@ -2493,9 +2499,8 @@ impl AccountsDb { paths: vec![], base_working_path, base_working_temp_dir, - full_accounts_hash_cache_path: accounts_hash_cache_path.join("full"), - incremental_accounts_hash_cache_path: accounts_hash_cache_path.join("incremental"), transient_accounts_hash_cache_path: accounts_hash_cache_path.join("transient"), + accounts_hash_cache_path, shrink_paths: RwLock::new(None), temp_paths: None, file_size: DEFAULT_FILE_SIZE, @@ -7649,18 +7654,20 @@ impl AccountsDb { fn get_cache_hash_data( accounts_hash_cache_path: PathBuf, config: &CalcAccountsHashConfig<'_>, + kind: CalcAccountsHashKind, slot: Slot, ) -> CacheHashData { - if !config.store_detailed_debug_info_on_failure { - CacheHashData::new(accounts_hash_cache_path) + let accounts_hash_cache_path = if !config.store_detailed_debug_info_on_failure { + accounts_hash_cache_path } else { // this path executes when we are failing with a hash mismatch let failed_dir = accounts_hash_cache_path .join("failed_calculate_accounts_hash_cache") .join(slot.to_string()); - let _ = std::fs::remove_dir_all(&failed_dir); - CacheHashData::new(failed_dir) - } + _ = std::fs::remove_dir_all(&failed_dir); + failed_dir + }; + CacheHashData::new(accounts_hash_cache_path, kind == CalcAccountsHashKind::Full) } // modeled after calculate_accounts_delta_hash @@ -7676,7 +7683,6 @@ impl AccountsDb { storages, stats, CalcAccountsHashKind::Full, - 
self.full_accounts_hash_cache_path.clone(), )?; let AccountsHashKind::Full(accounts_hash) = accounts_hash else { panic!("calculate_accounts_hash_from_storages must return a FullAccountsHash"); @@ -7704,7 +7710,6 @@ impl AccountsDb { storages, stats, CalcAccountsHashKind::Incremental, - self.incremental_accounts_hash_cache_path.clone(), )?; let AccountsHashKind::Incremental(incremental_accounts_hash) = accounts_hash else { panic!("calculate_incremental_accounts_hash must return an IncrementalAccountsHash"); @@ -7718,7 +7723,6 @@ impl AccountsDb { storages: &SortedStorages<'_>, mut stats: HashStats, kind: CalcAccountsHashKind, - accounts_hash_cache_path: PathBuf, ) -> Result<(AccountsHashKind, u64), AccountsHashVerificationError> { let total_time = Measure::start(""); let _guard = self.active_stats.activate(ActiveStatItem::Hash); @@ -7728,10 +7732,12 @@ impl AccountsDb { let slot = storages.max_slot_inclusive(); let use_bg_thread_pool = config.use_bg_thread_pool; + let accounts_hash_cache_path = self.accounts_hash_cache_path.clone(); let scan_and_hash = || { let (cache_hash_data, cache_hash_data_us) = measure_us!(Self::get_cache_hash_data( accounts_hash_cache_path, config, + kind, slot )); stats.cache_hash_data_us += cache_hash_data_us; @@ -9984,7 +9990,7 @@ pub mod tests { let temp_dir = TempDir::new().unwrap(); let accounts_hash_cache_path = temp_dir.path().to_path_buf(); self.scan_snapshot_stores_with_cache( - &CacheHashData::new(accounts_hash_cache_path), + &CacheHashData::new(accounts_hash_cache_path, true), storage, stats, bins, @@ -11024,7 +11030,7 @@ pub mod tests { }; let result = accounts_db.scan_account_storage_no_bank( - &CacheHashData::new(accounts_hash_cache_path), + &CacheHashData::new(accounts_hash_cache_path, true), &CalcAccountsHashConfig::default(), &get_storage_refs(&[storage]), test_scan, diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index 196474f49c13dd..50e85af9a89116 100644 --- 
a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -196,29 +196,32 @@ impl CacheHashDataFile { } } -pub type PreExistingCacheFiles = HashSet; pub struct CacheHashData { cache_dir: PathBuf, - pre_existing_cache_files: Arc>, + pre_existing_cache_files: Arc>>, + should_delete_old_cache_files_on_drop: bool, pub stats: Arc, } impl Drop for CacheHashData { fn drop(&mut self) { - self.delete_old_cache_files(); + if self.should_delete_old_cache_files_on_drop { + self.delete_old_cache_files(); + } self.stats.report(); } } impl CacheHashData { - pub fn new(cache_dir: PathBuf) -> CacheHashData { + pub fn new(cache_dir: PathBuf, should_delete_old_cache_files_on_drop: bool) -> CacheHashData { std::fs::create_dir_all(&cache_dir).unwrap_or_else(|err| { panic!("error creating cache dir {}: {err}", cache_dir.display()) }); let result = CacheHashData { cache_dir, - pre_existing_cache_files: Arc::new(Mutex::new(PreExistingCacheFiles::default())), + pre_existing_cache_files: Arc::new(Mutex::new(HashSet::default())), + should_delete_old_cache_files_on_drop, stats: Arc::default(), }; @@ -281,7 +284,7 @@ impl CacheHashData { }) } - pub(crate) fn pre_existing_cache_file_will_be_used(&self, file_name: impl AsRef) { + fn pre_existing_cache_file_will_be_used(&self, file_name: impl AsRef) { self.pre_existing_cache_files .lock() .unwrap() @@ -424,7 +427,7 @@ mod tests { data_this_pass.push(this_bin_data); } } - let cache = CacheHashData::new(cache_dir.clone()); + let cache = CacheHashData::new(cache_dir.clone(), true); let file_name = PathBuf::from("test"); cache.save(&file_name, &data_this_pass).unwrap(); cache.get_cache_files(); From d724af863c88a0a0ed7b7b7ae601a6e22fc160e9 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 11 Sep 2023 13:20:28 -0700 Subject: [PATCH 058/407] add a few accounts hash dedup tests (#33208) --- accounts-db/src/accounts_hash.rs | 76 ++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git 
a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 124e5b06903dd7..12c10231006c35 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1798,6 +1798,82 @@ pub mod tests { assert_eq!((get_vec(hashfile), lamports), (vec![], 0)); } + #[test] + fn test_accountsdb_dup_pubkey_2_chunks() { + // 2 chunks, a dup pubkey in each chunk + for reverse in [false, true] { + let key = Pubkey::new_from_array([1; 32]); // key is BEFORE key2 + let key2 = Pubkey::new_from_array([2; 32]); + let hash = Hash::new_unique(); + let mut account_maps = Vec::new(); + let mut account_maps2 = Vec::new(); + let val = CalculateHashIntermediate::new(hash, 1, key); + account_maps.push(val.clone()); + let val2 = CalculateHashIntermediate::new(hash, 2, key2); + account_maps.push(val2.clone()); + let val3 = CalculateHashIntermediate::new(hash, 3, key2); + account_maps2.push(val3.clone()); + + let mut vecs = vec![account_maps.to_vec(), account_maps2.to_vec()]; + if reverse { + vecs = vecs.into_iter().rev().collect(); + } + let slice = convert_to_slice(&vecs); + let (hashfile, lamports) = test_de_dup_accounts_in_parallel(&slice); + assert_eq!( + (get_vec(hashfile), lamports), + ( + vec![val.hash, if reverse { val2.hash } else { val3.hash }], + val.lamports + + if reverse { + val2.lamports + } else { + val3.lamports + } + ), + "reverse: {reverse}" + ); + } + } + + #[test] + fn test_accountsdb_dup_pubkey_2_chunks_backwards() { + // 2 chunks, a dup pubkey in each chunk + for reverse in [false, true] { + let key = Pubkey::new_from_array([3; 32]); // key is AFTER key2 + let key2 = Pubkey::new_from_array([2; 32]); + let hash = Hash::new_unique(); + let mut account_maps = Vec::new(); + let mut account_maps2 = Vec::new(); + let val2 = CalculateHashIntermediate::new(hash, 2, key2); + account_maps.push(val2.clone()); + let val = CalculateHashIntermediate::new(hash, 1, key); + account_maps.push(val.clone()); + let val3 = CalculateHashIntermediate::new(hash, 
3, key2); + account_maps2.push(val3.clone()); + + let mut vecs = vec![account_maps.to_vec(), account_maps2.to_vec()]; + if reverse { + vecs = vecs.into_iter().rev().collect(); + } + let slice = convert_to_slice(&vecs); + let (hashfile, lamports) = test_de_dup_accounts_in_parallel(&slice); + assert_eq!( + (get_vec(hashfile), lamports), + ( + vec![if reverse { val2.hash } else { val3.hash }, val.hash], + val.lamports + + if reverse { + val2.lamports + } else { + val3.lamports + } + ), + "reverse: {reverse}" + ); + } + } + #[test] fn test_accountsdb_cumulative_offsets1_d() { let input = vec![vec![0, 1], vec![], vec![2, 3, 4], vec![]]; From 659fc6837b4f2d3809ec3a83df24ded3be1751ae Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 11 Sep 2023 16:49:51 -0400 Subject: [PATCH 059/407] Removes `pub` from accounts_hash.rs items (#33209) --- accounts-db/src/accounts_hash.rs | 72 ++++++++++++++++---------------- 1 file changed, 37 insertions(+), 35 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 12c10231006c35..8191c04fe2447f 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -60,7 +60,7 @@ impl MmapAccountHashesFile { } /// 1 file containing account hashes sorted by pubkey -pub struct AccountHashesFile { +struct AccountHashesFile { /// # hashes and an open file that will be deleted on drop. None if there are zero hashes to represent, and thus, no file. writer: Option, /// The directory where temporary cache files are put @@ -76,7 +76,7 @@ impl AccountHashesFile { } /// # hashes stored in this file - pub fn count(&self) -> usize { + fn count(&self) -> usize { self.writer .as_ref() .map(|writer| writer.count) @@ -85,7 +85,7 @@ impl AccountHashesFile { /// write 'hash' to the file /// If the file isn't open, create it first. 
- pub fn write(&mut self, hash: &Hash) { + fn write(&mut self, hash: &Hash) { if self.writer.is_none() { // we have hashes to write but no file yet, so create a file that will auto-delete on drop @@ -331,14 +331,14 @@ impl<'b, T: 'b> ExtractSliceFromRawData<'b, T> for Vec>> { // Allow retrieving &[start..end] from a logical src: Vec, where src is really Vec> (or later Vec>>) // This model prevents callers from having to flatten which saves both working memory and time. #[derive(Default, Debug)] -pub struct CumulativeOffsets { +struct CumulativeOffsets { cumulative_offsets: Vec, total_count: usize, } /// used by merkle tree calculation to lookup account hashes by overall index #[derive(Default)] -pub struct CumulativeHashesFromFiles { +struct CumulativeHashesFromFiles { /// source of hashes in order readers: Vec, /// look up reader index and offset by overall index @@ -348,7 +348,7 @@ pub struct CumulativeHashesFromFiles { impl CumulativeHashesFromFiles { /// Calculate offset from overall index to which file and offset within that file based on the length of each hash file. /// Also collect readers to access the data. 
- pub fn from_files(hashes: Vec) -> Self { + fn from_files(hashes: Vec) -> Self { let mut readers = Vec::with_capacity(hashes.len()); let cumulative = CumulativeOffsets::new(hashes.into_iter().filter_map(|mut hash_file| { // ignores all hashfiles that have zero entries @@ -365,12 +365,12 @@ impl CumulativeHashesFromFiles { } /// total # of items referenced - pub fn total_count(&self) -> usize { + fn total_count(&self) -> usize { self.cumulative.total_count } // return the biggest slice possible that starts at the overall index 'start' - pub fn get_slice(&self, start: usize) -> &[Hash] { + fn get_slice(&self, start: usize) -> &[Hash] { let (start, offset) = self.cumulative.find(start); let data_source_index = offset.index[0]; let data = &self.readers[data_source_index]; @@ -380,7 +380,7 @@ impl CumulativeHashesFromFiles { } impl CumulativeOffsets { - pub fn new(iter: I) -> Self + fn new(iter: I) -> Self where I: Iterator, { @@ -404,33 +404,10 @@ impl CumulativeOffsets { } } - pub fn from_raw(raw: &[Vec]) -> Self { + fn from_raw(raw: &[Vec]) -> Self { Self::new(raw.iter().map(|v| v.len())) } - pub fn from_raw_2d(raw: &[Vec>]) -> Self { - let mut total_count: usize = 0; - let mut cumulative_offsets = Vec::with_capacity(0); - for (i, v_outer) in raw.iter().enumerate() { - for (j, v) in v_outer.iter().enumerate() { - let len = v.len(); - if len > 0 { - if cumulative_offsets.is_empty() { - // the first inner, non-empty vector we find gives us an approximate rectangular shape - cumulative_offsets = Vec::with_capacity(raw.len() * v_outer.len()); - } - cumulative_offsets.push(CumulativeOffset::new(vec![i, j], total_count)); - total_count += len; - } - } - } - - Self { - cumulative_offsets, - total_count, - } - } - /// find the index of the data source that contains 'start' fn find_index(&self, start: usize) -> usize { assert!(!self.cumulative_offsets.is_empty()); @@ -451,7 +428,7 @@ impl CumulativeOffsets { } // return the biggest slice possible that starts at 'start' - pub 
fn get_slice<'a, 'b, T, U>(&'a self, raw: &'b U, start: usize) -> &'b [T] + fn get_slice<'a, 'b, T, U>(&'a self, raw: &'b U, start: usize) -> &'b [T] where U: ExtractSliceFromRawData<'b, T> + 'b, { @@ -1248,7 +1225,7 @@ impl From for SerdeIncrementalAccountsHash { } #[cfg(test)] -pub mod tests { +mod tests { use {super::*, itertools::Itertools, std::str::FromStr, tempfile::tempdir}; lazy_static! { @@ -1276,6 +1253,31 @@ pub mod tests { } } + impl CumulativeOffsets { + fn from_raw_2d(raw: &[Vec>]) -> Self { + let mut total_count: usize = 0; + let mut cumulative_offsets = Vec::with_capacity(0); + for (i, v_outer) in raw.iter().enumerate() { + for (j, v) in v_outer.iter().enumerate() { + let len = v.len(); + if len > 0 { + if cumulative_offsets.is_empty() { + // the first inner, non-empty vector we find gives us an approximate rectangular shape + cumulative_offsets = Vec::with_capacity(raw.len() * v_outer.len()); + } + cumulative_offsets.push(CumulativeOffset::new(vec![i, j], total_count)); + total_count += len; + } + } + } + + Self { + cumulative_offsets, + total_count, + } + } + } + #[test] fn test_find_first_pubkey_in_bin() { let stats = HashStats::default(); From cf35799b2a4079dd09a7cb7384dc769edba17cac Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 11 Sep 2023 16:03:37 -0500 Subject: [PATCH 060/407] remove unused stats (#33213) Co-authored-by: HaoranYi --- accounts-db/src/cache_hash_data_stats.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/accounts-db/src/cache_hash_data_stats.rs b/accounts-db/src/cache_hash_data_stats.rs index 915b1b82cbdf30..f8d3364f8f81e0 100644 --- a/accounts-db/src/cache_hash_data_stats.rs +++ b/accounts-db/src/cache_hash_data_stats.rs @@ -14,7 +14,6 @@ pub struct CacheHashDataStats { pub create_save_us: AtomicU64, pub load_us: AtomicU64, pub read_us: AtomicU64, - pub merge_us: AtomicU64, pub unused_cache_files: AtomicUsize, } From e01269a9de52b9c4d122af5bacb3c053e791e76d Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Mon, 11 Sep 
2023 22:22:04 +0000 Subject: [PATCH 061/407] sends repair requests over QUIC protocol (#33016) The commit implements client-side of serve-repair and ancestor-hash-service over QUIC protocol. --- core/src/repair/ancestor_hashes_service.rs | 107 +++++++++++++++++++-- core/src/repair/quic_endpoint.rs | 1 - core/src/repair/repair_service.rs | 15 ++- core/src/repair/result.rs | 6 +- core/src/repair/serve_repair.rs | 57 +++++++++-- core/src/shred_fetch_stage.rs | 100 +++++++++++++++++-- core/src/tvu.rs | 11 ++- core/src/validator.rs | 45 ++++++++- core/src/window_service.rs | 6 ++ 9 files changed, 313 insertions(+), 35 deletions(-) diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 29f08862580d5d..3214c89e14ea15 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -7,12 +7,15 @@ use { }, outstanding_requests::OutstandingRequests, packet_threshold::DynamicPacketToProcessThreshold, + quic_endpoint::LocalRequest, repair_service::{AncestorDuplicateSlotsSender, RepairInfo, RepairStatsGroup}, + request_response::RequestResponse, serve_repair::{ self, AncestorHashesRepairType, AncestorHashesResponse, RepairProtocol, ServeRepair, }, }, replay_stage::DUPLICATE_THRESHOLD, + shred_fetch_stage::receive_repair_quic_packets, }, bincode::serialize, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, @@ -36,7 +39,7 @@ use { std::{ collections::HashSet, io::{Cursor, Read}, - net::UdpSocket, + net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, @@ -44,6 +47,7 @@ use { thread::{self, sleep, Builder, JoinHandle}, time::{Duration, Instant}, }, + tokio::sync::mpsc::Sender as AsyncSender, }; #[derive(Debug, PartialEq, Eq)] @@ -149,6 +153,7 @@ impl AncestorHashesService { exit: Arc, blockstore: Arc, ancestor_hashes_request_socket: Arc, + quic_endpoint_sender: AsyncSender, repair_info: RepairInfo, ancestor_hashes_replay_update_receiver: 
AncestorHashesReplayUpdateReceiver, ) -> Self { @@ -157,16 +162,31 @@ impl AncestorHashesService { let t_receiver = streamer::receiver( ancestor_hashes_request_socket.clone(), exit.clone(), - response_sender, + response_sender.clone(), Recycler::default(), Arc::new(StreamerReceiveStats::new( "ancestor_hashes_response_receiver", )), Duration::from_millis(1), // coalesce - false, - None, + false, // use_pinned_memory + None, // in_vote_only_mode ); + let (quic_endpoint_response_sender, quic_endpoint_response_receiver) = unbounded(); + let t_receiver_quic = { + let exit = exit.clone(); + Builder::new() + .name(String::from("solAncHashQuic")) + .spawn(|| { + receive_repair_quic_packets( + quic_endpoint_response_receiver, + response_sender, + Recycler::default(), + exit, + ) + }) + .unwrap() + }; let ancestor_hashes_request_statuses: Arc> = Arc::new(DashMap::new()); let (retryable_slots_sender, retryable_slots_receiver) = unbounded(); @@ -188,14 +208,22 @@ impl AncestorHashesService { let t_ancestor_requests = Self::run_manage_ancestor_requests( ancestor_hashes_request_statuses, ancestor_hashes_request_socket, + quic_endpoint_sender, + quic_endpoint_response_sender, repair_info, outstanding_requests, exit, ancestor_hashes_replay_update_receiver, retryable_slots_receiver, ); - let thread_hdls = vec![t_receiver, t_ancestor_hashes_responses, t_ancestor_requests]; - Self { thread_hdls } + Self { + thread_hdls: vec![ + t_receiver, + t_receiver_quic, + t_ancestor_hashes_responses, + t_ancestor_requests, + ], + } } pub(crate) fn join(self) -> thread::Result<()> { @@ -551,6 +579,8 @@ impl AncestorHashesService { fn run_manage_ancestor_requests( ancestor_hashes_request_statuses: Arc>, ancestor_hashes_request_socket: Arc, + quic_endpoint_sender: AsyncSender, + quic_endpoint_response_sender: Sender<(SocketAddr, Vec)>, repair_info: RepairInfo, outstanding_requests: Arc>, exit: Arc, @@ -587,10 +617,11 @@ impl AncestorHashesService { if exit.load(Ordering::Relaxed) { return; } - 
Self::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -612,6 +643,8 @@ impl AncestorHashesService { fn manage_ancestor_requests( ancestor_hashes_request_statuses: &DashMap, ancestor_hashes_request_socket: &UdpSocket, + quic_endpoint_sender: &AsyncSender, + quic_endpoint_response_sender: &Sender<(SocketAddr, Vec)>, repair_info: &RepairInfo, outstanding_requests: &RwLock, ancestor_hashes_replay_update_receiver: &AncestorHashesReplayUpdateReceiver, @@ -710,6 +743,8 @@ impl AncestorHashesService { if Self::initiate_ancestor_hashes_requests_for_duplicate_slot( ancestor_hashes_request_statuses, ancestor_hashes_request_socket, + quic_endpoint_sender, + quic_endpoint_response_sender, &repair_info.cluster_slots, serve_repair, &repair_info.repair_validators, @@ -787,6 +822,8 @@ impl AncestorHashesService { fn initiate_ancestor_hashes_requests_for_duplicate_slot( ancestor_hashes_request_statuses: &DashMap, ancestor_hashes_request_socket: &UdpSocket, + quic_endpoint_sender: &AsyncSender, + quic_endpoint_response_sender: &Sender<(SocketAddr, Vec)>, cluster_slots: &ClusterSlots, serve_repair: &ServeRepair, repair_validators: &Option>, @@ -811,10 +848,11 @@ impl AncestorHashesService { repair_stats .ancestor_requests .update(pubkey, duplicate_slot, 0); + let ancestor_hashes_repair_type = AncestorHashesRepairType(duplicate_slot); let nonce = outstanding_requests .write() .unwrap() - .add_request(AncestorHashesRepairType(duplicate_slot), timestamp()); + .add_request(ancestor_hashes_repair_type, timestamp()); let Ok(request_bytes) = serve_repair.ancestor_repair_request_bytes( identity_keypair, pubkey, @@ -827,7 +865,21 @@ impl AncestorHashesService { Protocol::UDP => { let _ = ancestor_hashes_request_socket.send_to(&request_bytes, socket_addr); } - Protocol::QUIC => todo!(), + Protocol::QUIC => { + 
let num_expected_responses = + usize::try_from(ancestor_hashes_repair_type.num_expected_responses()) + .unwrap(); + let request = LocalRequest { + remote_address: *socket_addr, + bytes: request_bytes, + num_expected_responses, + response_sender: quic_endpoint_response_sender.clone(), + }; + if quic_endpoint_sender.blocking_send(request).is_err() { + // The receiver end of the channel is disconnected. + break; + } + } } } @@ -1441,10 +1493,14 @@ mod test { repair_validators, .. } = repair_info; - + let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = unbounded(); + let (quic_endpoint_sender, _quic_endpoint_sender) = + tokio::sync::mpsc::channel(/*buffer:*/ 128); AncestorHashesService::initiate_ancestor_hashes_requests_for_duplicate_slot( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &cluster_slots, &requester_serve_repair, &repair_validators, @@ -1494,6 +1550,8 @@ mod test { AncestorHashesService::initiate_ancestor_hashes_requests_for_duplicate_slot( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &cluster_slots, &requester_serve_repair, &repair_validators, @@ -1555,6 +1613,8 @@ mod test { AncestorHashesService::initiate_ancestor_hashes_requests_for_duplicate_slot( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &cluster_slots, &requester_serve_repair, &repair_validators, @@ -1640,10 +1700,15 @@ mod test { } = repair_info; cluster_info.insert_info(responder_node.info); bank_forks.read().unwrap().root_bank().epoch_schedule(); + let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = unbounded(); + let (quic_endpoint_sender, _quic_endpoint_sender) = + tokio::sync::mpsc::channel(/*buffer:*/ 128); // 1) No signals from ReplayStage, no requests should be made 
AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1686,6 +1751,8 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1725,6 +1792,8 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1756,6 +1825,8 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1793,6 +1864,8 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1833,6 +1906,8 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1989,10 +2064,15 @@ mod test { &leader_schedule_cache, ); + let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = unbounded(); + let (quic_endpoint_sender, _quic_endpoint_sender) = + tokio::sync::mpsc::channel(/*buffer:*/ 128); // Simulate making a request 
AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -2088,6 +2168,9 @@ mod test { &repair_info.ancestor_duplicate_slots_sender, &retryable_slots_sender, ); + let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = unbounded(); + let (quic_endpoint_sender, _quic_endpoint_sender) = + tokio::sync::mpsc::channel(/*buffer:*/ 128); // Simulate ancestor request thread getting the retry signal assert!(dead_slot_pool.is_empty()); @@ -2096,6 +2179,8 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -2134,6 +2219,8 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index 03dfa42bd2b05e..f7b445011c937a 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -1,4 +1,3 @@ -#![allow(dead_code)] use { bincode::Options, crossbeam_channel::Sender, diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 592500929dc0bd..c7cfab03f8f8ed 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -13,6 +13,7 @@ use { ancestor_hashes_service::{AncestorHashesReplayUpdateReceiver, AncestorHashesService}, duplicate_repair_status::AncestorDuplicateSlotToRepair, outstanding_requests::OutstandingRequests, + quic_endpoint::LocalRequest, repair_weight::RepairWeight, 
serve_repair::{self, ServeRepair, ShredRepairType, REPAIR_PEERS_CACHE_CAPACITY}, }, @@ -46,6 +47,7 @@ use { thread::{self, sleep, Builder, JoinHandle}, time::{Duration, Instant}, }, + tokio::sync::mpsc::Sender as AsyncSender, }; // Time to defer repair requests to allow for turbine propagation @@ -239,6 +241,8 @@ impl RepairService { exit: Arc, repair_socket: Arc, ancestor_hashes_socket: Arc, + quic_endpoint_sender: AsyncSender, + quic_endpoint_response_sender: CrossbeamSender<(SocketAddr, Vec)>, repair_info: RepairInfo, verified_vote_receiver: VerifiedVoteReceiver, outstanding_requests: Arc>, @@ -250,6 +254,7 @@ impl RepairService { let blockstore = blockstore.clone(); let exit = exit.clone(); let repair_info = repair_info.clone(); + let quic_endpoint_sender = quic_endpoint_sender.clone(); Builder::new() .name("solRepairSvc".to_string()) .spawn(move || { @@ -257,6 +262,8 @@ impl RepairService { &blockstore, &exit, &repair_socket, + &quic_endpoint_sender, + &quic_endpoint_response_sender, repair_info, verified_vote_receiver, &outstanding_requests, @@ -271,6 +278,7 @@ impl RepairService { exit, blockstore, ancestor_hashes_socket, + quic_endpoint_sender, repair_info, ancestor_hashes_replay_update_receiver, ); @@ -281,10 +289,13 @@ impl RepairService { } } + #[allow(clippy::too_many_arguments)] fn run( blockstore: &Blockstore, exit: &AtomicBool, repair_socket: &UdpSocket, + quic_endpoint_sender: &AsyncSender, + quic_endpoint_response_sender: &CrossbeamSender<(SocketAddr, Vec)>, repair_info: RepairInfo, verified_vote_receiver: VerifiedVoteReceiver, outstanding_requests: &RwLock, @@ -433,9 +444,11 @@ impl RepairService { &repair_info.repair_validators, &mut outstanding_requests, identity_keypair, + quic_endpoint_sender, + quic_endpoint_response_sender, repair_protocol, ) - .ok()?; + .ok()??; Some((req, to)) }) .collect() diff --git a/core/src/repair/result.rs b/core/src/repair/result.rs index b222817704a51e..86329fda31a8c4 100644 --- a/core/src/repair/result.rs +++ 
b/core/src/repair/result.rs @@ -26,11 +26,13 @@ pub enum Error { #[error(transparent)] InvalidContactInfo(#[from] contact_info::Error), #[error(transparent)] + RepairVerify(#[from] RepairVerifyError), + #[error("Send Error")] + SendError, + #[error(transparent)] Serialize(#[from] std::boxed::Box), #[error(transparent)] WeightedIndex(#[from] rand::distributions::weighted::WeightedError), - #[error(transparent)] - RepairVerify(#[from] RepairVerifyError), } pub type Result = std::result::Result; diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 0610e2ea7ae763..8ab42c28829f1d 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -3,7 +3,7 @@ use { cluster_slots_service::cluster_slots::ClusterSlots, repair::{ duplicate_repair_status::get_ancestor_hash_repair_sample_size, - quic_endpoint::RemoteRequest, + quic_endpoint::{LocalRequest, RemoteRequest}, repair_response, repair_service::{OutstandingShredRepairs, RepairStats, REPAIR_MS}, request_response::RequestResponse, @@ -11,7 +11,7 @@ use { }, }, bincode::{serialize, Options}, - crossbeam_channel::{Receiver, RecvTimeoutError}, + crossbeam_channel::{Receiver, RecvTimeoutError, Sender}, lru::LruCache, rand::{ distributions::{Distribution, WeightedError, WeightedIndex}, @@ -59,7 +59,7 @@ use { thread::{Builder, JoinHandle}, time::{Duration, Instant}, }, - tokio::sync::oneshot::Sender as OneShotSender, + tokio::sync::{mpsc::Sender as AsyncSender, oneshot::Sender as OneShotSender}, }; /// the number of slots to respond with when responding to `Orphan` requests @@ -132,6 +132,7 @@ impl RequestResponse for ShredRepairType { } } +#[derive(Copy, Clone)] pub struct AncestorHashesRepairType(pub Slot); impl AncestorHashesRepairType { pub fn slot(&self) -> Slot { @@ -339,7 +340,6 @@ pub(crate) struct RepairPeers { struct Node { pubkey: Pubkey, serve_repair: SocketAddr, - #[allow(dead_code)] serve_repair_quic: SocketAddr, } @@ -1027,6 +1027,7 @@ impl ServeRepair { 
Self::repair_proto_to_bytes(&request, keypair) } + #[allow(clippy::too_many_arguments)] pub(crate) fn repair_request( &self, cluster_slots: &ClusterSlots, @@ -1036,8 +1037,10 @@ impl ServeRepair { repair_validators: &Option>, outstanding_requests: &mut OutstandingShredRepairs, identity_keypair: &Keypair, + quic_endpoint_sender: &AsyncSender, + quic_endpoint_response_sender: &Sender<(SocketAddr, Vec)>, repair_protocol: Protocol, - ) -> Result<(SocketAddr, Vec)> { + ) -> Result)>> { // find a peer that appears to be accepting replication and has the desired slot, as indicated // by a valid tvu port location let slot = repair_request.slot(); @@ -1067,8 +1070,21 @@ impl ServeRepair { repair_request ); match repair_protocol { - Protocol::UDP => Ok((peer.serve_repair, out)), - Protocol::QUIC => todo!(), + Protocol::UDP => Ok(Some((peer.serve_repair, out))), + Protocol::QUIC => { + let num_expected_responses = + usize::try_from(repair_request.num_expected_responses()).unwrap(); + let request = LocalRequest { + remote_address: peer.serve_repair_quic, + bytes: out, + num_expected_responses, + response_sender: quic_endpoint_response_sender.clone(), + }; + quic_endpoint_sender + .blocking_send(request) + .map_err(|_| Error::SendError) + .map(|()| None) + } } } @@ -1970,6 +1986,10 @@ mod tests { ); let identity_keypair = cluster_info.keypair().clone(); let mut outstanding_requests = OutstandingShredRepairs::default(); + let (quic_endpoint_sender, _quic_endpoint_receiver) = + tokio::sync::mpsc::channel(/*buffer:*/ 128); + let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = + crossbeam_channel::unbounded(); let rv = serve_repair.repair_request( &cluster_slots, ShredRepairType::Shred(0, 0), @@ -1978,6 +1998,8 @@ mod tests { &None, &mut outstanding_requests, &identity_keypair, + &quic_endpoint_sender, + &quic_endpoint_response_sender, Protocol::UDP, // repair_protocol ); assert_matches!(rv, Err(Error::ClusterInfo(ClusterInfoError::NoPeers))); @@ -2009,8 
+2031,11 @@ mod tests { &None, &mut outstanding_requests, &identity_keypair, + &quic_endpoint_sender, + &quic_endpoint_response_sender, Protocol::UDP, // repair_protocol ) + .unwrap() .unwrap(); assert_eq!(nxt.serve_repair(Protocol::UDP).unwrap(), serve_repair_addr); assert_eq!(rv.0, nxt.serve_repair(Protocol::UDP).unwrap()); @@ -2046,8 +2071,11 @@ mod tests { &None, &mut outstanding_requests, &identity_keypair, + &quic_endpoint_sender, + &quic_endpoint_response_sender, Protocol::UDP, // repair_protocol ) + .unwrap() .unwrap(); if rv.0 == serve_repair_addr { one = true; @@ -2294,7 +2322,10 @@ mod tests { let cluster_slots = ClusterSlots::default(); let cluster_info = Arc::new(new_test_cluster_info()); let me = cluster_info.my_contact_info(); - + let (quic_endpoint_sender, _quic_endpoint_receiver) = + tokio::sync::mpsc::channel(/*buffer:*/ 128); + let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = + crossbeam_channel::unbounded(); // Insert two peers on the network let contact_info2 = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); @@ -2325,6 +2356,8 @@ mod tests { &known_validators, &mut OutstandingShredRepairs::default(), &identity_keypair, + &quic_endpoint_sender, + &quic_endpoint_response_sender, Protocol::UDP, // repair_protocol ), Err(Error::ClusterInfo(ClusterInfoError::NoPeers)) @@ -2345,9 +2378,11 @@ mod tests { &known_validators, &mut OutstandingShredRepairs::default(), &identity_keypair, + &quic_endpoint_sender, + &quic_endpoint_response_sender, Protocol::UDP, // repair_protocol ), - Ok(_) + Ok(Some(_)) ); // Using no known validators should default to all @@ -2369,9 +2404,11 @@ mod tests { &None, &mut OutstandingShredRepairs::default(), &identity_keypair, + &quic_endpoint_sender, + &quic_endpoint_response_sender, Protocol::UDP, // repair_protocol ), - Ok(_) + Ok(Some(_)) ); } diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index fa49afb522ec40..62733953cc724f 100644 --- 
a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -162,8 +162,9 @@ impl ShredFetchStage { #[allow(clippy::too_many_arguments)] pub(crate) fn new( sockets: Vec>, - quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, + turbine_quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, repair_socket: Arc, + repair_quic_endpoint_receiver: Receiver<(SocketAddr, Vec)>, sender: Sender, shred_version: u16, bank_forks: Arc>, @@ -202,13 +203,55 @@ impl ShredFetchStage { tvu_threads.extend(repair_receiver); tvu_threads.push(tvu_filter); tvu_threads.push(repair_handler); - + // Repair shreds fetched over QUIC protocol. + { + let (packet_sender, packet_receiver) = unbounded(); + let bank_forks = bank_forks.clone(); + let recycler = recycler.clone(); + let exit = exit.clone(); + let sender = sender.clone(); + let turbine_disabled = turbine_disabled.clone(); + tvu_threads.extend([ + Builder::new() + .name("solTvuRecvRpr".to_string()) + .spawn(|| { + receive_repair_quic_packets( + repair_quic_endpoint_receiver, + packet_sender, + recycler, + exit, + ) + }) + .unwrap(), + Builder::new() + .name("solTvuFetchRpr".to_string()) + .spawn(move || { + Self::modify_packets( + packet_receiver, + sender, + &bank_forks, + shred_version, + "shred_fetch_repair_quic", + PacketFlags::REPAIR, + None, // repair_context; no ping packets! + turbine_disabled, + ) + }) + .unwrap(), + ]); + } + // Turbine shreds fetched over QUIC protocol. 
let (packet_sender, packet_receiver) = unbounded(); tvu_threads.extend([ Builder::new() .name("solTvuRecvQuic".to_string()) .spawn(|| { - receive_quic_datagrams(quic_endpoint_receiver, packet_sender, recycler, exit) + receive_quic_datagrams( + turbine_quic_endpoint_receiver, + packet_sender, + recycler, + exit, + ) }) .unwrap(), Builder::new() @@ -241,14 +284,14 @@ impl ShredFetchStage { } fn receive_quic_datagrams( - quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, + turbine_quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, sender: Sender, recycler: PacketBatchRecycler, exit: Arc, ) { const RECV_TIMEOUT: Duration = Duration::from_secs(1); while !exit.load(Ordering::Relaxed) { - let entry = match quic_endpoint_receiver.recv_timeout(RECV_TIMEOUT) { + let entry = match turbine_quic_endpoint_receiver.recv_timeout(RECV_TIMEOUT) { Ok(entry) => entry, Err(RecvTimeoutError::Timeout) => continue, Err(RecvTimeoutError::Disconnected) => return, @@ -260,7 +303,7 @@ fn receive_quic_datagrams( }; let deadline = Instant::now() + PACKET_COALESCE_DURATION; let entries = std::iter::once(entry).chain( - std::iter::repeat_with(|| quic_endpoint_receiver.recv_deadline(deadline).ok()) + std::iter::repeat_with(|| turbine_quic_endpoint_receiver.recv_deadline(deadline).ok()) .while_some(), ); let size = entries @@ -285,6 +328,51 @@ fn receive_quic_datagrams( } } +pub(crate) fn receive_repair_quic_packets( + repair_quic_endpoint_receiver: Receiver<(SocketAddr, Vec)>, + sender: Sender, + recycler: PacketBatchRecycler, + exit: Arc, +) { + const RECV_TIMEOUT: Duration = Duration::from_secs(1); + while !exit.load(Ordering::Relaxed) { + let entry = match repair_quic_endpoint_receiver.recv_timeout(RECV_TIMEOUT) { + Ok(entry) => entry, + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => return, + }; + let mut packet_batch = + PacketBatch::new_with_recycler(&recycler, PACKETS_PER_BATCH, "receive_quic_datagrams"); + unsafe { + 
packet_batch.set_len(PACKETS_PER_BATCH); + }; + let deadline = Instant::now() + PACKET_COALESCE_DURATION; + let entries = std::iter::once(entry).chain( + std::iter::repeat_with(|| repair_quic_endpoint_receiver.recv_deadline(deadline).ok()) + .while_some(), + ); + let size = entries + .filter(|(_, bytes)| bytes.len() <= PACKET_DATA_SIZE) + .zip(packet_batch.iter_mut()) + .map(|((addr, bytes), packet)| { + *packet.meta_mut() = Meta { + size: bytes.len(), + addr: addr.ip(), + port: addr.port(), + flags: PacketFlags::REPAIR, + }; + packet.buffer_mut()[..bytes.len()].copy_from_slice(&bytes); + }) + .count(); + if size > 0 { + packet_batch.truncate(size); + if sender.send(packet_batch).is_err() { + return; // The receiver end of the channel is disconnected. + } + } + } +} + #[cfg(test)] mod tests { use { diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 1fbf211124c0b3..d3d57c1314caed 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -15,7 +15,7 @@ use { cost_update_service::CostUpdateService, drop_bank_service::DropBankService, ledger_cleanup_service::LedgerCleanupService, - repair::repair_service::RepairInfo, + repair::{quic_endpoint::LocalRequest, repair_service::RepairInfo}, replay_stage::{ReplayStage, ReplayStageConfig}, rewards_recorder_service::RewardsRecorderSender, shred_fetch_stage::ShredFetchStage, @@ -138,6 +138,7 @@ impl Tvu { banking_tracer: Arc, turbine_quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>, turbine_quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, + repair_quic_endpoint_sender: AsyncSender, ) -> Result { let TvuSockets { repair: repair_socket, @@ -151,10 +152,13 @@ impl Tvu { let repair_socket = Arc::new(repair_socket); let ancestor_hashes_socket = Arc::new(ancestor_hashes_socket); let fetch_sockets: Vec> = fetch_sockets.into_iter().map(Arc::new).collect(); + let (repair_quic_endpoint_response_sender, repair_quic_endpoint_response_receiver) = + unbounded(); let fetch_stage = ShredFetchStage::new( fetch_sockets, 
turbine_quic_endpoint_receiver, repair_socket.clone(), + repair_quic_endpoint_response_receiver, fetch_sender, tvu_config.shred_version, bank_forks.clone(), @@ -209,6 +213,8 @@ impl Tvu { retransmit_sender, repair_socket, ancestor_hashes_socket, + repair_quic_endpoint_sender, + repair_quic_endpoint_response_sender, exit.clone(), repair_info, leader_schedule_cache.clone(), @@ -401,6 +407,8 @@ pub mod tests { let (turbine_quic_endpoint_sender, _turbine_quic_endpoint_receiver) = tokio::sync::mpsc::channel(/*capacity:*/ 128); let (_turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver) = unbounded(); + let (repair_quic_endpoint_sender, _repair_quic_endpoint_receiver) = + tokio::sync::mpsc::channel(/*buffer:*/ 128); //start cluster_info1 let cluster_info1 = ClusterInfo::new(target1.info.clone(), keypair, SocketAddrSpace::Unspecified); @@ -484,6 +492,7 @@ pub mod tests { BankingTracer::new_disabled(), turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver, + repair_quic_endpoint_sender, ) .expect("assume success"); exit.store(true, Ordering::Relaxed); diff --git a/core/src/validator.rs b/core/src/validator.rs index ec77b58612ba52..80f06464bc69d5 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -16,7 +16,7 @@ use { }, ledger_metric_report_service::LedgerMetricReportService, poh_timing_report_service::PohTimingReportService, - repair::{serve_repair::ServeRepair, serve_repair_service::ServeRepairService}, + repair::{self, serve_repair::ServeRepair, serve_repair_service::ServeRepairService}, rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService}, sample_performance_service::SamplePerformanceService, sigverify, @@ -132,6 +132,7 @@ use { }, strum::VariantNames, strum_macros::{Display, EnumString, EnumVariantNames, IntoStaticStr}, + tokio::runtime::Runtime as TokioRuntime, }; const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000; @@ -463,8 +464,11 @@ pub struct Validator { accounts_background_service: 
AccountsBackgroundService, accounts_hash_verifier: AccountsHashVerifier, turbine_quic_endpoint: Endpoint, - turbine_quic_endpoint_runtime: Option, + turbine_quic_endpoint_runtime: Option, turbine_quic_endpoint_join_handle: solana_turbine::quic_endpoint::AsyncTryJoinHandle, + repair_quic_endpoint: Endpoint, + repair_quic_endpoint_runtime: Option, + repair_quic_endpoint_join_handle: repair::quic_endpoint::AsyncTryJoinHandle, } impl Validator { @@ -1048,7 +1052,7 @@ impl Validator { serve_repair, // Incoming UDP repair requests are adapted into RemoteRequest // and also sent through the same channel. - repair_quic_endpoint_sender, + repair_quic_endpoint_sender.clone(), repair_quic_endpoint_receiver, blockstore.clone(), node.sockets.serve_repair, @@ -1149,7 +1153,7 @@ impl Validator { ) = solana_turbine::quic_endpoint::new_quic_endpoint( turbine_quic_endpoint_runtime .as_ref() - .map(tokio::runtime::Runtime::handle) + .map(TokioRuntime::handle) .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), &identity_keypair, node.sockets.tvu_quic, @@ -1161,6 +1165,30 @@ impl Validator { ) .unwrap(); + // Repair quic endpoint. 
+ let repair_quic_endpoint_runtime = current_runtime_handle.is_err().then(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name("solRepairQuic") + .build() + .unwrap() + }); + let (repair_quic_endpoint, repair_quic_endpoint_sender, repair_quic_endpoint_join_handle) = + repair::quic_endpoint::new_quic_endpoint( + repair_quic_endpoint_runtime + .as_ref() + .map(TokioRuntime::handle) + .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), + &identity_keypair, + node.sockets.serve_repair_quic, + node.info + .serve_repair(Protocol::QUIC) + .expect("Operator must spin up node with valid QUIC serve-repair address") + .ip(), + repair_quic_endpoint_sender, + ) + .unwrap(); + let (replay_vote_sender, replay_vote_receiver) = unbounded(); let tvu = Tvu::new( vote_account, @@ -1213,6 +1241,7 @@ impl Validator { banking_tracer.clone(), turbine_quic_endpoint_sender.clone(), turbine_quic_endpoint_receiver, + repair_quic_endpoint_sender, )?; let tpu = Tpu::new( @@ -1301,6 +1330,9 @@ impl Validator { turbine_quic_endpoint, turbine_quic_endpoint_runtime, turbine_quic_endpoint_join_handle, + repair_quic_endpoint, + repair_quic_endpoint_runtime, + repair_quic_endpoint_join_handle, }) } @@ -1410,9 +1442,14 @@ impl Validator { } self.gossip_service.join().expect("gossip_service"); + repair::quic_endpoint::close_quic_endpoint(&self.repair_quic_endpoint); self.serve_repair_service .join() .expect("serve_repair_service"); + self.repair_quic_endpoint_runtime + .map(|runtime| runtime.block_on(self.repair_quic_endpoint_join_handle)) + .transpose() + .unwrap(); self.stats_reporter_service .join() .expect("stats_reporter_service"); diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 7efe981275e5a2..a68a20e2078471 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -7,6 +7,7 @@ use { completed_data_sets_service::CompletedDataSetsSender, repair::{ ancestor_hashes_service::AncestorHashesReplayUpdateReceiver, + 
quic_endpoint::LocalRequest, repair_response, repair_service::{ DumpedSlotsReceiver, OutstandingShredRepairs, PopularPrunedForksSender, RepairInfo, @@ -39,6 +40,7 @@ use { thread::{self, Builder, JoinHandle}, time::{Duration, Instant}, }, + tokio::sync::mpsc::Sender as AsyncSender, }; type ShredPayload = Vec; @@ -325,6 +327,8 @@ impl WindowService { retransmit_sender: Sender>, repair_socket: Arc, ancestor_hashes_socket: Arc, + repair_quic_endpoint_sender: AsyncSender, + repair_quic_endpoint_response_sender: Sender<(SocketAddr, Vec)>, exit: Arc, repair_info: RepairInfo, leader_schedule_cache: Arc, @@ -344,6 +348,8 @@ impl WindowService { exit.clone(), repair_socket, ancestor_hashes_socket, + repair_quic_endpoint_sender, + repair_quic_endpoint_response_sender, repair_info, verified_vote_receiver, outstanding_requests.clone(), From 09936aac0e6c16865729b13b7a4242b04807db93 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 11 Sep 2023 18:33:25 -0400 Subject: [PATCH 062/407] Removes `new()` from POD CalculateHashIntermediate and CumulativeOffset (#33211) --- accounts-db/src/accounts_db.rs | 48 ++++--- accounts-db/src/accounts_hash.rs | 195 +++++++++++++++++++---------- accounts-db/src/cache_hash_data.rs | 10 +- 3 files changed, 168 insertions(+), 85 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2468fe82385496..4543cec7484ca9 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2412,7 +2412,11 @@ impl<'a> AppendVecScan for ScanState<'a> { self.mismatch_found.fetch_add(1, Ordering::Relaxed); } } - let source_item = CalculateHashIntermediate::new(loaded_hash, balance, *pubkey); + let source_item = CalculateHashIntermediate { + hash: loaded_hash, + lamports: balance, + pubkey: *pubkey, + }; self.init_accum(self.range); self.accum[self.pubkey_to_bin_index].push(source_item); } @@ -10388,10 +10392,26 @@ pub mod tests { let pubkey255 = Pubkey::from([0xffu8; 32]); let mut raw_expected = vec![ - 
CalculateHashIntermediate::new(Hash::default(), 1, pubkey0), - CalculateHashIntermediate::new(Hash::default(), 128, pubkey127), - CalculateHashIntermediate::new(Hash::default(), 129, pubkey128), - CalculateHashIntermediate::new(Hash::default(), 256, pubkey255), + CalculateHashIntermediate { + hash: Hash::default(), + lamports: 1, + pubkey: pubkey0, + }, + CalculateHashIntermediate { + hash: Hash::default(), + lamports: 128, + pubkey: pubkey127, + }, + CalculateHashIntermediate { + hash: Hash::default(), + lamports: 129, + pubkey: pubkey128, + }, + CalculateHashIntermediate { + hash: Hash::default(), + lamports: 256, + pubkey: pubkey255, + }, ]; let expected_hashes = [ @@ -10983,11 +11003,11 @@ pub mod tests { self.calls.fetch_add(1, Ordering::Relaxed); assert_eq!(loaded_account.pubkey(), &self.pubkey); assert_eq!(self.slot_expected, self.current_slot); - self.accum.push(vec![CalculateHashIntermediate::new( - Hash::default(), - self.value_to_use_for_lamports, - self.pubkey, - )]); + self.accum.push(vec![CalculateHashIntermediate { + hash: Hash::default(), + lamports: self.value_to_use_for_lamports, + pubkey: self.pubkey, + }]); } fn scanning_complete(self) -> BinnedHashData { self.accum @@ -11044,11 +11064,11 @@ pub mod tests { assert_eq!(calls.load(Ordering::Relaxed), 1); assert_scan( result2, - vec![vec![vec![CalculateHashIntermediate::new( - Hash::default(), - expected, + vec![vec![vec![CalculateHashIntermediate { + hash: Hash::default(), + lamports: expected, pubkey, - )]]], + }]]], 1, 0, 1, diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 8191c04fe2447f..30b38714896ec9 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -287,31 +287,12 @@ pub struct CalculateHashIntermediate { pub pubkey: Pubkey, } -impl CalculateHashIntermediate { - pub fn new(hash: Hash, lamports: u64, pubkey: Pubkey) -> Self { - Self { - hash, - lamports, - pubkey, - } - } -} - #[derive(Default, Debug, PartialEq, Eq)] 
pub struct CumulativeOffset { pub index: Vec, pub start_offset: usize, } -impl CumulativeOffset { - pub fn new(index: Vec, start_offset: usize) -> CumulativeOffset { - Self { - index, - start_offset, - } - } -} - pub trait ExtractSliceFromRawData<'b, T: 'b> { fn extract<'a>(&'b self, offset: &'a CumulativeOffset, start: usize) -> &'b [T]; } @@ -389,7 +370,10 @@ impl CumulativeOffsets { .enumerate() .filter_map(|(i, len)| { if len > 0 { - let result = CumulativeOffset::new(vec![i], total_count); + let result = CumulativeOffset { + index: vec![i], + start_offset: total_count, + }; total_count += len; Some(result) } else { @@ -1265,7 +1249,10 @@ mod tests { // the first inner, non-empty vector we find gives us an approximate rectangular shape cumulative_offsets = Vec::with_capacity(raw.len() * v_outer.len()); } - cumulative_offsets.push(CumulativeOffset::new(vec![i, j], total_count)); + cumulative_offsets.push(CumulativeOffset { + index: vec![i, j], + start_offset: total_count, + }); total_count += len; } } @@ -1297,11 +1284,11 @@ mod tests { .flat_map(|(bin, count)| { (0..*count).map(move |_| { let binner = PubkeyBinCalculator24::new(bins); - CalculateHashIntermediate::new( - Hash::default(), - 0, - binner.lowest_pubkey_from_bin(bin, bins), - ) + CalculateHashIntermediate { + hash: Hash::default(), + lamports: 0, + pubkey: binner.lowest_pubkey_from_bin(bin, bins), + } }) }) .collect::>(); @@ -1449,15 +1436,23 @@ mod tests { let mut account_maps = Vec::new(); - let key = Pubkey::from([11u8; 32]); + let pubkey = Pubkey::from([11u8; 32]); let hash = Hash::new(&[1u8; 32]); - let val = CalculateHashIntermediate::new(hash, 88, key); + let val = CalculateHashIntermediate { + hash, + lamports: 88, + pubkey, + }; account_maps.push(val); // 2nd key - zero lamports, so will be removed - let key = Pubkey::from([12u8; 32]); + let pubkey = Pubkey::from([12u8; 32]); let hash = Hash::new(&[2u8; 32]); - let val = CalculateHashIntermediate::new(hash, 0, key); + let val = 
CalculateHashIntermediate { + hash, + lamports: 0, + pubkey, + }; account_maps.push(val); let dir_for_temp_cache_files = tempdir().unwrap(); @@ -1468,9 +1463,13 @@ mod tests { assert_eq!((result.0, result.1), (expected_hash, 88)); // 3rd key - with pubkey value before 1st key so it will be sorted first - let key = Pubkey::from([10u8; 32]); + let pubkey = Pubkey::from([10u8; 32]); let hash = Hash::new(&[2u8; 32]); - let val = CalculateHashIntermediate::new(hash, 20, key); + let val = CalculateHashIntermediate { + hash, + lamports: 20, + pubkey, + }; account_maps.insert(0, val); let result = accounts_hash @@ -1479,9 +1478,13 @@ mod tests { assert_eq!((result.0, result.1), (expected_hash, 108)); // 3rd key - with later slot - let key = Pubkey::from([10u8; 32]); + let pubkey = Pubkey::from([10u8; 32]); let hash = Hash::new(&[99u8; 32]); - let val = CalculateHashIntermediate::new(hash, 30, key); + let val = CalculateHashIntermediate { + hash, + lamports: 30, + pubkey, + }; account_maps.insert(1, val); let result = accounts_hash @@ -1576,7 +1579,11 @@ mod tests { let accounts: Vec<_> = hashes .zip(keys.iter()) .enumerate() - .map(|(i, (hash, key))| CalculateHashIntermediate::new(hash, (i + 1) as u64, *key)) + .map(|(i, (hash, &pubkey))| CalculateHashIntermediate { + hash, + lamports: (i + 1) as u64, + pubkey, + }) .collect(); type ExpectedType = (String, bool, u64, String); @@ -1727,13 +1734,21 @@ mod tests { #[test] fn test_accountsdb_compare_two_hash_entries() { solana_logger::setup(); - let key = Pubkey::new_unique(); + let pubkey = Pubkey::new_unique(); let hash = Hash::new_unique(); - let val = CalculateHashIntermediate::new(hash, 1, key); + let val = CalculateHashIntermediate { + hash, + lamports: 1, + pubkey, + }; // slot same, version < let hash2 = Hash::new_unique(); - let val2 = CalculateHashIntermediate::new(hash2, 4, key); + let val2 = CalculateHashIntermediate { + hash: hash2, + lamports: 4, + pubkey, + }; assert_eq!( std::cmp::Ordering::Equal, // no longer 
comparing slots or versions AccountsHasher::compare_two_hash_entries(&val, &val2) @@ -1741,7 +1756,11 @@ mod tests { // slot same, vers = let hash3 = Hash::new_unique(); - let val3 = CalculateHashIntermediate::new(hash3, 2, key); + let val3 = CalculateHashIntermediate { + hash: hash3, + lamports: 2, + pubkey, + }; assert_eq!( std::cmp::Ordering::Equal, AccountsHasher::compare_two_hash_entries(&val, &val3) @@ -1749,7 +1768,11 @@ mod tests { // slot same, vers > let hash4 = Hash::new_unique(); - let val4 = CalculateHashIntermediate::new(hash4, 6, key); + let val4 = CalculateHashIntermediate { + hash: hash4, + lamports: 6, + pubkey, + }; assert_eq!( std::cmp::Ordering::Equal, // no longer comparing slots or versions AccountsHasher::compare_two_hash_entries(&val, &val4) @@ -1757,7 +1780,11 @@ mod tests { // slot >, version < let hash5 = Hash::new_unique(); - let val5 = CalculateHashIntermediate::new(hash5, 8, key); + let val5 = CalculateHashIntermediate { + hash: hash5, + lamports: 8, + pubkey, + }; assert_eq!( std::cmp::Ordering::Equal, // no longer comparing slots or versions AccountsHasher::compare_two_hash_entries(&val, &val5) @@ -1776,10 +1803,14 @@ mod tests { fn test_accountsdb_remove_zero_balance_accounts() { solana_logger::setup(); - let key = Pubkey::new_unique(); + let pubkey = Pubkey::new_unique(); let hash = Hash::new_unique(); let mut account_maps = Vec::new(); - let val = CalculateHashIntermediate::new(hash, 1, key); + let val = CalculateHashIntermediate { + hash, + lamports: 1, + pubkey, + }; account_maps.push(val.clone()); let vecs = vec![account_maps.to_vec()]; @@ -1791,7 +1822,11 @@ mod tests { ); // zero original lamports, higher version - let val = CalculateHashIntermediate::new(hash, 0, key); + let val = CalculateHashIntermediate { + hash, + lamports: 0, + pubkey, + }; account_maps.push(val); // has to be after previous entry since account_maps are in slot order let vecs = vec![account_maps.to_vec()]; @@ -1809,11 +1844,23 @@ mod tests { let hash = 
Hash::new_unique(); let mut account_maps = Vec::new(); let mut account_maps2 = Vec::new(); - let val = CalculateHashIntermediate::new(hash, 1, key); + let val = CalculateHashIntermediate { + hash, + lamports: 1, + pubkey: key, + }; account_maps.push(val.clone()); - let val2 = CalculateHashIntermediate::new(hash, 2, key2); + let val2 = CalculateHashIntermediate { + hash, + lamports: 2, + pubkey: key2, + }; account_maps.push(val2.clone()); - let val3 = CalculateHashIntermediate::new(hash, 3, key2); + let val3 = CalculateHashIntermediate { + hash, + lamports: 3, + pubkey: key2, + }; account_maps2.push(val3.clone()); let mut vecs = vec![account_maps.to_vec(), account_maps2.to_vec()]; @@ -1847,11 +1894,23 @@ mod tests { let hash = Hash::new_unique(); let mut account_maps = Vec::new(); let mut account_maps2 = Vec::new(); - let val2 = CalculateHashIntermediate::new(hash, 2, key2); + let val2 = CalculateHashIntermediate { + hash, + lamports: 2, + pubkey: key2, + }; account_maps.push(val2.clone()); - let val = CalculateHashIntermediate::new(hash, 1, key); + let val = CalculateHashIntermediate { + hash, + lamports: 1, + pubkey: key, + }; account_maps.push(val.clone()); - let val3 = CalculateHashIntermediate::new(hash, 3, key2); + let val3 = CalculateHashIntermediate { + hash, + lamports: 3, + pubkey: key2, + }; account_maps2.push(val3.clone()); let mut vecs = vec![account_maps.to_vec(), account_maps2.to_vec()]; @@ -2214,12 +2273,16 @@ mod tests { let offset = 2; let input = vec![ - CalculateHashIntermediate::new( - Hash::new(&[1u8; 32]), - u64::MAX - offset, - Pubkey::new_unique(), - ), - CalculateHashIntermediate::new(Hash::new(&[2u8; 32]), offset + 1, Pubkey::new_unique()), + CalculateHashIntermediate { + hash: Hash::new(&[1u8; 32]), + lamports: u64::MAX - offset, + pubkey: Pubkey::new_unique(), + }, + CalculateHashIntermediate { + hash: Hash::new(&[2u8; 32]), + lamports: offset + 1, + pubkey: Pubkey::new_unique(), + }, ]; let dir_for_temp_cache_files = tempdir().unwrap(); 
let accounts_hasher = AccountsHasher::new(dir_for_temp_cache_files.path().to_path_buf()); @@ -2244,16 +2307,16 @@ mod tests { let offset = 2; let input = vec![ - vec![CalculateHashIntermediate::new( - Hash::new(&[1u8; 32]), - u64::MAX - offset, - Pubkey::new_unique(), - )], - vec![CalculateHashIntermediate::new( - Hash::new(&[2u8; 32]), - offset + 1, - Pubkey::new_unique(), - )], + vec![CalculateHashIntermediate { + hash: Hash::new(&[1u8; 32]), + lamports: u64::MAX - offset, + pubkey: Pubkey::new_unique(), + }], + vec![CalculateHashIntermediate { + hash: Hash::new(&[2u8; 32]), + lamports: offset + 1, + pubkey: Pubkey::new_unique(), + }], ]; let dir_for_temp_cache_files = tempdir().unwrap(); let accounts_hasher = AccountsHasher::new(dir_for_temp_cache_files.path().to_path_buf()); diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index 50e85af9a89116..c6a0dc217c9ce0 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -502,11 +502,11 @@ mod tests { } } - CalculateHashIntermediate::new( - solana_sdk::hash::Hash::new_unique(), - ct as u64, - pk, - ) + CalculateHashIntermediate { + hash: solana_sdk::hash::Hash::new_unique(), + lamports: ct as u64, + pubkey: pk, + } }) .collect::>() } else { From 3cd3994939d5939e14917702d5a3691fe0a82d08 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 11 Sep 2023 19:28:26 -0500 Subject: [PATCH 063/407] clean up pub crate (#33214) Co-authored-by: HaoranYi --- accounts-db/src/cache_hash_data.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index c6a0dc217c9ce0..a58bf50d030025 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -196,7 +196,7 @@ impl CacheHashDataFile { } } -pub struct CacheHashData { +pub(crate) struct CacheHashData { cache_dir: PathBuf, pre_existing_cache_files: Arc>>, should_delete_old_cache_files_on_drop: 
bool, @@ -213,7 +213,10 @@ impl Drop for CacheHashData { } impl CacheHashData { - pub fn new(cache_dir: PathBuf, should_delete_old_cache_files_on_drop: bool) -> CacheHashData { + pub(crate) fn new( + cache_dir: PathBuf, + should_delete_old_cache_files_on_drop: bool, + ) -> CacheHashData { std::fs::create_dir_all(&cache_dir).unwrap_or_else(|err| { panic!("error creating cache dir {}: {err}", cache_dir.display()) }); @@ -292,7 +295,7 @@ impl CacheHashData { } /// save 'data' to 'file_name' - pub fn save( + pub(crate) fn save( &self, file_name: impl AsRef, data: &SavedTypeSlice, From 5f4f593acd112fab1d20af5c1abf976e6397c549 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 14:10:09 +0000 Subject: [PATCH 064/407] build(deps): bump socket2 from 0.5.3 to 0.5.4 (#33218) * build(deps): bump socket2 from 0.5.3 to 0.5.4 Bumps [socket2](https://github.com/rust-lang/socket2) from 0.5.3 to 0.5.4. - [Release notes](https://github.com/rust-lang/socket2/releases) - [Changelog](https://github.com/rust-lang/socket2/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/socket2/commits) --- updated-dependencies: - dependency-name: socket2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a53a445676469b..3ea52d5e508af3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4113,7 +4113,7 @@ checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" dependencies = [ "bytes", "libc", - "socket2 0.5.3", + "socket2 0.5.4", "tracing", "windows-sys 0.48.0", ] @@ -5048,9 +5048,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", "windows-sys 0.48.0", @@ -6389,7 +6389,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_derive", - "socket2 0.5.3", + "socket2 0.5.4", "solana-logger", "solana-sdk", "solana-version", diff --git a/Cargo.toml b/Cargo.toml index da2390c2520046..7556fd58e6dfae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -295,7 +295,7 @@ sha3 = "0.10.4" signal-hook = "0.3.17" siphasher = "0.3.11" smpl_jwt = "0.7.1" -socket2 = "0.5.3" +socket2 = "0.5.4" soketto = "0.7" solana_rbpf = "=0.7.1" solana-account-decoder = { path = "account-decoder", version = "=1.17.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f3a5a0987edb5b..f4bc65fa4f6113 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3626,7 +3626,7 @@ checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" dependencies = [ "bytes", "libc", - "socket2 0.5.3", + "socket2 0.5.4", "tracing", "windows-sys 0.48.0", ] @@ -4420,9 +4420,9 @@ 
dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", "windows-sys 0.48.0", @@ -5179,7 +5179,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_derive", - "socket2 0.5.3", + "socket2 0.5.4", "solana-logger", "solana-sdk", "solana-version", From 212f1b473504cac2253c49acc18f0f564715efc8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 14:10:49 +0000 Subject: [PATCH 065/407] build(deps): bump base64 from 0.21.3 to 0.21.4 (#33202) * build(deps): bump base64 from 0.21.3 to 0.21.4 Bumps [base64](https://github.com/marshallpierce/rust-base64) from 0.21.3 to 0.21.4. - [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md) - [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.21.3...v0.21.4) --- updated-dependencies: - dependency-name: base64 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 38 +++++++++++++++++++------------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 36 ++++++++++++++++++------------------ 3 files changed, 38 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3ea52d5e508af3..cca7b391b6b040 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -553,9 +553,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "base64ct" @@ -4388,7 +4388,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ "async-compression", - "base64 0.21.3", + "base64 0.21.4", "bytes", "encoding_rs", "futures-core", @@ -5077,7 +5077,7 @@ version = "1.17.0" dependencies = [ "Inflector", "assert_matches", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bs58", "bv", @@ -5588,7 +5588,7 @@ name = "solana-cli-output" version = "1.17.0" dependencies = [ "Inflector", - "base64 0.21.3", + "base64 0.21.4", "chrono", "clap 2.33.3", "console", @@ -5719,7 +5719,7 @@ name = "solana-core" version = "1.17.0" dependencies = [ "assert_matches", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bs58", "bytes", @@ -5961,7 +5961,7 @@ dependencies = [ name = "solana-genesis" version = "1.17.0" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bincode", "clap 2.33.3", "itertools", @@ -6482,7 +6482,7 @@ dependencies = [ "ark-serialize", "array-bytes", "assert_matches", - 
"base64 0.21.3", + "base64 0.21.4", "bincode", "bitflags 2.3.3", "blake3", @@ -6533,7 +6533,7 @@ name = "solana-program-runtime" version = "1.17.0" dependencies = [ "assert_matches", - "base64 0.21.3", + "base64 0.21.4", "bincode", "eager", "enum-iterator", @@ -6563,7 +6563,7 @@ version = "1.17.0" dependencies = [ "assert_matches", "async-trait", - "base64 0.21.3", + "base64 0.21.4", "bincode", "chrono-humanize", "crossbeam-channel", @@ -6667,7 +6667,7 @@ dependencies = [ name = "solana-rpc" version = "1.17.0" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bincode", "bs58", "crossbeam-channel", @@ -6726,7 +6726,7 @@ version = "1.17.0" dependencies = [ "assert_matches", "async-trait", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bs58", "crossbeam-channel", @@ -6753,7 +6753,7 @@ dependencies = [ name = "solana-rpc-client-api" version = "1.17.0" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bs58", "jsonrpc-core", "reqwest", @@ -6819,7 +6819,7 @@ version = "1.17.0" dependencies = [ "arrayref", "assert_matches", - "base64 0.21.3", + "base64 0.21.4", "bincode", "blake3", "bv", @@ -6901,7 +6901,7 @@ version = "1.17.0" dependencies = [ "anyhow", "assert_matches", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bitflags 2.3.3", "borsh 0.10.3", @@ -7122,7 +7122,7 @@ dependencies = [ name = "solana-test-validator" version = "1.17.0" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bincode", "crossbeam-channel", "log", @@ -7249,7 +7249,7 @@ name = "solana-transaction-status" version = "1.17.0" dependencies = [ "Inflector", - "base64 0.21.3", + "base64 0.21.4", "bincode", "borsh 0.9.3", "bs58", @@ -7495,7 +7495,7 @@ name = "solana-zk-token-sdk" version = "1.17.0" dependencies = [ "aes-gcm-siv", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bytemuck", "byteorder", @@ -8161,7 +8161,7 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.3", + "base64 0.21.4", "bytes", "futures-core", "futures-util", diff --git a/Cargo.toml 
b/Cargo.toml index 7556fd58e6dfae..d4f0d4ddd5c0b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -146,7 +146,7 @@ async-mutex = "1.4.0" async-trait = "0.1.73" atty = "0.2.11" backoff = "0.4.0" -base64 = "0.21.3" +base64 = "0.21.4" bincode = "1.3.3" bitflags = { version = "2.3.3", features = ["serde"] } blake3 = "1.4.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f4bc65fa4f6113..b1618a388a4cf2 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -527,9 +527,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "base64ct" @@ -3839,7 +3839,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ "async-compression", - "base64 0.21.3", + "base64 0.21.4", "bytes", "encoding_rs", "futures-core", @@ -4448,7 +4448,7 @@ name = "solana-account-decoder" version = "1.17.0" dependencies = [ "Inflector", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bs58", "bv", @@ -4687,7 +4687,7 @@ name = "solana-cli-output" version = "1.17.0" dependencies = [ "Inflector", - "base64 0.21.3", + "base64 0.21.4", "chrono", "clap 2.33.3", "console", @@ -4781,7 +4781,7 @@ dependencies = [ name = "solana-core" version = "1.17.0" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bincode", "bs58", "bytes", @@ -5236,7 +5236,7 @@ dependencies = [ "ark-ff", "ark-serialize", "array-bytes", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bitflags 2.3.3", "blake3", @@ -5284,7 +5284,7 @@ dependencies = [ name = "solana-program-runtime" version = "1.17.0" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bincode", 
"eager", "enum-iterator", @@ -5312,7 +5312,7 @@ version = "1.17.0" dependencies = [ "assert_matches", "async-trait", - "base64 0.21.3", + "base64 0.21.4", "bincode", "chrono-humanize", "crossbeam-channel", @@ -5409,7 +5409,7 @@ dependencies = [ name = "solana-rpc" version = "1.17.0" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bincode", "bs58", "crossbeam-channel", @@ -5464,7 +5464,7 @@ name = "solana-rpc-client" version = "1.17.0" dependencies = [ "async-trait", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bs58", "indicatif", @@ -5487,7 +5487,7 @@ dependencies = [ name = "solana-rpc-client-api" version = "1.17.0" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bs58", "jsonrpc-core", "reqwest", @@ -5519,7 +5519,7 @@ name = "solana-runtime" version = "1.17.0" dependencies = [ "arrayref", - "base64 0.21.3", + "base64 0.21.4", "bincode", "blake3", "bv", @@ -5997,7 +5997,7 @@ name = "solana-sdk" version = "1.17.0" dependencies = [ "assert_matches", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bitflags 2.3.3", "borsh 0.10.3", @@ -6175,7 +6175,7 @@ dependencies = [ name = "solana-test-validator" version = "1.17.0" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bincode", "crossbeam-channel", "log", @@ -6241,7 +6241,7 @@ name = "solana-transaction-status" version = "1.17.0" dependencies = [ "Inflector", - "base64 0.21.3", + "base64 0.21.4", "bincode", "borsh 0.9.3", "bs58", @@ -6420,7 +6420,7 @@ name = "solana-zk-token-sdk" version = "1.17.0" dependencies = [ "aes-gcm-siv", - "base64 0.21.3", + "base64 0.21.4", "bincode", "bytemuck", "byteorder 1.4.3", @@ -7003,7 +7003,7 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.3", + "base64 0.21.4", "bytes", "futures-core", "futures-util", From 2b1a30d81f1ea0320a192363e1527f286be00d80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 14:11:13 +0000 Subject: [PATCH 066/407] build(deps): bump syn 
from 2.0.31 to 2.0.32 (#33201) * build(deps): bump syn from 2.0.31 to 2.0.32 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.31 to 2.0.32. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.31...2.0.32) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 44 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 42 +++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cca7b391b6b040..36e280c94c21f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,7 +436,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -590,7 +590,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1496,7 +1496,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1507,7 +1507,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1699,7 +1699,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1799,7 +1799,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2074,7 +2074,7 @@ checksum = 
"89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -3391,7 +3391,7 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -3898,7 +3898,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4061,7 +4061,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4749,7 +4749,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4794,7 +4794,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4844,7 +4844,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -5954,7 +5954,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -6961,7 +6961,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -7704,9 +7704,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.31" +version = "2.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398" +checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" dependencies = [ "proc-macro2", "quote", @@ -7909,7 +7909,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + 
"syn 2.0.32", ] [[package]] @@ -8048,7 +8048,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -8539,7 +8539,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", "wasm-bindgen-shared", ] @@ -8573,7 +8573,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8919,7 +8919,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b1618a388a4cf2..3249340040652f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -410,7 +410,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -564,7 +564,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1206,7 +1206,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1217,7 +1217,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1392,7 +1392,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1495,7 +1495,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1744,7 +1744,7 @@ checksum = 
"89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -3009,7 +3009,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -3445,7 +3445,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -3580,7 +3580,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4158,7 +4158,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4203,7 +4203,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4959,7 +4959,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -6052,7 +6052,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -6617,9 +6617,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.31" +version = "2.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398" +checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" dependencies = [ "proc-macro2", "quote", @@ -6767,7 +6767,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -6890,7 +6890,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 
2.0.32", ] [[package]] @@ -7367,7 +7367,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", "wasm-bindgen-shared", ] @@ -7401,7 +7401,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7738,7 +7738,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] From d34a881b12dcc50ceffcf8f1df98301c6a99d85d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 14:11:39 +0000 Subject: [PATCH 067/407] build(deps): bump serde_json from 1.0.105 to 1.0.106 (#33200) * build(deps): bump serde_json from 1.0.105 to 1.0.106 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.105 to 1.0.106. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.105...v1.0.106) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 36e280c94c21f7..1cd118299136d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4754,9 +4754,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index d4f0d4ddd5c0b9..a1718c429af01b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -286,7 +286,7 @@ semver = "1.0.18" serde = "1.0.188" serde_bytes = "0.11.12" serde_derive = "1.0.103" -serde_json = "1.0.105" +serde_json = "1.0.106" serde_yaml = "0.9.25" serial_test = "2.0.0" serde_with = { version = "2.3.3", default-features = false } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3249340040652f..ffd939ff5ffc75 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4163,9 +4163,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" dependencies = [ "itoa", "ryu", From c61ee20b4449976c0197d726ea9cbb3b7577a199 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 12 Sep 2023 11:23:07 -0400 Subject: [PATCH 068/407] Transient accounts hash cache dir is unnecessary (#33181) --- accounts-db/src/accounts_db.rs | 11 
+++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 4543cec7484ca9..c27f7b5d67ea0e 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -97,7 +97,6 @@ use { borrow::{Borrow, Cow}, boxed::Box, collections::{hash_map, BTreeSet, HashMap, HashSet}, - fs, hash::{Hash as StdHash, Hasher as StdHasher}, io::Result as IoResult, ops::{Range, RangeBounds}, @@ -1494,7 +1493,6 @@ pub struct AccountsDb { base_working_temp_dir: Option, accounts_hash_cache_path: PathBuf, - transient_accounts_hash_cache_path: PathBuf, pub shrink_paths: RwLock>>, @@ -2503,7 +2501,6 @@ impl AccountsDb { paths: vec![], base_working_path, base_working_temp_dir, - transient_accounts_hash_cache_path: accounts_hash_cache_path.join("transient"), accounts_hash_cache_path, shrink_paths: RwLock::new(None), temp_paths: None, @@ -7737,6 +7734,10 @@ impl AccountsDb { let slot = storages.max_slot_inclusive(); let use_bg_thread_pool = config.use_bg_thread_pool; let accounts_hash_cache_path = self.accounts_hash_cache_path.clone(); + let transient_accounts_hash_cache_dir = TempDir::new_in(&accounts_hash_cache_path) + .expect("create transient accounts hash cache dir"); + let transient_accounts_hash_cache_path = + transient_accounts_hash_cache_dir.path().to_path_buf(); let scan_and_hash = || { let (cache_hash_data, cache_hash_data_us) = measure_us!(Self::get_cache_hash_data( accounts_hash_cache_path, @@ -7751,8 +7752,6 @@ impl AccountsDb { end: PUBKEY_BINS_FOR_CALCULATING_HASHES, }; - fs::create_dir_all(&self.transient_accounts_hash_cache_path) - .expect("create transient accounts hash cache dir"); let accounts_hasher = AccountsHasher { filler_account_suffix: if self.filler_accounts_config.count > 0 { self.filler_account_suffix @@ -7760,7 +7759,7 @@ impl AccountsDb { None }, zero_lamport_accounts: kind.zero_lamport_accounts(), - dir_for_temp_cache_files: 
self.transient_accounts_hash_cache_path.clone(), + dir_for_temp_cache_files: transient_accounts_hash_cache_path, active_stats: &self.active_stats, }; From 21e7f98b3a78adf65e91cc29a8a234cfc4983bf0 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 12 Sep 2023 11:26:08 -0400 Subject: [PATCH 069/407] Uses hex for the hash in accounts hash cache file names (#33196) --- accounts-db/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index c27f7b5d67ea0e..2857180bf2ece5 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7262,7 +7262,7 @@ impl AccountsDb { // so, build a file name: let hash = hasher.finish(); let file_name = format!( - "{}.{}.{}.{}.{}", + "{}.{}.{}.{}.{:016x}", range_this_chunk.start, range_this_chunk.end, bin_range.start, From 21513932712914d72d21b6397b7bb2be233c7b3a Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Tue, 12 Sep 2023 15:27:06 +0000 Subject: [PATCH 070/407] adds more context why the first data shred is inserted synchronously (#33092) --- .../src/broadcast_stage/standard_broadcast_run.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 328bebcc79714e..031e72012340e7 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -125,7 +125,7 @@ impl StandardBroadcastRun { None => { // If the blockstore has shreds for the slot, it should not // recreate the slot: - // https://github.com/solana-labs/solana/blob/ff68bf6c2/ledger/src/leader_schedule_cache.rs#L142-L146 + // https://github.com/solana-labs/solana/blob/92a0b310c/ledger/src/leader_schedule_cache.rs##L139-L148 if let Some(slot_meta) = blockstore.meta(slot).unwrap() { if slot_meta.received > 0 || slot_meta.consumed > 0 { 
process_stats.num_extant_slots += 1; @@ -252,9 +252,13 @@ impl StandardBroadcastRun { .unwrap(); // Insert the first data shred synchronously so that blockstore stores // that the leader started this block. This must be done before the - // blocks are sent out over the wire. By contrast Self::insert skips - // the 1st data shred with index zero. - // https://github.com/solana-labs/solana/blob/53695ecd2/core/src/broadcast_stage/standard_broadcast_run.rs#L334-L339 + // blocks are sent out over the wire, so that the slots we have already + // sent a shred for are skipped (even if the node reboots): + // https://github.com/solana-labs/solana/blob/92a0b310c/ledger/src/leader_schedule_cache.rs#L139-L148 + // preventing the node from broadcasting duplicate blocks: + // https://github.com/solana-labs/solana/blob/92a0b310c/turbine/src/broadcast_stage/standard_broadcast_run.rs#L132-L142 + // By contrast Self::insert skips the 1st data shred with index zero: + // https://github.com/solana-labs/solana/blob/92a0b310c/turbine/src/broadcast_stage/standard_broadcast_run.rs#L367-L373 if let Some(shred) = data_shreds.first() { if shred.index() == 0 { blockstore @@ -350,7 +354,7 @@ impl StandardBroadcastRun { let insert_shreds_start = Instant::now(); let mut shreds = Arc::try_unwrap(shreds).unwrap_or_else(|shreds| (*shreds).clone()); // The first data shred is inserted synchronously. 
- // https://github.com/solana-labs/solana/blob/53695ecd2/core/src/broadcast_stage/standard_broadcast_run.rs#L239-L246 + // https://github.com/solana-labs/solana/blob/92a0b310c/turbine/src/broadcast_stage/standard_broadcast_run.rs#L268-L283 if let Some(shred) = shreds.first() { if shred.is_data() && shred.index() == 0 { shreds.swap_remove(0); From acd7ad96c38762859445559dfd148e1045c68bc7 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 12 Sep 2023 13:10:22 -0400 Subject: [PATCH 071/407] Purges old accounts hash cache dirs (#33183) --- core/src/validator.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 80f06464bc69d5..cb40bd0ff9d4d4 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -598,10 +598,23 @@ impl Validator { timer.stop(); info!("Cleaning orphaned account snapshot directories done. {timer}"); - // The accounts hash cache dir was renamed, so cleanup the old dir if it exists. - let old_accounts_hash_cache_dir = ledger_path.join("calculate_accounts_hash_cache"); - if old_accounts_hash_cache_dir.exists() { - snapshot_utils::move_and_async_delete_path(old_accounts_hash_cache_dir); + // The accounts hash cache dir was renamed, so cleanup any old dirs that exist. 
+ let accounts_hash_cache_path = config + .accounts_db_config + .as_ref() + .and_then(|config| config.accounts_hash_cache_path.as_ref()) + .map(PathBuf::as_path) + .unwrap_or(ledger_path); + let old_accounts_hash_cache_dirs = [ + ledger_path.join("calculate_accounts_hash_cache"), + accounts_hash_cache_path.join("full"), + accounts_hash_cache_path.join("incremental"), + accounts_hash_cache_path.join("transient"), + ]; + for old_accounts_hash_cache_dir in old_accounts_hash_cache_dirs { + if old_accounts_hash_cache_dir.exists() { + snapshot_utils::move_and_async_delete_path(old_accounts_hash_cache_dir); + } } { From bdf7207eecafd813365d25670eec398c2fce901a Mon Sep 17 00:00:00 2001 From: bji Date: Tue, 12 Sep 2023 10:40:04 -0700 Subject: [PATCH 072/407] Implement timely vote credits feature. (#32957) --- cli-output/src/cli_output.rs | 36 +- cli/src/vote.rs | 4 +- programs/vote/benches/process_vote.rs | 2 +- programs/vote/src/vote_state/mod.rs | 680 +++++++++++++++++++++++--- rpc/src/rpc.rs | 8 +- sdk/program/src/vote/state/mod.rs | 66 ++- sdk/src/feature_set.rs | 5 + 7 files changed, 704 insertions(+), 97 deletions(-) diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 68e12e9c803d55..6fc394f6709530 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -43,9 +43,7 @@ use { }, solana_vote_program::{ authorized_voters::AuthorizedVoters, - vote_state::{ - BlockTimestamp, LandedVote, Lockout, MAX_EPOCH_CREDITS_HISTORY, MAX_LOCKOUT_HISTORY, - }, + vote_state::{BlockTimestamp, LandedVote, MAX_EPOCH_CREDITS_HISTORY, MAX_LOCKOUT_HISTORY}, }, std::{ collections::{BTreeMap, HashMap}, @@ -1047,7 +1045,7 @@ impl fmt::Display for CliKeyedEpochRewards { fn show_votes_and_credits( f: &mut fmt::Formatter, - votes: &[CliLockout], + votes: &[CliLandedVote], epoch_voting_history: &[CliEpochVotingHistory], ) -> fmt::Result { if votes.is_empty() { @@ -1070,11 +1068,16 @@ fn show_votes_and_credits( )?; for vote in votes.iter().rev() { - 
writeln!( + write!( f, "- slot: {} (confirmation count: {})", vote.slot, vote.confirmation_count )?; + if vote.latency == 0 { + writeln!(f)?; + } else { + writeln!(f, " (latency {})", vote.latency)?; + } } if let Some(newest) = newest_history_entry { writeln!( @@ -1555,7 +1558,7 @@ pub struct CliVoteAccount { pub commission: u8, pub root_slot: Option, pub recent_timestamp: BlockTimestamp, - pub votes: Vec, + pub votes: Vec, pub epoch_voting_history: Vec, #[serde(skip_serializing)] pub use_lamports_unit: bool, @@ -1637,25 +1640,18 @@ pub struct CliEpochVotingHistory { #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct CliLockout { +pub struct CliLandedVote { + pub latency: u8, pub slot: Slot, pub confirmation_count: u32, } -impl From<&Lockout> for CliLockout { - fn from(lockout: &Lockout) -> Self { - Self { - slot: lockout.slot(), - confirmation_count: lockout.confirmation_count(), - } - } -} - -impl From<&LandedVote> for CliLockout { - fn from(vote: &LandedVote) -> Self { +impl From<&LandedVote> for CliLandedVote { + fn from(landed_vote: &LandedVote) -> Self { Self { - slot: vote.slot(), - confirmation_count: vote.confirmation_count(), + latency: landed_vote.latency, + slot: landed_vote.slot(), + confirmation_count: landed_vote.confirmation_count(), } } } diff --git a/cli/src/vote.rs b/cli/src/vote.rs index bde158295c3434..6c98e49c3bff42 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -23,7 +23,7 @@ use { offline::*, }, solana_cli_output::{ - return_signers_with_config, CliEpochVotingHistory, CliLockout, CliVoteAccount, + return_signers_with_config, CliEpochVotingHistory, CliLandedVote, CliVoteAccount, ReturnSignersConfig, }, solana_remote_wallet::remote_wallet::RemoteWalletManager, @@ -1215,7 +1215,7 @@ pub fn process_show_vote_account( let epoch_schedule = rpc_client.get_epoch_schedule()?; - let mut votes: Vec = vec![]; + let mut votes: Vec = vec![]; let mut epoch_voting_history: Vec = vec![]; if !vote_state.votes.is_empty() { 
for vote in &vote_state.votes { diff --git a/programs/vote/benches/process_vote.rs b/programs/vote/benches/process_vote.rs index c60fe5a68f4eba..6c9cb979c90484 100644 --- a/programs/vote/benches/process_vote.rs +++ b/programs/vote/benches/process_vote.rs @@ -48,7 +48,7 @@ fn create_accounts() -> (Slot, SlotHashes, Vec, Vec = vec![0; VoteState::size_of()]; let versioned = VoteStateVersions::new_current(vote_state); diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index f3a0904b13670f..e83171d06e0844 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -545,11 +545,12 @@ fn check_slots_are_valid( // popped off. pub fn process_new_vote_state( vote_state: &mut VoteState, - new_state: VecDeque, + mut new_state: VecDeque, new_root: Option, timestamp: Option, epoch: Epoch, - _feature_set: Option<&FeatureSet>, + current_slot: Slot, + feature_set: Option<&FeatureSet>, ) -> Result<(), VoteError> { assert!(!new_state.is_empty()); if new_state.len() > MAX_LOCKOUT_HISTORY { @@ -568,7 +569,7 @@ pub fn process_new_vote_state( _ => (), } - let mut previous_vote: Option<&Lockout> = None; + let mut previous_vote: Option<&LandedVote> = None; // Check that all the votes in the new proposed state are: // 1) Strictly sorted from oldest to newest vote @@ -597,7 +598,7 @@ pub fn process_new_vote_state( return Err(VoteError::SlotsNotOrdered); } else if previous_vote.confirmation_count() <= vote.confirmation_count() { return Err(VoteError::ConfirmationsNotOrdered); - } else if vote.slot() > previous_vote.last_locked_out_slot() { + } else if vote.slot() > previous_vote.lockout.last_locked_out_slot() { return Err(VoteError::NewVoteStateLockoutMismatch); } } @@ -609,27 +610,27 @@ pub fn process_new_vote_state( let mut current_vote_state_index: usize = 0; let mut new_vote_state_index = 0; - // Count the number of slots at and before the new root within the current vote state lockouts. 
Start with 1 - // for the new root. The purpose of this is to know how many slots were rooted by this state update: - // - The new root was rooted - // - As were any slots that were in the current state but are not in the new state. The only slots which - // can be in this set are those oldest slots in the current vote state that are not present in the - // new vote state; these have been "popped off the back" of the tower and thus represent finalized slots - let mut finalized_slot_count = 1_u64; + // Accumulate credits earned by newly rooted slots. The behavior changes with timely_vote_credits: prior to + // this feature, there was a bug that counted a new root slot as 1 credit even if it had never been voted on. + // timely_vote_credits fixes this bug by only awarding credits for slots actually voted on and finalized. + let timely_vote_credits = feature_set.map_or(false, |f| { + f.is_active(&feature_set::timely_vote_credits::id()) + }); + let mut earned_credits = if timely_vote_credits { 0_u64 } else { 1_u64 }; if let Some(new_root) = new_root { for current_vote in &vote_state.votes { // Find the first vote in the current vote state for a slot greater // than the new proposed root if current_vote.slot() <= new_root { + if timely_vote_credits || (current_vote.slot() != new_root) { + earned_credits = earned_credits + .checked_add(vote_state.credits_for_vote_at_index(current_vote_state_index)) + .expect("`earned_credits` does not overflow"); + } current_vote_state_index = current_vote_state_index .checked_add(1) .expect("`current_vote_state_index` is bounded by `MAX_LOCKOUT_HISTORY` when processing new root"); - if current_vote.slot() != new_root { - finalized_slot_count = finalized_slot_count - .checked_add(1) - .expect("`finalized_slot_count` is bounded by `MAX_LOCKOUT_HISTORY` when processing new root"); - } continue; } @@ -637,13 +638,30 @@ pub fn process_new_vote_state( } } + // For any slots newly added to the new vote state, the vote latency of that slot is 
not provided by the + // VoteStateUpdate instruction contents, but instead is computed from the actual latency of the VoteStateUpdate + // instruction. This prevents other validators from manipulating their own vote latencies within their vote states + // and forcing the rest of the cluster to accept these possibly fraudulent latency values. If the + // timly_vote_credits feature is not enabled then vote latency is set to 0 for new votes. + // + // For any slot that is in both the new state and the current state, the vote latency of the new state is taken + // from the current state. + // + // Thus vote latencies are set here for any newly vote-on slots when a VoteStateUpdate instruction is received. + // They are copied into the new vote state after every VoteStateUpdate for already voted-on slots. + // And when voted-on slots are rooted, the vote latencies stored in the vote state of all the rooted slots is used + // to compute credits earned. + // All validators compute the same vote latencies because all process the same VoteStateUpdate instruction at the + // same slot, and the only time vote latencies are ever computed is at the time that their slot is first voted on; + // after that, the latencies are retained unaltered until the slot is rooted. + // All the votes in our current vote state that are missing from the new vote state // must have been expired by later votes. Check that the lockouts match this assumption. 
while current_vote_state_index < vote_state.votes.len() && new_vote_state_index < new_state.len() { let current_vote = &vote_state.votes[current_vote_state_index]; - let new_vote = &new_state[new_vote_state_index]; + let new_vote = &mut new_state[new_vote_state_index]; // If the current slot is less than the new proposed slot, then the // new slot must have popped off the old slot, so check that the @@ -664,6 +682,9 @@ pub fn process_new_vote_state( return Err(VoteError::ConfirmationRollBack); } + // Copy the vote slot latency in from the current state to the new state + new_vote.latency = vote_state.votes[current_vote_state_index].latency; + current_vote_state_index = current_vote_state_index .checked_add(1) .expect("`current_vote_state_index` is bounded by `MAX_LOCKOUT_HISTORY` when slot is equal to proposed"); @@ -681,21 +702,32 @@ pub fn process_new_vote_state( // `new_vote_state` passed all the checks, finalize the change by rewriting // our state. + + // Now set the vote latencies on new slots not in the current state. New slots not in the current vote state will + // have had their latency initialized to 0 by the above loop. Those will now be updated to their actual latency. + // If the timely_vote_credits feature is not enabled, then the latency is left as 0 for such slots, which will + // result in 1 credit per slot when credits are calculated at the time that the slot is rooted. + if timely_vote_credits { + for new_vote in new_state.iter_mut() { + if new_vote.latency == 0 { + new_vote.latency = VoteState::compute_vote_latency(new_vote.slot(), current_slot); + } + } + } + if vote_state.root_slot != new_root { // Award vote credits based on the number of slots that were voted on and have reached finality // For each finalized slot, there was one voted-on slot in the new vote state that was responsible for // finalizing it. Each of those votes is awarded 1 credit. 
- vote_state.increment_credits(epoch, finalized_slot_count); + vote_state.increment_credits(epoch, earned_credits); } if let Some(timestamp) = timestamp { let last_slot = new_state.back().unwrap().slot(); vote_state.process_timestamp(last_slot, timestamp)?; } vote_state.root_slot = new_root; - vote_state.votes = new_state - .into_iter() - .map(|lockout| lockout.into()) - .collect(); + vote_state.votes = new_state; + Ok(()) } @@ -705,11 +737,12 @@ pub fn process_vote_unfiltered( vote: &Vote, slot_hashes: &[SlotHash], epoch: Epoch, + current_slot: Slot, ) -> Result<(), VoteError> { check_slots_are_valid(vote_state, vote_slots, &vote.hash, slot_hashes)?; vote_slots .iter() - .for_each(|s| vote_state.process_next_vote_slot(*s, epoch)); + .for_each(|s| vote_state.process_next_vote_slot(*s, epoch, current_slot)); Ok(()) } @@ -718,6 +751,7 @@ pub fn process_vote( vote: &Vote, slot_hashes: &[SlotHash], epoch: Epoch, + current_slot: Slot, ) -> Result<(), VoteError> { if vote.slots.is_empty() { return Err(VoteError::EmptySlots); @@ -732,7 +766,14 @@ pub fn process_vote( if vote_slots.is_empty() { return Err(VoteError::VotesTooOldAllFiltered); } - process_vote_unfiltered(vote_state, &vote_slots, vote, slot_hashes, epoch) + process_vote_unfiltered( + vote_state, + &vote_slots, + vote, + slot_hashes, + epoch, + current_slot, + ) } /// "unchecked" functions used by tests and Tower @@ -747,6 +788,7 @@ pub fn process_vote_unchecked(vote_state: &mut VoteState, vote: Vote) -> Result< &vote, &slot_hashes, vote_state.current_epoch(), + 0, ) } @@ -994,7 +1036,7 @@ pub fn process_vote_with_account( ) -> Result<(), InstructionError> { let mut vote_state = verify_and_get_vote_state(vote_account, clock, signers)?; - process_vote(&mut vote_state, vote, slot_hashes, clock.epoch)?; + process_vote(&mut vote_state, vote, slot_hashes, clock.epoch, clock.slot)?; if let Some(timestamp) = vote.timestamp { vote.slots .iter() @@ -1018,6 +1060,7 @@ pub fn process_vote_state_update( &mut vote_state, 
slot_hashes, clock.epoch, + clock.slot, vote_state_update, Some(feature_set), )?; @@ -1028,16 +1071,22 @@ pub fn do_process_vote_state_update( vote_state: &mut VoteState, slot_hashes: &[SlotHash], epoch: u64, + slot: u64, mut vote_state_update: VoteStateUpdate, feature_set: Option<&FeatureSet>, ) -> Result<(), VoteError> { check_update_vote_state_slots_are_valid(vote_state, &mut vote_state_update, slot_hashes)?; process_new_vote_state( vote_state, - vote_state_update.lockouts, + vote_state_update + .lockouts + .iter() + .map(|lockout| LandedVote::from(*lockout)) + .collect(), vote_state_update.root, vote_state_update.timestamp, epoch, + slot, feature_set, ) } @@ -1169,7 +1218,7 @@ mod tests { 134, 135, ] .into_iter() - .for_each(|v| vote_state.process_next_vote_slot(v, 4)); + .for_each(|v| vote_state.process_next_vote_slot(v, 4, 0)); let version1_14_11_serialized = bincode::serialize(&VoteStateVersions::V1_14_11(Box::new( VoteState1_14_11::from(vote_state.clone()), @@ -1461,11 +1510,11 @@ mod tests { let slot_hashes: Vec<_> = vote.slots.iter().rev().map(|x| (*x, vote.hash)).collect(); assert_eq!( - process_vote(&mut vote_state_a, &vote, &slot_hashes, 0), + process_vote(&mut vote_state_a, &vote, &slot_hashes, 0, 0), Ok(()) ); assert_eq!( - process_vote(&mut vote_state_b, &vote, &slot_hashes, 0), + process_vote(&mut vote_state_b, &vote, &slot_hashes, 0, 0), Ok(()) ); assert_eq!(recent_votes(&vote_state_a), recent_votes(&vote_state_b)); @@ -1478,12 +1527,12 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(0, vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), Ok(()) ); let recent = recent_votes(&vote_state); assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), Err(VoteError::VoteTooOld) ); assert_eq!(recent, recent_votes(&vote_state)); @@ -1543,7 +1592,7 @@ 
mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), Ok(()) ); assert_eq!( @@ -1559,7 +1608,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), Ok(()) ); @@ -1578,7 +1627,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), Ok(()) ); @@ -1595,14 +1644,14 @@ mod tests { let vote = Vote::new(vec![], Hash::default()); assert_eq!( - process_vote(&mut vote_state, &vote, &[], 0), + process_vote(&mut vote_state, &vote, &[], 0, 0), Err(VoteError::EmptySlots) ); } - pub fn process_new_vote_state_from_votes( + pub fn process_new_vote_state_from_lockouts( vote_state: &mut VoteState, - new_state: VecDeque, + new_state: VecDeque, new_root: Option, timestamp: Option, epoch: Epoch, @@ -1610,10 +1659,11 @@ mod tests { ) -> Result<(), VoteError> { process_new_vote_state( vote_state, - new_state.into_iter().map(|vote| vote.lockout).collect(), + new_state.into_iter().map(LandedVote::from).collect(), new_root, timestamp, epoch, + 0, feature_set, ) } @@ -1680,12 +1730,13 @@ mod tests { // Now use the resulting new vote state to perform a vote state update on vote_state assert_eq!( - process_new_vote_state_from_votes( + process_new_vote_state( &mut vote_state, vote_state_after_vote.votes, vote_state_after_vote.root_slot, None, 0, + 0, Some(&feature_set) ), Ok(()) @@ -1699,6 +1750,304 @@ mod tests { } } + // Test vote credit updates after "timely vote 
credits" feature is enabled + #[test] + fn test_timely_credits() { + // Each of the following (Vec, Slot, u32) tuples gives a set of slots to cast votes on, a slot in which + // the vote was cast, and the number of credits that should have been earned by the vote account after this + // and all prior votes were cast. + let test_vote_groups: Vec<(Vec, Slot, u32)> = vec![ + // Initial set of votes that don't dequeue any slots, so no credits earned + ( + vec![1, 2, 3, 4, 5, 6, 7, 8], + 9, + // root: none, no credits earned + 0, + ), + ( + vec![ + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, + ], + 34, + // lockouts full + // root: none, no credits earned + 0, + ), + // Now a single vote which should result in the first root and first credit earned + ( + vec![32], + 35, + // root: 1 + // when slot 1 was voted on in slot 9, it earned 2 credits + 2, + ), + // Now another vote, should earn one credit + ( + vec![33], + 36, + // root: 2 + // when slot 2 was voted on in slot 9, it earned 3 credits + 2 + 3, // 5 + ), + // Two votes in sequence + ( + vec![34, 35], + 37, + // root: 4 + // when slots 3 and 4 were voted on in slot 9, they earned 4 and 5 credits + 5 + 4 + 5, // 14 + ), + // 3 votes in sequence + ( + vec![36, 37, 38], + 39, + // root: 7 + // slots 5, 6, and 7 earned 6, 7, and 8 credits when voted in slot 9 + 14 + 6 + 7 + 8, // 35 + ), + ( + // 30 votes in sequence + vec![ + 39, 40, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + ], + 69, + // root: 37 + // slot 8 was voted in slot 9, earning 8 credits + // slots 9 - 25 earned 1 credit when voted in slot 34 + // slot 26, 27, 28, 29, 30, 31 earned 2, 3, 4, 5, 6, 7 credits when voted in slot 34 + // slot 32 earned 7 credits when voted in slot 35 + // slot 33 earned 7 credits when voted in slot 36 + // slot 34 and 35 earned 7 and 8 credits when voted in slot 37 + // slot 36 and 37 earned 7 and 8 
credits when voted in slot 39 + 35 + 8 + ((25 - 9) + 1) + 2 + 3 + 4 + 5 + 6 + 7 + 7 + 7 + 7 + 8 + 7 + 8, // 131 + ), + // 31 votes in sequence + ( + vec![ + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + ], + 100, + // root: 68 + // slot 38 earned 8 credits when voted in slot 39 + // slot 39 - 60 earned 1 credit each when voted in slot 69 + // slot 61, 62, 63, 64, 65, 66, 67, 68 earned 2, 3, 4, 5, 6, 7, 8, and 8 credits when + // voted in slot 69 + 131 + 8 + ((60 - 39) + 1) + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 8, // 204 + ), + // Votes with expiry + ( + vec![115, 116, 117, 118, 119, 120, 121, 122, 123, 124], + 130, + // root: 74 + // slots 96 - 114 expire + // slots 69 - 74 earned 1 credit when voted in slot 100 + 204 + ((74 - 69) + 1), // 210 + ), + // More votes with expiry of a large number of votes + ( + vec![200, 201], + 202, + // root: 74 + // slots 119 - 124 expire + 210, + ), + ( + vec![ + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, + ], + 227, + // root: 95 + // slot 75 - 91 earned 1 credit each when voted in slot 100 + // slot 92, 93, 94, 95 earned 2, 3, 4, 5, credits when voted in slot 100 + 210 + ((91 - 75) + 1) + 2 + 3 + 4 + 5, // 241 + ), + ( + vec![227, 228, 229, 230, 231, 232, 233, 234, 235, 236], + 237, + // root: 205 + // slot 115 - 118 earned 1 credit when voted in slot 130 + // slot 200 and 201 earned 8 credits when voted in slot 202 + // slots 202 - 205 earned 1 credit when voted in slot 227 + 241 + 1 + 1 + 1 + 1 + 8 + 8 + 1 + 1 + 1 + 1, // 265 + ), + ]; + + let mut feature_set = FeatureSet::default(); + feature_set.activate(&feature_set::timely_vote_credits::id(), 1); + + // For each vote group, process all vote groups leading up to it and it itself, and ensure that the number of + // credits earned is correct for both regular votes and vote state updates + for i in 
0..test_vote_groups.len() { + // Create a new VoteState for vote transaction + let mut vote_state_1 = VoteState::new(&VoteInit::default(), &Clock::default()); + // Create a new VoteState for vote state update transaction + let mut vote_state_2 = VoteState::new(&VoteInit::default(), &Clock::default()); + test_vote_groups.iter().take(i + 1).for_each(|vote_group| { + let vote = Vote { + slots: vote_group.0.clone(), //vote_group.0 is the set of slots to cast votes on + hash: Hash::new_unique(), + timestamp: None, + }; + let slot_hashes: Vec<_> = + vote.slots.iter().rev().map(|x| (*x, vote.hash)).collect(); + assert_eq!( + process_vote( + &mut vote_state_1, + &vote, + &slot_hashes, + 0, + vote_group.1 // vote_group.1 is the slot in which the vote was cast + ), + Ok(()) + ); + + assert_eq!( + process_new_vote_state( + &mut vote_state_2, + vote_state_1.votes.clone(), + vote_state_1.root_slot, + None, + 0, + vote_group.1, // vote_group.1 is the slot in which the vote was cast + Some(&feature_set) + ), + Ok(()) + ); + }); + + // Ensure that the credits earned is correct for both vote states + let vote_group = &test_vote_groups[i]; + assert_eq!(vote_state_1.credits(), vote_group.2 as u64); // vote_group.2 is the expected number of credits + assert_eq!(vote_state_2.credits(), vote_group.2 as u64); // vote_group.2 is the expected number of credits + } + } + + #[test] + fn test_retroactive_voting_timely_credits() { + // Each of the following (Vec<(Slot, int)>, Slot, Option, u32) tuples gives the following data: + // Vec<(Slot, int)> -- the set of slots and confirmation_counts that is the VoteStateUpdate + // Slot -- the slot in which the VoteStateUpdate occurred + // Option -- the root after processing the VoteStateUpdate + // u32 -- the credits after processing the VoteStateUpdate + #[allow(clippy::type_complexity)] + let test_vote_state_updates: Vec<(Vec<(Slot, u32)>, Slot, Option, u32)> = vec![ + // VoteStateUpdate to set initial vote state + ( + vec![(7, 4), (8, 3), (9, 2), 
(10, 1)], + 11, + // root: none + None, + // no credits earned + 0, + ), + // VoteStateUpdate to include the missing slots *prior to previously included slots* + ( + vec![ + (1, 10), + (2, 9), + (3, 8), + (4, 7), + (5, 6), + (6, 5), + (7, 4), + (8, 3), + (9, 2), + (10, 1), + ], + 12, + // root: none + None, + // no credits earned + 0, + ), + // Now a single VoteStateUpdate which roots all of the slots from 1 - 10 + ( + vec![ + (11, 31), + (12, 30), + (13, 29), + (14, 28), + (15, 27), + (16, 26), + (17, 25), + (18, 24), + (19, 23), + (20, 22), + (21, 21), + (22, 20), + (23, 19), + (24, 18), + (25, 17), + (26, 16), + (27, 15), + (28, 14), + (29, 13), + (30, 12), + (31, 11), + (32, 10), + (33, 9), + (34, 8), + (35, 7), + (36, 6), + (37, 5), + (38, 4), + (39, 3), + (40, 2), + (41, 1), + ], + 42, + // root: 10 + Some(10), + // when slots 1 - 6 were voted on in slot 12, they earned 1, 1, 1, 2, 3, and 4 credits + // when slots 7 - 10 were voted on in slot 11, they earned 6, 7, 8, and 8 credits + 1 + 1 + 1 + 2 + 3 + 4 + 6 + 7 + 8 + 8, + ), + ]; + + let mut feature_set = FeatureSet::default(); + feature_set.activate(&feature_set::timely_vote_credits::id(), 1); + + // Retroactive voting is only possible with VoteStateUpdate transactions, which is why Vote transactions are + // not tested here + + // Initial vote state + let mut vote_state = VoteState::new(&VoteInit::default(), &Clock::default()); + + // Process the vote state updates in sequence and ensure that the credits earned after each is processed is + // correct + test_vote_state_updates + .iter() + .for_each(|vote_state_update| { + let new_state = vote_state_update + .0 // vote_state_update.0 is the set of slots and confirmation_counts that is the VoteStateUpdate + .iter() + .map(|(slot, confirmation_count)| LandedVote { + latency: 0, + lockout: Lockout::new_with_confirmation_count(*slot, *confirmation_count), + }) + .collect::>(); + assert_eq!( + process_new_vote_state( + &mut vote_state, + new_state, + 
vote_state_update.2, // vote_state_update.2 is root after processing the VoteStateUpdate + None, + 0, + vote_state_update.1, // vote_state_update.1 is the slot in which the VoteStateUpdate occurred + Some(&feature_set) + ), + Ok(()) + ); + + // Ensure that the credits earned is correct + assert_eq!(vote_state.credits(), vote_state_update.3 as u64); + }); + } + #[test] fn test_process_new_vote_too_many_votes() { let mut vote_state1 = VoteState::default(); @@ -1713,7 +2062,14 @@ mod tests { let current_epoch = vote_state1.current_epoch(); assert_eq!( - process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + None, + None, + current_epoch, + None + ), Err(VoteError::TooManyVotes) ); } @@ -1736,12 +2092,13 @@ mod tests { let current_epoch = vote_state2.current_epoch(); assert_eq!( - process_new_vote_state_from_votes( + process_new_vote_state( &mut vote_state1, vote_state2.votes.clone(), lesser_root, None, current_epoch, + 0, None, ), Err(VoteError::RootRollBack) @@ -1750,18 +2107,120 @@ mod tests { // Trying to set root to None should error let none_root = None; assert_eq!( - process_new_vote_state_from_votes( + process_new_vote_state( &mut vote_state1, vote_state2.votes.clone(), none_root, None, current_epoch, + 0, None, ), Err(VoteError::RootRollBack) ); } + fn process_new_vote_state_replaced_root_vote_credits( + feature_set: &FeatureSet, + expected_credits: u64, + ) { + let mut vote_state1 = VoteState::default(); + + // Initial vote state: as if 31 votes had occurred on slots 0 - 30 (inclusive) + assert_eq!( + process_new_vote_state_from_lockouts( + &mut vote_state1, + (0..MAX_LOCKOUT_HISTORY) + .enumerate() + .map(|(index, slot)| Lockout::new_with_confirmation_count( + slot as Slot, + (MAX_LOCKOUT_HISTORY.checked_sub(index).unwrap()) as u32 + )) + .collect(), + None, + None, + 0, + Some(feature_set), + ), + Ok(()) + ); + + // Now vote as if new votes on slots 31 
and 32 had occurred, yielding a new Root of 1 + assert_eq!( + process_new_vote_state_from_lockouts( + &mut vote_state1, + (2..(MAX_LOCKOUT_HISTORY.checked_add(2).unwrap())) + .enumerate() + .map(|(index, slot)| Lockout::new_with_confirmation_count( + slot as Slot, + (MAX_LOCKOUT_HISTORY.checked_sub(index).unwrap()) as u32 + )) + .collect(), + Some(1), + None, + 0, + Some(feature_set), + ), + Ok(()) + ); + + // Vote credits should be 2, since two voted-on slots were "popped off the back" of the tower + assert_eq!(vote_state1.credits(), 2); + + // Create a new vote state that represents the validator having not voted for a long time, then voting on + // slots 10001 through 10032 (inclusive) with an entirely new root of 10000 that was never previously voted + // on. This is valid because a vote state can include a root that it never voted on (if it votes after a very + // long delinquency, the new votes will have a root much newer than its most recently voted slot). + assert_eq!( + process_new_vote_state_from_lockouts( + &mut vote_state1, + (10001..(MAX_LOCKOUT_HISTORY.checked_add(10001).unwrap())) + .enumerate() + .map(|(index, slot)| Lockout::new_with_confirmation_count( + slot as Slot, + (MAX_LOCKOUT_HISTORY.checked_sub(index).unwrap()) as u32 + )) + .collect(), + Some(10000), + None, + 0, + Some(feature_set), + ), + Ok(()) + ); + + // The vote is valid, but no vote credits should be awarded because although there is a new root, it does not + // represent a slot previously voted on. + assert_eq!(vote_state1.credits(), expected_credits) + } + + #[test] + fn test_process_new_vote_state_replaced_root_vote_credits() { + let mut feature_set = FeatureSet::default(); + + // Always use allow_votes_to_directly_update_vote_state feature because VoteStateUpdate is being tested + feature_set.activate( + &feature_set::allow_votes_to_directly_update_vote_state::id(), + 1, + ); + + // Test without the timely_vote_credits feature. 
The expected credits here of 34 is *incorrect* but is what + // is expected using vote_state_update_credit_per_dequeue. With this feature, the credits earned will be + // calculated as: + // 2 (from initial vote state) + // + 31 (for votes which were "popped off of the back of the tower" by the new vote + // + 1 (just because there is a new root, even though it was never voted on -- this is the flaw) + feature_set.activate(&feature_set::vote_state_update_credit_per_dequeue::id(), 1); + process_new_vote_state_replaced_root_vote_credits(&feature_set, 34); + + // Now test using the timely_vote_credits feature. The expected credits here of 33 is *correct*. With + // this feature, the credits earned will be calculated as: + // 2 (from initial vote state) + // + 31 (for votes which were "popped off of the back of the tower" by the new vote) + feature_set.activate(&feature_set::timely_vote_credits::id(), 1); + process_new_vote_state_replaced_root_vote_credits(&feature_set, 33); + } + #[test] fn test_process_new_vote_state_zero_confirmations() { let mut vote_state1 = VoteState::default(); @@ -1774,7 +2233,14 @@ mod tests { .into_iter() .collect(); assert_eq!( - process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + None, + None, + current_epoch, + None + ), Err(VoteError::ZeroConfirmations) ); @@ -1785,7 +2251,14 @@ mod tests { .into_iter() .collect(); assert_eq!( - process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + None, + None, + current_epoch, + None + ), Err(VoteError::ZeroConfirmations) ); } @@ -1802,7 +2275,7 @@ mod tests { .into_iter() .collect(); - process_new_vote_state( + process_new_vote_state_from_lockouts( &mut vote_state1, good_votes, None, @@ -1820,7 +2293,14 @@ mod tests { .into_iter() .collect(); assert_eq!( - process_new_vote_state(&mut 
vote_state1, bad_votes, None, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + None, + None, + current_epoch, + None + ), Err(VoteError::ConfirmationTooLarge) ); } @@ -1838,7 +2318,7 @@ mod tests { .into_iter() .collect(); assert_eq!( - process_new_vote_state( + process_new_vote_state_from_lockouts( &mut vote_state1, bad_votes, Some(root_slot), @@ -1856,7 +2336,7 @@ mod tests { .into_iter() .collect(); assert_eq!( - process_new_vote_state( + process_new_vote_state_from_lockouts( &mut vote_state1, bad_votes, Some(root_slot), @@ -1880,7 +2360,14 @@ mod tests { .into_iter() .collect(); assert_eq!( - process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + None, + None, + current_epoch, + None + ), Err(VoteError::SlotsNotOrdered) ); @@ -1891,7 +2378,14 @@ mod tests { .into_iter() .collect(); assert_eq!( - process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + None, + None, + current_epoch, + None + ), Err(VoteError::SlotsNotOrdered) ); } @@ -1908,7 +2402,14 @@ mod tests { .into_iter() .collect(); assert_eq!( - process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + None, + None, + current_epoch, + None + ), Err(VoteError::ConfirmationsNotOrdered) ); @@ -1919,7 +2420,14 @@ mod tests { .into_iter() .collect(); assert_eq!( - process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + None, + None, + current_epoch, + None + ), Err(VoteError::ConfirmationsNotOrdered) ); } @@ -1938,7 +2446,14 @@ mod tests { // Slot 7 should have expired slot 0 assert_eq!( - process_new_vote_state(&mut vote_state1, bad_votes, None, 
None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + None, + None, + current_epoch, + None + ), Err(VoteError::NewVoteStateLockoutMismatch) ); } @@ -1953,7 +2468,15 @@ mod tests { ] .into_iter() .collect(); - process_new_vote_state(&mut vote_state1, votes, None, None, current_epoch, None).unwrap(); + process_new_vote_state_from_lockouts( + &mut vote_state1, + votes, + None, + None, + current_epoch, + None, + ) + .unwrap(); let votes: VecDeque = vec![ Lockout::new_with_confirmation_count(0, 4), @@ -1966,7 +2489,14 @@ mod tests { // Should error because newer vote state should not have lower confirmation the same slot // 1 assert_eq!( - process_new_vote_state(&mut vote_state1, votes, None, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + votes, + None, + None, + current_epoch, + None + ), Err(VoteError::ConfirmationRollBack) ); } @@ -1991,12 +2521,13 @@ mod tests { process_slot_vote_unchecked(&mut vote_state2, new_vote as Slot); assert_ne!(vote_state1.root_slot, vote_state2.root_slot); - process_new_vote_state_from_votes( + process_new_vote_state( &mut vote_state1, vote_state2.votes.clone(), vote_state2.root_slot, None, vote_state2.current_epoch(), + 0, None, ) .unwrap(); @@ -2049,12 +2580,13 @@ mod tests { ); // See that on-chain vote state can update properly - process_new_vote_state_from_votes( + process_new_vote_state( &mut vote_state1, vote_state2.votes.clone(), vote_state2.root_slot, None, vote_state2.current_epoch(), + 0, None, ) .unwrap(); @@ -2091,12 +2623,13 @@ mod tests { // See that on-chain vote state can update properly assert_eq!( - process_new_vote_state_from_votes( + process_new_vote_state( &mut vote_state1, vote_state2.votes.clone(), vote_state2.root_slot, None, vote_state2.current_epoch(), + 0, None ), Err(VoteError::LockoutConflict) @@ -2133,12 +2666,13 @@ mod tests { // Both vote states contain `5`, but `5` is not part of the common prefix // of both vote 
states. However, the violation should still be detected. assert_eq!( - process_new_vote_state_from_votes( + process_new_vote_state( &mut vote_state1, vote_state2.votes.clone(), vote_state2.root_slot, None, vote_state2.current_epoch(), + 0, None ), Err(VoteError::LockoutConflict) @@ -2178,12 +2712,13 @@ mod tests { ); // Should be able to update vote_state1 - process_new_vote_state_from_votes( + process_new_vote_state( &mut vote_state1, vote_state2.votes.clone(), vote_state2.root_slot, None, vote_state2.current_epoch(), + 0, None, ) .unwrap(); @@ -2215,7 +2750,14 @@ mod tests { let current_epoch = vote_state1.current_epoch(); assert_eq!( - process_new_vote_state(&mut vote_state1, bad_votes, root, None, current_epoch, None), + process_new_vote_state_from_lockouts( + &mut vote_state1, + bad_votes, + root, + None, + current_epoch, + None + ), Err(VoteError::LockoutConflict) ); @@ -2227,12 +2769,13 @@ mod tests { .collect(); let current_epoch = vote_state1.current_epoch(); - process_new_vote_state_from_votes( + process_new_vote_state( &mut vote_state1, good_votes.clone(), root, None, current_epoch, + 0, None, ) .unwrap(); @@ -2249,7 +2792,7 @@ mod tests { // error with `VotesTooOldAllFiltered` let slot_hashes = vec![(3, Hash::new_unique()), (2, Hash::new_unique())]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), Err(VoteError::VotesTooOldAllFiltered) ); @@ -2263,7 +2806,7 @@ mod tests { .1; let vote = Vote::new(vec![old_vote_slot, vote_slot], vote_slot_hash); - process_vote(&mut vote_state, &vote, &slot_hashes, 0).unwrap(); + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0).unwrap(); assert_eq!( vote_state .votes @@ -2292,7 +2835,8 @@ mod tests { .unwrap() .1; let vote = Vote::new(vote_slots, vote_hash); - process_vote_unfiltered(&mut vote_state, &vote.slots, &vote, slot_hashes, 0).unwrap(); + process_vote_unfiltered(&mut vote_state, &vote.slots, &vote, slot_hashes, 0, 0) + 
.unwrap(); } vote_state @@ -2415,6 +2959,7 @@ mod tests { &mut vote_state, &slot_hashes, 0, + 0, vote_state_update.clone(), Some(&FeatureSet::all_enabled()), ) @@ -2634,6 +3179,7 @@ mod tests { &mut vote_state, &slot_hashes, 0, + 0, vote_state_update, Some(&FeatureSet::all_enabled()), ) @@ -2680,6 +3226,7 @@ mod tests { &mut vote_state, &slot_hashes, 0, + 0, vote_state_update, Some(&FeatureSet::all_enabled()), ) @@ -2739,6 +3286,7 @@ mod tests { &mut vote_state, &slot_hashes, 0, + 0, vote_state_update, Some(&FeatureSet::all_enabled()), ) @@ -2896,6 +3444,7 @@ mod tests { &mut vote_state, &slot_hashes, 0, + 0, vote_state_update, Some(&FeatureSet::all_enabled()), ) @@ -2944,6 +3493,7 @@ mod tests { &mut vote_state, &slot_hashes, 0, + 0, vote_state_update, Some(&FeatureSet::all_enabled()) ), diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index ff70bdee116263..997102b3e6564c 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -7274,12 +7274,16 @@ pub mod tests { .unwrap(); assert_ne!(leader_info.activated_stake, 0); // Subtract one because the last vote always carries over to the next epoch - let expected_credits = TEST_SLOTS_PER_EPOCH - MAX_LOCKOUT_HISTORY as u64 - 1; + // Each slot earned maximum credits + let credits_per_slot = + solana_vote_program::vote_state::VOTE_CREDITS_MAXIMUM_PER_SLOT as u64; + let expected_credits = + (TEST_SLOTS_PER_EPOCH - MAX_LOCKOUT_HISTORY as u64 - 1) * credits_per_slot; assert_eq!( leader_info.epoch_credits, vec![ (0, expected_credits, 0), - (1, expected_credits + 1, expected_credits) // one vote in current epoch + (1, expected_credits + credits_per_slot, expected_credits) // one vote in current epoch ] ); diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 5d7e14a70cddfc..6d77d3ab5d9dda 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -35,6 +35,12 @@ pub const MAX_EPOCH_CREDITS_HISTORY: usize = 64; // Offset of VoteState::prior_voters, for determining 
initialization status without deserialization const DEFAULT_PRIOR_VOTERS_OFFSET: usize = 114; +// Number of slots of grace period for which maximum vote credits are awarded - votes landing within this number of slots of the slot that is being voted on are awarded full credits. +pub const VOTE_CREDITS_GRACE_SLOTS: u8 = 2; + +// Maximum number of credits to award for a vote; this number of credits is awarded to votes on slots that land within the grace period. After that grace period, vote credits are reduced. +pub const VOTE_CREDITS_MAXIMUM_PER_SLOT: u8 = 8; + #[frozen_abi(digest = "Ch2vVEwos2EjAVqSHCyJjnN2MNX1yrpapZTGhMSCjWUH")] #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] pub struct Vote { @@ -419,7 +425,12 @@ impl VoteState { } } - pub fn process_next_vote_slot(&mut self, next_vote_slot: Slot, epoch: Epoch) { + pub fn process_next_vote_slot( + &mut self, + next_vote_slot: Slot, + epoch: Epoch, + current_slot: Slot, + ) { // Ignore votes for slots earlier than we already have votes for if self .last_voted_slot() @@ -428,18 +439,22 @@ impl VoteState { return; } - let lockout = Lockout::new(next_vote_slot); - self.pop_expired_votes(next_vote_slot); + let landed_vote = LandedVote { + latency: Self::compute_vote_latency(next_vote_slot, current_slot), + lockout: Lockout::new(next_vote_slot), + }; + // Once the stack is full, pop the oldest lockout and distribute rewards if self.votes.len() == MAX_LOCKOUT_HISTORY { - let vote = self.votes.pop_front().unwrap(); - self.root_slot = Some(vote.slot()); + let credits = self.credits_for_vote_at_index(0); + let landed_vote = self.votes.pop_front().unwrap(); + self.root_slot = Some(landed_vote.slot()); - self.increment_credits(epoch, 1); + self.increment_credits(epoch, credits); } - self.votes.push_back(lockout.into()); + self.votes.push_back(landed_vote); self.double_lockouts(); } @@ -472,6 +487,43 @@ impl VoteState { self.epoch_credits.last().unwrap().1.saturating_add(credits); } + // 
Computes the vote latency for vote on voted_for_slot where the vote itself landed in current_slot + pub fn compute_vote_latency(voted_for_slot: Slot, current_slot: Slot) -> u8 { + std::cmp::min(current_slot.saturating_sub(voted_for_slot), u8::MAX as u64) as u8 + } + + /// Returns the credits to award for a vote at the given lockout slot index + pub fn credits_for_vote_at_index(&self, index: usize) -> u64 { + let latency = self + .votes + .get(index) + .map_or(0, |landed_vote| landed_vote.latency); + + // If latency is 0, this means that the Lockout was created and stored from a software version that did not + // store vote latencies; in this case, 1 credit is awarded + if latency == 0 { + 1 + } else { + match latency.checked_sub(VOTE_CREDITS_GRACE_SLOTS) { + None | Some(0) => { + // latency was <= VOTE_CREDITS_GRACE_SLOTS, so maximum credits are awarded + VOTE_CREDITS_MAXIMUM_PER_SLOT as u64 + } + + Some(diff) => { + // diff = latency - VOTE_CREDITS_GRACE_SLOTS, and diff > 0 + // Subtract diff from VOTE_CREDITS_MAXIMUM_PER_SLOT which is the number of credits to award + match VOTE_CREDITS_MAXIMUM_PER_SLOT.checked_sub(diff) { + // If diff >= VOTE_CREDITS_MAXIMUM_PER_SLOT, 1 credit is awarded + None | Some(0) => 1, + + Some(credits) => credits as u64, + } + } + } + } + } + pub fn nth_recent_lockout(&self, position: usize) -> Option<&Lockout> { if position < self.votes.len() { let pos = self diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 386a6969d2fd3c..2ce873fe5f3d91 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -687,6 +687,10 @@ pub mod enable_poseidon_syscall { solana_sdk::declare_id!("FL9RsQA6TVUoh5xJQ9d936RHSebA1NLQqe3Zv9sXZRpr"); } +pub mod timely_vote_credits { + solana_sdk::declare_id!("2oXpeh141pPZCTCFHBsvCwG2BtaHZZAtrVhwaxSy6brS"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -851,6 +855,7 @@ lazy_static! 
{ (reduce_stake_warmup_cooldown::id(), "reduce stake warmup cooldown from 25% to 9%"), (revise_turbine_epoch_stakes::id(), "revise turbine epoch stakes"), (enable_poseidon_syscall::id(), "Enable Poseidon syscall"), + (timely_vote_credits::id(), "use timeliness of votes in determining credits to award"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 3ffd78f2d79d451560d0e09432713417a65e8dda Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 12 Sep 2023 13:51:27 -0700 Subject: [PATCH 073/407] add comments (#33226) --- accounts-db/src/accounts_db.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2857180bf2ece5..deae0d0ce40648 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -630,7 +630,9 @@ struct GenerateIndexTimings { #[derive(Default, Debug, PartialEq, Eq)] struct StorageSizeAndCount { + /// total size stored, including both alive and dead bytes pub stored_size: usize, + /// number of accounts in the storage including both alive and dead accounts pub count: usize, } type StorageSizeAndCountMap = DashMap; From 99ef41160096793664cbc90dfa05fcd551a571b6 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 13 Sep 2023 12:12:20 +0800 Subject: [PATCH 074/407] ci: move solana-client-test to nextest (#33185) --- Cargo.lock | 1 + ci/stable/run-partition.sh | 1 - client-test/Cargo.toml | 1 + client-test/tests/client.rs | 8 +++----- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1cd118299136d5..829336f9ee5a89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5646,6 +5646,7 @@ name = "solana-client-test" version = "1.17.0" dependencies = [ "futures-util", + "rand 0.8.5", "serde_json", "solana-client", "solana-ledger", diff --git a/ci/stable/run-partition.sh b/ci/stable/run-partition.sh index 0c4738c80a8a25..6fbbb0e47fcf83 100755 --- a/ci/stable/run-partition.sh +++ 
b/ci/stable/run-partition.sh @@ -31,7 +31,6 @@ if [ ! "$LIMIT" -gt "$INDEX" ]; then fi DONT_USE_NEXTEST_PACKAGES=( - solana-client-test solana-cargo-build-sbf solana-core ) diff --git a/client-test/Cargo.toml b/client-test/Cargo.toml index ac6fabf0a2cba8..5a88e4d114fb84 100644 --- a/client-test/Cargo.toml +++ b/client-test/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } [dependencies] futures-util = { workspace = true } +rand = { workspace = true } serde_json = { workspace = true } solana-client = { workspace = true } solana-ledger = { workspace = true } diff --git a/client-test/tests/client.rs b/client-test/tests/client.rs index 00c075c228eeaa..65acd1adaae39d 100644 --- a/client-test/tests/client.rs +++ b/client-test/tests/client.rs @@ -1,5 +1,6 @@ use { futures_util::StreamExt, + rand::Rng, serde_json::{json, Value}, solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}, solana_pubsub_client::{nonblocking, pubsub_client::PubsubClient}, @@ -28,7 +29,6 @@ use { commitment_config::{CommitmentConfig, CommitmentLevel}, native_token::sol_to_lamports, pubkey::Pubkey, - rpc_port, signature::{Keypair, Signer}, system_program, system_transaction, }, @@ -41,7 +41,7 @@ use { collections::HashSet, net::{IpAddr, SocketAddr}, sync::{ - atomic::{AtomicBool, AtomicU16, AtomicU64, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, RwLock, }, thread::sleep, @@ -51,12 +51,10 @@ use { tungstenite::connect, }; -static NEXT_RPC_PUBSUB_PORT: AtomicU16 = AtomicU16::new(rpc_port::DEFAULT_RPC_PUBSUB_PORT); - fn pubsub_addr() -> SocketAddr { SocketAddr::new( IpAddr::V4(Ipv4Addr::UNSPECIFIED), - NEXT_RPC_PUBSUB_PORT.fetch_add(1, Ordering::Relaxed), + rand::thread_rng().gen_range(1024..65535), ) } From 1cc681dd83300526220785a23db19f0dbebdfb4f Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Tue, 12 Sep 2023 21:27:38 -0700 Subject: [PATCH 075/407] [clap-v3-utils] Add replace deprecated `value_of` and `is_present` with `get_one` and `contains_id` (#33184) * add 
try variants to input parsers * replace deprecated `value_of` and `is_present` with `get_one` and `contains_id` --- clap-v3-utils/src/input_parsers.rs | 55 +++++++++++++++++++-- clap-v3-utils/src/keygen/derivation_path.rs | 5 +- clap-v3-utils/src/keygen/mnemonic.rs | 8 ++- clap-v3-utils/src/keygen/mod.rs | 2 +- clap-v3-utils/src/keypair.rs | 12 ++--- 5 files changed, 66 insertions(+), 16 deletions(-) diff --git a/clap-v3-utils/src/input_parsers.rs b/clap-v3-utils/src/input_parsers.rs index a8f18e20494b73..03b3ba3be1f35d 100644 --- a/clap-v3-utils/src/input_parsers.rs +++ b/clap-v3-utils/src/input_parsers.rs @@ -14,7 +14,7 @@ use { pubkey::Pubkey, signature::{read_keypair_file, Keypair, Signature, Signer}, }, - std::{rc::Rc, str::FromStr}, + std::{error, rc::Rc, str::FromStr}, }; // Sentinel value used to indicate to write to screen instead of file @@ -69,6 +69,15 @@ pub fn keypair_of(matches: &ArgMatches, name: &str) -> Option { } } +// Return the keypair for an argument with filename `name` or `None` if not present wrapped inside `Result`. 
+pub fn try_keypair_of( + matches: &ArgMatches, + name: &str, +) -> Result, Box> { + matches.try_contains_id(name)?; + Ok(keypair_of(matches, name)) +} + pub fn keypairs_of(matches: &ArgMatches, name: &str) -> Option> { matches.values_of(name).map(|values| { values @@ -84,12 +93,30 @@ pub fn keypairs_of(matches: &ArgMatches, name: &str) -> Option> { }) } +pub fn try_keypairs_of( + matches: &ArgMatches, + name: &str, +) -> Result>, Box> { + matches.try_contains_id(name)?; + Ok(keypairs_of(matches, name)) +} + // Return a pubkey for an argument that can itself be parsed into a pubkey, // or is a filename that can be read as a keypair pub fn pubkey_of(matches: &ArgMatches, name: &str) -> Option { value_of(matches, name).or_else(|| keypair_of(matches, name).map(|keypair| keypair.pubkey())) } +// Return a `Result` wrapped pubkey for an argument that can itself be parsed into a pubkey, +// or is a filename that can be read as a keypair +pub fn try_pubkey_of( + matches: &ArgMatches, + name: &str, +) -> Result, Box> { + matches.try_contains_id(name)?; + Ok(pubkey_of(matches, name)) +} + pub fn pubkeys_of(matches: &ArgMatches, name: &str) -> Option> { matches.values_of(name).map(|values| { values @@ -104,6 +131,14 @@ pub fn pubkeys_of(matches: &ArgMatches, name: &str) -> Option> { }) } +pub fn try_pubkeys_of( + matches: &ArgMatches, + name: &str, +) -> Result>, Box> { + matches.try_contains_id(name)?; + Ok(pubkeys_of(matches, name)) +} + // Return pubkey/signature pairs for a string of the form pubkey=signature pub fn pubkeys_sigs_of(matches: &ArgMatches, name: &str) -> Option> { matches.values_of(name).map(|values| { @@ -118,6 +153,16 @@ pub fn pubkeys_sigs_of(matches: &ArgMatches, name: &str) -> Option Result>, Box> { + matches.try_contains_id(name)?; + Ok(pubkeys_sigs_of(matches, name)) +} + // Return a signer from matches at `name` #[allow(clippy::type_complexity)] pub fn signer_of( @@ -125,7 +170,7 @@ pub fn signer_of( name: &str, wallet_manager: &mut Option>, ) -> 
Result<(Option>, Option), Box> { - if let Some(location) = matches.value_of(name) { + if let Some(location) = matches.try_get_one::(name)? { let signer = signer_from_path(matches, location, name, wallet_manager)?; let signer_pubkey = signer.pubkey(); Ok((Some(signer), Some(signer_pubkey))) @@ -139,7 +184,7 @@ pub fn pubkey_of_signer( name: &str, wallet_manager: &mut Option>, ) -> Result, Box> { - if let Some(location) = matches.value_of(name) { + if let Some(location) = matches.try_get_one::(name)? { Ok(Some(pubkey_from_path( matches, location, @@ -156,7 +201,7 @@ pub fn pubkeys_of_multiple_signers( name: &str, wallet_manager: &mut Option>, ) -> Result>, Box> { - if let Some(pubkey_matches) = matches.values_of(name) { + if let Some(pubkey_matches) = matches.try_get_many::(name)? { let mut pubkeys: Vec = vec![]; for signer in pubkey_matches { pubkeys.push(pubkey_from_path(matches, signer, name, wallet_manager)?); @@ -174,7 +219,7 @@ pub fn resolve_signer( ) -> Result, Box> { resolve_signer_from_path( matches, - matches.value_of(name).unwrap(), + matches.try_get_one::(name)?.unwrap(), name, wallet_manager, ) diff --git a/clap-v3-utils/src/keygen/derivation_path.rs b/clap-v3-utils/src/keygen/derivation_path.rs index 9c57323d0d5501..5e0d79a1cf38da 100644 --- a/clap-v3-utils/src/keygen/derivation_path.rs +++ b/clap-v3-utils/src/keygen/derivation_path.rs @@ -22,10 +22,11 @@ pub fn derivation_path_arg<'a>() -> Arg<'a> { pub fn acquire_derivation_path( matches: &ArgMatches, ) -> Result, Box> { - if matches.is_present("derivation_path") { + if matches.try_contains_id("derivation_path")? { Ok(Some(DerivationPath::from_absolute_path_str( matches - .value_of("derivation_path") + .try_get_one::("derivation_path")? 
+ .map(|path| path.as_str()) .unwrap_or(DEFAULT_DERIVATION_PATH), )?)) } else { diff --git a/clap-v3-utils/src/keygen/mnemonic.rs b/clap-v3-utils/src/keygen/mnemonic.rs index c7c1d4d08508e9..5813e535098504 100644 --- a/clap-v3-utils/src/keygen/mnemonic.rs +++ b/clap-v3-utils/src/keygen/mnemonic.rs @@ -62,7 +62,11 @@ pub fn no_passphrase_arg<'a>() -> Arg<'a> { } pub fn acquire_language(matches: &ArgMatches) -> Language { - match matches.value_of(LANGUAGE_ARG.name).unwrap() { + match matches + .get_one::(LANGUAGE_ARG.name) + .unwrap() + .as_str() + { "english" => Language::English, "chinese-simplified" => Language::ChineseSimplified, "chinese-traditional" => Language::ChineseTraditional, @@ -82,7 +86,7 @@ pub fn no_passphrase_and_message() -> (String, String) { pub fn acquire_passphrase_and_message( matches: &ArgMatches, ) -> Result<(String, String), Box> { - if matches.is_present(NO_PASSPHRASE_ARG.name) { + if matches.try_contains_id(NO_PASSPHRASE_ARG.name)? { Ok(no_passphrase_and_message()) } else { match prompt_passphrase( diff --git a/clap-v3-utils/src/keygen/mod.rs b/clap-v3-utils/src/keygen/mod.rs index ec74df38237e46..f73700517c3248 100644 --- a/clap-v3-utils/src/keygen/mod.rs +++ b/clap-v3-utils/src/keygen/mod.rs @@ -38,7 +38,7 @@ pub fn check_for_overwrite( outfile: &str, matches: &ArgMatches, ) -> Result<(), Box> { - let force = matches.is_present("force"); + let force = matches.try_contains_id("force")?; if !force && Path::new(outfile).exists() { let err_msg = format!("Refusing to overwrite {outfile} without --force flag"); return Err(err_msg.into()); diff --git a/clap-v3-utils/src/keypair.rs b/clap-v3-utils/src/keypair.rs index 7ab48621b89da8..886deabfb4ce73 100644 --- a/clap-v3-utils/src/keypair.rs +++ b/clap-v3-utils/src/keypair.rs @@ -765,7 +765,7 @@ pub fn signer_from_path_with_config( } = parse_signer_source(path)?; match kind { SignerSourceKind::Prompt => { - let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); + let 
skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; Ok(Box::new(keypair_from_seed_phrase( keypair_name, skip_validation, @@ -809,7 +809,7 @@ pub fn signer_from_path_with_config( .and_then(|presigners| presigner_from_pubkey_sigs(&pubkey, presigners)); if let Some(presigner) = presigner { Ok(Box::new(presigner)) - } else if config.allow_null_signer || matches.is_present(SIGN_ONLY_ARG.name) { + } else if config.allow_null_signer || matches.try_contains_id(SIGN_ONLY_ARG.name)? { Ok(Box::new(NullSigner::new(&pubkey))) } else { Err(std::io::Error::new( @@ -885,7 +885,7 @@ pub fn resolve_signer_from_path( } = parse_signer_source(path)?; match kind { SignerSourceKind::Prompt => { - let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); + let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; // This method validates the seed phrase, but returns `None` because there is no path // on disk or to a device keypair_from_seed_phrase( @@ -1004,7 +1004,7 @@ pub fn keypair_from_path( keypair_name: &str, confirm_pubkey: bool, ) -> Result> { - let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); + let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; let keypair = encodable_key_from_path(path, keypair_name, skip_validation)?; if confirm_pubkey { confirm_encodable_keypair_pubkey(&keypair, "pubkey"); @@ -1052,7 +1052,7 @@ pub fn elgamal_keypair_from_path( elgamal_keypair_name: &str, confirm_pubkey: bool, ) -> Result> { - let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); + let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; let elgamal_keypair = encodable_key_from_path(path, elgamal_keypair_name, skip_validation)?; if confirm_pubkey { confirm_encodable_keypair_pubkey(&elgamal_keypair, "ElGamal pubkey"); @@ -1107,7 +1107,7 @@ pub fn ae_key_from_path( path: &str, key_name: &str, ) -> 
Result> { - let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); + let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; encodable_key_from_path(path, key_name, skip_validation) } From ec9b30965ef8a16224b32f1f6cde79180918bc33 Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Wed, 13 Sep 2023 11:20:40 +0200 Subject: [PATCH 076/407] Increase loaded accounts data size when padding program is used (#33219) * Increase loaded accounts data size when padding program is used * fix dos tool accordingly --- bench-tps/src/bench.rs | 58 +++++++++++++++++++++++++----------- bench-tps/src/keypairs.rs | 18 +++++++---- bench-tps/src/main.rs | 1 + bench-tps/tests/bench_tps.rs | 2 ++ dos/src/main.rs | 17 +++++++---- 5 files changed, 67 insertions(+), 29 deletions(-) diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index df60a24ff9f184..53288abd2dfa64 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -72,9 +72,18 @@ pub fn max_lamports_for_prioritization(compute_unit_price: &Option u32 { + if enable_padding { + TRANSFER_TRANSACTION_LOADED_ACCOUNTS_DATA_SIZE + PADDING_PROGRAM_ACCOUNT_DATA_SIZE + } else { + TRANSFER_TRANSACTION_LOADED_ACCOUNTS_DATA_SIZE + } +} pub type TimestampedTransaction = (Transaction, Option); pub type SharedTransactions = Arc>>>; @@ -610,18 +619,18 @@ fn transfer_with_compute_unit_price_and_padding( } else { transfer_instruction }; - let mut instructions = vec![instruction]; + let mut instructions = vec![ + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( + get_transaction_loaded_accounts_data_size(instruction_padding_config.is_some()), + ), + instruction, + ]; if let Some(compute_unit_price) = compute_unit_price { instructions.extend_from_slice(&[ ComputeBudgetInstruction::set_compute_unit_limit(TRANSFER_TRANSACTION_COMPUTE_UNIT), ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price), ]) } - instructions.extend_from_slice(&[ - 
ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( - TRANSFER_TRANSACTION_LOADED_ACCOUNTS_DATA_SIZE, - ), - ]); let message = Message::new(&instructions, Some(&from_pubkey)); Transaction::new(&[from_keypair], message, recent_blockhash) } @@ -708,12 +717,12 @@ fn nonced_transfer_with_padding( } else { transfer_instruction }; - let mut instructions = vec![instruction]; - instructions.extend_from_slice(&[ + let instructions = vec![ ComputeBudgetInstruction::set_loaded_accounts_data_size_limit( - TRANSFER_TRANSACTION_LOADED_ACCOUNTS_DATA_SIZE, + get_transaction_loaded_accounts_data_size(instruction_padding_config.is_some()), ), - ]); + instruction, + ]; let message = Message::new_with_nonce( instructions, Some(&from_pubkey), @@ -1028,13 +1037,21 @@ pub fn generate_and_fund_keypairs Result> { let rent = client.get_minimum_balance_for_rent_exemption(0)?; let lamports_per_account = lamports_per_account + rent; info!("Creating {} keypairs...", keypair_count); let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64); - fund_keypairs(client, funding_key, &keypairs, extra, lamports_per_account)?; + fund_keypairs( + client, + funding_key, + &keypairs, + extra, + lamports_per_account, + enable_padding, + )?; // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys. 
keypairs.truncate(keypair_count); @@ -1048,6 +1065,7 @@ pub fn fund_keypairs( keypairs: &[Keypair], extra: u64, lamports_per_account: u64, + enable_padding: bool, ) -> Result<()> { let rent = client.get_minimum_balance_for_rent_exemption(0)?; info!("Get lamports..."); @@ -1112,7 +1130,7 @@ pub fn fund_keypairs( total, max_fee, lamports_per_account, - TRANSFER_TRANSACTION_LOADED_ACCOUNTS_DATA_SIZE, + get_transaction_loaded_accounts_data_size(enable_padding), ); } Ok(()) @@ -1154,7 +1172,8 @@ mod tests { let keypair_count = config.tx_count * config.keypair_multiplier; let keypairs = - generate_and_fund_keypairs(client.clone(), &config.id, keypair_count, 20).unwrap(); + generate_and_fund_keypairs(client.clone(), &config.id, keypair_count, 20, false) + .unwrap(); do_bench_tps(client, config, keypairs, None); } @@ -1169,7 +1188,8 @@ mod tests { let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap(); let keypairs = - generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports).unwrap(); + generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports, false) + .unwrap(); for kp in &keypairs { assert_eq!( @@ -1193,7 +1213,8 @@ mod tests { let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap(); let keypairs = - generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports).unwrap(); + generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports, false) + .unwrap(); for kp in &keypairs { assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports + rent); @@ -1209,7 +1230,8 @@ mod tests { let lamports = 10_000_000; let authority_keypairs = - generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports).unwrap(); + generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports, false) + .unwrap(); let nonce_keypairs = generate_durable_nonce_accounts(client.clone(), &authority_keypairs); diff --git a/bench-tps/src/keypairs.rs b/bench-tps/src/keypairs.rs index 
e55a8597cc83ea..d5f839190bd638 100644 --- a/bench-tps/src/keypairs.rs +++ b/bench-tps/src/keypairs.rs @@ -16,6 +16,7 @@ pub fn get_keypairs( num_lamports_per_account: u64, client_ids_and_stake_file: &str, read_from_client_file: bool, + enable_padding: bool, ) -> Vec where T: 'static + BenchTpsClient + Send + Sync + ?Sized, @@ -56,6 +57,7 @@ where &keypairs, keypairs.len().saturating_sub(keypair_count) as u64, last_balance, + enable_padding, ) .unwrap_or_else(|e| { eprintln!("Error could not fund keys: {e:?}"); @@ -63,10 +65,16 @@ where }); keypairs } else { - generate_and_fund_keypairs(client, id, keypair_count, num_lamports_per_account) - .unwrap_or_else(|e| { - eprintln!("Error could not fund keys: {e:?}"); - exit(1); - }) + generate_and_fund_keypairs( + client, + id, + keypair_count, + num_lamports_per_account, + enable_padding, + ) + .unwrap_or_else(|e| { + eprintln!("Error could not fund keys: {e:?}"); + exit(1); + }) } } diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index 7969c402f08cbf..519612bd4237a7 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -345,6 +345,7 @@ fn main() { *num_lamports_per_account, client_ids_and_stake_file, *read_from_client_file, + instruction_padding_config.is_some(), ); let nonce_keypairs = if *use_durable_nonce { diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index 6111d550a355ec..e7cabdac44baed 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -95,6 +95,7 @@ fn test_bench_tps_local_cluster(config: Config) { &config.id, keypair_count, lamports_per_account, + false, ) .unwrap(); @@ -140,6 +141,7 @@ fn test_bench_tps_test_validator(config: Config) { &config.id, keypair_count, lamports_per_account, + false, ) .unwrap(); let nonce_keypairs = if config.use_durable_nonce { diff --git a/dos/src/main.rs b/dos/src/main.rs index 40782bdcd50fe5..8e6c3c5b2b11b5 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -550,12 +550,17 @@ fn create_payers( // 
transactions are built to be invalid so the the amount here is arbitrary let funding_key = Keypair::new(); let funding_key = Arc::new(funding_key); - let res = - generate_and_fund_keypairs(client.unwrap().clone(), &funding_key, size, 1_000_000) - .unwrap_or_else(|e| { - eprintln!("Error could not fund keys: {e:?}"); - exit(1); - }); + let res = generate_and_fund_keypairs( + client.unwrap().clone(), + &funding_key, + size, + 1_000_000, + false, + ) + .unwrap_or_else(|e| { + eprintln!("Error could not fund keys: {e:?}"); + exit(1); + }); res.into_iter().map(Some).collect() } else { std::iter::repeat_with(|| None).take(size).collect() From 5562f79cc557943314349081eb0916ac08c0d762 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Wed, 13 Sep 2023 06:25:56 -0700 Subject: [PATCH 077/407] Do not unnecessarily re-verify unloaded program (#32722) * Do not unnecessarily re-verify unloaded program * clippy fixes * new unit test for extract * fixes after rebase * fixes after rebase * address review comments * fix clippy --- ledger-tool/src/program.rs | 3 +- program-runtime/src/loaded_programs.rs | 405 +++++++++++++++++++++---- programs/bpf_loader/src/lib.rs | 39 ++- runtime/src/bank.rs | 66 +++- runtime/src/bank/tests.rs | 4 +- 5 files changed, 426 insertions(+), 91 deletions(-) diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 4349190955546d..ef72c98a4da403 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -365,6 +365,7 @@ fn load_program<'a>( account_size, slot, Arc::new(program_runtime_environment), + false, ); match result { Ok(loaded_program) => match loaded_program.program { @@ -548,7 +549,7 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { .clone(), ); for key in cached_account_keys { - loaded_programs.replenish(key, bank.load_program(&key)); + loaded_programs.replenish(key, bank.load_program(&key, false)); debug!("Loaded program {}", key); } invoke_context.programs_loaded_for_tx_batch = &loaded_programs; 
diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 77246479ed782f..ed34ca523cd12f 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -228,16 +228,76 @@ impl LoadedProgram { elf_bytes: &[u8], account_size: usize, metrics: &mut LoadProgramMetrics, + ) -> Result> { + Self::new_internal( + loader_key, + program_runtime_environment, + deployment_slot, + effective_slot, + maybe_expiration_slot, + elf_bytes, + account_size, + metrics, + false, /* reloading */ + ) + } + + /// Reloads a user program, *without* running the verifier. + /// + /// # Safety + /// + /// This method is unsafe since it assumes that the program has already been verified. Should + /// only be called when the program was previously verified and loaded in the cache, but was + /// unloaded due to inactivity. It should also be checked that the `program_runtime_environment` + /// hasn't changed since it was unloaded. + pub unsafe fn reload( + loader_key: &Pubkey, + program_runtime_environment: Arc>>, + deployment_slot: Slot, + effective_slot: Slot, + maybe_expiration_slot: Option, + elf_bytes: &[u8], + account_size: usize, + metrics: &mut LoadProgramMetrics, + ) -> Result> { + Self::new_internal( + loader_key, + program_runtime_environment, + deployment_slot, + effective_slot, + maybe_expiration_slot, + elf_bytes, + account_size, + metrics, + true, /* reloading */ + ) + } + + fn new_internal( + loader_key: &Pubkey, + program_runtime_environment: Arc>>, + deployment_slot: Slot, + effective_slot: Slot, + maybe_expiration_slot: Option, + elf_bytes: &[u8], + account_size: usize, + metrics: &mut LoadProgramMetrics, + reloading: bool, ) -> Result> { let mut load_elf_time = Measure::start("load_elf_time"); + // The following unused_mut exception is needed for architectures that do not + // support JIT compilation. 
+ #[allow(unused_mut)] let mut executable = Executable::load(elf_bytes, program_runtime_environment.clone())?; load_elf_time.stop(); metrics.load_elf_us = load_elf_time.as_us(); - let mut verify_code_time = Measure::start("verify_code_time"); - executable.verify::()?; - verify_code_time.stop(); - metrics.verify_code_us = verify_code_time.as_us(); + if !reloading { + let mut verify_code_time = Measure::start("verify_code_time"); + executable.verify::()?; + verify_code_time.stop(); + metrics.verify_code_us = verify_code_time.as_us(); + } #[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] { @@ -386,6 +446,12 @@ pub struct LoadedProgramsForTxBatch { pub environments: ProgramRuntimeEnvironments, } +pub struct ExtractedPrograms { + pub loaded: LoadedProgramsForTxBatch, + pub missing: Vec<(Pubkey, u64)>, + pub unloaded: Vec<(Pubkey, u64)>, +} + impl LoadedProgramsForTxBatch { pub fn new(slot: Slot, environments: ProgramRuntimeEnvironments) -> Self { Self { @@ -624,8 +690,6 @@ impl LoadedPrograms { } Self::matches_loaded_program_criteria(entry, match_criteria) - // If the program was unloaded. Consider it as unusable, so it can be reloaded. - && !matches!(entry.program, LoadedProgramType::Unloaded(_)) } /// Extracts a subset of the programs relevant to a transaction batch @@ -634,8 +698,9 @@ impl LoadedPrograms { &self, working_slot: &S, keys: impl Iterator, - ) -> (LoadedProgramsForTxBatch, Vec<(Pubkey, u64)>) { + ) -> ExtractedPrograms { let mut missing = Vec::new(); + let mut unloaded = Vec::new(); let found = keys .filter_map(|(key, (match_criteria, count))| { if let Some(second_level) = self.entries.get(&key) { @@ -650,6 +715,21 @@ impl LoadedPrograms { return None; } + if let LoadedProgramType::Unloaded(environment) = &entry.program { + if Arc::ptr_eq(environment, &self.environments.program_runtime_v1) + || Arc::ptr_eq( + environment, + &self.environments.program_runtime_v2, + ) + { + // if the environment hasn't changed since the entry was unloaded. 
+ unloaded.push((key, count)); + } else { + missing.push((key, count)); + } + return None; + } + if current_slot >= entry.effective_slot { let mut usage_count = entry.tx_usage_counter.load(Ordering::Relaxed); @@ -682,14 +762,15 @@ impl LoadedPrograms { self.stats .hits .fetch_add(found.len() as u64, Ordering::Relaxed); - ( - LoadedProgramsForTxBatch { + ExtractedPrograms { + loaded: LoadedProgramsForTxBatch { entries: found, slot: working_slot.current_slot(), environments: self.environments.clone(), }, missing, - ) + unloaded, + } } pub fn merge(&mut self, tx_batch_cache: &LoadedProgramsForTxBatch) { @@ -838,8 +919,9 @@ impl solana_frozen_abi::abi_example::AbiExample for LoadedPrograms { mod tests { use { crate::loaded_programs::{ - BlockRelation, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, - LoadedPrograms, LoadedProgramsForTxBatch, WorkingSlot, DELAY_VISIBILITY_SLOT_OFFSET, + BlockRelation, ExtractedPrograms, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, + LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, WorkingSlot, + DELAY_VISIBILITY_SLOT_OFFSET, }, assert_matches::assert_matches, percentage::Percentage, @@ -881,9 +963,19 @@ mod tests { slot: Slot, ) -> Arc { let unloaded = Arc::new( - new_test_loaded_program(slot, slot.saturating_add(1)) - .to_unloaded() - .expect("Failed to unload the program"), + LoadedProgram { + program: LoadedProgramType::TestLoaded( + cache.environments.program_runtime_v1.clone(), + ), + account_size: 0, + deployment_slot: slot, + effective_slot: slot.saturating_add(1), + maybe_expiration_slot: None, + tx_usage_counter: AtomicU64::default(), + ix_usage_counter: AtomicU64::default(), + } + .to_unloaded() + .expect("Failed to unload the program"), ); cache.replenish(key, unloaded).1 } @@ -1471,7 +1563,11 @@ mod tests { // Testing fork 0 - 10 - 12 - 22 with current slot at 22 let working_slot = TestWorkingSlot::new(22, &[0, 10, 20, 22]); - let (found, missing) = cache.extract( + let 
ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1487,10 +1583,15 @@ mod tests { assert!(missing.contains(&(program2, 2))); assert!(missing.contains(&(program3, 3))); + assert!(unloaded.is_empty()); // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 16 let mut working_slot = TestWorkingSlot::new(15, &[0, 5, 11, 15, 16, 18, 19, 23]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1511,10 +1612,15 @@ mod tests { assert_eq!(tombstone.deployment_slot, 15); assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); // Testing the same fork above, but current slot is now 18 (equal to effective slot of program4). working_slot.update_slot(18); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1532,10 +1638,15 @@ mod tests { assert!(match_slot(&found, &program4, 15, 18)); assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); // Testing the same fork above, but current slot is now 23 (future slot than effective slot of program4). 
working_slot.update_slot(23); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1553,10 +1664,15 @@ mod tests { assert!(match_slot(&found, &program4, 15, 23)); assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 11 let working_slot = TestWorkingSlot::new(11, &[0, 5, 11, 15, 16]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1575,6 +1691,7 @@ mod tests { assert!(match_slot(&found, &program4, 5, 11)); assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); // The following is a special case, where there's an expiration slot let test_program = Arc::new(LoadedProgram { @@ -1590,7 +1707,11 @@ mod tests { // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 let working_slot = TestWorkingSlot::new(19, &[0, 5, 11, 15, 16, 18, 19, 21, 23]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1607,11 +1728,16 @@ mod tests { assert!(match_slot(&found, &program4, 19, 19)); assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 21 // This would cause program4 deployed at slot 19 to be expired. 
let working_slot = TestWorkingSlot::new(21, &[0, 5, 11, 15, 16, 18, 19, 21, 23]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1627,6 +1753,7 @@ mod tests { assert!(missing.contains(&(program3, 1))); assert!(missing.contains(&(program4, 1))); + assert!(unloaded.is_empty()); // Remove the expired entry to let the rest of the test continue if let Some(programs) = cache.entries.get_mut(&program4) { @@ -1652,7 +1779,11 @@ mod tests { // Testing fork 11 - 15 - 16- 19 - 22 with root at 5 and current slot at 22 let working_slot = TestWorkingSlot::new(22, &[5, 11, 15, 16, 19, 22, 23]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1669,10 +1800,15 @@ mod tests { assert!(match_slot(&found, &program4, 15, 22)); assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27 let working_slot = TestWorkingSlot::new(27, &[11, 25, 27]); - let (found, _missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing: _, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1683,6 +1819,7 @@ mod tests { .into_iter(), ); + assert!(unloaded.is_empty()); assert!(match_slot(&found, &program1, 0, 27)); assert!(match_slot(&found, &program2, 11, 27)); assert!(match_slot(&found, &program3, 25, 27)); @@ -1707,7 +1844,11 @@ mod tests { // Testing fork 16, 19, 23, with root at 15, current slot at 23 let working_slot = TestWorkingSlot::new(23, &[16, 19, 23]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, 
(LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1724,6 +1865,7 @@ mod tests { // program3 was deployed on slot 25, which has been pruned assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); } #[test] @@ -1763,7 +1905,11 @@ mod tests { // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 let working_slot = TestWorkingSlot::new(12, &[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1777,9 +1923,14 @@ mod tests { assert!(match_slot(&found, &program2, 11, 12)); assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); // Test the same fork, but request the program modified at a later slot than what's in the cache. - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ ( @@ -1799,6 +1950,126 @@ mod tests { assert!(missing.contains(&(program1, 1))); assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); + } + + #[test] + fn test_extract_unloaded() { + let mut cache = LoadedPrograms::default(); + + // Fork graph created for the test + // 0 + // / \ + // 10 5 + // | | + // 20 11 + // | | \ + // 22 15 25 + // | | + // 16 27 + // | + // 19 + // | + // 23 + + let mut fork_graph = TestForkGraphSpecific::default(); + fork_graph.insert_fork(&[0, 10, 20, 22]); + fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]); + fork_graph.insert_fork(&[0, 5, 11, 25, 27]); + + let program1 = Pubkey::new_unique(); + assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); + assert!(!cache.replenish(program1, new_test_loaded_program(20, 21)).0); + + let program2 = Pubkey::new_unique(); + assert!(!cache.replenish(program2, new_test_loaded_program(5, 6)).0); + assert!(!cache.replenish(program2, 
new_test_loaded_program(11, 12)).0); + + let program3 = Pubkey::new_unique(); + // Insert an unloaded program with correct/cache's environment at slot 25 + let _ = insert_unloaded_program(&mut cache, program3, 25); + + // Insert another unloaded program with a different environment at slot 20 + // Since this entry's environment won't match cache's environment, looking up this + // entry should return missing instead of unloaded entry. + assert!( + !cache + .replenish( + program3, + Arc::new( + new_test_loaded_program(20, 21) + .to_unloaded() + .expect("Failed to create unloaded program") + ) + ) + .0 + ); + + // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 + let working_slot = TestWorkingSlot::new(19, &[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( + &working_slot, + vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ] + .into_iter(), + ); + + assert!(match_slot(&found, &program1, 0, 19)); + assert!(match_slot(&found, &program2, 11, 19)); + + assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); + + // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27 + let working_slot = TestWorkingSlot::new(27, &[0, 5, 11, 25, 27]); + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( + &working_slot, + vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ] + .into_iter(), + ); + + assert!(match_slot(&found, &program1, 0, 27)); + assert!(match_slot(&found, &program2, 11, 27)); + + assert!(unloaded.contains(&(program3, 1))); + assert!(missing.is_empty()); + + // Testing fork 0 - 10 - 20 - 22 with current slot at 22 + let working_slot = 
TestWorkingSlot::new(22, &[0, 10, 20, 22]); + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( + &working_slot, + vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ] + .into_iter(), + ); + + assert!(match_slot(&found, &program1, 20, 22)); + + assert!(missing.contains(&(program2, 1))); + assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); } #[test] @@ -1850,7 +2121,11 @@ mod tests { // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 let working_slot = TestWorkingSlot::new(12, &[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1865,11 +2140,16 @@ mod tests { assert!(match_slot(&found, &program2, 11, 12)); assert!(missing.contains(&(program3, 1))); + assert!(unloaded.is_empty()); // Testing fork 0 - 5 - 11 - 12 - 15 - 16 - 19 - 21 - 23 with current slot at 15 // This would cause program4 deployed at slot 15 to be expired. 
let working_slot = TestWorkingSlot::new(15, &[0, 5, 11, 15, 16, 18, 19, 21, 23]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1878,6 +2158,7 @@ mod tests { ] .into_iter(), ); + assert!(unloaded.is_empty()); assert!(match_slot(&found, &program2, 11, 15)); @@ -1935,10 +2216,15 @@ mod tests { cache.prune(&fork_graph, 10); let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); - let (found, _missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing: _, + unloaded, + } = cache.extract( &working_slot, vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))].into_iter(), ); + assert!(unloaded.is_empty()); // The cache should have the program deployed at slot 0 assert_eq!( @@ -1977,7 +2263,11 @@ mod tests { assert!(!cache.replenish(program2, new_test_loaded_program(10, 11)).0); let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); - let (found, _missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing: _, + unloaded: _, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1990,7 +2280,11 @@ mod tests { assert!(match_slot(&found, &program2, 10, 20)); let working_slot = TestWorkingSlot::new(6, &[0, 5, 6]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded: _, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2007,7 +2301,11 @@ mod tests { cache.prune_by_deployment_slot(5); let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); - let (found, _missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing: _, + unloaded: _, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2020,7 +2318,11 @@ mod tests { assert!(match_slot(&found, 
&program2, 10, 20)); let working_slot = TestWorkingSlot::new(6, &[0, 5, 6]); - let (found, missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing, + unloaded: _, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2037,7 +2339,11 @@ mod tests { cache.prune_by_deployment_slot(10); let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); - let (found, _missing) = cache.extract( + let ExtractedPrograms { + loaded: found, + missing: _, + unloaded: _, + } = cache.extract( &working_slot, vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2052,35 +2358,6 @@ mod tests { #[test] fn test_usable_entries_for_slot() { - let unloaded_entry = Arc::new( - new_test_loaded_program(0, 0) - .to_unloaded() - .expect("Failed to unload the program"), - ); - assert!(!LoadedPrograms::is_entry_usable( - &unloaded_entry, - 0, - &LoadedProgramMatchCriteria::NoCriteria - )); - - assert!(!LoadedPrograms::is_entry_usable( - &unloaded_entry, - 1, - &LoadedProgramMatchCriteria::NoCriteria - )); - - assert!(!LoadedPrograms::is_entry_usable( - &unloaded_entry, - 1, - &LoadedProgramMatchCriteria::Tombstone - )); - - assert!(!LoadedPrograms::is_entry_usable( - &unloaded_entry, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) - )); - let tombstone = Arc::new(LoadedProgram::new_tombstone(0, LoadedProgramType::Closed)); assert!(LoadedPrograms::is_entry_usable( diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 342d3836321b5c..9a91286327bc3b 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -75,22 +75,39 @@ pub fn load_program_from_bytes( account_size: usize, deployment_slot: Slot, program_runtime_environment: Arc>>, + reloading: bool, ) -> Result { let effective_slot = if feature_set.is_active(&delay_visibility_of_program_deployment::id()) { deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET) } else { deployment_slot }; - let 
loaded_program = LoadedProgram::new( - loader_key, - program_runtime_environment, - deployment_slot, - effective_slot, - None, - programdata, - account_size, - load_program_metrics, - ) + let loaded_program = if reloading { + // Safety: this is safe because the program is being reloaded in the cache. + unsafe { + LoadedProgram::reload( + loader_key, + program_runtime_environment, + deployment_slot, + effective_slot, + None, + programdata, + account_size, + load_program_metrics, + ) + } + } else { + LoadedProgram::new( + loader_key, + program_runtime_environment, + deployment_slot, + effective_slot, + None, + programdata, + account_size, + load_program_metrics, + ) + } .map_err(|err| { ic_logger_msg!(log_collector, "{}", err); InstructionError::InvalidAccountData @@ -123,6 +140,7 @@ macro_rules! deploy_program { $account_size, $slot, Arc::new(program_runtime_environment), + false, )?; if let Some(old_entry) = $invoke_context.find_program_in_cache(&$program_id) { executor.tx_usage_counter.store( @@ -1700,6 +1718,7 @@ pub mod test_utils { account.data().len(), 0, program_runtime_environment.clone(), + false, ) { invoke_context .programs_modified_by_tx diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index daa6bfff186f41..65546bb2055dcf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -276,6 +276,7 @@ pub struct BankRc { #[cfg(RUSTC_WITH_SPECIALIZATION)] use solana_frozen_abi::abi_example::AbiExample; +use solana_program_runtime::loaded_programs::ExtractedPrograms; #[cfg(RUSTC_WITH_SPECIALIZATION)] impl AbiExample for BankRc { @@ -4656,7 +4657,7 @@ impl Bank { ProgramAccountLoadResult::InvalidAccountData } - pub fn load_program(&self, pubkey: &Pubkey) -> Arc { + pub fn load_program(&self, pubkey: &Pubkey, reload: bool) -> Arc { let environments = self .loaded_programs_cache .read() @@ -4689,6 +4690,7 @@ impl Bank { program_account.data().len(), 0, environments.program_runtime_v1.clone(), + reload, ) } @@ -4713,6 +4715,7 @@ impl Bank { 
.saturating_add(programdata_account.data().len()), slot, environments.program_runtime_v1.clone(), + reload, ) }), @@ -4721,16 +4724,32 @@ impl Bank { .data() .get(LoaderV4State::program_data_offset()..) .and_then(|elf_bytes| { - LoadedProgram::new( - &loader_v4::id(), - environments.program_runtime_v2.clone(), - slot, - slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), - None, - elf_bytes, - program_account.data().len(), - &mut load_program_metrics, - ) + if reload { + // Safety: this is safe because the program is being reloaded in the cache. + unsafe { + LoadedProgram::reload( + &loader_v4::id(), + environments.program_runtime_v2.clone(), + slot, + slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), + None, + elf_bytes, + program_account.data().len(), + &mut load_program_metrics, + ) + } + } else { + LoadedProgram::new( + &loader_v4::id(), + environments.program_runtime_v2.clone(), + slot, + slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), + None, + elf_bytes, + program_account.data().len(), + &mut load_program_metrics, + ) + } .ok() }) .unwrap_or(LoadedProgram::new_tombstone( @@ -4987,17 +5006,31 @@ impl Bank { .collect() }; - let (mut loaded_programs_for_txs, missing_programs) = { + let ExtractedPrograms { + loaded: mut loaded_programs_for_txs, + missing, + unloaded, + } = { // Lock the global cache to figure out which programs need to be loaded let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); loaded_programs_cache.extract(self, programs_and_slots.into_iter()) }; // Load missing programs while global cache is unlocked - let missing_programs: Vec<(Pubkey, Arc)> = missing_programs + let missing_programs: Vec<(Pubkey, Arc)> = missing + .iter() + .map(|(key, count)| { + let program = self.load_program(key, false); + program.tx_usage_counter.store(*count, Ordering::Relaxed); + (*key, program) + }) + .collect(); + + // Reload unloaded programs while global cache is unlocked + let unloaded_programs: Vec<(Pubkey, Arc)> = unloaded .iter() 
.map(|(key, count)| { - let program = self.load_program(key); + let program = self.load_program(key, true); program.tx_usage_counter.store(*count, Ordering::Relaxed); (*key, program) }) @@ -5010,6 +5043,11 @@ impl Bank { // Use the returned entry as that might have been deduplicated globally loaded_programs_for_txs.replenish(key, entry); } + for (key, program) in unloaded_programs { + let (_was_occupied, entry) = loaded_programs_cache.replenish(key, program); + // Use the returned entry as that might have been deduplicated globally + loaded_programs_for_txs.replenish(key, entry); + } loaded_programs_for_txs } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index b0f758a25f82c1..001494e5594e7a 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -7230,7 +7230,7 @@ fn test_bank_load_program() { programdata_account.set_rent_epoch(1); bank.store_account_and_update_capitalization(&key1, &program_account); bank.store_account_and_update_capitalization(&programdata_key, &programdata_account); - let program = bank.load_program(&key1); + let program = bank.load_program(&key1, false); assert_matches!(program.program, LoadedProgramType::LegacyV1(_)); assert_eq!( program.account_size, @@ -7385,7 +7385,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { assert_eq!(*elf.get(i).unwrap(), *byte); } - let loaded_program = bank.load_program(&program_keypair.pubkey()); + let loaded_program = bank.load_program(&program_keypair.pubkey(), false); // Invoke deployed program mock_process_instruction( From c40e88aef971fd78f608844c654503650e38f3cc Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 13 Sep 2023 09:28:19 -0400 Subject: [PATCH 078/407] Adds metrics for fastboot storages kept alive (#33222) --- core/src/accounts_hash_verifier.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 3e453d43603baa..cb87cdc513a90c 100644 --- 
a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -64,7 +64,7 @@ impl AccountsHashVerifier { // To support fastboot, we must ensure the storages used in the latest POST snapshot are // not recycled nor removed early. Hold an Arc of their AppendVecs to prevent them from // expiring. - let mut last_snapshot_storages = None; + let mut fastboot_storages = None; loop { if exit.load(Ordering::Relaxed) { break; @@ -92,6 +92,7 @@ impl AccountsHashVerifier { .is_some() .then(|| accounts_package.snapshot_storages.clone()); + let slot = accounts_package.slot; let (_, handling_time_us) = measure_us!(Self::process_accounts_package( accounts_package, &cluster_info, @@ -103,7 +104,13 @@ impl AccountsHashVerifier { )); if let Some(snapshot_storages_for_fastboot) = snapshot_storages_for_fastboot { - last_snapshot_storages = Some(snapshot_storages_for_fastboot) + let num_storages = snapshot_storages_for_fastboot.len(); + fastboot_storages = Some(snapshot_storages_for_fastboot); + datapoint_info!( + "fastboot", + ("slot", slot, i64), + ("num_storages", num_storages, i64), + ); } datapoint_info!( @@ -122,14 +129,14 @@ impl AccountsHashVerifier { ("handling_time_us", handling_time_us, i64), ); } + info!("AccountsHashVerifier has stopped"); debug!( - "Number of snapshot storages kept alive for fastboot: {}", - last_snapshot_storages + "Number of storages kept alive for fastboot: {}", + fastboot_storages .as_ref() .map(|storages| storages.len()) .unwrap_or(0) ); - info!("AccountsHashVerifier has stopped"); }) .unwrap(); Self { From 525e59f01a7dcc5905f433f1fde110db55fdb876 Mon Sep 17 00:00:00 2001 From: Christian Kamm Date: Wed, 13 Sep 2023 16:57:08 +0200 Subject: [PATCH 079/407] sdk, programs/bpf_loader: add sol_remaining_compute_units syscall (#31640) bpf_loader: add sol_remaining_compute_units syscall Co-authored-by: jonch <9093549+jon-chuang@users.noreply.github.com> --- .../on-chain-programs/developing-c.md | 3 ++ 
.../on-chain-programs/developing-rust.md | 3 ++ program-runtime/src/compute_budget.rs | 3 ++ programs/bpf_loader/src/syscalls/mod.rs | 34 ++++++++++++++- programs/sbf/Cargo.lock | 10 +++++ programs/sbf/Cargo.toml | 1 + programs/sbf/build.rs | 1 + .../remaining_compute_units.c | 23 ++++++++++ .../rust/remaining_compute_units/Cargo.toml | 29 +++++++++++++ .../rust/remaining_compute_units/src/lib.rs | 29 +++++++++++++ .../rust/remaining_compute_units/tests/lib.rs | 27 ++++++++++++ runtime/src/bank.rs | 1 + sdk/program/src/compute_units.rs | 13 ++++++ sdk/program/src/lib.rs | 1 + sdk/program/src/program_stubs.rs | 8 ++++ sdk/program/src/syscalls/definitions.rs | 1 + sdk/sbf/c/inc/sol/compute_units.h | 42 +++++++++++++++++++ sdk/sbf/c/inc/sol/inc/compute_units.inc | 33 +++++++++++++++ sdk/sbf/c/inc/solana_sdk.h | 1 + sdk/src/feature_set.rs | 5 +++ 20 files changed, 266 insertions(+), 2 deletions(-) create mode 100644 programs/sbf/c/src/remaining_compute_units/remaining_compute_units.c create mode 100644 programs/sbf/rust/remaining_compute_units/Cargo.toml create mode 100644 programs/sbf/rust/remaining_compute_units/src/lib.rs create mode 100644 programs/sbf/rust/remaining_compute_units/tests/lib.rs create mode 100644 sdk/program/src/compute_units.rs create mode 100644 sdk/sbf/c/inc/sol/compute_units.h create mode 100644 sdk/sbf/c/inc/sol/inc/compute_units.inc diff --git a/docs/src/developing/on-chain-programs/developing-c.md b/docs/src/developing/on-chain-programs/developing-c.md index 15197f511c8140..cffbd1006bb067 100644 --- a/docs/src/developing/on-chain-programs/developing-c.md +++ b/docs/src/developing/on-chain-programs/developing-c.md @@ -157,6 +157,9 @@ with program logs. ## Compute Budget +Use the system call `sol_remaining_compute_units()` to return a `u64` indicating +the number of compute units remaining for this transaction. 
+ Use the system call [`sol_log_compute_units()`](https://github.com/solana-labs/solana/blob/d3a3a7548c857f26ec2cb10e270da72d373020ec/sdk/sbf/c/inc/solana_sdk.h#L140) to log a message containing the remaining number of compute units the program diff --git a/docs/src/developing/on-chain-programs/developing-rust.md b/docs/src/developing/on-chain-programs/developing-rust.md index 423b214935499c..3e21799222077d 100644 --- a/docs/src/developing/on-chain-programs/developing-rust.md +++ b/docs/src/developing/on-chain-programs/developing-rust.md @@ -355,6 +355,9 @@ fn custom_panic(info: &core::panic::PanicInfo<'_>) { ## Compute Budget +Use the system call `sol_remaining_compute_units()` to return a `u64` indicating +the number of compute units remaining for this transaction. + Use the system call [`sol_log_compute_units()`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/log.rs#L141) to log a message containing the remaining number of compute units the program diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index 02983f9b9ab871..44fb070b3786ae 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -130,6 +130,8 @@ pub struct ComputeBudget { /// of compute units consumed to call poseidon syscall for a given number /// of inputs. pub poseidon_cost_coefficient_c: u64, + /// Number of compute units consumed for accessing the remaining compute units. 
+ pub get_remaining_compute_units_cost: u64, } impl Default for ComputeBudget { @@ -181,6 +183,7 @@ impl ComputeBudget { loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, poseidon_cost_coefficient_a: 61, poseidon_cost_coefficient_c: 542, + get_remaining_compute_units_cost: 100, } } diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index eb31edf3cda0c3..e8a0b70bc12d8f 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -40,8 +40,8 @@ use { enable_early_verification_of_account_modifications, enable_partitioned_epoch_reward, enable_poseidon_syscall, error_on_syscall_bpf_function_hash_collisions, last_restart_slot_sysvar, libsecp256k1_0_5_upgrade_enabled, reject_callx_r10, - stop_sibling_instruction_search_at_parent, stop_truncating_strings_in_syscalls, - switch_to_new_elf_parser, + remaining_compute_units_syscall_enabled, stop_sibling_instruction_search_at_parent, + stop_truncating_strings_in_syscalls, switch_to_new_elf_parser, }, hash::{Hasher, HASH_BYTES}, instruction::{ @@ -164,6 +164,8 @@ pub fn create_program_runtime_environment_v1<'a>( && feature_set.is_active(&disable_deploy_of_alloc_free_syscall::id()); let last_restart_slot_syscall_enabled = feature_set.is_active(&last_restart_slot_sysvar::id()); let enable_poseidon_syscall = feature_set.is_active(&enable_poseidon_syscall::id()); + let remaining_compute_units_syscall_enabled = + feature_set.is_active(&remaining_compute_units_syscall_enabled::id()); // !!! ATTENTION !!! // When adding new features for RBPF here, // also add them to `Bank::apply_builtin_program_feature_transitions()`. 
@@ -335,6 +337,14 @@ pub fn create_program_runtime_environment_v1<'a>( SyscallPoseidon::call, )?; + // Accessing remaining compute units + register_feature_gated_function!( + result, + remaining_compute_units_syscall_enabled, + *b"sol_remaining_compute_units", + SyscallRemainingComputeUnits::call + )?; + // Log data result.register_function_hashed(*b"sol_log_data", SyscallLogData::call)?; @@ -1877,6 +1887,26 @@ declare_syscall!( } ); +declare_syscall!( + /// Read remaining compute units + SyscallRemainingComputeUnits, + fn inner_call( + invoke_context: &mut InvokeContext, + _arg1: u64, + _arg2: u64, + _arg3: u64, + _arg4: u64, + _arg5: u64, + _memory_mapping: &mut MemoryMapping, + ) -> Result { + let budget = invoke_context.get_compute_budget(); + consume_compute_meter(invoke_context, budget.syscall_base_cost)?; + + use solana_rbpf::vm::ContextObject; + Ok(invoke_context.get_remaining()) + } +); + #[cfg(test)] #[allow(clippy::arithmetic_side_effects)] #[allow(clippy::indexing_slicing)] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index ffd939ff5ffc75..2dc3879a46b284 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5889,6 +5889,16 @@ dependencies = [ "solana-sbf-rust-realloc", ] +[[package]] +name = "solana-sbf-rust-remaining-compute-units" +version = "1.17.0" +dependencies = [ + "solana-program", + "solana-program-runtime", + "solana-program-test", + "solana-sdk", +] + [[package]] name = "solana-sbf-rust-ro-account_modify" version = "1.17.0" diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 2ef02e54b8e49d..363a3a4972d589 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -145,6 +145,7 @@ members = [ "rust/rand", "rust/realloc", "rust/realloc_invoke", + "rust/remaining_compute_units", "rust/ro_account_modify", "rust/ro_modify", "rust/sanity", diff --git a/programs/sbf/build.rs b/programs/sbf/build.rs index 8bffd48c9f2578..6bdfb9a4ea949a 100644 --- a/programs/sbf/build.rs +++ 
b/programs/sbf/build.rs @@ -96,6 +96,7 @@ fn main() { "rand", "realloc", "realloc_invoke", + "remaining_compute_units", "ro_modify", "ro_account_modify", "sanity", diff --git a/programs/sbf/c/src/remaining_compute_units/remaining_compute_units.c b/programs/sbf/c/src/remaining_compute_units/remaining_compute_units.c new file mode 100644 index 00000000000000..514529b909754f --- /dev/null +++ b/programs/sbf/c/src/remaining_compute_units/remaining_compute_units.c @@ -0,0 +1,23 @@ +/** + * @brief sol_remaining_compute_units Syscall test + */ +#include +#include + +extern uint64_t entrypoint(const uint8_t *input) { + char buffer[200]; + + int i = 0; + for (; i < 100000; ++i) { + if (i % 500 == 0) { + uint64_t remaining = sol_remaining_compute_units(); + snprintf(buffer, 200, "remaining compute units: %d", (int)remaining); + sol_log(buffer); + if (remaining < 25000) { + break; + } + } + } + + return SUCCESS; +} diff --git a/programs/sbf/rust/remaining_compute_units/Cargo.toml b/programs/sbf/rust/remaining_compute_units/Cargo.toml new file mode 100644 index 00000000000000..223679c07264a4 --- /dev/null +++ b/programs/sbf/rust/remaining_compute_units/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "solana-sbf-rust-remaining-compute-units" +documentation = "https://docs.rs/solana-sbf-rust-remaining-compute-units" +version = { workspace = true } +description = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[features] +no-entrypoint = [] +test-bpf = [] +dummy-for-ci-check = ["test-bpf"] + +[dependencies] +solana-program = { workspace = true } + +[dev-dependencies] +solana-program-runtime = { workspace = true } +solana-program-test = { workspace = true } +solana-sdk = { workspace = true } + +[lib] +crate-type = ["cdylib", "lib"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git 
a/programs/sbf/rust/remaining_compute_units/src/lib.rs b/programs/sbf/rust/remaining_compute_units/src/lib.rs new file mode 100644 index 00000000000000..ecf0376397b519 --- /dev/null +++ b/programs/sbf/rust/remaining_compute_units/src/lib.rs @@ -0,0 +1,29 @@ +//! @brief Example Rust-based BPF program that exercises the sol_remaining_compute_units syscall + +extern crate solana_program; +use solana_program::{ + account_info::AccountInfo, compute_units::sol_remaining_compute_units, + entrypoint::ProgramResult, msg, pubkey::Pubkey, +}; +solana_program::entrypoint!(process_instruction); +pub fn process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + let mut i = 0u32; + for _ in 0..100_000 { + if i % 500 == 0 { + let remaining = sol_remaining_compute_units(); + msg!("remaining compute units: {:?}", remaining); + if remaining < 25_000 { + break; + } + } + i = i.saturating_add(1); + } + + msg!("i: {:?}", i); + + Ok(()) +} diff --git a/programs/sbf/rust/remaining_compute_units/tests/lib.rs b/programs/sbf/rust/remaining_compute_units/tests/lib.rs new file mode 100644 index 00000000000000..30da15b2953a53 --- /dev/null +++ b/programs/sbf/rust/remaining_compute_units/tests/lib.rs @@ -0,0 +1,27 @@ +#![cfg(feature = "test-bpf")] + +use { + solana_program_test::*, + solana_sbf_rust_remaining_compute_units::process_instruction, + solana_sdk::{ + instruction::Instruction, pubkey::Pubkey, signature::Signer, transaction::Transaction, + }, +}; + +#[tokio::test] +async fn test_remaining_compute_units() { + let program_id = Pubkey::new_unique(); + let program_test = ProgramTest::new( + "solana_sbf_rust_remaining_compute_units", + program_id, + processor!(process_instruction), + ); + let (mut banks_client, payer, recent_blockhash) = program_test.start().await; + + let mut transaction = Transaction::new_with_payer( + &[Instruction::new_with_bincode(program_id, &(), vec![])], + Some(&payer.pubkey()), + ); + 
transaction.sign(&[&payer], recent_blockhash); + banks_client.process_transaction(transaction).await.unwrap(); +} diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 65546bb2055dcf..e110778a9e5797 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -8111,6 +8111,7 @@ impl Bank { feature_set::disable_deploy_of_alloc_free_syscall::id(), feature_set::last_restart_slot_sysvar::id(), feature_set::delay_visibility_of_program_deployment::id(), + feature_set::remaining_compute_units_syscall_enabled::id(), ]; if !only_apply_transitions_for_new_features || FEATURES_AFFECTING_RBPF diff --git a/sdk/program/src/compute_units.rs b/sdk/program/src/compute_units.rs new file mode 100644 index 00000000000000..6b7f27127139c7 --- /dev/null +++ b/sdk/program/src/compute_units.rs @@ -0,0 +1,13 @@ +/// Return the remaining compute units the program may consume +#[inline] +pub fn sol_remaining_compute_units() -> u64 { + #[cfg(target_os = "solana")] + unsafe { + crate::syscalls::sol_remaining_compute_units() + } + + #[cfg(not(target_os = "solana"))] + { + crate::program_stubs::sol_remaining_compute_units() + } +} diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 63da3fa7f1e09f..edcc2e3cb8c961 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -483,6 +483,7 @@ pub mod bpf_loader; pub mod bpf_loader_deprecated; pub mod bpf_loader_upgradeable; pub mod clock; +pub mod compute_units; pub mod debug_account_data; pub mod decode_error; pub mod ed25519_program; diff --git a/sdk/program/src/program_stubs.rs b/sdk/program/src/program_stubs.rs index 24f4dc57d3d77b..cf890659fa68a1 100644 --- a/sdk/program/src/program_stubs.rs +++ b/sdk/program/src/program_stubs.rs @@ -30,6 +30,10 @@ pub trait SyscallStubs: Sync + Send { fn sol_log_compute_units(&self) { sol_log("SyscallStubs: sol_log_compute_units() not available"); } + fn sol_remaining_compute_units(&self) -> u64 { + sol_log("SyscallStubs: sol_remaining_compute_units() defaulting to 0"); + 0 + } fn 
sol_invoke_signed( &self, _instruction: &Instruction, @@ -126,6 +130,10 @@ pub(crate) fn sol_log_compute_units() { SYSCALL_STUBS.read().unwrap().sol_log_compute_units(); } +pub(crate) fn sol_remaining_compute_units() -> u64 { + SYSCALL_STUBS.read().unwrap().sol_remaining_compute_units() +} + pub(crate) fn sol_invoke_signed( instruction: &Instruction, account_infos: &[AccountInfo], diff --git a/sdk/program/src/syscalls/definitions.rs b/sdk/program/src/syscalls/definitions.rs index dbf6d5e3a403be..c3aa74ff8de211 100644 --- a/sdk/program/src/syscalls/definitions.rs +++ b/sdk/program/src/syscalls/definitions.rs @@ -70,6 +70,7 @@ define_syscall!(fn sol_alt_bn128_group_op(group_op: u64, input: *const u8, input define_syscall!(fn sol_big_mod_exp(params: *const u8, result: *mut u8) -> u64); define_syscall!(fn sol_get_epoch_rewards_sysvar(addr: *mut u8) -> u64); define_syscall!(fn sol_poseidon(parameters: u64, endianness: u64, vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); +define_syscall!(fn sol_remaining_compute_units() -> u64); #[cfg(target_feature = "static-syscalls")] pub const fn sys_hash(name: &str) -> usize { diff --git a/sdk/sbf/c/inc/sol/compute_units.h b/sdk/sbf/c/inc/sol/compute_units.h new file mode 100644 index 00000000000000..a4a9f40bf4760e --- /dev/null +++ b/sdk/sbf/c/inc/sol/compute_units.h @@ -0,0 +1,42 @@ +#pragma once +/** + * @brief Solana logging utilities + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Prints a string to stdout + */ +/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/sbf/c/inc/sol/inc/compute_units.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +uint64_t sol_remaining_compute_units(); +#else +typedef uint64_t(*sol_remaining_compute_units_pointer_type)(); +static uint64_t sol_remaining_compute_units() { + sol_remaining_compute_units_pointer_type sol_remaining_compute_units_pointer = (sol_remaining_compute_units_pointer_type) 3991886574; + return sol_remaining_compute_units_pointer(); +} +#endif + +#ifdef SOL_TEST +/** + * Stub functions when building tests + */ + +uint64_t sol_remaining_compute_units() { + return UINT64_MAX; +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/inc/compute_units.inc b/sdk/sbf/c/inc/sol/inc/compute_units.inc new file mode 100644 index 00000000000000..4d13ac94f33dc1 --- /dev/null +++ b/sdk/sbf/c/inc/sol/inc/compute_units.inc @@ -0,0 +1,33 @@ +#pragma once +/** + * @brief Solana logging utilities + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Prints a string to stdout + */ +@SYSCALL uint64_t sol_remaining_compute_units(); + +#ifdef SOL_TEST +/** + * Stub functions when building tests + */ + +uint64_t sol_remaining_compute_units() { + return UINT64_MAX; +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/solana_sdk.h b/sdk/sbf/c/inc/solana_sdk.h index 5c8e4370185be5..7a3b49d5ec8879 100644 --- a/sdk/sbf/c/inc/solana_sdk.h +++ b/sdk/sbf/c/inc/solana_sdk.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 2ce873fe5f3d91..418d3287484486 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -691,6 +691,10 @@ pub mod timely_vote_credits { solana_sdk::declare_id!("2oXpeh141pPZCTCFHBsvCwG2BtaHZZAtrVhwaxSy6brS"); } +pub mod remaining_compute_units_syscall_enabled { + solana_sdk::declare_id!("5TuppMutoyzhUSfuYdhgzD47F92GL1g89KpCZQKqedxP"); +} + lazy_static! 
{ /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -856,6 +860,7 @@ lazy_static! { (revise_turbine_epoch_stakes::id(), "revise turbine epoch stakes"), (enable_poseidon_syscall::id(), "Enable Poseidon syscall"), (timely_vote_credits::id(), "use timeliness of votes in determining credits to award"), + (remaining_compute_units_syscall_enabled::id(), "enable the remaining_compute_units syscall"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 093ae61e850ed5c2e23f98a9e7e37feb83972ed5 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 13 Sep 2023 12:37:01 -0700 Subject: [PATCH 080/407] single pass through each storage to populate info (#33227) --- accounts-db/src/accounts_db.rs | 112 +++++++++++++++++---------------- 1 file changed, 58 insertions(+), 54 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index deae0d0ce40648..c49928d6dbc409 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8964,6 +8964,7 @@ impl AccountsDb { slot: &Slot, store_id: AppendVecId, rent_collector: &RentCollector, + storage_info: &StorageSizeAndCountMap, ) -> SlotIndexGenerationInfo { if accounts_map.is_empty() { return SlotIndexGenerationInfo::default(); @@ -8976,7 +8977,11 @@ impl AccountsDb { let mut num_accounts_rent_paying = 0; let num_accounts = accounts_map.len(); let mut amount_to_top_off_rent = 0; + // first collect into a local HashMap with no lock contention + let mut storage_info_local = StorageSizeAndCount::default(); + let items = accounts_map.into_iter().map(|(pubkey, stored_account)| { + storage_info_local.stored_size += stored_account.stored_size(); if secondary { self.accounts_index.update_secondary_indexes( &pubkey, @@ -9010,6 +9015,13 @@ impl AccountsDb { .accounts_index .insert_new_if_missing_into_primary_index(*slot, num_accounts, items); + { + // second, collect into the shared DashMap once we've figured 
out all the info per store_id + let mut info = storage_info.entry(store_id).or_default(); + info.stored_size += storage_info_local.stored_size; + info.count += num_accounts; + } + // dirty_pubkeys will contain a pubkey if an item has multiple rooted entries for // a given pubkey. If there is just a single item, there is no cleaning to // be done on that pubkey. Use only those pubkeys with multiple updates. @@ -9183,12 +9195,6 @@ impl AccountsDb { scan_time.stop(); scan_time_sum += scan_time.as_us(); - Self::update_storage_info( - &storage_info, - &accounts_map, - &storage_info_timings, - store_id, - ); let insert_us = if pass == 0 { // generate index @@ -9206,6 +9212,7 @@ impl AccountsDb { slot, store_id, &rent_collector, + &storage_info, ); rent_paying.fetch_add(rent_paying_this_slot, Ordering::Relaxed); amount_to_top_off_rent @@ -9481,36 +9488,6 @@ impl AccountsDb { (accounts_data_len_from_duplicates as u64, uncleaned_slots) } - fn update_storage_info( - storage_info: &StorageSizeAndCountMap, - accounts_map: &GenerateIndexAccountsMap<'_>, - timings: &Mutex, - store_id: AppendVecId, - ) { - let mut storage_size_accounts_map_time = Measure::start("storage_size_accounts_map"); - - // first collect into a local HashMap with no lock contention - let mut storage_info_local = StorageSizeAndCount::default(); - for (_, v) in accounts_map.iter() { - storage_info_local.stored_size += v.stored_size(); - storage_info_local.count += 1; - } - storage_size_accounts_map_time.stop(); - // second, collect into the shared DashMap once we've figured out all the info per store_id - let mut storage_size_accounts_map_flatten_time = - Measure::start("storage_size_accounts_map_flatten_time"); - if !accounts_map.is_empty() { - let mut info = storage_info.entry(store_id).or_default(); - info.stored_size += storage_info_local.stored_size; - info.count += storage_info_local.count; - } - storage_size_accounts_map_flatten_time.stop(); - - let mut timings = timings.lock().unwrap(); - 
timings.storage_size_accounts_map_us += storage_size_accounts_map_time.as_us(); - timings.storage_size_accounts_map_flatten_us += - storage_size_accounts_map_flatten_time.as_us(); - } fn set_storage_count_and_alive_bytes( &self, stored_sizes_and_counts: StorageSizeAndCountMap, @@ -15765,20 +15742,34 @@ pub mod tests { #[test] fn test_calculate_storage_count_and_alive_bytes() { let accounts = AccountsDb::new_single_for_tests(); + accounts.accounts_index.set_startup(Startup::Startup); let shared_key = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner()); let slot0 = 0; - accounts.store_for_tests(slot0, &[(&shared_key, &account)]); - accounts.add_root_and_flush_write_cache(slot0); + + accounts.accounts_index.set_startup(Startup::Startup); + + let storage = accounts.create_and_insert_store(slot0, 4_000, "flush_slot_cache"); + let hashes = vec![Hash::default(); 1]; + let write_version = vec![0; 1]; + storage.accounts.append_accounts( + &StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( + &(slot0, &[(&shared_key, &account)][..]), + hashes, + write_version, + ), + 0, + ); let storage = accounts.storage.get_slot_storage_entry(slot0).unwrap(); let storage_info = StorageSizeAndCountMap::default(); let accounts_map = accounts.process_storage_slot(&storage); - AccountsDb::update_storage_info( + accounts.generate_index_for_slot( + accounts_map, + &slot0, + 0, + &RentCollector::default(), &storage_info, - &accounts_map, - &Mutex::default(), - storage.append_vec_id(), ); assert_eq!(storage_info.len(), 1); for entry in storage_info.iter() { @@ -15787,6 +15778,7 @@ pub mod tests { (&0, 1, 144) ); } + accounts.accounts_index.set_startup(Startup::Normal); } #[test] @@ -15796,11 +15788,12 @@ pub mod tests { let storage = accounts.create_and_insert_store(0, 1, "test"); let storage_info = StorageSizeAndCountMap::default(); let accounts_map = accounts.process_storage_slot(&storage); - 
AccountsDb::update_storage_info( + accounts.generate_index_for_slot( + accounts_map, + &0, + 0, + &RentCollector::default(), &storage_info, - &accounts_map, - &Mutex::default(), - storage.append_vec_id(), ); assert!(storage_info.is_empty()); } @@ -15812,6 +15805,8 @@ pub mod tests { solana_sdk::pubkey::Pubkey::from([0; 32]), solana_sdk::pubkey::Pubkey::from([255; 32]), ]; + accounts.accounts_index.set_startup(Startup::Startup); + // make sure accounts are in 2 different bins assert!( (accounts.accounts_index.bins() == 1) @@ -15827,18 +15822,26 @@ pub mod tests { let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner()); let account_big = AccountSharedData::new(1, 1000, AccountSharedData::default().owner()); let slot0 = 0; - accounts.store_for_tests(slot0, &[(&keys[0], &account)]); - accounts.store_for_tests(slot0, &[(&keys[1], &account_big)]); - accounts.add_root_and_flush_write_cache(slot0); + let storage = accounts.create_and_insert_store(slot0, 4_000, "flush_slot_cache"); + let hashes = vec![Hash::default(); 2]; + let write_version = vec![0; 2]; + storage.accounts.append_accounts( + &StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( + &(slot0, &[(&keys[0], &account), (&keys[1], &account_big)][..]), + hashes, + write_version, + ), + 0, + ); - let storage = accounts.storage.get_slot_storage_entry(slot0).unwrap(); let storage_info = StorageSizeAndCountMap::default(); let accounts_map = accounts.process_storage_slot(&storage); - AccountsDb::update_storage_info( + accounts.generate_index_for_slot( + accounts_map, + &0, + 0, + &RentCollector::default(), &storage_info, - &accounts_map, - &Mutex::default(), - storage.append_vec_id(), ); assert_eq!(storage_info.len(), 1); for entry in storage_info.iter() { @@ -15847,6 +15850,7 @@ pub mod tests { (&0, 2, 1280) ); } + accounts.accounts_index.set_startup(Startup::Normal); } #[test] From cb310a310a27a0e747ab2fa435b6b651dbd47539 Mon Sep 17 00:00:00 2001 From: "Jeff 
Washington (jwash)" Date: Wed, 13 Sep 2023 14:13:46 -0700 Subject: [PATCH 081/407] set approx store count at generate index time (#33240) --- accounts-db/src/accounts_db.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index c49928d6dbc409..db55b210d40c08 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9508,6 +9508,9 @@ impl AccountsDb { ); store.count_and_status.write().unwrap().0 = entry.count; store.alive_bytes.store(entry.stored_size, Ordering::SeqCst); + store + .approx_store_count + .store(entry.count, Ordering::Relaxed); } else { trace!("id: {} clearing count", id); store.count_and_status.write().unwrap().0 = 0; From c2bf2a969d7c330203b5336859d119effb020518 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 13 Sep 2023 14:14:10 -0700 Subject: [PATCH 082/407] remove unused metrics (#33241) --- accounts-db/src/accounts_db.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index db55b210d40c08..76791bf285f5b5 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -614,9 +614,7 @@ struct GenerateIndexTimings { pub min_bin_size: usize, pub max_bin_size: usize, pub total_items: usize, - pub storage_size_accounts_map_us: u64, pub storage_size_storages_us: u64, - pub storage_size_accounts_map_flatten_us: u64, pub index_flush_us: u64, pub rent_paying: AtomicUsize, pub amount_to_top_off_rent: AtomicU64, @@ -648,21 +646,11 @@ impl GenerateIndexTimings { ("insertion_time_us", self.insertion_time_us, i64), ("min_bin_size", self.min_bin_size as i64, i64), ("max_bin_size", self.max_bin_size as i64, i64), - ( - "storage_size_accounts_map_us", - self.storage_size_accounts_map_us as i64, - i64 - ), ( "storage_size_storages_us", self.storage_size_storages_us as i64, i64 ), - ( - "storage_size_accounts_map_flatten_us", - 
self.storage_size_accounts_map_flatten_us as i64, - i64 - ), ("index_flush_us", self.index_flush_us as i64, i64), ( "total_rent_paying", @@ -9173,7 +9161,6 @@ impl AccountsDb { let rent_paying = AtomicUsize::new(0); let amount_to_top_off_rent = AtomicU64::new(0); let total_including_duplicates = AtomicU64::new(0); - let storage_info_timings = Mutex::new(GenerateIndexTimings::default()); let scan_time: u64 = slots .par_chunks(chunk_size) .map(|slots| { @@ -9322,7 +9309,6 @@ impl AccountsDb { } let unique_pubkeys_by_bin = unique_pubkeys_by_bin.into_inner().unwrap(); - let storage_info_timings = storage_info_timings.into_inner().unwrap(); let mut timings = GenerateIndexTimings { index_flush_us, scan_time, @@ -9336,9 +9322,6 @@ impl AccountsDb { total_duplicate_slot_keys: total_duplicate_slot_keys.load(Ordering::Relaxed), populate_duplicate_keys_us, total_including_duplicates: total_including_duplicates.load(Ordering::Relaxed), - storage_size_accounts_map_us: storage_info_timings.storage_size_accounts_map_us, - storage_size_accounts_map_flatten_us: storage_info_timings - .storage_size_accounts_map_flatten_us, total_slots: slots.len() as u64, ..GenerateIndexTimings::default() }; From 602f0f6ecbad9ad61b9ad907fc736a405faaf289 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 13 Sep 2023 14:14:30 -0700 Subject: [PATCH 083/407] generate index: fn arg: &Slot -> Slot (#33229) fn arg: &Slot -> Slot --- accounts-db/src/accounts_db.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 76791bf285f5b5..b97f838b55daf9 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8949,7 +8949,7 @@ impl AccountsDb { fn generate_index_for_slot( &self, accounts_map: GenerateIndexAccountsMap<'_>, - slot: &Slot, + slot: Slot, store_id: AppendVecId, rent_collector: &RentCollector, storage_info: &StorageSizeAndCountMap, @@ -9001,7 +9001,7 @@ impl 
AccountsDb { let (dirty_pubkeys, insert_time_us) = self .accounts_index - .insert_new_if_missing_into_primary_index(*slot, num_accounts, items); + .insert_new_if_missing_into_primary_index(slot, num_accounts, items); { // second, collect into the shared DashMap once we've figured out all the info per store_id @@ -9014,7 +9014,7 @@ impl AccountsDb { // a given pubkey. If there is just a single item, there is no cleaning to // be done on that pubkey. Use only those pubkeys with multiple updates. if !dirty_pubkeys.is_empty() { - self.uncleaned_pubkeys.insert(*slot, dirty_pubkeys); + self.uncleaned_pubkeys.insert(slot, dirty_pubkeys); } SlotIndexGenerationInfo { insert_time_us, @@ -9196,7 +9196,7 @@ impl AccountsDb { rent_paying_accounts_by_partition_this_slot, } = self.generate_index_for_slot( accounts_map, - slot, + *slot, store_id, &rent_collector, &storage_info, @@ -15752,7 +15752,7 @@ pub mod tests { let accounts_map = accounts.process_storage_slot(&storage); accounts.generate_index_for_slot( accounts_map, - &slot0, + slot0, 0, &RentCollector::default(), &storage_info, @@ -15776,7 +15776,7 @@ pub mod tests { let accounts_map = accounts.process_storage_slot(&storage); accounts.generate_index_for_slot( accounts_map, - &0, + 0, 0, &RentCollector::default(), &storage_info, @@ -15824,7 +15824,7 @@ pub mod tests { let accounts_map = accounts.process_storage_slot(&storage); accounts.generate_index_for_slot( accounts_map, - &0, + 0, 0, &RentCollector::default(), &storage_info, From 4e2d67f19ab2309c74f6c2224204194ce5a0f5ed Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 14 Sep 2023 12:11:31 +0800 Subject: [PATCH 084/407] ci: move solana-core to nextest (#33231) --- ci/stable/run-partition.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/ci/stable/run-partition.sh b/ci/stable/run-partition.sh index 6fbbb0e47fcf83..e9d6bb3269ec19 100755 --- a/ci/stable/run-partition.sh +++ b/ci/stable/run-partition.sh @@ -32,7 +32,6 @@ fi DONT_USE_NEXTEST_PACKAGES=( 
solana-cargo-build-sbf - solana-core ) if [ "$INDEX" -eq "$((LIMIT - 1))" ]; then From 30055b02b5f9192be8851f03522b5f388933d659 Mon Sep 17 00:00:00 2001 From: Jon Cinque Date: Thu, 14 Sep 2023 11:29:42 +0200 Subject: [PATCH 085/407] docs: Clarify when validator commission can be set (#32796) docs: Clarify when commission can be set --- docs/src/running-validator/vote-accounts.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/src/running-validator/vote-accounts.md b/docs/src/running-validator/vote-accounts.md index c4c572ff71479d..6bb021588d7698 100644 --- a/docs/src/running-validator/vote-accounts.md +++ b/docs/src/running-validator/vote-accounts.md @@ -150,6 +150,11 @@ When setting the commission, only integer values in the set [0-100] are accepted The integer represents the number of percentage points for the commission, so creating an account with `--commission 10` will set a 10% commission. +Note that validators can only update their commission during the first half of +any epoch. This prevents validators from stealing delegator rewards by setting a +low commission, increasing it right before the end of the epoch, and then +changing it back after reward distribution. 
+ ## Key Rotation Rotating the vote account authority keys requires special handling when dealing From c6b0d4a4961ac6a4f106cceffa54d1883bfd13dd Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Thu, 14 Sep 2023 14:18:58 +0200 Subject: [PATCH 086/407] specify compute budget when padding program is used (#33234) * specify compute budget when padding program is used * fix numeric const format --- bench-tps/src/bench.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 53288abd2dfa64..029937f391cccd 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -50,6 +50,8 @@ const MAX_TX_QUEUE_AGE: u64 = (MAX_PROCESSING_AGE as f64 * DEFAULT_S_PER_SLOT) a const MAX_RANDOM_COMPUTE_UNIT_PRICE: u64 = 50; const COMPUTE_UNIT_PRICE_MULTIPLIER: u64 = 1_000; const TRANSFER_TRANSACTION_COMPUTE_UNIT: u32 = 600; // 1 transfer is plus 3 compute_budget ixs +const PADDED_TRANSFER_COMPUTE_UNIT: u32 = 3_000; // padding program execution requires consumes this amount + /// calculate maximum possible prioritization fee, if `use-randomized-compute-unit-price` is /// enabled, round to nearest lamports. 
pub fn max_lamports_for_prioritization(compute_unit_price: &Option) -> u64 { @@ -625,6 +627,13 @@ fn transfer_with_compute_unit_price_and_padding( ), instruction, ]; + if instruction_padding_config.is_some() { + // By default, CU budget is DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT which is much larger than needed + instructions.push(ComputeBudgetInstruction::set_compute_unit_limit( + PADDED_TRANSFER_COMPUTE_UNIT, + )); + } + if let Some(compute_unit_price) = compute_unit_price { instructions.extend_from_slice(&[ ComputeBudgetInstruction::set_compute_unit_limit(TRANSFER_TRANSACTION_COMPUTE_UNIT), From 5d29ed196f8293538ee70752afa7e4e85dd728b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Sep 2023 12:48:34 +0000 Subject: [PATCH 087/407] build(deps): bump serde_json from 1.0.106 to 1.0.107 (#33248) * build(deps): bump serde_json from 1.0.106 to 1.0.107 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.106 to 1.0.107. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.106...v1.0.107) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 829336f9ee5a89..c2a278aaf35fb0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4754,9 +4754,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.106" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index a1718c429af01b..1704005fc93bba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -286,7 +286,7 @@ semver = "1.0.18" serde = "1.0.188" serde_bytes = "0.11.12" serde_derive = "1.0.103" -serde_json = "1.0.106" +serde_json = "1.0.107" serde_yaml = "0.9.25" serial_test = "2.0.0" serde_with = { version = "2.3.3", default-features = false } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2dc3879a46b284..fee02eb5824eac 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4163,9 +4163,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.106" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", From 886eabd74d334dbaab1bb1e8be6dec23e042b0d8 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 14 Sep 2023 07:53:05 -0700 Subject: [PATCH 088/407] gen idx: refactor StorageSizeAndCount population (#33244) --- 
accounts-db/src/accounts_db.rs | 20 ++++++++----- accounts-db/src/accounts_index.rs | 48 ++++++++++++++++++++++++++----- 2 files changed, 54 insertions(+), 14 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index b97f838b55daf9..7316d96d9d1615 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8965,11 +8965,10 @@ impl AccountsDb { let mut num_accounts_rent_paying = 0; let num_accounts = accounts_map.len(); let mut amount_to_top_off_rent = 0; - // first collect into a local HashMap with no lock contention - let mut storage_info_local = StorageSizeAndCount::default(); + let mut stored_size_alive = 0; let items = accounts_map.into_iter().map(|(pubkey, stored_account)| { - storage_info_local.stored_size += stored_account.stored_size(); + stored_size_alive += stored_account.stored_size(); if secondary { self.accounts_index.update_secondary_indexes( &pubkey, @@ -8999,15 +8998,15 @@ impl AccountsDb { ) }); - let (dirty_pubkeys, insert_time_us) = self + let (dirty_pubkeys, insert_time_us, generate_index_count) = self .accounts_index .insert_new_if_missing_into_primary_index(slot, num_accounts, items); { // second, collect into the shared DashMap once we've figured out all the info per store_id let mut info = storage_info.entry(store_id).or_default(); - info.stored_size += storage_info_local.stored_size; - info.count += num_accounts; + info.stored_size += stored_size_alive; + info.count += generate_index_count.count; } // dirty_pubkeys will contain a pubkey if an item has multiple rooted entries for @@ -9489,7 +9488,11 @@ impl AccountsDb { entry.count, store.count(), ); - store.count_and_status.write().unwrap().0 = entry.count; + { + let mut count_and_status = store.count_and_status.write().unwrap(); + assert_eq!(count_and_status.0, 0); + count_and_status.0 = entry.count; + } store.alive_bytes.store(entry.stored_size, Ordering::SeqCst); store .approx_store_count @@ -15853,6 +15856,8 @@ pub mod tests 
{ // fake out the store count to avoid the assert for (_, store) in accounts.storage.iter() { store.alive_bytes.store(0, Ordering::Release); + let mut count_and_status = store.count_and_status.write().unwrap(); + count_and_status.0 = 0; } // populate based on made up hash data @@ -15864,6 +15869,7 @@ pub mod tests { count: 3, }, ); + accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default()); assert_eq!(accounts.storage.len(), 1); for (_, store) in accounts.storage.iter() { diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 45ecc13b851133..0fe61b405af9de 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -71,6 +71,12 @@ pub type SlotSlice<'s, T> = &'s [(Slot, T)]; pub type RefCount = u64; pub type AccountMap = Arc>; +#[derive(Default, Debug, PartialEq, Eq)] +pub(crate) struct GenerateIndexCount { + /// number of accounts inserted in the index + pub count: usize, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq)] /// how accounts index 'upsert' should handle reclaims pub enum UpsertReclaim { @@ -1586,13 +1592,14 @@ impl + Into> AccountsIndex { // Can save time when inserting lots of new keys. // But, does NOT update secondary index // This is designed to be called at startup time. 
+ // returns (dirty_pubkeys, insertion_time_us, GenerateIndexCount) #[allow(clippy::needless_collect)] pub(crate) fn insert_new_if_missing_into_primary_index( &self, slot: Slot, item_len: usize, items: impl Iterator, - ) -> (Vec, u64) { + ) -> (Vec, u64, GenerateIndexCount) { // big enough so not likely to re-allocate, small enough to not over-allocate by too much // this assumes the largest bin contains twice the expected amount of the average size per bin let bins = self.bins(); @@ -1612,6 +1619,7 @@ impl + Into> AccountsIndex { (pubkey_bin, Vec::with_capacity(expected_items_per_bin)) }) .collect::>(); + let mut count = 0; let mut dirty_pubkeys = items .filter_map(|(pubkey, account_info)| { let pubkey_bin = self.bin_calculator.bin_from_pubkey(&pubkey); @@ -1631,6 +1639,7 @@ impl + Into> AccountsIndex { binned.into_iter().for_each(|(pubkey_bin, items)| { let r_account_maps = &self.account_maps[pubkey_bin]; let mut insert_time = Measure::start("insert_into_primary_index"); + count += items.len(); if use_disk { r_account_maps.startup_insert_only(items.into_iter()); } else { @@ -1660,7 +1669,11 @@ impl + Into> AccountsIndex { insertion_time.fetch_add(insert_time.as_us(), Ordering::Relaxed); }); - (dirty_pubkeys, insertion_time.load(Ordering::Relaxed)) + ( + dirty_pubkeys, + insertion_time.load(Ordering::Relaxed), + GenerateIndexCount { count }, + ) } /// use Vec<> because the internal vecs are already allocated per bin @@ -2195,7 +2208,10 @@ pub mod tests { let account_info = true; let items = vec![(*pubkey, account_info)]; index.set_startup(Startup::Startup); - index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter()); + let expected_len = items.len(); + let (_, _, result) = + index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter()); + assert_eq!(result.count, expected_len); index.set_startup(Startup::Normal); let mut ancestors = Ancestors::default(); @@ -2230,7 +2246,10 @@ pub mod tests { let account_info = 
false; let items = vec![(*pubkey, account_info)]; index.set_startup(Startup::Startup); - index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter()); + let expected_len = items.len(); + let (_, _, result) = + index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter()); + assert_eq!(result.count, expected_len); index.set_startup(Startup::Normal); let mut ancestors = Ancestors::default(); @@ -2337,7 +2356,10 @@ pub mod tests { index.set_startup(Startup::Startup); let items = vec![(key0, account_infos[0]), (key1, account_infos[1])]; - index.insert_new_if_missing_into_primary_index(slot0, items.len(), items.into_iter()); + let expected_len = items.len(); + let (_, _, result) = + index.insert_new_if_missing_into_primary_index(slot0, items.len(), items.into_iter()); + assert_eq!(result.count, expected_len); index.set_startup(Startup::Normal); for (i, key) in [key0, key1].iter().enumerate() { @@ -2388,7 +2410,13 @@ pub mod tests { } else { let items = vec![(key, account_infos[0])]; index.set_startup(Startup::Startup); - index.insert_new_if_missing_into_primary_index(slot0, items.len(), items.into_iter()); + let expected_len = items.len(); + let (_, _, result) = index.insert_new_if_missing_into_primary_index( + slot0, + items.len(), + items.into_iter(), + ); + assert_eq!(result.count, expected_len); index.set_startup(Startup::Normal); } assert!(gc.is_empty()); @@ -2433,7 +2461,13 @@ pub mod tests { let items = vec![(key, account_infos[1])]; index.set_startup(Startup::Startup); - index.insert_new_if_missing_into_primary_index(slot1, items.len(), items.into_iter()); + let expected_len = items.len(); + let (_, _, result) = index.insert_new_if_missing_into_primary_index( + slot1, + items.len(), + items.into_iter(), + ); + assert_eq!(result.count, expected_len); index.set_startup(Startup::Normal); } assert!(gc.is_empty()); From 3ad83940479a3ffdff7f1083efab0418fd007bef Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" 
Date: Thu, 14 Sep 2023 07:53:33 -0700 Subject: [PATCH 089/407] stop removing empty bins during index generation (#33242) --- accounts-db/src/accounts_index.rs | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 0fe61b405af9de..959bb8319e5080 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1604,39 +1604,35 @@ impl + Into> AccountsIndex { // this assumes the largest bin contains twice the expected amount of the average size per bin let bins = self.bins(); let expected_items_per_bin = item_len * 2 / bins; - // offset bin 0 in the 'binned' array by a random amount. - // This results in calls to insert_new_entry_if_missing_with_lock from different threads starting at different bins. - let random_offset = thread_rng().gen_range(0..bins); let use_disk = self.storage.storage.disk.is_some(); let mut binned = (0..bins) - .map(|mut pubkey_bin| { - // opposite of (pubkey_bin + random_offset) % bins - pubkey_bin = if pubkey_bin < random_offset { - pubkey_bin + bins - random_offset - } else { - pubkey_bin - random_offset - }; - (pubkey_bin, Vec::with_capacity(expected_items_per_bin)) - }) + .map(|_| Vec::with_capacity(expected_items_per_bin)) .collect::>(); let mut count = 0; let mut dirty_pubkeys = items .filter_map(|(pubkey, account_info)| { let pubkey_bin = self.bin_calculator.bin_from_pubkey(&pubkey); - let binned_index = (pubkey_bin + random_offset) % bins; // this value is equivalent to what update() below would have created if we inserted a new item let is_zero_lamport = account_info.is_zero_lamport(); let result = if is_zero_lamport { Some(pubkey) } else { None }; - binned[binned_index].1.push((pubkey, (slot, account_info))); + binned[pubkey_bin].push((pubkey, (slot, account_info))); result }) .collect::>(); - binned.retain(|x| !x.1.is_empty()); let insertion_time = AtomicU64::new(0); - 
binned.into_iter().for_each(|(pubkey_bin, items)| { + // offset bin processing in the 'binned' array by a random amount. + // This results in calls to insert_new_entry_if_missing_with_lock from different threads starting at different bins to avoid + // lock contention. + let random_offset = thread_rng().gen_range(0..bins); + (0..bins).for_each(|pubkey_bin| { + let pubkey_bin = (pubkey_bin + random_offset) % bins; + let items = std::mem::take(&mut binned[pubkey_bin]); + if items.is_empty() { + return; + } let r_account_maps = &self.account_maps[pubkey_bin]; let mut insert_time = Measure::start("insert_into_primary_index"); count += items.len(); From 1e1e29641499e08ea1516ba2d81344e7875abc52 Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 14 Sep 2023 08:58:46 -0600 Subject: [PATCH 090/407] Remove unused test code (#33247) --- programs/stake/src/stake_instruction.rs | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 2a17c481b37ba9..bb77cefe4c4af9 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -472,9 +472,7 @@ mod tests { }, assert_matches::assert_matches, bincode::serialize, - solana_program_runtime::{ - invoke_context::mock_process_instruction, sysvar_cache::SysvarCache, - }, + solana_program_runtime::invoke_context::mock_process_instruction, solana_sdk::{ account::{ create_account_shared_data_for_test, AccountSharedData, ReadableAccount, @@ -6856,14 +6854,6 @@ mod tests { let stake_history = StakeHistory::default(); let current_epoch = 100; - let mut sysvar_cache_override = SysvarCache::default(); - sysvar_cache_override.set_stake_history(stake_history.clone()); - sysvar_cache_override.set_rent(rent); - sysvar_cache_override.set_clock(Clock { - epoch: current_epoch, - ..Clock::default() - }); - let authorized_staker = Pubkey::new_unique(); let vote_address = Pubkey::new_unique(); let 
new_vote_address = Pubkey::new_unique(); From a13fb1d220b3c55c2554665f29ec71fe86ece007 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Sep 2023 16:14:36 +0000 Subject: [PATCH 091/407] build(deps): bump libc from 0.2.147 to 0.2.148 (#33249) * build(deps): bump libc from 0.2.147 to 0.2.148 Bumps [libc](https://github.com/rust-lang/libc) from 0.2.147 to 0.2.148. - [Release notes](https://github.com/rust-lang/libc/releases) - [Commits](https://github.com/rust-lang/libc/compare/0.2.147...0.2.148) --- updated-dependencies: - dependency-name: libc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2a278aaf35fb0..436341a1f86c2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2868,9 +2868,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libloading" diff --git a/Cargo.toml b/Cargo.toml index 1704005fc93bba..41634dbadba47d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -229,7 +229,7 @@ jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" jsonrpc-server-utils = "18.0.0" lazy_static = "1.4.0" -libc = "0.2.147" +libc = "0.2.148" libloading = "0.7.4" libsecp256k1 = "0.6.0" light-poseidon = "0.1.1" diff --git a/programs/sbf/Cargo.lock 
b/programs/sbf/Cargo.lock index fee02eb5824eac..00173d1c8bbcbf 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2463,9 +2463,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libloading" From 432cacdb0ea0f2428df4bdae7ca77895722ade01 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Sep 2023 16:15:22 +0000 Subject: [PATCH 092/407] build(deps): bump proc-macro2 from 1.0.66 to 1.0.67 (#33250) * build(deps): bump proc-macro2 from 1.0.66 to 1.0.67 Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.66 to 1.0.67. - [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.66...1.0.67) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 436341a1f86c2b..d84a664601370b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3946,9 +3946,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] diff --git a/Cargo.toml b/Cargo.toml index 41634dbadba47d..e1da505b587049 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -257,7 +257,7 @@ pickledb = { version = "0.5.1", default-features = false } pkcs8 = "0.8.0" predicates = "2.1" pretty-hex = "0.3.0" -proc-macro2 = "1.0.66" +proc-macro2 = "1.0.67" proptest = "1.2" prost = "0.11.9" prost-types = "0.11.9" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 00173d1c8bbcbf..68ac072f0a03ce 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3493,9 +3493,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] From 596e17f74b710422e39ba9d527ece5b49d9feafc Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 15 Sep 2023 01:31:07 +0800 Subject: [PATCH 093/407] ci: have new_with_external_ip_test_random use all threads (#33233) --- nextest.toml | 4 ++++ 1 file changed, 
4 insertions(+) diff --git a/nextest.toml b/nextest.toml index 5a6856845637c4..083c1c74cf9719 100644 --- a/nextest.toml +++ b/nextest.toml @@ -19,3 +19,7 @@ threads-required = "num-cpus" [[profile.ci.overrides]] filter = "package(solana-gossip) & test(/^test_star_network_push_ring_200/)" threads-required = "num-cpus" + +[[profile.ci.overrides]] +filter = "package(solana-gossip) & test(/^cluster_info::tests::new_with_external_ip_test_random/)" +threads-required = "num-cpus" From 99b24bd7412556f09405112962a17731ef86fe81 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 15 Sep 2023 02:44:12 +0800 Subject: [PATCH 094/407] check source_path before delete it (#33255) --- sdk/cargo-build-sbf/src/main.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 6dd710e062a50f..1a9e4e1b622261 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -247,8 +247,10 @@ fn install_if_missing( let source_base = config.sbf_sdk.join("dependencies"); if source_base.exists() { let source_path = source_base.join(package); - debug!("Remove file {:?}", source_path); - fs::remove_file(source_path).map_err(|err| err.to_string())?; + if source_path.exists() { + debug!("Remove file {:?}", source_path); + fs::remove_file(source_path).map_err(|err| err.to_string())?; + } } } // Check whether the target path is an empty directory. 
This can From dfaec7897a2f3228269bc5abe5155bf46c8bb133 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Thu, 14 Sep 2023 17:22:51 -0500 Subject: [PATCH 095/407] loaded accounts data size cost does not apply to vote transaction (#33235) * loaded accounts data size cost does not apply to vote transaction * add a test for vote cost --- cost-model/src/cost_tracker.rs | 2 +- cost-model/src/transaction_cost.rs | 81 +++++++++++++++++++++++++++--- 2 files changed, 76 insertions(+), 7 deletions(-) diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 062c619c7d3a6d..30df841abf9601 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -360,7 +360,7 @@ mod tests { ) .unwrap(); let mut tx_cost = TransactionCost::new_with_capacity(1); - tx_cost.bpf_execution_cost = 10; + tx_cost.builtins_execution_cost = 10; tx_cost.writable_accounts.push(mint_keypair.pubkey()); tx_cost.is_simple_vote = true; diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index e44014b3c934d7..cc0d987ec00f52 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -67,11 +67,80 @@ impl TransactionCost { } pub fn sum(&self) -> u64 { - self.signature_cost - .saturating_add(self.write_lock_cost) - .saturating_add(self.data_bytes_cost) - .saturating_add(self.builtins_execution_cost) - .saturating_add(self.bpf_execution_cost) - .saturating_add(self.loaded_accounts_data_size_cost) + if self.is_simple_vote { + self.signature_cost + .saturating_add(self.write_lock_cost) + .saturating_add(self.data_bytes_cost) + .saturating_add(self.builtins_execution_cost) + } else { + self.signature_cost + .saturating_add(self.write_lock_cost) + .saturating_add(self.data_bytes_cost) + .saturating_add(self.builtins_execution_cost) + .saturating_add(self.bpf_execution_cost) + .saturating_add(self.loaded_accounts_data_size_cost) + } + } +} + +#[cfg(test)] +mod 
tests { + use { + crate::cost_model::CostModel, + solana_sdk::{ + feature_set::FeatureSet, + hash::Hash, + message::SimpleAddressLoader, + signer::keypair::Keypair, + transaction::{MessageHash, SanitizedTransaction, VersionedTransaction}, + }, + solana_vote_program::vote_transaction, + }; + + #[test] + fn test_vote_transaction_cost() { + solana_logger::setup(); + let node_keypair = Keypair::new(); + let vote_keypair = Keypair::new(); + let auth_keypair = Keypair::new(); + let transaction = vote_transaction::new_vote_transaction( + vec![], + Hash::default(), + Hash::default(), + &node_keypair, + &vote_keypair, + &auth_keypair, + None, + ); + + // create a sanitized vote transaction + let vote_transaction = SanitizedTransaction::try_create( + VersionedTransaction::from(transaction.clone()), + MessageHash::Compute, + Some(true), + SimpleAddressLoader::Disabled, + ) + .unwrap(); + + // create a identical sanitized transaction, but identified as non-vote + let none_vote_transaction = SanitizedTransaction::try_create( + VersionedTransaction::from(transaction), + MessageHash::Compute, + Some(false), + SimpleAddressLoader::Disabled, + ) + .unwrap(); + + // expected vote tx cost: 2 write locks, 2 sig, 1 vite ix, and 11 CU tx data cost + let expected_vote_cost = 4151; + // expected non-vote tx cost would include default loaded accounts size cost (16384) additionally + let expected_none_vote_cost = 20535; + + let vote_cost = CostModel::calculate_cost(&vote_transaction, &FeatureSet::all_enabled()); + let none_vote_cost = + CostModel::calculate_cost(&none_vote_transaction, &FeatureSet::all_enabled()); + + assert_eq!(expected_vote_cost, vote_cost.sum()); + assert_eq!(expected_none_vote_cost, none_vote_cost.sum()); } } From c1090d395920350122e54bab4dc0a268378ff7af Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 15 Sep 2023 00:06:34 +0000 Subject: [PATCH 096/407] moves new_warmup_cooldown_rate_epoch outside iterators and for loops (#33259) Recalculating 
new_warmup_cooldown_rate_epoch for each item is redundant and wasteful and instead can be done only once outside the iterators and for loops. Also NewWarmupCooldownRateEpoch is unnecessary and verbose and is removed in this commit. --- programs/stake/src/stake_instruction.rs | 2 +- programs/stake/src/stake_state.rs | 5 +---- runtime/src/bank.rs | 24 +++++++++++++----------- sdk/src/feature_set.rs | 17 ++++++----------- 4 files changed, 21 insertions(+), 27 deletions(-) diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index bb77cefe4c4af9..c268009885edbb 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -481,7 +481,7 @@ mod tests { account_utils::StateMut, clock::{Epoch, UnixTimestamp}, epoch_schedule::EpochSchedule, - feature_set::{reduce_stake_warmup_cooldown::NewWarmupCooldownRateEpoch, FeatureSet}, + feature_set::FeatureSet, instruction::{AccountMeta, Instruction}, pubkey::Pubkey, rent::Rent, diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 9557544e8142af..713054ae629db8 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -14,10 +14,7 @@ use { account::{AccountSharedData, ReadableAccount, WritableAccount}, account_utils::StateMut, clock::{Clock, Epoch}, - feature_set::{ - self, reduce_stake_warmup_cooldown::NewWarmupCooldownRateEpoch, - stake_merge_with_unmatched_credits_observed, FeatureSet, - }, + feature_set::{self, stake_merge_with_unmatched_credits_observed, FeatureSet}, instruction::{checked_add, InstructionError}, pubkey::Pubkey, rent::Rent, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e110778a9e5797..4c957f75fdb911 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -137,7 +137,6 @@ use { self, add_set_tx_loaded_accounts_data_size_instruction, enable_early_verification_of_account_modifications, include_loaded_accounts_data_size_in_fee_calculation, - 
reduce_stake_warmup_cooldown::NewWarmupCooldownRateEpoch, remove_congestion_multiplier_from_fee_calculation, remove_deprecated_request_unit_ix, FeatureSet, }, @@ -2986,6 +2985,7 @@ impl Bank { VoteAccount::try_from(account).ok() }; + let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); let (points, measure_us) = measure_us!(thread_pool.install(|| { stake_delegations .par_iter() @@ -3007,7 +3007,7 @@ impl Bank { stake_account.stake_state(), vote_state, Some(stake_history), - self.new_warmup_cooldown_rate_epoch(), + new_warmup_cooldown_rate_epoch, ) .unwrap_or(0) }) @@ -3026,6 +3026,7 @@ impl Bank { thread_pool: &ThreadPool, metrics: &RewardsMetrics, ) -> Option { + let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); let (points, measure) = measure!(thread_pool.install(|| { vote_with_stake_delegations_map .par_iter() @@ -3043,7 +3044,7 @@ impl Bank { stake_account.stake_state(), vote_state, Some(stake_history), - self.new_warmup_cooldown_rate_epoch(), + new_warmup_cooldown_rate_epoch, ) .unwrap_or(0) }) @@ -3089,6 +3090,7 @@ impl Bank { VoteAccount::try_from(account).ok() }; + let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); let vote_account_rewards: VoteRewards = DashMap::new(); let total_stake_rewards = AtomicU64::default(); let (stake_rewards, measure_stake_rewards_us) = measure_us!(thread_pool.install(|| { @@ -3130,7 +3132,7 @@ impl Bank { &point_value, Some(stake_history), reward_calc_tracer.as_ref(), - self.new_warmup_cooldown_rate_epoch(), + new_warmup_cooldown_rate_epoch, ); let post_lamport = stake_account.lamports(); @@ -3202,6 +3204,7 @@ impl Bank { reward_calc_tracer: Option, metrics: &mut RewardsMetrics, ) -> (VoteRewards, StakeRewards) { + let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); let vote_account_rewards: VoteRewards = DashMap::with_capacity(vote_with_stake_delegations_map.len()); let stake_delegation_iterator = 
vote_with_stake_delegations_map.into_par_iter().flat_map( @@ -3248,7 +3251,7 @@ impl Bank { &point_value, Some(stake_history), reward_calc_tracer.as_ref(), - self.new_warmup_cooldown_rate_epoch(), + new_warmup_cooldown_rate_epoch, ); if let Ok((stakers_reward, voters_reward)) = redeemed { // track voter rewards @@ -6604,11 +6607,12 @@ impl Bank { ) { assert!(!self.freeze_started()); let mut m = Measure::start("stakes_cache.check_and_store"); + let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); (0..accounts.len()).for_each(|i| { self.stakes_cache.check_and_store( accounts.pubkey(i), accounts.account(i), - self.new_warmup_cooldown_rate_epoch(), + new_warmup_cooldown_rate_epoch, ) }); self.rc.accounts.store_accounts_cached(accounts); @@ -7730,6 +7734,7 @@ impl Bank { ) { debug_assert_eq!(txs.len(), execution_results.len()); debug_assert_eq!(txs.len(), loaded_txs.len()); + let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); izip!(txs, execution_results, loaded_txs) .filter(|(_, execution_result, _)| execution_result.was_executed_successfully()) .flat_map(|(tx, _, (load_result, _))| { @@ -7741,11 +7746,8 @@ impl Bank { .for_each(|(pubkey, account)| { // note that this could get timed to: self.rc.accounts.accounts_db.stats.stakes_cache_check_and_store_us, // but this code path is captured separately in ExecuteTimingType::UpdateStakesCacheUs - self.stakes_cache.check_and_store( - pubkey, - account, - self.new_warmup_cooldown_rate_epoch(), - ); + self.stakes_cache + .check_and_store(pubkey, account, new_warmup_cooldown_rate_epoch); }); } diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 418d3287484486..9a52f20dd25083 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -20,6 +20,7 @@ use { lazy_static::lazy_static, + solana_program::{epoch_schedule::EpochSchedule, stake_history::Epoch}, solana_sdk::{ clock::Slot, hash::{Hash, Hasher}, @@ -665,18 +666,7 @@ pub mod last_restart_slot_sysvar { } 
pub mod reduce_stake_warmup_cooldown { - use solana_program::{epoch_schedule::EpochSchedule, stake_history::Epoch}; solana_sdk::declare_id!("GwtDQBghCTBgmX2cpEGNPxTEBUTQRaDMGTr5qychdGMj"); - - pub trait NewWarmupCooldownRateEpoch { - fn new_warmup_cooldown_rate_epoch(&self, epoch_schedule: &EpochSchedule) -> Option; - } - impl NewWarmupCooldownRateEpoch for super::FeatureSet { - fn new_warmup_cooldown_rate_epoch(&self, epoch_schedule: &EpochSchedule) -> Option { - self.activated_slot(&id()) - .map(|slot| epoch_schedule.get_epoch(slot)) - } - } } pub mod revise_turbine_epoch_stakes { @@ -960,6 +950,11 @@ impl FeatureSet { self.active.remove(feature_id); self.inactive.insert(*feature_id); } + + pub fn new_warmup_cooldown_rate_epoch(&self, epoch_schedule: &EpochSchedule) -> Option { + self.activated_slot(&reduce_stake_warmup_cooldown::id()) + .map(|slot| epoch_schedule.get_epoch(slot)) + } } #[cfg(test)] From 648149656422942933e42a7fb693bab2c8d5dab2 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 15 Sep 2023 07:27:30 -0400 Subject: [PATCH 097/407] Derives Pod and Zeroable on Hash (#33256) --- sdk/program/src/hash.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sdk/program/src/hash.rs b/sdk/program/src/hash.rs index 64f5ecbee12b11..27d481b62b5441 100644 --- a/sdk/program/src/hash.rs +++ b/sdk/program/src/hash.rs @@ -6,6 +6,7 @@ use { crate::{sanitize::Sanitize, wasm_bindgen}, borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, + bytemuck::{Pod, Zeroable}, sha2::{Digest, Sha256}, std::{convert::TryFrom, fmt, mem, str::FromStr}, thiserror::Error, @@ -42,6 +43,8 @@ const MAX_BASE58_LEN: usize = 44; PartialOrd, Hash, AbiExample, + Pod, + Zeroable, )] #[repr(transparent)] pub struct Hash(pub(crate) [u8; HASH_BYTES]); From 0df6749550cc882d0c91b8485b6a47b51e0bab47 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 13:30:53 +0000 Subject: [PATCH 098/407] build(deps): bump syn from 2.0.32 to 
2.0.33 (#33264) * build(deps): bump syn from 2.0.32 to 2.0.33 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.32 to 2.0.33. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.32...2.0.33) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 44 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 42 +++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d84a664601370b..50876eb549cca5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,7 +436,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -590,7 +590,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1496,7 +1496,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1507,7 +1507,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1699,7 +1699,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1799,7 +1799,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -2074,7 +2074,7 @@ checksum = 
"89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -3391,7 +3391,7 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -3898,7 +3898,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -4061,7 +4061,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -4749,7 +4749,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -4794,7 +4794,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -4844,7 +4844,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -5955,7 +5955,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -6962,7 +6962,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -7705,9 +7705,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.32" +version = "2.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" +checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" dependencies = [ "proc-macro2", "quote", @@ -7910,7 +7910,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + 
"syn 2.0.33", ] [[package]] @@ -8049,7 +8049,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -8540,7 +8540,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", "wasm-bindgen-shared", ] @@ -8574,7 +8574,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8920,7 +8920,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 68ac072f0a03ce..471e5c9123e381 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -410,7 +410,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -564,7 +564,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1206,7 +1206,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1217,7 +1217,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1392,7 +1392,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1495,7 +1495,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1744,7 +1744,7 @@ checksum = 
"89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -3009,7 +3009,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -3445,7 +3445,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -3580,7 +3580,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -4158,7 +4158,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -4203,7 +4203,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -4959,7 +4959,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -6062,7 +6062,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -6627,9 +6627,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.32" +version = "2.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" +checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" dependencies = [ "proc-macro2", "quote", @@ -6777,7 +6777,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -6900,7 +6900,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 
2.0.33", ] [[package]] @@ -7377,7 +7377,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", "wasm-bindgen-shared", ] @@ -7411,7 +7411,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7748,7 +7748,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] From d4946ddfaaa1753d6fb5f94c437ea3f91e623f40 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 15 Sep 2023 22:24:43 +0800 Subject: [PATCH 099/407] ci: move solana-cargo-build-sbf to nextest (#33257) --- .buildkite/scripts/build-stable.sh | 2 +- ci/stable/run-partition.sh | 45 +++++++----------------------- nextest.toml | 4 +++ 3 files changed, 15 insertions(+), 36 deletions(-) diff --git a/.buildkite/scripts/build-stable.sh b/.buildkite/scripts/build-stable.sh index d7104eeae47130..e1d774e1669ab8 100755 --- a/.buildkite/scripts/build-stable.sh +++ b/.buildkite/scripts/build-stable.sh @@ -15,7 +15,7 @@ partitions=$( "command": ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/stable/run-partition.sh", "timeout_in_minutes": 30, "agent": "$agent", - "parallelism": 3, + "parallelism": 2, "retry": 3 } EOF diff --git a/ci/stable/run-partition.sh b/ci/stable/run-partition.sh index e9d6bb3269ec19..cb9fe8575ef66f 100755 --- a/ci/stable/run-partition.sh +++ b/ci/stable/run-partition.sh @@ -20,45 +20,20 @@ INDEX=${1:-"$BUILDKITE_PARALLEL_JOB"} # BUILDKITE_PARALLEL_JOB from 0 to (BUILDK LIMIT=${2:-"$BUILDKITE_PARALLEL_JOB_COUNT"} : "${LIMIT:?}" -if [ "$LIMIT" -lt 2 ]; then - echo "LIMIT(\$2) should >= 2" - exit 1 -fi - if [ ! 
"$LIMIT" -gt "$INDEX" ]; then echo "LIMIT(\$2) should greater than INDEX(\$1)" exit 1 fi -DONT_USE_NEXTEST_PACKAGES=( - solana-cargo-build-sbf +ARGS=( + --profile ci + --config-file ./nextest.toml + --workspace + --tests + --jobs "$JOBS" + --partition hash:"$((INDEX + 1))/$LIMIT" + --verbose + --exclude solana-local-cluster ) -if [ "$INDEX" -eq "$((LIMIT - 1))" ]; then - ARGS=( - --jobs "$JOBS" - --tests - --verbose - ) - for package in "${DONT_USE_NEXTEST_PACKAGES[@]}"; do - ARGS+=(-p "$package") - done - - _ cargo test "${ARGS[@]}" -else - ARGS=( - --profile ci - --config-file ./nextest.toml - --workspace - --tests - --jobs "$JOBS" - --partition hash:"$((INDEX + 1))/$((LIMIT - 1))" - --verbose - --exclude solana-local-cluster - ) - for package in "${DONT_USE_NEXTEST_PACKAGES[@]}"; do - ARGS+=(--exclude "$package") - done - - _ cargo nextest run "${ARGS[@]}" -fi +_ cargo nextest run "${ARGS[@]}" diff --git a/nextest.toml b/nextest.toml index 083c1c74cf9719..33dba724c027af 100644 --- a/nextest.toml +++ b/nextest.toml @@ -23,3 +23,7 @@ threads-required = "num-cpus" [[profile.ci.overrides]] filter = "package(solana-gossip) & test(/^cluster_info::tests::new_with_external_ip_test_random/)" threads-required = "num-cpus" + +[[profile.ci.overrides]] +filter = "package(solana-cargo-build-sbf)" +threads-required = "num-cpus" From 4c42413c1f72ef167c63aa9c961bee9183d94782 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 15 Sep 2023 11:09:01 -0400 Subject: [PATCH 100/407] Refactors out `unsafe` from MmapAccountHashesFile::read() (#33266) --- accounts-db/src/accounts_hash.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 30b38714896ec9..c9fb1aae2c5ac9 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -42,12 +42,9 @@ impl MmapAccountHashesFile { /// return a slice of account hashes starting at 'index' fn read(&self, index: usize) -> &[Hash] 
{ let start = std::mem::size_of::() * index; - let item_slice: &[u8] = &self.mmap[start..self.count * std::mem::size_of::()]; - let remaining_elements = item_slice.len() / std::mem::size_of::(); - unsafe { - let item = item_slice.as_ptr() as *const Hash; - std::slice::from_raw_parts(item, remaining_elements) - } + let end = std::mem::size_of::() * self.count; + let bytes = &self.mmap[start..end]; + bytemuck::cast_slice(bytes) } /// write a hash to the end of mmap file. From f77b3d938921cc51e617768526a89153c55a3e90 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 15 Sep 2023 12:47:38 -0400 Subject: [PATCH 101/407] Derives Pod for CalculateHashIntermediate (#33267) --- accounts-db/src/accounts_db.rs | 38 +++++++++++++++--------------- accounts-db/src/accounts_hash.rs | 24 ++++++++++++------- accounts-db/src/cache_hash_data.rs | 4 ++-- 3 files changed, 37 insertions(+), 29 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 7316d96d9d1615..14294d73a9db6e 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -10667,10 +10667,10 @@ pub mod tests { ) .unwrap(); let mut expected = vec![Vec::new(); bins]; - expected[0].push(raw_expected[0].clone()); - expected[0].push(raw_expected[1].clone()); - expected[bins - 1].push(raw_expected[2].clone()); - expected[bins - 1].push(raw_expected[3].clone()); + expected[0].push(raw_expected[0]); + expected[0].push(raw_expected[1]); + expected[bins - 1].push(raw_expected[2]); + expected[bins - 1].push(raw_expected[3]); assert_scan(result, vec![expected], bins, 0, bins); let bins = 4; @@ -10689,10 +10689,10 @@ pub mod tests { ) .unwrap(); let mut expected = vec![Vec::new(); bins]; - expected[0].push(raw_expected[0].clone()); - expected[1].push(raw_expected[1].clone()); - expected[2].push(raw_expected[2].clone()); - expected[bins - 1].push(raw_expected[3].clone()); + expected[0].push(raw_expected[0]); + expected[1].push(raw_expected[1]); + 
expected[2].push(raw_expected[2]); + expected[bins - 1].push(raw_expected[3]); assert_scan(result, vec![expected], bins, 0, bins); let bins = 256; @@ -10711,10 +10711,10 @@ pub mod tests { ) .unwrap(); let mut expected = vec![Vec::new(); bins]; - expected[0].push(raw_expected[0].clone()); - expected[127].push(raw_expected[1].clone()); - expected[128].push(raw_expected[2].clone()); - expected[bins - 1].push(raw_expected.last().unwrap().clone()); + expected[0].push(raw_expected[0]); + expected[127].push(raw_expected[1]); + expected[128].push(raw_expected[2]); + expected[bins - 1].push(*raw_expected.last().unwrap()); assert_scan(result, vec![expected], bins, 0, bins); } @@ -10773,8 +10773,8 @@ pub mod tests { ) .unwrap(); let mut expected = vec![Vec::new(); half_bins]; - expected[0].push(raw_expected[0].clone()); - expected[0].push(raw_expected[1].clone()); + expected[0].push(raw_expected[0]); + expected[0].push(raw_expected[1]); assert_scan(result, vec![expected], bins, 0, half_bins); // just the second bin of 2 @@ -10795,8 +10795,8 @@ pub mod tests { let mut expected = vec![Vec::new(); half_bins]; let starting_bin_index = 0; - expected[starting_bin_index].push(raw_expected[2].clone()); - expected[starting_bin_index].push(raw_expected[3].clone()); + expected[starting_bin_index].push(raw_expected[2]); + expected[starting_bin_index].push(raw_expected[3]); assert_scan(result, vec![expected], bins, 1, bins - 1); // 1 bin at a time of 4 @@ -10818,7 +10818,7 @@ pub mod tests { ) .unwrap(); let mut expected = vec![Vec::new(); 1]; - expected[0].push(expected_item.clone()); + expected[0].push(*expected_item); assert_scan(result, vec![expected], bins, bin, 1); } @@ -10843,7 +10843,7 @@ pub mod tests { let mut expected = vec![]; if let Some(index) = bin_locations.iter().position(|&r| r == bin) { expected = vec![Vec::new(); range]; - expected[0].push(raw_expected[index].clone()); + expected[0].push(raw_expected[index]); } let mut result2 = (0..range).map(|_| 
Vec::default()).collect::>(); if let Some(m) = result.get(0) { @@ -10888,7 +10888,7 @@ pub mod tests { .unwrap(); assert_eq!(result.len(), 1); // 2 chunks, but 1 is empty so not included let mut expected = vec![Vec::new(); range]; - expected[0].push(raw_expected[1].clone()); + expected[0].push(raw_expected[1]); let mut result2 = (0..range).map(|_| Vec::default()).collect::>(); result[0].load_all(&mut result2, 0, &PubkeyBinCalculator24::new(range)); assert_eq!(result2.len(), 1); diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index c9fb1aae2c5ac9..77bdc31601f625 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -6,6 +6,7 @@ use { pubkey_bins::PubkeyBinCalculator24, rent_collector::RentCollector, }, + bytemuck::{Pod, Zeroable}, log::*, memmap2::MmapMut, rayon::prelude::*, @@ -277,13 +278,20 @@ impl HashStats { /// Note this can be saved/loaded during hash calculation to a memory mapped file whose contents are /// [CalculateHashIntermediate] #[repr(C)] -#[derive(Default, Debug, PartialEq, Eq, Clone)] +#[derive(Default, Debug, PartialEq, Eq, Clone, Copy, Pod, Zeroable)] pub struct CalculateHashIntermediate { pub hash: Hash, pub lamports: u64, pub pubkey: Pubkey, } +// In order to safely guarantee CalculateHashIntermediate is Pod, it cannot have any padding +const _: () = assert!( + std::mem::size_of::() + == std::mem::size_of::() + std::mem::size_of::() + std::mem::size_of::(), + "CalculateHashIntermediate cannot have any padding" +); + #[derive(Default, Debug, PartialEq, Eq)] pub struct CumulativeOffset { pub index: Vec, @@ -1808,7 +1816,7 @@ mod tests { lamports: 1, pubkey, }; - account_maps.push(val.clone()); + account_maps.push(val); let vecs = vec![account_maps.to_vec()]; let slice = convert_to_slice(&vecs); @@ -1846,19 +1854,19 @@ mod tests { lamports: 1, pubkey: key, }; - account_maps.push(val.clone()); + account_maps.push(val); let val2 = CalculateHashIntermediate { hash, lamports: 2, 
pubkey: key2, }; - account_maps.push(val2.clone()); + account_maps.push(val2); let val3 = CalculateHashIntermediate { hash, lamports: 3, pubkey: key2, }; - account_maps2.push(val3.clone()); + account_maps2.push(val3); let mut vecs = vec![account_maps.to_vec(), account_maps2.to_vec()]; if reverse { @@ -1896,19 +1904,19 @@ mod tests { lamports: 2, pubkey: key2, }; - account_maps.push(val2.clone()); + account_maps.push(val2); let val = CalculateHashIntermediate { hash, lamports: 1, pubkey: key, }; - account_maps.push(val.clone()); + account_maps.push(val); let val3 = CalculateHashIntermediate { hash, lamports: 3, pubkey: key2, }; - account_maps2.push(val3.clone()); + account_maps2.push(val3); let mut vecs = vec![account_maps.to_vec(), account_maps2.to_vec()]; if reverse { diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index a58bf50d030025..fbf7dce1b5baf5 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -120,7 +120,7 @@ impl CacheHashDataFile { "{pubkey_to_bin_index}, {start_bin_index}" ); // this would indicate we put a pubkey in too high of a bin pubkey_to_bin_index -= start_bin_index; - accumulator[pubkey_to_bin_index].push(d.clone()); // may want to avoid clone here + accumulator[pubkey_to_bin_index].push(*d); // may want to avoid copy here } m2.stop(); @@ -348,7 +348,7 @@ impl CacheHashData { x.iter().for_each(|item| { let d = cache_file.get_mut(i as u64); i += 1; - *d = item.clone(); + *d = *item; }) }); assert_eq!(i, entries); From a47f65d882eb0f61273b7ce6470241ed42b3bce2 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 15 Sep 2023 12:49:35 -0400 Subject: [PATCH 102/407] Refactors `unsafe` out of CacheHashDataFile's header (#33270) --- accounts-db/src/cache_hash_data.rs | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index fbf7dce1b5baf5..4010361b57d747 100644 --- 
a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -3,6 +3,7 @@ use crate::pubkey_bins::PubkeyBinCalculator24; use { crate::{accounts_hash::CalculateHashIntermediate, cache_hash_data_stats::CacheHashDataStats}, + bytemuck::{Pod, Zeroable}, memmap2::MmapMut, solana_measure::measure::Measure, std::{ @@ -19,10 +20,19 @@ pub type SavedType = Vec>; pub type SavedTypeSlice = [Vec]; #[repr(C)] +#[derive(Debug, Clone, Copy, Pod, Zeroable)] pub struct Header { count: usize, } +// In order to safely guarantee Header is Pod, it cannot have any padding +// This is obvious by inspection, but this will also catch any inadvertent +// changes in the future (i.e. it is a test). +const _: () = assert!( + std::mem::size_of::
() == std::mem::size_of::(), + "Header cannot have any padding" +); + /// cache hash data file to be mmapped later pub(crate) struct CacheHashDataFileReference { file: File, @@ -169,13 +179,8 @@ impl CacheHashDataFile { } fn get_header_mut(&mut self) -> &mut Header { - let start = 0_usize; - let end = start + std::mem::size_of::
(); - let item_slice: &[u8] = &self.mmap[start..end]; - unsafe { - let item = item_slice.as_ptr() as *mut Header; - &mut *item - } + let bytes = &mut self.mmap[..std::mem::size_of::
()]; + bytemuck::from_bytes_mut(bytes) } fn new_map(file: impl AsRef, capacity: u64) -> Result { From 6283c1d568575e20b1a6074932f0253b40438473 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 15 Sep 2023 14:25:43 -0400 Subject: [PATCH 103/407] Refactors out `unsafe` from cache_hash_data.rs (#33271) --- accounts-db/src/cache_hash_data.rs | 40 +++++++++++------------------- 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index 4010361b57d747..630d650b36f2b9 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -138,22 +138,25 @@ impl CacheHashDataFile { /// get '&mut EntryType' from cache file [ix] fn get_mut(&mut self, ix: u64) -> &mut EntryType { - let item_slice = self.get_slice_internal(ix); - unsafe { - let item = item_slice.as_ptr() as *mut EntryType; - &mut *item - } + let start = self.get_element_offset_byte(ix); + let end = start + std::mem::size_of::(); + assert!( + end <= self.capacity as usize, + "end: {end}, capacity: {}, ix: {ix}, cell size: {}", + self.capacity, + self.cell_size, + ); + let bytes = &mut self.mmap[start..end]; + bytemuck::from_bytes_mut(bytes) } /// get '&[EntryType]' from cache file [ix..] 
fn get_slice(&self, ix: u64) -> &[EntryType] { let start = self.get_element_offset_byte(ix); - let item_slice: &[u8] = &self.mmap[start..]; - let remaining_elements = item_slice.len() / std::mem::size_of::(); - unsafe { - let item = item_slice.as_ptr() as *const EntryType; - std::slice::from_raw_parts(item, remaining_elements) - } + let bytes = &self.mmap[start..]; + // the `bytes` slice *must* contain whole `EntryType`s + debug_assert_eq!(bytes.len() % std::mem::size_of::(), 0); + bytemuck::cast_slice(bytes) } /// return byte offset of entry 'ix' into a slice which contains a header and at least ix elements @@ -163,21 +166,6 @@ impl CacheHashDataFile { start } - /// get the bytes representing cache file [ix] - fn get_slice_internal(&self, ix: u64) -> &[u8] { - let start = self.get_element_offset_byte(ix); - let end = start + std::mem::size_of::(); - assert!( - end <= self.capacity as usize, - "end: {}, capacity: {}, ix: {}, cell size: {}", - end, - self.capacity, - ix, - self.cell_size - ); - &self.mmap[start..end] - } - fn get_header_mut(&mut self) -> &mut Header { let bytes = &mut self.mmap[..std::mem::size_of::
()]; bytemuck::from_bytes_mut(bytes) From 6300a43f5caf763898b7ef717c43c9c72ae5b986 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Sat, 16 Sep 2023 13:11:47 -0700 Subject: [PATCH 104/407] Set loader-v4 program deployment slot at actual deployment (#33278) --- programs/loader-v4/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 312c14f7acf888..39e897270f9254 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -341,7 +341,7 @@ pub fn process_instruction_truncate( )?; if is_initialization { let state = get_state_mut(program.get_data_mut()?)?; - state.slot = invoke_context.get_sysvar_cache().get_clock()?.slot; + state.slot = 0; state.status = LoaderV4Status::Retracted; state.authority_address = *authority_address; } From 6db57f81db2c56e2db28e34bf6bc5a206c807db3 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Sat, 16 Sep 2023 13:12:27 -0700 Subject: [PATCH 105/407] Fix lamport calculation and transfer for loader-v4 program upgrade (#33279) --- programs/loader-v4/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 39e897270f9254..3ea4f60f70d32e 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -429,8 +429,8 @@ pub fn process_instruction_deploy( load_program_metrics.submit_datapoint(&mut invoke_context.timings); if let Some(mut source_program) = source_program { let rent = invoke_context.get_sysvar_cache().get_rent()?; - let required_lamports = rent.minimum_balance(program.get_data().len()); - let transfer_lamports = program.get_lamports().saturating_sub(required_lamports); + let required_lamports = rent.minimum_balance(source_program.get_data().len()); + let transfer_lamports = required_lamports.saturating_sub(program.get_lamports()); program.set_data_from_slice(source_program.get_data())?; 
source_program.set_data_length(0)?; source_program.checked_sub_lamports(transfer_lamports)?; From 694e7b94564280cfd010e0f72494778734f4507d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:54:52 +0000 Subject: [PATCH 106/407] build(deps): bump chrono from 0.4.30 to 0.4.31 (#33284) * build(deps): bump chrono from 0.4.30 to 0.4.31 Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.30 to 0.4.31. - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.30...v0.4.31) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 50876eb549cca5..b02dbf44c5afee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1017,9 +1017,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.30" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", diff --git a/Cargo.toml b/Cargo.toml index e1da505b587049..cd02cca5a429ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -163,7 +163,7 @@ bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" cc = "1.0.83" -chrono = { version = "0.4.30", 
default-features = false } +chrono = { version = "0.4.31", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" console = "0.15.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 471e5c9123e381..a4e0eb619ef873 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -901,9 +901,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.30" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", From f20466d27877cd0203af7b24111f1471af2c8342 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:55:15 +0000 Subject: [PATCH 107/407] build(deps): bump syn from 2.0.33 to 2.0.37 (#33286) * build(deps): bump syn from 2.0.33 to 2.0.37 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.33 to 2.0.37. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.33...2.0.37) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 44 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 42 +++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b02dbf44c5afee..e82b348a7f706e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,7 +436,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -590,7 +590,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -1496,7 +1496,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -1507,7 +1507,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -1699,7 +1699,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -1799,7 +1799,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -2074,7 +2074,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -3391,7 +3391,7 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -3898,7 +3898,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -4061,7 +4061,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -4749,7 +4749,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -4794,7 +4794,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -4844,7 +4844,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -5955,7 +5955,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -6962,7 +6962,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -7705,9 +7705,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.33" +version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" +checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", @@ -7910,7 +7910,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -8049,7 +8049,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -8540,7 +8540,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", "wasm-bindgen-shared", ] @@ -8574,7 +8574,7 @@ checksum = 
"54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8920,7 +8920,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a4e0eb619ef873..63ed6c10914881 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -410,7 +410,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -564,7 +564,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -1206,7 +1206,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -1217,7 +1217,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -1392,7 +1392,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -1495,7 +1495,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -1744,7 +1744,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -3009,7 +3009,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -3445,7 +3445,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -3580,7 +3580,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -4158,7 +4158,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -4203,7 +4203,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -4959,7 +4959,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -6062,7 +6062,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -6627,9 +6627,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.33" +version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" +checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", @@ -6777,7 +6777,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -6900,7 +6900,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -7377,7 +7377,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", "wasm-bindgen-shared", ] @@ -7411,7 +7411,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7748,7 
+7748,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] From 196a3540935b6948add5d6ca3708462a71fde2aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:26:04 +0000 Subject: [PATCH 108/407] build(deps): bump test-case from 3.1.0 to 3.2.1 (#33285) Bumps [test-case](https://github.com/frondeus/test-case) from 3.1.0 to 3.2.1. - [Release notes](https://github.com/frondeus/test-case/releases) - [Changelog](https://github.com/frondeus/test-case/blob/master/CHANGELOG.md) - [Commits](https://github.com/frondeus/test-case/compare/v3.1.0...v3.2.1) --- updated-dependencies: - dependency-name: test-case dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e82b348a7f706e..a479d9124e2d49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7845,36 +7845,36 @@ checksum = "13a4ec180a2de59b57434704ccfad967f789b12737738798fa08798cd5824c16" [[package]] name = "test-case" -version = "3.1.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a1d6e7bde536b0412f20765b76e921028059adfd1b90d8974d33fd3c91b25df" +checksum = "c8f1e820b7f1d95a0cdbf97a5df9de10e1be731983ab943e56703ac1b8e9d425" dependencies = [ "test-case-macros", ] [[package]] name = "test-case-core" -version = "3.1.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d10394d5d1e27794f772b6fc854c7e91a2dc26e2cbf807ad523370c2a59c0cee" +checksum = "54c25e2cb8f5fcd7318157634e8838aa6f7e4715c96637f969fabaccd1ef5462" dependencies = [ "cfg-if 1.0.0", "proc-macro-error", 
"proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.33", ] [[package]] name = "test-case-macros" -version = "3.1.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb9a44b1c6a54c1ba58b152797739dba2a83ca74e18168a68c980eb142f9404" +checksum = "37cfd7bbc88a0104e304229fba519bdc45501a30b760fb72240342f1289ad257" dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.33", "test-case-core", ] diff --git a/Cargo.toml b/Cargo.toml index cd02cca5a429ce..3653d88c1507fb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -389,7 +389,7 @@ systemstat = "0.2.3" tar = "0.4.40" tarpc = "0.29.0" tempfile = "3.8.0" -test-case = "3.1.0" +test-case = "3.2.1" thiserror = "1.0.48" tiny-bip39 = "0.8.2" tokio = "1.29.1" From 5dbc19ccbf3e73df44f96ca185d121358aefcd2d Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Mon, 18 Sep 2023 06:43:33 -0700 Subject: [PATCH 109/407] Processors to compile and send LoaderV4 CLI commands (#33228) * Processors to compile and send LoaderV4 CLI commands * suppress unused code warning * clippy fixes * redeploy program using source buffer * unify deploy and redeploy to a single function * clippy fixes * fixes after testing the CLI frontend --- Cargo.lock | 1 + cli/Cargo.toml | 1 + cli/src/lib.rs | 1 + cli/src/program.rs | 2 +- cli/src/program_v4.rs | 1004 ++++++++++++++++++++++++++++++++++ sdk/program/src/loader_v4.rs | 29 +- 6 files changed, 1028 insertions(+), 10 deletions(-) create mode 100644 cli/src/program_v4.rs diff --git a/Cargo.lock b/Cargo.lock index a479d9124e2d49..5c214c9c2b1cba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5547,6 +5547,7 @@ dependencies = [ "solana-client", "solana-config-program", "solana-faucet", + "solana-loader-v4-program", "solana-logger", "solana-program-runtime", "solana-pubsub-client", diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 9879b06218c61a..01d773ff9eaa4c 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -36,6 +36,7 @@ solana-cli-output = 
{ workspace = true } solana-client = { workspace = true } solana-config-program = { workspace = true } solana-faucet = { workspace = true } +solana-loader-v4-program = { workspace = true } solana-logger = { workspace = true } solana-program-runtime = { workspace = true } solana-pubsub-client = { workspace = true } diff --git a/cli/src/lib.rs b/cli/src/lib.rs index e55b14a85424ed..e4e925b5872f1b 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -34,6 +34,7 @@ pub mod inflation; pub mod memo; pub mod nonce; pub mod program; +pub mod program_v4; pub mod spend_utils; pub mod stake; pub mod test_utils; diff --git a/cli/src/program.rs b/cli/src/program.rs index 81f6b3a3f57f00..1c6bf988615add 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -1719,7 +1719,7 @@ fn process_close( } } -fn calculate_max_chunk_size(create_msg: &F) -> usize +pub fn calculate_max_chunk_size(create_msg: &F) -> usize where F: Fn(u32, Vec) -> Message, { diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs new file mode 100644 index 00000000000000..bb1a844da2771a --- /dev/null +++ b/cli/src/program_v4.rs @@ -0,0 +1,1004 @@ +use { + crate::{ + checks::*, + cli::{log_instruction_custom_error, CliConfig, ProcessResult}, + program::calculate_max_chunk_size, + }, + log::*, + solana_cli_output::CliProgramId, + solana_client::{ + connection_cache::ConnectionCache, + send_and_confirm_transactions_in_parallel::{ + send_and_confirm_transactions_in_parallel_blocking, SendAndConfirmConfig, + }, + tpu_client::{TpuClient, TpuClientConfig}, + }, + solana_rpc_client::rpc_client::RpcClient, + solana_rpc_client_api::config::RpcSendTransactionConfig, + solana_sdk::{ + account::Account, + hash::Hash, + instruction::Instruction, + loader_v4::{ + self, LoaderV4State, + LoaderV4Status::{self, Retracted}, + }, + message::Message, + pubkey::Pubkey, + signature::Signer, + system_instruction::{self, SystemError}, + transaction::Transaction, + }, + std::{cmp::Ordering, sync::Arc}, +}; + +// This function can 
be used for the following use-cases +// * Deploy a program +// - buffer_signer argument must contain program signer information +// (program_address must be same as buffer_signer.pubkey()) +// * Redeploy a program using original program account +// - buffer_signer argument must be None +// * Redeploy a program using a buffer account +// - buffer_signer argument must contain the temporary buffer account information +// (program_address must contain program ID and must NOT be same as buffer_signer.pubkey()) +#[allow(dead_code)] +fn process_deploy_program( + rpc_client: Arc, + config: &CliConfig, + program_data: &[u8], + program_data_len: u32, + program_address: &Pubkey, + buffer_signer: Option<&dyn Signer>, + authority_signer: &dyn Signer, +) -> ProcessResult { + let blockhash = rpc_client.get_latest_blockhash()?; + let payer_pubkey = config.signers[0].pubkey(); + + let (initial_messages, balance_needed, buffer_address) = + if let Some(buffer_signer) = buffer_signer { + let buffer_address = buffer_signer.pubkey(); + let (create_buffer_message, required_lamports) = build_create_buffer_message( + rpc_client.clone(), + config, + program_address, + &buffer_address, + &payer_pubkey, + &authority_signer.pubkey(), + program_data_len, + &blockhash, + )?; + + if let Some(message) = create_buffer_message { + (vec![message], required_lamports, buffer_address) + } else { + (vec![], 0, buffer_address) + } + } else { + build_retract_and_truncate_messages( + rpc_client.clone(), + config, + program_data_len, + program_address, + authority_signer, + ) + .map(|(messages, balance_needed)| (messages, balance_needed, *program_address))? 
+ }; + + // Create and add write messages + let create_msg = |offset: u32, bytes: Vec| { + let instruction = + loader_v4::write(&buffer_address, &authority_signer.pubkey(), offset, bytes); + Message::new_with_blockhash(&[instruction], Some(&payer_pubkey), &blockhash) + }; + + let mut write_messages = vec![]; + let chunk_size = calculate_max_chunk_size(&create_msg); + for (chunk, i) in program_data.chunks(chunk_size).zip(0..) { + write_messages.push(create_msg((i * chunk_size) as u32, chunk.to_vec())); + } + + let final_messages = if *program_address != buffer_address { + build_retract_and_deploy_messages( + rpc_client.clone(), + config, + program_address, + &buffer_address, + authority_signer, + )? + } else { + // Create and add deploy message + vec![Message::new_with_blockhash( + &[loader_v4::deploy( + program_address, + &authority_signer.pubkey(), + )], + Some(&payer_pubkey), + &blockhash, + )] + }; + + check_payer( + &rpc_client, + config, + balance_needed, + &initial_messages, + &write_messages, + &final_messages, + )?; + + send_messages( + rpc_client, + config, + &initial_messages, + &write_messages, + &final_messages, + buffer_signer, + authority_signer, + )?; + + let program_id = CliProgramId { + program_id: program_address.to_string(), + }; + Ok(config.output_format.formatted_string(&program_id)) +} + +#[allow(dead_code)] +fn process_undeploy_program( + rpc_client: Arc, + config: &CliConfig, + program_address: &Pubkey, + authority_signer: &dyn Signer, +) -> ProcessResult { + let blockhash = rpc_client.get_latest_blockhash()?; + let payer_pubkey = config.signers[0].pubkey(); + + let Some(program_account) = rpc_client + .get_account_with_commitment(program_address, config.commitment)? 
+ .value + else { + return Err("Program account does not exist".into()); + }; + + let retract_instruction = build_retract_instruction( + &program_account, + program_address, + &authority_signer.pubkey(), + )?; + + let mut initial_messages = if let Some(instruction) = retract_instruction { + vec![Message::new_with_blockhash( + &[instruction], + Some(&payer_pubkey), + &blockhash, + )] + } else { + vec![] + }; + + let truncate_instruction = loader_v4::truncate( + program_address, + &authority_signer.pubkey(), + 0, + &payer_pubkey, + ); + + initial_messages.push(Message::new_with_blockhash( + &[truncate_instruction], + Some(&payer_pubkey), + &blockhash, + )); + + check_payer(&rpc_client, config, 0, &initial_messages, &[], &[])?; + + send_messages( + rpc_client, + config, + &initial_messages, + &[], + &[], + None, + authority_signer, + )?; + + let program_id = CliProgramId { + program_id: program_address.to_string(), + }; + Ok(config.output_format.formatted_string(&program_id)) +} + +#[allow(dead_code)] +fn process_finalize_program( + rpc_client: Arc, + config: &CliConfig, + program_address: &Pubkey, + authority_signer: &dyn Signer, +) -> ProcessResult { + let blockhash = rpc_client.get_latest_blockhash()?; + let payer_pubkey = config.signers[0].pubkey(); + + let message = [Message::new_with_blockhash( + &[loader_v4::transfer_authority( + program_address, + &authority_signer.pubkey(), + None, + )], + Some(&payer_pubkey), + &blockhash, + )]; + check_payer(&rpc_client, config, 0, &message, &[], &[])?; + + send_messages( + rpc_client, + config, + &message, + &[], + &[], + None, + authority_signer, + )?; + + let program_id = CliProgramId { + program_id: program_address.to_string(), + }; + Ok(config.output_format.formatted_string(&program_id)) +} + +#[allow(dead_code)] +fn check_payer( + rpc_client: &RpcClient, + config: &CliConfig, + balance_needed: u64, + initial_messages: &[Message], + write_messages: &[Message], + other_messages: &[Message], +) -> Result<(), Box> { + let 
mut fee = 0; + for message in initial_messages { + fee += rpc_client.get_fee_for_message(message)?; + } + for message in other_messages { + fee += rpc_client.get_fee_for_message(message)?; + } + if !write_messages.is_empty() { + // Assume all write messages cost the same + if let Some(message) = write_messages.get(0) { + fee += rpc_client.get_fee_for_message(message)? * (write_messages.len() as u64); + } + } + check_account_for_spend_and_fee_with_commitment( + rpc_client, + &config.signers[0].pubkey(), + balance_needed, + fee, + config.commitment, + )?; + Ok(()) +} + +#[allow(dead_code)] +fn send_messages( + rpc_client: Arc, + config: &CliConfig, + initial_messages: &[Message], + write_messages: &[Message], + final_messages: &[Message], + program_signer: Option<&dyn Signer>, + authority_signer: &dyn Signer, +) -> Result<(), Box> { + let payer_signer = config.signers[0]; + + for message in initial_messages { + if message.header.num_required_signatures == 3 { + // The initial message that creates the account and truncates it to the required size requires + // 3 signatures (payer, program, and authority). 
+ if let Some(initial_signer) = program_signer { + let blockhash = rpc_client.get_latest_blockhash()?; + + let mut initial_transaction = Transaction::new_unsigned(message.clone()); + initial_transaction + .try_sign(&[payer_signer, initial_signer, authority_signer], blockhash)?; + let result = + rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); + log_instruction_custom_error::(result, config) + .map_err(|err| format!("Account allocation failed: {err}"))?; + } else { + return Err("Buffer account not created yet, must provide a key pair".into()); + } + } else if message.header.num_required_signatures == 2 { + // All other messages should require 2 signatures (payer, and authority) + let blockhash = rpc_client.get_latest_blockhash()?; + + let mut initial_transaction = Transaction::new_unsigned(message.clone()); + initial_transaction.try_sign(&[payer_signer, authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); + log_instruction_custom_error::(result, config) + .map_err(|err| format!("Failed to send initial message: {err}"))?; + } else { + return Err("Initial message requires incorrect number of signatures".into()); + } + } + + if !write_messages.is_empty() { + trace!("Writing program data"); + let connection_cache = if config.use_quic { + ConnectionCache::new_quic("connection_cache_cli_program_v4_quic", 1) + } else { + ConnectionCache::with_udp("connection_cache_cli_program_v4_udp", 1) + }; + let transaction_errors = match connection_cache { + ConnectionCache::Udp(cache) => TpuClient::new_with_connection_cache( + rpc_client.clone(), + &config.websocket_url, + TpuClientConfig::default(), + cache, + )? 
+ .send_and_confirm_messages_with_spinner( + write_messages, + &[payer_signer, authority_signer], + ), + ConnectionCache::Quic(cache) => { + let tpu_client_fut = + solana_client::nonblocking::tpu_client::TpuClient::new_with_connection_cache( + rpc_client.get_inner_client().clone(), + config.websocket_url.as_str(), + solana_client::tpu_client::TpuClientConfig::default(), + cache, + ); + let tpu_client = rpc_client + .runtime() + .block_on(tpu_client_fut) + .expect("Should return a valid tpu client"); + + send_and_confirm_transactions_in_parallel_blocking( + rpc_client.clone(), + Some(tpu_client), + write_messages, + &[payer_signer, authority_signer], + SendAndConfirmConfig { + resign_txs_count: Some(5), + with_spinner: true, + }, + ) + } + } + .map_err(|err| format!("Data writes to account failed: {err}"))? + .into_iter() + .flatten() + .collect::>(); + + if !transaction_errors.is_empty() { + for transaction_error in &transaction_errors { + error!("{:?}", transaction_error); + } + return Err(format!("{} write transactions failed", transaction_errors.len()).into()); + } + } + + for message in final_messages { + let blockhash = rpc_client.get_latest_blockhash()?; + let mut final_tx = Transaction::new_unsigned(message.clone()); + final_tx.try_sign(&[payer_signer, authority_signer], blockhash)?; + rpc_client + .send_and_confirm_transaction_with_spinner_and_config( + &final_tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: true, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ) + .map_err(|e| format!("Deploying program failed: {e}"))?; + } + + Ok(()) +} + +#[allow(dead_code)] +fn build_create_buffer_message( + rpc_client: Arc, + config: &CliConfig, + program_address: &Pubkey, + buffer_address: &Pubkey, + payer_address: &Pubkey, + authority: &Pubkey, + program_data_length: u32, + blockhash: &Hash, +) -> Result<(Option, u64), Box> { + let expected_account_data_len = + 
LoaderV4State::program_data_offset().saturating_add(program_data_length as usize); + let lamports_required = + rpc_client.get_minimum_balance_for_rent_exemption(expected_account_data_len)?; + + if let Some(account) = rpc_client + .get_account_with_commitment(buffer_address, config.commitment)? + .value + { + if !loader_v4::check_id(&account.owner) { + return Err("Buffer account passed is already in use by another program".into()); + } + + if account.lamports < lamports_required || account.data.len() != expected_account_data_len { + if program_address == buffer_address { + return Err("Buffer account passed could be for a different deploy? It has different size/lamports".into()); + } + + let (truncate_instructions, balance_needed) = build_truncate_instructions( + rpc_client.clone(), + payer_address, + &account, + buffer_address, + authority, + program_data_length, + )?; + if !truncate_instructions.is_empty() { + Ok(( + Some(Message::new_with_blockhash( + &truncate_instructions, + Some(payer_address), + blockhash, + )), + balance_needed, + )) + } else { + Ok((None, 0)) + } + } else { + Ok((None, 0)) + } + } else { + Ok(( + Some(Message::new_with_blockhash( + &loader_v4::create_buffer( + payer_address, + buffer_address, + lamports_required, + authority, + program_data_length, + payer_address, + ), + Some(payer_address), + blockhash, + )), + lamports_required, + )) + } +} + +fn build_retract_and_truncate_messages( + rpc_client: Arc, + config: &CliConfig, + program_data_len: u32, + program_address: &Pubkey, + authority_signer: &dyn Signer, +) -> Result<(Vec, u64), Box> { + let payer_pubkey = config.signers[0].pubkey(); + let blockhash = rpc_client.get_latest_blockhash()?; + let Some(program_account) = rpc_client + .get_account_with_commitment(program_address, config.commitment)? 
+ .value + else { + return Err("Program account does not exist".into()); + }; + + let retract_instruction = build_retract_instruction( + &program_account, + program_address, + &authority_signer.pubkey(), + )?; + + let mut messages = if let Some(instruction) = retract_instruction { + vec![Message::new_with_blockhash( + &[instruction], + Some(&payer_pubkey), + &blockhash, + )] + } else { + vec![] + }; + + let (truncate_instructions, balance_needed) = build_truncate_instructions( + rpc_client.clone(), + &payer_pubkey, + &program_account, + program_address, + &authority_signer.pubkey(), + program_data_len, + )?; + + if !truncate_instructions.is_empty() { + messages.push(Message::new_with_blockhash( + &truncate_instructions, + Some(&payer_pubkey), + &blockhash, + )); + } + + Ok((messages, balance_needed)) +} + +fn build_retract_and_deploy_messages( + rpc_client: Arc, + config: &CliConfig, + program_address: &Pubkey, + buffer_address: &Pubkey, + authority_signer: &dyn Signer, +) -> Result, Box> { + let blockhash = rpc_client.get_latest_blockhash()?; + let payer_pubkey = config.signers[0].pubkey(); + + let Some(program_account) = rpc_client + .get_account_with_commitment(program_address, config.commitment)? 
+ .value + else { + return Err("Program account does not exist".into()); + }; + + let retract_instruction = build_retract_instruction( + &program_account, + program_address, + &authority_signer.pubkey(), + )?; + + let mut messages = if let Some(instruction) = retract_instruction { + vec![Message::new_with_blockhash( + &[instruction], + Some(&payer_pubkey), + &blockhash, + )] + } else { + vec![] + }; + + // Create and add deploy message + messages.push(Message::new_with_blockhash( + &[loader_v4::deploy_from_source( + program_address, + &authority_signer.pubkey(), + buffer_address, + )], + Some(&payer_pubkey), + &blockhash, + )); + Ok(messages) +} + +#[allow(dead_code)] +fn build_retract_instruction( + account: &Account, + buffer_address: &Pubkey, + authority: &Pubkey, +) -> Result, Box> { + if !loader_v4::check_id(&account.owner) { + return Err("Buffer account passed is already in use by another program".into()); + } + + if let Ok(LoaderV4State { + slot: _, + authority_address, + status, + }) = solana_loader_v4_program::get_state(&account.data) + { + if authority != authority_address { + return Err( + "Program authority does not match with the provided authority address".into(), + ); + } + + match status { + Retracted => Ok(None), + LoaderV4Status::Deployed => Ok(Some(loader_v4::retract(buffer_address, authority))), + LoaderV4Status::Finalized => Err("Program is immutable".into()), + } + } else { + Err("Program account's state could not be deserialized".into()) + } +} + +#[allow(dead_code)] +fn build_truncate_instructions( + rpc_client: Arc, + payer: &Pubkey, + account: &Account, + buffer_address: &Pubkey, + authority: &Pubkey, + program_data_length: u32, +) -> Result<(Vec, u64), Box> { + if !loader_v4::check_id(&account.owner) { + return Err("Buffer account passed is already in use by another program".into()); + } + + let truncate_instruction = if account.data.is_empty() { + loader_v4::truncate_uninitialized(buffer_address, authority, program_data_length, payer) + 
} else { + if let Ok(LoaderV4State { + slot: _, + authority_address, + status, + }) = solana_loader_v4_program::get_state(&account.data) + { + if authority != authority_address { + return Err( + "Program authority does not match with the provided authority address".into(), + ); + } + + if matches!(status, LoaderV4Status::Finalized) { + return Err("Program is immutable and it cannot be truncated".into()); + } + } else { + return Err("Program account's state could not be deserialized".into()); + } + + loader_v4::truncate(buffer_address, authority, program_data_length, payer) + }; + + let expected_account_data_len = + LoaderV4State::program_data_offset().saturating_add(program_data_length as usize); + + let lamports_required = + rpc_client.get_minimum_balance_for_rent_exemption(expected_account_data_len)?; + + match account.data.len().cmp(&expected_account_data_len) { + Ordering::Less => { + if account.lamports < lamports_required { + let extra_lamports_required = lamports_required.saturating_sub(account.lamports); + Ok(( + vec![ + system_instruction::transfer( + payer, + buffer_address, + extra_lamports_required, + ), + truncate_instruction, + ], + extra_lamports_required, + )) + } else { + Ok((vec![truncate_instruction], 0)) + } + } + Ordering::Equal => { + if account.lamports < lamports_required { + return Err("Program account has less lamports than required for its size".into()); + } + Ok((vec![], 0)) + } + Ordering::Greater => { + if account.lamports < lamports_required { + return Err("Program account has less lamports than required for its size".into()); + } + Ok((vec![truncate_instruction], 0)) + } + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + serde_json::json, + solana_rpc_client_api::{ + request::RpcRequest, + response::{Response, RpcResponseContext}, + }, + solana_sdk::signature::keypair_from_seed, + std::collections::HashMap, + }; + + fn program_authority() -> solana_sdk::signature::Keypair { + keypair_from_seed(&[3u8; 32]).unwrap() + } + + fn 
rpc_client_no_existing_program() -> RpcClient { + RpcClient::new_mock("succeeds".to_string()) + } + + fn rpc_client_with_program_data(data: &str, loader_is_owner: bool) -> RpcClient { + let owner = if loader_is_owner { + "LoaderV411111111111111111111111111111111111" + } else { + "Vote111111111111111111111111111111111111111" + }; + let account_info_response = json!(Response { + context: RpcResponseContext { + slot: 1, + api_version: None + }, + value: json!({ + "data": [data, "base64"], + "lamports": 42, + "owner": owner, + "executable": true, + "rentEpoch": 1, + }), + }); + let mut mocks = HashMap::new(); + mocks.insert(RpcRequest::GetAccountInfo, account_info_response); + RpcClient::new_mock_with_mocks("".to_string(), mocks) + } + + fn rpc_client_wrong_account_owner() -> RpcClient { + rpc_client_with_program_data( + "AAAAAAAAAADtSSjGKNHCxurpAziQWZVhKVknOlxj+TY2wUYUrIc30QAAAAAAAAAA", + false, + ) + } + + fn rpc_client_wrong_authority() -> RpcClient { + rpc_client_with_program_data( + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + true, + ) + } + + fn rpc_client_with_program_retracted() -> RpcClient { + rpc_client_with_program_data( + "AAAAAAAAAADtSSjGKNHCxurpAziQWZVhKVknOlxj+TY2wUYUrIc30QAAAAAAAAAA", + true, + ) + } + + fn rpc_client_with_program_deployed() -> RpcClient { + rpc_client_with_program_data( + "AAAAAAAAAADtSSjGKNHCxurpAziQWZVhKVknOlxj+TY2wUYUrIc30QEAAAAAAAAA", + true, + ) + } + + fn rpc_client_with_program_finalized() -> RpcClient { + rpc_client_with_program_data( + "AAAAAAAAAADtSSjGKNHCxurpAziQWZVhKVknOlxj+TY2wUYUrIc30QIAAAAAAAAA", + true, + ) + } + + #[test] + fn test_deploy() { + let mut config = CliConfig::default(); + let data = [5u8; 2048]; + + let payer = keypair_from_seed(&[1u8; 32]).unwrap(); + let program_signer = keypair_from_seed(&[2u8; 32]).unwrap(); + let authority_signer = program_authority(); + + config.signers.push(&payer); + + assert!(process_deploy_program( + Arc::new(rpc_client_no_existing_program()), + 
&config, + &data, + data.len() as u32, + &program_signer.pubkey(), + Some(&program_signer), + &authority_signer, + ) + .is_ok()); + + assert!(process_deploy_program( + Arc::new(rpc_client_wrong_account_owner()), + &config, + &data, + data.len() as u32, + &program_signer.pubkey(), + Some(&program_signer), + &authority_signer, + ) + .is_err()); + + assert!(process_deploy_program( + Arc::new(rpc_client_with_program_deployed()), + &config, + &data, + data.len() as u32, + &program_signer.pubkey(), + Some(&program_signer), + &authority_signer, + ) + .is_err()); + } + + #[test] + fn test_redeploy() { + let mut config = CliConfig::default(); + let data = [5u8; 2048]; + + let payer = keypair_from_seed(&[1u8; 32]).unwrap(); + let program_address = Pubkey::new_unique(); + let authority_signer = program_authority(); + + config.signers.push(&payer); + + // Redeploying a non-existent program should fail + assert!(process_deploy_program( + Arc::new(rpc_client_no_existing_program()), + &config, + &data, + data.len() as u32, + &program_address, + None, + &authority_signer, + ) + .is_err()); + + assert!(process_deploy_program( + Arc::new(rpc_client_with_program_retracted()), + &config, + &data, + data.len() as u32, + &program_address, + None, + &authority_signer, + ) + .is_ok()); + + assert!(process_deploy_program( + Arc::new(rpc_client_with_program_deployed()), + &config, + &data, + data.len() as u32, + &program_address, + None, + &authority_signer, + ) + .is_ok()); + + assert!(process_deploy_program( + Arc::new(rpc_client_with_program_finalized()), + &config, + &data, + data.len() as u32, + &program_address, + None, + &authority_signer, + ) + .is_err()); + + assert!(process_deploy_program( + Arc::new(rpc_client_wrong_account_owner()), + &config, + &data, + data.len() as u32, + &program_address, + None, + &authority_signer, + ) + .is_err()); + + assert!(process_deploy_program( + Arc::new(rpc_client_wrong_authority()), + &config, + &data, + data.len() as u32, + &program_address, + 
None, + &authority_signer, + ) + .is_err()); + } + + #[test] + fn test_redeploy_from_source() { + let mut config = CliConfig::default(); + let data = [5u8; 2048]; + + let payer = keypair_from_seed(&[1u8; 32]).unwrap(); + let buffer_signer = keypair_from_seed(&[2u8; 32]).unwrap(); + let program_address = Pubkey::new_unique(); + let authority_signer = program_authority(); + + config.signers.push(&payer); + + // Redeploying a non-existent program should fail + assert!(process_deploy_program( + Arc::new(rpc_client_no_existing_program()), + &config, + &data, + data.len() as u32, + &program_address, + Some(&buffer_signer), + &authority_signer, + ) + .is_err()); + + assert!(process_deploy_program( + Arc::new(rpc_client_wrong_account_owner()), + &config, + &data, + data.len() as u32, + &program_address, + Some(&buffer_signer), + &authority_signer, + ) + .is_err()); + + assert!(process_deploy_program( + Arc::new(rpc_client_wrong_authority()), + &config, + &data, + data.len() as u32, + &program_address, + Some(&buffer_signer), + &authority_signer, + ) + .is_err()); + } + + #[test] + fn test_undeploy() { + let mut config = CliConfig::default(); + + let payer = keypair_from_seed(&[1u8; 32]).unwrap(); + let program_signer = keypair_from_seed(&[2u8; 32]).unwrap(); + let authority_signer = program_authority(); + + config.signers.push(&payer); + + assert!(process_undeploy_program( + Arc::new(rpc_client_no_existing_program()), + &config, + &program_signer.pubkey(), + &authority_signer, + ) + .is_err()); + + assert!(process_undeploy_program( + Arc::new(rpc_client_with_program_retracted()), + &config, + &program_signer.pubkey(), + &authority_signer, + ) + .is_ok()); + + assert!(process_undeploy_program( + Arc::new(rpc_client_with_program_deployed()), + &config, + &program_signer.pubkey(), + &authority_signer, + ) + .is_ok()); + + assert!(process_undeploy_program( + Arc::new(rpc_client_with_program_finalized()), + &config, + &program_signer.pubkey(), + &authority_signer, + ) + 
.is_err()); + + assert!(process_undeploy_program( + Arc::new(rpc_client_wrong_account_owner()), + &config, + &program_signer.pubkey(), + &authority_signer, + ) + .is_err()); + + assert!(process_undeploy_program( + Arc::new(rpc_client_wrong_authority()), + &config, + &program_signer.pubkey(), + &authority_signer, + ) + .is_err()); + } + + #[test] + fn test_finalize() { + let mut config = CliConfig::default(); + + let payer = keypair_from_seed(&[1u8; 32]).unwrap(); + let program_signer = keypair_from_seed(&[2u8; 32]).unwrap(); + let authority_signer = program_authority(); + + config.signers.push(&payer); + + assert!(process_finalize_program( + Arc::new(rpc_client_with_program_deployed()), + &config, + &program_signer.pubkey(), + &authority_signer, + ) + .is_ok()); + } +} diff --git a/sdk/program/src/loader_v4.rs b/sdk/program/src/loader_v4.rs index 9180c00a718243..e5706f51f22721 100644 --- a/sdk/program/src/loader_v4.rs +++ b/sdk/program/src/loader_v4.rs @@ -77,18 +77,29 @@ pub fn create_buffer( ) -> Vec { vec![ system_instruction::create_account(payer_address, buffer_address, lamports, 0, &id()), - Instruction::new_with_bincode( - id(), - &LoaderV4Instruction::Truncate { new_size }, - vec![ - AccountMeta::new(*buffer_address, true), - AccountMeta::new_readonly(*authority, true), - AccountMeta::new(*recipient_address, false), - ], - ), + truncate_uninitialized(buffer_address, authority, new_size, recipient_address), ] } +/// Returns the instructions required to set the length of an uninitialized program account. +/// This instruction will require the program account to also sign the transaction. 
+pub fn truncate_uninitialized( + program_address: &Pubkey, + authority: &Pubkey, + new_size: u32, + recipient_address: &Pubkey, +) -> Instruction { + Instruction::new_with_bincode( + id(), + &LoaderV4Instruction::Truncate { new_size }, + vec![ + AccountMeta::new(*program_address, true), + AccountMeta::new_readonly(*authority, true), + AccountMeta::new(*recipient_address, false), + ], + ) +} + /// Returns the instructions required to set the length of the program account. pub fn truncate( program_address: &Pubkey, From fe598a92735c3c1545f9c20da803b08430beeb73 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 18 Sep 2023 22:50:11 +0800 Subject: [PATCH 110/407] bump syn to 2.0.37 for test-case (#33288) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c214c9c2b1cba..7ce6d9a51f2bc8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7863,7 +7863,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", ] [[package]] @@ -7875,7 +7875,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.37", "test-case-core", ] From 9e11ae6275138066d2c566e1a6a4f132b68ad967 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 18 Sep 2023 10:59:03 -0500 Subject: [PATCH 111/407] Make program owners a const array instead of Vec<_> (#33275) The program owners pubkeys are constant, no need to reconstruct the Vec and Vec<&Pubkey> each time this function runs (every time we execute transactions). 
--- accounts-db/src/accounts.rs | 10 ++++++---- accounts-db/src/accounts_db.rs | 29 ++++++++++++++--------------- accounts-db/src/accounts_file.rs | 2 +- accounts-db/src/append_vec.rs | 17 ++++++++--------- runtime/src/bank.rs | 5 ++--- 5 files changed, 31 insertions(+), 32 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index aa16edd94f163d..f570fbdd2ad42a 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -648,7 +648,7 @@ impl Accounts { ancestors: &Ancestors, txs: &[SanitizedTransaction], lock_results: &mut [TransactionCheckResult], - program_owners: &[&'a Pubkey], + program_owners: &'a [Pubkey], hash_queue: &BlockhashQueue, ) -> HashMap { let mut result: HashMap = HashMap::new(); @@ -678,7 +678,7 @@ impl Accounts { ) { program_owners .get(index) - .map(|owner| entry.insert((*owner, 1))); + .map(|owner| entry.insert((owner, 1))); } } }); @@ -2090,11 +2090,12 @@ mod tests { let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); let ancestors = vec![(0, 0)].into_iter().collect(); + let owners = &[program1_pubkey, program2_pubkey]; let programs = accounts.filter_executable_program_accounts( &ancestors, &[sanitized_tx1, sanitized_tx2], &mut [(Ok(()), None), (Ok(()), None)], - &[&program1_pubkey, &program2_pubkey], + owners, &hash_queue, ); @@ -2198,12 +2199,13 @@ mod tests { let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); let ancestors = vec![(0, 0)].into_iter().collect(); + let owners = &[program1_pubkey, program2_pubkey]; let mut lock_results = vec![(Ok(()), None), (Ok(()), None)]; let programs = accounts.filter_executable_program_accounts( &ancestors, &[sanitized_tx1, sanitized_tx2], &mut lock_results, - &[&program1_pubkey, &program2_pubkey], + owners, &hash_queue, ); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 14294d73a9db6e..839f4df37e0609 100644 --- a/accounts-db/src/accounts_db.rs +++ 
b/accounts-db/src/accounts_db.rs @@ -865,7 +865,7 @@ impl<'a> LoadedAccountAccessor<'a> { } } - fn account_matches_owners(&self, owners: &[&Pubkey]) -> Result { + fn account_matches_owners(&self, owners: &[Pubkey]) -> Result { match self { LoadedAccountAccessor::Cached(cached_account) => cached_account .as_ref() @@ -875,7 +875,7 @@ impl<'a> LoadedAccountAccessor<'a> { } else { owners .iter() - .position(|entry| &cached_account.account.owner() == entry) + .position(|entry| cached_account.account.owner() == entry) } }) .ok_or(MatchAccountOwnerError::NoMatch), @@ -5074,7 +5074,7 @@ impl AccountsDb { &self, ancestors: &Ancestors, account: &Pubkey, - owners: &[&Pubkey], + owners: &[Pubkey], ) -> Result { let (slot, storage_location, _maybe_account_accesor) = self .read_index_for_accessor_or_load_slow(ancestors, account, None, false) @@ -5088,7 +5088,7 @@ impl AccountsDb { } else { owners .iter() - .position(|entry| &account.owner() == entry) + .position(|entry| account.owner() == entry) .ok_or(MatchAccountOwnerError::NoMatch) }; } @@ -14092,7 +14092,6 @@ pub mod tests { )); let owners: Vec = (0..2).map(|_| Pubkey::new_unique()).collect(); - let owners_refs: Vec<&Pubkey> = owners.iter().collect(); let account1_key = Pubkey::new_unique(); let account1 = AccountSharedData::new(321, 10, &owners[0]); @@ -14122,23 +14121,23 @@ pub mod tests { db.clean_accounts_for_tests(); assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account1_key, &owners_refs), + db.account_matches_owners(&Ancestors::default(), &account1_key, &owners), Ok(0) ); assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account2_key, &owners_refs), + db.account_matches_owners(&Ancestors::default(), &account2_key, &owners), Ok(1) ); assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account3_key, &owners_refs), + db.account_matches_owners(&Ancestors::default(), &account3_key, &owners), Err(MatchAccountOwnerError::NoMatch) ); assert_eq!( - 
db.account_matches_owners(&Ancestors::default(), &account4_key, &owners_refs), + db.account_matches_owners(&Ancestors::default(), &account4_key, &owners), Err(MatchAccountOwnerError::NoMatch) ); assert_eq!( - db.account_matches_owners(&Ancestors::default(), &Pubkey::new_unique(), &owners_refs), + db.account_matches_owners(&Ancestors::default(), &Pubkey::new_unique(), &owners), Err(MatchAccountOwnerError::UnableToLoad) ); @@ -14156,23 +14155,23 @@ pub mod tests { .unwrap(); assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account1_key, &owners_refs), + db.account_matches_owners(&Ancestors::default(), &account1_key, &owners), Ok(0) ); assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account2_key, &owners_refs), + db.account_matches_owners(&Ancestors::default(), &account2_key, &owners), Ok(1) ); assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account3_key, &owners_refs), + db.account_matches_owners(&Ancestors::default(), &account3_key, &owners), Err(MatchAccountOwnerError::NoMatch) ); assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account4_key, &owners_refs), + db.account_matches_owners(&Ancestors::default(), &account4_key, &owners), Err(MatchAccountOwnerError::NoMatch) ); assert_eq!( - db.account_matches_owners(&Ancestors::default(), &Pubkey::new_unique(), &owners_refs), + db.account_matches_owners(&Ancestors::default(), &Pubkey::new_unique(), &owners), Err(MatchAccountOwnerError::UnableToLoad) ); } diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 6e3ffd3df881d1..dedec30af24e88 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -116,7 +116,7 @@ impl AccountsFile { pub fn account_matches_owners( &self, offset: usize, - owners: &[&Pubkey], + owners: &[Pubkey], ) -> std::result::Result { match self { Self::AppendVec(av) => av.account_matches_owners(offset, owners), diff --git a/accounts-db/src/append_vec.rs 
b/accounts-db/src/append_vec.rs index 765fa97fdc200e..941c0a9afe298b 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -519,7 +519,7 @@ impl AppendVec { pub fn account_matches_owners( &self, offset: usize, - owners: &[&Pubkey], + owners: &[Pubkey], ) -> std::result::Result { let account_meta = self .get_account_meta(offset) @@ -529,7 +529,7 @@ impl AppendVec { } else { owners .iter() - .position(|entry| &&account_meta.owner == entry) + .position(|entry| &account_meta.owner == entry) .ok_or(MatchAccountOwnerError::NoMatch) } } @@ -1022,37 +1022,36 @@ pub mod tests { let path = get_append_vec_path("test_append_data"); let av = AppendVec::new(&path.path, true, 1024 * 1024); let owners: Vec = (0..2).map(|_| Pubkey::new_unique()).collect(); - let owners_refs: Vec<&Pubkey> = owners.iter().collect(); let mut account = create_test_account(5); account.1.set_owner(owners[0]); let index = av.append_account_test(&account).unwrap(); - assert_eq!(av.account_matches_owners(index, &owners_refs), Ok(0)); + assert_eq!(av.account_matches_owners(index, &owners), Ok(0)); let mut account1 = create_test_account(6); account1.1.set_owner(owners[1]); let index1 = av.append_account_test(&account1).unwrap(); - assert_eq!(av.account_matches_owners(index1, &owners_refs), Ok(1)); - assert_eq!(av.account_matches_owners(index, &owners_refs), Ok(0)); + assert_eq!(av.account_matches_owners(index1, &owners), Ok(1)); + assert_eq!(av.account_matches_owners(index, &owners), Ok(0)); let mut account2 = create_test_account(6); account2.1.set_owner(Pubkey::new_unique()); let index2 = av.append_account_test(&account2).unwrap(); assert_eq!( - av.account_matches_owners(index2, &owners_refs), + av.account_matches_owners(index2, &owners), Err(MatchAccountOwnerError::NoMatch) ); // tests for overflow assert_eq!( - av.account_matches_owners(usize::MAX - mem::size_of::(), &owners_refs), + av.account_matches_owners(usize::MAX - mem::size_of::(), &owners), 
Err(MatchAccountOwnerError::UnableToLoad) ); assert_eq!( av.account_matches_owners( usize::MAX - mem::size_of::() - mem::size_of::() + 1, - &owners_refs + &owners ), Err(MatchAccountOwnerError::UnableToLoad) ); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4c957f75fdb911..def01b9d5bcbf1 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5116,18 +5116,17 @@ impl Bank { ); check_time.stop(); - let program_owners: Vec = vec![ + const PROGRAM_OWNERS: &[Pubkey] = &[ bpf_loader_upgradeable::id(), bpf_loader::id(), bpf_loader_deprecated::id(), loader_v4::id(), ]; - let program_owners_refs: Vec<&Pubkey> = program_owners.iter().collect(); let mut program_accounts_map = self.rc.accounts.filter_executable_program_accounts( &self.ancestors, sanitized_txs, &mut check_results, - &program_owners_refs, + PROGRAM_OWNERS, &self.blockhash_queue.read().unwrap(), ); let native_loader = native_loader::id(); From a15564d719685d17794049ccb3228ab770608d64 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 18 Sep 2023 11:07:35 -0500 Subject: [PATCH 112/407] ledger-tool: Cleanup argument parsing logic (#33283) value_t!() macro and .unwrap_or() are more succinct than if/else blocks, and the value_t!() macro handles parsing values from strings. 
--- ledger-tool/src/main.rs | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 0db9ae21eb5d0f..10ef5c24665bfa 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -3982,21 +3982,10 @@ fn main() { force_update_to_open, enforce_ulimit_nofile, ); - let max_height = if let Some(height) = arg_matches.value_of("max_height") { - usize::from_str(height).expect("Maximum height must be a number") - } else { - usize::MAX - }; - let start_root = if let Some(height) = arg_matches.value_of("start_root") { - Slot::from_str(height).expect("Starting root must be a number") - } else { - 0 - }; - let num_roots = if let Some(roots) = arg_matches.value_of("num_roots") { - usize::from_str(roots).expect("Number of roots must be a number") - } else { - usize::from_str(DEFAULT_ROOT_COUNT).unwrap() - }; + + let max_height = value_t!(arg_matches, "max_height", usize).unwrap_or(usize::MAX); + let start_root = value_t!(arg_matches, "start_root", Slot).unwrap_or(0); + let num_roots = value_t_or_exit!(arg_matches, "num_roots", usize); let iter = blockstore .rooted_slot_iterator(start_root) @@ -4072,17 +4061,12 @@ fn main() { force_update_to_open, enforce_ulimit_nofile, ); - let start_root = if let Some(root) = arg_matches.value_of("start_root") { - Slot::from_str(root).expect("Before root must be a number") - } else { - blockstore.max_root() - }; + + let start_root = value_t!(arg_matches, "start_root", Slot) + .unwrap_or_else(|_| blockstore.max_root()); let max_slots = value_t_or_exit!(arg_matches, "max_slots", u64); - let end_root = if let Some(root) = arg_matches.value_of("end_root") { - Slot::from_str(root).expect("Until root must be a number") - } else { - start_root.saturating_sub(max_slots) - }; + let end_root = value_t!(arg_matches, "end_root", Slot) + .unwrap_or_else(|_| start_root.saturating_sub(max_slots)); assert!(start_root > end_root); let num_slots = 
start_root - end_root - 1; // Adjust by one since start_root need not be checked if arg_matches.is_present("end_root") && num_slots > max_slots { From 27caf4d1d2d4f1eaa126418d5d1af8a62a7e234d Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 18 Sep 2023 09:46:52 -0700 Subject: [PATCH 113/407] add test for duplicates in generate_index and fix approx stored count (#33290) add test --- accounts-db/src/accounts_db.rs | 89 ++++++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 839f4df37e0609..babe4d87562b1f 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9494,9 +9494,12 @@ impl AccountsDb { count_and_status.0 = entry.count; } store.alive_bytes.store(entry.stored_size, Ordering::SeqCst); - store - .approx_store_count - .store(entry.count, Ordering::Relaxed); + assert!( + store.approx_stored_count() >= entry.count, + "{}, {}", + store.approx_stored_count(), + entry.count + ); } else { trace!("id: {} clearing count", id); store.count_and_status.write().unwrap().0 = 0; @@ -10054,6 +10057,74 @@ pub mod tests { } } + #[test] + fn test_generate_index_duplicates_within_slot() { + for reverse in [false, true] { + let db = AccountsDb::new_single_for_tests(); + let slot0 = 0; + + let pubkey = Pubkey::from([1; 32]); + + let append_vec = db.create_and_insert_store(slot0, 1000, "test"); + + let mut account_small = AccountSharedData::default(); + account_small.set_data(vec![1]); + account_small.set_lamports(1); + let mut account_big = AccountSharedData::default(); + account_big.set_data(vec![5; 10]); + account_big.set_lamports(2); + assert_ne!( + aligned_stored_size(account_big.data().len()), + aligned_stored_size(account_small.data().len()) + ); + // same account twice with different data lens + // Rules are the last one of each pubkey is the one that ends up in the index. 
+ let mut data = vec![(&pubkey, &account_big), (&pubkey, &account_small)]; + if reverse { + data = data.into_iter().rev().collect(); + } + let expected_alive_bytes = if reverse { + aligned_stored_size(account_big.data().len()) + } else { + aligned_stored_size(account_small.data().len()) + }; + let expected_accounts_data_len = data.last().unwrap().1.data().len(); + let storable = (slot0, &data[..], INCLUDE_SLOT_IN_HASH_TESTS); + let hashes = data.iter().map(|_| Hash::default()).collect::>(); + let write_versions = data.iter().map(|_| 0).collect::>(); + let append = + StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( + &storable, + hashes, + write_versions, + ); + + // construct append vec with account to generate an index from + append_vec.accounts.append_accounts(&append, 0); + // append vecs set this at load + append_vec + .approx_store_count + .store(data.len(), Ordering::Relaxed); + + let genesis_config = GenesisConfig::default(); + assert!(db.accounts_index.get_account_read_entry(&pubkey).is_none()); + let result = db.generate_index(None, false, &genesis_config); + // index entry should only contain a single entry for the pubkey since index cannot hold more than 1 entry per slot + let entry = db.accounts_index.get_account_read_entry(&pubkey).unwrap(); + assert_eq!(entry.slot_list().len(), 1); + assert_eq!(append_vec.alive_bytes(), expected_alive_bytes); + // total # accounts in append vec + assert_eq!(append_vec.approx_stored_count(), 2); + // # alive accounts + assert_eq!(append_vec.count(), 1); + // all account data alive + assert_eq!( + result.accounts_data_len as usize, expected_accounts_data_len, + "reverse: {reverse}" + ); + } + } + #[test] fn test_maybe_unref_accounts_already_in_ancient() { let db = AccountsDb::new_single_for_tests(); @@ -15859,21 +15930,29 @@ pub mod tests { count_and_status.0 = 0; } + // count needs to be <= approx stored count in store. 
+ // approx stored count is 1 in store since we added a single account. + let count = 1; + // populate based on made up hash data let dashmap = DashMap::default(); dashmap.insert( 0, StorageSizeAndCount { stored_size: 2, - count: 3, + count, }, ); + for (_, store) in accounts.storage.iter() { + assert_eq!(store.count_and_status.read().unwrap().0, 0); + assert_eq!(store.alive_bytes.load(Ordering::Acquire), 0); + } accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default()); assert_eq!(accounts.storage.len(), 1); for (_, store) in accounts.storage.iter() { assert_eq!(store.append_vec_id(), 0); - assert_eq!(store.count_and_status.read().unwrap().0, 3); + assert_eq!(store.count_and_status.read().unwrap().0, count); assert_eq!(store.alive_bytes.load(Ordering::Acquire), 2); } } From e860019687734ce1e01702514217f5839c73a31d Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 18 Sep 2023 10:05:27 -0700 Subject: [PATCH 114/407] TransactionScheduler: Pipe BlockProductionMethod (#33217) --- banking-bench/src/main.rs | 2 +- core/benches/banking_stage.rs | 3 +++ core/src/banking_stage.rs | 49 +++++++++++++++++++++++++++++++++-- core/src/tpu.rs | 4 ++- core/src/validator.rs | 1 + 5 files changed, 55 insertions(+), 4 deletions(-) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 5d402592ad49ec..bb5149f47c85b9 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -450,7 +450,7 @@ fn main() { DEFAULT_TPU_CONNECTION_POOL_SIZE, ), }; - let banking_stage = BankingStage::new_num_threads( + let banking_stage = BankingStage::new_thread_local_multi_iterator( &cluster_info, &poh_recorder, non_vote_receiver, diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 0b0e6876c3db15..6219f4abb9a265 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -1,6 +1,8 @@ #![allow(clippy::arithmetic_side_effects)] #![feature(test)] +use 
solana_core::validator::BlockProductionMethod; + extern crate test; use { @@ -291,6 +293,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { let cluster_info = Arc::new(cluster_info); let (s, _r) = unbounded(); let _banking_stage = BankingStage::new( + BlockProductionMethod::ThreadLocalMultiIterator, &cluster_info, &poh_recorder, non_vote_receiver, diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 398dad86d03644..7e9138048cb3dc 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -15,7 +15,10 @@ use { unprocessed_packet_batches::*, unprocessed_transaction_storage::{ThreadType, UnprocessedTransactionStorage}, }, - crate::{banking_trace::BankingPacketReceiver, tracer_packet_stats::TracerPacketStats}, + crate::{ + banking_trace::BankingPacketReceiver, tracer_packet_stats::TracerPacketStats, + validator::BlockProductionMethod, + }, crossbeam_channel::RecvTimeoutError, histogram::Histogram, solana_client::connection_cache::ConnectionCache, @@ -307,6 +310,7 @@ impl BankingStage { /// Create the stage using `bank`. Exit when `verified_receiver` is dropped. 
#[allow(clippy::too_many_arguments)] pub fn new( + block_production_method: BlockProductionMethod, cluster_info: &Arc, poh_recorder: &Arc>, non_vote_receiver: BankingPacketReceiver, @@ -320,6 +324,7 @@ impl BankingStage { prioritization_fee_cache: &Arc, ) -> Self { Self::new_num_threads( + block_production_method, cluster_info, poh_recorder, non_vote_receiver, @@ -337,6 +342,42 @@ impl BankingStage { #[allow(clippy::too_many_arguments)] pub fn new_num_threads( + block_production_method: BlockProductionMethod, + cluster_info: &Arc, + poh_recorder: &Arc>, + non_vote_receiver: BankingPacketReceiver, + tpu_vote_receiver: BankingPacketReceiver, + gossip_vote_receiver: BankingPacketReceiver, + num_threads: u32, + transaction_status_sender: Option, + replay_vote_sender: ReplayVoteSender, + log_messages_bytes_limit: Option, + connection_cache: Arc, + bank_forks: Arc>, + prioritization_fee_cache: &Arc, + ) -> Self { + match block_production_method { + BlockProductionMethod::ThreadLocalMultiIterator => { + Self::new_thread_local_multi_iterator( + cluster_info, + poh_recorder, + non_vote_receiver, + tpu_vote_receiver, + gossip_vote_receiver, + num_threads, + transaction_status_sender, + replay_vote_sender, + log_messages_bytes_limit, + connection_cache, + bank_forks, + prioritization_fee_cache, + ) + } + } + } + + #[allow(clippy::too_many_arguments)] + pub fn new_thread_local_multi_iterator( cluster_info: &Arc, poh_recorder: &Arc>, non_vote_receiver: BankingPacketReceiver, @@ -644,6 +685,7 @@ mod tests { let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let banking_stage = BankingStage::new( + BlockProductionMethod::ThreadLocalMultiIterator, &cluster_info, &poh_recorder, non_vote_receiver, @@ -700,6 +742,7 @@ mod tests { let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let banking_stage = BankingStage::new( + BlockProductionMethod::ThreadLocalMultiIterator, &cluster_info, &poh_recorder, non_vote_receiver, @@ -781,6 +824,7 @@ mod tests { let 
(replay_vote_sender, _replay_vote_receiver) = unbounded(); let banking_stage = BankingStage::new( + BlockProductionMethod::ThreadLocalMultiIterator, &cluster_info, &poh_recorder, non_vote_receiver, @@ -941,7 +985,7 @@ mod tests { create_test_recorder(bank.clone(), blockstore, Some(poh_config), None); let (_, cluster_info) = new_test_cluster_info(/*keypair:*/ None); let cluster_info = Arc::new(cluster_info); - let _banking_stage = BankingStage::new_num_threads( + let _banking_stage = BankingStage::new_thread_local_multi_iterator( &cluster_info, &poh_recorder, non_vote_receiver, @@ -1133,6 +1177,7 @@ mod tests { let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let banking_stage = BankingStage::new( + BlockProductionMethod::ThreadLocalMultiIterator, &cluster_info, &poh_recorder, non_vote_receiver, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 5b6b939f873ff5..884153d3d630af 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -15,7 +15,7 @@ use { sigverify_stage::SigVerifyStage, staked_nodes_updater_service::StakedNodesUpdaterService, tpu_entry_notifier::TpuEntryNotifier, - validator::GeneratorConfig, + validator::{BlockProductionMethod, GeneratorConfig}, }, bytes::Bytes, crossbeam_channel::{unbounded, Receiver}, @@ -112,6 +112,7 @@ impl Tpu { tracer_thread_hdl: TracerThread, tpu_enable_udp: bool, prioritization_fee_cache: &Arc, + block_production_method: BlockProductionMethod, _generator_config: Option, /* vestigial code for replay invalidator */ ) -> Self { let TpuSockets { @@ -221,6 +222,7 @@ impl Tpu { ); let banking_stage = BankingStage::new( + block_production_method, cluster_info, poh_recorder, non_vote_receiver, diff --git a/core/src/validator.rs b/core/src/validator.rs index cb40bd0ff9d4d4..a0c39da764239b 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1296,6 +1296,7 @@ impl Validator { tracer_thread, tpu_enable_udp, &prioritization_fee_cache, + config.block_production_method.clone(), config.generator_config.clone(), ); 
From 86dd18bfb54d8b02690fe2a7587839eb3a3e3e09 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 18 Sep 2023 10:07:40 -0700 Subject: [PATCH 115/407] TransactionScheduler: Id Generators (#33207) --- .../transaction_scheduler/batch_id_generator.rs | 14 ++++++++++++++ .../banking_stage/transaction_scheduler/mod.rs | 6 ++++++ .../transaction_id_generator.rs | 16 ++++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 core/src/banking_stage/transaction_scheduler/batch_id_generator.rs create mode 100644 core/src/banking_stage/transaction_scheduler/transaction_id_generator.rs diff --git a/core/src/banking_stage/transaction_scheduler/batch_id_generator.rs b/core/src/banking_stage/transaction_scheduler/batch_id_generator.rs new file mode 100644 index 00000000000000..6effc80f8537b4 --- /dev/null +++ b/core/src/banking_stage/transaction_scheduler/batch_id_generator.rs @@ -0,0 +1,14 @@ +use crate::banking_stage::scheduler_messages::TransactionBatchId; + +#[derive(Default)] +pub struct BatchIdGenerator { + next_id: u64, +} + +impl BatchIdGenerator { + pub fn next(&mut self) -> TransactionBatchId { + let id = self.next_id; + self.next_id = self.next_id.wrapping_sub(1); + TransactionBatchId::new(id) + } +} diff --git a/core/src/banking_stage/transaction_scheduler/mod.rs b/core/src/banking_stage/transaction_scheduler/mod.rs index c723f3af9a0da4..065efba0d86c0f 100644 --- a/core/src/banking_stage/transaction_scheduler/mod.rs +++ b/core/src/banking_stage/transaction_scheduler/mod.rs @@ -6,3 +6,9 @@ mod transaction_priority_id; mod transaction_state; #[allow(dead_code)] mod transaction_state_container; + +#[allow(dead_code)] +mod transaction_id_generator; + +#[allow(dead_code)] +mod batch_id_generator; diff --git a/core/src/banking_stage/transaction_scheduler/transaction_id_generator.rs b/core/src/banking_stage/transaction_scheduler/transaction_id_generator.rs new file mode 100644 index 00000000000000..0f88fd769d1a2a --- /dev/null +++ 
b/core/src/banking_stage/transaction_scheduler/transaction_id_generator.rs @@ -0,0 +1,16 @@ +use crate::banking_stage::scheduler_messages::TransactionId; + +/// Simple sequential ID generator for `TransactionId`s. +/// These IDs uniquely identify transactions during the scheduling process. +#[derive(Default)] +pub struct TransactionIdGenerator { + next_id: u64, +} + +impl TransactionIdGenerator { + pub fn next(&mut self) -> TransactionId { + let id = self.next_id; + self.next_id = self.next_id.wrapping_add(1); + TransactionId::new(id) + } +} From 402981e3c18375ae32bd8ac7db1713bb68887ba8 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 18 Sep 2023 10:59:42 -0700 Subject: [PATCH 116/407] cleanup test (#33291) --- accounts-db/src/accounts_db.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index babe4d87562b1f..fca79383230cfe 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -10083,12 +10083,8 @@ pub mod tests { if reverse { data = data.into_iter().rev().collect(); } - let expected_alive_bytes = if reverse { - aligned_stored_size(account_big.data().len()) - } else { - aligned_stored_size(account_small.data().len()) - }; let expected_accounts_data_len = data.last().unwrap().1.data().len(); + let expected_alive_bytes = aligned_stored_size(expected_accounts_data_len); let storable = (slot0, &data[..], INCLUDE_SLOT_IN_HASH_TESTS); let hashes = data.iter().map(|_| Hash::default()).collect::>(); let write_versions = data.iter().map(|_| 0).collect::>(); From 17c3930bc8275af2c863d431b73d1a218d4ebb43 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 18 Sep 2023 11:13:18 -0700 Subject: [PATCH 117/407] in gen index, stop using big hashmap (#33252) * in gen index, stop using big hashmap * update accounts_data_len * remove approx_stored_count --- accounts-db/src/accounts_db.rs | 91 ++++++++----------- 
accounts-db/src/accounts_index.rs | 146 ++++++++++++++++++++++++++++-- 2 files changed, 178 insertions(+), 59 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index fca79383230cfe..d2800b2ef7e06a 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2322,8 +2322,6 @@ impl<'a> ZeroLamport for StoredAccountMeta<'a> { } } -type GenerateIndexAccountsMap<'a> = HashMap>; - /// called on a struct while scanning append vecs trait AppendVecScan: Send + Sync + Clone { /// return true if this pubkey should be included @@ -8914,20 +8912,6 @@ impl AccountsDb { (result, slots) } - fn process_storage_slot<'a>( - &self, - storage: &'a Arc, - ) -> GenerateIndexAccountsMap<'a> { - let num_accounts = storage.approx_stored_count(); - let mut accounts_map = GenerateIndexAccountsMap::with_capacity(num_accounts); - storage.accounts.account_iter().for_each(|stored_account| { - let pubkey = stored_account.pubkey(); - assert!(!self.is_filler_account(pubkey)); - accounts_map.insert(*pubkey, stored_account); - }); - accounts_map - } - /// return Some(lamports_to_top_off) if 'account' would collect rent fn stats_for_rent_payers( pubkey: &Pubkey, @@ -8948,30 +8932,32 @@ impl AccountsDb { fn generate_index_for_slot( &self, - accounts_map: GenerateIndexAccountsMap<'_>, + storage: &Arc, slot: Slot, store_id: AppendVecId, rent_collector: &RentCollector, storage_info: &StorageSizeAndCountMap, ) -> SlotIndexGenerationInfo { - if accounts_map.is_empty() { + let mut accounts = storage.accounts.account_iter(); + if accounts.next().is_none() { return SlotIndexGenerationInfo::default(); } + let accounts = storage.accounts.account_iter(); let secondary = !self.account_indexes.is_empty(); let mut rent_paying_accounts_by_partition = Vec::default(); let mut accounts_data_len = 0; let mut num_accounts_rent_paying = 0; - let num_accounts = accounts_map.len(); let mut amount_to_top_off_rent = 0; let mut stored_size_alive = 0; - let items = 
accounts_map.into_iter().map(|(pubkey, stored_account)| { + let items = accounts.map(|stored_account| { stored_size_alive += stored_account.stored_size(); + let pubkey = stored_account.pubkey(); if secondary { self.accounts_index.update_secondary_indexes( - &pubkey, + pubkey, &stored_account, &self.account_indexes, ); @@ -8981,16 +8967,16 @@ impl AccountsDb { } if let Some(amount_to_top_off_rent_this_account) = - Self::stats_for_rent_payers(&pubkey, &stored_account, rent_collector) + Self::stats_for_rent_payers(pubkey, &stored_account, rent_collector) { amount_to_top_off_rent += amount_to_top_off_rent_this_account; num_accounts_rent_paying += 1; // remember this rent-paying account pubkey - rent_paying_accounts_by_partition.push(pubkey); + rent_paying_accounts_by_partition.push(*pubkey); } ( - pubkey, + *pubkey, AccountInfo::new( StorageLocation::AppendVec(store_id, stored_account.offset()), // will never be cached stored_account.lamports(), @@ -8998,15 +8984,31 @@ impl AccountsDb { ) }); - let (dirty_pubkeys, insert_time_us, generate_index_count) = self + let (dirty_pubkeys, insert_time_us, mut generate_index_results) = self .accounts_index - .insert_new_if_missing_into_primary_index(slot, num_accounts, items); + .insert_new_if_missing_into_primary_index(slot, storage.approx_stored_count(), items); + + if let Some(duplicates_this_slot) = std::mem::take(&mut generate_index_results.duplicates) { + // there were duplicate pubkeys in this same slot + // Some were not inserted. This means some info like stored data is off. 
+ duplicates_this_slot + .into_iter() + .for_each(|(pubkey, (_slot, info))| { + let duplicate = storage.accounts.get_account(info.offset()).unwrap().0; + assert_eq!(&pubkey, duplicate.pubkey()); + stored_size_alive = stored_size_alive.saturating_sub(duplicate.stored_size()); + if !duplicate.is_zero_lamport() { + accounts_data_len = + accounts_data_len.saturating_sub(duplicate.data().len() as u64); + } + }); + } { // second, collect into the shared DashMap once we've figured out all the info per store_id let mut info = storage_info.entry(store_id).or_default(); info.stored_size += stored_size_alive; - info.count += generate_index_count.count; + info.count += generate_index_results.count; } // dirty_pubkeys will contain a pubkey if an item has multiple rooted entries for @@ -9017,7 +9019,7 @@ impl AccountsDb { } SlotIndexGenerationInfo { insert_time_us, - num_accounts: num_accounts as u64, + num_accounts: generate_index_results.count as u64, num_accounts_rent_paying, accounts_data_len, amount_to_top_off_rent, @@ -9176,7 +9178,6 @@ impl AccountsDb { // no storage at this slot, no information to pull out continue; }; - let accounts_map = self.process_storage_slot(&storage); let store_id = storage.append_vec_id(); scan_time.stop(); @@ -9194,12 +9195,13 @@ impl AccountsDb { rent_paying_accounts_by_partition: rent_paying_accounts_by_partition_this_slot, } = self.generate_index_for_slot( - accounts_map, + &storage, *slot, store_id, &rent_collector, &storage_info, ); + rent_paying.fetch_add(rent_paying_this_slot, Ordering::Relaxed); amount_to_top_off_rent .fetch_add(amount_to_top_off_rent_this_slot, Ordering::Relaxed); @@ -9220,10 +9222,10 @@ impl AccountsDb { // verify index matches expected and measure the time to get all items assert!(verify); let mut lookup_time = Measure::start("lookup_time"); - for account in accounts_map.into_iter() { - let (key, account_info) = account; - let lock = self.accounts_index.get_bin(&key); - let x = lock.get(&key).unwrap(); + for 
account_info in storage.accounts.account_iter() { + let key = account_info.pubkey(); + let lock = self.accounts_index.get_bin(key); + let x = lock.get(key).unwrap(); let sl = x.slot_list.read().unwrap(); let mut count = 0; for (slot2, account_info2) in sl.iter() { @@ -15818,9 +15820,8 @@ pub mod tests { let storage = accounts.storage.get_slot_storage_entry(slot0).unwrap(); let storage_info = StorageSizeAndCountMap::default(); - let accounts_map = accounts.process_storage_slot(&storage); accounts.generate_index_for_slot( - accounts_map, + &storage, slot0, 0, &RentCollector::default(), @@ -15842,14 +15843,7 @@ pub mod tests { // empty store let storage = accounts.create_and_insert_store(0, 1, "test"); let storage_info = StorageSizeAndCountMap::default(); - let accounts_map = accounts.process_storage_slot(&storage); - accounts.generate_index_for_slot( - accounts_map, - 0, - 0, - &RentCollector::default(), - &storage_info, - ); + accounts.generate_index_for_slot(&storage, 0, 0, &RentCollector::default(), &storage_info); assert!(storage_info.is_empty()); } @@ -15890,14 +15884,7 @@ pub mod tests { ); let storage_info = StorageSizeAndCountMap::default(); - let accounts_map = accounts.process_storage_slot(&storage); - accounts.generate_index_for_slot( - accounts_map, - 0, - 0, - &RentCollector::default(), - &storage_info, - ); + accounts.generate_index_for_slot(&storage, 0, 0, &RentCollector::default(), &storage_info); assert_eq!(storage_info.len(), 1); for entry in storage_info.iter() { assert_eq!( diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 959bb8319e5080..0b41948c79a7cf 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -72,9 +72,11 @@ pub type RefCount = u64; pub type AccountMap = Arc>; #[derive(Default, Debug, PartialEq, Eq)] -pub(crate) struct GenerateIndexCount { +pub(crate) struct GenerateIndexResult { /// number of accounts inserted in the index pub count: usize, + /// pubkeys 
which were present multiple times in the insertion request. + pub duplicates: Option>, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -1586,24 +1588,55 @@ impl + Into> AccountsIndex { self.account_maps.len() } + /// remove the earlier instances of each pubkey when the pubkey exists later in the `Vec`. + /// Could also be done with HashSet. + /// Returns `HashSet` of duplicate pubkeys. + fn remove_older_duplicate_pubkeys( + items: &mut Vec<(Pubkey, (Slot, T))>, + ) -> Option> { + if items.len() < 2 { + return None; + } + // stable sort by pubkey. + // Earlier entries are overwritten by later entries + items.sort_by(|a, b| a.0.cmp(&b.0)); + let mut duplicates = None::>; + let mut i = 0; + while i < items.len().saturating_sub(1) { + let this_key = &items[i].0; + // look at next entry. If it is same pubkey as this one, then remove this one. + if this_key == &items[i + 1].0 { + let mut duplicates_insert = duplicates.unwrap_or_default(); + // i+1 is same pubkey as i, so remove i + duplicates_insert.push(items.remove(i)); + duplicates = Some(duplicates_insert); + // `items` got smaller, so `i` remains the same. + // There could also be several duplicate pubkeys. + } else { + i += 1; + } + } + duplicates + } + // Same functionally to upsert, but: // 1. operates on a batch of items // 2. holds the write lock for the duration of adding the items // Can save time when inserting lots of new keys. // But, does NOT update secondary index // This is designed to be called at startup time. 
- // returns (dirty_pubkeys, insertion_time_us, GenerateIndexCount) + // returns (dirty_pubkeys, insertion_time_us, GenerateIndexResult) #[allow(clippy::needless_collect)] pub(crate) fn insert_new_if_missing_into_primary_index( &self, slot: Slot, - item_len: usize, + approx_items_len: usize, items: impl Iterator, - ) -> (Vec, u64, GenerateIndexCount) { + ) -> (Vec, u64, GenerateIndexResult) { // big enough so not likely to re-allocate, small enough to not over-allocate by too much // this assumes the largest bin contains twice the expected amount of the average size per bin let bins = self.bins(); - let expected_items_per_bin = item_len * 2 / bins; + let expected_items_per_bin = approx_items_len * 2 / bins; let use_disk = self.storage.storage.disk.is_some(); let mut binned = (0..bins) .map(|_| Vec::with_capacity(expected_items_per_bin)) @@ -1627,14 +1660,22 @@ impl + Into> AccountsIndex { // This results in calls to insert_new_entry_if_missing_with_lock from different threads starting at different bins to avoid // lock contention. 
let random_offset = thread_rng().gen_range(0..bins); + let mut duplicates = Vec::default(); (0..bins).for_each(|pubkey_bin| { let pubkey_bin = (pubkey_bin + random_offset) % bins; - let items = std::mem::take(&mut binned[pubkey_bin]); + let mut items = std::mem::take(&mut binned[pubkey_bin]); if items.is_empty() { return; } + + let these_duplicates = Self::remove_older_duplicate_pubkeys(&mut items); + if let Some(mut these_duplicates) = these_duplicates { + duplicates.append(&mut these_duplicates); + } + let r_account_maps = &self.account_maps[pubkey_bin]; let mut insert_time = Measure::start("insert_into_primary_index"); + // count only considers non-duplicate accounts count += items.len(); if use_disk { r_account_maps.startup_insert_only(items.into_iter()); @@ -1668,7 +1709,10 @@ impl + Into> AccountsIndex { ( dirty_pubkeys, insertion_time.load(Ordering::Relaxed), - GenerateIndexCount { count }, + GenerateIndexResult { + count, + duplicates: (!duplicates.is_empty()).then_some(duplicates), + }, ) } @@ -2101,6 +2145,56 @@ pub mod tests { assert_eq!(num, 0); } + #[test] + fn test_remove_older_duplicate_pubkeys() { + let pk1 = Pubkey::new_from_array([0; 32]); + let pk2 = Pubkey::new_from_array([1; 32]); + let slot0 = 0; + let info2 = 55; + let mut items = vec![]; + let removed = AccountsIndex::::remove_older_duplicate_pubkeys(&mut items); + assert!(items.is_empty()); + assert!(removed.is_none()); + let mut items = vec![(pk1, (slot0, 1u64)), (pk2, (slot0, 2))]; + let expected = items.clone(); + let removed = AccountsIndex::::remove_older_duplicate_pubkeys(&mut items); + assert_eq!(items, expected); + assert!(removed.is_none()); + + for dup in 0..3 { + for other in 0..dup + 2 { + let first_info = 10u64; + let mut items = vec![(pk1, (slot0, first_info))]; + let mut expected_dups = items.clone(); + for i in 0..dup { + let this_dup = (pk1, (slot0, i + 10u64 + 1)); + if i < dup.saturating_sub(1) { + expected_dups.push(this_dup); + } + items.push(this_dup); + } + let mut 
expected = vec![*items.last().unwrap()]; + let other_item = (pk2, (slot0, info2)); + if other == dup + 1 { + // don't insert + } else if other == dup { + expected.push(other_item); + items.push(other_item); + } else { + expected.push(other_item); + items.insert(other as usize, other_item); + } + let result = AccountsIndex::::remove_older_duplicate_pubkeys(&mut items); + assert_eq!(items, expected); + if dup != 0 { + assert_eq!(result.unwrap(), expected_dups); + } else { + assert!(result.is_none()); + } + } + } + } + #[test] fn test_secondary_index_include_exclude() { let pk1 = Pubkey::new_unique(); @@ -2194,6 +2288,44 @@ pub mod tests { true } } + + #[test] + fn test_insert_duplicates() { + let key = solana_sdk::pubkey::new_rand(); + let pubkey = &key; + let slot = 0; + let mut ancestors = Ancestors::default(); + ancestors.insert(slot, 0); + + let account_info = true; + let index = AccountsIndex::::default_for_tests(); + let account_info2: bool = !account_info; + let items = vec![(*pubkey, account_info), (*pubkey, account_info2)]; + index.set_startup(Startup::Startup); + let (_, _, result) = + index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter()); + assert_eq!(result.count, 1); + index.set_startup(Startup::Normal); + if let AccountIndexGetResult::Found(entry, index) = + // the entry for + index.get_for_tests(pubkey, Some(&ancestors), None) + { + // make sure the one with the correct info is added + assert_eq!(entry.slot_list()[index], (slot, account_info2)); + // make sure it wasn't inserted twice + assert_eq!( + entry + .slot_list() + .iter() + .filter_map(|(entry_slot, _)| (entry_slot == &slot).then_some(true)) + .count(), + 1 + ); + } else { + panic!("failed"); + } + } + #[test] fn test_insert_new_with_lock_no_ancestors() { let key = solana_sdk::pubkey::new_rand(); From 8b8a21a52f51d2a4904075b6ada356281c44c0ca Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Mon, 18 Sep 2023 16:06:24 
-0500 Subject: [PATCH 118/407] cleanup feature: enable request heap frame instruction #30076 (#33243) * cleanup feature: enable request heap frame instruction #30076 * update sbf tests * removed out dated comments and test --- accounts-db/src/accounts.rs | 15 +-- cost-model/src/cost_model.rs | 8 -- program-runtime/src/compute_budget.rs | 132 +------------------- programs/sbf/tests/programs.rs | 2 - runtime/src/bank.rs | 2 - runtime/src/bank/tests.rs | 51 +------- runtime/src/transaction_priority_details.rs | 1 - 7 files changed, 12 insertions(+), 199 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index f570fbdd2ad42a..47b372d981843a 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -252,7 +252,6 @@ impl Accounts { let _process_transaction_result = compute_budget.process_instructions( tx.message().program_instructions_iter(), !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - true, // don't reject txs that use request heap size ix feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), ); // sanitize against setting size limit to zero @@ -723,7 +722,7 @@ impl Accounts { fee_structure.calculate_fee( tx.message(), lamports_per_signature, - &ComputeBudget::fee_budget_limits(tx.message().program_instructions_iter(), feature_set, Some(self.accounts_db.expected_cluster_type())), + &ComputeBudget::fee_budget_limits(tx.message().program_instructions_iter(), feature_set), feature_set.is_active(&remove_congestion_multiplier_from_fee_calculation::id()), feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), ) @@ -1758,11 +1757,7 @@ mod tests { let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &ComputeBudget::fee_budget_limits( - message.program_instructions_iter(), - &feature_set, - None, - ), + &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set), true, false, ); @@ -4327,11 
+4322,7 @@ mod tests { let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &ComputeBudget::fee_budget_limits( - message.program_instructions_iter(), - &feature_set, - None, - ), + &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set), true, false, ); diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index e321719e993030..b3ffdad3e6a2a6 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -119,17 +119,9 @@ impl CostModel { // calculate bpf cost based on compute budget instructions let mut compute_budget = ComputeBudget::default(); - // Starting from v1.15, cost model uses compute_budget.set_compute_unit_limit to - // measure bpf_costs (code below), vs earlier versions that use estimated - // bpf instruction costs. The calculated transaction costs are used by leaders - // during block packing, different costs for same transaction due to different versions - // will not impact consensus. So for v1.15+, should call compute budget with - // the feature gate `enable_request_heap_frame_ix` enabled. 
- let enable_request_heap_frame_ix = true; let result = compute_budget.process_instructions( transaction.message().program_instructions_iter(), !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - enable_request_heap_frame_ix, feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), ); diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index 44fb070b3786ae..a1272cf1707c14 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -5,11 +5,10 @@ use { compute_budget::{self, ComputeBudgetInstruction}, entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, feature_set::{ - add_set_tx_loaded_accounts_data_size_instruction, enable_request_heap_frame_ix, - remove_deprecated_request_unit_ix, FeatureSet, + add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix, + FeatureSet, }, fee::FeeBudgetLimits, - genesis_config::ClusterType, instruction::{CompiledInstruction, InstructionError}, pubkey::Pubkey, transaction::TransactionError, @@ -191,7 +190,6 @@ impl ComputeBudget { &mut self, instructions: impl Iterator, support_request_units_deprecated: bool, - enable_request_heap_frame_ix: bool, support_set_loaded_accounts_data_size_limit_ix: bool, ) -> Result { let mut num_non_compute_budget_instructions: u32 = 0; @@ -260,8 +258,7 @@ impl ComputeBudget { } if let Some((bytes, i)) = requested_heap_size { - if !enable_request_heap_frame_ix - || bytes > MAX_HEAP_FRAME_BYTES + if bytes > MAX_HEAP_FRAME_BYTES || bytes < MIN_HEAP_FRAME_BYTES as u32 || bytes % 1024 != 0 { @@ -293,23 +290,13 @@ impl ComputeBudget { pub fn fee_budget_limits<'a>( instructions: impl Iterator, feature_set: &FeatureSet, - maybe_cluster_type: Option, ) -> FeeBudgetLimits { let mut compute_budget = Self::default(); - // A cluster specific feature gate, when not activated it keeps v1.13 behavior in mainnet-beta; - // once activated for v1.14+, it allows 
compute_budget::request_heap_frame and - // compute_budget::set_compute_unit_price co-exist in same transaction. - let enable_request_heap_frame_ix = feature_set - .is_active(&enable_request_heap_frame_ix::id()) - || maybe_cluster_type - .and_then(|cluster_type| (cluster_type != ClusterType::MainnetBeta).then_some(0)) - .is_some(); let prioritization_fee_details = compute_budget .process_instructions( instructions, !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - enable_request_heap_frame_ix, feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), ) .unwrap_or_default(); @@ -369,7 +356,7 @@ mod tests { }; macro_rules! test { - ( $instructions: expr, $expected_result: expr, $expected_budget: expr, $enable_request_heap_frame_ix: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { + ( $instructions: expr, $expected_result: expr, $expected_budget: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { let payer_keypair = Keypair::new(); let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( &[&payer_keypair], @@ -380,20 +367,13 @@ mod tests { let result = compute_budget.process_instructions( tx.message().program_instructions_iter(), false, /*not support request_units_deprecated*/ - $enable_request_heap_frame_ix, $support_set_loaded_accounts_data_size_limit_ix, ); assert_eq!($expected_result, result); assert_eq!(compute_budget, $expected_budget); }; ( $instructions: expr, $expected_result: expr, $expected_budget: expr) => { - test!( - $instructions, - $expected_result, - $expected_budget, - true, - false - ); + test!($instructions, $expected_result, $expected_budget, false); }; } @@ -654,104 +634,8 @@ mod tests { ); } - #[test] - fn test_process_instructions_disable_request_heap_frame() { - // assert empty message results default compute budget and fee - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() 
- }, - false, - false - ); - - // assert requesting heap frame when feature is disable will result instruction error - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(40 * 1024), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData - )), - ComputeBudget::default(), - false, - false - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ], - Err(TransactionError::InstructionError( - 1, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - false, - false - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Err(TransactionError::InstructionError( - 1, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - false, - false - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Err(TransactionError::InstructionError( - 2, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - false, - false - ); - - // assert normal results when not requesting heap frame when the feature is disabled - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - 
Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 7, - ..ComputeBudget::default() - }, - false, - false - ); - } - #[test] fn test_process_loaded_accounts_data_size_limit_instruction() { - let enable_request_heap_frame_ix: bool = true; - // Assert for empty instructions, change value of support_set_loaded_accounts_data_size_limit_ix // will not change results, which should all be default for support_set_loaded_accounts_data_size_limit_ix in [true, false] { @@ -762,7 +646,6 @@ mod tests { compute_unit_limit: 0, ..ComputeBudget::default() }, - enable_request_heap_frame_ix, support_set_loaded_accounts_data_size_limit_ix ); } @@ -801,7 +684,6 @@ mod tests { ], expected_result, expected_budget, - enable_request_heap_frame_ix, support_set_loaded_accounts_data_size_limit_ix ); } @@ -840,7 +722,6 @@ mod tests { ], expected_result, expected_budget, - enable_request_heap_frame_ix, support_set_loaded_accounts_data_size_limit_ix ); } @@ -868,7 +749,6 @@ mod tests { ),], expected_result, expected_budget, - enable_request_heap_frame_ix, support_set_loaded_accounts_data_size_limit_ix ); } @@ -904,7 +784,6 @@ mod tests { ], expected_result, expected_budget, - enable_request_heap_frame_ix, support_set_loaded_accounts_data_size_limit_ix ); } @@ -929,7 +808,6 @@ mod tests { let result = compute_budget.process_instructions( transaction.message().program_instructions_iter(), false, //not support request_units_deprecated - true, //enable_request_heap_frame_ix, true, //support_set_loaded_accounts_data_size_limit_ix, ); diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index b690ea2ffef434..9bdec77a897f59 
100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -3836,7 +3836,6 @@ fn test_program_fees() { &ComputeBudget::fee_budget_limits( sanitized_message.program_instructions_iter(), &feature_set, - None, ), true, false, @@ -3864,7 +3863,6 @@ fn test_program_fees() { &ComputeBudget::fee_budget_limits( sanitized_message.program_instructions_iter(), &feature_set, - None, ), true, false, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index def01b9d5bcbf1..543c90350697f1 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4092,7 +4092,6 @@ impl Bank { &ComputeBudget::fee_budget_limits( message.program_instructions_iter(), &self.feature_set, - Some(self.cluster_type()), ), self.feature_set .is_active(&remove_congestion_multiplier_from_fee_calculation::id()), @@ -5179,7 +5178,6 @@ impl Bank { !self .feature_set .is_active(&remove_deprecated_request_unit_ix::id()), - true, // don't reject txs that use request heap size ix self.feature_set .is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), ); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 001494e5594e7a..299ece5fc8998c 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -63,7 +63,7 @@ use { entrypoint::MAX_PERMITTED_DATA_INCREASE, epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH}, feature::{self, Feature}, - feature_set::{self, enable_request_heap_frame_ix, FeatureSet}, + feature_set::{self, FeatureSet}, fee::FeeStructure, fee_calculator::FeeRateGovernor, genesis_config::{create_genesis_config, ClusterType, GenesisConfig}, @@ -2829,7 +2829,6 @@ fn test_bank_tx_compute_unit_fee() { &FeeStructure::default(), false, true, - true, ); let (expected_fee_collected, expected_fee_burned) = @@ -3011,7 +3010,6 @@ fn test_bank_blockhash_compute_unit_fee_structure() { &FeeStructure::default(), false, true, - true, ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), @@ -3030,7 +3028,6 @@ fn 
test_bank_blockhash_compute_unit_fee_structure() { &FeeStructure::default(), false, true, - true, ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), @@ -3144,7 +3141,6 @@ fn test_filter_program_errors_and_collect_compute_unit_fee() { &FeeStructure::default(), false, true, - true, ) * 2 ) .0 @@ -10074,7 +10070,6 @@ fn calculate_test_fee( lamports_per_signature: u64, fee_structure: &FeeStructure, support_set_accounts_data_size_limit_ix: bool, - enable_request_heap_frame_ix: bool, remove_congestion_multiplier: bool, ) -> u64 { let mut feature_set = FeatureSet::all_enabled(); @@ -10084,12 +10079,8 @@ fn calculate_test_fee( feature_set.deactivate(&include_loaded_accounts_data_size_in_fee_calculation::id()); } - if !enable_request_heap_frame_ix { - feature_set.deactivate(&enable_request_heap_frame_ix::id()); - } - let budget_limits = - ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set, None); + ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set); fee_structure.calculate_fee( message, lamports_per_signature, @@ -10115,7 +10106,6 @@ fn test_calculate_fee() { }, support_set_accounts_data_size_limit_ix, true, - true, ), 0 ); @@ -10133,7 +10123,6 @@ fn test_calculate_fee() { }, support_set_accounts_data_size_limit_ix, true, - true, ), 1 ); @@ -10156,7 +10145,6 @@ fn test_calculate_fee() { }, support_set_accounts_data_size_limit_ix, true, - true, ), 4 ); @@ -10184,7 +10172,6 @@ fn test_calculate_fee_compute_units() { &fee_structure, support_set_accounts_data_size_limit_ix, true, - true, ), max_fee + lamports_per_signature ); @@ -10204,7 +10191,6 @@ fn test_calculate_fee_compute_units() { &fee_structure, support_set_accounts_data_size_limit_ix, true, - true, ), max_fee + 3 * lamports_per_signature ); @@ -10246,7 +10232,6 @@ fn test_calculate_fee_compute_units() { &fee_structure, support_set_accounts_data_size_limit_ix, true, - true, ); assert_eq!( fee, @@ -10286,7 +10271,6 @@ fn 
test_calculate_prioritization_fee() { &fee_structure, true, true, - true, ); assert_eq!( fee, @@ -10332,7 +10316,6 @@ fn test_calculate_fee_secp256k1() { &fee_structure, support_set_accounts_data_size_limit_ix, true, - true, ), 2 ); @@ -10353,7 +10336,6 @@ fn test_calculate_fee_secp256k1() { &fee_structure, support_set_accounts_data_size_limit_ix, true, - true, ), 11 ); @@ -11994,7 +11976,6 @@ fn test_calculate_fee_with_congestion_multiplier() { cheap_lamports_per_signature, &fee_structure, true, - true, remove_congestion_multiplier, ), signature_fee * signature_count @@ -12016,7 +11997,6 @@ fn test_calculate_fee_with_congestion_multiplier() { expensive_lamports_per_signature, &fee_structure, true, - true, remove_congestion_multiplier, ), signature_fee * signature_count / denominator @@ -12047,35 +12027,12 @@ fn test_calculate_fee_with_request_heap_frame_flag() { )) .unwrap(); - // assert when enable_request_heap_frame_ix is enabled, prioritization fee will be counted + // assert when request_heap_frame is presented in tx, prioritization fee will be counted // into transaction fee - let mut enable_request_heap_frame_ix = true; assert_eq!( - calculate_test_fee( - &message, - lamports_per_signature, - &fee_structure, - true, - enable_request_heap_frame_ix, - true, - ), + calculate_test_fee(&message, lamports_per_signature, &fee_structure, true, true,), signature_fee + request_cu * lamports_per_cu ); - - // assert when enable_request_heap_frame_ix is disabled (an v1.13 behavior), prioritization fee will not be counted - // into transaction fee - enable_request_heap_frame_ix = false; - assert_eq!( - calculate_test_fee( - &message, - lamports_per_signature, - &fee_structure, - true, - enable_request_heap_frame_ix, - true, - ), - signature_fee - ); } #[test] diff --git a/runtime/src/transaction_priority_details.rs b/runtime/src/transaction_priority_details.rs index 08cbefd3280b93..0d0a94df4ed393 100644 --- a/runtime/src/transaction_priority_details.rs +++ 
b/runtime/src/transaction_priority_details.rs @@ -28,7 +28,6 @@ pub trait GetTransactionPriorityDetails { .process_instructions( instructions, true, // supports prioritization by request_units_deprecated instruction - true, // enable request heap frame instruction true, // enable support set accounts data size instruction // TODO: round_compute_unit_price_enabled: bool ) From c85eb733003128f4dd35246333073181b9e77ee9 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 18 Sep 2023 17:55:28 -0500 Subject: [PATCH 119/407] O(n) dedup (#33297) O(N) dedup Co-authored-by: jeff washington --- accounts-db/src/accounts_index.rs | 34 ++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 0b41948c79a7cf..f1c0b4e90972fc 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1601,21 +1601,34 @@ impl + Into> AccountsIndex { // Earlier entries are overwritten by later entries items.sort_by(|a, b| a.0.cmp(&b.0)); let mut duplicates = None::>; - let mut i = 0; - while i < items.len().saturating_sub(1) { - let this_key = &items[i].0; - // look at next entry. If it is same pubkey as this one, then remove this one. - if this_key == &items[i + 1].0 { + + // Iterate the items vec from the end to the beginning. Adjacent duplicated items will be + // written to the front of the vec. + let n = items.len(); + let mut last_key = items[n - 1].0; + let mut write = n - 1; + let mut curr = write; + + while curr > 0 { + let curr_item = items[curr - 1]; + + if curr_item.0 == last_key { let mut duplicates_insert = duplicates.unwrap_or_default(); - // i+1 is same pubkey as i, so remove i - duplicates_insert.push(items.remove(i)); + duplicates_insert.push(curr_item); duplicates = Some(duplicates_insert); - // `items` got smaller, so `i` remains the same. - // There could also be several duplicate pubkeys. 
+ curr -= 1; } else { - i += 1; + if curr < write { + items[write - 1] = curr_item; + } + curr -= 1; + write -= 1; + last_key = curr_item.0; } } + + items.drain(..(write - curr)); + duplicates } @@ -2187,6 +2200,7 @@ pub mod tests { let result = AccountsIndex::::remove_older_duplicate_pubkeys(&mut items); assert_eq!(items, expected); if dup != 0 { + expected_dups.reverse(); assert_eq!(result.unwrap(), expected_dups); } else { assert!(result.is_none()); From bc2b37276284234f431e7e39f1fb614cd56fcb50 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 18 Sep 2023 19:11:28 -0400 Subject: [PATCH 120/407] Adds DCOU to verify_snapshot_archive() (#33298) --- core/Cargo.toml | 1 + runtime/src/serde_snapshot.rs | 1 + runtime/src/snapshot_utils.rs | 20 +++++++++++++------- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/core/Cargo.toml b/core/Cargo.toml index df083bdf0508c2..f29ed64031afbe 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -88,6 +88,7 @@ serial_test = { workspace = true } solana-core = { path = ".", features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-program-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-stake-program = { workspace = true } static_assertions = { workspace = true } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 1aa71131fca802..078da133979f64 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -266,6 +266,7 @@ where /// used by tests to compare contents of serialized bank fields /// serialized format is not deterministic - likely due to randomness in structs like hashmaps +#[cfg(feature = "dev-context-only-utils")] pub(crate) fn compare_two_serialized_banks( path1: impl AsRef, path2: impl AsRef, diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 
9d8e8026255d7e..74c9b2421f4c99 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -16,7 +16,6 @@ use { fs_err, lazy_static::lazy_static, log::*, - rayon::prelude::*, regex::Regex, solana_accounts_db::{ account_storage::AccountStorageMap, @@ -25,10 +24,7 @@ use { }, accounts_file::AccountsFileError, append_vec::AppendVec, - hardened_unpack::{ - streaming_unpack_snapshot, unpack_snapshot, ParallelSelector, UnpackError, - UnpackedAppendVecMap, - }, + hardened_unpack::{self, ParallelSelector, UnpackError}, shared_buffer_reader::{SharedBuffer, SharedBufferReader}, }, solana_measure::{measure, measure::Measure}, @@ -49,6 +45,8 @@ use { tempfile::TempDir, thiserror::Error, }; +#[cfg(feature = "dev-context-only-utils")] +use {hardened_unpack::UnpackedAppendVecMap, rayon::prelude::*}; mod archive_format; pub mod snapshot_storage_rebuilder; @@ -1293,7 +1291,7 @@ fn spawn_unpack_snapshot_thread( Builder::new() .name(format!("solUnpkSnpsht{thread_index:02}")) .spawn(move || { - streaming_unpack_snapshot( + hardened_unpack::streaming_unpack_snapshot( &mut archive, ledger_dir.as_path(), &account_paths, @@ -1872,6 +1870,7 @@ pub fn purge_old_snapshot_archives( } } +#[cfg(feature = "dev-context-only-utils")] fn unpack_snapshot_local( shared_buffer: SharedBuffer, ledger_dir: &Path, @@ -1895,7 +1894,12 @@ fn unpack_snapshot_local( divisions: parallel_divisions, }); let mut archive = Archive::new(reader); - unpack_snapshot(&mut archive, ledger_dir, account_paths, parallel_selector) + hardened_unpack::unpack_snapshot( + &mut archive, + ledger_dir, + account_paths, + parallel_selector, + ) }) .collect::>(); @@ -1925,6 +1929,7 @@ fn untar_snapshot_create_shared_buffer( } } +#[cfg(feature = "dev-context-only-utils")] fn untar_snapshot_in( snapshot_tar: impl AsRef, unpack_dir: &Path, @@ -1983,6 +1988,7 @@ pub enum VerifyBank { NonDeterministic, } +#[cfg(feature = "dev-context-only-utils")] pub fn verify_snapshot_archive( snapshot_archive: impl AsRef, 
snapshots_to_verify: impl AsRef, From 9970bfcf97d7282933664ef246ed829039e7e81f Mon Sep 17 00:00:00 2001 From: Jimii <30603522+jim4067@users.noreply.github.com> Date: Tue, 19 Sep 2023 12:30:18 +0300 Subject: [PATCH 121/407] docs: add solana stack exchange site (#33280) --- docs/src/developing/on-chain-programs/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/developing/on-chain-programs/faq.md b/docs/src/developing/on-chain-programs/faq.md index f58b7d01224286..de255742309bb1 100644 --- a/docs/src/developing/on-chain-programs/faq.md +++ b/docs/src/developing/on-chain-programs/faq.md @@ -6,7 +6,7 @@ When writing or interacting with Solana programs, there are common questions or challenges that often come up. Below are resources to help answer these questions. -If not addressed here, ask on [StackOverflow](https://stackoverflow.com/questions/tagged/solana) with the `solana` tag +If not addressed here, ask on the [Solana Stack Exchange](https://solana.stackexchange.com/) or [StackOverflow](https://stackoverflow.com/questions/tagged/solana) with the `solana` tag ## Limitations From bc38ef27d80117f73e2c20798d73384c3c289e22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Tue, 19 Sep 2023 16:29:52 +0200 Subject: [PATCH 122/407] Feature - Enable Program-Runtime-v2 and Loader-v4 (#33294) * Adds a new feature `enable_program_runtime_v2_and_loader_v4`. * Adds a feature gated builtin entry for the solana_loader_v4_program. 
--- runtime/src/builtins.rs | 6 ++++++ sdk/src/feature_set.rs | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/runtime/src/builtins.rs b/runtime/src/builtins.rs index 5a21424cc35a81..06a1709335b1db 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/builtins.rs @@ -101,4 +101,10 @@ pub static BUILTINS: &[BuiltinPrototype] = &[ name: "zk_token_proof_program", entrypoint: solana_zk_token_proof_program::process_instruction, }, + BuiltinPrototype { + feature_id: Some(feature_set::enable_program_runtime_v2_and_loader_v4::id()), + program_id: solana_sdk::loader_v4::id(), + name: "loader_v4", + entrypoint: solana_loader_v4_program::process_instruction, + }, ]; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 9a52f20dd25083..95ea3f3b6cccb1 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -685,6 +685,10 @@ pub mod remaining_compute_units_syscall_enabled { solana_sdk::declare_id!("5TuppMutoyzhUSfuYdhgzD47F92GL1g89KpCZQKqedxP"); } +pub mod enable_program_runtime_v2_and_loader_v4 { + solana_sdk::declare_id!("8oBxsYqnCvUTGzgEpxPcnVf7MLbWWPYddE33PftFeBBd"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -851,6 +855,7 @@ lazy_static! 
{ (enable_poseidon_syscall::id(), "Enable Poseidon syscall"), (timely_vote_credits::id(), "use timeliness of votes in determining credits to award"), (remaining_compute_units_syscall_enabled::id(), "enable the remaining_compute_units syscall"), + (enable_program_runtime_v2_and_loader_v4::id(), "Enable Program-Runtime-v2 and Loader-v4 #33293"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 62888d297a4e7027aa0bf218d921287143922dee Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 19 Sep 2023 08:27:35 -0700 Subject: [PATCH 123/407] Implement CLI front-end for loader-v4 commands (#33289) * Implement CLI front-end for loader-v4 commands * remove dead_code clippy overrides * Add unit tests --- cli/src/clap_app.rs | 4 +- cli/src/cli.rs | 11 +- cli/src/program_v4.rs | 605 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 607 insertions(+), 13 deletions(-) diff --git a/cli/src/clap_app.rs b/cli/src/clap_app.rs index 0c4b14f97b2b50..74d9b998badbf8 100644 --- a/cli/src/clap_app.rs +++ b/cli/src/clap_app.rs @@ -1,7 +1,8 @@ use { crate::{ address_lookup_table::AddressLookupTableSubCommands, cli::*, cluster_query::*, feature::*, - inflation::*, nonce::*, program::*, stake::*, validator_info::*, vote::*, wallet::*, + inflation::*, nonce::*, program::*, program_v4::ProgramV4SubCommands, stake::*, + validator_info::*, vote::*, wallet::*, }, clap::{App, AppSettings, Arg, ArgGroup, SubCommand}, solana_clap_utils::{self, hidden_unless_forced, input_validators::*, keypair::*}, @@ -143,6 +144,7 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A .inflation_subcommands() .nonce_subcommands() .program_subcommands() + .program_v4_subcommands() .address_lookup_table_subcommands() .stake_subcommands() .validator_info_subcommands() diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 9db77dbc7eb886..e6960c3fa3599a 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1,7 +1,7 @@ use { crate::{ address_lookup_table::*, 
clap_app::*, cluster_query::*, feature::*, inflation::*, nonce::*, - program::*, spend_utils::*, stake::*, validator_info::*, vote::*, wallet::*, + program::*, program_v4::*, spend_utils::*, stake::*, validator_info::*, vote::*, wallet::*, }, clap::{crate_description, crate_name, value_t_or_exit, ArgMatches, Shell}, log::*, @@ -175,6 +175,7 @@ pub enum CliCommand { // Program Deployment Deploy, Program(ProgramCliCommand), + ProgramV4(ProgramV4CliCommand), // Stake Commands CreateStakeAccount { stake_account: SignerIndex, @@ -687,6 +688,9 @@ pub fn parse_command( ("program", Some(matches)) => { parse_program_subcommand(matches, default_signer, wallet_manager) } + ("program-v4", Some(matches)) => { + parse_program_v4_subcommand(matches, default_signer, wallet_manager) + } ("address-lookup-table", Some(matches)) => { parse_address_lookup_table_subcommand(matches, default_signer, wallet_manager) } @@ -1103,6 +1107,11 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { process_program_subcommand(rpc_client, config, program_subcommand) } + // Deploy a custom program v4 to the chain + CliCommand::ProgramV4(program_subcommand) => { + process_program_v4_subcommand(rpc_client, config, program_subcommand) + } + // Stake Commands // Create stake account diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index bb1a844da2771a..07b82636bf0871 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -1,10 +1,19 @@ use { crate::{ checks::*, - cli::{log_instruction_custom_error, CliConfig, ProcessResult}, + cli::{ + log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, + ProcessResult, + }, program::calculate_max_chunk_size, }, + clap::{App, AppSettings, Arg, ArgMatches, SubCommand}, log::*, + solana_clap_utils::{ + input_parsers::{pubkey_of, pubkey_of_signer, signer_of}, + input_validators::is_valid_signer, + keypair::{DefaultSigner, SignerIndex}, + }, solana_cli_output::CliProgramId, solana_client::{ 
connection_cache::ConnectionCache, @@ -13,6 +22,9 @@ use { }, tpu_client::{TpuClient, TpuClientConfig}, }, + solana_program_runtime::{compute_budget::ComputeBudget, invoke_context::InvokeContext}, + solana_rbpf::{elf::Executable, verifier::RequisiteVerifier}, + solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::config::RpcSendTransactionConfig, solana_sdk::{ @@ -29,9 +41,364 @@ use { system_instruction::{self, SystemError}, transaction::Transaction, }, - std::{cmp::Ordering, sync::Arc}, + std::{cmp::Ordering, fs::File, io::Read, rc::Rc, sync::Arc}, }; +#[derive(Debug, PartialEq, Eq)] +pub enum ProgramV4CliCommand { + Deploy { + program_location: String, + program_signer_index: SignerIndex, + authority_signer_index: SignerIndex, + }, + Redeploy { + program_location: String, + program_address: Pubkey, + buffer_signer_index: Option, + authority_signer_index: SignerIndex, + }, + Undeploy { + program_address: Pubkey, + authority_signer_index: SignerIndex, + }, + Finalize { + program_address: Pubkey, + authority_signer_index: SignerIndex, + }, +} + +pub trait ProgramV4SubCommands { + fn program_v4_subcommands(self) -> Self; +} + +impl ProgramV4SubCommands for App<'_, '_> { + fn program_v4_subcommands(self) -> Self { + self.subcommand( + SubCommand::with_name("program-v4") + .about("Program V4 management") + .setting(AppSettings::SubcommandRequiredElseHelp) + .subcommand( + SubCommand::with_name("deploy") + .about("Deploy a program") + .arg( + Arg::with_name("program_location") + .index(1) + .value_name("PROGRAM_FILEPATH") + .takes_value(true) + .help("/path/to/program.so"), + ) + .arg( + Arg::with_name("program") + .long("program") + .value_name("PROGRAM_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Program account signer. 
The program data is written to the associated account.") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Program authority [default: the default configured keypair]") + ), + ) + .subcommand( + SubCommand::with_name("redeploy") + .about("Redeploy a previously deployed program") + .arg( + Arg::with_name("program_location") + .index(1) + .value_name("PROGRAM_FILEPATH") + .takes_value(true) + .help("/path/to/program.so"), + ) + .arg( + Arg::with_name("program-id") + .long("program-id") + .value_name("PROGRAM_ID") + .takes_value(true) + .help("Executable program's address") + ) + .arg( + Arg::with_name("buffer") + .long("buffer") + .value_name("BUFFER_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Optional intermediate buffer account to write data to, which can be used to resume a failed deploy") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Program authority [default: the default configured keypair]") + ), + ) + .subcommand( + SubCommand::with_name("undeploy") + .about("Undeploy/close a program") + .arg( + Arg::with_name("program-id") + .long("program-id") + .value_name("PROGRAM_ID") + .takes_value(true) + .help("Executable program's address") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Program authority [default: the default configured keypair]") + ), + ) + .subcommand( + SubCommand::with_name("finalize") + .about("Finalize a program to make it immutable") + .arg( + Arg::with_name("program-id") + .long("program-id") + .value_name("PROGRAM_ID") + .takes_value(true) + .help("Executable program's address") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) 
+ .validator(is_valid_signer) + .help("Program authority [default: the default configured keypair]") + ), + ) + ) + } +} + +pub fn parse_program_v4_subcommand( + matches: &ArgMatches<'_>, + default_signer: &DefaultSigner, + wallet_manager: &mut Option>, +) -> Result { + let (subcommand, sub_matches) = matches.subcommand(); + let response = match (subcommand, sub_matches) { + ("deploy", Some(matches)) => { + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let program_location = matches + .value_of("program_location") + .map(|location| location.to_string()); + + let program_pubkey = if let Ok((program_signer, Some(program_pubkey))) = + signer_of(matches, "program", wallet_manager) + { + bulk_signers.push(program_signer); + Some(program_pubkey) + } else { + pubkey_of_signer(matches, "program", wallet_manager)? + }; + + let (authority, authority_pubkey) = signer_of(matches, "authority", wallet_manager)?; + bulk_signers.push(authority); + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Deploy { + program_location: program_location.expect("Program location is missing"), + program_signer_index: signer_info + .index_of(program_pubkey) + .expect("Program signer is missing"), + authority_signer_index: signer_info + .index_of(authority_pubkey) + .expect("Authority signer is missing"), + }), + signers: signer_info.signers, + } + } + ("redeploy", Some(matches)) => { + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let program_location = matches + .value_of("program_location") + .map(|location| location.to_string()); + + let buffer_pubkey = if let Ok((buffer_signer, Some(buffer_pubkey))) = + signer_of(matches, "buffer", wallet_manager) + { + bulk_signers.push(buffer_signer); + Some(buffer_pubkey) + } else { + pubkey_of_signer(matches, 
"buffer", wallet_manager)? + }; + + let (authority, authority_pubkey) = signer_of(matches, "authority", wallet_manager)?; + bulk_signers.push(authority); + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Redeploy { + program_location: program_location.expect("Program location is missing"), + program_address: pubkey_of(matches, "program-id") + .expect("Program address is missing"), + buffer_signer_index: signer_info.index_of_or_none(buffer_pubkey), + authority_signer_index: signer_info + .index_of(authority_pubkey) + .expect("Authority signer is missing"), + }), + signers: signer_info.signers, + } + } + ("undeploy", Some(matches)) => { + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let (authority, authority_pubkey) = signer_of(matches, "authority", wallet_manager)?; + bulk_signers.push(authority); + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Undeploy { + program_address: pubkey_of(matches, "program-id") + .expect("Program address is missing"), + authority_signer_index: signer_info + .index_of(authority_pubkey) + .expect("Authority signer is missing"), + }), + signers: signer_info.signers, + } + } + ("finalize", Some(matches)) => { + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let (authority, authority_pubkey) = signer_of(matches, "authority", wallet_manager)?; + bulk_signers.push(authority); + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Finalize { + program_address: pubkey_of(matches, "program-id") + .expect("Program address is missing"), + 
authority_signer_index: signer_info + .index_of(authority_pubkey) + .expect("Authority signer is missing"), + }), + signers: signer_info.signers, + } + } + _ => unreachable!(), + }; + Ok(response) +} + +fn read_and_verify_elf(program_location: &str) -> Result, Box> { + let mut file = File::open(program_location) + .map_err(|err| format!("Unable to open program file: {err}"))?; + let mut program_data = Vec::new(); + file.read_to_end(&mut program_data) + .map_err(|err| format!("Unable to read program file: {err}"))?; + + // Verify the program + let program_runtime_environment = + solana_loader_v4_program::create_program_runtime_environment_v2( + &ComputeBudget::default(), + false, + ); + let executable = + Executable::::from_elf(&program_data, Arc::new(program_runtime_environment)) + .map_err(|err| format!("ELF error: {err}"))?; + + executable + .verify::() + .map_err(|err| format!("ELF error: {err}"))?; + + Ok(program_data) +} + +pub fn process_program_v4_subcommand( + rpc_client: Arc, + config: &CliConfig, + program_subcommand: &ProgramV4CliCommand, +) -> ProcessResult { + match program_subcommand { + ProgramV4CliCommand::Deploy { + program_location, + program_signer_index, + authority_signer_index, + } => { + let program_data = read_and_verify_elf(program_location)?; + let program_len = program_data.len() as u32; + + process_deploy_program( + rpc_client, + config, + &program_data, + program_len, + &config.signers[*program_signer_index].pubkey(), + Some(config.signers[*program_signer_index]), + config.signers[*authority_signer_index], + ) + } + ProgramV4CliCommand::Redeploy { + program_location, + program_address, + buffer_signer_index, + authority_signer_index, + } => { + let program_data = read_and_verify_elf(program_location)?; + let program_len = program_data.len() as u32; + let buffer_signer = buffer_signer_index.map(|index| config.signers[index]); + + process_deploy_program( + rpc_client, + config, + &program_data, + program_len, + program_address, + 
buffer_signer, + config.signers[*authority_signer_index], + ) + } + ProgramV4CliCommand::Undeploy { + program_address, + authority_signer_index, + } => process_undeploy_program( + rpc_client, + config, + program_address, + config.signers[*authority_signer_index], + ), + ProgramV4CliCommand::Finalize { + program_address, + authority_signer_index, + } => process_finalize_program( + rpc_client, + config, + program_address, + config.signers[*authority_signer_index], + ), + } +} + // This function can be used for the following use-cases // * Deploy a program // - buffer_signer argument must contain program signer information @@ -41,7 +408,6 @@ use { // * Redeploy a program using a buffer account // - buffer_signer argument must contain the temporary buffer account information // (program_address must contain program ID and must NOT be same as buffer_signer.pubkey()) -#[allow(dead_code)] fn process_deploy_program( rpc_client: Arc, config: &CliConfig, @@ -142,7 +508,6 @@ fn process_deploy_program( Ok(config.output_format.formatted_string(&program_id)) } -#[allow(dead_code)] fn process_undeploy_program( rpc_client: Arc, config: &CliConfig, @@ -206,7 +571,6 @@ fn process_undeploy_program( Ok(config.output_format.formatted_string(&program_id)) } -#[allow(dead_code)] fn process_finalize_program( rpc_client: Arc, config: &CliConfig, @@ -243,7 +607,6 @@ fn process_finalize_program( Ok(config.output_format.formatted_string(&program_id)) } -#[allow(dead_code)] fn check_payer( rpc_client: &RpcClient, config: &CliConfig, @@ -275,7 +638,6 @@ fn check_payer( Ok(()) } -#[allow(dead_code)] fn send_messages( rpc_client: Arc, config: &CliConfig, @@ -394,7 +756,6 @@ fn send_messages( Ok(()) } -#[allow(dead_code)] fn build_create_buffer_message( rpc_client: Arc, config: &CliConfig, @@ -563,7 +924,6 @@ fn build_retract_and_deploy_messages( Ok(messages) } -#[allow(dead_code)] fn build_retract_instruction( account: &Account, buffer_address: &Pubkey, @@ -595,7 +955,6 @@ fn 
build_retract_instruction( } } -#[allow(dead_code)] fn build_truncate_instructions( rpc_client: Arc, payer: &Pubkey, @@ -677,12 +1036,15 @@ fn build_truncate_instructions( mod tests { use { super::*, + crate::{clap_app::get_clap_app, cli::parse_command}, serde_json::json, solana_rpc_client_api::{ request::RpcRequest, response::{Response, RpcResponseContext}, }, - solana_sdk::signature::keypair_from_seed, + solana_sdk::signature::{ + keypair_from_seed, read_keypair_file, write_keypair_file, Keypair, + }, std::collections::HashMap, }; @@ -1001,4 +1363,225 @@ mod tests { ) .is_ok()); } + + fn make_tmp_path(name: &str) -> String { + let out_dir = std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()); + let keypair = Keypair::new(); + + let path = format!("{}/tmp/{}-{}", out_dir, name, keypair.pubkey()); + + // whack any possible collision + let _ignored = std::fs::remove_dir_all(&path); + // whack any possible collision + let _ignored = std::fs::remove_file(&path); + + path + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn test_cli_parse_deploy() { + let test_commands = get_clap_app("test", "desc", "version"); + + let default_keypair = Keypair::new(); + let keypair_file = make_tmp_path("keypair_file"); + write_keypair_file(&default_keypair, &keypair_file).unwrap(); + let default_signer = DefaultSigner::new("", &keypair_file); + + let program_keypair = Keypair::new(); + let program_keypair_file = make_tmp_path("program_keypair_file"); + write_keypair_file(&program_keypair, &program_keypair_file).unwrap(); + + let authority_keypair = Keypair::new(); + let authority_keypair_file = make_tmp_path("authority_keypair_file"); + write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); + + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program-v4", + "deploy", + "/Users/test/program.so", + "--program", + &program_keypair_file, + "--authority", + &authority_keypair_file, + ]); + assert_eq!( + 
parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Deploy { + program_location: "/Users/test/program.so".to_string(), + program_signer_index: 1, + authority_signer_index: 2, + }), + signers: vec![ + read_keypair_file(&keypair_file).unwrap().into(), + read_keypair_file(&program_keypair_file).unwrap().into(), + read_keypair_file(&authority_keypair_file).unwrap().into() + ], + } + ); + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn test_cli_parse_redeploy() { + let test_commands = get_clap_app("test", "desc", "version"); + + let default_keypair = Keypair::new(); + let keypair_file = make_tmp_path("keypair_file"); + write_keypair_file(&default_keypair, &keypair_file).unwrap(); + let default_signer = DefaultSigner::new("", &keypair_file); + + let program_keypair = Keypair::new(); + let program_keypair_file = make_tmp_path("program_keypair_file"); + write_keypair_file(&program_keypair, &program_keypair_file).unwrap(); + + let authority_keypair = Keypair::new(); + let authority_keypair_file = make_tmp_path("authority_keypair_file"); + write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); + + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program-v4", + "redeploy", + "/Users/test/program.so", + "--program-id", + &program_keypair_file, + "--authority", + &authority_keypair_file, + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Redeploy { + program_location: "/Users/test/program.so".to_string(), + program_address: program_keypair.pubkey(), + authority_signer_index: 1, + buffer_signer_index: None, + }), + signers: vec![ + read_keypair_file(&keypair_file).unwrap().into(), + read_keypair_file(&authority_keypair_file).unwrap().into() + ], + } + ); + + let buffer_keypair = Keypair::new(); + let buffer_keypair_file = 
make_tmp_path("buffer_keypair_file"); + write_keypair_file(&buffer_keypair, &buffer_keypair_file).unwrap(); + + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program-v4", + "redeploy", + "/Users/test/program.so", + "--program-id", + &program_keypair_file, + "--buffer", + &buffer_keypair_file, + "--authority", + &authority_keypair_file, + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Redeploy { + program_location: "/Users/test/program.so".to_string(), + program_address: program_keypair.pubkey(), + buffer_signer_index: Some(1), + authority_signer_index: 2, + }), + signers: vec![ + read_keypair_file(&keypair_file).unwrap().into(), + read_keypair_file(&buffer_keypair_file).unwrap().into(), + read_keypair_file(&authority_keypair_file).unwrap().into() + ], + } + ); + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn test_cli_parse_undeploy() { + let test_commands = get_clap_app("test", "desc", "version"); + + let default_keypair = Keypair::new(); + let keypair_file = make_tmp_path("keypair_file"); + write_keypair_file(&default_keypair, &keypair_file).unwrap(); + let default_signer = DefaultSigner::new("", &keypair_file); + + let program_keypair = Keypair::new(); + let program_keypair_file = make_tmp_path("program_keypair_file"); + write_keypair_file(&program_keypair, &program_keypair_file).unwrap(); + + let authority_keypair = Keypair::new(); + let authority_keypair_file = make_tmp_path("authority_keypair_file"); + write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); + + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program-v4", + "undeploy", + "--program-id", + &program_keypair_file, + "--authority", + &authority_keypair_file, + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: 
CliCommand::ProgramV4(ProgramV4CliCommand::Undeploy { + program_address: program_keypair.pubkey(), + authority_signer_index: 1, + }), + signers: vec![ + read_keypair_file(&keypair_file).unwrap().into(), + read_keypair_file(&authority_keypair_file).unwrap().into() + ], + } + ); + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn test_cli_parse_finalize() { + let test_commands = get_clap_app("test", "desc", "version"); + + let default_keypair = Keypair::new(); + let keypair_file = make_tmp_path("keypair_file"); + write_keypair_file(&default_keypair, &keypair_file).unwrap(); + let default_signer = DefaultSigner::new("", &keypair_file); + + let program_keypair = Keypair::new(); + let program_keypair_file = make_tmp_path("program_keypair_file"); + write_keypair_file(&program_keypair, &program_keypair_file).unwrap(); + + let authority_keypair = Keypair::new(); + let authority_keypair_file = make_tmp_path("authority_keypair_file"); + write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); + + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program-v4", + "finalize", + "--program-id", + &program_keypair_file, + "--authority", + &authority_keypair_file, + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Finalize { + program_address: program_keypair.pubkey(), + authority_signer_index: 1, + }), + signers: vec![ + read_keypair_file(&keypair_file).unwrap().into(), + read_keypair_file(&authority_keypair_file).unwrap().into() + ], + } + ); + } } From 92ab3827fd2fcd8709b2b71dbc9351f294efe34a Mon Sep 17 00:00:00 2001 From: Kevin Heavey <24635973+kevinheavey@users.noreply.github.com> Date: Tue, 19 Sep 2023 20:07:35 +0400 Subject: [PATCH 124/407] program: move array-bytes to dev-dependencies (#33277) * program: move array-bytes to dev-dependencies * run cargo-for-all-lock-files.sh tree --- programs/sbf/Cargo.lock | 1 - 
sdk/program/Cargo.toml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 63ed6c10914881..8d34c0206afd52 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5235,7 +5235,6 @@ dependencies = [ "ark-ec", "ark-ff", "ark-serialize", - "array-bytes", "base64 0.21.4", "bincode", "bitflags 2.3.3", diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 9d3583faa8d280..8d178ec86f525f 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -51,7 +51,6 @@ ark-bn254 = { workspace = true } ark-ec = { workspace = true } ark-ff = { workspace = true } ark-serialize = { workspace = true } -array-bytes = { workspace = true } bitflags = { workspace = true } base64 = { workspace = true, features = ["alloc", "std"] } curve25519-dalek = { workspace = true, features = ["serde"] } @@ -79,6 +78,7 @@ parking_lot = { workspace = true } [dev-dependencies] anyhow = { workspace = true } +array-bytes = { workspace = true } assert_matches = { workspace = true } serde_json = { workspace = true } static_assertions = { workspace = true } From 22338f547285cd3e5f953b73ec47d7893afc5c1d Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 19 Sep 2023 09:08:42 -0700 Subject: [PATCH 125/407] TransactionScheduler: InFlightTracker (#33206) --- core/src/banking_stage/scheduler_messages.rs | 14 +- .../in_flight_tracker.rs | 123 ++++++++++++++++++ .../transaction_scheduler/mod.rs | 6 +- 3 files changed, 139 insertions(+), 4 deletions(-) create mode 100644 core/src/banking_stage/transaction_scheduler/in_flight_tracker.rs diff --git a/core/src/banking_stage/scheduler_messages.rs b/core/src/banking_stage/scheduler_messages.rs index f5b6ae9258702e..172087e2cf8e82 100644 --- a/core/src/banking_stage/scheduler_messages.rs +++ b/core/src/banking_stage/scheduler_messages.rs @@ -1,7 +1,7 @@ use { super::immutable_deserialized_packet::ImmutableDeserializedPacket, solana_sdk::{clock::Slot, 
transaction::SanitizedTransaction}, - std::sync::Arc, + std::{fmt::Display, sync::Arc}, }; /// A unique identifier for a transaction batch. @@ -14,6 +14,12 @@ impl TransactionBatchId { } } +impl Display for TransactionBatchId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + /// A unique identifier for a transaction. #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub struct TransactionId(u64); @@ -24,6 +30,12 @@ impl TransactionId { } } +impl Display for TransactionId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + /// Message: [Scheduler -> Worker] /// Transactions to be consumed (i.e. executed, recorded, and committed) pub struct ConsumeWork { diff --git a/core/src/banking_stage/transaction_scheduler/in_flight_tracker.rs b/core/src/banking_stage/transaction_scheduler/in_flight_tracker.rs new file mode 100644 index 00000000000000..243f14c66920a0 --- /dev/null +++ b/core/src/banking_stage/transaction_scheduler/in_flight_tracker.rs @@ -0,0 +1,123 @@ +use { + super::{batch_id_generator::BatchIdGenerator, thread_aware_account_locks::ThreadId}, + crate::banking_stage::scheduler_messages::TransactionBatchId, + std::collections::HashMap, +}; + +/// Tracks the number of transactions that are in flight for each thread. +pub struct InFlightTracker { + num_in_flight_per_thread: Vec, + cus_in_flight_per_thread: Vec, + batches: HashMap, + batch_id_generator: BatchIdGenerator, +} + +struct BatchEntry { + thread_id: ThreadId, + num_transactions: usize, + total_cus: u64, +} + +impl InFlightTracker { + pub fn new(num_threads: usize) -> Self { + Self { + num_in_flight_per_thread: vec![0; num_threads], + cus_in_flight_per_thread: vec![0; num_threads], + batches: HashMap::new(), + batch_id_generator: BatchIdGenerator::default(), + } + } + + /// Returns the number of transactions that are in flight for each thread. 
+ pub fn num_in_flight_per_thread(&self) -> &[usize] { + &self.num_in_flight_per_thread + } + + /// Returns the number of cus that are in flight for each thread. + pub fn cus_in_flight_per_thread(&self) -> &[u64] { + &self.cus_in_flight_per_thread + } + + /// Tracks number of transactions and CUs in-flight for the `thread_id`. + /// Returns a `TransactionBatchId` that can be used to stop tracking the batch + /// when it is complete. + pub fn track_batch( + &mut self, + num_transactions: usize, + total_cus: u64, + thread_id: ThreadId, + ) -> TransactionBatchId { + let batch_id = self.batch_id_generator.next(); + self.num_in_flight_per_thread[thread_id] += num_transactions; + self.cus_in_flight_per_thread[thread_id] += total_cus; + self.batches.insert( + batch_id, + BatchEntry { + thread_id, + num_transactions, + total_cus, + }, + ); + + batch_id + } + + /// Stop tracking the batch with given `batch_id`. + /// Removes the number of transactions for the scheduled thread. + /// Returns the thread id that the batch was scheduled on. + /// + /// # Panics + /// Panics if the batch id does not exist in the tracker. + pub fn complete_batch(&mut self, batch_id: TransactionBatchId) -> ThreadId { + let Some(BatchEntry { + thread_id, + num_transactions, + total_cus, + }) = self.batches.remove(&batch_id) + else { + panic!("batch id {batch_id} is not being tracked"); + }; + self.num_in_flight_per_thread[thread_id] -= num_transactions; + self.cus_in_flight_per_thread[thread_id] -= total_cus; + + thread_id + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[should_panic(expected = "is not being tracked")] + fn test_in_flight_tracker_untracked_batch() { + let mut in_flight_tracker = InFlightTracker::new(2); + in_flight_tracker.complete_batch(TransactionBatchId::new(5)); + } + + #[test] + fn test_in_flight_tracker() { + let mut in_flight_tracker = InFlightTracker::new(2); + + // Add a batch with 2 transactions, 10 kCUs to thread 0. 
+ let batch_id_0 = in_flight_tracker.track_batch(2, 10_000, 0); + assert_eq!(in_flight_tracker.num_in_flight_per_thread(), &[2, 0]); + assert_eq!(in_flight_tracker.cus_in_flight_per_thread(), &[10_000, 0]); + + // Add a batch with 1 transaction, 15 kCUs to thread 1. + let batch_id_1 = in_flight_tracker.track_batch(1, 15_000, 1); + assert_eq!(in_flight_tracker.num_in_flight_per_thread(), &[2, 1]); + assert_eq!( + in_flight_tracker.cus_in_flight_per_thread(), + &[10_000, 15_000] + ); + + in_flight_tracker.complete_batch(batch_id_0); + assert_eq!(in_flight_tracker.num_in_flight_per_thread(), &[0, 1]); + assert_eq!(in_flight_tracker.cus_in_flight_per_thread(), &[0, 15_000]); + + in_flight_tracker.complete_batch(batch_id_1); + assert_eq!(in_flight_tracker.num_in_flight_per_thread(), &[0, 0]); + assert_eq!(in_flight_tracker.cus_in_flight_per_thread(), &[0, 0]); + } +} diff --git a/core/src/banking_stage/transaction_scheduler/mod.rs b/core/src/banking_stage/transaction_scheduler/mod.rs index 065efba0d86c0f..96639f4ad40205 100644 --- a/core/src/banking_stage/transaction_scheduler/mod.rs +++ b/core/src/banking_stage/transaction_scheduler/mod.rs @@ -7,8 +7,8 @@ mod transaction_state; #[allow(dead_code)] mod transaction_state_container; +mod batch_id_generator; #[allow(dead_code)] -mod transaction_id_generator; - +mod in_flight_tracker; #[allow(dead_code)] -mod batch_id_generator; +mod transaction_id_generator; From 889d6c655e6d51610b0ae33e411f6ff6b5b8a55b Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 19 Sep 2023 13:09:42 -0400 Subject: [PATCH 126/407] Moves accounts-db benches to accounts-db crate (#33306) --- {runtime => accounts-db}/benches/accounts_index.rs | 0 {runtime => accounts-db}/benches/append_vec.rs | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {runtime => accounts-db}/benches/accounts_index.rs (100%) rename {runtime => accounts-db}/benches/append_vec.rs (100%) diff --git a/runtime/benches/accounts_index.rs b/accounts-db/benches/accounts_index.rs 
similarity index 100% rename from runtime/benches/accounts_index.rs rename to accounts-db/benches/accounts_index.rs diff --git a/runtime/benches/append_vec.rs b/accounts-db/benches/append_vec.rs similarity index 100% rename from runtime/benches/append_vec.rs rename to accounts-db/benches/append_vec.rs From f50342a790cd0870159aa5b061ceada89f6a58b4 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 19 Sep 2023 10:46:37 -0700 Subject: [PATCH 127/407] Split vote related code from runtime to its own crate (#32882) * Move vote related code to its own crate * Update imports in code and tests * update programs/sbf/Cargo.lock * fix check errors * update abi_digest * rebase fixes * fixes after rebase --- Cargo.lock | 84 +++++++++++++++++ Cargo.toml | 2 + core/Cargo.toml | 1 + core/src/banking_stage.rs | 6 +- core/src/banking_stage/committer.rs | 2 +- core/src/banking_stage/consume_worker.rs | 6 +- core/src/cluster_info_vote_listener.rs | 14 +-- core/src/consensus.rs | 9 +- core/src/consensus/progress_map.rs | 5 +- core/src/replay_stage.rs | 2 +- core/src/tpu.rs | 7 +- core/src/tvu.rs | 2 +- core/src/verified_vote_packets.rs | 6 +- gossip/Cargo.toml | 1 + gossip/src/cluster_info.rs | 3 +- gossip/src/crds_value.rs | 2 +- ledger/Cargo.toml | 1 + ledger/src/blockstore_processor.rs | 11 +-- ledger/src/staking_utils.rs | 6 +- local-cluster/Cargo.toml | 1 + local-cluster/src/cluster_tests.rs | 2 +- local-cluster/tests/local_cluster.rs | 2 +- programs/sbf/Cargo.lock | 75 +++++++++++++++ rpc/Cargo.toml | 1 + rpc/src/rpc_pubsub.rs | 2 +- rpc/src/rpc_subscriptions.rs | 2 +- runtime/Cargo.toml | 1 + runtime/src/bank.rs | 2 +- runtime/src/bank/serde_snapshot.rs | 2 +- runtime/src/bank_utils.rs | 3 +- runtime/src/epoch_stakes.rs | 5 +- runtime/src/lib.rs | 4 - runtime/src/stakes.rs | 7 +- vote/Cargo.toml | 101 +++++++++++++++++++++ vote/build.rs | 27 ++++++ vote/src/lib.rs | 13 +++ {runtime => vote}/src/vote_account.rs | 30 +++--- {runtime => vote}/src/vote_parser.rs | 0 {runtime => 
vote}/src/vote_sender_types.rs | 0 {runtime => vote}/src/vote_transaction.rs | 0 40 files changed, 371 insertions(+), 79 deletions(-) create mode 100644 vote/Cargo.toml create mode 100644 vote/build.rs create mode 100644 vote/src/lib.rs rename {runtime => vote}/src/vote_account.rs (95%) rename {runtime => vote}/src/vote_parser.rs (100%) rename {runtime => vote}/src/vote_sender_types.rs (100%) rename {runtime => vote}/src/vote_transaction.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 7ce6d9a51f2bc8..30729d6ab7dc08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5784,6 +5784,7 @@ dependencies = [ "solana-transaction-status", "solana-turbine", "solana-version", + "solana-vote", "solana-vote-program", "static_assertions", "strum", @@ -6074,6 +6075,7 @@ dependencies = [ "solana-thin-client", "solana-tpu-client", "solana-version", + "solana-vote", "solana-vote-program", "static_assertions", "test-case", @@ -6184,6 +6186,7 @@ dependencies = [ "solana-storage-bigtable", "solana-storage-proto", "solana-transaction-status", + "solana-vote", "solana-vote-program", "spl-token", "spl-token-2022", @@ -6291,6 +6294,7 @@ dependencies = [ "solana-thin-client", "solana-tpu-client", "solana-turbine", + "solana-vote", "solana-vote-program", "static_assertions", "tempfile", @@ -6712,6 +6716,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction-status", "solana-version", + "solana-vote", "solana-vote-program", "spl-token", "spl-token-2022", @@ -6883,6 +6888,7 @@ dependencies = [ "solana-stake-program", "solana-system-program", "solana-version", + "solana-vote", "solana-vote-program", "solana-zk-token-proof-program", "solana-zk-token-sdk", @@ -7405,6 +7411,84 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-vote" +version = "1.17.0" +dependencies = [ + "arrayref", + "assert_matches", + "bincode", + "blake3", + "bv", + "bytemuck", + "byteorder", + "bzip2", + "crossbeam-channel", + "dashmap 4.0.2", + "dir-diff", + "ed25519-dalek", + "flate2", + "fnv", + 
"fs-err", + "im", + "index_list", + "itertools", + "lazy_static", + "libsecp256k1", + "log", + "lru", + "lz4", + "memmap2", + "memoffset 0.9.0", + "modular-bitfield", + "num-derive", + "num-traits", + "num_cpus", + "num_enum 0.6.1", + "ouroboros", + "percentage", + "qualifier_attr", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rayon", + "regex", + "rustc_version 0.4.0", + "serde", + "serde_derive", + "siphasher", + "solana-address-lookup-table-program", + "solana-bpf-loader-program", + "solana-bucket-map", + "solana-compute-budget-program", + "solana-config-program", + "solana-cost-model", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-loader-v4-program", + "solana-logger", + "solana-measure", + "solana-metrics", + "solana-perf", + "solana-program-runtime", + "solana-rayon-threadlimit", + "solana-sdk", + "solana-stake-program", + "solana-system-program", + "solana-vote", + "solana-vote-program", + "solana-zk-token-proof-program", + "solana-zk-token-sdk", + "static_assertions", + "strum", + "strum_macros", + "symlink", + "tar", + "tempfile", + "test-case", + "thiserror", + "zstd", +] + [[package]] name = "solana-vote-program" version = "1.17.0" diff --git a/Cargo.toml b/Cargo.toml index 3653d88c1507fb..58cb4f83055604 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -109,6 +109,7 @@ members = [ "upload-perf", "validator", "version", + "vote", "watchtower", "zk-keygen", "zk-token-sdk", @@ -367,6 +368,7 @@ solana-transaction-status = { path = "transaction-status", version = "=1.17.0" } solana-turbine = { path = "turbine", version = "=1.17.0" } solana-udp-client = { path = "udp-client", version = "=1.17.0" } solana-version = { path = "version", version = "=1.17.0" } +solana-vote = { path = "vote", version = "=1.17.0" } solana-vote-program = { path = "programs/vote", version = "=1.17.0" } solana-zk-keygen = { path = "zk-keygen", version = "=1.17.0" } solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.17.0" } diff --git 
a/core/Cargo.toml b/core/Cargo.toml index f29ed64031afbe..fcab8ff8775912 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -69,6 +69,7 @@ solana-tpu-client = { workspace = true } solana-transaction-status = { workspace = true } solana-turbine = { workspace = true } solana-version = { workspace = true } +solana-vote = { workspace = true } solana-vote-program = { workspace = true } strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 7e9138048cb3dc..2dfb1e32b1d688 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -27,11 +27,9 @@ use { solana_measure::{measure, measure_us}, solana_perf::{data_budget::DataBudget, packet::PACKETS_PER_BATCH}, solana_poh::poh_recorder::PohRecorder, - solana_runtime::{ - bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache, - vote_sender_types::ReplayVoteSender, - }, + solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, solana_sdk::timing::AtomicInterval, + solana_vote::vote_sender_types::ReplayVoteSender, std::{ cmp, env, sync::{ diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index 26f5e56054a4a1..a5e42cbc75f8ec 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -14,12 +14,12 @@ use { bank_utils, prioritization_fee_cache::PrioritizationFeeCache, transaction_batch::TransactionBatch, - vote_sender_types::ReplayVoteSender, }, solana_sdk::{pubkey::Pubkey, saturating_add_assign}, solana_transaction_status::{ token_balances::TransactionTokenBalancesSet, TransactionTokenBalance, }, + solana_vote::vote_sender_types::ReplayVoteSender, std::{collections::HashMap, sync::Arc}, }; diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 3c4d275bddf55a..1795db97439a50 100644 --- a/core/src/banking_stage/consume_worker.rs +++ 
b/core/src/banking_stage/consume_worker.rs @@ -138,14 +138,12 @@ mod tests { get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, }, solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}, - solana_runtime::{ - bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache, - vote_sender_types::ReplayVoteReceiver, - }, + solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, solana_sdk::{ genesis_config::GenesisConfig, poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, system_transaction, }, + solana_vote::vote_sender_types::ReplayVoteReceiver, std::{ sync::{atomic::AtomicBool, RwLock}, thread::JoinHandle, diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index 4a851946fac7ed..183cabcf04dbce 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -26,13 +26,8 @@ use { rpc_subscriptions::RpcSubscriptions, }, solana_runtime::{ - bank::Bank, - bank_forks::BankForks, - commitment::VOTE_THRESHOLD_SIZE, + bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE, epoch_stakes::EpochStakes, - vote_parser::{self, ParsedVote}, - vote_sender_types::ReplayVoteReceiver, - vote_transaction::VoteTransaction, }, solana_sdk::{ clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT}, @@ -43,6 +38,11 @@ use { timing::AtomicInterval, transaction::Transaction, }, + solana_vote::{ + vote_parser::{self, ParsedVote}, + vote_sender_types::ReplayVoteReceiver, + vote_transaction::VoteTransaction, + }, std::{ collections::{HashMap, HashSet}, iter::repeat, @@ -883,13 +883,13 @@ mod tests { genesis_utils::{ self, create_genesis_config, GenesisConfigInfo, ValidatorVoteKeypairs, }, - vote_sender_types::ReplayVoteSender, }, solana_sdk::{ hash::Hash, pubkey::Pubkey, signature::{Keypair, Signature, Signer}, }, + solana_vote::vote_sender_types::ReplayVoteSender, solana_vote_program::{vote_state::Vote, vote_transaction}, 
std::{ collections::BTreeSet, diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 0e204dbe562343..50c04dbbf486fe 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -19,10 +19,7 @@ use { }, chrono::prelude::*, solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_db}, - solana_runtime::{ - bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE, - vote_account::VoteAccountsHashMap, - }, + solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE}, solana_sdk::{ clock::{Slot, UnixTimestamp}, hash::Hash, @@ -31,6 +28,7 @@ use { signature::Keypair, slot_history::{Check, SlotHistory}, }, + solana_vote::vote_account::VoteAccountsHashMap, solana_vote_program::{ vote_instruction, vote_state::{ @@ -1521,7 +1519,7 @@ pub mod test { }, itertools::Itertools, solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path}, - solana_runtime::{bank::Bank, vote_account::VoteAccount}, + solana_runtime::bank::Bank, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, clock::Slot, @@ -1530,6 +1528,7 @@ pub mod test { signature::Signer, slot_history::SlotHistory, }, + solana_vote::vote_account::VoteAccount, solana_vote_program::vote_state::{Vote, VoteStateVersions, MAX_LOCKOUT_HISTORY}, std::{ collections::{HashMap, VecDeque}, diff --git a/core/src/consensus/progress_map.rs b/core/src/consensus/progress_map.rs index 870449e80e0e02..a34509ef8142de 100644 --- a/core/src/consensus/progress_map.rs +++ b/core/src/consensus/progress_map.rs @@ -7,8 +7,9 @@ use { }, solana_ledger::blockstore_processor::{ConfirmationProgress, ConfirmationTiming}, solana_program_runtime::{report_execute_timings, timings::ExecuteTimingType}, - solana_runtime::{bank::Bank, bank_forks::BankForks, vote_account::VoteAccountsHashMap}, + solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}, + 
solana_vote::vote_account::VoteAccountsHashMap, std::{ collections::{BTreeMap, HashMap, HashSet}, ops::Index, @@ -531,8 +532,8 @@ impl ProgressMap { mod test { use { super::*, - solana_runtime::vote_account::VoteAccount, solana_sdk::account::{Account, AccountSharedData}, + solana_vote::vote_account::VoteAccount, }; fn new_test_vote_account() -> VoteAccount { diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 55e9b7ad21aab1..0fec5020d6dcb9 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -63,7 +63,6 @@ use { bank_forks::{BankForks, MAX_ROOT_DISTANCE_FOR_VOTE_ONLY}, commitment::BlockCommitmentCache, prioritization_fee_cache::PrioritizationFeeCache, - vote_sender_types::ReplayVoteSender, }, solana_sdk::{ clock::{BankId, Slot, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS}, @@ -75,6 +74,7 @@ use { timing::timestamp, transaction::Transaction, }, + solana_vote::vote_sender_types::ReplayVoteSender, solana_vote_program::vote_state::VoteTransaction, std::{ collections::{HashMap, HashSet}, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 884153d3d630af..028a88f416e1fe 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -30,11 +30,7 @@ use { optimistically_confirmed_bank_tracker::BankNotificationSender, rpc_subscriptions::RpcSubscriptions, }, - solana_runtime::{ - bank_forks::BankForks, - prioritization_fee_cache::PrioritizationFeeCache, - vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender}, - }, + solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair}, solana_streamer::{ nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, @@ -42,6 +38,7 @@ use { streamer::StakedNodes, }, solana_turbine::broadcast_stage::{BroadcastStage, BroadcastStageType}, + solana_vote::vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender}, std::{ collections::HashMap, net::{SocketAddr, UdpSocket}, diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 
d3d57c1314caed..0b8358863fbceb 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -44,10 +44,10 @@ use { solana_runtime::{ accounts_background_service::AbsRequestSender, bank_forks::BankForks, commitment::BlockCommitmentCache, prioritization_fee_cache::PrioritizationFeeCache, - vote_sender_types::ReplayVoteSender, }, solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair}, solana_turbine::retransmit_stage::RetransmitStage, + solana_vote::vote_sender_types::ReplayVoteSender, std::{ collections::HashSet, net::{SocketAddr, UdpSocket}, diff --git a/core/src/verified_vote_packets.rs b/core/src/verified_vote_packets.rs index fa486062cfb964..3edee355b86dcb 100644 --- a/core/src/verified_vote_packets.rs +++ b/core/src/verified_vote_packets.rs @@ -2,10 +2,7 @@ use { crate::{cluster_info_vote_listener::VerifiedLabelVotePacketsReceiver, result::Result}, itertools::Itertools, solana_perf::packet::PacketBatch, - solana_runtime::{ - bank::Bank, - vote_transaction::{VoteTransaction, VoteTransaction::VoteStateUpdate}, - }, + solana_runtime::bank::Bank, solana_sdk::{ account::from_account, clock::{Slot, UnixTimestamp}, @@ -15,6 +12,7 @@ use { slot_hashes::SlotHashes, sysvar, }, + solana_vote::vote_transaction::{VoteTransaction, VoteTransaction::VoteStateUpdate}, std::{ collections::{BTreeMap, HashMap, HashSet}, sync::Arc, diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index fcbcf9c9a9f1b4..3696342ae83b24 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -47,6 +47,7 @@ solana-streamer = { workspace = true } solana-thin-client = { workspace = true } solana-tpu-client = { workspace = true } solana-version = { workspace = true } +solana-vote = { workspace = true } solana-vote-program = { workspace = true } static_assertions = { workspace = true } thiserror = { workspace = true } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index d5b2b447ea9422..b0b99b1c02dca4 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -60,7 
+60,7 @@ use { packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE}, }, solana_rayon_threadlimit::get_thread_count, - solana_runtime::{bank_forks::BankForks, vote_parser}, + solana_runtime::bank_forks::BankForks, solana_sdk::{ clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_SLOTS_PER_EPOCH}, feature_set::FeatureSet, @@ -77,6 +77,7 @@ use { socket::SocketAddrSpace, streamer::{PacketBatchReceiver, PacketBatchSender}, }, + solana_vote::vote_parser, solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY, std::{ borrow::Cow, diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 24d66e3c520b71..87ba34604e61d2 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -10,7 +10,6 @@ use { bincode::{serialize, serialized_size}, rand::{CryptoRng, Rng}, serde::de::{Deserialize, Deserializer}, - solana_runtime::vote_parser, solana_sdk::{ clock::Slot, hash::Hash, @@ -20,6 +19,7 @@ use { timing::timestamp, transaction::Transaction, }, + solana_vote::vote_parser, std::{ borrow::{Borrow, Cow}, cmp::Ordering, diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index fa77b944d300da..f6fbb140e55691 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -54,6 +54,7 @@ solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-storage-proto = { workspace = true } solana-transaction-status = { workspace = true } +solana-vote = { workspace = true } solana-vote-program = { workspace = true } spl-token = { workspace = true, features = ["no-entrypoint"] } spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 9c988f1fcbc4f2..488ada14666c55 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -43,8 +43,6 @@ use { prioritization_fee_cache::PrioritizationFeeCache, runtime_config::RuntimeConfig, transaction_batch::TransactionBatch, - vote_account::VoteAccountsHashMap, - 
vote_sender_types::ReplayVoteSender, }, solana_sdk::{ clock::{Slot, MAX_PROCESSING_AGE}, @@ -61,6 +59,7 @@ use { }, }, solana_transaction_status::token_balances::TransactionTokenBalancesSet, + solana_vote::{vote_account::VoteAccountsHashMap, vote_sender_types::ReplayVoteSender}, std::{ borrow::Cow, collections::{HashMap, HashSet}, @@ -1863,11 +1862,8 @@ pub mod tests { rand::{thread_rng, Rng}, solana_entry::entry::{create_ticks, next_entry, next_entry_mut}, solana_program_runtime::declare_process_instruction, - solana_runtime::{ - genesis_utils::{ - self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, - }, - vote_account::VoteAccount, + solana_runtime::genesis_utils::{ + self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -1881,6 +1877,7 @@ pub mod tests { system_transaction, transaction::{Transaction, TransactionError}, }, + solana_vote::vote_account::VoteAccount, solana_vote_program::{ self, vote_state::{VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY}, diff --git a/ledger/src/staking_utils.rs b/ledger/src/staking_utils.rs index dda62298447ce4..75d1352792662b 100644 --- a/ledger/src/staking_utils.rs +++ b/ledger/src/staking_utils.rs @@ -2,10 +2,7 @@ pub(crate) mod tests { use { rand::Rng, - solana_runtime::{ - bank::Bank, - vote_account::{VoteAccount, VoteAccounts}, - }, + solana_runtime::bank::Bank, solana_sdk::{ account::AccountSharedData, clock::Clock, @@ -19,6 +16,7 @@ pub(crate) mod tests { }, transaction::Transaction, }, + solana_vote::vote_account::{VoteAccount, VoteAccounts}, solana_vote_program::{ vote_instruction, vote_state::{VoteInit, VoteState, VoteStateVersions}, diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 5f0a3eeae5de04..4248fc02945238 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -33,6 +33,7 @@ solana-streamer = { workspace = true } solana-thin-client = { workspace = true } solana-tpu-client 
= { workspace = true } solana-turbine = { workspace = true } +solana-vote = { workspace = true } solana-vote-program = { workspace = true } static_assertions = { workspace = true } tempfile = { workspace = true } diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index dc44a307c48f3c..b410585396f8f0 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -21,7 +21,6 @@ use { gossip_service::{self, discover_cluster, GossipService}, }, solana_ledger::blockstore::Blockstore, - solana_runtime::vote_transaction::VoteTransaction, solana_sdk::{ client::SyncClient, clock::{self, Slot, NUM_CONSECUTIVE_LEADER_SLOTS}, @@ -38,6 +37,7 @@ use { transport::TransportError, }, solana_streamer::socket::SocketAddrSpace, + solana_vote::vote_transaction::VoteTransaction, solana_vote_program::vote_transaction, std::{ borrow::Borrow, diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 2280bd98f48cdc..137477be1cc5f2 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -61,7 +61,6 @@ use { snapshot_config::SnapshotConfig, snapshot_package::SnapshotKind, snapshot_utils::{self}, - vote_parser, }, solana_sdk::{ account::AccountSharedData, @@ -83,6 +82,7 @@ use { broadcast_duplicates_run::{BroadcastDuplicatesConfig, ClusterPartition}, BroadcastStageType, }, + solana_vote::vote_parser, solana_vote_program::{vote_state::MAX_LOCKOUT_HISTORY, vote_transaction}, std::{ collections::{BTreeSet, HashMap, HashSet}, diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 8d34c0206afd52..f0d50d53911c8a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4837,6 +4837,7 @@ dependencies = [ "solana-transaction-status", "solana-turbine", "solana-version", + "solana-vote", "solana-vote-program", "strum", "strum_macros", @@ -5050,6 +5051,7 @@ dependencies = [ "solana-thin-client", "solana-tpu-client", "solana-version", + 
"solana-vote", "solana-vote-program", "static_assertions", "thiserror", @@ -5105,6 +5107,7 @@ dependencies = [ "solana-storage-bigtable", "solana-storage-proto", "solana-transaction-status", + "solana-vote", "solana-vote-program", "spl-token", "spl-token-2022", @@ -5449,6 +5452,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction-status", "solana-version", + "solana-vote", "solana-vote-program", "spl-token", "spl-token-2022", @@ -5574,6 +5578,7 @@ dependencies = [ "solana-stake-program", "solana-system-program", "solana-version", + "solana-vote", "solana-vote-program", "solana-zk-token-proof-program", "solana-zk-token-sdk", @@ -6392,6 +6397,76 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-vote" +version = "1.17.0" +dependencies = [ + "arrayref", + "bincode", + "blake3", + "bv", + "bytemuck", + "byteorder 1.4.3", + "bzip2", + "crossbeam-channel", + "dashmap", + "dir-diff", + "flate2", + "fnv", + "fs-err", + "im", + "index_list", + "itertools", + "lazy_static", + "log", + "lru", + "lz4", + "memmap2", + "modular-bitfield", + "num-derive", + "num-traits", + "num_cpus", + "num_enum 0.6.1", + "ouroboros", + "percentage", + "qualifier_attr", + "rand 0.8.5", + "rayon", + "regex", + "rustc_version", + "serde", + "serde_derive", + "siphasher", + "solana-address-lookup-table-program", + "solana-bpf-loader-program", + "solana-bucket-map", + "solana-compute-budget-program", + "solana-config-program", + "solana-cost-model", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-loader-v4-program", + "solana-measure", + "solana-metrics", + "solana-perf", + "solana-program-runtime", + "solana-rayon-threadlimit", + "solana-sdk", + "solana-stake-program", + "solana-system-program", + "solana-vote-program", + "solana-zk-token-proof-program", + "solana-zk-token-sdk", + "static_assertions", + "strum", + "strum_macros", + "symlink", + "tar", + "tempfile", + "thiserror", + "zstd", +] + [[package]] name = "solana-vote-program" version = "1.17.0" diff 
--git a/rpc/Cargo.toml b/rpc/Cargo.toml index 62ae098cb6cc34..f6fa5160f9ee1d 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -51,6 +51,7 @@ solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } solana-transaction-status = { workspace = true } solana-version = { workspace = true } +solana-vote = { workspace = true } solana-vote-program = { workspace = true } spl-token = { workspace = true, features = ["no-entrypoint"] } spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } diff --git a/rpc/src/rpc_pubsub.rs b/rpc/src/rpc_pubsub.rs index e318940b1ca12d..e45a5f8af68517 100644 --- a/rpc/src/rpc_pubsub.rs +++ b/rpc/src/rpc_pubsub.rs @@ -622,7 +622,6 @@ mod tests { activate_all_features, create_genesis_config, create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, }, - vote_transaction::VoteTransaction, }, solana_sdk::{ account::ReadableAccount, @@ -641,6 +640,7 @@ mod tests { transaction::{self, Transaction}, }, solana_stake_program::stake_state, + solana_vote::vote_transaction::VoteTransaction, solana_vote_program::vote_state::Vote, std::{ sync::{ diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index ad9de1664cc2ba..6fca7d45035837 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -29,7 +29,6 @@ use { bank::{Bank, TransactionLogInfo}, bank_forks::BankForks, commitment::{BlockCommitmentCache, CommitmentSlots}, - vote_transaction::VoteTransaction, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, @@ -42,6 +41,7 @@ use { solana_transaction_status::{ BlockEncodingOptions, ConfirmedBlock, EncodeError, VersionedConfirmedBlock, }, + solana_vote::vote_transaction::VoteTransaction, std::{ cell::RefCell, collections::{HashMap, VecDeque}, diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 319e6f4d7f0f38..2d15c7acbace71 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -66,6 +66,7 @@ solana-sdk = { workspace = true } 
solana-stake-program = { workspace = true } solana-system-program = { workspace = true } solana-version = { workspace = true } +solana-vote = { workspace = true } solana-vote-program = { workspace = true } solana-zk-token-proof-program = { workspace = true } solana-zk-token-sdk = { workspace = true } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 543c90350697f1..5963503095994c 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -54,7 +54,6 @@ use { stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, transaction_batch::TransactionBatch, - vote_account::{VoteAccount, VoteAccounts, VoteAccountsHashMap}, }, byteorder::{ByteOrder, LittleEndian}, dashmap::{DashMap, DashSet}, @@ -178,6 +177,7 @@ use { self, InflationPointCalculationEvent, PointValue, StakeStateV2, }, solana_system_program::{get_system_account_kind, SystemAccountKind}, + solana_vote::vote_account::{VoteAccount, VoteAccounts, VoteAccountsHashMap}, solana_vote_program::vote_state::VoteState, std::{ borrow::Cow, diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 338698407b8fcd..2e0bdd3ecc7ab6 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -612,7 +612,7 @@ mod tests { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "5G71eC1ofQ6pqgeQLb8zaK4EQCncs5Rs51rfmMAvtF8U")] + #[frozen_abi(digest = "12WNiuA7qeLU8JFweQszX5sCnCj1fYnYV4i9DeACqhQD")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperNewer { #[serde(serialize_with = "wrapper_newer")] diff --git a/runtime/src/bank_utils.rs b/runtime/src/bank_utils.rs index 0eeb750bf47df3..96844da6351257 100644 --- a/runtime/src/bank_utils.rs +++ b/runtime/src/bank_utils.rs @@ -2,11 +2,10 @@ use { crate::{ bank::Bank, genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs}, - vote_parser, - 
vote_sender_types::ReplayVoteSender, }, solana_accounts_db::transaction_results::TransactionResults, solana_sdk::{pubkey::Pubkey, signature::Signer, transaction::SanitizedTransaction}, + solana_vote::{vote_parser, vote_sender_types::ReplayVoteSender}, }; pub fn setup_bank_and_vote_pubkeys_for_tests( diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 89707e1a5e65eb..1124906e2eb0cc 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -1,7 +1,8 @@ use { - crate::{stakes::StakesEnum, vote_account::VoteAccountsHashMap}, + crate::stakes::StakesEnum, serde::{Deserialize, Serialize}, solana_sdk::{clock::Epoch, pubkey::Pubkey}, + solana_vote::vote_account::VoteAccountsHashMap, std::{collections::HashMap, sync::Arc}, }; @@ -123,7 +124,7 @@ impl EpochStakes { #[cfg(test)] pub(crate) mod tests { use { - super::*, crate::vote_account::VoteAccount, solana_sdk::account::AccountSharedData, + super::*, solana_sdk::account::AccountSharedData, solana_vote::vote_account::VoteAccount, solana_vote_program::vote_state::create_account_with_authorized, std::iter, }; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index fb22c1f96ccb83..ff94a68c69fa1e 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -37,10 +37,6 @@ pub mod static_ids; pub mod status_cache; pub mod transaction_batch; pub mod transaction_priority_details; -pub mod vote_account; -pub mod vote_parser; -pub mod vote_sender_types; -pub mod vote_transaction; #[macro_use] extern crate solana_metrics; diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 8d8f72d2aba3ae..977c25b180564f 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -1,11 +1,7 @@ //! Stakes serve as a cache of stake and vote accounts to derive //! 
node stakes use { - crate::{ - stake_account, - stake_history::StakeHistory, - vote_account::{VoteAccount, VoteAccounts}, - }, + crate::{stake_account, stake_history::StakeHistory}, dashmap::DashMap, im::HashMap as ImHashMap, log::error, @@ -20,6 +16,7 @@ use { stake::state::{Delegation, StakeActivationStatus}, vote::state::VoteStateVersions, }, + solana_vote::vote_account::{VoteAccount, VoteAccounts}, std::{ collections::{HashMap, HashSet}, ops::{Add, Deref}, diff --git a/vote/Cargo.toml b/vote/Cargo.toml new file mode 100644 index 00000000000000..13adc56ba48f70 --- /dev/null +++ b/vote/Cargo.toml @@ -0,0 +1,101 @@ +[package] +name = "solana-vote" +description = "Solana vote" +documentation = "https://docs.rs/solana-vote" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +arrayref = { workspace = true } +bincode = { workspace = true } +blake3 = { workspace = true } +bv = { workspace = true, features = ["serde"] } +bytemuck = { workspace = true } +byteorder = { workspace = true } +bzip2 = { workspace = true } +crossbeam-channel = { workspace = true } +dashmap = { workspace = true, features = ["rayon", "raw-api"] } +dir-diff = { workspace = true } +flate2 = { workspace = true } +fnv = { workspace = true } +fs-err = { workspace = true } +im = { workspace = true, features = ["rayon", "serde"] } +index_list = { workspace = true } +itertools = { workspace = true } +lazy_static = { workspace = true } +log = { workspace = true } +lru = { workspace = true } +lz4 = { workspace = true } +memmap2 = { workspace = true } +modular-bitfield = { workspace = true } +num-derive = { workspace = true } +num-traits = { workspace = true } +num_cpus = { workspace = true } +num_enum = { workspace = true } +ouroboros = { workspace = true } +percentage = { workspace = true } +qualifier_attr = { workspace = true } +rand = { 
workspace = true } +rayon = { workspace = true } +regex = { workspace = true } +serde = { workspace = true, features = ["rc"] } +serde_derive = { workspace = true } +siphasher = { workspace = true } +solana-address-lookup-table-program = { workspace = true } +solana-bpf-loader-program = { workspace = true } +solana-bucket-map = { workspace = true } +solana-compute-budget-program = { workspace = true } +solana-config-program = { workspace = true } +solana-cost-model = { workspace = true } +solana-frozen-abi = { workspace = true } +solana-frozen-abi-macro = { workspace = true } +solana-loader-v4-program = { workspace = true } +solana-measure = { workspace = true } +solana-metrics = { workspace = true } +solana-perf = { workspace = true } +solana-program-runtime = { workspace = true } +solana-rayon-threadlimit = { workspace = true } +solana-sdk = { workspace = true } +solana-stake-program = { workspace = true } +solana-system-program = { workspace = true } +solana-vote-program = { workspace = true } +solana-zk-token-proof-program = { workspace = true } +solana-zk-token-sdk = { workspace = true } +static_assertions = { workspace = true } +strum = { workspace = true, features = ["derive"] } +strum_macros = { workspace = true } +symlink = { workspace = true } +tar = { workspace = true } +tempfile = { workspace = true } +thiserror = { workspace = true } +zstd = { workspace = true } + +[lib] +crate-type = ["lib"] +name = "solana_vote" + +[dev-dependencies] +assert_matches = { workspace = true } +ed25519-dalek = { workspace = true } +libsecp256k1 = { workspace = true } +memoffset = { workspace = true } +rand_chacha = { workspace = true } +solana-logger = { workspace = true } +solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } +# See order-crates-for-publishing.py for using this unusual `path = "."` +solana-vote = { path = ".", features = ["dev-context-only-utils"] } +static_assertions = { workspace = true } +test-case = { workspace = true } + 
+[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[build-dependencies] +rustc_version = { workspace = true } + +[features] +dev-context-only-utils = [] diff --git a/vote/build.rs b/vote/build.rs new file mode 100644 index 00000000000000..c9550c1c5c4f22 --- /dev/null +++ b/vote/build.rs @@ -0,0 +1,27 @@ +extern crate rustc_version; +use rustc_version::{version_meta, Channel}; + +fn main() { + // Copied and adapted from + // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example + // Licensed under Apache-2.0 + MIT + match version_meta().unwrap().channel { + Channel::Stable => { + println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); + } + Channel::Beta => { + println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); + } + Channel::Nightly => { + println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); + } + Channel::Dev => { + println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); + // See https://github.com/solana-labs/solana/issues/11055 + // We may be running the custom `rust-bpf-builder` toolchain, + // which currently needs `#![feature(proc_macro_hygiene)]` to + // be applied. 
+ println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); + } + } +} diff --git a/vote/src/lib.rs b/vote/src/lib.rs new file mode 100644 index 00000000000000..bfff15ff9213f0 --- /dev/null +++ b/vote/src/lib.rs @@ -0,0 +1,13 @@ +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![allow(clippy::integer_arithmetic)] + +pub mod vote_account; +pub mod vote_parser; +pub mod vote_sender_types; +pub mod vote_transaction; + +#[macro_use] +extern crate serde_derive; + +#[macro_use] +extern crate solana_frozen_abi_macro; diff --git a/runtime/src/vote_account.rs b/vote/src/vote_account.rs similarity index 95% rename from runtime/src/vote_account.rs rename to vote/src/vote_account.rs index 93789e3eed87af..cd4d538b2ccc80 100644 --- a/runtime/src/vote_account.rs +++ b/vote/src/vote_account.rs @@ -54,15 +54,15 @@ pub struct VoteAccounts { } impl VoteAccount { - pub(crate) fn account(&self) -> &AccountSharedData { + pub fn account(&self) -> &AccountSharedData { &self.0.account } - pub(crate) fn lamports(&self) -> u64 { + pub fn lamports(&self) -> u64 { self.0.account.lamports() } - pub(crate) fn owner(&self) -> &Pubkey { + pub fn owner(&self) -> &Pubkey { self.0.account.owner() } @@ -75,7 +75,7 @@ impl VoteAccount { .as_ref() } - pub(crate) fn is_deserialized(&self) -> bool { + pub fn is_deserialized(&self) -> bool { self.0.vote_state.get().is_some() } @@ -86,10 +86,14 @@ impl VoteAccount { } impl VoteAccounts { - pub(crate) fn len(&self) -> usize { + pub fn len(&self) -> usize { self.vote_accounts.len() } + pub fn is_empty(&self) -> bool { + self.vote_accounts.is_empty() + } + pub fn staked_nodes(&self) -> Arc> { self.staked_nodes .get_or_init(|| { @@ -109,7 +113,7 @@ impl VoteAccounts { .clone() } - pub(crate) fn get(&self, pubkey: &Pubkey) -> Option<&VoteAccount> { + pub fn get(&self, pubkey: &Pubkey) -> Option<&VoteAccount> { let (_stake, vote_account) = self.vote_accounts.get(pubkey)?; Some(vote_account) } @@ -121,25 +125,25 @@ impl VoteAccounts { 
.unwrap_or_default() } - pub(crate) fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.vote_accounts .iter() .map(|(vote_pubkey, (_stake, vote_account))| (vote_pubkey, vote_account)) } - pub(crate) fn delegated_stakes(&self) -> impl Iterator { + pub fn delegated_stakes(&self) -> impl Iterator { self.vote_accounts .iter() .map(|(vote_pubkey, (stake, _vote_account))| (vote_pubkey, *stake)) } - pub(crate) fn find_max_by_delegated_stake(&self) -> Option<&VoteAccount> { + pub fn find_max_by_delegated_stake(&self) -> Option<&VoteAccount> { let key = |(_pubkey, (stake, _vote_account)): &(_, &(u64, _))| *stake; let (_pubkey, (_stake, vote_account)) = self.vote_accounts.iter().max_by_key(key)?; Some(vote_account) } - pub(crate) fn insert(&mut self, pubkey: Pubkey, (stake, vote_account): (u64, VoteAccount)) { + pub fn insert(&mut self, pubkey: Pubkey, (stake, vote_account): (u64, VoteAccount)) { self.add_node_stake(stake, &vote_account); let vote_accounts = Arc::make_mut(&mut self.vote_accounts); if let Some((stake, vote_account)) = vote_accounts.insert(pubkey, (stake, vote_account)) { @@ -147,7 +151,7 @@ impl VoteAccounts { } } - pub(crate) fn remove(&mut self, pubkey: &Pubkey) -> Option<(u64, VoteAccount)> { + pub fn remove(&mut self, pubkey: &Pubkey) -> Option<(u64, VoteAccount)> { let vote_accounts = Arc::make_mut(&mut self.vote_accounts); let entry = vote_accounts.remove(pubkey); if let Some((stake, ref vote_account)) = entry { @@ -156,7 +160,7 @@ impl VoteAccounts { entry } - pub(crate) fn add_stake(&mut self, pubkey: &Pubkey, delta: u64) { + pub fn add_stake(&mut self, pubkey: &Pubkey, delta: u64) { let vote_accounts = Arc::make_mut(&mut self.vote_accounts); if let Some((stake, vote_account)) = vote_accounts.get_mut(pubkey) { *stake += delta; @@ -165,7 +169,7 @@ impl VoteAccounts { } } - pub(crate) fn sub_stake(&mut self, pubkey: &Pubkey, delta: u64) { + pub fn sub_stake(&mut self, pubkey: &Pubkey, delta: u64) { let vote_accounts = 
Arc::make_mut(&mut self.vote_accounts); if let Some((stake, vote_account)) = vote_accounts.get_mut(pubkey) { *stake = stake diff --git a/runtime/src/vote_parser.rs b/vote/src/vote_parser.rs similarity index 100% rename from runtime/src/vote_parser.rs rename to vote/src/vote_parser.rs diff --git a/runtime/src/vote_sender_types.rs b/vote/src/vote_sender_types.rs similarity index 100% rename from runtime/src/vote_sender_types.rs rename to vote/src/vote_sender_types.rs diff --git a/runtime/src/vote_transaction.rs b/vote/src/vote_transaction.rs similarity index 100% rename from runtime/src/vote_transaction.rs rename to vote/src/vote_transaction.rs From 056e7cc2407ff4c229ba9066e4d231f74d76430e Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 19 Sep 2023 13:01:49 -0700 Subject: [PATCH 128/407] bucket storage refactoring (#33308) --- bucket_map/src/bucket_storage.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index ff7f8a10c71ff9..4abf127bd505d3 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -88,7 +88,7 @@ pub enum BucketStorageError { impl Drop for BucketStorage { fn drop(&mut self) { - _ = remove_file(&self.path); + self.delete(); } } @@ -179,6 +179,11 @@ impl BucketStorage { bytes } + /// delete the backing file on disk + fn delete(&self) { + _ = remove_file(&self.path); + } + pub fn max_search(&self) -> u64 { self.max_search as u64 } @@ -236,7 +241,7 @@ impl BucketStorage { /// 'is_resizing' true if caller is resizing the index (so don't increment count) /// 'is_resizing' false if caller is adding an item to the index (so increment count) pub fn occupy(&mut self, ix: u64, is_resizing: bool) -> Result<(), BucketStorageError> { - assert!(ix < self.capacity(), "occupy: bad index size"); + debug_assert!(ix < self.capacity(), "occupy: bad index size"); let mut e = Err(BucketStorageError::AlreadyOccupied); 
//debug!("ALLOC {} {}", ix, uid); if self.try_lock(ix) { @@ -249,14 +254,14 @@ impl BucketStorage { } pub fn free(&mut self, ix: u64) { - assert!(ix < self.capacity(), "bad index size"); + debug_assert!(ix < self.capacity(), "bad index size"); let start = self.get_start_offset_with_header(ix); self.contents.free(&mut self.mmap[start..], ix as usize); self.count.fetch_sub(1, Ordering::Relaxed); } fn get_start_offset_with_header(&self, ix: u64) -> usize { - assert!(ix < self.capacity(), "bad index size"); + debug_assert!(ix < self.capacity(), "bad index size"); (self.cell_size * ix) as usize } From 288e8a682a3111ff87df00a8db719b45cd29e666 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 19 Sep 2023 13:02:22 -0700 Subject: [PATCH 129/407] conditionally erase folders on drop of `BucketMap` (#33309) --- bucket_map/src/bucket_map.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bucket_map/src/bucket_map.rs b/bucket_map/src/bucket_map.rs index 4a8b2dbcc5ceca..d4b70eb47cc58f 100644 --- a/bucket_map/src/bucket_map.rs +++ b/bucket_map/src/bucket_map.rs @@ -31,11 +31,14 @@ pub struct BucketMap { max_buckets_pow2: u8, pub stats: Arc, pub temp_dir: Option, + /// true if dropping self removes all folders. + /// This is primarily for test environments. 
+ pub erase_drives_on_drop: bool, } impl Drop for BucketMap { fn drop(&mut self) { - if self.temp_dir.is_none() { + if self.temp_dir.is_none() && self.erase_drives_on_drop { BucketMap::::erase_previous_drives(&self.drives); } } @@ -103,6 +106,7 @@ impl BucketMap { max_buckets_pow2: log2(config.max_buckets) as u8, stats, temp_dir, + erase_drives_on_drop: true, } } From 58f980a19bff472f493c842bcdbe477d75fa4627 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 19 Sep 2023 19:26:43 -0700 Subject: [PATCH 130/407] refactor bucket storage file open (#33314) --- bucket_map/src/bucket_storage.rs | 104 +++++++++++++++++++------------ 1 file changed, 63 insertions(+), 41 deletions(-) diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index 4abf127bd505d3..388f654dc28e54 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -6,7 +6,7 @@ use { std::{ fs::{remove_file, OpenOptions}, io::{Seek, SeekFrom, Write}, - path::PathBuf, + path::{Path, PathBuf}, sync::{ atomic::{AtomicU64, Ordering}, Arc, @@ -342,57 +342,79 @@ impl BucketStorage { unsafe { std::slice::from_raw_parts_mut(ptr, len as usize) } } - /// allocate a new memory mapped file of size `bytes` on one of `drives` - fn new_map(drives: &[PathBuf], bytes: u64, stats: &BucketStats) -> (MmapMut, PathBuf, u128) { + /// open a disk bucket file and mmap it + /// optionally creates it. 
+ fn map_open_file( + path: impl AsRef + std::fmt::Debug + Clone, + create: bool, + create_bytes: u64, + stats: &BucketStats, + ) -> Option { let mut measure_new_file = Measure::start("measure_new_file"); - let r = thread_rng().gen_range(0..drives.len()); - let drive = &drives[r]; - let file_random = thread_rng().gen_range(0..u128::MAX); - let pos = format!("{}", file_random,); - let file = drive.join(pos); - let mut data = OpenOptions::new() + let data = OpenOptions::new() .read(true) .write(true) - .create(true) - .open(file.clone()) - .map_err(|e| { - panic!( - "Unable to create data file {} in current dir({:?}): {:?}", - file.display(), - std::env::current_dir(), - e - ); - }) - .unwrap(); - - // Theoretical performance optimization: write a zero to the end of - // the file so that we won't have to resize it later, which may be - // expensive. - //debug!("GROWING file {}", capacity * cell_size as u64); - data.seek(SeekFrom::Start(bytes - 1)).unwrap(); - data.write_all(&[0]).unwrap(); - data.rewind().unwrap(); - measure_new_file.stop(); - let mut measure_flush = Measure::start("measure_flush"); - data.flush().unwrap(); // can we skip this? - measure_flush.stop(); + .create(create) + .open(path.clone()); + if let Err(e) = data { + if !create { + // we can't load this file, so bail without error + return None; + } + panic!( + "Unable to create data file {:?} in current dir({:?}): {:?}", + path, + std::env::current_dir(), + e + ); + } + let mut data = data.unwrap(); + + if create { + // Theoretical performance optimization: write a zero to the end of + // the file so that we won't have to resize it later, which may be + // expensive. + //debug!("GROWING file {}", capacity * cell_size as u64); + data.seek(SeekFrom::Start(create_bytes - 1)).unwrap(); + data.write_all(&[0]).unwrap(); + data.rewind().unwrap(); + measure_new_file.stop(); + let measure_flush = Measure::start("measure_flush"); + data.flush().unwrap(); // can we skip this? 
+ stats + .flush_file_us + .fetch_add(measure_flush.end_as_us(), Ordering::Relaxed); + } let mut measure_mmap = Measure::start("measure_mmap"); - let res = ( - unsafe { MmapMut::map_mut(&data).unwrap() }, - file, - file_random, - ); + let res = unsafe { MmapMut::map_mut(&data) }; + if let Err(e) = res { + panic!( + "Unable to mmap file {:?} in current dir({:?}): {:?}", + path, + std::env::current_dir(), + e + ); + } measure_mmap.stop(); stats .new_file_us .fetch_add(measure_new_file.as_us(), Ordering::Relaxed); - stats - .flush_file_us - .fetch_add(measure_flush.as_us(), Ordering::Relaxed); stats .mmap_us .fetch_add(measure_mmap.as_us(), Ordering::Relaxed); - res + res.ok() + } + + /// allocate a new memory mapped file of size `bytes` on one of `drives` + fn new_map(drives: &[PathBuf], bytes: u64, stats: &BucketStats) -> (MmapMut, PathBuf, u128) { + let r = thread_rng().gen_range(0..drives.len()); + let drive = &drives[r]; + let file_random = thread_rng().gen_range(0..u128::MAX); + let pos = format!("{}", file_random,); + let file = drive.join(pos); + let res = Self::map_open_file(file.clone(), true, bytes, stats).unwrap(); + + (res, file, file_random) } /// copy contents from 'old_bucket' to 'self' From 7a8a492d4c7f036879d39580c77b159a68c1dc47 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 19 Sep 2023 20:28:55 -0700 Subject: [PATCH 131/407] fix clippy error (#33317) * fix clippy error * fix fmt that somehow got merged --- bucket_map/src/bucket_storage.rs | 2 +- vote/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index 388f654dc28e54..5011d0567a5176 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -181,7 +181,7 @@ impl BucketStorage { /// delete the backing file on disk fn delete(&self) { - _ = remove_file(&self.path); + _ = remove_file(&self.path); } pub fn max_search(&self) -> u64 { diff --git 
a/vote/src/lib.rs b/vote/src/lib.rs index bfff15ff9213f0..a2271971d8900e 100644 --- a/vote/src/lib.rs +++ b/vote/src/lib.rs @@ -1,5 +1,5 @@ #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] -#![allow(clippy::integer_arithmetic)] +#![allow(clippy::arithmetic_side_effects)] pub mod vote_account; pub mod vote_parser; From bca41edf204e322df03c6d529da32ae0ab256d23 Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 20 Sep 2023 00:00:51 -0600 Subject: [PATCH 132/407] Make active stake consistent in split (#33295) * Add feature gate * Add helper fn * Require split destination to be rent-exempt if it is active * Update cli to prefund split accounts * cli: require rent param with sign-only * Update tokens to prefund split accounts * Update split tests with sysvar accounts * Fix test_split_to_account_with_rent_exempt_reserve * Fix test_staked_split_destination_minimum_balance * Fix test_split_more_than_staked * Fix test_split_minimum_stake_delegation and remove misleading StakeState::Initialized case * Fix test_split_from_larger_sized_account * Add test for pre-/post-activation behavior splitting some or all of stake account * Assert active stake * Fix runtime test * Ignore stake-pool downstream * Review comments * Feature gate sysvar reads --- .github/workflows/downstream-project-spl.yml | 2 +- cli/src/cli.rs | 4 + cli/src/stake.rs | 78 +- cli/tests/stake.rs | 19 +- programs/stake/src/stake_instruction.rs | 715 +++++++++++++++++-- programs/stake/src/stake_state.rs | 52 +- runtime/tests/stake.rs | 12 +- sdk/src/feature_set.rs | 5 + tokens/src/arg_parser.rs | 1 + tokens/src/args.rs | 1 + tokens/src/commands.rs | 30 +- tokens/src/lib.rs | 1 + tokens/src/main.rs | 3 +- tokens/src/stake.rs | 15 + 14 files changed, 813 insertions(+), 125 deletions(-) create mode 100644 tokens/src/stake.rs diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index f0ecfb20accb4e..09c457c038f9ae 100644 --- 
a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -128,7 +128,7 @@ jobs: - [governance/addin-mock/program, governance/program] - [memo/program] - [name-service/program] - - [stake-pool/program] + # - [stake-pool/program] - [single-pool/program] steps: diff --git a/cli/src/cli.rs b/cli/src/cli.rs index e6960c3fa3599a..17a35f7da0a2ab 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -238,6 +238,7 @@ pub enum CliCommand { lamports: u64, fee_payer: SignerIndex, compute_unit_price: Option, + rent_exempt_reserve: Option, }, MergeStake { stake_account_pubkey: Pubkey, @@ -1226,6 +1227,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { lamports, fee_payer, compute_unit_price, + rent_exempt_reserve, } => process_split_stake( &rpc_client, config, @@ -1242,6 +1244,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *lamports, *fee_payer, compute_unit_price.as_ref(), + rent_exempt_reserve.as_ref(), ), CliCommand::MergeStake { stake_account_pubkey, @@ -2243,6 +2246,7 @@ mod tests { lamports: 30, fee_payer: 0, compute_unit_price: None, + rent_exempt_reserve: None, }; config.signers = vec![&keypair, &split_stake_account]; let result = process_command(&config); diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 79fe33a098feea..04101397120142 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -55,7 +55,7 @@ use { tools::{acceptable_reference_epoch_credits, eligible_for_deactivate_delinquent}, }, stake_history::{Epoch, StakeHistory}, - system_instruction::SystemError, + system_instruction::{self, SystemError}, sysvar::{clock, stake_history}, transaction::Transaction, }, @@ -121,6 +121,13 @@ pub struct StakeAuthorizationIndexed { pub new_authority_signer: Option, } +struct SignOnlySplitNeedsRent {} +impl ArgsConfig for SignOnlySplitNeedsRent { + fn sign_only_arg<'a, 'b>(&self, arg: Arg<'a, 'b>) -> Arg<'a, 'b> { + arg.requires("rent_exempt_reserve_sol") + } +} + pub trait StakeSubCommands { fn 
stake_subcommands(self) -> Self; } @@ -493,11 +500,21 @@ impl StakeSubCommands for App<'_, '_> { will be at a derived address of SPLIT_STAKE_ACCOUNT") ) .arg(stake_authority_arg()) - .offline_args() + .offline_args_config(&SignOnlySplitNeedsRent{}) .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) .arg(compute_unit_price_arg()) + .arg( + Arg::with_name("rent_exempt_reserve_sol") + .long("rent-exempt-reserve-sol") + .value_name("AMOUNT") + .takes_value(true) + .validator(is_amount) + .requires("sign_only") + .help("Offline signing only: the rent-exempt amount to move into the new \ + stake account, in SOL") + ) ) .subcommand( SubCommand::with_name("merge-stake") @@ -1027,6 +1044,7 @@ pub fn parse_split_stake( let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; let compute_unit_price = value_of(matches, COMPUTE_UNIT_PRICE_ARG.name); + let rent_exempt_reserve = lamports_of_sol(matches, "rent_exempt_reserve_sol"); Ok(CliCommandInfo { command: CliCommand::SplitStake { @@ -1043,6 +1061,7 @@ pub fn parse_split_stake( lamports, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), compute_unit_price, + rent_exempt_reserve, }, signers: signer_info.signers, }) @@ -1852,6 +1871,7 @@ pub fn process_split_stake( lamports: u64, fee_payer: SignerIndex, compute_unit_price: Option<&u64>, + rent_exempt_reserve: Option<&u64>, ) -> ProcessResult { let split_stake_account = config.signers[split_stake_account]; let fee_payer = config.signers[fee_payer]; @@ -1885,7 +1905,7 @@ pub fn process_split_stake( split_stake_account.pubkey() }; - if !sign_only { + let rent_exempt_reserve = if !sign_only { if let Ok(stake_account) = rpc_client.get_account(&split_stake_account_address) { let err_msg = if stake_account.owner == stake::program::id() { format!("Stake account {split_stake_account_address} already exists") @@ -1906,30 +1926,44 @@ pub fn process_split_stake( )) .into()); } - } + minimum_balance + } else { + rent_exempt_reserve + 
.cloned() + .expect("rent_exempt_reserve_sol is required with sign_only") + }; let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; - let ixs = if let Some(seed) = split_stake_account_seed { - stake_instruction::split_with_seed( - stake_account_pubkey, - &stake_authority.pubkey(), - lamports, - &split_stake_account_address, - &split_stake_account.pubkey(), - seed, + let mut ixs = vec![system_instruction::transfer( + &fee_payer.pubkey(), + &split_stake_account_address, + rent_exempt_reserve, + )]; + if let Some(seed) = split_stake_account_seed { + ixs.append( + &mut stake_instruction::split_with_seed( + stake_account_pubkey, + &stake_authority.pubkey(), + lamports, + &split_stake_account_address, + &split_stake_account.pubkey(), + seed, + ) + .with_memo(memo) + .with_compute_unit_price(compute_unit_price), ) - .with_memo(memo) - .with_compute_unit_price(compute_unit_price) } else { - stake_instruction::split( - stake_account_pubkey, - &stake_authority.pubkey(), - lamports, - &split_stake_account_address, + ixs.append( + &mut stake_instruction::split( + stake_account_pubkey, + &stake_authority.pubkey(), + lamports, + &split_stake_account_address, + ) + .with_memo(memo) + .with_compute_unit_price(compute_unit_price), ) - .with_memo(memo) - .with_compute_unit_price(compute_unit_price) }; let nonce_authority = config.signers[nonce_authority]; @@ -4848,6 +4882,7 @@ mod tests { lamports: 50_000_000_000, fee_payer: 0, compute_unit_price: None, + rent_exempt_reserve: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -4915,6 +4950,7 @@ mod tests { lamports: 50_000_000_000, fee_payer: 1, compute_unit_price: None, + rent_exempt_reserve: None, }, signers: vec![ Presigner::new(&stake_auth_pubkey, &stake_sig).into(), diff --git a/cli/tests/stake.rs b/cli/tests/stake.rs index fe1396db6c5f50..5984e1d0cededd 100644 --- a/cli/tests/stake.rs +++ b/cli/tests/stake.rs @@ -1469,6 +1469,10 @@ fn test_stake_split() { 
config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; + let minimum_balance = rpc_client + .get_minimum_balance_for_rent_exemption(StakeStateV2::size_of()) + .unwrap(); + let mut config_offline = CliConfig::recent_for_tests(); config_offline.json_rpc_url = String::default(); config_offline.signers = vec![&offline_signer]; @@ -1496,10 +1500,7 @@ fn test_stake_split() { check_balance!(1_000_000_000_000, &rpc_client, &offline_pubkey); // Create stake account, identity is authority - let stake_balance = rpc_client - .get_minimum_balance_for_rent_exemption(StakeStateV2::size_of()) - .unwrap() - + 10_000_000_000; + let stake_balance = minimum_balance + 10_000_000_000; let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let stake_account_pubkey = stake_keypair.pubkey(); config.signers.push(&stake_keypair); @@ -1569,6 +1570,7 @@ fn test_stake_split() { lamports: 2 * stake_balance, fee_payer: 0, compute_unit_price: None, + rent_exempt_reserve: Some(minimum_balance), }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); @@ -1593,10 +1595,15 @@ fn test_stake_split() { lamports: 2 * stake_balance, fee_payer: 0, compute_unit_price: None, + rent_exempt_reserve: None, }; process_command(&config).unwrap(); - check_balance!(8 * stake_balance, &rpc_client, &stake_account_pubkey,); - check_balance!(2 * stake_balance, &rpc_client, &split_account.pubkey(),); + check_balance!(8 * stake_balance, &rpc_client, &stake_account_pubkey); + check_balance!( + 2 * stake_balance + minimum_balance, + &rpc_client, + &split_account.pubkey() + ); } #[test] diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index c268009885edbb..6cf5f000745883 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -527,6 +527,12 @@ mod tests { feature_set } + fn feature_set_without_require_rent_exempt_split_destination() 
-> Arc { + let mut feature_set = FeatureSet::all_enabled(); + feature_set.deactivate(&feature_set::require_rent_exempt_split_destination::id()); + Arc::new(feature_set) + } + fn create_default_account() -> AccountSharedData { AccountSharedData::new(0, 0, &Pubkey::new_unique()) } @@ -638,6 +644,25 @@ mod tests { ) } + fn get_active_stake_for_tests( + stake_accounts: &[AccountSharedData], + clock: &Clock, + stake_history: &StakeHistory, + ) -> u64 { + let mut active_stake = 0; + for account in stake_accounts { + if let StakeStateV2::Stake(_meta, stake, _stake_flags) = account.state().unwrap() { + let stake_status = stake.delegation.stake_activating_and_deactivating( + clock.epoch, + Some(stake_history), + None, + ); + active_stake += stake_status.effective; + } + } + active_stake + } + #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] #[test_case(feature_set_all_enabled(); "all_enabled")] @@ -2704,6 +2729,12 @@ mod tests { #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] #[test_case(feature_set_all_enabled(); "all_enabled")] fn test_split(feature_set: Arc) { + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; let stake_address = solana_sdk::pubkey::new_rand(); let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_lamports = minimum_delegation * 2; @@ -2717,7 +2748,7 @@ mod tests { .unwrap(); let mut transaction_accounts = vec![ (stake_address, AccountSharedData::default()), - (split_to_address, split_to_account), + (split_to_address, split_to_account.clone()), ( rent::id(), create_account_shared_data_for_test(&Rent { @@ -2725,6 +2756,15 @@ mod tests { ..Rent::default() }), ), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), 
create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ]; let instruction_accounts = vec![ AccountMeta { @@ -2752,6 +2792,11 @@ mod tests { &id(), ) .unwrap(); + let expected_active_stake = get_active_stake_for_tests( + &[stake_account.clone(), split_to_account.clone()], + &clock, + &stake_history, + ); transaction_accounts[0] = (stake_address, stake_account); // should fail, split more than available @@ -2777,6 +2822,12 @@ mod tests { stake_lamports ); + // no deactivated stake + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); + assert_eq!(from(&accounts[0]).unwrap(), from(&accounts[1]).unwrap()); match state { StakeStateV2::Initialized(_meta) => { @@ -4046,6 +4097,12 @@ mod tests { let minimum_delegation = crate::get_minimum_delegation(&feature_set); let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; let source_address = Pubkey::new_unique(); let source_meta = Meta { rent_exempt_reserve, @@ -4053,7 +4110,7 @@ mod tests { }; let dest_address = Pubkey::new_unique(); let dest_account = AccountSharedData::new_data_with_space( - 0, + rent_exempt_reserve, &StakeStateV2::Uninitialized, StakeStateV2::size_of(), &id(), @@ -4071,57 +4128,60 @@ mod tests { is_writable: true, }, ]; - for (source_reserve, dest_reserve, expected_result) in [ - (rent_exempt_reserve, rent_exempt_reserve, Ok(())), + for (source_delegation, split_amount, expected_result) in [ + (minimum_delegation * 2, minimum_delegation, Ok(())), ( - rent_exempt_reserve, - rent_exempt_reserve - 1, + minimum_delegation * 2, + minimum_delegation - 1, Err(InstructionError::InsufficientFunds), ), ( - rent_exempt_reserve - 1, - rent_exempt_reserve, + (minimum_delegation * 2) 
- 1, + minimum_delegation, Err(InstructionError::InsufficientFunds), ), ( - rent_exempt_reserve - 1, - rent_exempt_reserve - 1, + (minimum_delegation - 1) * 2, + minimum_delegation - 1, Err(InstructionError::InsufficientFunds), ), ] { - // The source account's starting balance is equal to *both* the source and dest - // accounts' *final* balance - let mut source_starting_balance = source_reserve + dest_reserve; - for (delegation, source_stake_state) in &[ - (0, StakeStateV2::Initialized(source_meta)), - ( - minimum_delegation, - just_stake( - source_meta, - minimum_delegation * 2 + source_starting_balance - rent_exempt_reserve, + let source_account = AccountSharedData::new_data_with_space( + source_delegation + rent_exempt_reserve, + &just_stake(source_meta, source_delegation), + StakeStateV2::size_of(), + &id(), + ) + .unwrap(); + let expected_active_stake = get_active_stake_for_tests( + &[source_account.clone(), dest_account.clone()], + &clock, + &stake_history, + ); + let accounts = process_instruction( + Arc::clone(&feature_set), + &serialize(&StakeInstruction::Split(split_amount)).unwrap(), + vec![ + (source_address, source_account), + (dest_address, dest_account.clone()), + (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), ), - ), - ] { - source_starting_balance += delegation * 2; - let source_account = AccountSharedData::new_data_with_space( - source_starting_balance, - source_stake_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(dest_reserve + delegation)).unwrap(), - vec![ - (source_address, source_account), - (dest_address, dest_account.clone()), - (rent::id(), create_account_shared_data_for_test(&rent)), - ], - instruction_accounts.clone(), - expected_result.clone(), - ); - } + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + 
create_account_shared_data_for_test(&EpochSchedule::default()), + ), + ], + instruction_accounts.clone(), + expected_result.clone(), + ); + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); } } @@ -4139,6 +4199,12 @@ mod tests { let minimum_delegation = crate::get_minimum_delegation(&feature_set); let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; let source_address = Pubkey::new_unique(); let source_meta = Meta { rent_exempt_reserve, @@ -4185,17 +4251,35 @@ mod tests { &id(), ) .unwrap(); - process_instruction( + let expected_active_stake = get_active_stake_for_tests( + &[source_account.clone(), dest_account.clone()], + &clock, + &stake_history, + ); + let accounts = process_instruction( Arc::clone(&feature_set), &serialize(&StakeInstruction::Split(source_account.lamports())).unwrap(), vec![ (source_address, source_account), (dest_address, dest_account.clone()), (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ], instruction_accounts.clone(), expected_result.clone(), ); + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); } } } @@ -4308,6 +4392,12 @@ mod tests { let minimum_delegation = crate::get_minimum_delegation(&feature_set); let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; let source_address = 
Pubkey::new_unique(); let destination_address = Pubkey::new_unique(); let instruction_accounts = vec![ @@ -4359,17 +4449,26 @@ mod tests { minimum_delegation.saturating_sub(1), // when minimum is 0, this blows up! Err(InstructionError::InsufficientFunds), ), - // destination is not rent exempt, so split enough for rent and minimum delegation - (rent_exempt_reserve - 1, minimum_delegation + 1, Ok(())), + // destination is not rent exempt, so any split amount fails, including enough for rent + // and minimum delegation + ( + rent_exempt_reserve - 1, + minimum_delegation + 1, + Err(InstructionError::InsufficientFunds), + ), // destination is not rent exempt, but split amount only for minimum delegation ( rent_exempt_reserve - 1, minimum_delegation, Err(InstructionError::InsufficientFunds), ), - // destination has smallest non-zero balance, so can split the minimum balance - // requirements minus what destination already has - (1, rent_exempt_reserve + minimum_delegation - 1, Ok(())), + // destination is not rent exempt, so any split amount fails, including case where + // destination has smallest non-zero balance + ( + 1, + rent_exempt_reserve + minimum_delegation - 1, + Err(InstructionError::InsufficientFunds), + ), // destination has smallest non-zero balance, but cannot split less than the minimum // balance requirements minus what destination already has ( @@ -4377,9 +4476,13 @@ mod tests { rent_exempt_reserve + minimum_delegation - 2, Err(InstructionError::InsufficientFunds), ), - // destination has zero lamports, so split must be at least rent exempt reserve plus - // minimum delegation - (0, rent_exempt_reserve + minimum_delegation, Ok(())), + // destination has zero lamports, so any split amount fails, including at least rent + // exempt reserve plus minimum delegation + ( + 0, + rent_exempt_reserve + minimum_delegation, + Err(InstructionError::InsufficientFunds), + ), // destination has zero lamports, but split amount is less than rent exempt reserve // plus 
minimum delegation ( @@ -4410,6 +4513,11 @@ mod tests { &id(), ) .unwrap(); + let expected_active_stake = get_active_stake_for_tests( + &[source_account.clone(), destination_account.clone()], + &clock, + &stake_history, + ); let accounts = process_instruction( Arc::clone(&feature_set), &serialize(&StakeInstruction::Split(split_amount)).unwrap(), @@ -4417,10 +4525,23 @@ mod tests { (source_address, source_account.clone()), (destination_address, destination_account), (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ], instruction_accounts.clone(), expected_result.clone(), ); + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); // For the expected OK cases, when the source's StakeStateV2 is Stake, then the // destination's StakeStateV2 *must* also end up as Stake as well. Additionally, // check to ensure the destination's delegation amount is correct. 
If the @@ -4892,6 +5013,8 @@ mod tests { fn test_split_more_than_staked(feature_set: Arc) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_lamports = (rent_exempt_reserve + minimum_delegation) * 2; let stake_address = solana_sdk::pubkey::new_rand(); @@ -4910,7 +5033,7 @@ mod tests { .unwrap(); let split_to_address = solana_sdk::pubkey::new_rand(); let split_to_account = AccountSharedData::new_data_with_space( - 0, + rent_exempt_reserve, &StakeStateV2::Uninitialized, StakeStateV2::size_of(), &id(), @@ -4920,6 +5043,21 @@ mod tests { (stake_address, stake_account), (split_to_address, split_to_account), (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + ( + clock::id(), + create_account_shared_data_for_test(&Clock { + epoch: current_epoch, + ..Clock::default() + }), + ), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ]; let instruction_accounts = vec![ AccountMeta { @@ -4949,6 +5087,12 @@ mod tests { fn test_split_with_rent(feature_set: Arc) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_address = solana_sdk::pubkey::new_rand(); let split_to_address = solana_sdk::pubkey::new_rand(); @@ -4993,10 +5137,24 @@ mod tests { &id(), ) .unwrap(); + let expected_active_stake = get_active_stake_for_tests( + &[stake_account.clone(), split_to_account.clone()], + &clock, + &stake_history, + ); let mut transaction_accounts = vec![ (stake_address, 
stake_account), (split_to_address, split_to_account.clone()), (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ]; // not enough to make a non-zero stake account @@ -5020,7 +5178,7 @@ mod tests { Err(InstructionError::InsufficientFunds), ); - // split account already has way enough lamports + // split account already has enough lamports transaction_accounts[1].1.set_lamports(*minimum_balance); let accounts = process_instruction( Arc::clone(&feature_set), @@ -5029,6 +5187,10 @@ mod tests { instruction_accounts.clone(), Ok(()), ); + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); // verify no stake leakage in the case of a stake if let StakeStateV2::Stake(meta, stake, stake_flags) = state { @@ -5058,6 +5220,12 @@ mod tests { fn test_split_to_account_with_rent_exempt_reserve(feature_set: Arc) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_lamports = (rent_exempt_reserve + minimum_delegation) * 2; let stake_address = solana_sdk::pubkey::new_rand(); @@ -5088,17 +5256,7 @@ mod tests { }, ]; - // Test various account prefunding, including empty, less than rent_exempt_reserve, exactly - // rent_exempt_reserve, and more than rent_exempt_reserve. 
The empty case is not covered in - // test_split, since that test uses a Meta with rent_exempt_reserve = 0 - let split_lamport_balances = vec![ - 0, - rent_exempt_reserve - 1, - rent_exempt_reserve, - rent_exempt_reserve + minimum_delegation - 1, - rent_exempt_reserve + minimum_delegation, - ]; - for initial_balance in split_lamport_balances { + let transaction_accounts = |initial_balance: u64| -> Vec<(Pubkey, AccountSharedData)> { let split_to_account = AccountSharedData::new_data_with_space( initial_balance, &StakeStateV2::Uninitialized, @@ -5106,11 +5264,63 @@ mod tests { &id(), ) .unwrap(); - let transaction_accounts = vec![ + vec![ (stake_address, stake_account.clone()), (split_to_address, split_to_account), (rent::id(), create_account_shared_data_for_test(&rent)), - ]; + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), + ] + }; + + // Test insufficient account prefunding, including empty and less than rent_exempt_reserve. 
+ // The empty case is not covered in test_split, since that test uses a Meta with + // rent_exempt_reserve = 0 + let split_lamport_balances = vec![0, rent_exempt_reserve - 1]; + for initial_balance in split_lamport_balances { + let transaction_accounts = transaction_accounts(initial_balance); + // split more than available fails + process_instruction( + Arc::clone(&feature_set), + &serialize(&StakeInstruction::Split(stake_lamports + 1)).unwrap(), + transaction_accounts.clone(), + instruction_accounts.clone(), + Err(InstructionError::InsufficientFunds), + ); + // split to insufficiently funded dest fails + process_instruction( + Arc::clone(&feature_set), + &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), + transaction_accounts, + instruction_accounts.clone(), + Err(InstructionError::InsufficientFunds), + ); + } + + // Test various account prefunding, including exactly rent_exempt_reserve, and more than + // rent_exempt_reserve + let split_lamport_balances = vec![ + rent_exempt_reserve, + rent_exempt_reserve + minimum_delegation - 1, + rent_exempt_reserve + minimum_delegation, + ]; + for initial_balance in split_lamport_balances { + let transaction_accounts = transaction_accounts(initial_balance); + let expected_active_stake = get_active_stake_for_tests( + &[ + transaction_accounts[0].1.clone(), + transaction_accounts[1].1.clone(), + ], + &clock, + &stake_history, + ); // split more than available fails process_instruction( @@ -5134,6 +5344,11 @@ mod tests { accounts[0].lamports() + accounts[1].lamports(), stake_lamports + initial_balance, ); + // no deactivated stake + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); if let StakeStateV2::Stake(meta, stake, stake_flags) = state { let expected_stake = @@ -5184,6 +5399,12 @@ mod tests { let rent = Rent::default(); let source_larger_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of() + 100); let split_rent_exempt_reserve = 
rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_lamports = (source_larger_rent_exempt_reserve + minimum_delegation) * 2; let stake_address = solana_sdk::pubkey::new_rand(); @@ -5214,17 +5435,7 @@ mod tests { }, ]; - // Test various account prefunding, including empty, less than rent_exempt_reserve, exactly - // rent_exempt_reserve, and more than rent_exempt_reserve. The empty case is not covered in - // test_split, since that test uses a Meta with rent_exempt_reserve = 0 - let split_lamport_balances = vec![ - 0, - split_rent_exempt_reserve - 1, - split_rent_exempt_reserve, - split_rent_exempt_reserve + minimum_delegation - 1, - split_rent_exempt_reserve + minimum_delegation, - ]; - for initial_balance in split_lamport_balances { + let transaction_accounts = |initial_balance: u64| -> Vec<(Pubkey, AccountSharedData)> { let split_to_account = AccountSharedData::new_data_with_space( initial_balance, &StakeStateV2::Uninitialized, @@ -5232,11 +5443,52 @@ mod tests { &id(), ) .unwrap(); - let transaction_accounts = vec![ + vec![ (stake_address, stake_account.clone()), (split_to_address, split_to_account), (rent::id(), create_account_shared_data_for_test(&rent)), - ]; + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), + ] + }; + + // Test insufficient account prefunding, including empty and less than rent_exempt_reserve + let split_lamport_balances = vec![0, split_rent_exempt_reserve - 1]; + for initial_balance in split_lamport_balances { + process_instruction( + Arc::clone(&feature_set), + &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), + 
transaction_accounts(initial_balance), + instruction_accounts.clone(), + Err(InstructionError::InsufficientFunds), + ); + } + + // Test various account prefunding, including exactly rent_exempt_reserve, and more than + // rent_exempt_reserve. The empty case is not covered in test_split, since that test uses a + // Meta with rent_exempt_reserve = 0 + let split_lamport_balances = vec![ + split_rent_exempt_reserve, + split_rent_exempt_reserve + minimum_delegation - 1, + split_rent_exempt_reserve + minimum_delegation, + ]; + for initial_balance in split_lamport_balances { + let transaction_accounts = transaction_accounts(initial_balance); + let expected_active_stake = get_active_stake_for_tests( + &[ + transaction_accounts[0].1.clone(), + transaction_accounts[1].1.clone(), + ], + &clock, + &stake_history, + ); // split more than available fails process_instruction( @@ -5260,6 +5512,11 @@ mod tests { accounts[0].lamports() + accounts[1].lamports(), stake_lamports + initial_balance ); + // no deactivated stake + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); if let StakeStateV2::Stake(meta, stake, stake_flags) = state { let expected_split_meta = Meta { @@ -5315,6 +5572,8 @@ mod tests { let rent = Rent::default(); let source_smaller_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); let split_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of() + 100); + let stake_history = StakeHistory::default(); + let current_epoch = 100; let stake_lamports = split_rent_exempt_reserve + 1; let stake_address = solana_sdk::pubkey::new_rand(); let meta = Meta { @@ -5363,6 +5622,21 @@ mod tests { (stake_address, stake_account.clone()), (split_to_address, split_to_account), (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + ( + clock::id(), + create_account_shared_data_for_test(&Clock { + epoch: 
current_epoch, + ..Clock::default() + }), + ), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ]; // should always return error when splitting to larger account @@ -5391,6 +5665,12 @@ mod tests { fn test_split_100_percent_of_source(feature_set: Arc) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_lamports = rent_exempt_reserve + minimum_delegation; let stake_address = solana_sdk::pubkey::new_rand(); @@ -5432,10 +5712,24 @@ mod tests { &id(), ) .unwrap(); + let expected_active_stake = get_active_stake_for_tests( + &[stake_account.clone(), split_to_account.clone()], + &clock, + &stake_history, + ); let transaction_accounts = vec![ (stake_address, stake_account), (split_to_address, split_to_account.clone()), (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ]; // split 100% over to dest @@ -5452,6 +5746,11 @@ mod tests { accounts[0].lamports() + accounts[1].lamports(), stake_lamports ); + // no deactivated stake + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); match state { StakeStateV2::Initialized(_) => { @@ -5486,6 +5785,12 @@ mod tests { fn test_split_100_percent_of_source_to_account_with_lamports(feature_set: Arc) { let rent = Rent::default(); let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: 
current_epoch, + ..Clock::default() + }; let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_lamports = rent_exempt_reserve + minimum_delegation; let stake_address = solana_sdk::pubkey::new_rand(); @@ -5534,10 +5839,24 @@ mod tests { &id(), ) .unwrap(); + let expected_active_stake = get_active_stake_for_tests( + &[stake_account.clone(), split_to_account.clone()], + &clock, + &stake_history, + ); let transaction_accounts = vec![ (stake_address, stake_account.clone()), (split_to_address, split_to_account), (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ]; // split 100% over to dest @@ -5554,6 +5873,11 @@ mod tests { accounts[0].lamports() + accounts[1].lamports(), stake_lamports + initial_balance ); + // no deactivated stake + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); if let StakeStateV2::Stake(meta, stake, stake_flags) = state { assert_eq!( @@ -5582,6 +5906,12 @@ mod tests { let rent = Rent::default(); let source_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of() + 100); let split_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; let minimum_delegation = crate::get_minimum_delegation(&feature_set); let stake_lamports = source_rent_exempt_reserve + minimum_delegation; let stake_address = solana_sdk::pubkey::new_rand(); @@ -5627,6 +5957,15 @@ mod tests { (stake_address, stake_account), (split_to_address, split_to_account), (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + 
create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ]; process_instruction( Arc::clone(&feature_set), @@ -5652,10 +5991,30 @@ mod tests { &id(), ) .unwrap(); + let expected_active_stake = get_active_stake_for_tests( + &[stake_account.clone(), split_to_account.clone()], + &clock, + &stake_history, + ); let transaction_accounts = vec![ (stake_address, stake_account), (split_to_address, split_to_account), (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + ( + clock::id(), + create_account_shared_data_for_test(&Clock { + epoch: current_epoch, + ..Clock::default() + }), + ), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), ]; let accounts = process_instruction( Arc::clone(&feature_set), @@ -5665,6 +6024,10 @@ mod tests { Ok(()), ); assert_eq!(accounts[1].lamports(), stake_lamports); + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); let expected_split_meta = Meta { authorized: Authorized::auto(&stake_address), @@ -5709,6 +6072,200 @@ mod tests { } } + #[test_case(feature_set_without_require_rent_exempt_split_destination(), Ok(()); "without_require_rent_exempt_split_destination")] + #[test_case(feature_set_all_enabled(), Err(InstructionError::InsufficientFunds); "all_enabled")] + fn test_split_require_rent_exempt_destination( + feature_set: Arc, + expected_result: Result<(), InstructionError>, + ) { + let rent = Rent::default(); + let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); + let stake_history = StakeHistory::default(); + let current_epoch = 100; + let clock = Clock { + epoch: current_epoch, + ..Clock::default() + }; + let minimum_delegation = 
crate::get_minimum_delegation(&feature_set); + let delegation_amount = 3 * minimum_delegation; + let source_lamports = rent_exempt_reserve + delegation_amount; + let source_address = Pubkey::new_unique(); + let destination_address = Pubkey::new_unique(); + let meta = Meta { + authorized: Authorized::auto(&source_address), + rent_exempt_reserve, + ..Meta::default() + }; + let instruction_accounts = vec![ + AccountMeta { + pubkey: source_address, + is_signer: true, + is_writable: true, + }, + AccountMeta { + pubkey: destination_address, + is_signer: false, + is_writable: true, + }, + ]; + + for (split_amount, expected_result) in [ + (2 * minimum_delegation, expected_result), + (source_lamports, Ok(())), + ] { + for (state, expected_result) in &[ + (StakeStateV2::Initialized(meta), Ok(())), + (just_stake(meta, delegation_amount), expected_result), + ] { + let source_account = AccountSharedData::new_data_with_space( + source_lamports, + &state, + StakeStateV2::size_of(), + &id(), + ) + .unwrap(); + + let transaction_accounts = + |initial_balance: u64| -> Vec<(Pubkey, AccountSharedData)> { + let destination_account = AccountSharedData::new_data_with_space( + initial_balance, + &StakeStateV2::Uninitialized, + StakeStateV2::size_of(), + &id(), + ) + .unwrap(); + vec![ + (source_address, source_account.clone()), + (destination_address, destination_account), + (rent::id(), create_account_shared_data_for_test(&rent)), + ( + stake_history::id(), + create_account_shared_data_for_test(&stake_history), + ), + (clock::id(), create_account_shared_data_for_test(&clock)), + ( + epoch_schedule::id(), + create_account_shared_data_for_test(&EpochSchedule::default()), + ), + ] + }; + + // Test insufficient recipient prefunding; should error once feature is activated + let split_lamport_balances = vec![0, rent_exempt_reserve - 1]; + for initial_balance in split_lamport_balances { + let transaction_accounts = transaction_accounts(initial_balance); + let expected_active_stake = 
get_active_stake_for_tests( + &[source_account.clone(), transaction_accounts[1].1.clone()], + &clock, + &stake_history, + ); + let result_accounts = process_instruction( + Arc::clone(&feature_set), + &serialize(&StakeInstruction::Split(split_amount)).unwrap(), + transaction_accounts.clone(), + instruction_accounts.clone(), + expected_result.clone(), + ); + let result_active_stake = + get_active_stake_for_tests(&result_accounts[0..2], &clock, &stake_history); + if expected_active_stake > 0 // starting stake was delegated + // partial split + && result_accounts[0].lamports() > 0 + // successful split to deficient recipient + && expected_result.is_ok() + { + assert_ne!(expected_active_stake, result_active_stake); + } else { + assert_eq!(expected_active_stake, result_active_stake); + } + } + + // Test recipient prefunding, including exactly rent_exempt_reserve, and more than + // rent_exempt_reserve. + let split_lamport_balances = vec![rent_exempt_reserve, rent_exempt_reserve + 1]; + for initial_balance in split_lamport_balances { + let transaction_accounts = transaction_accounts(initial_balance); + let expected_active_stake = get_active_stake_for_tests( + &[source_account.clone(), transaction_accounts[1].1.clone()], + &clock, + &stake_history, + ); + let accounts = process_instruction( + Arc::clone(&feature_set), + &serialize(&StakeInstruction::Split(split_amount)).unwrap(), + transaction_accounts, + instruction_accounts.clone(), + Ok(()), + ); + + // no lamport leakage + assert_eq!( + accounts[0].lamports() + accounts[1].lamports(), + source_lamports + initial_balance + ); + + // no deactivated stake + assert_eq!( + expected_active_stake, + get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) + ); + + if let StakeStateV2::Stake(meta, stake, stake_flags) = state { + // split entire source account, including rent-exempt reserve + if accounts[0].lamports() == 0 { + assert_eq!(Ok(StakeStateV2::Uninitialized), accounts[0].state()); + assert_eq!( + 
Ok(StakeStateV2::Stake( + *meta, + Stake { + delegation: Delegation { + // delegated amount should not include source + // rent-exempt reserve + stake: delegation_amount, + ..stake.delegation + }, + ..*stake + }, + *stake_flags, + )), + accounts[1].state() + ); + } else { + assert_eq!( + Ok(StakeStateV2::Stake( + *meta, + Stake { + delegation: Delegation { + stake: minimum_delegation, + ..stake.delegation + }, + ..*stake + }, + *stake_flags, + )), + accounts[0].state() + ); + assert_eq!( + Ok(StakeStateV2::Stake( + *meta, + Stake { + delegation: Delegation { + stake: split_amount, + ..stake.delegation + }, + ..*stake + }, + *stake_flags, + )), + accounts[1].state() + ); + } + } + } + } + } + } + #[test_case(feature_set_old_warmup_cooldown_no_minimum_delegation(); "old_warmup_cooldown_no_min_delegation")] #[test_case(feature_set_old_warmup_cooldown(); "old_warmup_cooldown")] #[test_case(feature_set_all_enabled(); "all_enabled")] diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 713054ae629db8..964d2d6ffc5d78 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -103,6 +103,19 @@ pub(crate) fn new_warmup_cooldown_rate_epoch(invoke_context: &InvokeContext) -> .new_warmup_cooldown_rate_epoch(epoch_schedule.as_ref()) } +fn get_stake_status( + invoke_context: &InvokeContext, + stake: &Stake, + clock: &Clock, +) -> Result { + let stake_history = invoke_context.get_sysvar_cache().get_stake_history()?; + Ok(stake.delegation.stake_activating_and_deactivating( + clock.epoch, + Some(&stake_history), + new_warmup_cooldown_rate_epoch(invoke_context), + )) +} + fn redelegate_stake( invoke_context: &InvokeContext, stake: &mut Stake, @@ -688,6 +701,16 @@ pub fn split( StakeStateV2::Stake(meta, mut stake, stake_flags) => { meta.authorized.check(signers, StakeAuthorize::Staker)?; let minimum_delegation = crate::get_minimum_delegation(&invoke_context.feature_set); + let is_active = if invoke_context + 
.feature_set + .is_active(&feature_set::require_rent_exempt_split_destination::id()) + { + let clock = invoke_context.get_sysvar_cache().get_clock()?; + let status = get_stake_status(invoke_context, &stake, &clock)?; + status.effective > 0 + } else { + false + }; let validated_split_info = validate_split_amount( invoke_context, transaction_context, @@ -697,6 +720,7 @@ pub fn split( lamports, &meta, minimum_delegation, + is_active, )?; // split the stake, subtract rent_exempt_balance unless @@ -763,6 +787,7 @@ pub fn split( lamports, &meta, 0, // additional_required_lamports + false, )?; let mut split_meta = meta; split_meta.rent_exempt_reserve = validated_split_info.destination_rent_exempt_reserve; @@ -925,12 +950,7 @@ pub fn redelegate( let (stake_meta, effective_stake) = if let StakeStateV2::Stake(meta, stake, _stake_flags) = stake_account.get_state()? { - let stake_history = invoke_context.get_sysvar_cache().get_stake_history()?; - let status = stake.delegation.stake_activating_and_deactivating( - clock.epoch, - Some(&stake_history), - new_warmup_cooldown_rate_epoch(invoke_context), - ); + let status = get_stake_status(invoke_context, &stake, &clock)?; if status.effective == 0 || status.activating != 0 || status.deactivating != 0 { ic_msg!(invoke_context, "stake is not active"); return Err(StakeError::RedelegateTransientOrInactiveStake.into()); @@ -1192,6 +1212,7 @@ fn validate_split_amount( lamports: u64, source_meta: &Meta, additional_required_lamports: u64, + source_is_active: bool, ) -> Result { let source_account = instruction_context .try_borrow_instruction_account(transaction_context, source_account_index)?; @@ -1232,12 +1253,27 @@ fn validate_split_amount( // nothing to do here } + let rent = invoke_context.get_sysvar_cache().get_rent()?; + let destination_rent_exempt_reserve = rent.minimum_balance(destination_data_len); + + // As of feature `require_rent_exempt_split_destination`, if the source is active stake, one of + // these criteria must be met: + 
// 1. the destination account must be prefunded with at least the rent-exempt reserve, or + // 2. the split must consume 100% of the source + if invoke_context + .feature_set + .is_active(&feature_set::require_rent_exempt_split_destination::id()) + && source_is_active + && source_remaining_balance != 0 + && destination_lamports < destination_rent_exempt_reserve + { + return Err(InstructionError::InsufficientFunds); + } + // Verify the destination account meets the minimum balance requirements // This must handle: // 1. The destination account having a different rent exempt reserve due to data size changes // 2. The destination account being prefunded, which would lower the minimum split amount - let rent = invoke_context.get_sysvar_cache().get_rent()?; - let destination_rent_exempt_reserve = rent.minimum_balance(destination_data_len); let destination_minimum_balance = destination_rent_exempt_reserve.saturating_add(additional_required_lamports); let destination_balance_deficit = diff --git a/runtime/tests/stake.rs b/runtime/tests/stake.rs index c260fead027308..7088e6438e1c22 100755 --- a/runtime/tests/stake.rs +++ b/runtime/tests/stake.rs @@ -428,15 +428,21 @@ fn test_stake_account_lifetime() { let split_stake_keypair = Keypair::new(); let split_stake_pubkey = split_stake_keypair.pubkey(); + bank.transfer( + stake_rent_exempt_reserve, + &mint_keypair, + &split_stake_pubkey, + ) + .unwrap(); let bank_client = BankClient::new_shared(bank.clone()); + // Test split let split_starting_delegation = stake_minimum_delegation + bonus_delegation; - let split_starting_balance = split_starting_delegation + stake_rent_exempt_reserve; let message = Message::new( &stake_instruction::split( &stake_pubkey, &stake_pubkey, - split_starting_balance, + split_starting_delegation, &split_stake_pubkey, ), Some(&mint_pubkey), @@ -451,7 +457,7 @@ fn test_stake_account_lifetime() { get_staked(&bank, &split_stake_pubkey), split_starting_delegation, ); - let stake_remaining_balance = balance - 
split_starting_balance; + let stake_remaining_balance = balance - split_starting_delegation; // Deactivate the split let message = Message::new( diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 95ea3f3b6cccb1..e74883ec930e9d 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -689,6 +689,10 @@ pub mod enable_program_runtime_v2_and_loader_v4 { solana_sdk::declare_id!("8oBxsYqnCvUTGzgEpxPcnVf7MLbWWPYddE33PftFeBBd"); } +pub mod require_rent_exempt_split_destination { + solana_sdk::declare_id!("D2aip4BBr8NPWtU9vLrwrBvbuaQ8w1zV38zFLxx4pfBV"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -856,6 +860,7 @@ lazy_static! { (timely_vote_credits::id(), "use timeliness of votes in determining credits to award"), (remaining_compute_units_syscall_enabled::id(), "enable the remaining_compute_units syscall"), (enable_program_runtime_v2_and_loader_v4::id(), "Enable Program-Runtime-v2 and Loader-v4 #33293"), + (require_rent_exempt_split_destination::id(), "Require stake split destination account to be rent exempt"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/tokens/src/arg_parser.rs b/tokens/src/arg_parser.rs index e40b29237c344b..924c4e3e8eebb6 100644 --- a/tokens/src/arg_parser.rs +++ b/tokens/src/arg_parser.rs @@ -559,6 +559,7 @@ fn parse_distribute_stake_args( stake_authority, withdraw_authority, lockup_authority, + rent_exempt_reserve: None, }; let stake_args = StakeArgs { unlocked_sol: sol_to_lamports(value_t_or_exit!(matches, "unlocked_sol", f64)), diff --git a/tokens/src/args.rs b/tokens/src/args.rs index b1f1522e1558bf..0dd4859f51e948 100644 --- a/tokens/src/args.rs +++ b/tokens/src/args.rs @@ -5,6 +5,7 @@ pub struct SenderStakeArgs { pub stake_authority: Box, pub withdraw_authority: Box, pub lockup_authority: Option>, + pub rent_exempt_reserve: Option, } pub struct StakeArgs { diff --git a/tokens/src/commands.rs 
b/tokens/src/commands.rs index 5b2603814b87c5..c10ad508d61a1c 100644 --- a/tokens/src/commands.rs +++ b/tokens/src/commands.rs @@ -31,7 +31,7 @@ use { signature::{unique_signers, Signature, Signer}, stake::{ instruction::{self as stake_instruction, LockupArgs}, - state::{Authorized, Lockup, StakeAuthorize}, + state::{Authorized, Lockup, StakeAuthorize, StakeStateV2}, }, system_instruction, transaction::Transaction, @@ -234,12 +234,24 @@ fn distribution_instructions( Some(sender_stake_args) => { let stake_authority = sender_stake_args.stake_authority.pubkey(); let withdraw_authority = sender_stake_args.withdraw_authority.pubkey(); - let mut instructions = stake_instruction::split( + let rent_exempt_reserve = sender_stake_args + .rent_exempt_reserve + .expect("SenderStakeArgs.rent_exempt_reserve should be populated"); + + // Transfer some tokens to stake account to cover rent-exempt reserve. + let mut instructions = vec![system_instruction::transfer( + &sender_pubkey, + new_stake_account_address, + rent_exempt_reserve, + )]; + + // Split to stake account + instructions.append(&mut stake_instruction::split( &sender_stake_args.stake_account_address, &stake_authority, - allocation.amount - unlocked_sol, + allocation.amount - unlocked_sol - rent_exempt_reserve, new_stake_account_address, - ); + )); // Make the recipient the new stake authority instructions.push(stake_instruction::authorize( @@ -1174,11 +1186,15 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp let output_file = NamedTempFile::new().unwrap(); let output_path = output_file.path().to_str().unwrap().to_string(); + let rent_exempt_reserve = client + .get_minimum_balance_for_rent_exemption(StakeStateV2::size_of()) + .unwrap(); let sender_stake_args = SenderStakeArgs { stake_account_address, stake_authority: Box::new(stake_authority), withdraw_authority: Box::new(withdraw_authority), lockup_authority: None, + rent_exempt_reserve: Some(rent_exempt_reserve), }; let stake_args = 
StakeArgs { unlocked_sol: sol_to_lamports(1.0), @@ -1529,14 +1545,14 @@ mod tests { )); // Same recipient, same lockups } - const SET_LOCKUP_INDEX: usize = 5; + const SET_LOCKUP_INDEX: usize = 6; #[test] fn test_set_split_stake_lockup() { let lockup_date_str = "2021-01-07T00:00:00Z"; let allocation = Allocation { recipient: Pubkey::default().to_string(), - amount: sol_to_lamports(1.0), + amount: sol_to_lamports(1.002_282_880), lockup_date: lockup_date_str.to_string(), }; let stake_account_address = solana_sdk::pubkey::new_rand(); @@ -1548,6 +1564,7 @@ mod tests { stake_authority: Box::new(Keypair::new()), withdraw_authority: Box::new(Keypair::new()), lockup_authority: Some(Box::new(lockup_authority)), + rent_exempt_reserve: Some(2_282_880), }; let stake_args = StakeArgs { lockup_authority: Some(lockup_authority_address), @@ -1821,6 +1838,7 @@ mod tests { stake_authority: Box::new(stake_authority), withdraw_authority: Box::new(withdraw_authority), lockup_authority: None, + rent_exempt_reserve: Some(2_282_880), }; StakeArgs { diff --git a/tokens/src/lib.rs b/tokens/src/lib.rs index 0198312abef524..2e1e4641bcb323 100644 --- a/tokens/src/lib.rs +++ b/tokens/src/lib.rs @@ -4,4 +4,5 @@ pub mod args; pub mod commands; mod db; pub mod spl_token; +pub mod stake; pub mod token_display; diff --git a/tokens/src/main.rs b/tokens/src/main.rs index f72278a99f9cca..c97287671dace5 100644 --- a/tokens/src/main.rs +++ b/tokens/src/main.rs @@ -2,7 +2,7 @@ use { solana_clap_utils::input_validators::normalize_to_url_if_moniker, solana_cli_config::{Config, CONFIG_FILE}, solana_rpc_client::rpc_client::RpcClient, - solana_tokens::{arg_parser::parse_args, args::Command, commands, spl_token}, + solana_tokens::{arg_parser::parse_args, args::Command, commands, spl_token, stake}, std::{ env, error::Error, @@ -43,6 +43,7 @@ fn main() -> Result<(), Box> { match command_args.command { Command::DistributeTokens(mut args) => { spl_token::update_token_args(&client, &mut args.spl_token_args)?; + 
stake::update_stake_args(&client, &mut args.stake_args)?; commands::process_allocations(&client, &args, exit)?; } Command::Balances(mut args) => { diff --git a/tokens/src/stake.rs b/tokens/src/stake.rs new file mode 100644 index 00000000000000..3f1c35a3b4df36 --- /dev/null +++ b/tokens/src/stake.rs @@ -0,0 +1,15 @@ +use { + crate::{args::StakeArgs, commands::Error}, + solana_rpc_client::rpc_client::RpcClient, + solana_sdk::stake::state::StakeStateV2, +}; + +pub fn update_stake_args(client: &RpcClient, args: &mut Option) -> Result<(), Error> { + if let Some(stake_args) = args { + if let Some(sender_args) = &mut stake_args.sender_stake_args { + let rent = client.get_minimum_balance_for_rent_exemption(StakeStateV2::size_of())?; + sender_args.rent_exempt_reserve = Some(rent); + } + } + Ok(()) +} From df93145c9707bbe9c4fec5576a6f4d48374ca048 Mon Sep 17 00:00:00 2001 From: aric <118040453+aric0x02@users.noreply.github.com> Date: Wed, 20 Sep 2023 18:04:46 +0800 Subject: [PATCH 133/407] Update compressed-nfts.md typo single single token (#33321) --- docs/src/developing/guides/compressed-nfts.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/developing/guides/compressed-nfts.md b/docs/src/developing/guides/compressed-nfts.md index afcfbd564c9db1..3dd613dfaf33a5 100644 --- a/docs/src/developing/guides/compressed-nfts.md +++ b/docs/src/developing/guides/compressed-nfts.md @@ -161,7 +161,7 @@ actually create them following the same process of creating an - create a new token "mint" - create a associated token account (`ata`) for our token mint -- actually mint a single single token +- actually mint a single token - store the collection's metadata in an Account on-chain Since NFT Collections having nothing special to do with From cfd0a00ae2ba85a6d76757df8b4fa38ed242d185 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 20 Sep 2023 07:50:17 -0700 Subject: [PATCH 134/407] drop disk index bucket files on drop by default (#33316) --- 
bucket_map/src/bucket_storage.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index 5011d0567a5176..5d2e8f308b6310 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -79,6 +79,8 @@ pub struct BucketStorage { pub stats: Arc, pub max_search: MaxSearch, pub contents: O, + /// true if when this bucket is dropped, the file should be deleted + pub delete_file_on_drop: bool, } #[derive(Debug)] @@ -88,7 +90,9 @@ pub enum BucketStorageError { impl Drop for BucketStorage { fn drop(&mut self) { - self.delete(); + if self.delete_file_on_drop { + self.delete(); + } } } @@ -157,6 +161,8 @@ impl BucketStorage { stats, max_search, contents: O::new(capacity), + // by default, newly created files will get deleted when dropped + delete_file_on_drop: true, }, file_name, ) From 5f58d2dd4a1120521e6ae75bb958d7ba082f17cb Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 20 Sep 2023 08:12:07 -0700 Subject: [PATCH 135/407] add occupy_if_matches to bucket map (#33318) --- bucket_map/src/bucket.rs | 112 +++++++++++++++++++++++++++++++++- bucket_map/src/index_entry.rs | 45 +++++++++++++- 2 files changed, 155 insertions(+), 2 deletions(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 650a579b7a3883..9dbc3402f9e560 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -756,7 +756,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { #[cfg(test)] mod tests { - use {super::*, tempfile::tempdir}; + use {super::*, crate::index_entry::OccupyIfMatches, tempfile::tempdir}; #[test] fn test_index_entries() { @@ -950,6 +950,116 @@ mod tests { } } + #[test] + fn test_occupy_if_matches() { + let random = 1; + let k = Pubkey::from([1u8; 32]); + let k2 = Pubkey::from([2u8; 32]); + let v = 12u64; + let v2 = 13u64; + let raw = vec![(k, v)]; + let hashed = Bucket::index_entries(&raw, random); + 
let hashed_raw = hashed.clone(); + + let mut index = create_test_index(None); + + let single_hashed_raw_inserted = hashed_raw.last().unwrap(); + let elem = IndexEntryPlaceInBucket::new(single_hashed_raw_inserted.0 % index.capacity()); + + assert_eq!(elem.get_slot_count_enum(&index), OccupiedEnum::Free); + elem.init(&mut index, &k); + elem.set_slot_count_enum_value(&mut index, OccupiedEnum::OneSlotInIndex(&v)); + assert_eq!( + elem.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&v) + ); + // clear it + elem.set_slot_count_enum_value(&mut index, OccupiedEnum::Free); + assert_eq!(elem.get_slot_count_enum(&index), OccupiedEnum::Free); + + assert_eq!( + elem.occupy_if_matches(&mut index, &v, &k), + OccupyIfMatches::SuccessfulInit + ); + assert_eq!( + elem.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&v) + ); + // clear it + elem.set_slot_count_enum_value(&mut index, OccupiedEnum::Free); + assert_eq!(elem.get_slot_count_enum(&index), OccupiedEnum::Free); + + // v2 but will still write it + assert_eq!( + elem.occupy_if_matches(&mut index, &v2, &k), + OccupyIfMatches::SuccessfulInit + ); + assert_eq!( + elem.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&v2) + ); + + // already a different occupied value for this pubkey in the index, so found duplicate + assert_eq!( + elem.occupy_if_matches(&mut index, &v, &k), + OccupyIfMatches::FoundDuplicate + ); + assert_eq!( + elem.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&v2) + ); + + // k2 is pubkey mismatch + assert_eq!( + elem.occupy_if_matches(&mut index, &v, &k2), + OccupyIfMatches::PubkeyMismatch + ); + + // clear it + elem.set_slot_count_enum_value(&mut index, OccupiedEnum::Free); + assert_eq!(elem.get_slot_count_enum(&index), OccupiedEnum::Free); + + // k2 is pubkey mismatch + assert_eq!( + elem.occupy_if_matches(&mut index, &v, &k2), + OccupyIfMatches::PubkeyMismatch + ); + } + + #[test] + #[should_panic(expected = "index asked to insert the same data twice")] + fn 
test_occupy_if_matches_panic() { + solana_logger::setup(); + let random = 1; + let k = Pubkey::from([1u8; 32]); + let v = 12u64; + let raw = vec![(k, v)]; + let hashed = Bucket::index_entries(&raw, random); + let hashed_raw = hashed.clone(); + + let mut index = create_test_index(None); + + let single_hashed_raw_inserted = hashed_raw.last().unwrap(); + let elem = IndexEntryPlaceInBucket::new(single_hashed_raw_inserted.0 % index.capacity()); + + assert_eq!(elem.get_slot_count_enum(&index), OccupiedEnum::Free); + elem.init(&mut index, &k); + elem.set_slot_count_enum_value(&mut index, OccupiedEnum::OneSlotInIndex(&v)); + assert_eq!( + elem.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&v) + ); + + assert_eq!( + elem.occupy_if_matches(&mut index, &v, &k), + OccupyIfMatches::SuccessfulInit + ); + assert_eq!( + elem.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&v) + ); + } + #[should_panic(expected = "batch insertion can only occur prior to any deletes")] #[test] fn batch_insert_after_delete() { diff --git a/bucket_map/src/index_entry.rs b/bucket_map/src/index_entry.rs index 88d869e4215ef3..a3e2fb89616584 100644 --- a/bucket_map/src/index_entry.rs +++ b/bucket_map/src/index_entry.rs @@ -36,6 +36,16 @@ struct DataBucketRefCountOccupiedHeader { packed_ref_count: PackedRefCount, } +#[derive(Debug, PartialEq)] +pub enum OccupyIfMatches { + /// this entry is occupied and contains a pubkey with a different value, so this entry could not be updated + FoundDuplicate, + /// this entry was free and contains this pubkey and either value matched or the value was written to match + SuccessfulInit, + /// this entry had a different pubkey + PubkeyMismatch, +} + /// allocated in `contents` in a BucketStorage #[derive(Copy, Clone)] #[repr(C)] @@ -279,7 +289,7 @@ pub(crate) union SingleElementOrMultipleSlots { /// just the values for `OccupiedEnum` /// This excludes the contents of any enum value. 
-#[derive(PartialEq, FromPrimitive)] +#[derive(PartialEq, FromPrimitive, Debug)] #[repr(u8)] enum OccupiedEnumTag { #[default] @@ -377,6 +387,39 @@ impl IndexEntryPlaceInBucket { index_entry.key = *pubkey; } + /// If the entry matches the pubkey and is unoccupied, then store `data` here and occupy the entry. + pub(crate) fn occupy_if_matches( + &self, + index_bucket: &mut BucketStorage>, + data: &T, + k: &Pubkey, + ) -> OccupyIfMatches { + let index_entry = index_bucket.get::>(self.ix); + if &index_entry.key == k { + let enum_tag = index_bucket.contents.get_enum_tag(self.ix); + if unsafe { &index_entry.contents.single_element } == data { + assert_eq!( + enum_tag, + OccupiedEnumTag::Free, + "index asked to insert the same data twice" + ); + index_bucket + .contents + .set_enum_tag(self.ix, OccupiedEnumTag::OneSlotInIndex); + OccupyIfMatches::SuccessfulInit + } else if enum_tag == OccupiedEnumTag::Free { + // pubkey is same, but value is different, so update value + self.set_slot_count_enum_value(index_bucket, OccupiedEnum::OneSlotInIndex(data)); + OccupyIfMatches::SuccessfulInit + } else { + // found occupied duplicate of this pubkey + OccupyIfMatches::FoundDuplicate + } + } else { + OccupyIfMatches::PubkeyMismatch + } + } + pub(crate) fn read_value<'a>( &self, index_bucket: &'a BucketStorage>, From 1e56b88f59d737d488891e22f2bc1c6af9193c07 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Wed, 20 Sep 2023 11:12:42 -0500 Subject: [PATCH 136/407] Update index bucket after data bucket updated (#33315) * typos * update index bucket after data bucket --------- Co-authored-by: HaoranYi --- bucket_map/src/bucket.rs | 10 ++++++---- bucket_map/src/index_entry.rs | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 9dbc3402f9e560..daf04b4498ff83 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -498,10 +498,6 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket 
{ multiple_slots.set_num_slots(num_slots); MultipleSlots::set_ref_count(best_bucket, ix, ref_count); - elem.set_slot_count_enum_value( - &mut self.index, - OccupiedEnum::MultipleSlots(&multiple_slots), - ); //debug!( "DATA ALLOC {:?} {} {} {}", key, elem.data_location, best_bucket.capacity, elem_uid ); let best_bucket = &mut self.data[best_fit_bucket as usize]; best_bucket.occupy(ix, false).unwrap(); @@ -512,6 +508,12 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { *dest = *src; }); } + + // update index bucket after data bucket has been updated. + elem.set_slot_count_enum_value( + &mut self.index, + OccupiedEnum::MultipleSlots(&multiple_slots), + ); success = true; break; } diff --git a/bucket_map/src/index_entry.rs b/bucket_map/src/index_entry.rs index a3e2fb89616584..a81cb78659c803 100644 --- a/bucket_map/src/index_entry.rs +++ b/bucket_map/src/index_entry.rs @@ -283,7 +283,7 @@ pub(crate) union SingleElementOrMultipleSlots { /// the slot list contains a single element. No need for an entry in the data file. /// The element itself is stored in place in the index entry pub(crate) single_element: T, - /// the slot list ocntains more than one element. This contains the reference to the data file. + /// the slot list contains more than one element. This contains the reference to the data file. 
pub(crate) multiple_slots: MultipleSlots, } @@ -313,7 +313,7 @@ pub(crate) enum OccupiedEnum<'a, T> { MultipleSlots(&'a MultipleSlots) = OccupiedEnumTag::MultipleSlots as u8, } -/// Pack the storage offset and capacity-when-crated-pow2 fields into a single u64 +/// Pack the storage offset and capacity-when-created-pow2 fields into a single u64 #[bitfield(bits = 64)] #[repr(C)] #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] From d27aaa79c86aabf5c3c2a7cd7eccb052f41e686d Mon Sep 17 00:00:00 2001 From: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> Date: Wed, 20 Sep 2023 12:53:12 -0400 Subject: [PATCH 137/407] [docs] clarified `jsonParsed` fallback encoding for `simulateTransaction` (#33324) docs: clarified fallback encoding --- docs/src/api/methods/_simulateTransaction.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/api/methods/_simulateTransaction.mdx b/docs/src/api/methods/_simulateTransaction.mdx index fbae901244a97b..9a245846d1f915 100644 --- a/docs/src/api/methods/_simulateTransaction.mdx +++ b/docs/src/api/methods/_simulateTransaction.mdx @@ -88,8 +88,8 @@ encoding for returned Account data - `jsonParsed` encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. -- If `jsonParsed` is requested but a parser cannot be found, the field falls - back to binary encoding, detectable when the `data` field is type `string`. +- If `jsonParsed` is requested but a [parser cannot be found](https://github.com/solana-labs/solana/blob/cfd0a00ae2ba85a6d76757df8b4fa38ed242d185/account-decoder/src/parse_account_data.rs#L98-L100), the field falls + back to `base64` encoding, detectable when the returned `accounts.data` field is type `string`. 
From b7bac74d47fec37658bd8b3271f514bd1e08cfbd Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 20 Sep 2023 12:57:27 -0700 Subject: [PATCH 138/407] refactor header alignment check in bucket storage (#33326) --- bucket_map/src/bucket_storage.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index 5d2e8f308b6310..c331f6b78c91c0 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -142,14 +142,8 @@ impl BucketStorage { stats: Arc, count: Arc, ) -> (Self, u128) { - let offset = O::offset_to_first_data(); - let size_of_u64 = std::mem::size_of::(); - assert_eq!( - offset / size_of_u64 * size_of_u64, - offset, - "header size must be a multiple of u64" - ); - let cell_size = elem_size * num_elems + offset as u64; + let offset = Self::get_offset_to_first_data(); + let cell_size = elem_size * num_elems + offset; let bytes = Self::allocate_to_fill_page(&mut capacity, cell_size); let (mmap, path, file_name) = Self::new_map(&drives, bytes, &stats); ( @@ -213,6 +207,17 @@ impl BucketStorage { ) } + fn get_offset_to_first_data() -> u64 { + let offset = O::offset_to_first_data() as u64; + let size_of_u64 = std::mem::size_of::() as u64; + assert_eq!( + offset / size_of_u64 * size_of_u64, + offset, + "header size must be a multiple of u64" + ); + offset + } + pub(crate) fn copying_entry(&mut self, ix_new: u64, other: &Self, ix_old: u64) { let start = self.get_start_offset_with_header(ix_new); let start_old = other.get_start_offset_with_header(ix_old); From a32ef97592cdf66771a3df75bb8688e93cd5b420 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 20 Sep 2023 13:38:12 -0700 Subject: [PATCH 139/407] fix comments (#33330) --- bucket_map/src/index_entry.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bucket_map/src/index_entry.rs b/bucket_map/src/index_entry.rs index 
a81cb78659c803..b8a55a07bee80b 100644 --- a/bucket_map/src/index_entry.rs +++ b/bucket_map/src/index_entry.rs @@ -38,7 +38,7 @@ struct DataBucketRefCountOccupiedHeader { #[derive(Debug, PartialEq)] pub enum OccupyIfMatches { - /// this entry is occupied and contains a pubkey with a different value, so this entry could not be updated + /// this entry is occupied and contains the same pubkey but with a different value, so this entry could not be updated FoundDuplicate, /// this entry was free and contains this pubkey and either value matched or the value was written to match SuccessfulInit, From 2839d51d5ee1a5d379b00b89062c6ff142b163f0 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 20 Sep 2023 16:52:52 -0400 Subject: [PATCH 140/407] Unhides fastboot cli arg (#33329) --- ledger-tool/src/main.rs | 1 - validator/src/cli.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 10ef5c24665bfa..c752c0f58efb47 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1245,7 +1245,6 @@ fn main() { let use_snapshot_archives_at_startup = Arg::with_name(use_snapshot_archives_at_startup::cli::NAME) .long(use_snapshot_archives_at_startup::cli::LONG_ARG) - .hidden(hidden_unless_forced()) .takes_value(true) .possible_values(use_snapshot_archives_at_startup::cli::POSSIBLE_VALUES) .default_value(use_snapshot_archives_at_startup::cli::default_value()) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 1fbe16cec77639..bdd0ac77ab17e8 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -305,7 +305,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .arg( Arg::with_name(use_snapshot_archives_at_startup::cli::NAME) .long(use_snapshot_archives_at_startup::cli::LONG_ARG) - .hidden(hidden_unless_forced()) .takes_value(true) .possible_values(use_snapshot_archives_at_startup::cli::POSSIBLE_VALUES) .default_value(use_snapshot_archives_at_startup::cli::default_value()) 
From 1d39c3167d1e3ddff74773b1ff652ccee1f16355 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 20 Sep 2023 23:45:55 -0400 Subject: [PATCH 141/407] Adds default info to cli arg for --accounts-hash-cache-path (#33331) --- ledger-tool/src/main.rs | 2 +- validator/src/cli.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index c752c0f58efb47..2fa26528497860 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1150,7 +1150,7 @@ fn main() { .long("accounts-hash-cache-path") .value_name("PATH") .takes_value(true) - .help("Use PATH as accounts hash cache location"); + .help("Use PATH as accounts hash cache location [default: /accounts_hash_cache]"); let accounts_index_path_arg = Arg::with_name("accounts_index_path") .long("accounts-index-path") .value_name("PATH") diff --git a/validator/src/cli.rs b/validator/src/cli.rs index bdd0ac77ab17e8..1eef34535511ff 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -293,7 +293,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .long("accounts-hash-cache-path") .value_name("PATH") .takes_value(true) - .help("Use PATH as accounts hash cache location"), + .help("Use PATH as accounts hash cache location [default: /accounts_hash_cache]"), ) .arg( Arg::with_name("snapshots") From f22a349541c1f843c918bfa7ca9e4e170ba0e569 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 21 Sep 2023 12:21:10 +0800 Subject: [PATCH 142/407] chore: remove unused code (#33335) remove unused code --- ci/test-local-cluster-flakey.sh | 1 - ci/test-local-cluster-slow-1.sh | 1 - ci/test-local-cluster-slow-2.sh | 1 - ci/test-local-cluster.sh | 1 - ci/test-stable.sh | 20 -------------------- 5 files changed, 24 deletions(-) delete mode 120000 ci/test-local-cluster-flakey.sh delete mode 120000 ci/test-local-cluster-slow-1.sh delete mode 120000 ci/test-local-cluster-slow-2.sh delete mode 120000 ci/test-local-cluster.sh diff --git 
a/ci/test-local-cluster-flakey.sh b/ci/test-local-cluster-flakey.sh deleted file mode 120000 index 0c92a5c7bd6fd4..00000000000000 --- a/ci/test-local-cluster-flakey.sh +++ /dev/null @@ -1 +0,0 @@ -test-stable.sh \ No newline at end of file diff --git a/ci/test-local-cluster-slow-1.sh b/ci/test-local-cluster-slow-1.sh deleted file mode 120000 index 0c92a5c7bd6fd4..00000000000000 --- a/ci/test-local-cluster-slow-1.sh +++ /dev/null @@ -1 +0,0 @@ -test-stable.sh \ No newline at end of file diff --git a/ci/test-local-cluster-slow-2.sh b/ci/test-local-cluster-slow-2.sh deleted file mode 120000 index 0c92a5c7bd6fd4..00000000000000 --- a/ci/test-local-cluster-slow-2.sh +++ /dev/null @@ -1 +0,0 @@ -test-stable.sh \ No newline at end of file diff --git a/ci/test-local-cluster.sh b/ci/test-local-cluster.sh deleted file mode 120000 index 0c92a5c7bd6fd4..00000000000000 --- a/ci/test-local-cluster.sh +++ /dev/null @@ -1 +0,0 @@ -test-stable.sh \ No newline at end of file diff --git a/ci/test-stable.sh b/ci/test-stable.sh index a5783bc33e2f6d..e5f8b65326d99e 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -135,26 +135,6 @@ test-stable-perf) _ cargo test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture _ cargo run --manifest-path poh-bench/Cargo.toml ${V:+--verbose} -- --hashes-per-tick 10 ;; -test-local-cluster) - _ cargo build --release --bins ${V:+--verbose} - _ ci/intercept.sh cargo test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1 - exit 0 - ;; -test-local-cluster-flakey) - _ cargo build --release --bins ${V:+--verbose} - _ ci/intercept.sh cargo test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1 - exit 0 - ;; -test-local-cluster-slow-1) - _ cargo build --release --bins ${V:+--verbose} - _ ci/intercept.sh cargo test --release --package solana-local-cluster --test 
local_cluster_slow_1 ${V:+--verbose} -- --nocapture --test-threads=1 - exit 0 - ;; -test-local-cluster-slow-2) - _ cargo build --release --bins ${V:+--verbose} - _ ci/intercept.sh cargo test --release --package solana-local-cluster --test local_cluster_slow_2 ${V:+--verbose} -- --nocapture --test-threads=1 - exit 0 - ;; test-wasm) _ node --version _ npm --version From 62f153ec8830ac5bfff96a5f269fd371ce3aace1 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 21 Sep 2023 13:46:23 +0800 Subject: [PATCH 143/407] ci: use new installation instructions for nodejs (#33337) * use new installation instructions for nodejs * more comments * remove apt list in the end --- ci/docker-rust/Dockerfile | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 07fb9e973e0062..f6d1c5906e9a99 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -2,12 +2,13 @@ # ci/rust-version.sh to pick up the new image tag FROM rust:1.72.0 +ARG NODE_MAJOR=18 + RUN set -x \ && apt update \ && apt-get install apt-transport-https \ && echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \ && apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \ - && curl -fsSL https://deb.nodesource.com/setup_current.x | bash - \ && apt update \ && apt install -y \ buildkite-agent \ @@ -27,9 +28,17 @@ RUN set -x \ protobuf-compiler \ \ && apt remove -y libcurl4-openssl-dev \ - && rm -rf /var/lib/apt/lists/* \ + # node + && sudo apt-get update \ + && sudo apt-get install -y ca-certificates curl gnupg \ + && sudo mkdir -p /etc/apt/keyrings \ + && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | sudo gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ + && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | 
sudo tee /etc/apt/sources.list.d/nodesource.list \ + && sudo apt-get update \ + && sudo apt-get install nodejs -y \ && node --version \ && npm --version \ + # rust && rustup component add rustfmt \ && rustup component add clippy \ && rustup target add wasm32-unknown-unknown \ @@ -42,4 +51,5 @@ RUN set -x \ && cargo install wasm-pack \ && cargo install sccache \ && rustc --version \ - && cargo --version + && cargo --version \ + && rm -rf /var/lib/apt/lists/* From 7c545b0ae96be5a3ed2619fd04cf4f74e5447b77 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Wed, 20 Sep 2023 23:53:36 -0600 Subject: [PATCH 144/407] bump rust stable to 1.72.1 (#33333) * bump rust stable to 1.72.1 * bump rust nightly to 2023-09-20 * fix nightly lint -- unused doc comment -- rustdoc does not generate documentation for expression fields * fix nightly lint -- unnecessarily eager cloning of iterator items * fix nightly lint -- loop never actually loops --- accounts-db/src/accounts_db.rs | 6 +-- accounts-db/src/ancient_append_vecs.rs | 6 +-- accounts-db/src/partitioned_rewards.rs | 12 +++--- ci/docker-rust-nightly/Dockerfile | 2 +- ci/docker-rust/Dockerfile | 2 +- ci/rust-version.sh | 2 +- ledger/src/blockstore.rs | 6 +-- ledger/src/blockstore/blockstore_purge.rs | 4 +- local-cluster/tests/local_cluster.rs | 43 ++++++++----------- rust-toolchain.toml | 2 +- .../src/vote/state/vote_state_versions.rs | 10 ----- 11 files changed, 39 insertions(+), 56 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index d2800b2ef7e06a..be5509678db4e7 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -10146,15 +10146,15 @@ pub mod tests { let offset = 3; let hash = Hash::new(&[2; 32]); let stored_meta = StoredMeta { - /// global write version + // global write version write_version_obsolete: 0, - /// key for the account + // key for the account pubkey, data_len: 43, }; let account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { 
meta: &stored_meta, - /// account data + // account data account_meta: &account_meta, data: account.data(), offset, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 3f4b7bb71f9a47..2bfe2a094edf84 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -1822,15 +1822,15 @@ pub mod tests { let offset = 3; let hash = Hash::new(&[2; 32]); let stored_meta = StoredMeta { - /// global write version + // global write version write_version_obsolete: 0, - /// key for the account + // key for the account pubkey, data_len: 43, }; let account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { meta: &stored_meta, - /// account data + // account data account_meta: &account_meta, data: account.data(), offset, diff --git a/accounts-db/src/partitioned_rewards.rs b/accounts-db/src/partitioned_rewards.rs index c638b047ca76ff..9d012a71a4a018 100644 --- a/accounts-db/src/partitioned_rewards.rs +++ b/accounts-db/src/partitioned_rewards.rs @@ -27,13 +27,13 @@ pub struct PartitionedEpochRewardsConfig { impl Default for PartitionedEpochRewardsConfig { fn default() -> Self { Self { - /// reward calculation happens synchronously during the first block of the epoch boundary. - /// So, # blocks for reward calculation is 1. + // reward calculation happens synchronously during the first block of the epoch boundary. + // So, # blocks for reward calculation is 1. reward_calculation_num_blocks: 1, - /// # stake accounts to store in one block during partitioned reward interval - /// Target to store 64 rewards per entry/tick in a block. A block has a minimum of 64 - /// entries/tick. This gives 4096 total rewards to store in one block. - /// This constant affects consensus. + // # stake accounts to store in one block during partitioned reward interval + // Target to store 64 rewards per entry/tick in a block. A block has a minimum of 64 + // entries/tick. 
This gives 4096 total rewards to store in one block. + // This constant affects consensus. stake_account_stores_per_block: 4096, test_enable_partitioned_rewards: false, test_compare_partitioned_epoch_rewards: false, diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index 6a0b5523ff847e..23262061e4f2a3 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.72.0 +FROM solanalabs/rust:1.72.1 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index f6d1c5906e9a99..e5d80f9e04bfbf 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.72.0 +FROM rust:1.72.1 ARG NODE_MAJOR=18 diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 1baaf19fc70d1e..76f929277ba757 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -29,7 +29,7 @@ fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2023-08-25 + nightly_version=2023-09-20 fi diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 6660252e46eb4f..d11e03e6f09851 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -7134,8 +7134,8 @@ pub mod tests { let expected_transactions: Vec = entries .iter() - .cloned() .filter(|entry| !entry.is_tick()) + .cloned() .flat_map(|entry| entry.transactions) .map(|transaction| { let mut pre_balances: Vec = vec![]; @@ -8018,8 +8018,8 @@ pub mod tests { let expected_transactions: Vec = entries .iter() - .cloned() .filter(|entry| !entry.is_tick()) + .cloned() .flat_map(|entry| entry.transactions) .map(|transaction| { let mut pre_balances: Vec = vec![]; @@ -8138,8 +8138,8 @@ pub mod tests { let expected_transactions: Vec = entries .iter() - .cloned() .filter(|entry| !entry.is_tick()) + .cloned() .flat_map(|entry| 
entry.transactions) .map(|transaction| { let mut pre_balances: Vec = vec![]; diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 2bd6bfca2f946c..7a58d78750446e 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -716,8 +716,8 @@ pub mod tests { blockstore.insert_shreds(shreds, None, false).unwrap(); let signature = entries .iter() - .cloned() .filter(|entry| !entry.is_tick()) + .cloned() .flat_map(|entry| entry.transactions) .map(|transaction| transaction.signatures[0]) .collect::>()[0]; @@ -759,8 +759,8 @@ pub mod tests { blockstore.insert_shreds(shreds, None, false).unwrap(); let signature: Signature = entries .iter() - .cloned() .filter(|entry| !entry.is_tick()) + .cloned() .flat_map(|entry| entry.transactions) .map(|transaction| transaction.signatures[0]) .collect::>()[0]; diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 137477be1cc5f2..658fdf0de3b04e 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -4169,8 +4169,8 @@ fn find_latest_replayed_slot_from_ledger( .filter_map(|(s, _)| if s > latest_slot { Some(s) } else { None }) .collect(); - for new_latest_slot in new_latest_slots { - latest_slot = new_latest_slot; + if let Some(new_latest_slot) = new_latest_slots.first() { + latest_slot = *new_latest_slot; info!("Checking latest_slot {}", latest_slot); // Wait for the slot to be fully received by the validator loop { @@ -5293,30 +5293,23 @@ fn test_duplicate_shreds_switch_failure() { // 2) Wait for a duplicate slot to land on both validators and for the target switch // fork validator to get another version of the slot. 
Also ensure all versions of // the block are playable - let dup_slot; - loop { - dup_slot = duplicate_slot_receiver - .recv_timeout(Duration::from_millis(30_000)) - .expect("Duplicate leader failed to make a duplicate slot in allotted time"); + let dup_slot = duplicate_slot_receiver + .recv_timeout(Duration::from_millis(30_000)) + .expect("Duplicate leader failed to make a duplicate slot in allotted time"); - // Make sure both validators received and replay the complete blocks - let dup_frozen_hash = wait_for_duplicate_fork_frozen( - &cluster.ledger_path(&duplicate_fork_validator1_pubkey), - dup_slot, - ); - let original_frozen_hash = wait_for_duplicate_fork_frozen( - &cluster.ledger_path(&duplicate_leader_validator_pubkey), - dup_slot, - ); - if original_frozen_hash != dup_frozen_hash { - break; - } else { - panic!( - "Duplicate leader and partition target got same hash: {}", - original_frozen_hash - ); - } - } + // Make sure both validators received and replay the complete blocks + let dup_frozen_hash = wait_for_duplicate_fork_frozen( + &cluster.ledger_path(&duplicate_fork_validator1_pubkey), + dup_slot, + ); + let original_frozen_hash = wait_for_duplicate_fork_frozen( + &cluster.ledger_path(&duplicate_leader_validator_pubkey), + dup_slot, + ); + assert_ne!( + original_frozen_hash, dup_frozen_hash, + "Duplicate leader and partition target got same hash: {original_frozen_hash}", + ); // 3) Force `duplicate_fork_validator1_pubkey` to see a duplicate proof info!("Waiting for duplicate proof for slot: {}", dup_slot); diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 743f7cd993d6a8..7eb23c42c2af84 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.72.0" +channel = "1.72.1" diff --git a/sdk/program/src/vote/state/vote_state_versions.rs b/sdk/program/src/vote/state/vote_state_versions.rs index dc7c0e9e681e1e..7c4939d36928bc 100644 --- a/sdk/program/src/vote/state/vote_state_versions.rs +++ 
b/sdk/program/src/vote/state/vote_state_versions.rs @@ -21,30 +21,20 @@ impl VoteStateVersions { VoteState { node_pubkey: state.node_pubkey, - /// the signer for withdrawals authorized_withdrawer: state.authorized_withdrawer, - /// percentage (0-100) that represents what part of a rewards - /// payout should be given to this VoteAccount commission: state.commission, votes: Self::landed_votes_from_lockouts(state.votes), root_slot: state.root_slot, - /// the signer for vote transactions authorized_voters, - /// history of prior authorized voters and the epochs for which - /// they were set, the bottom end of the range is inclusive, - /// the top of the range is exclusive prior_voters: CircBuf::default(), - /// history of how many credits earned by the end of each epoch - /// each tuple is (Epoch, credits, prev_credits) epoch_credits: state.epoch_credits.clone(), - /// most recent timestamp submitted with a vote last_timestamp: state.last_timestamp.clone(), } } From 357eabd5f3ab4461faa1daf9f3f74245d804cb84 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 21 Sep 2023 02:06:00 -0400 Subject: [PATCH 145/407] Move LedgerColumn delete() implementation to less restrictive traits (#33304) --- ledger/src/blockstore_db.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index f2c9c45c43a186..25f68b8ef65381 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -1438,6 +1438,23 @@ where pub fn get_int_property(&self, name: &'static std::ffi::CStr) -> Result { self.backend.get_int_property_cf(self.handle(), name) } + + pub fn delete(&self, key: C::Index) -> Result<()> { + let is_perf_enabled = maybe_enable_rocksdb_perf( + self.column_options.rocks_perf_sample_interval, + &self.write_perf_status, + ); + let result = self.backend.delete_cf(self.handle(), &C::key(key)); + if let Some(op_start_instant) = is_perf_enabled { + 
report_rocksdb_write_perf( + C::NAME, + "delete", + &op_start_instant.elapsed(), + &self.column_options, + ); + } + result + } } impl LedgerColumn @@ -1521,23 +1538,6 @@ where } result } - - pub fn delete(&self, key: C::Index) -> Result<()> { - let is_perf_enabled = maybe_enable_rocksdb_perf( - self.column_options.rocks_perf_sample_interval, - &self.write_perf_status, - ); - let result = self.backend.delete_cf(self.handle(), &C::key(key)); - if let Some(op_start_instant) = is_perf_enabled { - report_rocksdb_write_perf( - C::NAME, - "delete", - &op_start_instant.elapsed(), - &self.column_options, - ); - } - result - } } impl LedgerColumn From 1fc4264a1c3e513e6fb15c44214cc399ac553e37 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 21 Sep 2023 06:47:28 -0700 Subject: [PATCH 146/407] add bucket::load_on_restart (#33328) --- bucket_map/src/bucket_storage.rs | 163 +++++++++++++++++++++++++++++-- 1 file changed, 157 insertions(+), 6 deletions(-) diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index c331f6b78c91c0..c81c6a1a7a3444 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -6,6 +6,7 @@ use { std::{ fs::{remove_file, OpenOptions}, io::{Seek, SeekFrom, Write}, + num::NonZeroU64, path::{Path, PathBuf}, sync::{ atomic::{AtomicU64, Ordering}, @@ -218,6 +219,37 @@ impl BucketStorage { offset } + // temporary tag + #[allow(dead_code)] + /// load and mmap the file that is this disk bucket if possible + pub(crate) fn load_on_restart( + path: PathBuf, + elem_size: NonZeroU64, + max_search: MaxSearch, + stats: Arc, + count: Arc, + ) -> Option { + let offset = Self::get_offset_to_first_data(); + let num_elems = std::fs::metadata(&path) + .ok() + .map(|metadata| metadata.len().saturating_sub(offset) / elem_size)?; + if num_elems == 0 { + return None; + } + let mmap = Self::map_open_file(&path, false, 0, &stats)?; + Some(Self { + path, + mmap, + cell_size: elem_size.into(), + count, + stats, 
+ max_search, + contents: O::new(Capacity::Actual(num_elems)), + // since we loaded it, it persisted from last time, so we obviously want to keep it present disk. + delete_file_on_drop: false, + }) + } + pub(crate) fn copying_entry(&mut self, ix_new: u64, other: &Self, ix_old: u64) { let start = self.get_start_offset_with_header(ix_new); let start_old = other.get_start_offset_with_header(ix_old); @@ -523,13 +555,14 @@ mod test { let paths: Vec = vec![tmpdir.path().to_path_buf()]; assert!(!paths.is_empty()); + let drives = Arc::new(paths); + let num_elems = 1; + let elem_size = std::mem::size_of::>() as u64; + let max_search = 1; + let stats = Arc::default(); + let count = Arc::default(); let mut storage = BucketStorage::>::new( - Arc::new(paths), - 1, - std::mem::size_of::>() as u64, - 1, - Arc::default(), - Arc::default(), + drives, num_elems, elem_size, max_search, stats, count, ) .0; let ix = 0; @@ -547,6 +580,124 @@ mod test { assert!(storage.is_free(ix)); } + #[test] + fn test_load_on_restart_failures() { + let tmpdir = tempdir().unwrap(); + let paths: Vec = vec![tmpdir.path().to_path_buf()]; + assert!(!paths.is_empty()); + let elem_size = std::mem::size_of::>() as u64; + let max_search = 1; + let stats = Arc::new(BucketStats::default()); + let count = Arc::new(AtomicU64::default()); + // file doesn't exist + assert!(BucketStorage::>::load_on_restart( + PathBuf::from(tmpdir.path()), + NonZeroU64::new(elem_size).unwrap(), + max_search, + stats.clone(), + count.clone(), + ) + .is_none()); + solana_logger::setup(); + for len in [0, 1, 47, 48, 49, 4097] { + // create a zero len file. That will fail to load since it is too small. 
+ let path = tmpdir.path().join("small"); + let mut file = OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(path.clone()) + .unwrap(); + _ = file.write_all(&vec![1u8; len]); + drop(file); + assert_eq!(std::fs::metadata(&path).unwrap().len(), len as u64); + let result = BucketStorage::>::load_on_restart( + path, + NonZeroU64::new(elem_size).unwrap(), + max_search, + stats.clone(), + count.clone(), + ); + if let Some(result) = result.as_ref() { + assert_eq!(result.capacity() as usize, len / elem_size as usize); + assert_eq!( + result.capacity_bytes() as usize, + len / elem_size as usize * elem_size as usize + ); + } + assert_eq!(result.is_none(), len < elem_size as usize, "{len}"); + } + } + + #[test] + fn test_load_on_restart() { + for request in [Some(7), None] { + let tmpdir = tempdir().unwrap(); + let paths: Vec = vec![tmpdir.path().to_path_buf()]; + assert!(!paths.is_empty()); + let drives = Arc::new(paths); + let num_elems = 1; + let elem_size = std::mem::size_of::>() as u64; + let max_search = 1; + let stats = Arc::new(BucketStats::default()); + let count = Arc::new(AtomicU64::default()); + let mut storage = if let Some(actual_elems) = request { + BucketStorage::>::new_with_capacity( + drives, + num_elems, + elem_size, + Capacity::Actual(actual_elems), + max_search, + stats.clone(), + count.clone(), + ) + .0 + } else { + BucketStorage::>::new( + drives, + num_elems, + elem_size, + max_search, + stats.clone(), + count.clone(), + ) + .0 + }; + let expected_capacity = storage.capacity(); + (0..num_elems).for_each(|ix| { + assert!(storage.is_free(ix)); + assert!(storage.occupy(ix, false).is_ok()); + }); + storage.delete_file_on_drop = false; + let len = storage.mmap.len(); + (0..expected_capacity as usize).for_each(|i| { + storage.mmap[i] = (i % 256) as u8; + }); + // close storage + let path = storage.path.clone(); + drop(storage); + + // re load and remap storage file + let storage = BucketStorage::>::load_on_restart( + path, + 
NonZeroU64::new(elem_size).unwrap(), + max_search, + stats, + count, + ) + .unwrap(); + assert_eq!(storage.capacity(), expected_capacity); + assert_eq!(len, storage.mmap.len()); + (0..expected_capacity as usize).for_each(|i| { + assert_eq!(storage.mmap[i], (i % 256) as u8); + }); + (0..num_elems).for_each(|ix| { + // all should be marked as free + assert!(storage.is_free(ix)); + }); + } + } + #[test] #[should_panic] fn test_header_bad_size() { From a2ad8203094b6f56096df39460cf5dea0d411356 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Thu, 21 Sep 2023 09:24:47 -0500 Subject: [PATCH 147/407] compute_budget heap_size does not have to be optional (#33313) --- program-runtime/src/compute_budget.rs | 16 ++++++++-------- programs/bpf_loader/src/lib.rs | 5 +---- programs/loader-v4/src/lib.rs | 8 +++----- runtime/src/bank/tests.rs | 6 +++--- 4 files changed, 15 insertions(+), 20 deletions(-) diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index a1272cf1707c14..6fa9cda02b7228 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -101,8 +101,8 @@ pub struct ComputeBudget { /// Number of compute units consumed for a multiscalar multiplication (msm) of ristretto points. /// The total cost is calculated as `msm_base_cost + (length - 1) * msm_incremental_cost`. 
pub curve25519_ristretto_msm_incremental_cost: u64, - /// Optional program heap region size, if `None` then loader default - pub heap_size: Option, + /// program heap region size, default: solana_sdk::entrypoint::HEAP_LENGTH + pub heap_size: usize, /// Number of compute units per additional 32k heap above the default (~.5 /// us per 32k at 15 units/us rounded up) pub heap_cost: u64, @@ -171,7 +171,7 @@ impl ComputeBudget { curve25519_ristretto_multiply_cost: 2_208, curve25519_ristretto_msm_base_cost: 2303, curve25519_ristretto_msm_incremental_cost: 788, - heap_size: None, + heap_size: solana_sdk::entrypoint::HEAP_LENGTH, heap_cost: 8, mem_op_base_cost: 10, alt_bn128_addition_cost: 334, @@ -267,7 +267,7 @@ impl ComputeBudget { InstructionError::InvalidInstructionData, )); } - self.heap_size = Some(bytes as usize); + self.heap_size = bytes as usize; } let compute_unit_limit = updated_compute_unit_limit @@ -467,7 +467,7 @@ mod tests { Ok(PrioritizationFeeDetails::default()), ComputeBudget { compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: Some(40 * 1024), + heap_size: 40 * 1024, ..ComputeBudget::default() } ); @@ -512,7 +512,7 @@ mod tests { Ok(PrioritizationFeeDetails::default()), ComputeBudget { compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: Some(MAX_HEAP_FRAME_BYTES as usize), + heap_size: MAX_HEAP_FRAME_BYTES as usize, ..ComputeBudget::default() } ); @@ -562,7 +562,7 @@ mod tests { )), ComputeBudget { compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - heap_size: Some(MAX_HEAP_FRAME_BYTES as usize), + heap_size: MAX_HEAP_FRAME_BYTES as usize, ..ComputeBudget::default() } ); @@ -580,7 +580,7 @@ mod tests { )), ComputeBudget { compute_unit_limit: 1, - heap_size: Some(MAX_HEAP_FRAME_BYTES as usize), + heap_size: MAX_HEAP_FRAME_BYTES as usize, ..ComputeBudget::default() } ); diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 9a91286327bc3b..ae585d8f9582f2 100644 --- 
a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -262,10 +262,7 @@ macro_rules! create_vm { ($vm:ident, $program:expr, $regions:expr, $accounts_metadata:expr, $invoke_context:expr $(,)?) => { let invoke_context = &*$invoke_context; let stack_size = $program.get_config().stack_size(); - let heap_size = invoke_context - .get_compute_budget() - .heap_size - .unwrap_or(solana_sdk::entrypoint::HEAP_LENGTH); + let heap_size = invoke_context.get_compute_budget().heap_size; let round_up_heap_size = invoke_context .feature_set .is_active(&solana_sdk::feature_set::round_up_heap_size::id()); diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 3ea4f60f70d32e..c22d95856f157e 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -18,7 +18,7 @@ use { vm::{BuiltinProgram, Config, ContextObject, EbpfVm, ProgramResult}, }, solana_sdk::{ - entrypoint::{HEAP_LENGTH, SUCCESS}, + entrypoint::SUCCESS, feature_set, instruction::InstructionError, loader_v4::{self, LoaderV4State, LoaderV4Status, DEPLOYMENT_COOLDOWN_IN_SLOTS}, @@ -113,15 +113,13 @@ pub fn create_vm<'a, 'b>( let config = program.get_config(); let sbpf_version = program.get_sbpf_version(); let compute_budget = invoke_context.get_compute_budget(); - let heap_size = compute_budget.heap_size.unwrap_or(HEAP_LENGTH); + let heap_size = compute_budget.heap_size; invoke_context.consume_checked(calculate_heap_cost( heap_size as u64, compute_budget.heap_cost, ))?; let mut stack = AlignedMemory::<{ ebpf::HOST_ALIGN }>::zero_filled(config.stack_size()); - let mut heap = AlignedMemory::<{ ebpf::HOST_ALIGN }>::zero_filled( - compute_budget.heap_size.unwrap_or(HEAP_LENGTH), - ); + let mut heap = AlignedMemory::<{ ebpf::HOST_ALIGN }>::zero_filled(compute_budget.heap_size); let stack_len = stack.len(); let regions: Vec = vec![ program.get_ro_region(), diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 299ece5fc8998c..dd4b51208306d3 100644 --- 
a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -9750,7 +9750,7 @@ fn test_compute_budget_program_noop() { *compute_budget, ComputeBudget { compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: Some(48 * 1024), + heap_size: 48 * 1024, ..ComputeBudget::default() } ); @@ -9793,7 +9793,7 @@ fn test_compute_request_instruction() { *compute_budget, ComputeBudget { compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: Some(48 * 1024), + heap_size: 48 * 1024, ..ComputeBudget::default() } ); @@ -9843,7 +9843,7 @@ fn test_failed_compute_request_instruction() { *compute_budget, ComputeBudget { compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: Some(48 * 1024), + heap_size: 48 * 1024, ..ComputeBudget::default() } ); From cd9b2cc20bb1e898fed037d6145b9e7e40fe0118 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 23:17:58 +0800 Subject: [PATCH 148/407] build(deps): bump quinn-proto from 0.10.4 to 0.10.5 (#33345) * build(deps): bump quinn-proto from 0.10.4 to 0.10.5 Bumps [quinn-proto](https://github.com/quinn-rs/quinn) from 0.10.4 to 0.10.5. - [Release notes](https://github.com/quinn-rs/quinn/releases) - [Commits](https://github.com/quinn-rs/quinn/commits) --- updated-dependencies: - dependency-name: quinn-proto dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 30729d6ab7dc08..c11fa5a36de011 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4089,9 +4089,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13f81c9a9d574310b8351f8666f5a93ac3b0069c45c28ad52c10291389a7cf9" +checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand 0.8.5", diff --git a/Cargo.toml b/Cargo.toml index 58cb4f83055604..7d701e693c595c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -266,7 +266,7 @@ protobuf-src = "1.1.0" qstring = "0.7.2" qualifier_attr = { version = "0.2.2", default-features = false } quinn = "0.10.2" -quinn-proto = "0.10.4" +quinn-proto = "0.10.5" quote = "1.0" rand = "0.8.5" rand_chacha = "0.3.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f0d50d53911c8a..cf2f52c7cfa10f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3602,9 +3602,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13f81c9a9d574310b8351f8666f5a93ac3b0069c45c28ad52c10291389a7cf9" +checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand 0.8.5", From 3e8ccbe19626ec02d916fac3a26211b4044b2c65 Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 21 Sep 2023 09:59:51 -0600 Subject: [PATCH 149/407] Add comment to OptimisticallyConfirmedBankTracker (#33238) Add comment --- 
rpc/src/optimistically_confirmed_bank_tracker.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs index 3179e570920383..0cd37fb8a3f3ce 100644 --- a/rpc/src/optimistically_confirmed_bank_tracker.rs +++ b/rpc/src/optimistically_confirmed_bank_tracker.rs @@ -314,6 +314,9 @@ impl OptimisticallyConfirmedBankTracker { slot, timestamp: timestamp(), }); + // NOTE: replay of `slot` may or may not be complete. Therefore, most new + // functionality to be triggered on optimistic confirmation should go in + // `notify_or_defer()` under the `bank.is_frozen()` case instead of here. } BankNotification::Frozen(bank) => { let frozen_slot = bank.slot(); From 3b1cbaec7284fdcbbfa700b89540bf5dfe3b53d9 Mon Sep 17 00:00:00 2001 From: sakridge Date: Thu, 21 Sep 2023 13:23:37 -0400 Subject: [PATCH 150/407] Add csv output option to stake and vote account rewards (#32360) * Add csv option to vote-account * Add CSV format to solana stake command Csv rename --- cli-output/src/cli_output.rs | 66 +++++++++++++++++++++++++++++------- cli/src/cli.rs | 6 ++++ cli/src/cluster_query.rs | 2 ++ cli/src/stake.rs | 14 +++++++- cli/src/vote.rs | 12 ++++++- 5 files changed, 86 insertions(+), 14 deletions(-) diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 6fc394f6709530..daf522c60055f4 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -10,7 +10,7 @@ use { QuietDisplay, VerboseDisplay, }, base64::{prelude::BASE64_STANDARD, Engine}, - chrono::{Local, TimeZone}, + chrono::{Local, TimeZone, Utc}, clap::ArgMatches, console::{style, Emoji}, inflector::cases::titlecase::to_title_case, @@ -1140,9 +1140,36 @@ fn show_votes_and_credits( Ok(()) } +enum Format { + Csv, + Human, +} + +macro_rules! 
format_as { + ($target:expr, $fmt1:expr, $fmt2:expr, $which_fmt:expr, $($arg:tt)*) => { + match $which_fmt { + Format::Csv => { + writeln!( + $target, + $fmt1, + $($arg)* + ) + }, + Format::Human => { + writeln!( + $target, + $fmt2, + $($arg)* + ) + } + } + }; +} + fn show_epoch_rewards( f: &mut fmt::Formatter, epoch_rewards: &Option>, + use_csv: bool, ) -> fmt::Result { if let Some(epoch_rewards) = epoch_rewards { if epoch_rewards.is_empty() { @@ -1150,9 +1177,12 @@ fn show_epoch_rewards( } writeln!(f, "Epoch Rewards:")?; - writeln!( + let fmt = if use_csv { Format::Csv } else { Format::Human }; + format_as!( f, + "{},{},{},{},{},{},{},{}", " {:<6} {:<11} {:<26} {:<18} {:<18} {:>14} {:>14} {:>10}", + fmt, "Epoch", "Reward Slot", "Time", @@ -1160,15 +1190,17 @@ fn show_epoch_rewards( "New Balance", "Percent Change", "APR", - "Commission" + "Commission", )?; for reward in epoch_rewards { - writeln!( + format_as!( f, - " {:<6} {:<11} {:<26} ◎{:<17.9} ◎{:<17.9} {:>13.6}% {:>14} {:>10}", + "{},{},{},{},{},{}%,{},{}", + " {:<6} {:<11} {:<26} ◎{:<17.9} ◎{:<17.9} {:>13.3}% {:>14} {:>10}", + fmt, reward.epoch, reward.effective_slot, - Local.timestamp_opt(reward.block_time, 0).unwrap(), + Utc.timestamp_opt(reward.block_time, 0).unwrap(), lamports_to_sol(reward.amount), lamports_to_sol(reward.post_balance), reward.percent_change, @@ -1219,6 +1251,8 @@ pub struct CliStakeState { pub deactivating_stake: Option, #[serde(skip_serializing_if = "Option::is_none")] pub epoch_rewards: Option>, + #[serde(skip_serializing)] + pub use_csv: bool, } impl QuietDisplay for CliStakeState {} @@ -1373,7 +1407,7 @@ impl fmt::Display for CliStakeState { } show_authorized(f, self.authorized.as_ref().unwrap())?; show_lockup(f, self.lockup.as_ref())?; - show_epoch_rewards(f, &self.epoch_rewards)? + show_epoch_rewards(f, &self.epoch_rewards, self.use_csv)? 
} } Ok(()) @@ -1562,6 +1596,8 @@ pub struct CliVoteAccount { pub epoch_voting_history: Vec, #[serde(skip_serializing)] pub use_lamports_unit: bool, + #[serde(skip_serializing)] + pub use_csv: bool, #[serde(skip_serializing_if = "Option::is_none")] pub epoch_rewards: Option>, } @@ -1596,7 +1632,7 @@ impl fmt::Display for CliVoteAccount { self.recent_timestamp.slot )?; show_votes_and_credits(f, &self.votes, &self.epoch_voting_history)?; - show_epoch_rewards(f, &self.epoch_rewards)?; + show_epoch_rewards(f, &self.epoch_rewards, self.use_csv)?; Ok(()) } } @@ -3228,7 +3264,7 @@ mod tests { effective_slot: 100, epoch: 1, amount: 10, - block_time: UnixTimestamp::default(), + block_time: 0, apr: Some(10.0), }, CliEpochReward { @@ -3238,19 +3274,25 @@ mod tests { effective_slot: 200, epoch: 2, amount: 12, - block_time: UnixTimestamp::default(), + block_time: 1_000_000, apr: Some(13.0), }, ]; - let c = CliVoteAccount { + let mut c = CliVoteAccount { account_balance: 10000, validator_identity: Pubkey::default().to_string(), epoch_rewards: Some(epoch_rewards), + recent_timestamp: BlockTimestamp::default(), ..CliVoteAccount::default() }; let s = format!("{c}"); - assert!(!s.is_empty()); + assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: {}\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\n Epoch Reward Slot Time Amount New Balance Percent Change APR Commission\n 1 100 1970-01-01 00:00:00 UTC ◎0.000000010 ◎0.000000100 11.000% 10.00% 1%\n 2 200 1970-01-12 13:46:40 UTC ◎0.000000012 ◎0.000000100 11.000% 13.00% 1%\n"); + println!("{s}"); + + c.use_csv = true; + let s = format!("{c}"); + assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: {}\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch 
Rewards:\nEpoch,Reward Slot,Time,Amount,New Balance,Percent Change,APR,Commission\n1,100,1970-01-01 00:00:00 UTC,0.00000001,0.0000001,11%,10.00%,1%\n2,200,1970-01-12 13:46:40 UTC,0.000000012,0.0000001,11%,13.00%,1%\n"); println!("{s}"); } } diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 17a35f7da0a2ab..8252e13bbbd6a2 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -261,6 +261,7 @@ pub enum CliCommand { pubkey: Pubkey, use_lamports_unit: bool, with_rewards: Option, + use_csv: bool, }, StakeAuthorize { stake_account_pubkey: Pubkey, @@ -333,6 +334,7 @@ pub enum CliCommand { ShowVoteAccount { pubkey: Pubkey, use_lamports_unit: bool, + use_csv: bool, with_rewards: Option, }, WithdrawFromVoteAccount { @@ -1277,12 +1279,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { pubkey: stake_account_pubkey, use_lamports_unit, with_rewards, + use_csv, } => process_show_stake_account( &rpc_client, config, stake_account_pubkey, *use_lamports_unit, *with_rewards, + *use_csv, ), CliCommand::ShowStakeHistory { use_lamports_unit, @@ -1441,12 +1445,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::ShowVoteAccount { pubkey: vote_account_pubkey, use_lamports_unit, + use_csv, with_rewards, } => process_show_vote_account( &rpc_client, config, vote_account_pubkey, *use_lamports_unit, + *use_csv, *with_rewards, ), CliCommand::WithdrawFromVoteAccount { diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 03949f7a7bab23..0470cf761ad95d 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -1821,6 +1821,7 @@ pub fn process_show_stakes( &stake_history, &clock, new_rate_activation_epoch, + false, ), }); } @@ -1840,6 +1841,7 @@ pub fn process_show_stakes( &stake_history, &clock, new_rate_activation_epoch, + false, ), }); } diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 04101397120142..96c1b50b3576e6 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -706,12 +706,18 @@ impl StakeSubCommands for 
App<'_, '_> { .takes_value(false) .help("Display inflation rewards"), ) + .arg( + Arg::with_name("csv") + .long("csv") + .takes_value(false) + .help("Format stake account data in csv") + ) .arg( Arg::with_name("num_rewards_epochs") .long("num-rewards-epochs") .takes_value(true) .value_name("NUM") - .validator(|s| is_within_range(s, 1..=10)) + .validator(|s| is_within_range(s, 1..=50)) .default_value_if("with_rewards", None, "1") .requires("with_rewards") .help("Display rewards for NUM recent epochs, max 10 [default: latest epoch only]"), @@ -1293,6 +1299,7 @@ pub fn parse_show_stake_account( let stake_account_pubkey = pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap(); let use_lamports_unit = matches.is_present("lamports"); + let use_csv = matches.is_present("csv"); let with_rewards = if matches.is_present("with_rewards") { Some(value_of(matches, "num_rewards_epochs").unwrap()) } else { @@ -1303,6 +1310,7 @@ pub fn parse_show_stake_account( pubkey: stake_account_pubkey, use_lamports_unit, with_rewards, + use_csv, }, signers: vec![], }) @@ -2226,6 +2234,7 @@ pub fn build_stake_state( stake_history: &StakeHistory, clock: &Clock, new_rate_activation_epoch: Option, + use_csv: bool, ) -> CliStakeState { match stake_state { StakeStateV2::Stake( @@ -2282,6 +2291,7 @@ pub fn build_stake_state( active_stake: u64_some_if_not_zero(effective), activating_stake: u64_some_if_not_zero(activating), deactivating_stake: u64_some_if_not_zero(deactivating), + use_csv, ..CliStakeState::default() } } @@ -2448,6 +2458,7 @@ pub fn process_show_stake_account( stake_account_address: &Pubkey, use_lamports_unit: bool, with_rewards: Option, + use_csv: bool, ) -> ProcessResult { let stake_account = rpc_client.get_account(stake_account_address)?; if stake_account.owner != stake::program::id() { @@ -2478,6 +2489,7 @@ pub fn process_show_stake_account( &stake_history, &clock, new_rate_activation_epoch, + use_csv, ); if state.stake_type == CliStakeType::Stake && 
state.activation_epoch.is_some() { diff --git a/cli/src/vote.rs b/cli/src/vote.rs index 6c98e49c3bff42..e4456fe1d2355c 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -333,12 +333,18 @@ impl VoteSubCommands for App<'_, '_> { .takes_value(false) .help("Display inflation rewards"), ) + .arg( + Arg::with_name("csv") + .long("csv") + .takes_value(false) + .help("Format rewards in a CSV table"), + ) .arg( Arg::with_name("num_rewards_epochs") .long("num-rewards-epochs") .takes_value(true) .value_name("NUM") - .validator(|s| is_within_range(s, 1..=10)) + .validator(|s| is_within_range(s, 1..=50)) .default_value_if("with_rewards", None, "1") .requires("with_rewards") .help("Display rewards for NUM recent epochs, max 10 [default: latest epoch only]"), @@ -648,6 +654,7 @@ pub fn parse_vote_get_account_command( let vote_account_pubkey = pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap(); let use_lamports_unit = matches.is_present("lamports"); + let use_csv = matches.is_present("csv"); let with_rewards = if matches.is_present("with_rewards") { Some(value_of(matches, "num_rewards_epochs").unwrap()) } else { @@ -657,6 +664,7 @@ pub fn parse_vote_get_account_command( command: CliCommand::ShowVoteAccount { pubkey: vote_account_pubkey, use_lamports_unit, + use_csv, with_rewards, }, signers: vec![], @@ -1208,6 +1216,7 @@ pub fn process_show_vote_account( config: &CliConfig, vote_account_address: &Pubkey, use_lamports_unit: bool, + use_csv: bool, with_rewards: Option, ) -> ProcessResult { let (vote_account, vote_state) = @@ -1257,6 +1266,7 @@ pub fn process_show_vote_account( votes, epoch_voting_history, use_lamports_unit, + use_csv, epoch_rewards, }; From 4d96c384a14b6ea3e052c071c4b2d6b6d220880a Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 22 Sep 2023 10:52:28 +0800 Subject: [PATCH 151/407] ci: install openssl for the Windows build (#33356) --- .github/workflows/release-artifacts.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff 
--git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index b8af200c1de6ac..e89cf87b1dc2cf 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -47,7 +47,8 @@ jobs: id: build shell: bash run: | - export OPENSSL_DIR="C:\Program Files\OpenSSL" + choco install openssl + export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64" choco install protoc export PROTOC="C:\ProgramData\chocolatey\lib\protoc\tools\bin\protoc.exe" source /tmp/env.sh From 342833312773d7aabcd64a8233585df3d8c9b5c7 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 22 Sep 2023 07:15:47 +0200 Subject: [PATCH 152/407] Switch confirmed_unrooted_slots from Vec<_> to HashSet<_> (#33311) The container is only used to check for inclusion of slots with the .contains() method. This method is O(n) on a Vec<_> but O(1) on a HashSet<_>. --- ledger/src/blockstore.rs | 47 ++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index d11e03e6f09851..b4426aa3678501 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2304,7 +2304,7 @@ impl Blockstore { fn get_transaction_status_with_counter( &self, signature: Signature, - confirmed_unrooted_slots: &[Slot], + confirmed_unrooted_slots: &HashSet, ) -> Result<(Option<(Slot, TransactionStatusMeta)>, u64)> { let mut counter = 0; let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); @@ -2348,14 +2348,14 @@ impl Blockstore { "blockstore-rpc-api", ("method", "get_rooted_transaction_status", String) ); - self.get_transaction_status(signature, &[]) + self.get_transaction_status(signature, &HashSet::default()) } /// Returns a transaction status pub fn get_transaction_status( &self, signature: Signature, - confirmed_unrooted_slots: &[Slot], + confirmed_unrooted_slots: &HashSet, ) -> Result> { datapoint_info!( "blockstore-rpc-api", @@ -2374,7 +2374,7 @@ impl 
Blockstore { "blockstore-rpc-api", ("method", "get_rooted_transaction", String) ); - self.get_transaction_with_status(signature, &[]) + self.get_transaction_with_status(signature, &HashSet::default()) } /// Returns a complete transaction @@ -2388,7 +2388,7 @@ impl Blockstore { ("method", "get_complete_transaction", String) ); let last_root = self.last_root(); - let confirmed_unrooted_slots: Vec<_> = + let confirmed_unrooted_slots: HashSet<_> = AncestorIterator::new_inclusive(highest_confirmed_slot, self) .take_while(|&slot| slot > last_root) .collect(); @@ -2398,7 +2398,7 @@ impl Blockstore { fn get_transaction_with_status( &self, signature: Signature, - confirmed_unrooted_slots: &[Slot], + confirmed_unrooted_slots: &HashSet, ) -> Result> { if let Some((slot, meta)) = self.get_transaction_status(signature, confirmed_unrooted_slots)? @@ -2575,9 +2575,10 @@ impl Blockstore { ("method", "get_confirmed_signatures_for_address2", String) ); let last_root = self.last_root(); - let confirmed_unrooted_slots: Vec<_> = AncestorIterator::new_inclusive(highest_slot, self) - .take_while(|&slot| slot > last_root) - .collect(); + let confirmed_unrooted_slots: HashSet<_> = + AncestorIterator::new_inclusive(highest_slot, self) + .take_while(|&slot| slot > last_root) + .collect(); // Figure the `slot` to start listing signatures at, based on the ledger location of the // `before` signature if present. 
Also generate a HashSet of signatures that should @@ -7767,7 +7768,7 @@ pub mod tests { // Signature exists, root found in index 0 if let (Some((slot, _status)), counter) = blockstore - .get_transaction_status_with_counter(signature2, &[]) + .get_transaction_status_with_counter(signature2, &[].into()) .unwrap() { assert_eq!(slot, 2); @@ -7776,7 +7777,7 @@ pub mod tests { // Signature exists, root found although not required if let (Some((slot, _status)), counter) = blockstore - .get_transaction_status_with_counter(signature2, &[3]) + .get_transaction_status_with_counter(signature2, &[3].into()) .unwrap() { assert_eq!(slot, 2); @@ -7785,7 +7786,7 @@ pub mod tests { // Signature exists, root found in index 1 if let (Some((slot, _status)), counter) = blockstore - .get_transaction_status_with_counter(signature4, &[]) + .get_transaction_status_with_counter(signature4, &[].into()) .unwrap() { assert_eq!(slot, 2); @@ -7794,7 +7795,7 @@ pub mod tests { // Signature exists, root found although not required, in index 1 if let (Some((slot, _status)), counter) = blockstore - .get_transaction_status_with_counter(signature4, &[3]) + .get_transaction_status_with_counter(signature4, &[3].into()) .unwrap() { assert_eq!(slot, 2); @@ -7803,14 +7804,14 @@ pub mod tests { // Signature exists, no root found let (status, counter) = blockstore - .get_transaction_status_with_counter(signature5, &[]) + .get_transaction_status_with_counter(signature5, &[].into()) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 6); // Signature exists, root not required if let (Some((slot, _status)), counter) = blockstore - .get_transaction_status_with_counter(signature5, &[3]) + .get_transaction_status_with_counter(signature5, &[3].into()) .unwrap() { assert_eq!(slot, 3); @@ -7819,39 +7820,39 @@ pub mod tests { // Signature does not exist, smaller than existing entries let (status, counter) = blockstore - .get_transaction_status_with_counter(signature1, &[]) + 
.get_transaction_status_with_counter(signature1, &[].into()) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); let (status, counter) = blockstore - .get_transaction_status_with_counter(signature1, &[3]) + .get_transaction_status_with_counter(signature1, &[3].into()) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); // Signature does not exist, between existing entries let (status, counter) = blockstore - .get_transaction_status_with_counter(signature3, &[]) + .get_transaction_status_with_counter(signature3, &[].into()) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); let (status, counter) = blockstore - .get_transaction_status_with_counter(signature3, &[3]) + .get_transaction_status_with_counter(signature3, &[3].into()) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); // Signature does not exist, larger than existing entries let (status, counter) = blockstore - .get_transaction_status_with_counter(signature7, &[]) + .get_transaction_status_with_counter(signature7, &[].into()) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); let (status, counter) = blockstore - .get_transaction_status_with_counter(signature7, &[3]) + .get_transaction_status_with_counter(signature7, &[3].into()) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); @@ -7934,7 +7935,7 @@ pub mod tests { let check_for_missing = || { ( blockstore - .get_transaction_status_with_counter(signature1, &[]) + .get_transaction_status_with_counter(signature1, &[].into()) .unwrap() .0 .is_none(), @@ -7952,7 +7953,7 @@ pub mod tests { let assert_existing_always = || { let are_existing_always = ( blockstore - .get_transaction_status_with_counter(signature2, &[]) + .get_transaction_status_with_counter(signature2, &[].into()) .unwrap() .0 .is_some(), From 97d53be16e61d2a4c86eb921c51290d20debdaec Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 22 Sep 2023 06:49:55 -0700 Subject: [PATCH 153/407] add Restart structs for disk index 
(#33361) --- bucket_map/src/bucket_map.rs | 10 +- bucket_map/src/lib.rs | 2 +- bucket_map/src/restart.rs | 262 +++++++++++++++++++++++++++++++++++ 3 files changed, 270 insertions(+), 4 deletions(-) create mode 100644 bucket_map/src/restart.rs diff --git a/bucket_map/src/bucket_map.rs b/bucket_map/src/bucket_map.rs index d4b70eb47cc58f..2adeefdcc33a1f 100644 --- a/bucket_map/src/bucket_map.rs +++ b/bucket_map/src/bucket_map.rs @@ -12,6 +12,9 @@ pub struct BucketMapConfig { pub max_buckets: usize, pub drives: Option>, pub max_search: Option, + /// A file with a known path where the current state of the bucket files on disk is saved as the index is running. + /// This file can be used to restore the index files as they existed prior to the process being stopped. + pub restart_config_file: Option, } impl BucketMapConfig { @@ -50,6 +53,9 @@ impl Debug for BucketMap { } } +// this should be <= 1 << DEFAULT_CAPACITY or we end up searching the same items over and over - probably not a big deal since it is so small anyway +pub(crate) const MAX_SEARCH_DEFAULT: MaxSearch = 32; + /// used to communicate resize necessary and current size. 
#[derive(Debug)] pub enum BucketMapError { @@ -72,9 +78,7 @@ impl BucketMap { config.max_buckets.is_power_of_two(), "Max number of buckets must be a power of two" ); - // this should be <= 1 << DEFAULT_CAPACITY or we end up searching the same items over and over - probably not a big deal since it is so small anyway - const MAX_SEARCH: MaxSearch = 32; - let max_search = config.max_search.unwrap_or(MAX_SEARCH); + let max_search = config.max_search.unwrap_or(MAX_SEARCH_DEFAULT); if let Some(drives) = config.drives.as_ref() { Self::erase_previous_drives(drives); diff --git a/bucket_map/src/lib.rs b/bucket_map/src/lib.rs index 97c29547ad2e91..a08bafe00c2cd8 100644 --- a/bucket_map/src/lib.rs +++ b/bucket_map/src/lib.rs @@ -6,6 +6,6 @@ pub mod bucket_map; mod bucket_stats; mod bucket_storage; mod index_entry; - +mod restart; pub type MaxSearch = u8; pub type RefCount = u64; diff --git a/bucket_map/src/restart.rs b/bucket_map/src/restart.rs new file mode 100644 index 00000000000000..e25ef3c44860fc --- /dev/null +++ b/bucket_map/src/restart.rs @@ -0,0 +1,262 @@ +//! Persistent info of disk index files to allow files to be reused on restart. +#![allow(dead_code)] +use { + crate::bucket_map::{BucketMapConfig, MAX_SEARCH_DEFAULT}, + memmap2::MmapMut, + std::{ + fmt::{Debug, Formatter}, + fs::{remove_file, OpenOptions}, + io::{Seek, SeekFrom, Write}, + path::{Path, PathBuf}, + sync::{Arc, Mutex}, + }, +}; + +/// written into file. Change this if expected file contents change. +const HEADER_VERSION: u64 = 1; + +/// written into file at top. +#[derive(Debug)] +#[repr(C)] +pub(crate) struct Header { + /// version of this file. Differences here indicate the file is not usable. + version: u64, + /// number of buckets these files represent. + buckets: usize, + /// u8 representing how many entries to search for during collisions. + /// If this is different, then the contents of the index file's contents are likely not as helpful. 
+ max_search: u8, + /// padding to get header to u128 aligned + _dummy: [u8; 15], +} + +#[derive(Debug)] +#[repr(C)] +pub(crate) struct OneIndexBucket { + /// disk bucket file names are random u128s + file_name: u128, + /// each bucket uses a random value to hash with pubkeys. Without this, hashing would be inconsistent between restarts. + random: u64, + /// padding to make u128 aligned + _dummy: u64, +} + +pub(crate) struct Restart { + mmap: MmapMut, +} + +#[derive(Clone, Default)] +/// keep track of mapping from a single bucket to the shared mmap file +pub(crate) struct RestartableBucket { + /// shared struct keeping track of each bucket's file + pub(crate) restart: Option>>, + /// which index self represents inside `restart` + pub(crate) index: usize, + /// path disk index file is at for startup + pub(crate) path: Option, +} + +impl RestartableBucket { + /// this bucket is now using `file_name` and `random`. + /// This gets written into the restart file so that on restart we can re-open the file and re-hash with the same random. + pub(crate) fn set_file(&self, file_name: u128, random: u64) { + if let Some(mut restart) = self.restart.as_ref().map(|restart| restart.lock().unwrap()) { + let bucket = restart.get_bucket_mut(self.index); + bucket.file_name = file_name; + bucket.random = random; + } + } + /// retreive the file_name and random that were used prior to the current restart. + /// This was written into the restart file on the prior run by `set_file`. 
+ pub(crate) fn get(&self) -> Option<(u128, u64)> { + self.restart.as_ref().map(|restart| { + let restart = restart.lock().unwrap(); + let bucket = restart.get_bucket(self.index); + (bucket.file_name, bucket.random) + }) + } +} + +impl Debug for RestartableBucket { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!( + f, + "{:?}", + &self.restart.as_ref().map(|restart| restart.lock().unwrap()) + )?; + Ok(()) + } +} + +impl Debug for Restart { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + let header = self.get_header(); + writeln!(f, "{:?}", header)?; + write!( + f, + "{:?}", + (0..header.buckets) + .map(|index| self.get_bucket(index)) + .take(10) + .collect::>() + )?; + Ok(()) + } +} + +impl Restart { + /// create a new restart file for use next time we restart on this machine + pub(crate) fn new(config: &BucketMapConfig) -> Option { + let expected_len = Self::expected_len(config.max_buckets); + + let path = config.restart_config_file.as_ref(); + let path = path?; + _ = remove_file(path); + + let mmap = Self::new_map(path, expected_len as u64).ok()?; + + let mut restart = Restart { mmap }; + let header = restart.get_header_mut(); + header.version = HEADER_VERSION; + header.buckets = config.max_buckets; + header.max_search = config.max_search.unwrap_or(MAX_SEARCH_DEFAULT); + + (0..config.max_buckets).for_each(|index| { + let bucket = restart.get_bucket_mut(index); + bucket.file_name = 0; + bucket.random = 0; + }); + + Some(restart) + } + + /// expected len of file given this many buckets + fn expected_len(max_buckets: usize) -> usize { + std::mem::size_of::
() + max_buckets * std::mem::size_of::() + } + + /// create mmap from `file` + fn new_map(file: impl AsRef, capacity: u64) -> Result { + let mut data = OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(file)?; + + // Theoretical performance optimization: write a zero to the end of + // the file so that we won't have to resize it later, which may be + // expensive. + data.seek(SeekFrom::Start(capacity - 1)).unwrap(); + data.write_all(&[0]).unwrap(); + data.rewind().unwrap(); + data.flush().unwrap(); + Ok(unsafe { MmapMut::map_mut(&data).unwrap() }) + } + + fn get_header(&self) -> &Header { + let start = 0_usize; + let end = start + std::mem::size_of::
(); + let item_slice: &[u8] = &self.mmap[start..end]; + unsafe { + let item = item_slice.as_ptr() as *const Header; + &*item + } + } + + fn get_header_mut(&mut self) -> &mut Header { + let start = 0_usize; + let end = start + std::mem::size_of::
(); + let item_slice: &[u8] = &self.mmap[start..end]; + unsafe { + let item = item_slice.as_ptr() as *mut Header; + &mut *item + } + } + + fn get_bucket(&self, index: usize) -> &OneIndexBucket { + let record_len = std::mem::size_of::(); + let start = std::mem::size_of::
() + record_len * index; + let end = start + record_len; + let item_slice: &[u8] = &self.mmap[start..end]; + unsafe { + let item = item_slice.as_ptr() as *const OneIndexBucket; + &*item + } + } + + fn get_bucket_mut(&mut self, index: usize) -> &mut OneIndexBucket { + let record_len = std::mem::size_of::(); + let start = std::mem::size_of::
() + record_len * index; + let end = start + record_len; + let item_slice: &mut [u8] = &mut self.mmap[start..end]; + unsafe { + let item = item_slice.as_mut_ptr() as *mut OneIndexBucket; + &mut *item + } + } +} + +#[cfg(test)] +mod test { + use {super::*, tempfile::tempdir}; + + #[test] + fn test_header_alignment() { + assert_eq!( + 0, + std::mem::size_of::
() % std::mem::size_of::() + ); + assert_eq!( + 0, + std::mem::size_of::() % std::mem::size_of::() + ); + } + + #[test] + fn test_restartable_bucket() { + solana_logger::setup(); + let tmpdir = tempdir().unwrap(); + let paths: Vec = vec![tmpdir.path().to_path_buf()]; + assert!(!paths.is_empty()); + let config_file = tmpdir.path().join("config"); + + let config = BucketMapConfig { + drives: Some(paths), + restart_config_file: Some(config_file), + ..BucketMapConfig::new(1 << 1) + }; + let buckets = config.max_buckets; + let restart = Arc::new(Mutex::new(Restart::new(&config).unwrap())); + (0..buckets).for_each(|bucket| { + let restartable_bucket = RestartableBucket { + restart: Some(restart.clone()), + index: bucket, + path: None, + }; + // default values + assert_eq!(restartable_bucket.get(), Some((0, 0))); + }); + (0..buckets).for_each(|bucket| { + let restartable_bucket = RestartableBucket { + restart: Some(restart.clone()), + index: bucket, + path: None, + }; + assert!(restartable_bucket.get().is_some()); + let file_name = bucket as u128; + let random = (bucket as u64 + 5) * 2; + restartable_bucket.set_file(bucket as u128, random); + assert_eq!(restartable_bucket.get(), Some((file_name, random))); + }); + (0..buckets).for_each(|bucket| { + let restartable_bucket = RestartableBucket { + restart: Some(restart.clone()), + index: bucket, + path: None, + }; + let file_name = bucket as u128; + let random = (bucket as u64 + 5) * 2; + assert_eq!(restartable_bucket.get(), Some((file_name, random))); + }); + } +} From d2464d9d7cd2100c2e89d002cb36af7fce38fa60 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 22 Sep 2023 10:17:04 -0700 Subject: [PATCH 154/407] use bytemuck for disk bucket restart (#33371) --- Cargo.lock | 1 + bucket_map/Cargo.toml | 1 + bucket_map/src/restart.rs | 63 ++++++++++++++++++++++----------------- programs/sbf/Cargo.lock | 1 + 4 files changed, 39 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
c11fa5a36de011..a3b4f81d1a8631 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5417,6 +5417,7 @@ name = "solana-bucket-map" version = "1.17.0" dependencies = [ "bv", + "bytemuck", "fs_extra", "log", "memmap2", diff --git a/bucket_map/Cargo.toml b/bucket_map/Cargo.toml index 66af30513f7cd7..a37051e5d3054b 100644 --- a/bucket_map/Cargo.toml +++ b/bucket_map/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } [dependencies] bv = { workspace = true, features = ["serde"] } +bytemuck = { workspace = true, features = ["derive"] } log = { workspace = true } memmap2 = { workspace = true } modular-bitfield = { workspace = true } diff --git a/bucket_map/src/restart.rs b/bucket_map/src/restart.rs index e25ef3c44860fc..f6539f5ab65447 100644 --- a/bucket_map/src/restart.rs +++ b/bucket_map/src/restart.rs @@ -2,6 +2,7 @@ #![allow(dead_code)] use { crate::bucket_map::{BucketMapConfig, MAX_SEARCH_DEFAULT}, + bytemuck::{Pod, Zeroable}, memmap2::MmapMut, std::{ fmt::{Debug, Formatter}, @@ -16,13 +17,13 @@ use { const HEADER_VERSION: u64 = 1; /// written into file at top. -#[derive(Debug)] +#[derive(Debug, Pod, Zeroable, Copy, Clone)] #[repr(C)] pub(crate) struct Header { /// version of this file. Differences here indicate the file is not usable. version: u64, /// number of buckets these files represent. - buckets: usize, + buckets: u64, /// u8 representing how many entries to search for during collisions. /// If this is different, then the contents of the index file's contents are likely not as helpful. max_search: u8, @@ -30,7 +31,13 @@ pub(crate) struct Header { _dummy: [u8; 15], } -#[derive(Debug)] +// In order to safely guarantee Header is Pod, it cannot have any padding. +const _: () = assert!( + std::mem::size_of::
() == std::mem::size_of::() * 2, + "Header cannot have any padding" +); + +#[derive(Debug, Pod, Zeroable, Copy, Clone)] #[repr(C)] pub(crate) struct OneIndexBucket { /// disk bucket file names are random u128s @@ -41,6 +48,12 @@ pub(crate) struct OneIndexBucket { _dummy: u64, } +// In order to safely guarantee Header is Pod, it cannot have any padding. +const _: () = assert!( + std::mem::size_of::() == std::mem::size_of::() * 2, + "Header cannot have any padding" +); + pub(crate) struct Restart { mmap: MmapMut, } @@ -96,7 +109,7 @@ impl Debug for Restart { f, "{:?}", (0..header.buckets) - .map(|index| self.get_bucket(index)) + .map(|index| self.get_bucket(index as usize)) .take(10) .collect::>() )?; @@ -118,7 +131,7 @@ impl Restart { let mut restart = Restart { mmap }; let header = restart.get_header_mut(); header.version = HEADER_VERSION; - header.buckets = config.max_buckets; + header.buckets = config.max_buckets as u64; header.max_search = config.max_search.unwrap_or(MAX_SEARCH_DEFAULT); (0..config.max_buckets).for_each(|index| { @@ -154,23 +167,13 @@ impl Restart { } fn get_header(&self) -> &Header { - let start = 0_usize; - let end = start + std::mem::size_of::
(); - let item_slice: &[u8] = &self.mmap[start..end]; - unsafe { - let item = item_slice.as_ptr() as *const Header; - &*item - } + let item_slice = &self.mmap[..std::mem::size_of::
()]; + bytemuck::from_bytes(item_slice) } fn get_header_mut(&mut self) -> &mut Header { - let start = 0_usize; - let end = start + std::mem::size_of::
(); - let item_slice: &[u8] = &self.mmap[start..end]; - unsafe { - let item = item_slice.as_ptr() as *mut Header; - &mut *item - } + let bytes = &mut self.mmap[..std::mem::size_of::
()]; + bytemuck::from_bytes_mut(bytes) } fn get_bucket(&self, index: usize) -> &OneIndexBucket { @@ -178,10 +181,7 @@ impl Restart { let start = std::mem::size_of::
() + record_len * index; let end = start + record_len; let item_slice: &[u8] = &self.mmap[start..end]; - unsafe { - let item = item_slice.as_ptr() as *const OneIndexBucket; - &*item - } + bytemuck::from_bytes(item_slice) } fn get_bucket_mut(&mut self, index: usize) -> &mut OneIndexBucket { @@ -189,10 +189,7 @@ impl Restart { let start = std::mem::size_of::
() + record_len * index; let end = start + record_len; let item_slice: &mut [u8] = &mut self.mmap[start..end]; - unsafe { - let item = item_slice.as_mut_ptr() as *mut OneIndexBucket; - &mut *item - } + bytemuck::from_bytes_mut(item_slice) } } @@ -227,6 +224,18 @@ mod test { }; let buckets = config.max_buckets; let restart = Arc::new(Mutex::new(Restart::new(&config).unwrap())); + + { + let restart = restart.lock().unwrap(); + let header = restart.get_header(); + assert_eq!(header.version, HEADER_VERSION); + assert_eq!(header.buckets, config.max_buckets as u64); + assert_eq!( + header.max_search, + config.max_search.unwrap_or(MAX_SEARCH_DEFAULT) + ); + } + (0..buckets).for_each(|bucket| { let restartable_bucket = RestartableBucket { restart: Some(restart.clone()), diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index cf2f52c7cfa10f..9945f9b4cb08a1 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4643,6 +4643,7 @@ name = "solana-bucket-map" version = "1.17.0" dependencies = [ "bv", + "bytemuck", "log", "memmap2", "modular-bitfield", From 7cf71011fd8642a3a34941344c4d8cca55354d37 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 22 Sep 2023 11:40:46 -0700 Subject: [PATCH 155/407] disk bucket: init restart path (#33375) --- accounts-db/src/bucket_map_holder.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/bucket_map_holder.rs b/accounts-db/src/bucket_map_holder.rs index 77ae98bdfe9574..c5fb8e68729b08 100644 --- a/accounts-db/src/bucket_map_holder.rs +++ b/accounts-db/src/bucket_map_holder.rs @@ -204,7 +204,15 @@ impl + Into> BucketMapHolder .unwrap_or(DEFAULT_AGE_TO_STAY_IN_CACHE); let mut bucket_config = BucketMapConfig::new(bins); - bucket_config.drives = config.as_ref().and_then(|config| config.drives.clone()); + bucket_config.drives = config.as_ref().and_then(|config| { + bucket_config.restart_config_file = config.drives.as_ref().and_then(|drives| { + drives + 
.first() + .map(|drive| drive.join("accounts_index_restart")) + }); + config.drives.clone() + }); + let mem_budget_mb = match config .as_ref() .map(|config| &config.index_limit_mb) From 5eb61ddf218cca6b341aa3ba639b56fde58711a5 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 22 Sep 2023 11:47:32 -0700 Subject: [PATCH 156/407] simple cleanup in bucket map (#33376) --- bucket_map/src/bucket.rs | 6 +++++- bucket_map/src/bucket_api.rs | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index daf04b4498ff83..f9a01ac8e591a7 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -110,7 +110,7 @@ pub struct Bucket { } impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { - pub fn new( + pub(crate) fn new( drives: Arc>, max_search: MaxSearch, stats: Arc, @@ -342,6 +342,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { /// insert as much of `entries` as possible into `index`. /// return an error if the index needs to resize. 
/// for every entry that already exists in `index`, add it (and the value already in the index) to `duplicates` + /// `reverse_sorted_entries` is (raw index (range = U64::MAX) in hash map, index in `items`) pub fn batch_insert_non_duplicates_internal( index: &mut BucketStorage>, data_buckets: &[BucketStorage], @@ -349,6 +350,9 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { reverse_sorted_entries: &mut Vec<(u64, usize)>, duplicates: &mut Vec<(usize, T)>, ) -> Result<(), BucketMapError> { + if reverse_sorted_entries.is_empty() { + return Ok(()); + } let max_search = index.max_search(); let cap = index.capacity(); let search_end = max_search.min(cap); diff --git a/bucket_map/src/bucket_api.rs b/bucket_map/src/bucket_api.rs index 4196ead7aef2b9..2ec495ab20a481 100644 --- a/bucket_map/src/bucket_api.rs +++ b/bucket_map/src/bucket_api.rs @@ -26,7 +26,7 @@ pub struct BucketApi { } impl BucketApi { - pub fn new( + pub(crate) fn new( drives: Arc>, max_search: MaxSearch, stats: Arc, From fcddeb446b34ef476a62f47fa81f22740cac8fed Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 22 Sep 2023 12:34:43 -0700 Subject: [PATCH 157/407] add disk bucket get_restart_file (#33373) * add disk bucket get_restart_file * add get_restartable_buckets --- bucket_map/src/restart.rs | 412 +++++++++++++++++++++++++++++++++++--- 1 file changed, 383 insertions(+), 29 deletions(-) diff --git a/bucket_map/src/restart.rs b/bucket_map/src/restart.rs index f6539f5ab65447..41849f94d831e4 100644 --- a/bucket_map/src/restart.rs +++ b/bucket_map/src/restart.rs @@ -5,8 +5,9 @@ use { bytemuck::{Pod, Zeroable}, memmap2::MmapMut, std::{ + collections::HashMap, fmt::{Debug, Formatter}, - fs::{remove_file, OpenOptions}, + fs::{self, remove_file, OpenOptions}, io::{Seek, SeekFrom, Write}, path::{Path, PathBuf}, sync::{Arc, Mutex}, @@ -34,7 +35,7 @@ pub(crate) struct Header { // In order to safely guarantee Header is Pod, it cannot have any padding. 
const _: () = assert!( std::mem::size_of::
() == std::mem::size_of::() * 2, - "Header cannot have any padding" + "incorrect size of header struct" ); #[derive(Debug, Pod, Zeroable, Copy, Clone)] @@ -51,7 +52,7 @@ pub(crate) struct OneIndexBucket { // In order to safely guarantee Header is Pod, it cannot have any padding. const _: () = assert!( std::mem::size_of::() == std::mem::size_of::() * 2, - "Header cannot have any padding" + "incorrect size of header struct" ); pub(crate) struct Restart { @@ -143,11 +144,99 @@ impl Restart { Some(restart) } + /// loads and mmaps restart file if it exists + /// returns None if the file doesn't exist or is incompatible or corrupt (in obvious ways) + pub(crate) fn get_restart_file(config: &BucketMapConfig) -> Option { + let path = config.restart_config_file.as_ref()?; + let metadata = std::fs::metadata(path).ok()?; + let file_len = metadata.len(); + + let expected_len = Self::expected_len(config.max_buckets); + if expected_len as u64 != file_len { + // mismatched len, so ignore this file + return None; + } + + let file = OpenOptions::new() + .read(true) + .write(true) + .create(false) + .open(path) + .ok()?; + let mmap = unsafe { MmapMut::map_mut(&file).unwrap() }; + + let restart = Restart { mmap }; + let header = restart.get_header(); + if header.version != HEADER_VERSION + || header.buckets != config.max_buckets as u64 + || header.max_search != config.max_search.unwrap_or(MAX_SEARCH_DEFAULT) + { + // file doesn't match our current configuration, so we have to restart with fresh buckets + return None; + } + + Some(restart) + } + /// expected len of file given this many buckets fn expected_len(max_buckets: usize) -> usize { std::mem::size_of::
() + max_buckets * std::mem::size_of::() } + /// return all files that matched bucket files in `drives` + /// matching files will be parsable as u128 + fn get_all_possible_index_files_in_drives(drives: &[PathBuf]) -> HashMap { + let mut result = HashMap::default(); + drives.iter().for_each(|drive| { + if drive.is_dir() { + let dir = fs::read_dir(drive); + if let Ok(dir) = dir { + for entry in dir.flatten() { + if let Some(name) = entry.path().file_name() { + if let Some(id) = name.to_str().and_then(|str| str.parse::().ok()) + { + result.insert(id, entry.path()); + } + } + } + } + } + }); + result + } + + /// get one `RestartableBucket` for each bucket. + /// If a potentially reusable file exists, then put that file's path in `RestartableBucket` for that bucket. + /// Delete all files that cannot possibly be re-used. + pub(crate) fn get_restartable_buckets( + restart: Option<&Arc>>, + drives: &Arc>, + num_buckets: usize, + ) -> Vec { + let mut paths = Self::get_all_possible_index_files_in_drives(drives); + let results = (0..num_buckets) + .map(|index| { + let path = restart.and_then(|restart| { + let restart = restart.lock().unwrap(); + let id = restart.get_bucket(index).file_name; + paths.remove(&id) + }); + RestartableBucket { + restart: restart.map(Arc::clone), + index, + path, + } + }) + .collect(); + + paths.into_iter().for_each(|path| { + // delete any left over files that we won't be using + _ = fs::remove_file(path.1); + }); + + results + } + /// create mmap from `file` fn new_map(file: impl AsRef, capacity: u64) -> Result { let mut data = OpenOptions::new() @@ -156,12 +245,14 @@ impl Restart { .create(true) .open(file)?; - // Theoretical performance optimization: write a zero to the end of - // the file so that we won't have to resize it later, which may be - // expensive. 
- data.seek(SeekFrom::Start(capacity - 1)).unwrap(); - data.write_all(&[0]).unwrap(); - data.rewind().unwrap(); + if capacity > 0 { + // Theoretical performance optimization: write a zero to the end of + // the file so that we won't have to resize it later, which may be + // expensive. + data.seek(SeekFrom::Start(capacity - 1)).unwrap(); + data.write_all(&[0]).unwrap(); + data.rewind().unwrap(); + } data.flush().unwrap(); Ok(unsafe { MmapMut::map_mut(&data).unwrap() }) } @@ -209,9 +300,243 @@ mod test { ); } + #[test] + fn test_get_restartable_buckets() { + let tmpdir = tempdir().unwrap(); + let paths: Vec = vec![tmpdir.path().to_path_buf()]; + assert!(!paths.is_empty()); + let config_file = tmpdir.path().join("config"); + + let config = BucketMapConfig { + drives: Some(paths.clone()), + restart_config_file: Some(config_file.clone()), + ..BucketMapConfig::new(1 << 2) + }; + + // create restart file + let restart = Arc::new(Mutex::new(Restart::new(&config).unwrap())); + let files = Restart::get_all_possible_index_files_in_drives(&paths); + assert!(files.is_empty()); + + let restartable_buckets = (0..config.max_buckets) + .map(|bucket| RestartableBucket { + restart: Some(restart.clone()), + index: bucket, + path: None, + }) + .collect::>(); + + let skip = 2; // skip this file + // note starting at 1 to avoid default values of 0 for file_name + // create 4 bucket files. 
+ // 1,3,4 will match buckets 0,2,3 + // 5 is an extra file that will get deleted + (0..config.max_buckets + 1).for_each(|i| { + if i == skip { + return; + } + let file_name = (i + 1) as u128; + let random = (i * 2) as u64; + let file = tmpdir.path().join(file_name.to_string()); + create_dummy_file(&file); + + // bucket is connected to this file_name + if i < config.max_buckets { + restartable_buckets[i].set_file(file_name, random); + assert_eq!(Some((file_name, random)), restartable_buckets[i].get()); + } + }); + + let deleted_file = tmpdir.path().join((1 + config.max_buckets).to_string()); + assert!(std::fs::metadata(deleted_file.clone()).is_ok()); + let calc_restartable_buckets = Restart::get_restartable_buckets( + Some(&restart), + &Arc::new(paths.clone()), + config.max_buckets, + ); + + // make sure all bucket files were associated correctly + // and all files still exist + (0..config.max_buckets).for_each(|i| { + if i == skip { + assert_eq!(Some((0, 0)), restartable_buckets[i].get()); + assert_eq!(None, calc_restartable_buckets[i].path); + } else { + let file_name = (i + 1) as u128; + let random = (i * 2) as u64; + let expected_path = tmpdir.path().join(file_name.to_string()); + assert!(std::fs::metadata(expected_path.clone()).is_ok()); + + assert_eq!(Some((file_name, random)), restartable_buckets[i].get()); + assert_eq!(Some(expected_path), calc_restartable_buckets[i].path); + } + }); + + // this file wasn't associated with a bucket + assert!( + std::fs::metadata(deleted_file).is_err(), + "should have been deleted" + ); + } + + fn create_dummy_file(path: &Path) { + // easy enough to create a test file in the right spot with creating a 'restart' file of a given name. 
+ let config = BucketMapConfig { + drives: None, + restart_config_file: Some(path.to_path_buf()), + ..BucketMapConfig::new(1 << 1) + }; + + // create file + assert!(Restart::new(&config).is_some()); + } + + #[test] + fn test_get_all_possible_index_files_in_drives() { + let tmpdir = tempdir().unwrap(); + let paths: Vec = vec![tmpdir.path().to_path_buf()]; + assert!(!paths.is_empty()); + create_dummy_file(&tmpdir.path().join("config")); + + // create a file with a valid u128 name + for file_name in [u128::MAX, 0, 123] { + let file = tmpdir.path().join(file_name.to_string()); + create_dummy_file(&file); + let mut files = Restart::get_all_possible_index_files_in_drives(&paths); + assert_eq!(files.remove(&file_name), Some(&file).cloned()); + assert!(files.is_empty()); + _ = fs::remove_file(&file); + } + + // create a file with a u128 name that fails to convert + for file_name in [ + u128::MAX.to_string() + ".", + u128::MAX.to_string() + "0", + "-123".to_string(), + ] { + let file = tmpdir.path().join(file_name); + create_dummy_file(&file); + let files = Restart::get_all_possible_index_files_in_drives(&paths); + assert!(files.is_empty(), "{files:?}"); + _ = fs::remove_file(&file); + } + + // 2 drives, 2 files in each + // create 2nd tmpdir (ie. 
drive) + let tmpdir2 = tempdir().unwrap(); + let paths2: Vec = + vec![paths.first().unwrap().clone(), tmpdir2.path().to_path_buf()]; + (0..4).for_each(|i| { + let parent = if i < 2 { &tmpdir } else { &tmpdir2 }; + let file = parent.path().join(i.to_string()); + create_dummy_file(&file); + }); + + let mut files = Restart::get_all_possible_index_files_in_drives(&paths); + assert_eq!(files.len(), 2); + (0..2).for_each(|file_name| { + let path = files.remove(&file_name).unwrap(); + assert_eq!(tmpdir.path().join(file_name.to_string()), path); + }); + let mut files = Restart::get_all_possible_index_files_in_drives(&paths2); + assert_eq!(files.len(), 4); + (0..2).for_each(|file_name| { + let path = files.remove(&file_name).unwrap(); + assert_eq!(tmpdir.path().join(file_name.to_string()), path); + }); + (2..4).for_each(|file_name| { + let path = files.remove(&file_name).unwrap(); + assert_eq!(tmpdir2.path().join(file_name.to_string()), path); + }); + assert!(files.is_empty()); + } + + #[test] + fn test_restartable_bucket_load() { + let tmpdir = tempdir().unwrap(); + let paths: Vec = vec![tmpdir.path().to_path_buf()]; + assert!(!paths.is_empty()); + let config_file = tmpdir.path().join("config"); + + for bucket_pow in [1, 2] { + let config = BucketMapConfig { + drives: Some(paths.clone()), + restart_config_file: Some(config_file.clone()), + ..BucketMapConfig::new(1 << bucket_pow) + }; + + // create file + let restart = Arc::new(Mutex::new(Restart::new(&config).unwrap())); + test_default_restart(&restart, &config); + drop(restart); + + // successful open + let restart = Restart::get_restart_file(&config); + assert!(restart.is_some()); + drop(restart); + + // unsuccessful: buckets wrong + let config_wrong_buckets = BucketMapConfig { + drives: Some(paths.clone()), + restart_config_file: Some(config_file.clone()), + ..BucketMapConfig::new(1 << (bucket_pow + 1)) + }; + let restart = Restart::get_restart_file(&config_wrong_buckets); + assert!(restart.is_none()); + + // 
unsuccessful: max search wrong + let config_wrong_buckets = BucketMapConfig { + max_search: Some(MAX_SEARCH_DEFAULT + 1), + drives: Some(paths.clone()), + restart_config_file: Some(config_file.clone()), + ..BucketMapConfig::new(1 << bucket_pow) + }; + let restart = Restart::get_restart_file(&config_wrong_buckets); + assert!(restart.is_none()); + + // create file with different header + let restart = Arc::new(Mutex::new(Restart::new(&config).unwrap())); + test_default_restart(&restart, &config); + restart.lock().unwrap().get_header_mut().version = HEADER_VERSION + 1; + drop(restart); + // unsuccessful: header wrong + let restart = Restart::get_restart_file(&config); + assert!(restart.is_none()); + + // file 0 len + let wrong_file_len = 0; + let path = config.restart_config_file.as_ref(); + let path = path.unwrap(); + _ = remove_file(path); + let mmap = Restart::new_map(path, wrong_file_len as u64).unwrap(); + drop(mmap); + // unsuccessful: header wrong + let restart = Restart::get_restart_file(&config); + assert!(restart.is_none()); + + // file too big or small + for smaller_bigger in [0, 1, 2] { + let wrong_file_len = Restart::expected_len(config.max_buckets) - 1 + smaller_bigger; + let path = config.restart_config_file.as_ref(); + let path = path.unwrap(); + _ = remove_file(path); + let mmap = Restart::new_map(path, wrong_file_len as u64).unwrap(); + let mut restart = Restart { mmap }; + let header = restart.get_header_mut(); + header.version = HEADER_VERSION; + header.buckets = config.max_buckets as u64; + header.max_search = config.max_search.unwrap_or(MAX_SEARCH_DEFAULT); + drop(restart); + // unsuccessful: header wrong + let restart = Restart::get_restart_file(&config); + // 0, 2 are wrong, 1 is right + assert_eq!(restart.is_none(), smaller_bigger != 1); + } + } + } + #[test] fn test_restartable_bucket() { - solana_logger::setup(); let tmpdir = tempdir().unwrap(); let paths: Vec = vec![tmpdir.path().to_path_buf()]; assert!(!paths.is_empty()); @@ -224,48 
+549,77 @@ mod test { }; let buckets = config.max_buckets; let restart = Arc::new(Mutex::new(Restart::new(&config).unwrap())); + test_default_restart(&restart, &config); + let last_offset = 1; + (0..=last_offset).for_each(|offset| test_set_get(&restart, buckets, offset)); + // drop file (as if process exit) + drop(restart); + // re-load file (as if at next launch) + let restart = Arc::new(Mutex::new(Restart::get_restart_file(&config).unwrap())); + // make sure same as last set prior to reload + test_get(&restart, buckets, last_offset); + (4..6).for_each(|offset| test_set_get(&restart, buckets, offset)); + drop(restart); + // create a new file without deleting old one. Make sure it is default and not re-used. + let restart = Arc::new(Mutex::new(Restart::new(&config).unwrap())); + test_default_restart(&restart, &config); + } - { - let restart = restart.lock().unwrap(); - let header = restart.get_header(); - assert_eq!(header.version, HEADER_VERSION); - assert_eq!(header.buckets, config.max_buckets as u64); - assert_eq!( - header.max_search, - config.max_search.unwrap_or(MAX_SEARCH_DEFAULT) - ); - } + fn test_set_get(restart: &Arc>, buckets: usize, test_offset: usize) { + test_set(restart, buckets, test_offset); + test_get(restart, buckets, test_offset); + } + fn test_set(restart: &Arc>, buckets: usize, test_offset: usize) { (0..buckets).for_each(|bucket| { let restartable_bucket = RestartableBucket { restart: Some(restart.clone()), index: bucket, path: None, }; - // default values - assert_eq!(restartable_bucket.get(), Some((0, 0))); + assert!(restartable_bucket.get().is_some()); + let file_name = bucket as u128 + test_offset as u128; + let random = (file_name as u64 + 5) * 2; + restartable_bucket.set_file(file_name, random); + assert_eq!(restartable_bucket.get(), Some((file_name, random))); }); + } + + fn test_get(restart: &Arc>, buckets: usize, test_offset: usize) { (0..buckets).for_each(|bucket| { let restartable_bucket = RestartableBucket { restart: 
Some(restart.clone()), index: bucket, path: None, }; - assert!(restartable_bucket.get().is_some()); - let file_name = bucket as u128; - let random = (bucket as u64 + 5) * 2; - restartable_bucket.set_file(bucket as u128, random); + let file_name = bucket as u128 + test_offset as u128; + let random = (file_name as u64 + 5) * 2; assert_eq!(restartable_bucket.get(), Some((file_name, random))); }); + } + + /// make sure restart is default values we expect + fn test_default_restart(restart: &Arc>, config: &BucketMapConfig) { + { + let restart = restart.lock().unwrap(); + let header = restart.get_header(); + assert_eq!(header.version, HEADER_VERSION); + assert_eq!(header.buckets, config.max_buckets as u64); + assert_eq!( + header.max_search, + config.max_search.unwrap_or(MAX_SEARCH_DEFAULT) + ); + } + + let buckets = config.max_buckets; (0..buckets).for_each(|bucket| { let restartable_bucket = RestartableBucket { restart: Some(restart.clone()), index: bucket, path: None, }; - let file_name = bucket as u128; - let random = (bucket as u64 + 5) * 2; - assert_eq!(restartable_bucket.get(), Some((file_name, random))); + // default values + assert_eq!(restartable_bucket.get(), Some((0, 0))); }); } } From 456563b9e9df957dc998da7644b2cf2b5e8fe270 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 22 Sep 2023 13:46:04 -0700 Subject: [PATCH 158/407] pass RestartableBucket through disk index (#33377) --- bucket_map/src/bucket_api.rs | 9 ++++++++- bucket_map/src/bucket_map.rs | 29 ++++++++++++++++++++++++----- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/bucket_map/src/bucket_api.rs b/bucket_map/src/bucket_api.rs index 2ec495ab20a481..d81b4b52f3cc34 100644 --- a/bucket_map/src/bucket_api.rs +++ b/bucket_map/src/bucket_api.rs @@ -1,7 +1,7 @@ use { crate::{ bucket::Bucket, bucket_item::BucketItem, bucket_map::BucketMapError, - bucket_stats::BucketMapStats, MaxSearch, RefCount, + bucket_stats::BucketMapStats, restart::RestartableBucket, MaxSearch, 
RefCount, }, solana_sdk::pubkey::Pubkey, std::{ @@ -23,6 +23,11 @@ pub struct BucketApi { bucket: LockedBucket, count: Arc, + + /// keeps track of which index file this bucket is currently using + /// or at startup, which bucket file this bucket should initially use + #[allow(dead_code)] + restartable_bucket: RestartableBucket, } impl BucketApi { @@ -30,6 +35,7 @@ impl BucketApi { drives: Arc>, max_search: MaxSearch, stats: Arc, + restartable_bucket: RestartableBucket, ) -> Self { Self { drives, @@ -37,6 +43,7 @@ impl BucketApi { stats, bucket: RwLock::default(), count: Arc::default(), + restartable_bucket, } } diff --git a/bucket_map/src/bucket_map.rs b/bucket_map/src/bucket_map.rs index 2adeefdcc33a1f..4f6e177c5baefb 100644 --- a/bucket_map/src/bucket_map.rs +++ b/bucket_map/src/bucket_map.rs @@ -1,9 +1,17 @@ //! BucketMap is a mostly contention free concurrent map backed by MmapMut use { - crate::{bucket_api::BucketApi, bucket_stats::BucketMapStats, MaxSearch, RefCount}, + crate::{ + bucket_api::BucketApi, bucket_stats::BucketMapStats, restart::Restart, MaxSearch, RefCount, + }, solana_sdk::pubkey::Pubkey, - std::{convert::TryInto, fmt::Debug, fs, path::PathBuf, sync::Arc}, + std::{ + convert::TryInto, + fmt::Debug, + fs::{self}, + path::PathBuf, + sync::{Arc, Mutex}, + }, tempfile::TempDir, }; @@ -83,6 +91,11 @@ impl BucketMap { if let Some(drives) = config.drives.as_ref() { Self::erase_previous_drives(drives); } + + let stats = Arc::default(); + + let restart = Restart::new(&config); + let mut temp_dir = None; let drives = config.drives.unwrap_or_else(|| { temp_dir = Some(TempDir::new().unwrap()); @@ -90,13 +103,19 @@ impl BucketMap { }); let drives = Arc::new(drives); - let stats = Arc::default(); - let buckets = (0..config.max_buckets) - .map(|_| { + let restart = restart.map(|restart| Arc::new(Mutex::new(restart))); + + let restartable_buckets = + Restart::get_restartable_buckets(restart.as_ref(), &drives, config.max_buckets); + + let buckets = 
restartable_buckets + .into_iter() + .map(|restartable_bucket| { Arc::new(BucketApi::new( Arc::clone(&drives), max_search, Arc::clone(&stats), + restartable_bucket, )) }) .collect(); From c750ac5d38d78c801df58652043c140107fad6be Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 22 Sep 2023 14:42:08 -0700 Subject: [PATCH 159/407] data bucket holds RestartableBucket (#33381) --- bucket_map/src/bucket.rs | 27 +++++++++++++++++---------- bucket_map/src/bucket_api.rs | 1 + 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index f9a01ac8e591a7..00fa5dc79f14ad 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -11,6 +11,7 @@ use { DataBucket, IndexBucket, IndexEntry, IndexEntryPlaceInBucket, MultipleSlots, OccupiedEnum, }, + restart::RestartableBucket, MaxSearch, RefCount, }, rand::{thread_rng, Rng}, @@ -107,6 +108,9 @@ pub struct Bucket { /// set to true once any entries have been deleted from the index. /// Deletes indicate that there can be free slots and that the full search range must be searched for an entry. 
at_least_one_entry_deleted: bool, + + /// keep track of which index file this bucket is using so on restart we can try to reuse it + restartable_bucket: RestartableBucket, } impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { @@ -115,6 +119,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { max_search: MaxSearch, stats: Arc, count: Arc, + restartable_bucket: RestartableBucket, ) -> Self { let (index, _file_name) = BucketStorage::new( Arc::clone(&drives), @@ -125,9 +130,10 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { count, ); stats.index.resize_grow(0, index.capacity_bytes()); + let random = thread_rng().gen(); Self { - random: thread_rng().gen(), + random, drives, index, data: vec![], @@ -135,6 +141,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { reallocated: Reallocated::default(), anticipated_size: 0, at_least_one_entry_deleted: false, + restartable_bucket, } } @@ -570,7 +577,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { count += 1; // grow relative to the current capacity let new_capacity = (current_capacity * 110 / 100).max(anticipated_size); - let (mut index, _file_name) = BucketStorage::new_with_capacity( + let (mut index, file_name) = BucketStorage::new_with_capacity( Arc::clone(&self.drives), 1, std::mem::size_of::>() as u64, @@ -596,13 +603,6 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { let new_elem: &mut IndexEntry = index.get_mut(new_ix); *new_elem = *elem; index.copying_entry(new_ix, &self.index, ix); - /* - let dbg_elem: IndexEntry = *new_elem; - assert_eq!( - Self::bucket_find_index_entry(&index, &elem.key, random).unwrap(), - (&dbg_elem, new_ix) - ); - */ } } if valid { @@ -610,6 +610,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { let mut items = self.reallocated.items.lock().unwrap(); items.index = Some(index); 
self.reallocated.add_reallocation(); + self.restartable_bucket.set_file(file_name, self.random); break; } } @@ -1075,7 +1076,13 @@ mod tests { let paths: Vec = vec![tmpdir.path().to_path_buf()]; assert!(!paths.is_empty()); let max_search = 2; - let mut bucket = Bucket::new(Arc::new(paths), max_search, Arc::default(), Arc::default()); + let mut bucket = Bucket::new( + Arc::new(paths), + max_search, + Arc::default(), + Arc::default(), + RestartableBucket::default(), + ); let key = Pubkey::new_unique(); assert_eq!(bucket.read_value(&key), None); diff --git a/bucket_map/src/bucket_api.rs b/bucket_map/src/bucket_api.rs index d81b4b52f3cc34..e5449a814a5be9 100644 --- a/bucket_map/src/bucket_api.rs +++ b/bucket_map/src/bucket_api.rs @@ -97,6 +97,7 @@ impl BucketApi { self.max_search, Arc::clone(&self.stats), Arc::clone(&self.count), + self.restartable_bucket.clone(), )); } } From 1840fd7ab33e0ef3bc9f0765a4f34190c4c1c33d Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Fri, 22 Sep 2023 17:58:05 -0600 Subject: [PATCH 160/407] Feature - better error codes for tx lamport check (#33343) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces `TransactionError::InstructionError(0, InstructionError::UnbalancedInstruction)` with `TransactionError::UnbalancedTransaction`. 
Co-authored-by: Alexander Meißner --- runtime/src/bank.rs | 39 ++++++++++++++++--- runtime/src/bank/tests.rs | 13 +++---- sdk/src/feature_set.rs | 5 +++ sdk/src/transaction/error.rs | 4 ++ storage-proto/proto/transaction_by_addr.proto | 1 + storage-proto/src/convert.rs | 12 ++++++ 6 files changed, 61 insertions(+), 13 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 5963503095994c..046c65ec94010c 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -236,7 +236,7 @@ struct RentMetrics { } pub type BankStatusCache = StatusCache>; -#[frozen_abi(digest = "3FiwE61TtjxHenszm3oFTzmHtGQGohJz3YN3TSTwcbUM")] +#[frozen_abi(digest = "EzAXfE2xG3ZqdAj8KMC8CeqoSxjo5hxrEaP7fta8LT9u")] pub type BankSlotDelta = SlotDelta>; #[derive(Default, Copy, Clone, Debug, PartialEq, Eq)] @@ -4804,6 +4804,24 @@ impl Bank { ) -> TransactionExecutionResult { let prev_accounts_data_len = self.load_accounts_data_size(); let transaction_accounts = std::mem::take(&mut loaded_transaction.accounts); + + fn transaction_accounts_lamports_sum( + accounts: &[(Pubkey, AccountSharedData)], + message: &SanitizedMessage, + ) -> Option { + let mut lamports_sum = 0u128; + for i in 0..message.account_keys().len() { + let Some((_, account)) = accounts.get(i) else { + return None; + }; + lamports_sum = lamports_sum.checked_add(u128::from(account.lamports()))?; + } + Some(lamports_sum) + } + + let lamports_before_tx = + transaction_accounts_lamports_sum(&transaction_accounts, tx.message()).unwrap_or(0); + let mut transaction_context = TransactionContext::new( transaction_accounts, if self @@ -4884,7 +4902,7 @@ impl Bank { process_message_time.as_us() ); - let status = process_result + let mut status = process_result .and_then(|info| { let post_account_state_info = self.get_transaction_account_state_info(&transaction_context, tx.message()); @@ -4910,10 +4928,6 @@ impl Bank { } err }); - let mut accounts_data_len_delta = status - .as_ref() - .map_or(0, |info| 
info.accounts_data_len_delta); - let status = status.map(|_| ()); let log_messages: Option = log_collector.and_then(|log_collector| { @@ -4936,6 +4950,19 @@ impl Bank { touched_account_count, accounts_resize_delta, } = transaction_context.into(); + + if status.is_ok() + && transaction_accounts_lamports_sum(&accounts, tx.message()) + .filter(|lamports_after_tx| lamports_before_tx == *lamports_after_tx) + .is_none() + { + status = Err(TransactionError::UnbalancedTransaction); + } + let mut accounts_data_len_delta = status + .as_ref() + .map_or(0, |info| info.accounts_data_len_delta); + let status = status.map(|_| ()); + loaded_transaction.accounts = accounts; if self .feature_set diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index dd4b51208306d3..3263eb9c41db7c 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -6577,10 +6577,9 @@ fn test_same_program_id_uses_unqiue_executable_accounts() { declare_process_instruction!(process_instruction, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let _ = instruction_context - .try_borrow_program_account(transaction_context, 1)? - .checked_add_lamports(1); - Ok(()) + instruction_context + .try_borrow_program_account(transaction_context, 0)? 
+ .set_data_length(2) }); let (genesis_config, mint_keypair) = create_genesis_config(50000); @@ -6592,7 +6591,7 @@ fn test_same_program_id_uses_unqiue_executable_accounts() { // Add a new program owned by the first let program2_pubkey = solana_sdk::pubkey::new_rand(); - let mut program2_account = AccountSharedData::new(42, 1, &program1_pubkey); + let mut program2_account = AccountSharedData::new(1, 1, &program1_pubkey); program2_account.set_executable(true); bank.store_account(&program2_pubkey, &program2_account); @@ -6604,8 +6603,8 @@ fn test_same_program_id_uses_unqiue_executable_accounts() { bank.last_blockhash(), ); assert!(bank.process_transaction(&tx).is_ok()); - assert_eq!(1, bank.get_balance(&program1_pubkey)); - assert_eq!(42, bank.get_balance(&program2_pubkey)); + assert_eq!(6, bank.get_account(&program1_pubkey).unwrap().data().len()); + assert_eq!(1, bank.get_account(&program2_pubkey).unwrap().data().len()); } fn get_shrink_account_size() -> usize { diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index e74883ec930e9d..4810acb8d13a6c 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -693,6 +693,10 @@ pub mod require_rent_exempt_split_destination { solana_sdk::declare_id!("D2aip4BBr8NPWtU9vLrwrBvbuaQ8w1zV38zFLxx4pfBV"); } +pub mod better_error_codes_for_tx_lamport_check { + solana_sdk::declare_id!("Ffswd3egL3tccB6Rv3XY6oqfdzn913vUcjCSnpvCKpfx"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -861,6 +865,7 @@ lazy_static! 
{ (remaining_compute_units_syscall_enabled::id(), "enable the remaining_compute_units syscall"), (enable_program_runtime_v2_and_loader_v4::id(), "Enable Program-Runtime-v2 and Loader-v4 #33293"), (require_rent_exempt_split_destination::id(), "Require stake split destination account to be rent exempt"), + (better_error_codes_for_tx_lamport_check::id(), "better error codes for tx lamport check #33353"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/sdk/src/transaction/error.rs b/sdk/src/transaction/error.rs index 9bc57e765daf8a..539e40ab914e6e 100644 --- a/sdk/src/transaction/error.rs +++ b/sdk/src/transaction/error.rs @@ -165,6 +165,10 @@ pub enum TransactionError { /// Program execution is temporarily restricted on an account. #[error("Execution of the program referenced by account at index {account_index} is temporarily restricted.")] ProgramExecutionTemporarilyRestricted { account_index: u8 }, + + /// The total balance before the transaction does not equal the total balance after the transaction + #[error("Sum of account balances before and after transaction do not match")] + UnbalancedTransaction, } impl From for TransactionError { diff --git a/storage-proto/proto/transaction_by_addr.proto b/storage-proto/proto/transaction_by_addr.proto index d2681484229520..8ebeeb91ba645e 100644 --- a/storage-proto/proto/transaction_by_addr.proto +++ b/storage-proto/proto/transaction_by_addr.proto @@ -61,6 +61,7 @@ enum TransactionErrorType { INVALID_LOADED_ACCOUNTS_DATA_SIZE_LIMIT = 33; RESANITIZATION_NEEDED = 34; PROGRAM_EXECUTION_TEMPORARILY_RESTRICTED = 35; + UNBALANCED_TRANSACTION = 36; } message InstructionError { diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index 3c9bb7d230d058..7ca5728d398ec1 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -812,6 +812,7 @@ impl TryFrom for TransactionError { 32 => TransactionError::MaxLoadedAccountsDataSizeExceeded, 33 => 
TransactionError::InvalidLoadedAccountsDataSizeLimit, 34 => TransactionError::ResanitizationNeeded, + 36 => TransactionError::UnbalancedTransaction, _ => return Err("Invalid TransactionError"), }) } @@ -927,6 +928,9 @@ impl From for tx_by_addr::TransactionError { TransactionError::ProgramExecutionTemporarilyRestricted { .. } => { tx_by_addr::TransactionErrorType::ProgramExecutionTemporarilyRestricted } + TransactionError::UnbalancedTransaction => { + tx_by_addr::TransactionErrorType::UnbalancedTransaction + } } as i32, instruction_error: match transaction_error { TransactionError::InstructionError(index, ref instruction_error) => { @@ -1811,6 +1815,14 @@ mod test { transaction_error, tx_by_addr_transaction_error.try_into().unwrap() ); + + let transaction_error = TransactionError::UnbalancedTransaction; + let tx_by_addr_transaction_error: tx_by_addr::TransactionError = + transaction_error.clone().into(); + assert_eq!( + transaction_error, + tx_by_addr_transaction_error.try_into().unwrap() + ); } #[test] From e92d90b674a841781387e966dbdffdb77d585ac6 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 22 Sep 2023 17:25:21 -0700 Subject: [PATCH 161/407] DiskIdx: reuse disk bucket file if possible (#33379) --- bucket_map/src/bucket.rs | 50 +++++++++++++++++++++++++++++++--------- 1 file changed, 39 insertions(+), 11 deletions(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 00fa5dc79f14ad..d691d019286695 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -19,7 +19,9 @@ use { solana_sdk::pubkey::Pubkey, std::{ collections::hash_map::DefaultHasher, + fs, hash::{Hash, Hasher}, + num::NonZeroU64, ops::RangeBounds, path::PathBuf, sync::{ @@ -119,18 +121,44 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { max_search: MaxSearch, stats: Arc, count: Arc, - restartable_bucket: RestartableBucket, + mut restartable_bucket: RestartableBucket, ) -> Self { - let (index, _file_name) = 
BucketStorage::new( - Arc::clone(&drives), - 1, - std::mem::size_of::>() as u64, - max_search, - Arc::clone(&stats.index), - count, - ); - stats.index.resize_grow(0, index.capacity_bytes()); - let random = thread_rng().gen(); + let reuse_path = std::mem::take(&mut restartable_bucket.path); + let elem_size = NonZeroU64::new(std::mem::size_of::>() as u64).unwrap(); + let (index, random) = reuse_path + .and_then(|path| { + // try to re-use the file this bucket was using last time we were running + restartable_bucket.get().and_then(|(_file_name, random)| { + let result = BucketStorage::load_on_restart( + path.clone(), + elem_size, + max_search, + Arc::clone(&stats.index), + count.clone(), + ) + .map(|index| (index, random)); + if result.is_none() { + // we couldn't re-use it, so delete it + _ = fs::remove_file(path); + } + result + }) + }) + .unwrap_or_else(|| { + // no file to re-use, so create a new file + let (index, file_name) = BucketStorage::new( + Arc::clone(&drives), + 1, + elem_size.into(), + max_search, + Arc::clone(&stats.index), + count, + ); + stats.index.resize_grow(0, index.capacity_bytes()); + let random = thread_rng().gen(); + restartable_bucket.set_file(file_name, random); + (index, random) + }); Self { random, From 1b1546441441d28185c4c057834c0e751a5accf0 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Sat, 23 Sep 2023 06:22:12 -0700 Subject: [PATCH 162/407] diskidx: stats for created vs reused (#33385) --- accounts-db/src/bucket_map_holder_stats.rs | 28 ++++++++++++++++++++++ bucket_map/src/bucket.rs | 22 +++++++++++++++++ bucket_map/src/bucket_stats.rs | 7 ++++++ 3 files changed, 57 insertions(+) diff --git a/accounts-db/src/bucket_map_holder_stats.rs b/accounts-db/src/bucket_map_holder_stats.rs index 1d82c5d5ae4c30..df7180bfecb8e7 100644 --- a/accounts-db/src/bucket_map_holder_stats.rs +++ b/accounts-db/src/bucket_map_holder_stats.rs @@ -226,6 +226,34 @@ impl BucketMapHolderStats { // sum of elapsed time in each thread let mut 
thread_time_elapsed_ms = elapsed_ms * storage.threads as u64; if disk.is_some() { + if startup || was_startup { + // these stats only apply at startup + datapoint_info!( + "accounts_index_startup", + ( + "entries_created", + disk.map(|disk| disk + .stats + .index + .startup + .entries_created + .swap(0, Ordering::Relaxed)) + .unwrap_or_default(), + i64 + ), + ( + "entries_reused", + disk.map(|disk| disk + .stats + .index + .startup + .entries_reused + .swap(0, Ordering::Relaxed)) + .unwrap_or_default(), + i64 + ), + ); + } datapoint_info!( if startup || was_startup { thread_time_elapsed_ms *= 2; // more threads are allocated during startup diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index d691d019286695..c65d26bc95620c 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -338,6 +338,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { self.set_anticipated_count((anticipated).saturating_add(current_len)); let mut entries = Self::index_entries(items, self.random); let mut duplicates = Vec::default(); + let mut entries_created_on_disk = 0; // insert, but resizes may be necessary loop { let cap = self.index.capacity(); @@ -351,6 +352,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { &self.data, items, &mut entries, + &mut entries_created_on_disk, &mut duplicates, ); match result { @@ -361,6 +363,18 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { items.len().saturating_sub(duplicates.len()) as u64, Ordering::Relaxed, ); + self.index.stats.startup.entries_reused.fetch_add( + items + .len() + .saturating_sub(duplicates.len()) + .saturating_sub(entries_created_on_disk) as u64, + Ordering::Relaxed, + ); + self.index + .stats + .startup + .entries_created + .fetch_add(entries_created_on_disk as u64, Ordering::Relaxed); return duplicates; } Err(error) => { @@ -383,6 +397,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { 
data_buckets: &[BucketStorage], items: &[(Pubkey, T)], reverse_sorted_entries: &mut Vec<(u64, usize)>, + entries_created_on_disk: &mut usize, duplicates: &mut Vec<(usize, T)>, ) -> Result<(), BucketMapError> { if reverse_sorted_entries.is_empty() { @@ -401,6 +416,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { let ix_index = (ix_entry + search) % cap; let elem = IndexEntryPlaceInBucket::new(ix_index); if index.try_lock(ix_index) { + *entries_created_on_disk += 1; // found free element and occupied it // These fields will be overwritten after allocation by callers. // Since this part of the mmapped file could have previously been used by someone else, there can be garbage here. @@ -849,12 +865,14 @@ mod tests { let mut index = create_test_index(None); + let mut entries_created = 0; let mut duplicates = Vec::default(); assert!(Bucket::::batch_insert_non_duplicates_internal( &mut index, &Vec::default(), &raw, &mut hashed, + &mut entries_created, &mut duplicates, ) .is_ok()); @@ -898,11 +916,13 @@ mod tests { let mut index = create_test_index(None); let mut duplicates = Vec::default(); + let mut entries_created = 0; assert!(Bucket::::batch_insert_non_duplicates_internal( &mut index, &Vec::default(), &raw, &mut hashed, + &mut entries_created, &mut duplicates, ) .is_ok()); @@ -946,11 +966,13 @@ mod tests { let mut index = create_test_index(Some(max_search as u8)); let mut duplicates = Vec::default(); + let mut entries_created = 0; let result = Bucket::::batch_insert_non_duplicates_internal( &mut index, &Vec::default(), &raw, &mut hashed, + &mut entries_created, &mut duplicates, ); diff --git a/bucket_map/src/bucket_stats.rs b/bucket_map/src/bucket_stats.rs index 3ccb7ae420a22c..ed862e084c69ca 100644 --- a/bucket_map/src/bucket_stats.rs +++ b/bucket_map/src/bucket_stats.rs @@ -3,6 +3,12 @@ use std::sync::{ Arc, }; +#[derive(Debug, Default)] +pub struct StartupBucketStats { + pub entries_created: AtomicU64, + pub entries_reused: AtomicU64, 
+} + #[derive(Debug, Default)] pub struct BucketStats { pub resizes: AtomicU64, @@ -15,6 +21,7 @@ pub struct BucketStats { pub find_index_entry_mut_us: AtomicU64, pub file_count: AtomicU64, pub total_file_size: AtomicU64, + pub startup: StartupBucketStats, } impl BucketStats { From 967a78bbc18995fb047917ad830b9d17761e3110 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Sun, 24 Sep 2023 12:23:48 -0700 Subject: [PATCH 163/407] diskidx: keep track if bucket file was reused so we can gen idx faster (#33380) --- bucket_map/src/bucket.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index c65d26bc95620c..924aa8b173ee42 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -113,6 +113,11 @@ pub struct Bucket { /// keep track of which index file this bucket is using so on restart we can try to reuse it restartable_bucket: RestartableBucket, + + /// true if this bucket was loaded (as opposed to created blank). + /// When populating, we want to prioritize looking for data on disk that already matches as opposed to writing new data. 
+ #[allow(dead_code)] + reused_file_at_startup: bool, } impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { @@ -125,7 +130,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { ) -> Self { let reuse_path = std::mem::take(&mut restartable_bucket.path); let elem_size = NonZeroU64::new(std::mem::size_of::>() as u64).unwrap(); - let (index, random) = reuse_path + let (index, random, reused_file_at_startup) = reuse_path .and_then(|path| { // try to re-use the file this bucket was using last time we were running restartable_bucket.get().and_then(|(_file_name, random)| { @@ -136,7 +141,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { Arc::clone(&stats.index), count.clone(), ) - .map(|index| (index, random)); + .map(|index| (index, random, true /* true = reused file */)); if result.is_none() { // we couldn't re-use it, so delete it _ = fs::remove_file(path); @@ -157,7 +162,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { stats.index.resize_grow(0, index.capacity_bytes()); let random = thread_rng().gen(); restartable_bucket.set_file(file_name, random); - (index, random) + (index, random, false /* true = reused file */) }); Self { @@ -170,6 +175,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { anticipated_size: 0, at_least_one_entry_deleted: false, restartable_bucket, + reused_file_at_startup, } } From 5c576413130c1f54c237890c712fea057c117f90 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Sun, 24 Sep 2023 12:25:50 -0700 Subject: [PATCH 164/407] disk idx: restart re-uses disk index files (#33382) --- bucket_map/src/bucket_map.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/bucket_map/src/bucket_map.rs b/bucket_map/src/bucket_map.rs index 4f6e177c5baefb..aa003b8750d41f 100644 --- a/bucket_map/src/bucket_map.rs +++ b/bucket_map/src/bucket_map.rs @@ -88,13 +88,21 @@ impl BucketMap { 
); let max_search = config.max_search.unwrap_or(MAX_SEARCH_DEFAULT); - if let Some(drives) = config.drives.as_ref() { - Self::erase_previous_drives(drives); + let mut restart = Restart::get_restart_file(&config); + + if restart.is_none() { + // If we were able to load a restart file from the previous run, then don't wipe the accounts index drives from last time. + // Unused files will be wiped by `get_restartable_buckets` + if let Some(drives) = config.drives.as_ref() { + Self::erase_previous_drives(drives); + } } let stats = Arc::default(); - let restart = Restart::new(&config); + if restart.is_none() { + restart = Restart::new(&config); + } let mut temp_dir = None; let drives = config.drives.unwrap_or_else(|| { @@ -129,7 +137,8 @@ impl BucketMap { max_buckets_pow2: log2(config.max_buckets) as u8, stats, temp_dir, - erase_drives_on_drop: true, + // if we are keeping track of restart, then don't wipe the drives on drop + erase_drives_on_drop: restart.is_none(), } } From 27f59e809dbce3fc6e7115b5f2d43000b7f2e751 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Sun, 24 Sep 2023 12:26:17 -0700 Subject: [PATCH 165/407] disk idx: apply_grow_index does not delete index file (#33384) --- bucket_map/src/bucket.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 924aa8b173ee42..e727e8424a8a10 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -679,11 +679,20 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { } } - pub fn apply_grow_index(&mut self, index: BucketStorage>) { + pub fn apply_grow_index(&mut self, mut index: BucketStorage>) { self.stats .index .resize_grow(self.index.capacity_bytes(), index.capacity_bytes()); + if self.restartable_bucket.restart.is_some() { + // we are keeping track of which files we use for restart. + // And we are resizing. + // So, delete the old file and set the new file to NOT delete. 
+ // This way the new file will still be around on startup. + // We are completely done with the old file. + self.index.delete_file_on_drop = true; + index.delete_file_on_drop = false; + } self.index = index; } From 499ec49e71da6c063a295fe11e8013eba909084c Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Mon, 25 Sep 2023 03:31:23 -0700 Subject: [PATCH 166/407] fix single-pool path for ci (#33339) --- ci/downstream-projects/func-spl.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/downstream-projects/func-spl.sh b/ci/downstream-projects/func-spl.sh index a565b8828fbc4c..00da118e14f06e 100755 --- a/ci/downstream-projects/func-spl.sh +++ b/ci/downstream-projects/func-spl.sh @@ -16,7 +16,7 @@ spl() { memo/program name-service/program stake-pool/program - stake-pool/single-pool + single-pool/program ) set -x rm -rf spl From 997aa0a3f8fce41e0dc6a9134a2f8677e8472288 Mon Sep 17 00:00:00 2001 From: ananas-block <58553958+ananas-block@users.noreply.github.com> Date: Mon, 25 Sep 2023 13:43:34 +0100 Subject: [PATCH 167/407] Feat(syscall): add altbn128 g1 & g2 compression (#32870) * solana-program - altbn128: add g1 & g2 compression still fixing tests for point of infinity feat: proof compression syscall working add rust test to ci remove prints added c test added sycall pricing * fixed ci checks * refactored altbn128 and compression --- program-runtime/src/compute_budget.rs | 12 + programs/bpf_loader/src/syscalls/mod.rs | 122 ++++- programs/sbf/Cargo.lock | 8 + programs/sbf/Cargo.toml | 1 + programs/sbf/build.rs | 1 + .../c/src/alt_bn128_compression/alt_bn128.c | 50 ++ .../sbf/rust/alt_bn128_compression/Cargo.toml | 19 + .../sbf/rust/alt_bn128_compression/src/lib.rs | 73 +++ programs/sbf/tests/programs.rs | 2 + runtime/src/bank.rs | 1 + sdk/program/src/alt_bn128/compression.rs | 487 ++++++++++++++++++ .../src/{alt_bn128.rs => alt_bn128/mod.rs} | 1 + sdk/program/src/syscalls/definitions.rs | 1 + 
sdk/sbf/c/inc/sol/alt_bn128_compression.h | 91 ++++ .../c/inc/sol/inc/alt_bn128_compression.inc | 72 +++ sdk/src/feature_set.rs | 4 + 16 files changed, 941 insertions(+), 4 deletions(-) create mode 100644 programs/sbf/c/src/alt_bn128_compression/alt_bn128.c create mode 100644 programs/sbf/rust/alt_bn128_compression/Cargo.toml create mode 100644 programs/sbf/rust/alt_bn128_compression/src/lib.rs create mode 100644 sdk/program/src/alt_bn128/compression.rs rename sdk/program/src/{alt_bn128.rs => alt_bn128/mod.rs} (99%) create mode 100644 sdk/sbf/c/inc/sol/alt_bn128_compression.h create mode 100644 sdk/sbf/c/inc/sol/inc/alt_bn128_compression.inc diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index 6fa9cda02b7228..09567cea7bcba8 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -131,6 +131,14 @@ pub struct ComputeBudget { pub poseidon_cost_coefficient_c: u64, /// Number of compute units consumed for accessing the remaining compute units. pub get_remaining_compute_units_cost: u64, + /// Number of compute units consumed to call alt_bn128_g1_compress. + pub alt_bn128_g1_compress: u64, + /// Number of compute units consumed to call alt_bn128_g1_decompress. + pub alt_bn128_g1_decompress: u64, + /// Number of compute units consumed to call alt_bn128_g2_compress. + pub alt_bn128_g2_compress: u64, + /// Number of compute units consumed to call alt_bn128_g2_decompress. 
+ pub alt_bn128_g2_decompress: u64, } impl Default for ComputeBudget { @@ -183,6 +191,10 @@ impl ComputeBudget { poseidon_cost_coefficient_a: 61, poseidon_cost_coefficient_c: 542, get_remaining_compute_units_cost: 100, + alt_bn128_g1_compress: 30, + alt_bn128_g1_decompress: 398, + alt_bn128_g2_compress: 86, + alt_bn128_g2_decompress: 13610, } } diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index e8a0b70bc12d8f..c4a7fe1e6db50b 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -36,10 +36,11 @@ use { feature_set::{ self, blake3_syscall_enabled, curve25519_syscall_enabled, disable_cpi_setting_executable_and_rent_epoch, disable_deploy_of_alloc_free_syscall, - disable_fees_sysvar, enable_alt_bn128_syscall, enable_big_mod_exp_syscall, - enable_early_verification_of_account_modifications, enable_partitioned_epoch_reward, - enable_poseidon_syscall, error_on_syscall_bpf_function_hash_collisions, - last_restart_slot_sysvar, libsecp256k1_0_5_upgrade_enabled, reject_callx_r10, + disable_fees_sysvar, enable_alt_bn128_compression_syscall, enable_alt_bn128_syscall, + enable_big_mod_exp_syscall, enable_early_verification_of_account_modifications, + enable_partitioned_epoch_reward, enable_poseidon_syscall, + error_on_syscall_bpf_function_hash_collisions, last_restart_slot_sysvar, + libsecp256k1_0_5_upgrade_enabled, reject_callx_r10, remaining_compute_units_syscall_enabled, stop_sibling_instruction_search_at_parent, stop_truncating_strings_in_syscalls, switch_to_new_elf_parser, }, @@ -154,6 +155,8 @@ pub fn create_program_runtime_environment_v1<'a>( debugging_features: bool, ) -> Result>, Error> { let enable_alt_bn128_syscall = feature_set.is_active(&enable_alt_bn128_syscall::id()); + let enable_alt_bn128_compression_syscall = + feature_set.is_active(&enable_alt_bn128_compression_syscall::id()); let enable_big_mod_exp_syscall = feature_set.is_active(&enable_big_mod_exp_syscall::id()); 
let blake3_syscall_enabled = feature_set.is_active(&blake3_syscall_enabled::id()); let curve25519_syscall_enabled = feature_set.is_active(&curve25519_syscall_enabled::id()); @@ -345,6 +348,14 @@ pub fn create_program_runtime_environment_v1<'a>( SyscallRemainingComputeUnits::call )?; + // Alt_bn128_compression + register_feature_gated_function!( + result, + enable_alt_bn128_compression_syscall, + *b"sol_alt_bn128_compression", + SyscallAltBn128Compression::call, + )?; + // Log data result.register_function_hashed(*b"sol_log_data", SyscallLogData::call)?; @@ -1907,6 +1918,109 @@ declare_syscall!( } ); +declare_syscall!( + /// alt_bn128 g1 and g2 compression and decompression + SyscallAltBn128Compression, + fn inner_call( + invoke_context: &mut InvokeContext, + op: u64, + input_addr: u64, + input_size: u64, + result_addr: u64, + _arg5: u64, + memory_mapping: &mut MemoryMapping, + ) -> Result { + use solana_sdk::alt_bn128::compression::prelude::{ + alt_bn128_g1_compress, alt_bn128_g1_decompress, alt_bn128_g2_compress, + alt_bn128_g2_decompress, ALT_BN128_G1_COMPRESS, ALT_BN128_G1_DECOMPRESS, + ALT_BN128_G2_COMPRESS, ALT_BN128_G2_DECOMPRESS, G1, G1_COMPRESSED, G2, G2_COMPRESSED, + }; + let budget = invoke_context.get_compute_budget(); + let base_cost = budget.syscall_base_cost; + let (cost, output): (u64, usize) = match op { + ALT_BN128_G1_COMPRESS => ( + base_cost.saturating_add(budget.alt_bn128_g1_compress), + G1_COMPRESSED, + ), + ALT_BN128_G1_DECOMPRESS => { + (base_cost.saturating_add(budget.alt_bn128_g1_decompress), G1) + } + ALT_BN128_G2_COMPRESS => ( + base_cost.saturating_add(budget.alt_bn128_g2_compress), + G2_COMPRESSED, + ), + ALT_BN128_G2_DECOMPRESS => { + (base_cost.saturating_add(budget.alt_bn128_g2_decompress), G2) + } + _ => { + return Err(SyscallError::InvalidAttribute.into()); + } + }; + + consume_compute_meter(invoke_context, cost)?; + + let input = translate_slice::( + memory_mapping, + input_addr, + input_size, + invoke_context.get_check_aligned(), 
+ invoke_context.get_check_size(), + )?; + + let call_result = translate_slice_mut::( + memory_mapping, + result_addr, + output as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + + match op { + ALT_BN128_G1_COMPRESS => { + let result_point = match alt_bn128_g1_compress(input) { + Ok(result_point) => result_point, + Err(e) => { + return Ok(e.into()); + } + }; + call_result.copy_from_slice(&result_point); + Ok(SUCCESS) + } + ALT_BN128_G1_DECOMPRESS => { + let result_point = match alt_bn128_g1_decompress(input) { + Ok(result_point) => result_point, + Err(e) => { + return Ok(e.into()); + } + }; + call_result.copy_from_slice(&result_point); + Ok(SUCCESS) + } + ALT_BN128_G2_COMPRESS => { + let result_point = match alt_bn128_g2_compress(input) { + Ok(result_point) => result_point, + Err(e) => { + return Ok(e.into()); + } + }; + call_result.copy_from_slice(&result_point); + Ok(SUCCESS) + } + ALT_BN128_G2_DECOMPRESS => { + let result_point = match alt_bn128_g2_decompress(input) { + Ok(result_point) => result_point, + Err(e) => { + return Ok(e.into()); + } + }; + call_result.copy_from_slice(&result_point); + Ok(SUCCESS) + } + _ => Err(SyscallError::InvalidAttribute.into()), + } + } +); + #[cfg(test)] #[allow(clippy::arithmetic_side_effects)] #[allow(clippy::indexing_slicing)] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9945f9b4cb08a1..f3779ae0c794e6 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5652,6 +5652,14 @@ dependencies = [ "solana-program", ] +[[package]] +name = "solana-sbf-rust-alt-bn128-compression" +version = "1.17.0" +dependencies = [ + "array-bytes", + "solana-program", +] + [[package]] name = "solana-sbf-rust-call-depth" version = "1.17.0" diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 363a3a4972d589..6c2182e0bfab7d 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -112,6 +112,7 @@ members = [ "rust/128bit_dep", "rust/alloc", 
"rust/alt_bn128", + "rust/alt_bn128_compression", "rust/big_mod_exp", "rust/call_depth", "rust/caller_access", diff --git a/programs/sbf/build.rs b/programs/sbf/build.rs index 6bdfb9a4ea949a..46ed90b67f3a34 100644 --- a/programs/sbf/build.rs +++ b/programs/sbf/build.rs @@ -65,6 +65,7 @@ fn main() { "128bit", "alloc", "alt_bn128", + "alt_bn128_compression", "big_mod_exp", "call_depth", "caller_access", diff --git a/programs/sbf/c/src/alt_bn128_compression/alt_bn128.c b/programs/sbf/c/src/alt_bn128_compression/alt_bn128.c new file mode 100644 index 00000000000000..cdda5c79475cde --- /dev/null +++ b/programs/sbf/c/src/alt_bn128_compression/alt_bn128.c @@ -0,0 +1,50 @@ +/** + * @brief alt_bn128 syscall test + */ +#include +#include +#include + +extern uint64_t entrypoint(const uint8_t *input) { + // compress and decompress g1 + { + uint8_t result_compressed[ALT_BN128_COMPRESSION_G1_COMPRESS_OUTPUT_LEN]; + uint8_t result_decompressed[ALT_BN128_COMPRESSION_G1_DECOMPRESS_OUTPUT_LEN]; + uint8_t input[] = { + 45, 206, 255, 166, 152, 55, 128, 138, 79, 217, 145, 164, 25, 74, 120, 234, 234, 217, + 68, 149, 162, 44, 133, 120, 184, 205, 12, 44, 175, 98, 168, 172, 20, 24, 216, 15, 209, + 175, 106, 75, 147, 236, 90, 101, 123, 219, 245, 151, 209, 202, 218, 104, 148, 8, 32, + 254, 243, 191, 218, 122, 42, 81, 193, 84, + }; + + sol_alt_bn128_compression(ALT_BN128_G1_COMPRESS, input, SOL_ARRAY_SIZE(input), result_compressed); + sol_alt_bn128_compression(ALT_BN128_G1_DECOMPRESS, result_compressed, SOL_ARRAY_SIZE(result_compressed), result_decompressed); + + sol_assert(0 == + sol_memcmp(result_decompressed, input, ALT_BN128_COMPRESSION_G1_DECOMPRESS_OUTPUT_LEN)); + } + + // compress and decompress g2 + { + + uint8_t result_compressed[ALT_BN128_COMPRESSION_G2_COMPRESS_OUTPUT_LEN]; + uint8_t result_decompressed[ALT_BN128_COMPRESSION_G2_DECOMPRESS_OUTPUT_LEN]; + uint8_t input[] = { + 40, 57, 233, 205, 180, 46, 35, 111, 215, 5, 23, 93, 12, 71, 118, 225, 7, 46, 247, 147, + 47, 130, 106, 189, 
184, 80, 146, 103, 141, 52, 242, 25, 0, 203, 124, 176, 110, 34, 151, + 212, 66, 180, 238, 151, 236, 189, 133, 209, 17, 137, 205, 183, 168, 196, 92, 159, 75, + 174, 81, 168, 18, 86, 176, 56, 16, 26, 210, 20, 18, 81, 122, 142, 104, 62, 251, 169, + 98, 141, 21, 253, 50, 130, 182, 15, 33, 109, 228, 31, 79, 183, 88, 147, 174, 108, 4, + 22, 14, 129, 168, 6, 80, 246, 254, 100, 218, 131, 94, 49, 247, 211, 3, 245, 22, 200, + 177, 91, 60, 144, 147, 174, 90, 17, 19, 189, 62, 147, 152, 18, + }; + + sol_alt_bn128_compression(ALT_BN128_G2_COMPRESS, input, SOL_ARRAY_SIZE(input), result_compressed); + sol_alt_bn128_compression(ALT_BN128_G2_DECOMPRESS, result_compressed, SOL_ARRAY_SIZE(result_compressed), result_decompressed); + + sol_assert( + 0 == sol_memcmp(result_decompressed, input, ALT_BN128_COMPRESSION_G2_DECOMPRESS_OUTPUT_LEN)); + } + + return SUCCESS; +} diff --git a/programs/sbf/rust/alt_bn128_compression/Cargo.toml b/programs/sbf/rust/alt_bn128_compression/Cargo.toml new file mode 100644 index 00000000000000..3dd3a52de29225 --- /dev/null +++ b/programs/sbf/rust/alt_bn128_compression/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "solana-sbf-rust-alt-bn128-compression" +description = "Solana BPF test program written in Rust" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +array-bytes = { workspace = true } +solana-program = { workspace = true } + +[lib] +crate-type = ["cdylib"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/sbf/rust/alt_bn128_compression/src/lib.rs b/programs/sbf/rust/alt_bn128_compression/src/lib.rs new file mode 100644 index 00000000000000..7545788fc3cf4a --- /dev/null +++ b/programs/sbf/rust/alt_bn128_compression/src/lib.rs @@ -0,0 +1,73 @@ +//! 
Alt_bn128 compression Syscalls tests + +extern crate solana_program; +use solana_program::{ + alt_bn128::compression::prelude::{ + alt_bn128_g1_compress, alt_bn128_g1_decompress, alt_bn128_g2_compress, + alt_bn128_g2_decompress, + }, + custom_heap_default, custom_panic_default, msg, +}; + +fn alt_bn128_compression_g1() { + let points_g1: [[u8; 64]; 3] = [ + [ + 45, 206, 255, 166, 152, 55, 128, 138, 79, 217, 145, 164, 25, 74, 120, 234, 234, 217, + 68, 149, 162, 44, 133, 120, 184, 205, 12, 44, 175, 98, 168, 172, 20, 24, 216, 15, 209, + 175, 106, 75, 147, 236, 90, 101, 123, 219, 245, 151, 209, 202, 218, 104, 148, 8, 32, + 254, 243, 191, 218, 122, 42, 81, 193, 84, + ], + [ + 45, 206, 255, 166, 152, 55, 128, 138, 79, 217, 145, 164, 25, 74, 120, 234, 234, 217, + 68, 149, 162, 44, 133, 120, 184, 205, 12, 44, 175, 98, 168, 172, 28, 75, 118, 99, 15, + 130, 53, 222, 36, 99, 235, 81, 5, 165, 98, 197, 197, 182, 144, 40, 212, 105, 169, 142, + 72, 96, 177, 156, 174, 43, 59, 243, + ], + [0u8; 64], + ]; + points_g1.iter().for_each(|point| { + let g1_compressed = alt_bn128_g1_compress(point).unwrap(); + let g1_decompressed = alt_bn128_g1_decompress(&g1_compressed).unwrap(); + assert_eq!(*point, g1_decompressed); + }); +} + +fn alt_bn128_compression_g2() { + let points_g2: [[u8; 128]; 3] = [ + [ + 40, 57, 233, 205, 180, 46, 35, 111, 215, 5, 23, 93, 12, 71, 118, 225, 7, 46, 247, 147, + 47, 130, 106, 189, 184, 80, 146, 103, 141, 52, 242, 25, 0, 203, 124, 176, 110, 34, 151, + 212, 66, 180, 238, 151, 236, 189, 133, 209, 17, 137, 205, 183, 168, 196, 92, 159, 75, + 174, 81, 168, 18, 86, 176, 56, 16, 26, 210, 20, 18, 81, 122, 142, 104, 62, 251, 169, + 98, 141, 21, 253, 50, 130, 182, 15, 33, 109, 228, 31, 79, 183, 88, 147, 174, 108, 4, + 22, 14, 129, 168, 6, 80, 246, 254, 100, 218, 131, 94, 49, 247, 211, 3, 245, 22, 200, + 177, 91, 60, 144, 147, 174, 90, 17, 19, 189, 62, 147, 152, 18, + ], + [ + 40, 57, 233, 205, 180, 46, 35, 111, 215, 5, 23, 93, 12, 71, 118, 225, 7, 46, 247, 147, + 47, 
130, 106, 189, 184, 80, 146, 103, 141, 52, 242, 25, 0, 203, 124, 176, 110, 34, 151, + 212, 66, 180, 238, 151, 236, 189, 133, 209, 17, 137, 205, 183, 168, 196, 92, 159, 75, + 174, 81, 168, 18, 86, 176, 56, 32, 73, 124, 94, 206, 224, 37, 155, 80, 17, 74, 13, 30, + 244, 66, 96, 100, 254, 180, 130, 71, 3, 230, 109, 236, 105, 51, 131, 42, 16, 249, 49, + 33, 226, 166, 108, 144, 58, 161, 196, 221, 204, 231, 132, 137, 174, 84, 104, 128, 184, + 185, 54, 43, 225, 54, 222, 226, 15, 120, 89, 153, 233, 101, 53, + ], + [0u8; 128], + ]; + points_g2.iter().for_each(|point| { + let g2_compressed = alt_bn128_g2_compress(point).unwrap(); + let g2_decompressed = alt_bn128_g2_decompress(&g2_compressed).unwrap(); + assert_eq!(*point, g2_decompressed); + }); +} +#[no_mangle] +pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { + msg!("alt_bn128_compression"); + + alt_bn128_compression_g1(); + alt_bn128_compression_g2(); + 0 +} + +custom_heap_default!(); +custom_panic_default!(); diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 9bdec77a897f59..f6b680b1054442 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -279,6 +279,7 @@ fn test_program_sbf_sanity() { programs.extend_from_slice(&[ ("alloc", true), ("alt_bn128", true), + ("alt_bn128_compression", true), ("sbf_to_sbf", true), ("float", true), ("multiple_static", true), @@ -303,6 +304,7 @@ fn test_program_sbf_sanity() { ("solana_sbf_rust_128bit", true), ("solana_sbf_rust_alloc", true), ("solana_sbf_rust_alt_bn128", true), + ("solana_sbf_rust_alt_bn128_compression", true), ("solana_sbf_rust_curve25519", true), ("solana_sbf_rust_custom_heap", true), ("solana_sbf_rust_dep_crate", true), diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 046c65ec94010c..c767ce4a3120c9 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -8129,6 +8129,7 @@ impl Bank { feature_set::switch_to_new_elf_parser::id(), feature_set::bpf_account_data_direct_mapping::id(), 
feature_set::enable_alt_bn128_syscall::id(), + feature_set::enable_alt_bn128_compression_syscall::id(), feature_set::enable_big_mod_exp_syscall::id(), feature_set::blake3_syscall_enabled::id(), feature_set::curve25519_syscall_enabled::id(), diff --git a/sdk/program/src/alt_bn128/compression.rs b/sdk/program/src/alt_bn128/compression.rs new file mode 100644 index 00000000000000..2791b8fd35f8f5 --- /dev/null +++ b/sdk/program/src/alt_bn128/compression.rs @@ -0,0 +1,487 @@ +pub mod prelude { + pub use crate::alt_bn128::compression::{ + alt_bn128_compression_size::*, consts::*, target_arch::*, AltBn128CompressionError, + }; +} + +use thiserror::Error; + +mod consts { + pub const ALT_BN128_G1_COMPRESS: u64 = 0; + pub const ALT_BN128_G1_DECOMPRESS: u64 = 1; + pub const ALT_BN128_G2_COMPRESS: u64 = 2; + pub const ALT_BN128_G2_DECOMPRESS: u64 = 3; +} + +mod alt_bn128_compression_size { + pub const G1: usize = 64; + pub const G2: usize = 128; + pub const G1_COMPRESSED: usize = 32; + pub const G2_COMPRESSED: usize = 64; +} + +#[derive(Debug, Error, Clone, PartialEq, Eq)] +pub enum AltBn128CompressionError { + #[error("Unexpected error")] + UnexpectedError, + #[error("Failed to decompress g1")] + G1DecompressionFailed, + #[error("Failed to decompress g2")] + G2DecompressionFailed, + #[error("Failed to compress affine g1")] + G1CompressionFailed, + #[error("Failed to compress affine g2")] + G2CompressionFailed, + #[error("Invalid input size")] + InvalidInputSize, +} + +impl From for AltBn128CompressionError { + fn from(v: u64) -> AltBn128CompressionError { + match v { + 1 => AltBn128CompressionError::G1DecompressionFailed, + 2 => AltBn128CompressionError::G2DecompressionFailed, + 3 => AltBn128CompressionError::G1CompressionFailed, + 4 => AltBn128CompressionError::G2CompressionFailed, + 5 => AltBn128CompressionError::InvalidInputSize, + _ => AltBn128CompressionError::UnexpectedError, + } + } +} + +impl From for u64 { + fn from(v: AltBn128CompressionError) -> u64 { + match v { + 
AltBn128CompressionError::G1DecompressionFailed => 1, + AltBn128CompressionError::G2DecompressionFailed => 2, + AltBn128CompressionError::G1CompressionFailed => 3, + AltBn128CompressionError::G2CompressionFailed => 4, + AltBn128CompressionError::InvalidInputSize => 5, + AltBn128CompressionError::UnexpectedError => 0, + } + } +} + +#[cfg(not(target_os = "solana"))] +mod target_arch { + + use { + super::*, + crate::alt_bn128::compression::alt_bn128_compression_size, + ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}, + }; + + type G1 = ark_bn254::g1::G1Affine; + type G2 = ark_bn254::g2::G2Affine; + + pub fn alt_bn128_g1_decompress( + g1_bytes: &[u8], + ) -> Result<[u8; alt_bn128_compression_size::G1], AltBn128CompressionError> { + let g1_bytes: [u8; alt_bn128_compression_size::G1_COMPRESSED] = g1_bytes + .try_into() + .map_err(|_| AltBn128CompressionError::InvalidInputSize)?; + if g1_bytes == [0u8; alt_bn128_compression_size::G1_COMPRESSED] { + return Ok([0u8; alt_bn128_compression_size::G1]); + } + let decompressed_g1 = G1::deserialize_with_mode( + convert_endianness::<32, 32>(&g1_bytes).as_slice(), + Compress::Yes, + Validate::No, + ) + .map_err(|_| AltBn128CompressionError::G1DecompressionFailed)?; + let mut decompressed_g1_bytes = [0u8; alt_bn128_compression_size::G1]; + decompressed_g1 + .x + .serialize_with_mode(&mut decompressed_g1_bytes[..32], Compress::No) + .map_err(|_| AltBn128CompressionError::G1DecompressionFailed)?; + decompressed_g1 + .y + .serialize_with_mode(&mut decompressed_g1_bytes[32..], Compress::No) + .map_err(|_| AltBn128CompressionError::G1DecompressionFailed)?; + Ok(convert_endianness::<32, 64>(&decompressed_g1_bytes)) + } + + pub fn alt_bn128_g1_compress( + g1_bytes: &[u8], + ) -> Result<[u8; alt_bn128_compression_size::G1_COMPRESSED], AltBn128CompressionError> { + let g1_bytes: [u8; alt_bn128_compression_size::G1] = g1_bytes + .try_into() + .map_err(|_| AltBn128CompressionError::InvalidInputSize)?; + if 
g1_bytes == [0u8; alt_bn128_compression_size::G1] { + return Ok([0u8; alt_bn128_compression_size::G1_COMPRESSED]); + } + let g1 = G1::deserialize_with_mode( + convert_endianness::<32, 64>(&g1_bytes).as_slice(), + Compress::No, + Validate::No, + ) + .map_err(|_| AltBn128CompressionError::G1CompressionFailed)?; + let mut g1_bytes = [0u8; alt_bn128_compression_size::G1_COMPRESSED]; + G1::serialize_compressed(&g1, g1_bytes.as_mut_slice()) + .map_err(|_| AltBn128CompressionError::G2CompressionFailed)?; + Ok(convert_endianness::<32, 32>(&g1_bytes)) + } + + pub fn alt_bn128_g2_decompress( + g2_bytes: &[u8], + ) -> Result<[u8; alt_bn128_compression_size::G2], AltBn128CompressionError> { + let g2_bytes: [u8; alt_bn128_compression_size::G2_COMPRESSED] = g2_bytes + .try_into() + .map_err(|_| AltBn128CompressionError::InvalidInputSize)?; + if g2_bytes == [0u8; alt_bn128_compression_size::G2_COMPRESSED] { + return Ok([0u8; alt_bn128_compression_size::G2]); + } + let decompressed_g2 = + G2::deserialize_compressed(convert_endianness::<64, 64>(&g2_bytes).as_slice()) + .map_err(|_| AltBn128CompressionError::G2DecompressionFailed)?; + let mut decompressed_g2_bytes = [0u8; alt_bn128_compression_size::G2]; + decompressed_g2 + .x + .serialize_with_mode(&mut decompressed_g2_bytes[..64], Compress::No) + .map_err(|_| AltBn128CompressionError::G2DecompressionFailed)?; + decompressed_g2 + .y + .serialize_with_mode(&mut decompressed_g2_bytes[64..128], Compress::No) + .map_err(|_| AltBn128CompressionError::G2DecompressionFailed)?; + Ok(convert_endianness::<64, 128>(&decompressed_g2_bytes)) + } + + pub fn alt_bn128_g2_compress( + g2_bytes: &[u8], + ) -> Result<[u8; alt_bn128_compression_size::G2_COMPRESSED], AltBn128CompressionError> { + let g2_bytes: [u8; alt_bn128_compression_size::G2] = g2_bytes + .try_into() + .map_err(|_| AltBn128CompressionError::InvalidInputSize)?; + if g2_bytes == [0u8; alt_bn128_compression_size::G2] { + return Ok([0u8; alt_bn128_compression_size::G2_COMPRESSED]); + } 
+ let g2 = G2::deserialize_with_mode( + convert_endianness::<64, 128>(&g2_bytes).as_slice(), + Compress::No, + Validate::No, + ) + .map_err(|_| AltBn128CompressionError::G2DecompressionFailed)?; + let mut g2_bytes = [0u8; alt_bn128_compression_size::G2_COMPRESSED]; + G2::serialize_compressed(&g2, g2_bytes.as_mut_slice()) + .map_err(|_| AltBn128CompressionError::G2CompressionFailed)?; + Ok(convert_endianness::<64, 64>(&g2_bytes)) + } + + pub fn convert_endianness( + bytes: &[u8; ARRAY_SIZE], + ) -> [u8; ARRAY_SIZE] { + let reversed: [_; ARRAY_SIZE] = bytes + .chunks_exact(CHUNK_SIZE) + .flat_map(|chunk| chunk.iter().rev().copied()) + .enumerate() + .fold([0u8; ARRAY_SIZE], |mut acc, (i, v)| { + acc[i] = v; + acc + }); + reversed + } +} + +#[cfg(target_os = "solana")] +mod target_arch { + use { + super::*, + alt_bn128_compression_size::{G1, G1_COMPRESSED, G2, G2_COMPRESSED}, + prelude::*, + }; + + pub fn alt_bn128_g1_compress( + input: &[u8], + ) -> Result<[u8; G1_COMPRESSED], AltBn128CompressionError> { + let mut result_buffer = [0; G1_COMPRESSED]; + let result = unsafe { + crate::syscalls::sol_alt_bn128_compression( + ALT_BN128_G1_COMPRESS, + input as *const _ as *const u8, + input.len() as u64, + &mut result_buffer as *mut _ as *mut u8, + ) + }; + + match result { + 0 => Ok(result_buffer), + error => Err(AltBn128CompressionError::from(error)), + } + } + + pub fn alt_bn128_g1_decompress(input: &[u8]) -> Result<[u8; G1], AltBn128CompressionError> { + let mut result_buffer = [0; G1]; + let result = unsafe { + crate::syscalls::sol_alt_bn128_compression( + ALT_BN128_G1_DECOMPRESS, + input as *const _ as *const u8, + input.len() as u64, + &mut result_buffer as *mut _ as *mut u8, + ) + }; + + match result { + 0 => Ok(result_buffer), + error => Err(AltBn128CompressionError::from(error)), + } + } + + pub fn alt_bn128_g2_compress( + input: &[u8], + ) -> Result<[u8; G2_COMPRESSED], AltBn128CompressionError> { + let mut result_buffer = [0; G2_COMPRESSED]; + let result = 
unsafe { + crate::syscalls::sol_alt_bn128_compression( + ALT_BN128_G2_COMPRESS, + input as *const _ as *const u8, + input.len() as u64, + &mut result_buffer as *mut _ as *mut u8, + ) + }; + + match result { + 0 => Ok(result_buffer), + error => Err(AltBn128CompressionError::from(error)), + } + } + + pub fn alt_bn128_g2_decompress( + input: &[u8; G2_COMPRESSED], + ) -> Result<[u8; G2], AltBn128CompressionError> { + let mut result_buffer = [0; G2]; + let result = unsafe { + crate::syscalls::sol_alt_bn128_compression( + ALT_BN128_G2_DECOMPRESS, + input as *const _ as *const u8, + input.len() as u64, + &mut result_buffer as *mut _ as *mut u8, + ) + }; + + match result { + 0 => Ok(result_buffer), + error => Err(AltBn128CompressionError::from(error)), + } + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::alt_bn128::compression::target_arch::convert_endianness, + ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}, + std::ops::Neg, + target_arch::{ + alt_bn128_g1_compress, alt_bn128_g1_decompress, alt_bn128_g2_compress, + alt_bn128_g2_decompress, + }, + }; + type G1 = ark_bn254::g1::G1Affine; + type G2 = ark_bn254::g2::G2Affine; + + #[test] + fn alt_bn128_g1_compression() { + let g1_be = [ + 45, 206, 255, 166, 152, 55, 128, 138, 79, 217, 145, 164, 25, 74, 120, 234, 234, 217, + 68, 149, 162, 44, 133, 120, 184, 205, 12, 44, 175, 98, 168, 172, 20, 24, 216, 15, 209, + 175, 106, 75, 147, 236, 90, 101, 123, 219, 245, 151, 209, 202, 218, 104, 148, 8, 32, + 254, 243, 191, 218, 122, 42, 81, 193, 84, + ]; + let g1_le = convert_endianness::<32, 64>(&g1_be); + let g1: G1 = + G1::deserialize_with_mode(g1_le.as_slice(), Compress::No, Validate::No).unwrap(); + + let g1_neg = g1.neg(); + let mut g1_neg_be = [0u8; 64]; + g1_neg + .x + .serialize_with_mode(&mut g1_neg_be[..32], Compress::No) + .unwrap(); + g1_neg + .y + .serialize_with_mode(&mut g1_neg_be[32..64], Compress::No) + .unwrap(); + let g1_neg_be: [u8; 64] = convert_endianness::<32, 
64>(&g1_neg_be); + + let points = [(g1, g1_be), (g1_neg, g1_neg_be)]; + + for (point, g1_be) in &points { + let mut compressed_ref = [0u8; 32]; + G1::serialize_with_mode(point, compressed_ref.as_mut_slice(), Compress::Yes).unwrap(); + let compressed_ref: [u8; 32] = convert_endianness::<32, 32>(&compressed_ref); + + let decompressed = alt_bn128_g1_decompress(compressed_ref.as_slice()).unwrap(); + + assert_eq!( + alt_bn128_g1_compress(&decompressed).unwrap(), + compressed_ref + ); + assert_eq!(decompressed, *g1_be); + } + } + + #[test] + fn alt_bn128_g2_compression() { + let g2_be = [ + 40, 57, 233, 205, 180, 46, 35, 111, 215, 5, 23, 93, 12, 71, 118, 225, 7, 46, 247, 147, + 47, 130, 106, 189, 184, 80, 146, 103, 141, 52, 242, 25, 0, 203, 124, 176, 110, 34, 151, + 212, 66, 180, 238, 151, 236, 189, 133, 209, 17, 137, 205, 183, 168, 196, 92, 159, 75, + 174, 81, 168, 18, 86, 176, 56, 16, 26, 210, 20, 18, 81, 122, 142, 104, 62, 251, 169, + 98, 141, 21, 253, 50, 130, 182, 15, 33, 109, 228, 31, 79, 183, 88, 147, 174, 108, 4, + 22, 14, 129, 168, 6, 80, 246, 254, 100, 218, 131, 94, 49, 247, 211, 3, 245, 22, 200, + 177, 91, 60, 144, 147, 174, 90, 17, 19, 189, 62, 147, 152, 18, + ]; + let g2_le = convert_endianness::<64, 128>(&g2_be); + let g2: G2 = + G2::deserialize_with_mode(g2_le.as_slice(), Compress::No, Validate::No).unwrap(); + + let g2_neg = g2.neg(); + let mut g2_neg_be = [0u8; 128]; + g2_neg + .x + .serialize_with_mode(&mut g2_neg_be[..64], Compress::No) + .unwrap(); + g2_neg + .y + .serialize_with_mode(&mut g2_neg_be[64..128], Compress::No) + .unwrap(); + let g2_neg_be: [u8; 128] = convert_endianness::<64, 128>(&g2_neg_be); + + let points = [(g2, g2_be), (g2_neg, g2_neg_be)]; + + for (point, g2_be) in &points { + let mut compressed_ref = [0u8; 64]; + G2::serialize_with_mode(point, compressed_ref.as_mut_slice(), Compress::Yes).unwrap(); + let compressed_ref: [u8; 64] = convert_endianness::<64, 64>(&compressed_ref); + + let decompressed = 
alt_bn128_g2_decompress(compressed_ref.as_slice()).unwrap(); + + assert_eq!( + alt_bn128_g2_compress(&decompressed).unwrap(), + compressed_ref + ); + assert_eq!(decompressed, *g2_be); + } + } + + #[test] + fn alt_bn128_compression_g1_point_of_infitity() { + let g1_bytes = vec![0u8; 64]; + let g1_compressed = alt_bn128_g1_compress(&g1_bytes).unwrap(); + let g1_decompressed = alt_bn128_g1_decompress(&g1_compressed).unwrap(); + assert_eq!(g1_bytes, g1_decompressed); + } + + #[test] + fn alt_bn128_compression_g2_point_of_infitity() { + let g1_bytes = vec![0u8; 128]; + let g1_compressed = alt_bn128_g2_compress(&g1_bytes).unwrap(); + let g1_decompressed = alt_bn128_g2_decompress(&g1_compressed).unwrap(); + assert_eq!(g1_bytes, g1_decompressed); + } + #[test] + fn alt_bn128_compression_pairing_test_input() { + use serde::Deserialize; + + let test_data = r#"[ + { + "Input": "1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "jeff1", + "Gas": 113000, + "NoBenchmark": false + },{ + "Input": 
"2eca0c7238bf16e83e7a1e6c5d49540685ff51380f309842a98561558019fc0203d3260361bb8451de5ff5ecd17f010ff22f5c31cdf184e9020b06fa5997db841213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f06967a1237ebfeca9aaae0d6d0bab8e28c198c5a339ef8a2407e31cdac516db922160fa257a5fd5b280642ff47b65eca77e626cb685c84fa6d3b6882a283ddd1198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "jeff2", + "Gas": 113000, + "NoBenchmark": false + },{ + "Input": "0f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd216da2f5cb6be7a0aa72c440c53c9bbdfec6c36c7d515536431b3a865468acbba2e89718ad33c8bed92e210e81d1853435399a271913a6520736a4729cf0d51eb01a9e2ffa2e92599b68e44de5bcf354fa2642bd4f26b259daa6f7ce3ed57aeb314a9a87b789a58af499b314e13c3d65bede56c07ea2d418d6874857b70763713178fb49a2d6cd347dc58973ff49613a20757d0fcc22079f9abd10c3baee245901b9e027bd5cfc2cb5db82d4dc9677ac795ec500ecd47deee3b5da006d6d049b811d7511c78158de484232fc68daf8a45cf217d1c2fae693ff5871e8752d73b21198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "jeff3", + "Gas": 113000, + "NoBenchmark": false + },{ + "Input": 
"2f2ea0b3da1e8ef11914acf8b2e1b32d99df51f5f4f206fc6b947eae860eddb6068134ddb33dc888ef446b648d72338684d678d2eb2371c61a50734d78da4b7225f83c8b6ab9de74e7da488ef02645c5a16a6652c3c71a15dc37fe3a5dcb7cb122acdedd6308e3bb230d226d16a105295f523a8a02bfc5e8bd2da135ac4c245d065bbad92e7c4e31bf3757f1fe7362a63fbfee50e7dc68da116e67d600d9bf6806d302580dc0661002994e7cd3a7f224e7ddc27802777486bf80f40e4ca3cfdb186bac5188a98c45e6016873d107f5cd131f3a3e339d0375e58bd6219347b008122ae2b09e539e152ec5364e7e2204b03d11d3caa038bfc7cd499f8176aacbee1f39e4e4afc4bc74790a4a028aff2c3d2538731fb755edefd8cb48d6ea589b5e283f150794b6736f670d6a1033f9b46c6f5204f50813eb85c8dc4b59db1c5d39140d97ee4d2b36d99bc49974d18ecca3e7ad51011956051b464d9e27d46cc25e0764bb98575bd466d32db7b15f582b2d5c452b36aa394b789366e5e3ca5aabd415794ab061441e51d01e94640b7e3084a07e02c78cf3103c542bc5b298669f211b88da1679b0b64a63b7e0e7bfe52aae524f73a55be7fe70c7e9bfc94b4cf0da1213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "jeff4", + "Gas": 147000, + "NoBenchmark": false + },{ + "Input": 
"20a754d2071d4d53903e3b31a7e98ad6882d58aec240ef981fdf0a9d22c5926a29c853fcea789887315916bbeb89ca37edb355b4f980c9a12a94f30deeed30211213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f1abb4a25eb9379ae96c84fff9f0540abcfc0a0d11aeda02d4f37e4baf74cb0c11073b3ff2cdbb38755f8691ea59e9606696b3ff278acfc098fa8226470d03869217cee0a9ad79a4493b5253e2e4e3a39fc2df38419f230d341f60cb064a0ac290a3d76f140db8418ba512272381446eb73958670f00cf46f1d9e64cba057b53c26f64a8ec70387a13e41430ed3ee4a7db2059cc5fc13c067194bcc0cb49a98552fd72bd9edb657346127da132e5b82ab908f5816c826acb499e22f2412d1a2d70f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd2198a1f162a73261f112401aa2db79c7dab1533c9935c77290a6ce3b191f2318d198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "jeff5", + "Gas": 147000, + "NoBenchmark": false + },{ + "Input": 
"1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c103188585e2364128fe25c70558f1560f4f9350baf3959e603cc91486e110936198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "Expected": "0000000000000000000000000000000000000000000000000000000000000000", + "Name": "jeff6", + "Gas": 113000, + "NoBenchmark": false + },{ + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "Expected": "0000000000000000000000000000000000000000000000000000000000000000", + "Name": "one_point", + "Gas": 79000, + "NoBenchmark": false + },{ + "Input": 
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "two_point_match_2", + "Gas": 113000, + "NoBenchmark": false + },{ + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "two_point_match_3", + "Gas": 113000, + "NoBenchmark": false + },{ + "Input": 
"105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "two_point_match_4", + "Gas": 113000, + "NoBenchmark": false + },{ + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395b
c4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711e
f39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "ten_point_match_1", + "Gas": 385000, + "NoBenchmark": false + },{ + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722
b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d
1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "ten_point_match_2", + "Gas": 385000, + "NoBenchmark": false + },{ + "Input": "105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "ten_point_match_3", + "Gas": 113000, + "NoBenchmark": false + } + ]"#; + + #[derive(Deserialize)] + #[serde(rename_all = "PascalCase")] + struct TestCase { + input: 
String, + } + + let test_cases: Vec = serde_json::from_str(test_data).unwrap(); + + test_cases.iter().for_each(|test| { + let input = array_bytes::hex2bytes_unchecked(&test.input); + let g1 = input[0..64].to_vec(); + let g1_compressed = alt_bn128_g1_compress(&g1).unwrap(); + assert_eq!(g1, alt_bn128_g1_decompress(&g1_compressed).unwrap()); + let g2 = input[64..192].to_vec(); + let g2_compressed = alt_bn128_g2_compress(&g2).unwrap(); + assert_eq!(g2, alt_bn128_g2_decompress(&g2_compressed).unwrap()); + }); + } +} diff --git a/sdk/program/src/alt_bn128.rs b/sdk/program/src/alt_bn128/mod.rs similarity index 99% rename from sdk/program/src/alt_bn128.rs rename to sdk/program/src/alt_bn128/mod.rs index 52ebd8762e87d1..9e60048e4c8889 100644 --- a/sdk/program/src/alt_bn128.rs +++ b/sdk/program/src/alt_bn128/mod.rs @@ -1,3 +1,4 @@ +pub mod compression; pub mod prelude { pub use crate::alt_bn128::{consts::*, target_arch::*, AltBn128Error}; } diff --git a/sdk/program/src/syscalls/definitions.rs b/sdk/program/src/syscalls/definitions.rs index c3aa74ff8de211..b2dedceba953a0 100644 --- a/sdk/program/src/syscalls/definitions.rs +++ b/sdk/program/src/syscalls/definitions.rs @@ -71,6 +71,7 @@ define_syscall!(fn sol_big_mod_exp(params: *const u8, result: *mut u8) -> u64); define_syscall!(fn sol_get_epoch_rewards_sysvar(addr: *mut u8) -> u64); define_syscall!(fn sol_poseidon(parameters: u64, endianness: u64, vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); define_syscall!(fn sol_remaining_compute_units() -> u64); +define_syscall!(fn sol_alt_bn128_compression(op: u64, input: *const u8, input_size: u64, result: *mut u8) -> u64); #[cfg(target_feature = "static-syscalls")] pub const fn sys_hash(name: &str) -> usize { diff --git a/sdk/sbf/c/inc/sol/alt_bn128_compression.h b/sdk/sbf/c/inc/sol/alt_bn128_compression.h new file mode 100644 index 00000000000000..d3f91bd41fbbe8 --- /dev/null +++ b/sdk/sbf/c/inc/sol/alt_bn128_compression.h @@ -0,0 +1,91 @@ +#pragma once +/** + * 
@brief Solana bn128 elliptic curve compression and decompression +**/ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Output length for the g1 compress operation. + */ +#define ALT_BN128_COMPRESSION_G1_COMPRESS_OUTPUT_LEN 32 + +/** + * Output length for the g1 decompress operation. + */ +#define ALT_BN128_COMPRESSION_G1_DECOMPRESS_OUTPUT_LEN 64 + +/** + * Output length for the g1 compress operation. + */ +#define ALT_BN128_COMPRESSION_G2_COMPRESS_OUTPUT_LEN 64 + +/** + * Output length for the g2 decompress operation. + */ +#define ALT_BN128_COMPRESSION_G2_DECOMPRESS_OUTPUT_LEN 128 + +/** + * G1 compression operation. + */ +#define ALT_BN128_G1_COMPRESS 0 + +/** + * G1 decompression operation. + */ +#define ALT_BN128_G1_DECOMPRESS 1 + +/** + * G2 compression operation. + */ +#define ALT_BN128_G2_COMPRESS 2 + +/** + * G2 decompression operation. + */ +#define ALT_BN128_G2_DECOMPRESS 3 + +/** + * Compression of alt_bn128 g1 and g2 points + * + * @param op ... + * @param input ... + * @param input_size ... + * @param result 64 byte array to hold the result. ... + * @return 0 if executed successfully + */ +/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/sbf/c/inc/sol/inc/alt_bn128_compression.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +uint64_t sol_alt_bn128_compression( + const uint64_t op, + const uint8_t *input, + const uint64_t input_size, + uint8_t *result +); +#else +typedef uint64_t(*sol_alt_bn128_compression_pointer_type)( + const uint64_t op, + const uint8_t *input, + const uint64_t input_size, + uint8_t *result +); +static uint64_t sol_alt_bn128_compression( + const uint64_t op arg1, + const uint8_t *input arg2, + const uint64_t input_size arg3, + uint8_t *result + arg4) { + sol_alt_bn128_compression_pointer_type sol_alt_bn128_compression_pointer = (sol_alt_bn128_compression_pointer_type) 860870125; + return sol_alt_bn128_compression_pointer(arg1, arg2, arg3, arg4); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/inc/alt_bn128_compression.inc b/sdk/sbf/c/inc/sol/inc/alt_bn128_compression.inc new file mode 100644 index 00000000000000..e70d9b05e87eca --- /dev/null +++ b/sdk/sbf/c/inc/sol/inc/alt_bn128_compression.inc @@ -0,0 +1,72 @@ +#pragma once +/** + * @brief Solana bn128 elliptic curve compression and decompression +**/ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Output length for the g1 compress operation. + */ +#define ALT_BN128_COMPRESSION_G1_COMPRESS_OUTPUT_LEN 32 + +/** + * Output length for the g1 decompress operation. + */ +#define ALT_BN128_COMPRESSION_G1_DECOMPRESS_OUTPUT_LEN 64 + +/** + * Output length for the g1 compress operation. + */ +#define ALT_BN128_COMPRESSION_G2_COMPRESS_OUTPUT_LEN 64 + +/** + * Output length for the g2 decompress operation. + */ +#define ALT_BN128_COMPRESSION_G2_DECOMPRESS_OUTPUT_LEN 128 + +/** + * G1 compression operation. + */ +#define ALT_BN128_G1_COMPRESS 0 + +/** + * G1 decompression operation. + */ +#define ALT_BN128_G1_DECOMPRESS 1 + +/** + * G2 compression operation. + */ +#define ALT_BN128_G2_COMPRESS 2 + +/** + * G2 decompression operation. 
+ */ +#define ALT_BN128_G2_DECOMPRESS 3 + +/** + * Compression of alt_bn128 g1 and g2 points + * + * @param op ... + * @param input ... + * @param input_size ... + * @param result 64 byte array to hold the result. ... + * @return 0 if executed successfully + */ +@SYSCALL uint64_t sol_alt_bn128_compression( + const uint64_t op, + const uint8_t *input, + const uint64_t input_size, + uint8_t *result +); + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 4810acb8d13a6c..a2c7170fa2525d 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -557,6 +557,9 @@ pub mod enable_bpf_loader_set_authority_checked_ix { pub mod enable_alt_bn128_syscall { solana_sdk::declare_id!("A16q37opZdQMCbe5qJ6xpBB9usykfv8jZaMkxvZQi4GJ"); } +pub mod enable_alt_bn128_compression_syscall { + solana_sdk::declare_id!("Compression111111111111111111111111111111111"); +} pub mod enable_program_redeployment_cooldown { solana_sdk::declare_id!("J4HFT8usBxpcF63y46t1upYobJgChmKyZPm5uTBRg25Z"); @@ -866,6 +869,7 @@ lazy_static! 
{ (enable_program_runtime_v2_and_loader_v4::id(), "Enable Program-Runtime-v2 and Loader-v4 #33293"), (require_rent_exempt_split_destination::id(), "Require stake split destination account to be rent exempt"), (better_error_codes_for_tx_lamport_check::id(), "better error codes for tx lamport check #33353"), + (enable_alt_bn128_compression_syscall::id(), "add alt_bn128 compression syscalls"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From def8b8fc6282e2fa730cd0dece6828aea98afc51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 25 Sep 2023 17:03:45 +0200 Subject: [PATCH 168/407] Bump solana_rbpf to v0.7.2 (#33394) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- programs/sbf/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3b4f81d1a8631..2e3cdb4923db0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7607,9 +7607,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08e812351a5c726e51fa6aaae8687c661acfeb9a8b651bd58fc413a58701a58" +checksum = "103318aa365ff7caa8cf534f2246b5eb7e5b34668736d52b1266b143f7a21196" dependencies = [ "byteorder", "combine", diff --git a/Cargo.toml b/Cargo.toml index 7d701e693c595c..28e4b792f4826a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -298,7 +298,7 @@ siphasher = "0.3.11" smpl_jwt = "0.7.1" socket2 = "0.5.4" soketto = "0.7" -solana_rbpf = "=0.7.1" +solana_rbpf = "=0.7.2" solana-account-decoder = { path = "account-decoder", version = "=1.17.0" } solana-accounts-db = { path = "accounts-db", version = "=1.17.0" } solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=1.17.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f3779ae0c794e6..2662fa4c9f45bc 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6537,9 
+6537,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08e812351a5c726e51fa6aaae8687c661acfeb9a8b651bd58fc413a58701a58" +checksum = "103318aa365ff7caa8cf534f2246b5eb7e5b34668736d52b1266b143f7a21196" dependencies = [ "byteorder 1.4.3", "combine", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 6c2182e0bfab7d..15d4b12e910d70 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -25,7 +25,7 @@ rand = "0.8" rustversion = "1.0.14" serde = "1.0.112" serde_json = "1.0.56" -solana_rbpf = "=0.7.1" +solana_rbpf = "=0.7.2" solana-account-decoder = { path = "../../account-decoder", version = "=1.17.0" } solana-accounts-db = { path = "../../accounts-db", version = "=1.17.0" } solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.17.0" } From 7ff797bcef821d2cd4d09d2bae421ebc649fa594 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 25 Sep 2023 17:16:31 +0200 Subject: [PATCH 169/407] Refactor - Remove parameter `feature_set` from `load_program_from_bytes()` (#33395) Replaces parameter feature_set with delay_visibility_of_program_deployment in load_program_from_bytes(). 
--- ledger-tool/src/program.rs | 5 ++++- programs/bpf_loader/src/lib.rs | 10 +++++----- runtime/src/bank.rs | 6 ++++-- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index ef72c98a4da403..4acad738160be0 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -27,6 +27,7 @@ use { account::AccountSharedData, account_utils::StateMut, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + feature_set, pubkey::Pubkey, slot_history::Slot, transaction_context::{IndexOfAccount, InstructionAccount}, @@ -357,7 +358,9 @@ fn load_program<'a>( #[allow(unused_mut)] let mut verified_executable = if is_elf { let result = load_program_from_bytes( - &invoke_context.feature_set, + invoke_context + .feature_set + .is_active(&feature_set::delay_visibility_of_program_deployment::id()), log_collector, &mut load_program_metrics, &contents, diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index ae585d8f9582f2..fa4bc849028144 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -35,7 +35,7 @@ use { cap_bpf_program_instruction_accounts, delay_visibility_of_program_deployment, enable_bpf_loader_extend_program_ix, enable_bpf_loader_set_authority_checked_ix, enable_program_redeployment_cooldown, limit_max_instruction_trace_length, - native_programs_consume_cu, remove_bpf_loader_incorrect_program_id, FeatureSet, + native_programs_consume_cu, remove_bpf_loader_incorrect_program_id, }, instruction::{AccountMeta, InstructionError}, loader_instruction::LoaderInstruction, @@ -67,7 +67,7 @@ pub const UPGRADEABLE_LOADER_COMPUTE_UNITS: u64 = 2_370; #[allow(clippy::too_many_arguments)] pub fn load_program_from_bytes( - feature_set: &FeatureSet, + delay_visibility_of_program_deployment: bool, log_collector: Option>>, load_program_metrics: &mut LoadProgramMetrics, programdata: &[u8], @@ -77,7 +77,7 @@ pub fn load_program_from_bytes( 
program_runtime_environment: Arc>>, reloading: bool, ) -> Result { - let effective_slot = if feature_set.is_active(&delay_visibility_of_program_deployment::id()) { + let effective_slot = if delay_visibility_of_program_deployment { deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET) } else { deployment_slot @@ -132,7 +132,7 @@ macro_rules! deploy_program { register_syscalls_time.stop(); load_program_metrics.register_syscalls_us = register_syscalls_time.as_us(); let executor = load_program_from_bytes( - &$invoke_context.feature_set, + $invoke_context.feature_set.is_active(&delay_visibility_of_program_deployment::id()), $invoke_context.get_log_collector(), &mut load_program_metrics, $new_programdata, @@ -1707,7 +1707,7 @@ pub mod test_utils { .expect("Failed to get account key"); if let Ok(loaded_program) = load_program_from_bytes( - &FeatureSet::all_enabled(), + true, None, &mut load_program_metrics, account.data(), diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index c767ce4a3120c9..3c8cba7471f51f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4684,7 +4684,8 @@ impl Bank { ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { solana_bpf_loader_program::load_program_from_bytes( - &self.feature_set, + self.feature_set + .is_active(&feature_set::delay_visibility_of_program_deployment::id()), None, &mut load_program_metrics, program_account.data(), @@ -4706,7 +4707,8 @@ impl Bank { .ok_or(InstructionError::InvalidAccountData) .and_then(|programdata| { solana_bpf_loader_program::load_program_from_bytes( - &self.feature_set, + self.feature_set + .is_active(&feature_set::delay_visibility_of_program_deployment::id()), None, &mut load_program_metrics, programdata, From d9a113baa25826824e60fe920baf38b6f211ad80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 16:15:21 +0000 Subject: [PATCH 170/407] build(deps): bump semver from 1.0.18 to 1.0.19 (#33389) * 
build(deps): bump semver from 1.0.18 to 1.0.19 Bumps [semver](https://github.com/dtolnay/semver) from 1.0.18 to 1.0.19. - [Release notes](https://github.com/dtolnay/semver/releases) - [Commits](https://github.com/dtolnay/semver/compare/1.0.18...1.0.19) --- updated-dependencies: - dependency-name: semver dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e3cdb4923db0a..7218a80cb1c4ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -963,7 +963,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.18", + "semver 1.0.19", "serde", "serde_json", "thiserror", @@ -4507,7 +4507,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.18", + "semver 1.0.19", ] [[package]] @@ -4707,9 +4707,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" dependencies = [ "serde", ] @@ -5455,7 +5455,7 @@ dependencies = [ "predicates", "regex", "reqwest", - "semver 1.0.18", + "semver 1.0.19", "serial_test", "solana-download-utils", "solana-logger", @@ -5536,7 +5536,7 @@ dependencies = [ "num-traits", "pretty-hex", "reqwest", - "semver 1.0.18", + "semver 1.0.19", 
"serde", "serde_derive", "serde_json", @@ -5598,7 +5598,7 @@ dependencies = [ "humantime", "indicatif", "pretty-hex", - "semver 1.0.18", + "semver 1.0.19", "serde", "serde_json", "solana-account-decoder", @@ -6101,7 +6101,7 @@ dependencies = [ "nix 0.26.4", "reqwest", "scopeguard", - "semver 1.0.18", + "semver 1.0.19", "serde", "serde_yaml 0.8.26", "serde_yaml 0.9.25", @@ -6600,7 +6600,7 @@ dependencies = [ "futures-util", "log", "reqwest", - "semver 1.0.18", + "semver 1.0.19", "serde", "serde_derive", "serde_json", @@ -6664,7 +6664,7 @@ dependencies = [ "num-traits", "parking_lot 0.12.1", "qstring", - "semver 1.0.18", + "semver 1.0.19", "solana-sdk", "thiserror", "uriparse", @@ -6744,7 +6744,7 @@ dependencies = [ "jsonrpc-http-server", "log", "reqwest", - "semver 1.0.18", + "semver 1.0.19", "serde", "serde_derive", "serde_json", @@ -6765,7 +6765,7 @@ dependencies = [ "bs58", "jsonrpc-core", "reqwest", - "semver 1.0.18", + "semver 1.0.19", "serde", "serde_derive", "serde_json", @@ -7404,7 +7404,7 @@ version = "1.17.0" dependencies = [ "log", "rustc_version 0.4.0", - "semver 1.0.18", + "semver 1.0.19", "serde", "serde_derive", "solana-frozen-abi", diff --git a/Cargo.toml b/Cargo.toml index 28e4b792f4826a..34ebe5c3b76857 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -283,7 +283,7 @@ rustc_version = "0.4" rustls = { version = "0.21.7", default-features = false, features = ["quic"] } rustversion = "1.0.14" scopeguard = "1.2.0" -semver = "1.0.18" +semver = "1.0.19" serde = "1.0.188" serde_bytes = "0.11.12" serde_derive = "1.0.103" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2662fa4c9f45bc..8863bef090ed83 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4128,9 +4128,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = 
"ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" [[package]] name = "serde" From 08aba38d3507c8cb66f85074d8f1249d43e64a75 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Mon, 25 Sep 2023 09:26:17 -0700 Subject: [PATCH 171/407] [feature-id] add poseidon compression syscall feature id (#33392) add poseidon compression syscall feature id --- sdk/src/feature_set.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index a2c7170fa2525d..b414a5f6ab4551 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -558,7 +558,7 @@ pub mod enable_alt_bn128_syscall { solana_sdk::declare_id!("A16q37opZdQMCbe5qJ6xpBB9usykfv8jZaMkxvZQi4GJ"); } pub mod enable_alt_bn128_compression_syscall { - solana_sdk::declare_id!("Compression111111111111111111111111111111111"); + solana_sdk::declare_id!("EJJewYSddEEtSZHiqugnvhQHiWyZKjkFDQASd7oKSagn"); } pub mod enable_program_redeployment_cooldown { From 57e78a16dc762174faaf72f806e62f817892040b Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Mon, 25 Sep 2023 13:11:26 -0500 Subject: [PATCH 172/407] heap_size type to be consistent with request instruction (#33354) * heap_size type to be consistent with request instruction * update tests --- program-runtime/src/compute_budget.rs | 12 +++++----- programs/bpf_loader/src/lib.rs | 33 ++++++++++----------------- programs/loader-v4/src/lib.rs | 13 +++++------ 3 files changed, 24 insertions(+), 34 deletions(-) diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index 09567cea7bcba8..f9239224b488a0 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -102,7 +102,7 @@ pub struct ComputeBudget { /// The total cost is calculated as `msm_base_cost + (length - 1) * msm_incremental_cost`. 
pub curve25519_ristretto_msm_incremental_cost: u64, /// program heap region size, default: solana_sdk::entrypoint::HEAP_LENGTH - pub heap_size: usize, + pub heap_size: u32, /// Number of compute units per additional 32k heap above the default (~.5 /// us per 32k at 15 units/us rounded up) pub heap_cost: u64, @@ -179,7 +179,7 @@ impl ComputeBudget { curve25519_ristretto_multiply_cost: 2_208, curve25519_ristretto_msm_base_cost: 2303, curve25519_ristretto_msm_incremental_cost: 788, - heap_size: solana_sdk::entrypoint::HEAP_LENGTH, + heap_size: u32::try_from(solana_sdk::entrypoint::HEAP_LENGTH).unwrap(), heap_cost: 8, mem_op_base_cost: 10, alt_bn128_addition_cost: 334, @@ -279,7 +279,7 @@ impl ComputeBudget { InstructionError::InvalidInstructionData, )); } - self.heap_size = bytes as usize; + self.heap_size = bytes; } let compute_unit_limit = updated_compute_unit_limit @@ -524,7 +524,7 @@ mod tests { Ok(PrioritizationFeeDetails::default()), ComputeBudget { compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: MAX_HEAP_FRAME_BYTES as usize, + heap_size: MAX_HEAP_FRAME_BYTES, ..ComputeBudget::default() } ); @@ -574,7 +574,7 @@ mod tests { )), ComputeBudget { compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - heap_size: MAX_HEAP_FRAME_BYTES as usize, + heap_size: MAX_HEAP_FRAME_BYTES, ..ComputeBudget::default() } ); @@ -592,7 +592,7 @@ mod tests { )), ComputeBudget { compute_unit_limit: 1, - heap_size: MAX_HEAP_FRAME_BYTES as usize, + heap_size: MAX_HEAP_FRAME_BYTES, ..ComputeBudget::default() } ); diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index fa4bc849028144..eaefd76f1ae034 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -191,10 +191,10 @@ pub fn check_loader_id(id: &Pubkey) -> bool { } /// Only used in macro, do not use directly! 
-pub fn calculate_heap_cost(heap_size: u64, heap_cost: u64, enable_rounding_fix: bool) -> u64 { +pub fn calculate_heap_cost(heap_size: u32, heap_cost: u64, enable_rounding_fix: bool) -> u64 { const KIBIBYTE: u64 = 1024; const PAGE_SIZE_KB: u64 = 32; - let mut rounded_heap_size = heap_size; + let mut rounded_heap_size = u64::from(heap_size); if enable_rounding_fix { rounded_heap_size = rounded_heap_size .saturating_add(PAGE_SIZE_KB.saturating_mul(KIBIBYTE).saturating_sub(1)); @@ -267,7 +267,7 @@ macro_rules! create_vm { .feature_set .is_active(&solana_sdk::feature_set::round_up_heap_size::id()); let mut heap_cost_result = invoke_context.consume_checked($crate::calculate_heap_cost( - heap_size as u64, + heap_size, invoke_context.get_compute_budget().heap_cost, round_up_heap_size, )); @@ -281,7 +281,7 @@ macro_rules! create_vm { >::zero_filled(stack_size); let mut heap = solana_rbpf::aligned_memory::AlignedMemory::< { solana_rbpf::ebpf::HOST_ALIGN }, - >::zero_filled(heap_size); + >::zero_filled(usize::try_from(heap_size).unwrap()); let vm = $crate::create_vm( $program, $regions, @@ -4033,40 +4033,31 @@ mod tests { // when `enable_heap_size_round_up` not enabled: { // assert less than 32K heap should cost zero unit - assert_eq!(0, calculate_heap_cost(31_u64 * 1024, heap_cost, false)); + assert_eq!(0, calculate_heap_cost(31 * 1024, heap_cost, false)); // assert exact 32K heap should be cost zero unit - assert_eq!(0, calculate_heap_cost(32_u64 * 1024, heap_cost, false)); + assert_eq!(0, calculate_heap_cost(32 * 1024, heap_cost, false)); // assert slightly more than 32K heap is mistakenly cost zero unit - assert_eq!(0, calculate_heap_cost(33_u64 * 1024, heap_cost, false)); + assert_eq!(0, calculate_heap_cost(33 * 1024, heap_cost, false)); // assert exact 64K heap should cost 1 * heap_cost - assert_eq!( - heap_cost, - calculate_heap_cost(64_u64 * 1024, heap_cost, false) - ); + assert_eq!(heap_cost, calculate_heap_cost(64 * 1024, heap_cost, false)); } // when 
`enable_heap_size_round_up` is enabled: { // assert less than 32K heap should cost zero unit - assert_eq!(0, calculate_heap_cost(31_u64 * 1024, heap_cost, true)); + assert_eq!(0, calculate_heap_cost(31 * 1024, heap_cost, true)); // assert exact 32K heap should be cost zero unit - assert_eq!(0, calculate_heap_cost(32_u64 * 1024, heap_cost, true)); + assert_eq!(0, calculate_heap_cost(32 * 1024, heap_cost, true)); // assert slightly more than 32K heap should cost 1 * heap_cost - assert_eq!( - heap_cost, - calculate_heap_cost(33_u64 * 1024, heap_cost, true) - ); + assert_eq!(heap_cost, calculate_heap_cost(33 * 1024, heap_cost, true)); // assert exact 64K heap should cost 1 * heap_cost - assert_eq!( - heap_cost, - calculate_heap_cost(64_u64 * 1024, heap_cost, true) - ); + assert_eq!(heap_cost, calculate_heap_cost(64 * 1024, heap_cost, true)); } } diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index c22d95856f157e..4645b33c26a6e0 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -94,10 +94,10 @@ pub fn create_program_runtime_environment_v2<'a>( BuiltinProgram::new_loader(config, FunctionRegistry::default()) } -fn calculate_heap_cost(heap_size: u64, heap_cost: u64) -> u64 { +fn calculate_heap_cost(heap_size: u32, heap_cost: u64) -> u64 { const KIBIBYTE: u64 = 1024; const PAGE_SIZE_KB: u64 = 32; - heap_size + u64::from(heap_size) .saturating_add(PAGE_SIZE_KB.saturating_mul(KIBIBYTE).saturating_sub(1)) .checked_div(PAGE_SIZE_KB.saturating_mul(KIBIBYTE)) .expect("PAGE_SIZE_KB * KIBIBYTE > 0") @@ -114,12 +114,11 @@ pub fn create_vm<'a, 'b>( let sbpf_version = program.get_sbpf_version(); let compute_budget = invoke_context.get_compute_budget(); let heap_size = compute_budget.heap_size; - invoke_context.consume_checked(calculate_heap_cost( - heap_size as u64, - compute_budget.heap_cost, - ))?; + invoke_context.consume_checked(calculate_heap_cost(heap_size, compute_budget.heap_cost))?; let mut stack = AlignedMemory::<{ 
ebpf::HOST_ALIGN }>::zero_filled(config.stack_size()); - let mut heap = AlignedMemory::<{ ebpf::HOST_ALIGN }>::zero_filled(compute_budget.heap_size); + let mut heap = AlignedMemory::<{ ebpf::HOST_ALIGN }>::zero_filled( + usize::try_from(compute_budget.heap_size).unwrap(), + ); let stack_len = stack.len(); let regions: Vec = vec![ program.get_ro_region(), From 18231e9a5a242c4c0e7567a71480c1b043a14677 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 25 Sep 2023 12:19:52 -0700 Subject: [PATCH 173/407] dump final startup index stats only after startup is complete (#33400) --- accounts-db/src/bucket_map_holder_stats.rs | 2 +- bucket_map/src/bucket.rs | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/bucket_map_holder_stats.rs b/accounts-db/src/bucket_map_holder_stats.rs index df7180bfecb8e7..4df611539d16ed 100644 --- a/accounts-db/src/bucket_map_holder_stats.rs +++ b/accounts-db/src/bucket_map_holder_stats.rs @@ -226,7 +226,7 @@ impl BucketMapHolderStats { // sum of elapsed time in each thread let mut thread_time_elapsed_ms = elapsed_ms * storage.threads as u64; if disk.is_some() { - if startup || was_startup { + if was_startup { // these stats only apply at startup datapoint_info!( "accounts_index_startup", diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index e727e8424a8a10..0d387bacb803d7 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -369,16 +369,15 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { items.len().saturating_sub(duplicates.len()) as u64, Ordering::Relaxed, ); - self.index.stats.startup.entries_reused.fetch_add( + let stats = &self.index.stats.startup; + stats.entries_reused.fetch_add( items .len() .saturating_sub(duplicates.len()) .saturating_sub(entries_created_on_disk) as u64, Ordering::Relaxed, ); - self.index - .stats - .startup + stats .entries_created .fetch_add(entries_created_on_disk as u64, Ordering::Relaxed); return 
duplicates; From 85cc6ace05fb9845f692a692d2e826414c2c0a46 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Mon, 25 Sep 2023 12:33:38 -0700 Subject: [PATCH 174/407] Update is_locked_out cache when adopting on chain vote state (#33341) * Update is_locked_out cache when adopting on chain vote state * extend to all cached tower checks * upgrade error to panic --- core/src/consensus.rs | 2 +- core/src/replay_stage.rs | 157 +++++++++++++++++++++++++++++++-------- 2 files changed, 127 insertions(+), 32 deletions(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 50c04dbbf486fe..675dfc691e675d 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -546,7 +546,7 @@ impl Tower { let vote = Vote::new(vec![vote_slot], vote_hash); let result = process_vote_unchecked(&mut self.vote_state, vote); if result.is_err() { - error!( + panic!( "Error while recording vote {} {} in local tower {:?}", vote_slot, vote_hash, result ); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 0fec5020d6dcb9..5a6d825e3aa3d7 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -3063,7 +3063,7 @@ impl ReplayStage { pub fn compute_bank_stats( my_vote_pubkey: &Pubkey, ancestors: &HashMap>, - frozen_banks: &mut Vec>, + frozen_banks: &mut [Arc], tower: &mut Tower, progress: &mut ProgressMap, vote_tracker: &VoteTracker, @@ -3074,7 +3074,7 @@ impl ReplayStage { ) -> Vec { frozen_banks.sort_by_key(|bank| bank.slot()); let mut new_stats = vec![]; - for bank in frozen_banks { + for bank in frozen_banks.iter() { let bank_slot = bank.slot(); // Only time progress map should be missing a bank slot // is if this node was the leader for this slot as those banks @@ -3172,6 +3172,11 @@ impl ReplayStage { .get_hash(last_voted_slot) .expect("Must exist for us to have frozen descendant"), ); + // Since we are updating our tower we need to update associated caches for previously computed + // slots as well. 
+ for slot in frozen_banks.iter().map(|b| b.slot()) { + Self::cache_tower_stats(progress, tower, slot, ancestors); + } } } } @@ -3232,24 +3237,33 @@ impl ReplayStage { cluster_slots, ); - let stats = progress - .get_fork_stats_mut(bank_slot) - .expect("All frozen banks must exist in the Progress map"); - - stats.vote_threshold = - tower.check_vote_stake_threshold(bank_slot, &stats.voted_stakes, stats.total_stake); - stats.is_locked_out = tower.is_locked_out( - bank_slot, - ancestors - .get(&bank_slot) - .expect("Ancestors map should contain slot for is_locked_out() check"), - ); - stats.has_voted = tower.has_voted(bank_slot); - stats.is_recent = tower.is_recent(bank_slot); + Self::cache_tower_stats(progress, tower, bank_slot, ancestors); } new_stats } + fn cache_tower_stats( + progress: &mut ProgressMap, + tower: &Tower, + slot: Slot, + ancestors: &HashMap>, + ) { + let stats = progress + .get_fork_stats_mut(slot) + .expect("All frozen banks must exist in the Progress map"); + + stats.vote_threshold = + tower.check_vote_stake_threshold(slot, &stats.voted_stakes, stats.total_stake); + stats.is_locked_out = tower.is_locked_out( + slot, + ancestors + .get(&slot) + .expect("Ancestors map should contain slot for is_locked_out() check"), + ); + stats.has_voted = tower.has_voted(slot); + stats.is_recent = tower.is_recent(slot); + } + fn update_propagation_status( progress: &mut ProgressMap, slot: Slot, @@ -6345,7 +6359,7 @@ pub(crate) mod tests { // All forks have same weight so heaviest bank to vote/reset on should be the tip of // the fork with the lower slot - let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, _) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6361,7 +6375,7 @@ pub(crate) mod tests { // 4 should be the heaviest slot, but should not be votable // because of lockout. 5 is the heaviest slot on the same fork as the last vote. 
- let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, _) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6404,7 +6418,7 @@ pub(crate) mod tests { // 4 should be the heaviest slot, but should not be votable // because of lockout. 5 is no longer valid due to it being a duplicate. - let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, _) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6440,7 +6454,7 @@ pub(crate) mod tests { // the right version of the block, so `duplicate_slots_to_repair` // should be empty assert!(duplicate_slots_to_repair.is_empty()); - let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, _) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6492,7 +6506,7 @@ pub(crate) mod tests { // All forks have same weight so heaviest bank to vote/reset on should be the tip of // the fork with the lower slot - let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, _) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6536,7 +6550,7 @@ pub(crate) mod tests { SlotStateUpdate::Duplicate(duplicate_state), ); - let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, _) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6571,7 +6585,7 @@ pub(crate) mod tests { SlotStateUpdate::Duplicate(duplicate_state), ); - let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, _) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6611,7 +6625,7 @@ pub(crate) mod tests { // the right version of the block, so `duplicate_slots_to_repair` // should be empty assert!(duplicate_slots_to_repair.is_empty()); - let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, _) = 
run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -7967,7 +7981,7 @@ pub(crate) mod tests { heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, my_vote_pubkey: Option, - ) -> (Option, Option) { + ) -> (Option, Option, Vec) { let mut frozen_banks: Vec<_> = bank_forks .read() .unwrap() @@ -7994,7 +8008,7 @@ pub(crate) mod tests { let SelectVoteAndResetForkResult { vote_bank, reset_bank, - .. + heaviest_fork_failures, } = ReplayStage::select_vote_and_reset_forks( &heaviest_bank, heaviest_bank_on_same_fork.as_ref(), @@ -8008,6 +8022,7 @@ pub(crate) mod tests { ( vote_bank.map(|(b, _)| b.slot()), reset_bank.map(|b| b.slot()), + heaviest_fork_failures, ) } @@ -8077,7 +8092,7 @@ pub(crate) mod tests { } #[test] - fn test_tower_sync_from_bank() { + fn test_tower_sync_from_bank_failed_switch() { solana_logger::setup_with_default( "error,solana_core::replay_stage=info,solana_core::consensus=info", ); @@ -8096,9 +8111,10 @@ pub(crate) mod tests { slot 6 We had some point voted 0 - 6, while the rest of the network voted 0 - 4. - We are sitting with an oudated tower that has voted until 1. We see that 2 is the heaviest slot, + We are sitting with an oudated tower that has voted until 1. We see that 4 is the heaviest slot, however in the past we have voted up to 6. We must acknowledge the vote state present at 6, - adopt it as our own and *not* vote on 2 or 4, to respect slashing rules. 
+ adopt it as our own and *not* vote on 2 or 4, to respect slashing rules as there is + not enough stake to switch */ let generate_votes = |pubkeys: Vec| { @@ -8117,7 +8133,7 @@ pub(crate) mod tests { tower.record_vote(0, bank_hash(0)); tower.record_vote(1, bank_hash(1)); - let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, failures) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -8128,8 +8144,12 @@ pub(crate) mod tests { assert_eq!(vote_fork, None); assert_eq!(reset_fork, Some(6)); + assert_eq!( + failures, + vec![HeaviestForkFailures::FailedSwitchThreshold(4, 0, 30000),] + ); - let (vote_fork, reset_fork) = run_compute_and_select_forks( + let (vote_fork, reset_fork, failures) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -8140,5 +8160,80 @@ pub(crate) mod tests { assert_eq!(vote_fork, None); assert_eq!(reset_fork, Some(6)); + assert_eq!( + failures, + vec![HeaviestForkFailures::FailedSwitchThreshold(4, 0, 30000),] + ); + } + + #[test] + fn test_tower_sync_from_bank_failed_lockout() { + solana_logger::setup_with_default( + "error,solana_core::replay_stage=info,solana_core::consensus=info", + ); + /* + Fork structure: + + slot 0 + | + slot 1 + / \ + slot 3 | + | slot 2 + slot 4 | + slot 5 + | + slot 6 + + We had some point voted 0 - 6, while the rest of the network voted 0 - 4. + We are sitting with an oudated tower that has voted until 1. We see that 4 is the heaviest slot, + however in the past we have voted up to 6. We must acknowledge the vote state present at 6, + adopt it as our own and *not* vote on 3 or 4, to respect slashing rules as we are locked + out on 4, even though there is enough stake to switch. However we should still reset onto + 4. 
+ */ + + let generate_votes = |pubkeys: Vec| { + pubkeys + .into_iter() + .zip(iter::once(vec![0, 1, 2, 5, 6]).chain(iter::repeat(vec![0, 1, 3, 4]).take(2))) + .collect() + }; + let tree = tr(0) / (tr(1) / (tr(3) / (tr(4))) / (tr(2) / (tr(5) / (tr(6))))); + let (mut vote_simulator, _blockstore) = + setup_forks_from_tree(tree, 3, Some(Box::new(generate_votes))); + let (bank_forks, mut progress) = (vote_simulator.bank_forks, vote_simulator.progress); + let bank_hash = |slot| bank_forks.read().unwrap().bank_hash(slot).unwrap(); + let my_vote_pubkey = vote_simulator.vote_pubkeys[0]; + let mut tower = Tower::default(); + tower.node_pubkey = vote_simulator.node_pubkeys[0]; + tower.record_vote(0, bank_hash(0)); + tower.record_vote(1, bank_hash(1)); + + let (vote_fork, reset_fork, failures) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + Some(my_vote_pubkey), + ); + + assert_eq!(vote_fork, None); + assert_eq!(reset_fork, Some(4)); + assert_eq!(failures, vec![HeaviestForkFailures::LockedOut(4),]); + + let (vote_fork, reset_fork, failures) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + Some(my_vote_pubkey), + ); + + assert_eq!(vote_fork, None); + assert_eq!(reset_fork, Some(4)); + assert_eq!(failures, vec![HeaviestForkFailures::LockedOut(4),]); } } From 027f3dc6de5e0dca0ab1e6382825de4936630ea2 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 25 Sep 2023 12:48:05 -0700 Subject: [PATCH 175/407] disk idx: try to reuse disk index's exisiting data on startup (#33388) * disk idx: try to reuse disk index's exisiting data on startup * add tests * fix test and add test * update test comments * update comments --- bucket_map/src/bucket.rs | 673 
+++++++++++++++++++++++++++++++------- bucket_map/src/restart.rs | 1 - 2 files changed, 549 insertions(+), 125 deletions(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 0d387bacb803d7..1eb9ae18b60850 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -9,7 +9,7 @@ use { }, index_entry::{ DataBucket, IndexBucket, IndexEntry, IndexEntryPlaceInBucket, MultipleSlots, - OccupiedEnum, + OccupiedEnum, OccupyIfMatches, }, restart::RestartableBucket, MaxSearch, RefCount, @@ -360,6 +360,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { &mut entries, &mut entries_created_on_disk, &mut duplicates, + self.reused_file_at_startup, ); match result { Ok(_result) => { @@ -392,7 +393,56 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { } } - /// sort `entries` by hash value + /// insert every entry in `reverse_sorted_entries` into the index as long as we can find a location where the data in the index + /// file already matches the data we want to insert for the pubkey. + /// for every entry that already exists in `index`, add it (and the value already in the index) to `duplicates` + /// `reverse_sorted_entries` is (raw index (range = U64::MAX) in hash map, index in `items`) + /// Any entries where the disk couldn't be updated are returned in `reverse_sorted_entries` or `duplicates`. + /// The remaining items in `reverse_sorted_entries` can be inserted by over-writing non-matchingnew data to the index file. 
+ pub fn batch_insert_non_duplicates_reusing_file( + index: &mut BucketStorage>, + data_buckets: &[BucketStorage], + items: &[(Pubkey, T)], + reverse_sorted_entries: &mut Vec<(u64, usize)>, + duplicates: &mut Vec<(usize, T)>, + ) { + let max_search = index.max_search(); + let cap = index.capacity(); + let search_end = max_search.min(cap); + let mut not_found = Vec::default(); + // pop one entry at a time to insert + 'outer: while let Some((ix_entry_raw, ix)) = reverse_sorted_entries.pop() { + let (k, v) = &items[ix]; + // search for an empty spot starting at `ix_entry` + for search in 0..search_end { + let ix_index = (ix_entry_raw + search) % cap; + let elem = IndexEntryPlaceInBucket::new(ix_index); + match elem.occupy_if_matches(index, v, k) { + OccupyIfMatches::SuccessfulInit => {} + OccupyIfMatches::FoundDuplicate => { + // pubkey is same, and it is occupied, so we found a duplicate + let (v_existing, _ref_count_existing) = + elem.read_value(index, data_buckets); + // someone is already allocated with this pubkey, so we found a duplicate + duplicates.push((ix, *v_existing.first().unwrap())); + } + OccupyIfMatches::PubkeyMismatch => { + // fall through and look at next search value + continue; + } + } + continue 'outer; // this 'insertion' is completed - either because we found a duplicate or we occupied an entry in the file + } + // this pubkey did not exist in the file already and we exhausted the search space, so have to try the old way + not_found.push((ix_entry_raw, ix)); + } + // now add all entries that were not found + // they were pushed in order since we popped off input + // So, to keep them 'reversed', we need to reverse them here. + // This isn't required for correctness, but fits the optimal iteration order. + *reverse_sorted_entries = not_found.into_iter().rev().collect(); + } + /// insert as much of `entries` as possible into `index`. /// return an error if the index needs to resize. 
/// for every entry that already exists in `index`, add it (and the value already in the index) to `duplicates` @@ -404,15 +454,26 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { reverse_sorted_entries: &mut Vec<(u64, usize)>, entries_created_on_disk: &mut usize, duplicates: &mut Vec<(usize, T)>, + try_to_reuse_disk_data: bool, ) -> Result<(), BucketMapError> { - if reverse_sorted_entries.is_empty() { - return Ok(()); + if try_to_reuse_disk_data { + // First, insert everything we can into disk contents that already have the right pubkey and hopefully the right data. + // Ideally this results in no disk updates to insert these entries. + // Any entries that were unable to be inserted would remain in `reverse_sorted_entries` so that we fall through and insert those + // in any free slot we find. + Self::batch_insert_non_duplicates_reusing_file( + index, + data_buckets, + items, + reverse_sorted_entries, + duplicates, + ); } let max_search = index.max_search(); let cap = index.capacity(); let search_end = max_search.min(cap); - // pop one entry at a time to insert + // pop one entry at a time to insert in the first free location we find 'outer: while let Some((ix_entry_raw, i)) = reverse_sorted_entries.pop() { let (k, v) = &items[i]; let ix_entry = ix_entry_raw % cap; @@ -422,7 +483,11 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { let elem = IndexEntryPlaceInBucket::new(ix_index); if index.try_lock(ix_index) { *entries_created_on_disk += 1; - // found free element and occupied it + // found free element and occupied it. + // Note that since we are in the startup phase where we only add and do not remove, it is NOT possible to find this same pubkey AFTER + // the index we started searching at, or we would have found it as occupied BEFORE we were able to lock it here. + // This precondition is not true once we are able to delete entries. + // These fields will be overwritten after allocation by callers. 
// Since this part of the mmapped file could have previously been used by someone else, there can be garbage here. elem.init(index, k); @@ -823,6 +888,358 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { mod tests { use {super::*, crate::index_entry::OccupyIfMatches, tempfile::tempdir}; + #[test] + fn test_batch_insert_non_duplicates_reusing_file_many_entries() { + // 3 variations of reuse + for reuse_type in 0..3 { + let data_buckets = Vec::default(); + let v = 12u64; + let random = 1; + // with random=1, 6 entries is the most that don't collide on a single hash % cap value. + for len in 0..7 { + log::error!("testing with {len}"); + // cannot use pubkey [0,0,...] because that matches a zeroed out default file contents. + let raw = (0..len) + .map(|l| (Pubkey::from([(l + 1) as u8; 32]), v + (l as u64))) + .collect::>(); + + let mut hashed = Bucket::index_entries(&raw, random); + let hashed_raw = hashed.clone(); + + let tmpdir = tempdir().unwrap(); + let paths: Arc> = Arc::new(vec![tmpdir.path().to_path_buf()]); + assert!(!paths.is_empty()); + let max_search = 2; + let (mut index, file_name) = BucketStorage::>::new( + paths.clone(), + 1, + std::mem::size_of::>() as u64, + max_search, + Arc::default(), + Arc::default(), + ); + index.delete_file_on_drop = false; + let cap = index.capacity(); + + hashed.sort_unstable_by(|a, b| (a.0 % cap).cmp(&(b.0 % cap)).reverse()); + hashed.windows(2).for_each(|two| { + assert_ne!(two[0].0 % cap, two[1].0 % cap, "{two:?}, cap: {cap}"); + }); + + // file is blank, so nothing matches, so everything returned in `hashed` to retry. 
+ let mut duplicates = Vec::default(); + let mut entries_created = 0; + // insert normally + Bucket::::batch_insert_non_duplicates_internal( + &mut index, + &data_buckets, + &raw, + &mut hashed, + &mut entries_created, + &mut duplicates, + false, + ) + .unwrap(); + assert!(hashed.is_empty()); + assert!(duplicates.is_empty()); + + hashed_raw.iter().for_each(|(hash, i)| { + let (k, v) = raw[*i]; + let ix = *hash % cap; + let entry = IndexEntryPlaceInBucket::new(ix); + assert_eq!(entry.key(&index), &k); + assert_eq!( + entry.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&v) + ); + }); + + drop(index); + let path = paths.first().unwrap().join(file_name.to_string()); + let mut index = BucketStorage::>::load_on_restart( + path, + NonZeroU64::new( + std::mem::size_of::>() as u64 + ) + .unwrap(), + max_search, + Arc::default(), + Arc::default(), + ) + .unwrap(); + + // verify index file is unoccupied, but that contents match + hashed_raw.iter().for_each(|(hash, i)| { + let (k, _v) = raw[*i]; + let ix = *hash % cap; + let entry = IndexEntryPlaceInBucket::new(ix); + assert_eq!(entry.key(&index), &k); + assert_eq!(entry.get_slot_count_enum(&index), OccupiedEnum::Free); + }); + + // this was wiped out by the last call to batch_insert..., so recreate it. + hashed = hashed_raw.clone(); + let mut duplicates = Vec::default(); + if reuse_type == 0 { + Bucket::::batch_insert_non_duplicates_reusing_file( + &mut index, + &data_buckets, + &raw, + &mut hashed, + &mut duplicates, + ); + } else if reuse_type == 1 { + // just overwrite all data instead of trying to reuse it + let mut entries_created = 0; + _ = Bucket::::batch_insert_non_duplicates_internal( + &mut index, + &data_buckets, + &raw, + &mut hashed, + &mut entries_created, + &mut duplicates, + false, + ); + assert_eq!(entries_created, hashed_raw.len()); + } else if reuse_type == 2 { + // call the higher level fn + // That fn will call batch_insert_non_duplicates_reusing_file. 
+ // The inner fn should insert everything, reusing data, so there should be no entries created. + let mut entries_created = 0; + _ = Bucket::::batch_insert_non_duplicates_internal( + &mut index, + &data_buckets, + &raw, + &mut hashed, + &mut entries_created, + &mut duplicates, + // call re-use code first + true, + ); + assert_eq!(entries_created, 0); + } + assert!(hashed.is_empty()); + assert!(duplicates.is_empty()); + + hashed_raw.iter().for_each(|(hash, i)| { + let (k, v) = raw[*i]; + let ix = *hash % cap; + let entry = IndexEntryPlaceInBucket::new(ix); + assert_eq!(entry.key(&index), &k); + assert_eq!( + entry.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&v), + "i: {i}" + ); + }); + } + } + } + + #[test] + fn test_batch_insert_non_duplicates_reusing_file_blank_file() { + let data_buckets = Vec::default(); + let v = 12u64; + let random = 1; + for len in 1..4 { + // cannot use pubkey [0,0,...] because that matches a zeroed out default file contents. + let raw = (0..len) + .map(|l| (Pubkey::from([(l + 1) as u8; 32]), v + (l as u64))) + .collect::>(); + + let mut hashed = Bucket::index_entries(&raw, random); + let hashed_raw = hashed.clone(); + + let mut index = create_test_index(None); + + let cap = index.capacity(); + let ix = hashed[0].0 % cap; + + let entry = IndexEntryPlaceInBucket::new(ix); + + // file is blank, so nothing matches, so everything returned in `hashed` to retry. 
+ let mut duplicates = Vec::default(); + Bucket::::batch_insert_non_duplicates_reusing_file( + &mut index, + &data_buckets, + &raw, + &mut hashed, + &mut duplicates, + ); + + assert_eq!(entry.get_slot_count_enum(&index), OccupiedEnum::Free); + assert_eq!(entry.key(&index), &Pubkey::default()); + assert_eq!(hashed, hashed_raw, "len: {len}"); + assert!(duplicates.is_empty()); + } + } + + #[should_panic(expected = "index asked to insert the same data twice")] + #[test] + fn test_batch_insert_non_duplicates_reusing_file_insert_twice() { + let data_buckets = Vec::default(); + let v = 12u64; + let random = 1; + // cannot use pubkey [0,0,...] because that matches a zeroed out default file contents. + let len = 1; + let raw = (0..len) + .map(|l| (Pubkey::from([(l + 1) as u8; 32]), v + (l as u64))) + .collect::>(); + + let mut hashed = Bucket::index_entries(&raw, random); + + let mut index = create_test_index(None); + let cap = index.capacity(); + let ix = hashed[0].0 % cap; + let entry = IndexEntryPlaceInBucket::new(ix); + entry.init(&mut index, &raw[0].0); + entry.set_slot_count_enum_value(&mut index, OccupiedEnum::OneSlotInIndex(&raw[0].1)); + + let mut duplicates = Vec::default(); + // this will assert because the same k,v pair are already occupied in the index. + Bucket::::batch_insert_non_duplicates_reusing_file( + &mut index, + &data_buckets, + &raw, + &mut hashed, + &mut duplicates, + ); + } + + #[test] + fn test_batch_insert_non_duplicates_reusing_file_insert_duplicate() { + let data_buckets = Vec::default(); + let v = 12u64; + let random = 1; + // cannot use pubkey [0,0,...] because that matches a zeroed out default file contents. 
+ let len = 1; + let raw = (0..len) + .map(|l| (Pubkey::from([(l + 1) as u8; 32]), v + (l as u64))) + .collect::>(); + + let mut hashed = Bucket::index_entries(&raw, random); + + let mut index = create_test_index(None); + let cap = index.capacity(); + let ix = hashed[0].0 % cap; + + // occupy the index data entry with same pubkey, different value. + // This causes it to be treated as a duplicate. + let entry = IndexEntryPlaceInBucket::new(ix); + entry.init(&mut index, &(raw[0].0)); + let non_matching_v = raw[0].1 + 1; + entry.set_slot_count_enum_value(&mut index, OccupiedEnum::OneSlotInIndex(&non_matching_v)); + + // since the same key is already in use with a different value, it is a duplicate + let mut duplicates = Vec::default(); + Bucket::::batch_insert_non_duplicates_reusing_file( + &mut index, + &data_buckets, + &raw, + &mut hashed, + &mut duplicates, + ); + assert_eq!( + entry.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&non_matching_v) + ); + + assert!(hashed.is_empty()); + assert_eq!(duplicates, vec![(0, non_matching_v)], "len: {len}"); + } + + #[test] + fn test_batch_insert_non_duplicates_reusing_file_skip_one() { + let data_buckets = Vec::default(); + let v = 12u64; + let random = 1; + // cannot use pubkey [0,0,...] because that matches a zeroed out default file contents. + let len = 1; + let mut raw = (0..len + 1) + .map(|l| (Pubkey::from([(l + 1) as u8; 32]), v + (l as u64))) + .collect::>(); + + let other = raw.pop().unwrap(); + let mut hashed = Bucket::index_entries(&raw, random); + + let mut index = create_test_index(None); + let cap = index.capacity(); + let ix = hashed[0].0 % cap; + + // occupy the index data entry with a different pubkey + // This causes it to be skipped. + let entry = IndexEntryPlaceInBucket::new(ix); + entry.init(&mut index, &(other.0)); + let entry = IndexEntryPlaceInBucket::new(ix + 1); + // sets pubkey value and enum value of ZeroSlots. Leaving it at zero causes issues. 
+ entry.init(&mut index, &(raw[0].0)); + // marks as free but does not clear out pubkey data in the file. This simulates finding the correct pubkey in the data file in a free entry and occupying it. + entry.set_slot_count_enum_value(&mut index, OccupiedEnum::Free); + + // since the same key is already in use with a different value, it is a duplicate + let mut duplicates = Vec::default(); + Bucket::::batch_insert_non_duplicates_reusing_file( + &mut index, + &data_buckets, + &raw, + &mut hashed, + &mut duplicates, + ); + + assert_eq!( + entry.get_slot_count_enum(&index), + OccupiedEnum::OneSlotInIndex(&raw[0].1) + ); + + assert!(hashed.is_empty()); + assert!(duplicates.is_empty()); + + let entry = IndexEntryPlaceInBucket::new(ix); + assert_eq!(entry.key(&index), &other.0); + let entry = IndexEntryPlaceInBucket::new(ix + 1); + assert_eq!(entry.key(&index), &raw[0].0); + } + + #[should_panic(expected = "called `Option::unwrap()` on a `None` value")] + #[test] + fn test_batch_insert_non_duplicates_reusing_file_existing_zero() { + let data_buckets = Vec::default(); + let v = 12u64; + let random = 1; + // cannot use pubkey [0,0,...] because that matches a zeroed out default file contents. + let len = 1; + let mut raw = (0..len + 1) + .map(|l| (Pubkey::from([(l + 1) as u8; 32]), v + (l as u64))) + .collect::>(); + + let other = raw.pop().unwrap(); + let mut hashed = Bucket::index_entries(&raw, random); + + let mut index = create_test_index(None); + let cap = index.capacity(); + let ix = hashed[0].0 % cap; + + // occupy the index data entry with a different pubkey + // This causes it to be skipped. + let entry = IndexEntryPlaceInBucket::new(ix); + entry.init(&mut index, &(other.0)); + let entry = IndexEntryPlaceInBucket::new(ix + 1); + // sets pubkey value and enum value of ZeroSlots. Leaving it at zero is illegal at startup, so we'll assert when we find this duplicate. 
+ entry.init(&mut index, &(raw[0].0)); + + // since the same key is already in use with a different value, it is a duplicate. + // But, it is a zero length entry. This is not supported at startup. Startup would have never generated a zero length occupied entry. + // So, it is ok for this to assert. + let mut duplicates = Vec::default(); + Bucket::::batch_insert_non_duplicates_reusing_file( + &mut index, + &data_buckets, + &raw, + &mut hashed, + &mut duplicates, + ); + } + #[test] fn test_index_entries() { for v in 10..12u64 { @@ -865,105 +1282,62 @@ mod tests { #[test] fn batch_insert_duplicates_internal_simple() { - solana_logger::setup(); - // add the same duplicate key several times. - // make sure the resulting index and returned `duplicates` is correct. - let random = 1; - let data_buckets = Vec::default(); - let k = Pubkey::from([1u8; 32]); - for v in 10..12u64 { - for len in 1..4 { - let raw = (0..len).map(|l| (k, v + (l as u64))).collect::>(); - let mut hashed = Bucket::index_entries(&raw, random); - let hashed_raw = hashed.clone(); - - let mut index = create_test_index(None); - - let mut entries_created = 0; - let mut duplicates = Vec::default(); - assert!(Bucket::::batch_insert_non_duplicates_internal( - &mut index, - &Vec::default(), - &raw, - &mut hashed, - &mut entries_created, - &mut duplicates, - ) - .is_ok()); - - assert_eq!(duplicates.len(), len as usize - 1); - assert_eq!(hashed.len(), 0); - let single_hashed_raw_inserted = hashed_raw.last().unwrap(); - let elem = - IndexEntryPlaceInBucket::new(single_hashed_raw_inserted.0 % index.capacity()); - let (value, ref_count) = elem.read_value(&index, &data_buckets); - assert_eq!(ref_count, 1); - assert_eq!(value, &[raw[single_hashed_raw_inserted.1].1]); - let expected_duplicates = hashed_raw - .iter() - .rev() - .skip(1) - .map(|(_hash, i)| (*i, raw[single_hashed_raw_inserted.1].1)) - .collect::>(); - assert_eq!(expected_duplicates, duplicates); - } - } - } - - #[test] - fn 
batch_insert_non_duplicates_internal_simple() { - solana_logger::setup(); - // add 2 entries, make sure they are added in the buckets we expect - let random = 1; - let data_buckets = Vec::default(); - for v in 10..12u64 { - for len in 1..3 { - let raw = (0..len) - .map(|l| { - let k = Pubkey::from([l as u8; 32]); - (k, v + (l as u64)) - }) - .collect::>(); - let mut hashed = Bucket::index_entries(&raw, random); - let hashed_raw = hashed.clone(); + for try_to_reuse_disk_data in [false, true] { + // add the same duplicate key several times. + // make sure the resulting index and returned `duplicates` is correct. + let random = 1; + let data_buckets = Vec::default(); + let k = Pubkey::from([1u8; 32]); + for v in 10..12u64 { + for len in 1..4 { + let raw = (0..len).map(|l| (k, v + (l as u64))).collect::>(); + let mut hashed = Bucket::index_entries(&raw, random); + let hashed_raw = hashed.clone(); - let mut index = create_test_index(None); + let mut index = create_test_index(None); - let mut duplicates = Vec::default(); - let mut entries_created = 0; - assert!(Bucket::::batch_insert_non_duplicates_internal( - &mut index, - &Vec::default(), - &raw, - &mut hashed, - &mut entries_created, - &mut duplicates, - ) - .is_ok()); + let mut entries_created = 0; + let mut duplicates = Vec::default(); + assert!(Bucket::::batch_insert_non_duplicates_internal( + &mut index, + &Vec::default(), + &raw, + &mut hashed, + &mut entries_created, + &mut duplicates, + try_to_reuse_disk_data, + ) + .is_ok()); - assert_eq!(hashed.len(), 0); - (0..len).for_each(|i| { - let raw2 = hashed_raw[i]; - let elem = IndexEntryPlaceInBucket::new(raw2.0 % index.capacity()); + assert_eq!(duplicates.len(), len as usize - 1); + assert_eq!(hashed.len(), 0); + let single_hashed_raw_inserted = hashed_raw.last().unwrap(); + let elem = IndexEntryPlaceInBucket::new( + single_hashed_raw_inserted.0 % index.capacity(), + ); let (value, ref_count) = elem.read_value(&index, &data_buckets); assert_eq!(ref_count, 1); - 
assert_eq!(value, &[raw[hashed_raw[i].1].1]); - }); + assert_eq!(value, &[raw[single_hashed_raw_inserted.1].1]); + let expected_duplicates = hashed_raw + .iter() + .rev() + .skip(1) + .map(|(_hash, i)| (*i, raw[single_hashed_raw_inserted.1].1)) + .collect::>(); + assert_eq!(expected_duplicates, duplicates); + } } } } #[test] - fn batch_insert_non_duplicates_internal_same_ix_exceeds_max_search() { - solana_logger::setup(); - // add `len` entries with the same ix, make sure they are added in subsequent buckets. - // adjust `max_search`. If we try to add an entry that causes us to exceed `max_search`, then assert that the adding fails with an error and - // the colliding item remains in `entries` - let random = 1; - let data_buckets = Vec::default(); - for max_search in [2usize, 3] { + fn batch_insert_non_duplicates_internal_simple() { + for try_to_reuse_disk_data in [false, true] { + // add 2 entries, make sure they are added in the buckets we expect + let random = 1; + let data_buckets = Vec::default(); for v in 10..12u64 { - for len in 1..(max_search + 1) { + for len in 1..3 { let raw = (0..len) .map(|l| { let k = Pubkey::from([l as u8; 32]); @@ -971,56 +1345,107 @@ mod tests { }) .collect::>(); let mut hashed = Bucket::index_entries(&raw, random); - let common_ix = 2; // both are put at same ix - hashed.iter_mut().for_each(|v| { - v.0 = common_ix; - }); let hashed_raw = hashed.clone(); - let mut index = create_test_index(Some(max_search as u8)); + let mut index = create_test_index(None); let mut duplicates = Vec::default(); let mut entries_created = 0; - let result = Bucket::::batch_insert_non_duplicates_internal( + assert!(Bucket::::batch_insert_non_duplicates_internal( &mut index, &Vec::default(), &raw, &mut hashed, &mut entries_created, &mut duplicates, - ); + try_to_reuse_disk_data, + ) + .is_ok()); - assert_eq!( - hashed.len(), - if len > max_search { 1 } else { 0 }, - "len: {len}" - ); + assert_eq!(hashed.len(), 0); (0..len).for_each(|i| { - assert!(if len > 
max_search { - result.is_err() - } else { - result.is_ok() - }); let raw2 = hashed_raw[i]; - if i == 0 && len > max_search { - // max search was exceeded and the first entry was unable to be inserted, so it remained in `hashed` - assert_eq!(hashed[0], hashed_raw[0]); - } else { - // we insert in reverse order when ix values are equal, so we expect to find item[1] in item[1]'s expected ix and item[0] will be 1 search distance away from expected ix - let search_required = (len - i - 1) as u64; - let elem = IndexEntryPlaceInBucket::new( - (raw2.0 + search_required) % index.capacity(), - ); - let (value, ref_count) = elem.read_value(&index, &data_buckets); - assert_eq!(ref_count, 1); - assert_eq!(value, &[raw[hashed_raw[i].1].1]); - } + let elem = IndexEntryPlaceInBucket::new(raw2.0 % index.capacity()); + let (value, ref_count) = elem.read_value(&index, &data_buckets); + assert_eq!(ref_count, 1); + assert_eq!(value, &[raw[hashed_raw[i].1].1]); }); } } } } + #[test] + fn batch_insert_non_duplicates_internal_same_ix_exceeds_max_search() { + for try_to_reuse_disk_data in [false, true] { + // add `len` entries with the same ix, make sure they are added in subsequent buckets. + // adjust `max_search`. If we try to add an entry that causes us to exceed `max_search`, then assert that the adding fails with an error and + // the colliding item remains in `entries` + let random = 1; + let data_buckets = Vec::default(); + for max_search in [2usize, 3] { + for v in 10..12u64 { + for len in 1..(max_search + 1) { + let raw = (0..len) + .map(|l| { + // +1 because pubkey[0,0,...] 
matches default contents of index file + let k = Pubkey::from([(l + 1) as u8; 32]); + (k, v + (l as u64)) + }) + .collect::>(); + let mut hashed = Bucket::index_entries(&raw, random); + let common_ix = 2; // both are put at same ix + hashed.iter_mut().for_each(|v| { + v.0 = common_ix; + }); + let hashed_raw = hashed.clone(); + + let mut index = create_test_index(Some(max_search as u8)); + + let mut duplicates = Vec::default(); + let mut entries_created = 0; + let result = Bucket::::batch_insert_non_duplicates_internal( + &mut index, + &Vec::default(), + &raw, + &mut hashed, + &mut entries_created, + &mut duplicates, + try_to_reuse_disk_data, + ); + + assert_eq!( + hashed.len(), + if len > max_search { 1 } else { 0 }, + "len: {len}" + ); + (0..len).for_each(|i| { + assert!(if len > max_search { + result.is_err() + } else { + result.is_ok() + }); + let raw2 = hashed_raw[i]; + if i == 0 && len > max_search { + // max search was exceeded and the first entry was unable to be inserted, so it remained in `hashed` + assert_eq!(hashed[0], hashed_raw[0]); + } else { + // we insert in reverse order when ix values are equal, so we expect to find item[1] in item[1]'s expected ix and item[0] will be 1 search distance away from expected ix + let search_required = (len - i - 1) as u64; + let elem = IndexEntryPlaceInBucket::new( + (raw2.0 + search_required) % index.capacity(), + ); + let (value, ref_count) = elem.read_value(&index, &data_buckets); + assert_eq!(ref_count, 1); + assert_eq!(value, &[raw[hashed_raw[i].1].1]); + } + }); + } + } + } + } + } + #[test] fn test_occupy_if_matches() { let random = 1; diff --git a/bucket_map/src/restart.rs b/bucket_map/src/restart.rs index 41849f94d831e4..bc3336807e26a6 100644 --- a/bucket_map/src/restart.rs +++ b/bucket_map/src/restart.rs @@ -1,5 +1,4 @@ //! Persistent info of disk index files to allow files to be reused on restart. 
-#![allow(dead_code)] use { crate::bucket_map::{BucketMapConfig, MAX_SEARCH_DEFAULT}, bytemuck::{Pod, Zeroable}, From 40f536010f63f871b939091ca27713623599b1cf Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 25 Sep 2023 12:48:29 -0700 Subject: [PATCH 176/407] visit_duplicate_pubkeys_during_startup uses scan (#33397) --- accounts-db/src/accounts_db.rs | 72 ++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index be5509678db4e7..9aba27f90c0232 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9428,41 +9428,45 @@ impl AccountsDb { let mut uncleaned_slots = HashSet::::default(); let mut removed_rent_paying = 0; let mut removed_top_off = 0; - pubkeys.iter().for_each(|pubkey| { - if let Some(entry) = self.accounts_index.get_account_read_entry(pubkey) { - let slot_list = entry.slot_list(); - if slot_list.len() < 2 { - return; - } - // Only the account data len in the highest slot should be used, and the rest are - // duplicates. So find the max slot to keep. - // Then sum up the remaining data len, which are the duplicates. - // All of the slots need to go in the 'uncleaned_slots' list. For clean to work properly, - // the slot where duplicate accounts are found in the index need to be in 'uncleaned_slots' list, too. 
- let max = slot_list.iter().map(|(slot, _)| slot).max().unwrap(); - slot_list.iter().for_each(|(slot, account_info)| { - uncleaned_slots.insert(*slot); - if slot == max { - // the info in 'max' is the most recent, current info for this pubkey - return; - } - let maybe_storage_entry = self - .storage - .get_account_storage_entry(*slot, account_info.store_id()); - let mut accessor = LoadedAccountAccessor::Stored( - maybe_storage_entry.map(|entry| (entry, account_info.offset())), - ); - let loaded_account = accessor.check_and_get_loaded_account(); - accounts_data_len_from_duplicates += loaded_account.data().len(); - if let Some(lamports_to_top_off) = - Self::stats_for_rent_payers(pubkey, &loaded_account, rent_collector) - { - removed_rent_paying += 1; - removed_top_off += lamports_to_top_off; + self.accounts_index.scan( + pubkeys.iter(), + |pubkey, slots_refs, _entry| { + if let Some((slot_list, _ref_count)) = slots_refs { + if slot_list.len() > 1 { + // Only the account data len in the highest slot should be used, and the rest are + // duplicates. So find the max slot to keep. + // Then sum up the remaining data len, which are the duplicates. + // All of the slots need to go in the 'uncleaned_slots' list. For clean to work properly, + // the slot where duplicate accounts are found in the index need to be in 'uncleaned_slots' list, too. 
+ let max = slot_list.iter().map(|(slot, _)| slot).max().unwrap(); + slot_list.iter().for_each(|(slot, account_info)| { + uncleaned_slots.insert(*slot); + if slot == max { + // the info in 'max' is the most recent, current info for this pubkey + return; + } + let maybe_storage_entry = self + .storage + .get_account_storage_entry(*slot, account_info.store_id()); + let mut accessor = LoadedAccountAccessor::Stored( + maybe_storage_entry.map(|entry| (entry, account_info.offset())), + ); + let loaded_account = accessor.check_and_get_loaded_account(); + accounts_data_len_from_duplicates += loaded_account.data().len(); + if let Some(lamports_to_top_off) = + Self::stats_for_rent_payers(pubkey, &loaded_account, rent_collector) + { + removed_rent_paying += 1; + removed_top_off += lamports_to_top_off; + } + }); } - }); - } - }); + } + AccountsIndexScanResult::OnlyKeepInMemoryIfDirty + }, + None, + false, + ); timings .rent_paying .fetch_sub(removed_rent_paying, Ordering::Relaxed); From a41c15e47e8d2b5558ca17a0cd6376c055d7148a Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Mon, 25 Sep 2023 15:02:08 -0500 Subject: [PATCH 177/407] Separate vote cost (#33230) * Separate simple-vote transaction cost from non-vote transaction cost * remove is_simple_vote flag from transaction UsageCostDetails * update test and comment * set static usage cost for SimpleVote transaction --- core/src/banking_stage/consumer.rs | 6 +- core/src/banking_stage/qos_service.rs | 21 +++-- cost-model/src/cost_model.rs | 103 +++++++++++--------- cost-model/src/cost_tracker.rs | 114 +++++++++++++---------- cost-model/src/transaction_cost.rs | 129 +++++++++++++++++++++----- 5 files changed, 244 insertions(+), 129 deletions(-) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 0104792ccd4d4b..ba915bc767efc8 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -737,7 +737,7 @@ mod tests { 
unprocessed_transaction_storage::ThreadType, }, crossbeam_channel::{unbounded, Receiver}, - solana_cost_model::cost_model::CostModel, + solana_cost_model::{cost_model::CostModel, transaction_cost::TransactionCost}, solana_entry::entry::{next_entry, next_versioned_entry}, solana_ledger::{ blockstore::{entries_to_test_shreds, Blockstore}, @@ -1264,7 +1264,9 @@ mod tests { }; let mut cost = CostModel::calculate_cost(&transactions[0], &bank.feature_set); - cost.bpf_execution_cost = actual_bpf_execution_cost; + if let TransactionCost::Transaction(ref mut usage_cost) = cost { + usage_cost.bpf_execution_cost = actual_bpf_execution_cost; + } block_cost + cost.sum() } else { diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index 61d97799a2a824..abac9c70f854f1 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -318,25 +318,25 @@ impl QosService { Ok(cost) => { saturating_add_assign!( batched_transaction_details.costs.batched_signature_cost, - cost.signature_cost + cost.signature_cost() ); saturating_add_assign!( batched_transaction_details.costs.batched_write_lock_cost, - cost.write_lock_cost + cost.write_lock_cost() ); saturating_add_assign!( batched_transaction_details.costs.batched_data_bytes_cost, - cost.data_bytes_cost + cost.data_bytes_cost() ); saturating_add_assign!( batched_transaction_details .costs .batched_builtins_execute_cost, - cost.builtins_execution_cost + cost.builtins_execution_cost() ); saturating_add_assign!( batched_transaction_details.costs.batched_bpf_execute_cost, - cost.bpf_execution_cost + cost.bpf_execution_cost() ); } Err(transaction_error) => match transaction_error { @@ -589,6 +589,7 @@ mod tests { use { super::*, itertools::Itertools, + solana_cost_model::transaction_cost::UsageCostDetails, solana_runtime::genesis_utils::{create_genesis_config, GenesisConfigInfo}, solana_sdk::{ hash::Hash, @@ -734,7 +735,7 @@ mod tests { let commited_status: Vec = 
qos_cost_results .iter() .map(|tx_cost| CommitTransactionDetails::Committed { - compute_units: tx_cost.as_ref().unwrap().bpf_execution_cost + compute_units: tx_cost.as_ref().unwrap().bpf_execution_cost() + execute_units_adjustment, }) .collect(); @@ -861,7 +862,7 @@ mod tests { CommitTransactionDetails::NotCommitted } else { CommitTransactionDetails::Committed { - compute_units: tx_cost.as_ref().unwrap().bpf_execution_cost + compute_units: tx_cost.as_ref().unwrap().bpf_execution_cost() + execute_units_adjustment, } } @@ -904,14 +905,14 @@ mod tests { let tx_cost_results: Vec<_> = (0..num_txs) .map(|n| { if n % 2 == 0 { - Ok(TransactionCost { + Ok(TransactionCost::Transaction(UsageCostDetails { signature_cost, write_lock_cost, data_bytes_cost, builtins_execution_cost, bpf_execution_cost, - ..TransactionCost::default() - }) + ..UsageCostDetails::default() + })) } else { Err(TransactionError::WouldExceedMaxBlockCostLimit) } diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index b3ffdad3e6a2a6..0e8d6954202351 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -6,7 +6,7 @@ //! 
use { - crate::{block_cost_limits::*, transaction_cost::TransactionCost}, + crate::{block_cost_limits::*, transaction_cost::*}, log::*, solana_program_runtime::compute_budget::{ ComputeBudget, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, @@ -36,16 +36,21 @@ impl CostModel { transaction: &SanitizedTransaction, feature_set: &FeatureSet, ) -> TransactionCost { - let mut tx_cost = TransactionCost::new_with_default_capacity(); + if transaction.is_simple_vote_transaction() { + TransactionCost::SimpleVote { + writable_accounts: Self::get_writable_accounts(transaction), + } + } else { + let mut tx_cost = UsageCostDetails::new_with_default_capacity(); - tx_cost.signature_cost = Self::get_signature_cost(transaction); - Self::get_write_lock_cost(&mut tx_cost, transaction); - Self::get_transaction_cost(&mut tx_cost, transaction, feature_set); - tx_cost.account_data_size = Self::calculate_account_data_size(transaction); - tx_cost.is_simple_vote = transaction.is_simple_vote_transaction(); + tx_cost.signature_cost = Self::get_signature_cost(transaction); + Self::get_write_lock_cost(&mut tx_cost, transaction); + Self::get_transaction_cost(&mut tx_cost, transaction, feature_set); + tx_cost.account_data_size = Self::calculate_account_data_size(transaction); - debug!("transaction {:?} has cost {:?}", transaction, tx_cost); - tx_cost + debug!("transaction {:?} has cost {:?}", transaction, tx_cost); + TransactionCost::Transaction(tx_cost) + } } // Calculate cost of loaded accounts size in the same way heap cost is charged at @@ -68,24 +73,30 @@ impl CostModel { transaction.signatures().len() as u64 * SIGNATURE_COST } - fn get_write_lock_cost(tx_cost: &mut TransactionCost, transaction: &SanitizedTransaction) { + fn get_writable_accounts(transaction: &SanitizedTransaction) -> Vec { let message = transaction.message(); message .account_keys() .iter() .enumerate() - .for_each(|(i, k)| { - let is_writable = message.is_writable(i); - - if is_writable { - 
tx_cost.writable_accounts.push(*k); - tx_cost.write_lock_cost += WRITE_LOCK_UNITS; + .filter_map(|(i, k)| { + if message.is_writable(i) { + Some(*k) + } else { + None } - }); + }) + .collect() + } + + fn get_write_lock_cost(tx_cost: &mut UsageCostDetails, transaction: &SanitizedTransaction) { + tx_cost.writable_accounts = Self::get_writable_accounts(transaction); + tx_cost.write_lock_cost = + WRITE_LOCK_UNITS.saturating_mul(tx_cost.writable_accounts.len() as u64); } fn get_transaction_cost( - tx_cost: &mut TransactionCost, + tx_cost: &mut UsageCostDetails, transaction: &SanitizedTransaction, feature_set: &FeatureSet, ) { @@ -298,7 +309,7 @@ mod tests { .get(&system_program::id()) .unwrap(); - let mut tx_cost = TransactionCost::default(); + let mut tx_cost = UsageCostDetails::default(); CostModel::get_transaction_cost( &mut tx_cost, &simple_transaction, @@ -327,7 +338,7 @@ mod tests { let token_transaction = SanitizedTransaction::from_transaction_for_tests(tx); debug!("token_transaction {:?}", token_transaction); - let mut tx_cost = TransactionCost::default(); + let mut tx_cost = UsageCostDetails::default(); CostModel::get_transaction_cost( &mut tx_cost, &token_transaction, @@ -364,7 +375,7 @@ mod tests { ); let token_transaction = SanitizedTransaction::from_transaction_for_tests(tx); - let mut tx_cost = TransactionCost::default(); + let mut tx_cost = UsageCostDetails::default(); CostModel::get_transaction_cost( &mut tx_cost, &token_transaction, @@ -414,7 +425,7 @@ mod tests { ); let token_transaction = SanitizedTransaction::from_transaction_for_tests(tx); - let mut tx_cost = TransactionCost::default(); + let mut tx_cost = UsageCostDetails::default(); CostModel::get_transaction_cost( &mut tx_cost, &token_transaction, @@ -446,7 +457,7 @@ mod tests { .unwrap(); let expected_cost = program_cost * 2; - let mut tx_cost = TransactionCost::default(); + let mut tx_cost = UsageCostDetails::default(); CostModel::get_transaction_cost(&mut tx_cost, &tx, 
&FeatureSet::all_enabled()); assert_eq!(expected_cost, tx_cost.builtins_execution_cost); assert_eq!(0, tx_cost.bpf_execution_cost); @@ -478,7 +489,7 @@ mod tests { debug!("many random transaction {:?}", tx); let expected_cost = DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 2; - let mut tx_cost = TransactionCost::default(); + let mut tx_cost = UsageCostDetails::default(); CostModel::get_transaction_cost(&mut tx_cost, &tx, &FeatureSet::all_enabled()); assert_eq!(0, tx_cost.builtins_execution_cost); assert_eq!(expected_cost, tx_cost.bpf_execution_cost); @@ -509,11 +520,11 @@ mod tests { ); let tx_cost = CostModel::calculate_cost(&tx, &FeatureSet::all_enabled()); - assert_eq!(2 + 2, tx_cost.writable_accounts.len()); - assert_eq!(signer1.pubkey(), tx_cost.writable_accounts[0]); - assert_eq!(signer2.pubkey(), tx_cost.writable_accounts[1]); - assert_eq!(key1, tx_cost.writable_accounts[2]); - assert_eq!(key2, tx_cost.writable_accounts[3]); + assert_eq!(2 + 2, tx_cost.writable_accounts().len()); + assert_eq!(signer1.pubkey(), tx_cost.writable_accounts()[0]); + assert_eq!(signer2.pubkey(), tx_cost.writable_accounts()[1]); + assert_eq!(key1, tx_cost.writable_accounts()[2]); + assert_eq!(key2, tx_cost.writable_accounts()[3]); } #[test] @@ -539,12 +550,12 @@ mod tests { * DEFAULT_PAGE_COST; let tx_cost = CostModel::calculate_cost(&tx, &FeatureSet::all_enabled()); - assert_eq!(expected_account_cost, tx_cost.write_lock_cost); - assert_eq!(*expected_execution_cost, tx_cost.builtins_execution_cost); - assert_eq!(2, tx_cost.writable_accounts.len()); + assert_eq!(expected_account_cost, tx_cost.write_lock_cost()); + assert_eq!(*expected_execution_cost, tx_cost.builtins_execution_cost()); + assert_eq!(2, tx_cost.writable_accounts().len()); assert_eq!( expected_loaded_accounts_data_size_cost, - tx_cost.loaded_accounts_data_size_cost + tx_cost.loaded_accounts_data_size_cost() ); } @@ -568,12 +579,12 @@ mod tests { let expected_loaded_accounts_data_size_cost = 0; let tx_cost = 
CostModel::calculate_cost(&tx, &feature_set); - assert_eq!(expected_account_cost, tx_cost.write_lock_cost); - assert_eq!(*expected_execution_cost, tx_cost.builtins_execution_cost); - assert_eq!(2, tx_cost.writable_accounts.len()); + assert_eq!(expected_account_cost, tx_cost.write_lock_cost()); + assert_eq!(*expected_execution_cost, tx_cost.builtins_execution_cost()); + assert_eq!(2, tx_cost.writable_accounts().len()); assert_eq!( expected_loaded_accounts_data_size_cost, - tx_cost.loaded_accounts_data_size_cost + tx_cost.loaded_accounts_data_size_cost() ); } @@ -607,12 +618,12 @@ mod tests { let expected_loaded_accounts_data_size_cost = (data_limit as u64) / (32 * 1024) * 8; let tx_cost = CostModel::calculate_cost(&tx, &feature_set); - assert_eq!(expected_account_cost, tx_cost.write_lock_cost); - assert_eq!(expected_execution_cost, tx_cost.builtins_execution_cost); - assert_eq!(2, tx_cost.writable_accounts.len()); + assert_eq!(expected_account_cost, tx_cost.write_lock_cost()); + assert_eq!(expected_execution_cost, tx_cost.builtins_execution_cost()); + assert_eq!(2, tx_cost.writable_accounts().len()); assert_eq!( expected_loaded_accounts_data_size_cost, - tx_cost.loaded_accounts_data_size_cost + tx_cost.loaded_accounts_data_size_cost() ); } @@ -640,12 +651,12 @@ mod tests { let expected_loaded_accounts_data_size_cost = 0; let tx_cost = CostModel::calculate_cost(&tx, &feature_set); - assert_eq!(expected_account_cost, tx_cost.write_lock_cost); - assert_eq!(expected_execution_cost, tx_cost.builtins_execution_cost); - assert_eq!(2, tx_cost.writable_accounts.len()); + assert_eq!(expected_account_cost, tx_cost.write_lock_cost()); + assert_eq!(expected_execution_cost, tx_cost.builtins_execution_cost()); + assert_eq!(2, tx_cost.writable_accounts().len()); assert_eq!( expected_loaded_accounts_data_size_cost, - tx_cost.loaded_accounts_data_size_cost + tx_cost.loaded_accounts_data_size_cost() ); } @@ -705,7 +716,7 @@ mod tests { .unwrap(); let expected_bpf_cost = 
DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT; - let mut tx_cost = TransactionCost::default(); + let mut tx_cost = UsageCostDetails::default(); CostModel::get_transaction_cost(&mut tx_cost, &transaction, &FeatureSet::all_enabled()); assert_eq!(expected_builtin_cost, tx_cost.builtins_execution_cost); diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 30df841abf9601..e4f1b917d74b26 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -119,7 +119,7 @@ impl CostTracker { estimated_tx_cost: &TransactionCost, actual_execution_units: u64, ) { - let estimated_execution_units = estimated_tx_cost.bpf_execution_cost; + let estimated_execution_units = estimated_tx_cost.bpf_execution_cost(); match actual_execution_units.cmp(&estimated_execution_units) { Ordering::Equal => (), Ordering::Greater => { @@ -180,18 +180,17 @@ impl CostTracker { fn would_fit(&self, tx_cost: &TransactionCost) -> Result<(), CostTrackerError> { let cost: u64 = tx_cost.sum(); - let vote_cost = if tx_cost.is_simple_vote { cost } else { 0 }; - // check against the total package cost - if self.block_cost.saturating_add(cost) > self.block_cost_limit { + if tx_cost.is_simple_vote() { + // if vote transaction, check if it exceeds vote_transaction_limit + if self.vote_cost.saturating_add(cost) > self.vote_cost_limit { + return Err(CostTrackerError::WouldExceedVoteMaxLimit); + } + } else if self.block_cost.saturating_add(cost) > self.block_cost_limit { + // check against the total package cost return Err(CostTrackerError::WouldExceedBlockMaxLimit); } - // if vote transaction, check if it exceeds vote_transaction_limit - if self.vote_cost.saturating_add(vote_cost) > self.vote_cost_limit { - return Err(CostTrackerError::WouldExceedVoteMaxLimit); - } - // check if the transaction itself is more costly than the account_cost_limit if cost > self.account_cost_limit { return Err(CostTrackerError::WouldExceedAccountMaxLimit); @@ -201,7 +200,7 @@ impl CostTracker { 
// size. This way, transactions are not unnecessarily retried. let account_data_size = self .account_data_size - .saturating_add(tx_cost.account_data_size); + .saturating_add(tx_cost.account_data_size()); if let Some(account_data_size_limit) = self.account_data_size_limit { if account_data_size > account_data_size_limit { return Err(CostTrackerError::WouldExceedAccountDataTotalLimit); @@ -213,7 +212,7 @@ impl CostTracker { } // check each account against account_cost_limit, - for account_key in tx_cost.writable_accounts.iter() { + for account_key in tx_cost.writable_accounts().iter() { match self.cost_by_writable_accounts.get(account_key) { Some(chained_cost) => { if chained_cost.saturating_add(cost) > self.account_cost_limit { @@ -231,7 +230,7 @@ impl CostTracker { fn add_transaction_cost(&mut self, tx_cost: &TransactionCost) { self.add_transaction_execution_cost(tx_cost, tx_cost.sum()); - saturating_add_assign!(self.account_data_size, tx_cost.account_data_size); + saturating_add_assign!(self.account_data_size, tx_cost.account_data_size()); saturating_add_assign!(self.transaction_count, 1); } @@ -240,13 +239,13 @@ impl CostTracker { self.sub_transaction_execution_cost(tx_cost, cost); self.account_data_size = self .account_data_size - .saturating_sub(tx_cost.account_data_size); + .saturating_sub(tx_cost.account_data_size()); self.transaction_count = self.transaction_count.saturating_sub(1); } /// Apply additional actual execution units to cost_tracker fn add_transaction_execution_cost(&mut self, tx_cost: &TransactionCost, adjustment: u64) { - for account_key in tx_cost.writable_accounts.iter() { + for account_key in tx_cost.writable_accounts().iter() { let account_cost = self .cost_by_writable_accounts .entry(*account_key) @@ -254,14 +253,14 @@ impl CostTracker { *account_cost = account_cost.saturating_add(adjustment); } self.block_cost = self.block_cost.saturating_add(adjustment); - if tx_cost.is_simple_vote { + if tx_cost.is_simple_vote() { self.vote_cost = 
self.vote_cost.saturating_add(adjustment); } } /// Subtract extra execution units from cost_tracker fn sub_transaction_execution_cost(&mut self, tx_cost: &TransactionCost, adjustment: u64) { - for account_key in tx_cost.writable_accounts.iter() { + for account_key in tx_cost.writable_accounts().iter() { let account_cost = self .cost_by_writable_accounts .entry(*account_key) @@ -269,7 +268,7 @@ impl CostTracker { *account_cost = account_cost.saturating_sub(adjustment); } self.block_cost = self.block_cost.saturating_sub(adjustment); - if tx_cost.is_simple_vote { + if tx_cost.is_simple_vote() { self.vote_cost = self.vote_cost.saturating_sub(adjustment); } } @@ -287,6 +286,7 @@ impl CostTracker { mod tests { use { super::*, + crate::transaction_cost::*, solana_sdk::{ hash::Hash, signature::{Keypair, Signer}, @@ -331,11 +331,11 @@ mod tests { let simple_transaction = SanitizedTransaction::from_transaction_for_tests( system_transaction::transfer(mint_keypair, &keypair.pubkey(), 2, *start_hash), ); - let mut tx_cost = TransactionCost::new_with_capacity(1); + let mut tx_cost = UsageCostDetails::new_with_capacity(1); tx_cost.bpf_execution_cost = 5; tx_cost.writable_accounts.push(mint_keypair.pubkey()); - (simple_transaction, tx_cost) + (simple_transaction, TransactionCost::Transaction(tx_cost)) } fn build_simple_vote_transaction( @@ -359,12 +359,12 @@ mod tests { SimpleAddressLoader::Disabled, ) .unwrap(); - let mut tx_cost = TransactionCost::new_with_capacity(1); - tx_cost.builtins_execution_cost = 10; - tx_cost.writable_accounts.push(mint_keypair.pubkey()); - tx_cost.is_simple_vote = true; - (vote_transaction, tx_cost) + let writable_accounts = vec![mint_keypair.pubkey()]; + ( + vote_transaction, + TransactionCost::SimpleVote { writable_accounts }, + ) } #[test] @@ -413,7 +413,11 @@ mod tests { fn test_cost_tracker_add_data() { let (mint_keypair, start_hash) = test_setup(); let (_tx, mut tx_cost) = build_simple_transaction(&mint_keypair, &start_hash); - 
tx_cost.account_data_size = 1; + if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost { + usage_cost.account_data_size = 1; + } else { + unreachable!(); + } let cost = tx_cost.sum(); // build testee to have capacity for one simple transaction @@ -568,8 +572,16 @@ mod tests { let second_account = Keypair::new(); let (_tx1, mut tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); let (_tx2, mut tx_cost2) = build_simple_transaction(&second_account, &start_hash); - tx_cost1.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA; - tx_cost2.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA + 1; + if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost1 { + usage_cost.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA; + } else { + unreachable!(); + } + if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost2 { + usage_cost.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA + 1; + } else { + unreachable!(); + } let cost1 = tx_cost1.sum(); let cost2 = tx_cost2.sum(); @@ -596,8 +608,16 @@ mod tests { let (_tx1, mut tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); let (_tx2, mut tx_cost2) = build_simple_transaction(&second_account, &start_hash); let remaining_account_data_size = 1234; - tx_cost1.account_data_size = remaining_account_data_size; - tx_cost2.account_data_size = remaining_account_data_size + 1; + if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost1 { + usage_cost.account_data_size = remaining_account_data_size; + } else { + unreachable!(); + } + if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost2 { + usage_cost.account_data_size = remaining_account_data_size + 1; + } else { + unreachable!(); + } let cost1 = tx_cost1.sum(); let cost2 = tx_cost2.sum(); @@ -661,11 +681,11 @@ mod tests { // | acct3 | $cost | // and block_cost = $cost { - let tx_cost = TransactionCost { + let tx_cost = TransactionCost::Transaction(UsageCostDetails { writable_accounts: vec![acct1, 
acct2, acct3], bpf_execution_cost: cost, - ..TransactionCost::default() - }; + ..UsageCostDetails::default() + }); assert!(testee.try_add(&tx_cost).is_ok()); let (_costliest_account, costliest_account_cost) = testee.find_costliest_account(); assert_eq!(cost, testee.block_cost); @@ -679,11 +699,11 @@ mod tests { // | acct3 | $cost | // and block_cost = $cost * 2 { - let tx_cost = TransactionCost { + let tx_cost = TransactionCost::Transaction(UsageCostDetails { writable_accounts: vec![acct2], bpf_execution_cost: cost, - ..TransactionCost::default() - }; + ..UsageCostDetails::default() + }); assert!(testee.try_add(&tx_cost).is_ok()); let (costliest_account, costliest_account_cost) = testee.find_costliest_account(); assert_eq!(cost * 2, testee.block_cost); @@ -699,11 +719,11 @@ mod tests { // | acct3 | $cost | // and block_cost = $cost * 2 { - let tx_cost = TransactionCost { + let tx_cost = TransactionCost::Transaction(UsageCostDetails { writable_accounts: vec![acct1, acct2], bpf_execution_cost: cost, - ..TransactionCost::default() - }; + ..UsageCostDetails::default() + }); assert!(testee.try_add(&tx_cost).is_err()); let (costliest_account, costliest_account_cost) = testee.find_costliest_account(); assert_eq!(cost * 2, testee.block_cost); @@ -723,11 +743,11 @@ mod tests { let block_max = account_max * 3; // for three accts let mut testee = CostTracker::new(account_max, block_max, block_max, None); - let tx_cost = TransactionCost { + let tx_cost = TransactionCost::Transaction(UsageCostDetails { writable_accounts: vec![acct1, acct2, acct3], bpf_execution_cost: cost, - ..TransactionCost::default() - }; + ..UsageCostDetails::default() + }); let mut expected_block_cost = tx_cost.sum(); let expected_tx_count = 1; assert!(testee.try_add(&tx_cost).is_ok()); @@ -810,11 +830,11 @@ mod tests { let acct3 = Pubkey::new_unique(); let cost = 100; - let tx_cost = TransactionCost { + let tx_cost = TransactionCost::Transaction(UsageCostDetails { writable_accounts: vec![acct1, acct2, 
acct3], bpf_execution_cost: cost, - ..TransactionCost::default() - }; + ..UsageCostDetails::default() + }); let mut cost_tracker = CostTracker::default(); @@ -857,11 +877,11 @@ mod tests { let mut cost_tracker = CostTracker::default(); let cost = 100u64; - let tx_cost = TransactionCost { + let tx_cost = TransactionCost::Transaction(UsageCostDetails { writable_accounts: vec![Pubkey::new_unique()], bpf_execution_cost: cost, - ..TransactionCost::default() - }; + ..UsageCostDetails::default() + }); cost_tracker.add_transaction_cost(&tx_cost); // assert cost_tracker is reverted to default diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index cc0d987ec00f52..e765eee3bc7038 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -1,10 +1,99 @@ -use solana_sdk::pubkey::Pubkey; +use {crate::block_cost_limits, solana_sdk::pubkey::Pubkey}; + +/// TransactionCost is used to represent resources required to process +/// a transaction, denominated in CU (eg. Compute Units). +/// Resources required to process a regular transaction often include +/// an array of variables, such as execution cost, loaded bytes, write +/// lock and read lock etc. +/// SimpleVote has a simpler and pre-determined format: it has 1 or 2 signatures, +/// 2 write locks, a vote instruction and less than 32k (page size) accounts to load. +/// It's cost therefore can be static #33269. +const SIMPLE_VOTE_USAGE_COST: u64 = 3428; + +#[derive(Debug)] +pub enum TransactionCost { + SimpleVote { writable_accounts: Vec }, + Transaction(UsageCostDetails), +} + +impl TransactionCost { + pub fn sum(&self) -> u64 { + match self { + Self::SimpleVote { .. } => SIMPLE_VOTE_USAGE_COST, + Self::Transaction(usage_cost) => usage_cost.sum(), + } + } + + pub fn bpf_execution_cost(&self) -> u64 { + match self { + Self::SimpleVote { .. 
} => 0, + Self::Transaction(usage_cost) => usage_cost.bpf_execution_cost, + } + } + + pub fn is_simple_vote(&self) -> bool { + match self { + Self::SimpleVote { .. } => true, + Self::Transaction(_) => false, + } + } + + pub fn data_bytes_cost(&self) -> u64 { + match self { + Self::SimpleVote { .. } => 0, + Self::Transaction(usage_cost) => usage_cost.data_bytes_cost, + } + } + + pub fn account_data_size(&self) -> u64 { + match self { + Self::SimpleVote { .. } => 0, + Self::Transaction(usage_cost) => usage_cost.account_data_size, + } + } + + pub fn loaded_accounts_data_size_cost(&self) -> u64 { + match self { + Self::SimpleVote { .. } => 8, // simple-vote loads less than 32K account data, + // the cost round up to be one page (32K) cost: 8CU + Self::Transaction(usage_cost) => usage_cost.loaded_accounts_data_size_cost, + } + } + + pub fn signature_cost(&self) -> u64 { + match self { + Self::SimpleVote { .. } => block_cost_limits::SIGNATURE_COST, + Self::Transaction(usage_cost) => usage_cost.signature_cost, + } + } + + pub fn write_lock_cost(&self) -> u64 { + match self { + Self::SimpleVote { .. } => block_cost_limits::WRITE_LOCK_UNITS.saturating_mul(2), + Self::Transaction(usage_cost) => usage_cost.write_lock_cost, + } + } + + pub fn builtins_execution_cost(&self) -> u64 { + match self { + Self::SimpleVote { .. 
} => solana_vote_program::vote_processor::DEFAULT_COMPUTE_UNITS, + Self::Transaction(usage_cost) => usage_cost.builtins_execution_cost, + } + } + + pub fn writable_accounts(&self) -> &[Pubkey] { + match self { + Self::SimpleVote { writable_accounts } => writable_accounts, + Self::Transaction(usage_cost) => &usage_cost.writable_accounts, + } + } +} const MAX_WRITABLE_ACCOUNTS: usize = 256; // costs are stored in number of 'compute unit's #[derive(Debug)] -pub struct TransactionCost { +pub struct UsageCostDetails { pub writable_accounts: Vec, pub signature_cost: u64, pub write_lock_cost: u64, @@ -13,10 +102,9 @@ pub struct TransactionCost { pub bpf_execution_cost: u64, pub loaded_accounts_data_size_cost: u64, pub account_data_size: u64, - pub is_simple_vote: bool, } -impl Default for TransactionCost { +impl Default for UsageCostDetails { fn default() -> Self { Self { writable_accounts: Vec::with_capacity(MAX_WRITABLE_ACCOUNTS), @@ -27,13 +115,12 @@ impl Default for TransactionCost { bpf_execution_cost: 0u64, loaded_accounts_data_size_cost: 0u64, account_data_size: 0u64, - is_simple_vote: false, } } } #[cfg(test)] -impl PartialEq for TransactionCost { +impl PartialEq for UsageCostDetails { fn eq(&self, other: &Self) -> bool { fn to_hash_set(v: &[Pubkey]) -> std::collections::HashSet<&Pubkey> { v.iter().collect() @@ -46,15 +133,15 @@ impl PartialEq for TransactionCost { && self.bpf_execution_cost == other.bpf_execution_cost && self.loaded_accounts_data_size_cost == other.loaded_accounts_data_size_cost && self.account_data_size == other.account_data_size - && self.is_simple_vote == other.is_simple_vote && to_hash_set(&self.writable_accounts) == to_hash_set(&other.writable_accounts) } } #[cfg(test)] -impl Eq for TransactionCost {} +impl Eq for UsageCostDetails {} -impl TransactionCost { +impl UsageCostDetails { + #[cfg(test)] pub fn new_with_capacity(capacity: usize) -> Self { Self { writable_accounts: Vec::with_capacity(capacity), @@ -67,25 +154,19 @@ impl 
TransactionCost { } pub fn sum(&self) -> u64 { - if self.is_simple_vote { - self.signature_cost - .saturating_add(self.write_lock_cost) - .saturating_add(self.data_bytes_cost) - .saturating_add(self.builtins_execution_cost) - } else { - self.signature_cost - .saturating_add(self.write_lock_cost) - .saturating_add(self.data_bytes_cost) - .saturating_add(self.builtins_execution_cost) - .saturating_add(self.bpf_execution_cost) - .saturating_add(self.loaded_accounts_data_size_cost) - } + self.signature_cost + .saturating_add(self.write_lock_cost) + .saturating_add(self.data_bytes_cost) + .saturating_add(self.builtins_execution_cost) + .saturating_add(self.bpf_execution_cost) + .saturating_add(self.loaded_accounts_data_size_cost) } } #[cfg(test)] mod tests { use { + super::*, crate::cost_model::CostModel, solana_sdk::{ feature_set::FeatureSet, @@ -131,8 +212,8 @@ mod tests { ) .unwrap(); - // expected vote tx cost: 2 write locks, 2 sig, 1 vite ix, and 11 CU tx data cost - let expected_vote_cost = 4151; + // expected vote tx cost: 2 write locks, 1 sig, 1 vote ix, 8cu of loaded accounts size, + let expected_vote_cost = SIMPLE_VOTE_USAGE_COST; // expected non-vote tx cost would include default loaded accounts size cost (16384) additionally let expected_none_vote_cost = 20535; From b1316739f25e595e3c2dcfef370a1d2e56ad5525 Mon Sep 17 00:00:00 2001 From: Max Kaplan Date: Mon, 25 Sep 2023 16:26:10 -0400 Subject: [PATCH 178/407] docs: updating systemd command (#33393) --- docs/src/running-validator/validator-start.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/running-validator/validator-start.md b/docs/src/running-validator/validator-start.md index f2241d83ba0d3e..d30533abd54b87 100644 --- a/docs/src/running-validator/validator-start.md +++ b/docs/src/running-validator/validator-start.md @@ -374,7 +374,7 @@ the validator as expected. 
Don't forget to mark it executable with `chmod +x /ho Start the service with: ```bash -$ sudo systemctl enable --now sol +sudo systemctl enable --now sol ``` ### Logging From 642d76b8cd1676100685d8f9393930fb05ce1cc2 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 25 Sep 2023 13:26:27 -0700 Subject: [PATCH 179/407] update comments (#33399) --- bucket_map/src/restart.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bucket_map/src/restart.rs b/bucket_map/src/restart.rs index bc3336807e26a6..aae4d455fd8fe2 100644 --- a/bucket_map/src/restart.rs +++ b/bucket_map/src/restart.rs @@ -27,7 +27,7 @@ pub(crate) struct Header { /// u8 representing how many entries to search for during collisions. /// If this is different, then the contents of the index file's contents are likely not as helpful. max_search: u8, - /// padding to get header to u128 aligned + /// padding to make size of Header be an even multiple of u128 _dummy: [u8; 15], } @@ -44,7 +44,7 @@ pub(crate) struct OneIndexBucket { file_name: u128, /// each bucket uses a random value to hash with pubkeys. Without this, hashing would be inconsistent between restarts. 
random: u64, - /// padding to make u128 aligned + /// padding to make size of OneIndexBucket be an even multiple of u128 _dummy: u64, } From 23ad476ffb55b400b3fa430f92477dd65f39b616 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 25 Sep 2023 17:18:38 -0400 Subject: [PATCH 180/407] Removes unused `Versioned` trait (#33360) --- accounts-db/src/accounts_db.rs | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 9aba27f90c0232..6974e214cbf660 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -769,22 +769,6 @@ type ReclaimResult = (AccountSlots, SlotOffsets); type PubkeysRemovedFromAccountsIndex = HashSet; type ShrinkCandidates = HashSet; -trait Versioned { - fn version(&self) -> u64; -} - -impl Versioned for (u64, Hash) { - fn version(&self) -> u64 { - self.0 - } -} - -impl Versioned for (u64, AccountInfo) { - fn version(&self) -> u64 { - self.0 - } -} - // Some hints for applicability of additional sanity checks for the do_load fast-path; // Slower fallback code path will be taken if the fast path has failed over the retry // threshold, regardless of these hints. 
Also, load cannot fail not-deterministically From d25d53e979da445f9e6cb039396a419c6942634d Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 25 Sep 2023 16:35:40 -0500 Subject: [PATCH 181/407] Fix bug of same-epoch stake deactivation after stake redelegation (#32606) * fix stake deactivation in the same epoch after redelegation bug add tests refactor common code into fn avoid early return add feature gate for the new stake redelegate behavior move stake tests out of cli add stake-program-test crate reimplemnt stake test with program-test remove stake-program-test crate reviews add setup.rs remove clippy reveiws * reviews * review comments --------- Co-authored-by: HaoranYi --- Cargo.lock | 1 + program-test/Cargo.toml | 1 + program-test/tests/setup.rs | 89 +++++++++++ program-test/tests/stake.rs | 193 ++++++++++++++++++++++++ program-test/tests/warp.rs | 94 ++---------- programs/sbf/Cargo.lock | 36 +++++ programs/stake/src/stake_instruction.rs | 7 +- programs/stake/src/stake_state.rs | 56 ++++++- sdk/program/src/stake/instruction.rs | 1 - sdk/program/src/stake/state.rs | 39 +++++ 10 files changed, 425 insertions(+), 92 deletions(-) create mode 100644 program-test/tests/setup.rs create mode 100644 program-test/tests/stake.rs diff --git a/Cargo.lock b/Cargo.lock index 7218a80cb1c4ba..11b687ef77c962 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6587,6 +6587,7 @@ dependencies = [ "solana-sdk", "solana-stake-program", "solana-vote-program", + "test-case", "thiserror", "tokio", ] diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index 07e05e88fb0ab1..87a9c88487a30d 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -27,6 +27,7 @@ solana-program-runtime = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-vote-program = { workspace = true } +test-case = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git 
a/program-test/tests/setup.rs b/program-test/tests/setup.rs new file mode 100644 index 00000000000000..60c42372b6e692 --- /dev/null +++ b/program-test/tests/setup.rs @@ -0,0 +1,89 @@ +use { + solana_program_test::ProgramTestContext, + solana_sdk::{ + pubkey::Pubkey, + rent::Rent, + signature::{Keypair, Signer}, + stake::{ + instruction as stake_instruction, + state::{Authorized, Lockup}, + }, + system_instruction, system_program, + transaction::Transaction, + }, + solana_vote_program::{ + vote_instruction, + vote_state::{self, VoteInit, VoteState}, + }, +}; + +pub async fn setup_stake( + context: &mut ProgramTestContext, + user: &Keypair, + vote_address: &Pubkey, + stake_lamports: u64, +) -> Pubkey { + let stake_keypair = Keypair::new(); + let transaction = Transaction::new_signed_with_payer( + &stake_instruction::create_account_and_delegate_stake( + &context.payer.pubkey(), + &stake_keypair.pubkey(), + vote_address, + &Authorized::auto(&user.pubkey()), + &Lockup::default(), + stake_lamports, + ), + Some(&context.payer.pubkey()), + &vec![&context.payer, &stake_keypair, user], + context.last_blockhash, + ); + context + .banks_client + .process_transaction(transaction) + .await + .unwrap(); + stake_keypair.pubkey() +} + +pub async fn setup_vote(context: &mut ProgramTestContext) -> Pubkey { + let mut instructions = vec![]; + let validator_keypair = Keypair::new(); + instructions.push(system_instruction::create_account( + &context.payer.pubkey(), + &validator_keypair.pubkey(), + Rent::default().minimum_balance(0), + 0, + &system_program::id(), + )); + let vote_lamports = Rent::default().minimum_balance(VoteState::size_of()); + let vote_keypair = Keypair::new(); + let user_keypair = Keypair::new(); + instructions.append(&mut vote_instruction::create_account_with_config( + &context.payer.pubkey(), + &vote_keypair.pubkey(), + &VoteInit { + node_pubkey: validator_keypair.pubkey(), + authorized_voter: user_keypair.pubkey(), + ..VoteInit::default() + }, + vote_lamports, + 
vote_instruction::CreateVoteAccountConfig { + space: vote_state::VoteStateVersions::vote_state_size_of(true) as u64, + ..vote_instruction::CreateVoteAccountConfig::default() + }, + )); + + let transaction = Transaction::new_signed_with_payer( + &instructions, + Some(&context.payer.pubkey()), + &vec![&context.payer, &validator_keypair, &vote_keypair], + context.last_blockhash, + ); + context + .banks_client + .process_transaction(transaction) + .await + .unwrap(); + + vote_keypair.pubkey() +} diff --git a/program-test/tests/stake.rs b/program-test/tests/stake.rs new file mode 100644 index 00000000000000..1ad7a756b32631 --- /dev/null +++ b/program-test/tests/stake.rs @@ -0,0 +1,193 @@ +#![allow(clippy::arithmetic_side_effects)] + +mod setup; + +use { + setup::{setup_stake, setup_vote}, + solana_program_test::ProgramTest, + solana_sdk::{ + instruction::InstructionError, + signature::{Keypair, Signer}, + stake::{instruction as stake_instruction, instruction::StakeError}, + transaction::{Transaction, TransactionError}, + }, + test_case::test_case, +}; + +#[derive(PartialEq)] +enum PendingStakeActivationTestFlag { + MergeActive, + MergeInactive, + NoMerge, +} + +#[test_case(PendingStakeActivationTestFlag::NoMerge; "test that redelegate stake then deactivate it then withdraw from it is not permitted")] +#[test_case(PendingStakeActivationTestFlag::MergeActive; "test that redelegate stake then merge it with another active stake then deactivate it then withdraw from it is not permitted")] +#[test_case(PendingStakeActivationTestFlag::MergeInactive; "test that redelegate stake then merge it with another inactive stake then deactivate it then withdraw from it is not permitted")] +#[tokio::test] +async fn test_stake_redelegation_pending_activation(merge_flag: PendingStakeActivationTestFlag) { + let program_test = ProgramTest::default(); + let mut context = program_test.start_with_context().await; + + // 1. 
create first vote accounts + context.warp_to_slot(100).unwrap(); + let vote_address = setup_vote(&mut context).await; + + // 1.1 advance to normal epoch + let first_normal_slot = context.genesis_config().epoch_schedule.first_normal_slot; + let slots_per_epoch = context.genesis_config().epoch_schedule.slots_per_epoch; + let mut current_slot = first_normal_slot + slots_per_epoch; + context.warp_to_slot(current_slot).unwrap(); + context.warp_forward_force_reward_interval_end().unwrap(); + + // 2. create first stake account and delegate to first vote_address + let stake_lamports = 50_000_000_000; + let user_keypair = Keypair::new(); + let stake_address = + setup_stake(&mut context, &user_keypair, &vote_address, stake_lamports).await; + + // 2.1 advance to new epoch so that the stake is activated. + current_slot += slots_per_epoch; + context.warp_to_slot(current_slot).unwrap(); + context.warp_forward_force_reward_interval_end().unwrap(); + + // 2.2 stake is now activated and can't withdrawal directly + let transaction = Transaction::new_signed_with_payer( + &[stake_instruction::withdraw( + &stake_address, + &user_keypair.pubkey(), + &solana_sdk::pubkey::new_rand(), + 1, + None, + )], + Some(&context.payer.pubkey()), + &vec![&context.payer, &user_keypair], + context.last_blockhash, + ); + let r = context.banks_client.process_transaction(transaction).await; + assert_eq!( + r.unwrap_err().unwrap(), + TransactionError::InstructionError(0, InstructionError::InsufficientFunds) + ); + + // 3. 
create 2nd vote account + let vote_address2 = setup_vote(&mut context).await; + + // 3.1 relegate stake account to 2nd vote account, which creates 2nd stake account + let stake_keypair2 = Keypair::new(); + let stake_address2 = stake_keypair2.pubkey(); + let transaction = Transaction::new_signed_with_payer( + &stake_instruction::redelegate( + &stake_address, + &user_keypair.pubkey(), + &vote_address2, + &stake_address2, + ), + Some(&context.payer.pubkey()), + &vec![&context.payer, &user_keypair, &stake_keypair2], + context.last_blockhash, + ); + context + .banks_client + .process_transaction(transaction) + .await + .unwrap(); + + if merge_flag != PendingStakeActivationTestFlag::NoMerge { + // 3.2 create 3rd to-merge stake account + let stake_address3 = + setup_stake(&mut context, &user_keypair, &vote_address2, stake_lamports).await; + + // 3.2.1 deactivate merge stake account + if merge_flag == PendingStakeActivationTestFlag::MergeInactive { + let transaction = Transaction::new_signed_with_payer( + &[stake_instruction::deactivate_stake( + &stake_address3, + &user_keypair.pubkey(), + )], + Some(&context.payer.pubkey()), + &vec![&context.payer, &user_keypair], + context.last_blockhash, + ); + context + .banks_client + .process_transaction(transaction) + .await + .unwrap(); + } + + // 3.2.2 merge 3rd stake account to 2nd stake account. However, it should not clear the pending stake activation flags on stake_account2. + let transaction = Transaction::new_signed_with_payer( + &stake_instruction::merge(&stake_address2, &stake_address3, &user_keypair.pubkey()), + Some(&context.payer.pubkey()), + &vec![&context.payer, &user_keypair], + context.last_blockhash, + ); + context + .banks_client + .process_transaction(transaction) + .await + .unwrap(); + } + + // 3.3 deactivate 2nd stake account should fail because of pending stake activation. 
+ let transaction = Transaction::new_signed_with_payer( + &[stake_instruction::deactivate_stake( + &stake_address2, + &user_keypair.pubkey(), + )], + Some(&context.payer.pubkey()), + &vec![&context.payer, &user_keypair], + context.last_blockhash, + ); + let r = context.banks_client.process_transaction(transaction).await; + assert_eq!( + r.unwrap_err().unwrap(), + TransactionError::InstructionError( + 0, + InstructionError::Custom( + StakeError::RedelegatedStakeMustFullyActivateBeforeDeactivationIsPermitted as u32 + ) + ) + ); + + // 3.4 withdraw from 2nd stake account should also fail because of pending stake activation. + let transaction = Transaction::new_signed_with_payer( + &[stake_instruction::withdraw( + &stake_address2, + &user_keypair.pubkey(), + &solana_sdk::pubkey::new_rand(), + 1, + None, + )], + Some(&context.payer.pubkey()), + &vec![&context.payer, &user_keypair], + context.last_blockhash, + ); + let r = context.banks_client.process_transaction(transaction).await; + assert_eq!( + r.unwrap_err().unwrap(), + TransactionError::InstructionError(0, InstructionError::InsufficientFunds) + ); + + // 4. advance to new epoch so that the 2nd stake account is fully activated + current_slot += slots_per_epoch; + context.warp_to_slot(current_slot).unwrap(); + context.warp_forward_force_reward_interval_end().unwrap(); + + // 4.1 Now deactivate 2nd stake account should succeed because there is no pending stake activation. 
+ let transaction = Transaction::new_signed_with_payer( + &[stake_instruction::deactivate_stake( + &stake_address2, + &user_keypair.pubkey(), + )], + Some(&context.payer.pubkey()), + &vec![&context.payer, &user_keypair], + context.last_blockhash, + ); + context + .banks_client + .process_transaction(transaction) + .await + .unwrap(); +} diff --git a/program-test/tests/warp.rs b/program-test/tests/warp.rs index 94f497a98f4a76..da0b632ad66759 100644 --- a/program-test/tests/warp.rs +++ b/program-test/tests/warp.rs @@ -1,11 +1,13 @@ #![allow(clippy::arithmetic_side_effects)] + +mod setup; + use { bincode::deserialize, log::debug, + setup::{setup_stake, setup_vote}, solana_banks_client::BanksClient, - solana_program_test::{ - processor, ProgramTest, ProgramTestBanksClientExt, ProgramTestContext, ProgramTestError, - }, + solana_program_test::{processor, ProgramTest, ProgramTestBanksClientExt, ProgramTestError}, solana_sdk::{ account::Account, account_info::{next_account_info, AccountInfo}, @@ -18,9 +20,8 @@ use { signature::{Keypair, Signer}, stake::{ instruction as stake_instruction, - state::{Authorized, Lockup, StakeActivationStatus, StakeStateV2}, + state::{StakeActivationStatus, StakeStateV2}, }, - system_instruction, system_program, sysvar::{ clock, stake_history::{self, StakeHistory}, @@ -29,89 +30,13 @@ use { transaction::{Transaction, TransactionError}, }, solana_stake_program::stake_state, - solana_vote_program::{ - vote_instruction, - vote_state::{self, VoteInit, VoteState}, - }, + solana_vote_program::vote_state, std::convert::TryInto, }; // Use a big number to be sure that we get the right error const WRONG_SLOT_ERROR: u32 = 123456; -async fn setup_stake( - context: &mut ProgramTestContext, - user: &Keypair, - vote_address: &Pubkey, - stake_lamports: u64, -) -> Pubkey { - let stake_keypair = Keypair::new(); - let transaction = Transaction::new_signed_with_payer( - &stake_instruction::create_account_and_delegate_stake( - &context.payer.pubkey(), - 
&stake_keypair.pubkey(), - vote_address, - &Authorized::auto(&user.pubkey()), - &Lockup::default(), - stake_lamports, - ), - Some(&context.payer.pubkey()), - &vec![&context.payer, &stake_keypair, user], - context.last_blockhash, - ); - context - .banks_client - .process_transaction(transaction) - .await - .unwrap(); - stake_keypair.pubkey() -} - -async fn setup_vote(context: &mut ProgramTestContext) -> Pubkey { - // warp once to make sure stake config doesn't get rent-collected - context.warp_to_slot(100).unwrap(); - let mut instructions = vec![]; - let validator_keypair = Keypair::new(); - instructions.push(system_instruction::create_account( - &context.payer.pubkey(), - &validator_keypair.pubkey(), - Rent::default().minimum_balance(0), - 0, - &system_program::id(), - )); - let vote_lamports = Rent::default().minimum_balance(VoteState::size_of()); - let vote_keypair = Keypair::new(); - let user_keypair = Keypair::new(); - instructions.append(&mut vote_instruction::create_account_with_config( - &context.payer.pubkey(), - &vote_keypair.pubkey(), - &VoteInit { - node_pubkey: validator_keypair.pubkey(), - authorized_voter: user_keypair.pubkey(), - ..VoteInit::default() - }, - vote_lamports, - vote_instruction::CreateVoteAccountConfig { - space: vote_state::VoteStateVersions::vote_state_size_of(true) as u64, - ..vote_instruction::CreateVoteAccountConfig::default() - }, - )); - - let transaction = Transaction::new_signed_with_payer( - &instructions, - Some(&context.payer.pubkey()), - &vec![&context.payer, &validator_keypair, &vote_keypair], - context.last_blockhash, - ); - context - .banks_client - .process_transaction(transaction) - .await - .unwrap(); - - vote_keypair.pubkey() -} - fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], @@ -214,6 +139,8 @@ async fn stake_rewards_from_warp() { // Initialize and start the test network let program_test = ProgramTest::default(); let mut context = program_test.start_with_context().await; + + 
context.warp_to_slot(100).unwrap(); let vote_address = setup_vote(&mut context).await; let user_keypair = Keypair::new(); @@ -415,11 +342,12 @@ async fn check_credits_observed( expected_credits ); } - #[tokio::test] async fn stake_merge_immediately_after_activation() { let program_test = ProgramTest::default(); let mut context = program_test.start_with_context().await; + + context.warp_to_slot(100).unwrap(); let vote_address = setup_vote(&mut context).await; context.increment_vote_account_credits(&vote_address, 100); diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 8863bef090ed83..d6fbe9f6f79707 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5331,6 +5331,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-vote-program", + "test-case", "thiserror", "tokio", ] @@ -6828,6 +6829,41 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "test-case" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8f1e820b7f1d95a0cdbf97a5df9de10e1be731983ab943e56703ac1b8e9d425" +dependencies = [ + "test-case-macros", +] + +[[package]] +name = "test-case-core" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54c25e2cb8f5fcd7318157634e8838aa6f7e4715c96637f969fabaccd1ef5462" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.37", +] + +[[package]] +name = "test-case-macros" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37cfd7bbc88a0104e304229fba519bdc45501a30b760fb72240342f1289ad257" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.37", + "test-case-core", +] + [[package]] name = "textwrap" version = "0.11.0" diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 6cf5f000745883..20b6c9e0ebe3c4 100644 --- a/programs/stake/src/stake_instruction.rs +++ 
b/programs/stake/src/stake_instruction.rs @@ -267,7 +267,7 @@ declare_process_instruction!( let mut me = get_stake_account()?; let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - deactivate(&mut me, &clock, &signers) + deactivate(invoke_context, &mut me, &clock, &signers) } Ok(StakeInstruction::SetLockup(lockup)) => { let mut me = get_stake_account()?; @@ -413,6 +413,7 @@ declare_process_instruction!( let clock = invoke_context.get_sysvar_cache().get_clock()?; deactivate_delinquent( + invoke_context, transaction_context, instruction_context, &mut me, @@ -7198,6 +7199,10 @@ mod tests { ..Clock::default() }), ), + ( + stake_history::id(), + create_account_shared_data_for_test(&StakeHistory::default()), + ), ], vec![ AccountMeta { diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 964d2d6ffc5d78..c533728b11f24c 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -633,15 +633,56 @@ pub fn delegate( } } +fn deactivate_stake( + invoke_context: &InvokeContext, + stake: &mut Stake, + stake_flags: &mut StakeFlags, + epoch: Epoch, +) -> Result<(), InstructionError> { + if invoke_context + .feature_set + .is_active(&feature_set::stake_redelegate_instruction::id()) + { + if stake_flags.contains(StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED) { + let stake_history = invoke_context.get_sysvar_cache().get_stake_history()?; + // when MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED flag is set on stake_flags, + // deactivation is only permitted when the stake delegation activating amount is zero. 
+ let status = stake.delegation.stake_activating_and_deactivating( + epoch, + Some(stake_history.as_ref()), + new_warmup_cooldown_rate_epoch(invoke_context), + ); + if status.activating != 0 { + Err(InstructionError::from( + StakeError::RedelegatedStakeMustFullyActivateBeforeDeactivationIsPermitted, + )) + } else { + stake.deactivate(epoch)?; + // After deactivation, need to clear `MustFullyActivateBeforeDeactivationIsPermitted` flag if any. + // So that future activation and deactivation are not subject to that restriction. + stake_flags + .remove(StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED); + Ok(()) + } + } else { + stake.deactivate(epoch)?; + Ok(()) + } + } else { + stake.deactivate(epoch)?; + Ok(()) + } +} + pub fn deactivate( + invoke_context: &InvokeContext, stake_account: &mut BorrowedAccount, clock: &Clock, signers: &HashSet, ) -> Result<(), InstructionError> { - if let StakeStateV2::Stake(meta, mut stake, stake_flags) = stake_account.get_state()? { + if let StakeStateV2::Stake(meta, mut stake, mut stake_flags) = stake_account.get_state()? 
{ meta.authorized.check(signers, StakeAuthorize::Staker)?; - stake.deactivate(clock.epoch)?; - + deactivate_stake(invoke_context, &mut stake, &mut stake_flags, clock.epoch)?; stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } else { Err(InstructionError::InvalidAccountData) @@ -975,7 +1016,7 @@ pub fn redelegate( // deactivate `stake_account` // // Note: This function also ensures `signers` contains the `StakeAuthorize::Staker` - deactivate(stake_account, &clock, signers)?; + deactivate(invoke_context, stake_account, &clock, signers)?; // transfer the effective stake to the uninitialized stake account stake_account.checked_sub_lamports(effective_stake)?; @@ -1001,7 +1042,7 @@ pub fn redelegate( &vote_state.convert_to_current(), clock.epoch, ), - StakeFlags::empty(), + StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, ))?; Ok(()) @@ -1115,6 +1156,7 @@ pub fn withdraw( } pub(crate) fn deactivate_delinquent( + invoke_context: &InvokeContext, transaction_context: &TransactionContext, instruction_context: &InstructionContext, stake_account: &mut BorrowedAccount, @@ -1148,7 +1190,7 @@ pub(crate) fn deactivate_delinquent( return Err(StakeError::InsufficientReferenceVotes.into()); } - if let StakeStateV2::Stake(meta, mut stake, stake_flags) = stake_account.get_state()? { + if let StakeStateV2::Stake(meta, mut stake, mut stake_flags) = stake_account.get_state()? 
{ if stake.delegation.voter_pubkey != *delinquent_vote_account_pubkey { return Err(StakeError::VoteAddressMismatch.into()); } @@ -1156,7 +1198,7 @@ pub(crate) fn deactivate_delinquent( // Deactivate the stake account if its delegated vote account has never voted or has not // voted in the last `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION` if eligible_for_deactivate_delinquent(&delinquent_vote_state.epoch_credits, current_epoch) { - stake.deactivate(current_epoch)?; + deactivate_stake(invoke_context, &mut stake, &mut stake_flags, current_epoch)?; stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } else { Err(StakeError::MinimumDelinquentEpochsForDeactivationNotMet.into()) diff --git a/sdk/program/src/stake/instruction.rs b/sdk/program/src/stake/instruction.rs index 89db8e96be076e..9b964f0eee5f07 100644 --- a/sdk/program/src/stake/instruction.rs +++ b/sdk/program/src/stake/instruction.rs @@ -68,7 +68,6 @@ pub enum StakeError { #[error("stake redelegation to the same vote account is not permitted")] RedelegateToSameVoteAccount, - #[allow(dead_code)] #[error("redelegated stake must be fully activated before deactivation")] RedelegatedStakeMustFullyActivateBeforeDeactivationIsPermitted, } diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs index 969e5f2ceffd29..4f94f73b3f2dd5 100644 --- a/sdk/program/src/stake/state.rs +++ b/sdk/program/src/stake/state.rs @@ -823,6 +823,45 @@ mod test { ); } + #[test] + fn stake_flag_member_offset() { + const FLAG_OFFSET: usize = 196; + let check_flag = |flag, expected| { + let stake = StakeStateV2::Stake( + Meta { + rent_exempt_reserve: 1, + authorized: Authorized { + staker: Pubkey::new_unique(), + withdrawer: Pubkey::new_unique(), + }, + lockup: Lockup::default(), + }, + Stake { + delegation: Delegation { + voter_pubkey: Pubkey::new_unique(), + stake: u64::MAX, + activation_epoch: Epoch::MAX, + deactivation_epoch: Epoch::MAX, + warmup_cooldown_rate: f64::MAX, + }, + credits_observed: 1, + 
}, + flag, + ); + + let bincode_serialized = serialize(&stake).unwrap(); + let borsh_serialized = StakeStateV2::try_to_vec(&stake).unwrap(); + + assert_eq!(bincode_serialized[FLAG_OFFSET], expected); + assert_eq!(borsh_serialized[FLAG_OFFSET], expected); + }; + check_flag( + StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, + 1, + ); + check_flag(StakeFlags::empty(), 0); + } + mod deprecated { use super::*; fn check_borsh_deserialization(stake: StakeState) { From 344e466e12ddef9c15564eab54101c92b968e008 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Mon, 25 Sep 2023 18:17:47 -0700 Subject: [PATCH 182/407] Async connection creation in connection cache (#33302) If there is a connection in the cache available, use it and create the additional connection asynchronously. --- Cargo.lock | 1 + bench-tps/src/bench_tps_client/tpu_client.rs | 5 +- client/src/connection_cache.rs | 1 + client/src/nonblocking/tpu_client.rs | 3 + client/src/tpu_client.rs | 3 + connection-cache/Cargo.toml | 1 + connection-cache/src/connection_cache.rs | 252 ++++++++++++++----- programs/sbf/Cargo.lock | 1 + quic-client/src/lib.rs | 10 +- quic-client/src/nonblocking/quic_client.rs | 8 +- thin-client/src/thin_client.rs | 8 +- tpu-client/src/nonblocking/tpu_client.rs | 7 +- tpu-client/src/tpu_client.rs | 3 +- udp-client/src/lib.rs | 8 +- 14 files changed, 238 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 11b687ef77c962..9c8371372316d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5700,6 +5700,7 @@ version = "1.17.0" dependencies = [ "async-trait", "bincode", + "crossbeam-channel", "futures-util", "indexmap 2.0.0", "indicatif", diff --git a/bench-tps/src/bench_tps_client/tpu_client.rs b/bench-tps/src/bench_tps_client/tpu_client.rs index ae762e52922ec5..c56da2ae6e880b 100644 --- a/bench-tps/src/bench_tps_client/tpu_client.rs +++ b/bench-tps/src/bench_tps_client/tpu_client.rs @@ -1,7 +1,9 @@ use { 
crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_client::tpu_client::TpuClient, - solana_connection_cache::connection_cache::{ConnectionManager, ConnectionPool}, + solana_connection_cache::connection_cache::{ + ConnectionManager, ConnectionPool, NewConnectionConfig, + }, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, message::Message, pubkey::Pubkey, signature::Signature, transaction::Transaction, @@ -12,6 +14,7 @@ impl BenchTpsClient for TpuClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { fn send_transaction(&self, transaction: Transaction) -> Result { let signature = transaction.signatures[0]; diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index 0bce6bbb3c90d2..44673c06f4d087 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -5,6 +5,7 @@ use { client_connection::ClientConnection, connection_cache::{ BaseClientConnection, ConnectionCache as BackendConnectionCache, ConnectionPool, + NewConnectionConfig, }, }, solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, diff --git a/client/src/nonblocking/tpu_client.rs b/client/src/nonblocking/tpu_client.rs index d04df3e451ae67..5e71eae36bd6e4 100644 --- a/client/src/nonblocking/tpu_client.rs +++ b/client/src/nonblocking/tpu_client.rs @@ -3,6 +3,7 @@ use { crate::{connection_cache::ConnectionCache, tpu_client::TpuClientConfig}, solana_connection_cache::connection_cache::{ ConnectionCache as BackendConnectionCache, ConnectionManager, ConnectionPool, + NewConnectionConfig, }, solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, solana_rpc_client::nonblocking::rpc_client::RpcClient, @@ -30,6 +31,7 @@ impl TpuClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { /// Serialize and send transaction to the current and upcoming leader TPUs according to fanout /// size @@ -99,6 +101,7 @@ impl TpuClient 
where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { /// Create a new client that disconnects when dropped pub async fn new_with_connection_cache( diff --git a/client/src/tpu_client.rs b/client/src/tpu_client.rs index 2abba3f7772894..45394151340070 100644 --- a/client/src/tpu_client.rs +++ b/client/src/tpu_client.rs @@ -2,6 +2,7 @@ use { crate::connection_cache::ConnectionCache, solana_connection_cache::connection_cache::{ ConnectionCache as BackendConnectionCache, ConnectionManager, ConnectionPool, + NewConnectionConfig, }, solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, solana_rpc_client::rpc_client::RpcClient, @@ -34,6 +35,7 @@ impl TpuClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { /// Serialize and send transaction to the current and upcoming leader TPUs according to fanout /// size @@ -90,6 +92,7 @@ impl TpuClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { /// Create a new client that disconnects when dropped pub fn new_with_connection_cache( diff --git a/connection-cache/Cargo.toml b/connection-cache/Cargo.toml index c0d37f40017b88..acf52f05f9ba5d 100644 --- a/connection-cache/Cargo.toml +++ b/connection-cache/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } [dependencies] async-trait = { workspace = true } bincode = { workspace = true } +crossbeam-channel = { workspace = true } futures-util = { workspace = true } indexmap = { workspace = true } indicatif = { workspace = true, optional = true } diff --git a/connection-cache/src/connection_cache.rs b/connection-cache/src/connection_cache.rs index 66ec89d44f056f..306a8df2722091 100644 --- a/connection-cache/src/connection_cache.rs +++ b/connection-cache/src/connection_cache.rs @@ -4,13 +4,16 @@ use { connection_cache_stats::{ConnectionCacheStats, CONNECTION_STAT_SUBMISSION_INTERVAL}, nonblocking::client_connection::ClientConnection as NonblockingClientConnection, }, + 
crossbeam_channel::{Receiver, RecvError, Sender}, indexmap::map::IndexMap, + log::*, rand::{thread_rng, Rng}, solana_measure::measure::Measure, solana_sdk::timing::AtomicInterval, std::{ net::SocketAddr, sync::{atomic::Ordering, Arc, RwLock}, + thread::{Builder, JoinHandle}, }, thiserror::Error, }; @@ -27,9 +30,9 @@ pub enum Protocol { QUIC, } -pub trait ConnectionManager { +pub trait ConnectionManager: Send + Sync + 'static { type ConnectionPool: ConnectionPool; - type NewConnectionConfig; + type NewConnectionConfig: NewConnectionConfig; const PROTOCOL: Protocol; @@ -43,18 +46,20 @@ pub struct ConnectionCache< T, // NewConnectionConfig > { name: &'static str, - map: RwLock>, - connection_manager: S, + map: Arc>>, + connection_manager: Arc, stats: Arc, last_stats: AtomicInterval, connection_pool_size: usize, - connection_config: T, + connection_config: Arc, + sender: Sender<(usize, SocketAddr)>, } impl ConnectionCache where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { pub fn new( name: &'static str, @@ -76,17 +81,61 @@ where connection_config: C, connection_manager: M, ) -> Self { + let (sender, receiver) = crossbeam_channel::unbounded(); + + let map = Arc::new(RwLock::new(IndexMap::with_capacity(MAX_CONNECTIONS))); + let config = Arc::new(connection_config); + let connection_manager = Arc::new(connection_manager); + let connection_pool_size = 1.max(connection_pool_size); // The minimum pool size is 1. + + let stats = Arc::new(ConnectionCacheStats::default()); + + let _async_connection_thread = + Self::create_connection_async_thread(map.clone(), receiver, stats.clone()); Self { name, - map: RwLock::new(IndexMap::with_capacity(MAX_CONNECTIONS)), - stats: Arc::new(ConnectionCacheStats::default()), + map, + stats, connection_manager, last_stats: AtomicInterval::default(), - connection_pool_size: 1.max(connection_pool_size), // The minimum pool size is 1. 
- connection_config, + connection_pool_size, + connection_config: config, + sender, } } + /// This actually triggers the connection creation by sending empty data + fn create_connection_async_thread( + map: Arc>>, + receiver: Receiver<(usize, SocketAddr)>, + stats: Arc, + ) -> JoinHandle<()> { + Builder::new() + .name("solQAsynCon".to_string()) + .spawn(move || loop { + let recv_result = receiver.recv(); + match recv_result { + Err(RecvError) => { + break; + } + Ok((idx, addr)) => { + let map = map.read().unwrap(); + let pool = map.get(&addr); + if let Some(pool) = pool { + let conn = pool.get(idx); + if let Ok(conn) = conn { + drop(map); + let conn = conn.new_blocking_connection(addr, stats.clone()); + let result = conn.send_data(&[]); + debug!("Create async connection result {result:?} for {addr}"); + } + } + } + } + }) + .unwrap() + } + /// Create a lazy connection object under the exclusive lock of the cache map if there is not /// enough used connections in the connection pool for the specified address. /// Returns CreateConnectionResult. @@ -102,47 +151,37 @@ where // Read again, as it is possible that between read lock dropped and the write lock acquired // another thread could have setup the connection. 
- let should_create_connection = map + let pool_status = map .get(addr) - .map(|pool| pool.need_new_connection(self.connection_pool_size)) - .unwrap_or(true); - - let (cache_hit, num_evictions, eviction_timing_ms) = if should_create_connection { - // evict a connection if the cache is reaching upper bounds - let mut num_evictions = 0; - let mut get_connection_cache_eviction_measure = - Measure::start("get_connection_cache_eviction_measure"); - let existing_index = map.get_index_of(addr); - while map.len() >= MAX_CONNECTIONS { - let mut rng = thread_rng(); - let n = rng.gen_range(0..MAX_CONNECTIONS); - if let Some(index) = existing_index { - if n == index { - continue; - } - } - map.swap_remove_index(n); - num_evictions += 1; - } - get_connection_cache_eviction_measure.stop(); - - map.entry(*addr) - .and_modify(|pool| { - pool.add_connection(&self.connection_config, addr); - }) - .or_insert_with(|| { - let mut pool = self.connection_manager.new_connection_pool(); - pool.add_connection(&self.connection_config, addr); - pool - }); - ( - false, - num_evictions, - get_connection_cache_eviction_measure.as_ms(), - ) - } else { - (true, 0, 0) - }; + .map(|pool| pool.check_pool_status(self.connection_pool_size)) + .unwrap_or(PoolStatus::Empty); + + let (cache_hit, num_evictions, eviction_timing_ms) = + if matches!(pool_status, PoolStatus::Empty) { + Self::create_connection_internal( + &self.connection_config, + &self.connection_manager, + &mut map, + addr, + self.connection_pool_size, + None, + ) + } else { + (true, 0, 0) + }; + + if matches!(pool_status, PoolStatus::PartiallyFull) { + // trigger an async connection create + debug!("Triggering async connection for {addr:?}"); + Self::create_connection_internal( + &self.connection_config, + &self.connection_manager, + &mut map, + addr, + self.connection_pool_size, + Some(&self.sender), + ); + } let pool = map.get(addr).unwrap(); let connection = pool.borrow_connection(); @@ -156,6 +195,63 @@ where } } + fn 
create_connection_internal( + config: &Arc, + connection_manager: &Arc, + map: &mut std::sync::RwLockWriteGuard<'_, IndexMap>, + addr: &SocketAddr, + connection_pool_size: usize, + async_connection_sender: Option<&Sender<(usize, SocketAddr)>>, + ) -> (bool, u64, u64) { + // evict a connection if the cache is reaching upper bounds + let mut num_evictions = 0; + let mut get_connection_cache_eviction_measure = + Measure::start("get_connection_cache_eviction_measure"); + let existing_index = map.get_index_of(addr); + while map.len() >= MAX_CONNECTIONS { + let mut rng = thread_rng(); + let n = rng.gen_range(0..MAX_CONNECTIONS); + if let Some(index) = existing_index { + if n == index { + continue; + } + } + map.swap_remove_index(n); + num_evictions += 1; + } + get_connection_cache_eviction_measure.stop(); + + let mut hit_cache = false; + map.entry(*addr) + .and_modify(|pool| { + if matches!( + pool.check_pool_status(connection_pool_size), + PoolStatus::PartiallyFull + ) { + let idx = pool.add_connection(config, addr); + if let Some(sender) = async_connection_sender { + debug!( + "Sending async connection creation {} for {addr}", + pool.num_connections() - 1 + ); + sender.send((idx, *addr)).unwrap(); + }; + } else { + hit_cache = true; + } + }) + .or_insert_with(|| { + let mut pool = connection_manager.new_connection_pool(); + pool.add_connection(config, addr); + pool + }); + ( + hit_cache, + num_evictions, + get_connection_cache_eviction_measure.as_ms(), + ) + } + fn get_or_add_connection( &self, addr: &SocketAddr, @@ -179,12 +275,26 @@ where eviction_timing_ms, } = match map.get(addr) { Some(pool) => { - if pool.need_new_connection(self.connection_pool_size) { + let pool_status = pool.check_pool_status(self.connection_pool_size); + if matches!(pool_status, PoolStatus::Empty) { // create more connection and put it in the pool drop(map); self.create_connection(&mut lock_timing_ms, addr) } else { let connection = pool.borrow_connection(); + if matches!(pool_status, 
PoolStatus::PartiallyFull) { + debug!("Creating connection async for {addr}"); + drop(map); + let mut map = self.map.write().unwrap(); + Self::create_connection_internal( + &self.connection_config, + &self.connection_manager, + &mut map, + addr, + self.connection_pool_size, + Some(&self.sender), + ); + } CreateConnectionResult { connection, cache_hit: true, @@ -299,12 +409,22 @@ pub enum ClientError { IoError(#[from] std::io::Error), } -pub trait ConnectionPool { - type NewConnectionConfig; +pub trait NewConnectionConfig: Sized + Send + Sync + 'static { + fn new() -> Result; +} + +pub enum PoolStatus { + Empty, + PartiallyFull, + Full, +} + +pub trait ConnectionPool: Send + Sync + 'static { + type NewConnectionConfig: NewConnectionConfig; type BaseClientConnection: BaseClientConnection; - /// Add a connection to the pool - fn add_connection(&mut self, config: &Self::NewConnectionConfig, addr: &SocketAddr); + /// Add a connection to the pool and return its index + fn add_connection(&mut self, config: &Self::NewConnectionConfig, addr: &SocketAddr) -> usize; /// Get the number of current connections in the pool fn num_connections(&self) -> usize; @@ -319,10 +439,17 @@ pub trait ConnectionPool { let n = rng.gen_range(0..self.num_connections()); self.get(n).expect("index is within num_connections") } + /// Check if we need to create a new connection. If the count of the connections - /// is smaller than the pool size. - fn need_new_connection(&self, required_pool_size: usize) -> bool { - self.num_connections() < required_pool_size + /// is smaller than the pool size and if there is no connection at all. 
+ fn check_pool_status(&self, required_pool_size: usize) -> PoolStatus { + if self.num_connections() == 0 { + PoolStatus::Empty + } else if self.num_connections() < required_pool_size { + PoolStatus::PartiallyFull + } else { + PoolStatus::Full + } } fn create_pool_entry( @@ -393,9 +520,16 @@ mod tests { type NewConnectionConfig = MockUdpConfig; type BaseClientConnection = MockUdp; - fn add_connection(&mut self, config: &Self::NewConnectionConfig, addr: &SocketAddr) { + /// Add a connection into the pool and return its index in the pool. + fn add_connection( + &mut self, + config: &Self::NewConnectionConfig, + addr: &SocketAddr, + ) -> usize { let connection = self.create_pool_entry(config, addr); + let idx = self.connections.len(); self.connections.push(connection); + idx } fn num_connections(&self) -> usize { @@ -436,7 +570,7 @@ mod tests { } } - impl MockUdpConfig { + impl NewConnectionConfig for MockUdpConfig { fn new() -> Result { Ok(Self { udp_socket: Arc::new( diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d6fbe9f6f79707..67f6c9862b071c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4765,6 +4765,7 @@ version = "1.17.0" dependencies = [ "async-trait", "bincode", + "crossbeam-channel", "futures-util", "indexmap 2.0.0", "log", diff --git a/quic-client/src/lib.rs b/quic-client/src/lib.rs index 0357969d8296d3..90a55deaa691ed 100644 --- a/quic-client/src/lib.rs +++ b/quic-client/src/lib.rs @@ -19,7 +19,7 @@ use { solana_connection_cache::{ connection_cache::{ BaseClientConnection, ClientError, ConnectionCache, ConnectionManager, ConnectionPool, - ConnectionPoolError, Protocol, + ConnectionPoolError, NewConnectionConfig, Protocol, }, connection_cache_stats::ConnectionCacheStats, }, @@ -53,9 +53,11 @@ impl ConnectionPool for QuicPool { type BaseClientConnection = Quic; type NewConnectionConfig = QuicConfig; - fn add_connection(&mut self, config: &Self::NewConnectionConfig, addr: &SocketAddr) { + fn add_connection(&mut 
self, config: &Self::NewConnectionConfig, addr: &SocketAddr) -> usize { let connection = self.create_pool_entry(config, addr); + let idx = self.connections.len(); self.connections.push(connection); + idx } fn num_connections(&self) -> usize { @@ -93,8 +95,8 @@ pub struct QuicConfig { client_endpoint: Option, } -impl QuicConfig { - pub fn new() -> Result { +impl NewConnectionConfig for QuicConfig { + fn new() -> Result { let (cert, priv_key) = new_self_signed_tls_certificate(&Keypair::new(), IpAddr::V4(Ipv4Addr::UNSPECIFIED))?; Ok(Self { diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index 78f21ac2565f95..66a55f8f3fd78f 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -340,16 +340,18 @@ impl QuicClient { Ok(conn) => { *conn_guard = Some(conn.clone()); info!( - "Made connection to {} id {} try_count {}", + "Made connection to {} id {} try_count {}, from connection cache warming?: {}", self.addr, conn.connection.stable_id(), - connection_try_count + connection_try_count, + data.is_empty(), ); connection_try_count += 1; conn.connection.clone() } Err(err) => { - info!("Cannot make connection to {}, error {:}", self.addr, err); + info!("Cannot make connection to {}, error {:}, from connection cache warming?: {}", + self.addr, err, data.is_empty()); return Err(err); } } diff --git a/thin-client/src/thin_client.rs b/thin-client/src/thin_client.rs index c61addfb500c06..b1ae08fd7c01a3 100644 --- a/thin-client/src/thin_client.rs +++ b/thin-client/src/thin_client.rs @@ -8,7 +8,9 @@ use { rayon::iter::{IntoParallelIterator, ParallelIterator}, solana_connection_cache::{ client_connection::ClientConnection, - connection_cache::{ConnectionCache, ConnectionManager, ConnectionPool}, + connection_cache::{ + ConnectionCache, ConnectionManager, ConnectionPool, NewConnectionConfig, + }, }, solana_rpc_client::rpc_client::RpcClient, 
solana_rpc_client_api::{config::RpcProgramAccountsConfig, response::Response}, @@ -124,6 +126,7 @@ impl ThinClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { /// Create a new ThinClient that will interface with the Rpc at `rpc_addr` using TCP /// and the Tpu at `tpu_addr` over `transactions_socket` using Quic or UDP @@ -324,6 +327,7 @@ impl Client for ThinClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { fn tpu_addr(&self) -> String { self.tpu_addr().to_string() @@ -334,6 +338,7 @@ impl SyncClient for ThinClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { fn send_and_confirm_message( &self, @@ -618,6 +623,7 @@ impl AsyncClient for ThinClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { fn async_send_versioned_transaction( &self, diff --git a/tpu-client/src/nonblocking/tpu_client.rs b/tpu-client/src/nonblocking/tpu_client.rs index ea1bb98a569f7c..57a9b0b4033c61 100644 --- a/tpu-client/src/nonblocking/tpu_client.rs +++ b/tpu-client/src/nonblocking/tpu_client.rs @@ -9,7 +9,7 @@ use { log::*, solana_connection_cache::{ connection_cache::{ - ConnectionCache, ConnectionManager, ConnectionPool, Protocol, + ConnectionCache, ConnectionManager, ConnectionPool, NewConnectionConfig, Protocol, DEFAULT_CONNECTION_POOL_SIZE, }, nonblocking::client_connection::ClientConnection, @@ -268,6 +268,7 @@ fn send_wire_transaction_futures<'a, P, M, C>( where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { const SEND_TIMEOUT_INTERVAL: Duration = Duration::from_secs(5); let sleep_duration = SEND_TRANSACTION_INTERVAL.saturating_mul(index as u32); @@ -339,6 +340,7 @@ async fn sleep_and_send_wire_transaction_to_addr( where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { sleep(sleep_duration).await; send_wire_transaction_to_addr(connection_cache, &addr, wire_transaction).await @@ -352,6 +354,7 @@ async fn 
send_wire_transaction_to_addr( where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { let conn = connection_cache.get_nonblocking_connection(addr); conn.send_data(&wire_transaction).await @@ -365,6 +368,7 @@ async fn send_wire_transaction_batch_to_addr( where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { let conn = connection_cache.get_nonblocking_connection(addr); conn.send_data_batch(wire_transactions).await @@ -374,6 +378,7 @@ impl TpuClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { /// Serialize and send transaction to the current and upcoming leader TPUs according to fanout /// size diff --git a/tpu-client/src/tpu_client.rs b/tpu-client/src/tpu_client.rs index f2e9155f39116e..9d5a159686d426 100644 --- a/tpu-client/src/tpu_client.rs +++ b/tpu-client/src/tpu_client.rs @@ -3,7 +3,7 @@ use { crate::nonblocking::tpu_client::TpuClient as NonblockingTpuClient, rayon::iter::{IntoParallelIterator, ParallelIterator}, solana_connection_cache::connection_cache::{ - ConnectionCache, ConnectionManager, ConnectionPool, + ConnectionCache, ConnectionManager, ConnectionPool, NewConnectionConfig, }, solana_rpc_client::rpc_client::RpcClient, solana_sdk::{clock::Slot, transaction::Transaction, transport::Result as TransportResult}, @@ -71,6 +71,7 @@ impl TpuClient where P: ConnectionPool, M: ConnectionManager, + C: NewConnectionConfig, { /// Serialize and send transaction to the current and upcoming leader TPUs according to fanout /// size diff --git a/udp-client/src/lib.rs b/udp-client/src/lib.rs index c4ed99b3b7f82b..06eeca00185898 100644 --- a/udp-client/src/lib.rs +++ b/udp-client/src/lib.rs @@ -11,7 +11,7 @@ use { solana_connection_cache::{ connection_cache::{ BaseClientConnection, ClientError, ConnectionManager, ConnectionPool, - ConnectionPoolError, Protocol, + ConnectionPoolError, NewConnectionConfig, Protocol, }, connection_cache_stats::ConnectionCacheStats, }, @@ -28,9 +28,11 @@ impl 
ConnectionPool for UdpPool { type BaseClientConnection = Udp; type NewConnectionConfig = UdpConfig; - fn add_connection(&mut self, config: &Self::NewConnectionConfig, addr: &SocketAddr) { + fn add_connection(&mut self, config: &Self::NewConnectionConfig, addr: &SocketAddr) -> usize { let connection = self.create_pool_entry(config, addr); + let idx = self.connections.len(); self.connections.push(connection); + idx } fn num_connections(&self) -> usize { @@ -57,7 +59,7 @@ pub struct UdpConfig { udp_socket: Arc, } -impl UdpConfig { +impl NewConnectionConfig for UdpConfig { fn new() -> Result { let socket = solana_net_utils::bind_with_any_port(IpAddr::V4(Ipv4Addr::UNSPECIFIED)) .map_err(Into::::into)?; From 7c03958ba18818a6a572b3caf951c01aecf33c51 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 03:11:18 +0000 Subject: [PATCH 183/407] build(deps): bump tungstenite from 0.20.0 to 0.20.1 (#33405) * build(deps): bump tungstenite from 0.20.0 to 0.20.1 Bumps [tungstenite](https://github.com/snapview/tungstenite-rs) from 0.20.0 to 0.20.1. - [Changelog](https://github.com/snapview/tungstenite-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/snapview/tungstenite-rs/compare/v0.20.0...v0.20.1) --- updated-dependencies: - dependency-name: tungstenite dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c8371372316d4..bfae857dc4e638 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8387,9 +8387,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e862a1c4128df0112ab625f55cd5c934bcb4312ba80b39ae4b4835a3fd58e649" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ "byteorder", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 34ebe5c3b76857..2c64fd87ed2fd5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -402,7 +402,7 @@ tokio-util = "0.6" tonic = "0.9.2" tonic-build = "0.9.2" trees = "0.4.2" -tungstenite = "0.20.0" +tungstenite = "0.20.1" unix_socket2 = "0.5.4" uriparse = "0.6.4" url = "2.4.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 67f6c9862b071c..36e7d8f07fdc5d 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7271,9 +7271,9 @@ checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" [[package]] name = "tungstenite" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e862a1c4128df0112ab625f55cd5c934bcb4312ba80b39ae4b4835a3fd58e649" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ "byteorder 1.4.3", "bytes", From b5c466d2c8854db16d98139440ecaf81002802d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 
2023 03:12:39 +0000 Subject: [PATCH 184/407] build(deps): bump indicatif from 0.17.6 to 0.17.7 (#33369) * build(deps): bump indicatif from 0.17.6 to 0.17.7 Bumps [indicatif](https://github.com/console-rs/indicatif) from 0.17.6 to 0.17.7. - [Release notes](https://github.com/console-rs/indicatif/releases) - [Commits](https://github.com/console-rs/indicatif/commits) --- updated-dependencies: - dependency-name: indicatif dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bfae857dc4e638..0954cf387b1b01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2637,9 +2637,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.6" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730" +checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" dependencies = [ "console", "instant", diff --git a/Cargo.toml b/Cargo.toml index 2c64fd87ed2fd5..1ec266eeee4bb9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -216,7 +216,7 @@ hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.7" indexmap = "2.0.0" -indicatif = "0.17.6" +indicatif = "0.17.7" Inflector = "0.11.4" itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 36e7d8f07fdc5d..abc80b929dca85 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2243,9 +2243,9 @@ dependencies = [ [[package]] name = 
"indicatif" -version = "0.17.6" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730" +checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" dependencies = [ "console", "instant", From 426a47dba27430f56b2610e192d6a0983f9e0fbb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 09:12:33 +0000 Subject: [PATCH 185/407] build(deps): bump tokio-tungstenite from 0.20.0 to 0.20.1 (#33410) * build(deps): bump tokio-tungstenite from 0.20.0 to 0.20.1 Bumps [tokio-tungstenite](https://github.com/snapview/tokio-tungstenite) from 0.20.0 to 0.20.1. - [Changelog](https://github.com/snapview/tokio-tungstenite/blob/master/CHANGELOG.md) - [Commits](https://github.com/snapview/tokio-tungstenite/compare/v0.20.0...v0.20.1) --- updated-dependencies: - dependency-name: tokio-tungstenite dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 29 +++++------------------------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 29 +++++------------------------ 3 files changed, 11 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0954cf387b1b01..2f21bf22a2b32a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4540,7 +4540,7 @@ checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", - "rustls-webpki 0.101.4", + "rustls-webpki", "sct", ] @@ -4574,16 +4574,6 @@ dependencies = [ "base64 0.13.1", ] -[[package]] -name = "rustls-webpki" -version = "0.100.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff011474fa39949b7e5c0428f9b4937eda7da7848bbb947786b7be0b27dab" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.101.4" @@ -8189,9 +8179,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2dbec703c26b00d74844519606ef15d09a7d6857860f84ad223dec002ddea2" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", @@ -8199,7 +8189,7 @@ dependencies = [ "tokio", "tokio-rustls", "tungstenite", - "webpki-roots 0.23.1", + "webpki-roots 0.25.2", ] [[package]] @@ -8683,22 +8673,13 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" -dependencies = [ - "rustls-webpki 0.100.2", -] - [[package]] name = "webpki-roots" version = "0.24.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" dependencies = [ - "rustls-webpki 0.101.4", + "rustls-webpki", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1ec266eeee4bb9..4bb4fea6ac9442 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -397,7 +397,7 @@ tiny-bip39 = "0.8.2" tokio = "1.29.1" tokio-serde = "0.8" tokio-stream = "0.1.14" -tokio-tungstenite = "0.20.0" +tokio-tungstenite = "0.20.1" tokio-util = "0.6" tonic = "0.9.2" tonic-build = "0.9.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index abc80b929dca85..82eec5ef06b6f4 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3982,7 +3982,7 @@ checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", - "rustls-webpki 0.101.4", + "rustls-webpki", "sct", ] @@ -4016,16 +4016,6 @@ dependencies = [ "base64 0.13.1", ] -[[package]] -name = "rustls-webpki" -version = "0.100.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff011474fa39949b7e5c0428f9b4937eda7da7848bbb947786b7be0b27dab" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.101.4" @@ -7072,9 +7062,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2dbec703c26b00d74844519606ef15d09a7d6857860f84ad223dec002ddea2" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", @@ -7082,7 +7072,7 @@ dependencies = [ "tokio", "tokio-rustls", "tungstenite", - "webpki-roots 0.23.1", + "webpki-roots 0.25.2", ] [[package]] @@ -7552,22 +7542,13 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" -dependencies = [ - "rustls-webpki 0.100.2", -] - [[package]] name = "webpki-roots" version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" dependencies = [ - "rustls-webpki 0.101.4", + "rustls-webpki", ] [[package]] From a9b0fb492be8f0228d446e05ec7377c9c2c26efe Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 26 Sep 2023 07:27:50 -0700 Subject: [PATCH 186/407] =?UTF-8?q?split=20hash=20calc=20ancient=20slot=20?= =?UTF-8?q?boundary=20to=20allow=20for=20ancient=20shrinking=20=E2=80=A6?= =?UTF-8?q?=20(#33216)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit split hash calc ancient slot boundary to allow for ancient shrinking to be behind --- accounts-db/src/accounts_db.rs | 106 ++++++++++++++++++++++++++++----- 1 file changed, 91 insertions(+), 15 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 6974e214cbf660..9d8dd73d6795ef 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1731,7 +1731,10 @@ impl SplitAncientStorages { // 2. first unevenly divided chunk starting at 1 epoch old slot (may be empty) // 3. evenly divided full chunks in the middle // 4. 
unevenly divided chunk of most recent slots (may be empty) - let ancient_slots = Self::get_ancient_slots(oldest_non_ancient_slot, snapshot_storages); + let ancient_slots = + Self::get_ancient_slots(oldest_non_ancient_slot, snapshot_storages, |storage| { + storage.capacity() > get_ancient_append_vec_capacity() * 50 / 100 + }); let first_non_ancient_slot = ancient_slots .last() @@ -1741,15 +1744,33 @@ impl SplitAncientStorages { } /// return all ancient append vec slots from the early slots referenced by 'snapshot_storages' + /// `treat_as_ancient` returns true if the storage at this slot is large and should be treated individually by accounts hash calculation. + /// `treat_as_ancient` is a fn so that we can test this well. Otherwise, we have to generate large append vecs to pass the intended checks. fn get_ancient_slots( oldest_non_ancient_slot: Slot, snapshot_storages: &SortedStorages, + treat_as_ancient: impl Fn(&AccountStorageEntry) -> bool, ) -> Vec { let range = snapshot_storages.range(); - snapshot_storages + let mut i = 0; + let mut len_trucate = 0; + let mut possible_ancient_slots = snapshot_storages .iter_range(&(range.start..oldest_non_ancient_slot)) - .filter_map(|(slot, storage)| storage.map(|_| slot)) - .collect() + .filter_map(|(slot, storage)| { + storage.map(|storage| { + i += 1; + if treat_as_ancient(storage) { + // even though the slot is in range of being an ancient append vec, if it isn't actually a large append vec, + // then we are better off treating all these slots as normally cachable to reduce work in dedup. + // Since this one is large, for the moment, this one becomes the highest slot where we want to individually cache files. + len_trucate = i; + } + slot + }) + }) + .collect::>(); + possible_ancient_slots.truncate(len_trucate); + possible_ancient_slots } /// create once ancient slots have been identified @@ -16763,22 +16784,77 @@ pub mod tests { // 3 = ancient slots: 1, 2 // 4 = ancient slots: 1, 2, 3 // 5 = ... 
+ for all_are_large in [false, true] { + for oldest_non_ancient_slot in 0..6 { + let ancient_slots = SplitAncientStorages::get_ancient_slots( + oldest_non_ancient_slot, + &snapshot_storages, + |_storage| all_are_large, + ); + + if all_are_large { + assert_eq!( + raw_storages + .iter() + .filter_map(|storage| { + let slot = storage.slot(); + (slot < oldest_non_ancient_slot).then_some(slot) + }) + .collect::>(), + ancient_slots, + "count: {count}" + ); + } else { + // none are treated as ancient since none were deemed large enough append vecs. + assert!(ancient_slots.is_empty()); + } + } + } + } + } + + #[test] + fn test_get_ancient_slots_one_large() { + let slot1 = 1; + let db = AccountsDb::new_single_for_tests(); + // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense + let storages = (0..3) + .map(|i| db.create_and_insert_store(slot1 + (i as Slot), 1000, "test")) + .collect::>(); + + for count in 1..4 { + // use subset of storages + let mut raw_storages = storages.clone(); + raw_storages.truncate(count); + let snapshot_storages = SortedStorages::new(&raw_storages); + // 0 = all storages are non-ancient + // 1 = all storages are non-ancient + // 2 = ancient slots: 1 + // 3 = ancient slots: 1, 2 + // 4 = ancient slots: 1, 2, 3 (except 2 is large, 3 is not, so treat 3 as non-ancient) + // 5 = ... 
for oldest_non_ancient_slot in 0..6 { let ancient_slots = SplitAncientStorages::get_ancient_slots( oldest_non_ancient_slot, &snapshot_storages, + |storage| storage.slot() == 2, ); - assert_eq!( - raw_storages - .iter() - .filter_map(|storage| { - let slot = storage.slot(); - (slot < oldest_non_ancient_slot).then_some(slot) - }) - .collect::>(), - ancient_slots, - "count: {count}" - ); + let mut expected = raw_storages + .iter() + .filter_map(|storage| { + let slot = storage.slot(); + (slot < oldest_non_ancient_slot).then_some(slot) + }) + .collect::>(); + if expected.len() >= 2 { + // slot 3 is not considered ancient since slot 3 is a small append vec. + // slot 2 is the only large append vec, so 1 by itself is not ancient. [1, 2] is ancient, [1,2,3] becomes just [1,2] + expected.truncate(2); + } else { + // we're not asking about the big append vec at 2, so nothing + expected.clear(); + } + assert_eq!(expected, ancient_slots, "count: {count}"); } } } From 4488cc241ff33e48e5d2ed8544295303cfedc33f Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 26 Sep 2023 08:26:51 -0700 Subject: [PATCH 187/407] Cleanup cargo deps in vote crate (#33407) --- Cargo.lock | 59 ------------------------------------ programs/sbf/Cargo.lock | 53 --------------------------------- vote/Cargo.toml | 66 ++--------------------------------------- 3 files changed, 2 insertions(+), 176 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f21bf22a2b32a..f5d49601cbff59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7408,78 +7408,19 @@ dependencies = [ name = "solana-vote" version = "1.17.0" dependencies = [ - "arrayref", - "assert_matches", "bincode", - "blake3", - "bv", - "bytemuck", - "byteorder", - "bzip2", "crossbeam-channel", - "dashmap 4.0.2", - "dir-diff", - "ed25519-dalek", - "flate2", - "fnv", - "fs-err", - "im", - "index_list", "itertools", - "lazy_static", - "libsecp256k1", "log", - "lru", - "lz4", - "memmap2", - "memoffset 0.9.0", - "modular-bitfield", - "num-derive", - 
"num-traits", - "num_cpus", - "num_enum 0.6.1", - "ouroboros", - "percentage", - "qualifier_attr", "rand 0.8.5", - "rand_chacha 0.3.1", - "rayon", - "regex", "rustc_version 0.4.0", "serde", "serde_derive", - "siphasher", - "solana-address-lookup-table-program", - "solana-bpf-loader-program", - "solana-bucket-map", - "solana-compute-budget-program", - "solana-config-program", - "solana-cost-model", "solana-frozen-abi", "solana-frozen-abi-macro", - "solana-loader-v4-program", - "solana-logger", - "solana-measure", - "solana-metrics", - "solana-perf", - "solana-program-runtime", - "solana-rayon-threadlimit", "solana-sdk", - "solana-stake-program", - "solana-system-program", - "solana-vote", "solana-vote-program", - "solana-zk-token-proof-program", - "solana-zk-token-sdk", - "static_assertions", - "strum", - "strum_macros", - "symlink", - "tar", - "tempfile", - "test-case", "thiserror", - "zstd", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 82eec5ef06b6f4..c507d05965f299 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6402,70 +6402,17 @@ dependencies = [ name = "solana-vote" version = "1.17.0" dependencies = [ - "arrayref", - "bincode", - "blake3", - "bv", - "bytemuck", - "byteorder 1.4.3", - "bzip2", "crossbeam-channel", - "dashmap", - "dir-diff", - "flate2", - "fnv", - "fs-err", - "im", - "index_list", "itertools", - "lazy_static", "log", - "lru", - "lz4", - "memmap2", - "modular-bitfield", - "num-derive", - "num-traits", - "num_cpus", - "num_enum 0.6.1", - "ouroboros", - "percentage", - "qualifier_attr", - "rand 0.8.5", - "rayon", - "regex", "rustc_version", "serde", "serde_derive", - "siphasher", - "solana-address-lookup-table-program", - "solana-bpf-loader-program", - "solana-bucket-map", - "solana-compute-budget-program", - "solana-config-program", - "solana-cost-model", "solana-frozen-abi", "solana-frozen-abi-macro", - "solana-loader-v4-program", - "solana-measure", - "solana-metrics", - "solana-perf", 
- "solana-program-runtime", - "solana-rayon-threadlimit", "solana-sdk", - "solana-stake-program", - "solana-system-program", "solana-vote-program", - "solana-zk-token-proof-program", - "solana-zk-token-sdk", - "static_assertions", - "strum", - "strum_macros", - "symlink", - "tar", - "tempfile", "thiserror", - "zstd", ] [[package]] diff --git a/vote/Cargo.toml b/vote/Cargo.toml index 13adc56ba48f70..293e7ac8004f5c 100644 --- a/vote/Cargo.toml +++ b/vote/Cargo.toml @@ -10,86 +10,24 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -arrayref = { workspace = true } -bincode = { workspace = true } -blake3 = { workspace = true } -bv = { workspace = true, features = ["serde"] } -bytemuck = { workspace = true } -byteorder = { workspace = true } -bzip2 = { workspace = true } crossbeam-channel = { workspace = true } -dashmap = { workspace = true, features = ["rayon", "raw-api"] } -dir-diff = { workspace = true } -flate2 = { workspace = true } -fnv = { workspace = true } -fs-err = { workspace = true } -im = { workspace = true, features = ["rayon", "serde"] } -index_list = { workspace = true } itertools = { workspace = true } -lazy_static = { workspace = true } log = { workspace = true } -lru = { workspace = true } -lz4 = { workspace = true } -memmap2 = { workspace = true } -modular-bitfield = { workspace = true } -num-derive = { workspace = true } -num-traits = { workspace = true } -num_cpus = { workspace = true } -num_enum = { workspace = true } -ouroboros = { workspace = true } -percentage = { workspace = true } -qualifier_attr = { workspace = true } -rand = { workspace = true } -rayon = { workspace = true } -regex = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } -siphasher = { workspace = true } -solana-address-lookup-table-program = { workspace = true } -solana-bpf-loader-program = { workspace = true } -solana-bucket-map = { workspace = true } -solana-compute-budget-program = { 
workspace = true } -solana-config-program = { workspace = true } -solana-cost-model = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } -solana-loader-v4-program = { workspace = true } -solana-measure = { workspace = true } -solana-metrics = { workspace = true } -solana-perf = { workspace = true } -solana-program-runtime = { workspace = true } -solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } -solana-stake-program = { workspace = true } -solana-system-program = { workspace = true } solana-vote-program = { workspace = true } -solana-zk-token-proof-program = { workspace = true } -solana-zk-token-sdk = { workspace = true } -static_assertions = { workspace = true } -strum = { workspace = true, features = ["derive"] } -strum_macros = { workspace = true } -symlink = { workspace = true } -tar = { workspace = true } -tempfile = { workspace = true } thiserror = { workspace = true } -zstd = { workspace = true } [lib] crate-type = ["lib"] name = "solana_vote" [dev-dependencies] -assert_matches = { workspace = true } -ed25519-dalek = { workspace = true } -libsecp256k1 = { workspace = true } -memoffset = { workspace = true } -rand_chacha = { workspace = true } -solana-logger = { workspace = true } -solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } -# See order-crates-for-publishing.py for using this unusual `path = "."` -solana-vote = { path = ".", features = ["dev-context-only-utils"] } -static_assertions = { workspace = true } -test-case = { workspace = true } +bincode = { workspace = true } +rand = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] From ddd029774afcdf695ebf44494a04942ba3570075 Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 26 Sep 2023 10:13:17 -0600 Subject: [PATCH 188/407] Add geyser block-metadata notification with entry count (#33359) * Add new ReplicaBlockInfoVersions variant * Use new variant to return 
entry count --- core/src/replay_stage.rs | 1 + .../src/geyser_plugin_interface.rs | 17 ++++++++++++++++- .../src/block_metadata_notifier.rs | 12 ++++++++---- .../src/block_metadata_notifier_interface.rs | 2 ++ 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 5a6d825e3aa3d7..b7d7db0dcec595 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -2927,6 +2927,7 @@ impl ReplayStage { Some(bank.clock().unix_timestamp), Some(bank.block_height()), bank.executed_transaction_count(), + r_replay_progress.num_entries as u64, ) } bank_complete_time.stop(); diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs index 87c0987f2948e6..b2bbb5a4953aed 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -193,7 +193,7 @@ pub struct ReplicaBlockInfo<'a> { pub block_height: Option, } -/// Extending ReplicaBlockInfo by sending the transaction_entries_count. +/// Extending ReplicaBlockInfo by sending the executed_transaction_count. #[derive(Clone, Debug)] pub struct ReplicaBlockInfoV2<'a> { pub parent_slot: Slot, @@ -206,9 +206,24 @@ pub struct ReplicaBlockInfoV2<'a> { pub executed_transaction_count: u64, } +/// Extending ReplicaBlockInfo by sending the entries_count. 
+#[derive(Clone, Debug)] +pub struct ReplicaBlockInfoV3<'a> { + pub parent_slot: Slot, + pub parent_blockhash: &'a str, + pub slot: Slot, + pub blockhash: &'a str, + pub rewards: &'a [Reward], + pub block_time: Option, + pub block_height: Option, + pub executed_transaction_count: u64, + pub entry_count: u64, +} + pub enum ReplicaBlockInfoVersions<'a> { V0_0_1(&'a ReplicaBlockInfo<'a>), V0_0_2(&'a ReplicaBlockInfoV2<'a>), + V0_0_3(&'a ReplicaBlockInfoV3<'a>), } /// Errors returned by plugin calls diff --git a/geyser-plugin-manager/src/block_metadata_notifier.rs b/geyser-plugin-manager/src/block_metadata_notifier.rs index 2fcf409ca49b16..ab56cf3be81701 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier.rs +++ b/geyser-plugin-manager/src/block_metadata_notifier.rs @@ -6,7 +6,7 @@ use { log::*, solana_accounts_db::stake_rewards::RewardInfo, solana_geyser_plugin_interface::geyser_plugin_interface::{ - ReplicaBlockInfoV2, ReplicaBlockInfoVersions, + ReplicaBlockInfoV3, ReplicaBlockInfoVersions, }, solana_measure::measure::Measure, solana_metrics::*, @@ -31,6 +31,7 @@ impl BlockMetadataNotifier for BlockMetadataNotifierImpl { block_time: Option, block_height: Option, executed_transaction_count: u64, + entry_count: u64, ) { let plugin_manager = self.plugin_manager.read().unwrap(); if plugin_manager.plugins.is_empty() { @@ -49,8 +50,9 @@ impl BlockMetadataNotifier for BlockMetadataNotifierImpl { block_time, block_height, executed_transaction_count, + entry_count, ); - let block_info = ReplicaBlockInfoVersions::V0_0_2(&block_info); + let block_info = ReplicaBlockInfoVersions::V0_0_3(&block_info); match plugin.notify_block_metadata(block_info) { Err(err) => { error!( @@ -103,8 +105,9 @@ impl BlockMetadataNotifierImpl { block_time: Option, block_height: Option, executed_transaction_count: u64, - ) -> ReplicaBlockInfoV2<'a> { - ReplicaBlockInfoV2 { + entry_count: u64, + ) -> ReplicaBlockInfoV3<'a> { + ReplicaBlockInfoV3 { parent_slot, parent_blockhash, slot, @@ 
-113,6 +116,7 @@ impl BlockMetadataNotifierImpl { block_time, block_height, executed_transaction_count, + entry_count, } } diff --git a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs index 9b7c34ed5c081f..f48df55d8d0ce5 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs +++ b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs @@ -7,6 +7,7 @@ use { /// Interface for notifying block metadata changes pub trait BlockMetadataNotifier { /// Notify the block metadata + #[allow(clippy::too_many_arguments)] fn notify_block_metadata( &self, parent_slot: u64, @@ -17,6 +18,7 @@ pub trait BlockMetadataNotifier { block_time: Option, block_height: Option, executed_transaction_count: u64, + entry_count: u64, ); } From 01c71e75553f4b6eb7845ba125b1ad3d5f13c04c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Tue, 26 Sep 2023 20:43:41 +0200 Subject: [PATCH 189/407] Reloads deployments with `environments.program_runtime_v1` (#33412) Reloads deployments with environments.program_runtime_v1. --- programs/bpf_loader/src/lib.rs | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index eaefd76f1ae034..82c623746406f2 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -22,6 +22,7 @@ use { elf::Executable, error::EbpfError, memory_region::{AccessType, MemoryCowCallback, MemoryMapping, MemoryRegion}, + verifier::RequisiteVerifier, vm::{BuiltinProgram, ContextObject, EbpfVm, ProgramResult}, }, solana_sdk::{ @@ -120,7 +121,7 @@ macro_rules! deploy_program { $account_size:expr, $slot:expr, $drop:expr, $new_programdata:expr $(,)?) 
=> {{ let mut load_program_metrics = LoadProgramMetrics::default(); let mut register_syscalls_time = Measure::start("register_syscalls_time"); - let program_runtime_environment = create_program_runtime_environment_v1( + let deployment_program_runtime_environment = create_program_runtime_environment_v1( &$invoke_context.feature_set, $invoke_context.get_compute_budget(), true, /* deployment */ @@ -131,6 +132,25 @@ macro_rules! deploy_program { })?; register_syscalls_time.stop(); load_program_metrics.register_syscalls_us = register_syscalls_time.as_us(); + // Verify using stricter deployment_program_runtime_environment + let mut load_elf_time = Measure::start("load_elf_time"); + let executable = Executable::::load( + $new_programdata, + Arc::new(deployment_program_runtime_environment), + ).map_err(|err| { + ic_logger_msg!($invoke_context.get_log_collector(), "{}", err); + InstructionError::InvalidAccountData + })?; + load_elf_time.stop(); + load_program_metrics.load_elf_us = load_elf_time.as_us(); + let mut verify_code_time = Measure::start("verify_code_time"); + executable.verify::().map_err(|err| { + ic_logger_msg!($invoke_context.get_log_collector(), "{}", err); + InstructionError::InvalidAccountData + })?; + verify_code_time.stop(); + load_program_metrics.verify_code_us = verify_code_time.as_us(); + // Reload but with environments.program_runtime_v1 let executor = load_program_from_bytes( $invoke_context.feature_set.is_active(&delay_visibility_of_program_deployment::id()), $invoke_context.get_log_collector(), @@ -139,8 +159,8 @@ macro_rules! 
deploy_program { $loader_key, $account_size, $slot, - Arc::new(program_runtime_environment), - false, + $invoke_context.programs_modified_by_tx.environments.program_runtime_v1.clone(), + true, )?; if let Some(old_entry) = $invoke_context.find_program_in_cache(&$program_id) { executor.tx_usage_counter.store( From 9f6f5325354b3c4c3055dbbfa704d2bb6650cc2e Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 26 Sep 2023 15:38:37 -0400 Subject: [PATCH 190/407] `flush_slot_cache_with_clean()` takes a single Slot (#33413) --- accounts-db/src/accounts_db.rs | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 9d8dd73d6795ef..ee54d2a22c844b 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6481,7 +6481,7 @@ impl AccountsDb { let mut num_roots_flushed = 0; for &root in cached_roots.iter().rev() { if self - .flush_slot_cache_with_clean(&[root], should_flush_f.as_mut(), max_clean_root) + .flush_slot_cache_with_clean(root, should_flush_f.as_mut(), max_clean_root) .is_some() { num_roots_flushed += 1; @@ -6643,7 +6643,7 @@ impl AccountsDb { /// flush all accounts in this slot fn flush_slot_cache(&self, slot: Slot) -> Option { - self.flush_slot_cache_with_clean(&[slot], None::<&mut fn(&_, &_) -> bool>, None) + self.flush_slot_cache_with_clean(slot, None::<&mut fn(&_, &_) -> bool>, None) } /// 1.13 and some 1.14 could produce legal snapshots with more than 1 append vec per slot. 
@@ -6696,12 +6696,10 @@ impl AccountsDb { /// accounts fn flush_slot_cache_with_clean( &self, - slots: &[Slot], + slot: Slot, should_flush_f: Option<&mut impl FnMut(&Pubkey, &AccountSharedData) -> bool>, max_clean_root: Option, ) -> Option { - assert_eq!(1, slots.len()); - let slot = slots[0]; if self .remove_unrooted_slots_synchronization .slots_under_contention @@ -6725,15 +6723,12 @@ impl AccountsDb { // Nobody else should have been purging this slot, so should not have been removed // from `self.remove_unrooted_slots_synchronization`. - - slots.iter().for_each(|slot| { - assert!(self - .remove_unrooted_slots_synchronization - .slots_under_contention - .lock() - .unwrap() - .remove(slot)); - }); + assert!(self + .remove_unrooted_slots_synchronization + .slots_under_contention + .lock() + .unwrap() + .remove(&slot)); // Signal to any threads blocked on `remove_unrooted_slots(slot)` that we have finished // flushing From 746f69772a33f98c1d9afab91eda14f491c7c2e9 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Tue, 26 Sep 2023 14:05:36 -0700 Subject: [PATCH 191/407] [TieredStorage] Streamline the handling of TieredStorageFormat (#33396) #### Problem The TieredStorageFormat field in the TieredStorage is only used in the write path. #### Summary of Changes This PR simplifies the handling of TieredStorageFormat by removing its field from TieredStorage struct but passing via write_accounts(). 
--- accounts-db/src/tiered_storage.rs | 34 +++++++++++-------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 65d3485dccb064..549528f22be6d4 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -46,7 +46,6 @@ pub struct TieredStorageFormat { #[derive(Debug)] pub struct TieredStorage { reader: OnceLock, - format: Option, path: PathBuf, } @@ -64,10 +63,9 @@ impl TieredStorage { /// /// Note that the actual file will not be created until write_accounts /// is called. - pub fn new_writable(path: impl Into, format: TieredStorageFormat) -> Self { + pub fn new_writable(path: impl Into) -> Self { Self { reader: OnceLock::::new(), - format: Some(format), path: path.into(), } } @@ -78,7 +76,6 @@ impl TieredStorage { let path = path.into(); Ok(Self { reader: TieredStorageReader::new_from_path(&path).map(OnceLock::from)?, - format: None, path, }) } @@ -104,6 +101,7 @@ impl TieredStorage { &self, accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, skip: usize, + format: &TieredStorageFormat, ) -> TieredStorageResult> { if self.is_read_only() { return Err(TieredStorageError::AttemptToUpdateReadOnly( @@ -112,10 +110,7 @@ impl TieredStorage { } let result = { - // self.format must be Some as write_accounts can only be called on a - // TieredStorage instance created via new_writable() where its format - // field is required. 
- let writer = TieredStorageWriter::new(&self.path, self.format.as_ref().unwrap())?; + let writer = TieredStorageWriter::new(&self.path, format)?; writer.write_accounts(accounts, skip) }; @@ -191,7 +186,7 @@ mod tests { Vec::::new(), ); - let result = tiered_storage.write_accounts(&storable_accounts, 0); + let result = tiered_storage.write_accounts(&storable_accounts, 0, &HOT_FORMAT); match (&result, &expected_result) { ( @@ -220,10 +215,8 @@ mod tests { let tiered_storage_path = temp_dir.path().join("test_new_meta_file_only"); { - let tiered_storage = ManuallyDrop::new(TieredStorage::new_writable( - &tiered_storage_path, - HOT_FORMAT.clone(), - )); + let tiered_storage = + ManuallyDrop::new(TieredStorage::new_writable(&tiered_storage_path)); assert!(!tiered_storage.is_read_only()); assert_eq!(tiered_storage.path(), tiered_storage_path); @@ -256,7 +249,7 @@ mod tests { let temp_dir = tempdir().unwrap(); let tiered_storage_path = temp_dir.path().join("test_write_accounts_twice"); - let tiered_storage = TieredStorage::new_writable(&tiered_storage_path, HOT_FORMAT.clone()); + let tiered_storage = TieredStorage::new_writable(&tiered_storage_path); // Expect the result to be TieredStorageError::Unsupported as the feature // is not yet fully supported, but we can still check its partial results // in the test. 
@@ -277,18 +270,15 @@ mod tests { let temp_dir = tempdir().unwrap(); let tiered_storage_path = temp_dir.path().join("test_remove_on_drop"); { - let tiered_storage = - TieredStorage::new_writable(&tiered_storage_path, HOT_FORMAT.clone()); + let tiered_storage = TieredStorage::new_writable(&tiered_storage_path); write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); } // expect the file does not exists as it has been removed on drop assert!(!tiered_storage_path.try_exists().unwrap()); { - let tiered_storage = ManuallyDrop::new(TieredStorage::new_writable( - &tiered_storage_path, - HOT_FORMAT.clone(), - )); + let tiered_storage = + ManuallyDrop::new(TieredStorage::new_writable(&tiered_storage_path)); write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); } // expect the file exists as we have ManuallyDrop this time. @@ -370,8 +360,8 @@ mod tests { let temp_dir = tempdir().unwrap(); let tiered_storage_path = temp_dir.path().join(path_suffix); - let tiered_storage = TieredStorage::new_writable(tiered_storage_path, format.clone()); - _ = tiered_storage.write_accounts(&storable_accounts, 0); + let tiered_storage = TieredStorage::new_writable(tiered_storage_path); + _ = tiered_storage.write_accounts(&storable_accounts, 0, &format); verify_hot_storage(&tiered_storage, &accounts, format); } From e088eb2be0c1111c438f1cf716d7fd8ae6ebcd9f Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Tue, 26 Sep 2023 17:35:25 -0500 Subject: [PATCH 192/407] Code clean up (#33417) clean up Co-authored-by: HaoranYi --- accounts-db/src/accounts_hash.rs | 4 ++-- bucket_map/src/bucket.rs | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 77bdc31601f625..74d1cfa8a1681a 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -542,7 +542,7 @@ impl<'a> AccountsHasher<'a> { // This function is designed to allow hashes to be located in 
multiple, perhaps multiply deep vecs. // The caller provides a function to return a slice from the source data. - pub fn compute_merkle_root_from_slices<'b, F, T>( + fn compute_merkle_root_from_slices<'b, F, T>( total_hashes: usize, fanout: usize, max_levels_per_pass: Option, @@ -706,7 +706,7 @@ impl<'a> AccountsHasher<'a> { } } - pub fn compute_merkle_root_from_slices_recurse( + fn compute_merkle_root_from_slices_recurse( hashes: Vec, fanout: usize, max_levels_per_pass: Option, diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 1eb9ae18b60850..036743c214dc46 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -476,10 +476,9 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { // pop one entry at a time to insert in the first free location we find 'outer: while let Some((ix_entry_raw, i)) = reverse_sorted_entries.pop() { let (k, v) = &items[i]; - let ix_entry = ix_entry_raw % cap; // search for an empty spot starting at `ix_entry` for search in 0..search_end { - let ix_index = (ix_entry + search) % cap; + let ix_index = (ix_entry_raw + search) % cap; let elem = IndexEntryPlaceInBucket::new(ix_index); if index.try_lock(ix_index) { *entries_created_on_disk += 1; From 177fd08707a31d58e661456f5c17833100bf5803 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 19:52:49 -0600 Subject: [PATCH 193/407] build(deps): bump blake3 from 1.4.1 to 1.5.0 (#33368) * build(deps): bump blake3 from 1.4.1 to 1.5.0 Bumps [blake3](https://github.com/BLAKE3-team/BLAKE3) from 1.4.1 to 1.5.0. - [Release notes](https://github.com/BLAKE3-team/BLAKE3/releases) - [Commits](https://github.com/BLAKE3-team/BLAKE3/compare/1.4.1...1.5.0) --- updated-dependencies: - dependency-name: blake3 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f5d49601cbff59..3444fe05c317e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -304,9 +304,9 @@ checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "ascii" @@ -634,9 +634,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" +checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", "arrayvec", diff --git a/Cargo.toml b/Cargo.toml index 4bb4fea6ac9442..9cb29f568c2836 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -150,7 +150,7 @@ backoff = "0.4.0" base64 = "0.21.4" bincode = "1.3.3" bitflags = { version = "2.3.3", features = ["serde"] } -blake3 = "1.4.1" +blake3 = "1.5.0" block-buffer = "0.10.4" borsh = "0.10.3" bs58 = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c507d05965f299..b38658d87499ec 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -292,9 +292,9 @@ checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" -version = "0.7.1" +version = "0.7.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4dc07131ffa69b8072d35f5007352af944213cde02545e2103680baed38fcd" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "ascii" @@ -593,9 +593,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" +checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", "arrayvec", From 634eede841ccfa1cb45ef235cc7494ebbbf1d6bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 09:45:14 +0000 Subject: [PATCH 194/407] build(deps): bump thiserror from 1.0.48 to 1.0.49 (#33423) * build(deps): bump thiserror from 1.0.48 to 1.0.49 Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.48 to 1.0.49. - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.48...1.0.49) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3444fe05c317e0..a590c07c5d4a41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7914,18 +7914,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 9cb29f568c2836..ef02247eedb735 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -392,7 +392,7 @@ tar = "0.4.40" tarpc = "0.29.0" tempfile = "3.8.0" test-case = "3.2.1" -thiserror = "1.0.48" +thiserror = "1.0.49" tiny-bip39 = "0.8.2" tokio = "1.29.1" tokio-serde = "0.8" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b38658d87499ec..1ba2ee4fc8e052 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6819,18 +6819,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", From ca92e9c3877e2a17374d59b634ab98837212e0fb Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 27 Sep 2023 19:02:05 +0800 Subject: [PATCH 195/407] chore(solana-accounts-db): remove unused deps (#33420) --- Cargo.lock | 11 ----------- accounts-db/Cargo.toml | 11 ----------- programs/sbf/Cargo.lock | 10 ---------- 3 files changed, 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a590c07c5d4a41..0bef486edce046 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5141,7 +5141,6 @@ dependencies = [ "bzip2", "crossbeam-channel", "dashmap 4.0.2", - "dir-diff", "ed25519-dalek", "flate2", "fnv", @@ -5152,7 +5151,6 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "lru", "lz4", "memmap2", "memoffset 0.9.0", @@ -5171,20 +5169,14 @@ dependencies = [ "rustc_version 0.4.0", "serde", "serde_derive", - "siphasher", "solana-accounts-db", - "solana-bpf-loader-program", "solana-bucket-map", - "solana-compute-budget-program", "solana-config-program", - "solana-cost-model", "solana-frozen-abi", "solana-frozen-abi-macro", - "solana-loader-v4-program", "solana-logger", "solana-measure", "solana-metrics", - "solana-perf", "solana-program-runtime", "solana-rayon-threadlimit", "solana-sdk", @@ -5196,12 +5188,9 @@ dependencies = [ "static_assertions", "strum", "strum_macros", - "symlink", "tar", "tempfile", - "test-case", "thiserror", - "zstd", ] [[package]] diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 680412fcbaf98d..38cdd5b29e91b5 100644 
--- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -19,7 +19,6 @@ byteorder = { workspace = true } bzip2 = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } -dir-diff = { workspace = true } flate2 = { workspace = true } fnv = { workspace = true } fs-err = { workspace = true } @@ -28,7 +27,6 @@ index_list = { workspace = true } itertools = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } -lru = { workspace = true } lz4 = { workspace = true } memmap2 = { workspace = true } modular-bitfield = { workspace = true } @@ -44,18 +42,12 @@ rayon = { workspace = true } regex = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } -siphasher = { workspace = true } -solana-bpf-loader-program = { workspace = true } solana-bucket-map = { workspace = true } -solana-compute-budget-program = { workspace = true } solana-config-program = { workspace = true } -solana-cost-model = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } -solana-loader-v4-program = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } -solana-perf = { workspace = true } solana-program-runtime = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } @@ -67,11 +59,9 @@ solana-zk-token-sdk = { workspace = true } static_assertions = { workspace = true } strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } -symlink = { workspace = true } tar = { workspace = true } tempfile = { workspace = true } thiserror = { workspace = true } -zstd = { workspace = true } [lib] crate-type = ["lib"] @@ -88,7 +78,6 @@ solana-accounts-db = { path = ".", features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = 
["dev-context-only-utils"] } static_assertions = { workspace = true } -test-case = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1ba2ee4fc8e052..acb6ebcc66d2c0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4467,7 +4467,6 @@ dependencies = [ "bzip2", "crossbeam-channel", "dashmap", - "dir-diff", "flate2", "fnv", "fs-err", @@ -4476,7 +4475,6 @@ dependencies = [ "itertools", "lazy_static", "log", - "lru", "lz4", "memmap2", "modular-bitfield", @@ -4493,18 +4491,12 @@ dependencies = [ "rustc_version", "serde", "serde_derive", - "siphasher", - "solana-bpf-loader-program", "solana-bucket-map", - "solana-compute-budget-program", "solana-config-program", - "solana-cost-model", "solana-frozen-abi", "solana-frozen-abi-macro", - "solana-loader-v4-program", "solana-measure", "solana-metrics", - "solana-perf", "solana-program-runtime", "solana-rayon-threadlimit", "solana-sdk", @@ -4516,11 +4508,9 @@ dependencies = [ "static_assertions", "strum", "strum_macros", - "symlink", "tar", "tempfile", "thiserror", - "zstd", ] [[package]] From f502dbc54ece05f6335eeebeaa389da2b51ea006 Mon Sep 17 00:00:00 2001 From: Jon Cinque Date: Wed, 27 Sep 2023 17:37:28 +0200 Subject: [PATCH 196/407] ci: Re-enable spl-stake-pool downstream job (#33425) --- .github/workflows/downstream-project-spl.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index 09c457c038f9ae..f0ecfb20accb4e 100644 --- a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -128,7 +128,7 @@ jobs: - [governance/addin-mock/program, governance/program] - [memo/program] - [name-service/program] - # - [stake-pool/program] + - [stake-pool/program] - [single-pool/program] steps: From 3608378097f6ebb542e621ab784468dc25aac8a6 Mon Sep 17 00:00:00 2001 
From: Yihau Chen Date: Thu, 28 Sep 2023 00:27:40 +0800 Subject: [PATCH 197/407] chore(solana-accounts-db): remove unused deps (#33429) --- Cargo.lock | 2 -- accounts-db/Cargo.toml | 2 -- programs/sbf/Cargo.lock | 2 -- 3 files changed, 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0bef486edce046..3f7ccdea909c8d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5183,8 +5183,6 @@ dependencies = [ "solana-stake-program", "solana-system-program", "solana-vote-program", - "solana-zk-token-proof-program", - "solana-zk-token-sdk", "static_assertions", "strum", "strum_macros", diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 38cdd5b29e91b5..a19708768fdd08 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -54,8 +54,6 @@ solana-sdk = { workspace = true } solana-stake-program = { workspace = true } solana-system-program = { workspace = true } solana-vote-program = { workspace = true } -solana-zk-token-proof-program = { workspace = true } -solana-zk-token-sdk = { workspace = true } static_assertions = { workspace = true } strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index acb6ebcc66d2c0..623b88fad38af1 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4503,8 +4503,6 @@ dependencies = [ "solana-stake-program", "solana-system-program", "solana-vote-program", - "solana-zk-token-proof-program", - "solana-zk-token-sdk", "static_assertions", "strum", "strum_macros", From 3fbfa0e0da278041331710f60d4806218a806af3 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Wed, 27 Sep 2023 09:35:19 -0700 Subject: [PATCH 198/407] Simplify code and use match to harden logic (#33409) addressed more feedback from Jon: Simplify code and use match to harden logic in connection cache --- connection-cache/src/connection_cache.rs | 53 +++++++++++++----------- 1 file changed, 28 insertions(+), 
25 deletions(-) diff --git a/connection-cache/src/connection_cache.rs b/connection-cache/src/connection_cache.rs index 306a8df2722091..4962f815c33129 100644 --- a/connection-cache/src/connection_cache.rs +++ b/connection-cache/src/connection_cache.rs @@ -196,8 +196,8 @@ where } fn create_connection_internal( - config: &Arc, - connection_manager: &Arc, + config: &C, + connection_manager: &M, map: &mut std::sync::RwLockWriteGuard<'_, IndexMap>, addr: &SocketAddr, connection_pool_size: usize, @@ -276,31 +276,34 @@ where } = match map.get(addr) { Some(pool) => { let pool_status = pool.check_pool_status(self.connection_pool_size); - if matches!(pool_status, PoolStatus::Empty) { - // create more connection and put it in the pool - drop(map); - self.create_connection(&mut lock_timing_ms, addr) - } else { - let connection = pool.borrow_connection(); - if matches!(pool_status, PoolStatus::PartiallyFull) { - debug!("Creating connection async for {addr}"); + match pool_status { + PoolStatus::Empty => { + // create more connection and put it in the pool drop(map); - let mut map = self.map.write().unwrap(); - Self::create_connection_internal( - &self.connection_config, - &self.connection_manager, - &mut map, - addr, - self.connection_pool_size, - Some(&self.sender), - ); + self.create_connection(&mut lock_timing_ms, addr) } - CreateConnectionResult { - connection, - cache_hit: true, - connection_cache_stats: self.stats.clone(), - num_evictions: 0, - eviction_timing_ms: 0, + PoolStatus::PartiallyFull | PoolStatus::Full => { + let connection = pool.borrow_connection(); + if matches!(pool_status, PoolStatus::PartiallyFull) { + debug!("Creating connection async for {addr}"); + drop(map); + let mut map = self.map.write().unwrap(); + Self::create_connection_internal( + &self.connection_config, + &self.connection_manager, + &mut map, + addr, + self.connection_pool_size, + Some(&self.sender), + ); + } + CreateConnectionResult { + connection, + cache_hit: true, + connection_cache_stats: 
self.stats.clone(), + num_evictions: 0, + eviction_timing_ms: 0, + } } } } From 511182479b0e34d9393f1cfedb50a8ebcfcba834 Mon Sep 17 00:00:00 2001 From: Steven Luscher Date: Wed, 27 Sep 2023 09:36:08 -0700 Subject: [PATCH 199/407] docs: add direct link to StackExchange from the On-Chain Programs docs (#32937) * docs: add direct link to StackExchange from the On-Chain Programs docs * Update link to Discord. Co-authored-by: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> --------- Co-authored-by: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> --- docs/src/developing/on-chain-programs/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/developing/on-chain-programs/faq.md b/docs/src/developing/on-chain-programs/faq.md index de255742309bb1..04093f64c54d00 100644 --- a/docs/src/developing/on-chain-programs/faq.md +++ b/docs/src/developing/on-chain-programs/faq.md @@ -6,7 +6,7 @@ When writing or interacting with Solana programs, there are common questions or challenges that often come up. Below are resources to help answer these questions. -If not addressed here, ask on the [Solana Stack Exchange](https://solana.stackexchange.com/) or [StackOverflow](https://stackoverflow.com/questions/tagged/solana) with the `solana` tag +If not addressed here, ask on [StackExchange](https://solana.stackexchange.com/questions/ask?tags=solana-program) with the `solana-program` tag. 
## Limitations From 5d112270882017cef86f5120d04c9a6cec273ce6 Mon Sep 17 00:00:00 2001 From: Kevin Ji <1146876+kevinji@users.noreply.github.com> Date: Wed, 27 Sep 2023 22:32:44 -0400 Subject: [PATCH 200/407] Mark *.sh files with `#!` as executable (#33303) --- ci/buildkite-pipeline-in-disk.sh | 0 ci/buildkite-solana-private.sh | 0 ci/publish-installer.sh | 0 ci/stable/common.sh | 0 ci/stable/run-all.sh | 0 install/solana-install-init.sh | 0 net/scripts/colo-node-onacquire.sh | 0 net/scripts/colo-node-onfree.sh | 0 net/scripts/colo-utils.sh | 0 net/scripts/gce-self-destruct.sh | 0 run.sh | 0 scripts/coverage-in-disk.sh | 0 12 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 ci/buildkite-pipeline-in-disk.sh mode change 100644 => 100755 ci/buildkite-solana-private.sh mode change 100644 => 100755 ci/publish-installer.sh mode change 100644 => 100755 ci/stable/common.sh mode change 100644 => 100755 ci/stable/run-all.sh mode change 100644 => 100755 install/solana-install-init.sh mode change 100644 => 100755 net/scripts/colo-node-onacquire.sh mode change 100644 => 100755 net/scripts/colo-node-onfree.sh mode change 100644 => 100755 net/scripts/colo-utils.sh mode change 100644 => 100755 net/scripts/gce-self-destruct.sh mode change 100644 => 100755 run.sh mode change 100644 => 100755 scripts/coverage-in-disk.sh diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh old mode 100644 new mode 100755 diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh old mode 100644 new mode 100755 diff --git a/ci/publish-installer.sh b/ci/publish-installer.sh old mode 100644 new mode 100755 diff --git a/ci/stable/common.sh b/ci/stable/common.sh old mode 100644 new mode 100755 diff --git a/ci/stable/run-all.sh b/ci/stable/run-all.sh old mode 100644 new mode 100755 diff --git a/install/solana-install-init.sh b/install/solana-install-init.sh old mode 100644 new mode 100755 diff --git a/net/scripts/colo-node-onacquire.sh 
b/net/scripts/colo-node-onacquire.sh old mode 100644 new mode 100755 diff --git a/net/scripts/colo-node-onfree.sh b/net/scripts/colo-node-onfree.sh old mode 100644 new mode 100755 diff --git a/net/scripts/colo-utils.sh b/net/scripts/colo-utils.sh old mode 100644 new mode 100755 diff --git a/net/scripts/gce-self-destruct.sh b/net/scripts/gce-self-destruct.sh old mode 100644 new mode 100755 diff --git a/run.sh b/run.sh old mode 100644 new mode 100755 diff --git a/scripts/coverage-in-disk.sh b/scripts/coverage-in-disk.sh old mode 100644 new mode 100755 From c0100b13ef417badfd46c6ff032529e409770410 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 28 Sep 2023 15:01:14 +0800 Subject: [PATCH 201/407] ci: fix cargo files changes doens't trigger doc tests (#33438) --- ci/buildkite-pipeline.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 44b7481bc67bce..e130c585ad6dde 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -151,6 +151,8 @@ all_test_steps() { # Docs tests if affects \ .rs$ \ + Cargo.lock$ \ + Cargo.toml$ \ ^ci/rust-version.sh \ ^ci/test-docs.sh \ ; then From fa168e3cd1c6023dae398f3ed1007781ce85cb45 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Sep 2023 09:13:52 +0000 Subject: [PATCH 202/407] build(deps): bump indexmap from 2.0.0 to 2.0.1 (#33439) * build(deps): bump indexmap from 2.0.0 to 2.0.1 Bumps [indexmap](https://github.com/bluss/indexmap) from 2.0.0 to 2.0.1. - [Changelog](https://github.com/bluss/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/bluss/indexmap/commits) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 16 ++++++++-------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3f7ccdea909c8d..0de9f872c59c42 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2626,9 +2626,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "ad227c3af19d4914570ad36d30409928b75967c298feb9ea1969db3a610bb14e" dependencies = [ "equivalent", "hashbrown 0.14.0", @@ -4805,7 +4805,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.0.1", "itoa", "ryu", "serde", @@ -5598,7 +5598,7 @@ dependencies = [ "dashmap 4.0.2", "futures 0.3.28", "futures-util", - "indexmap 2.0.0", + "indexmap 2.0.1", "indicatif", "log", "quinn", @@ -5679,7 +5679,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.0.0", + "indexmap 2.0.1", "indicatif", "log", "rand 0.8.5", @@ -6020,7 +6020,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.0.0", + "indexmap 2.0.1", "itertools", "log", "lru", @@ -7070,7 +7070,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.0.0", + "indexmap 2.0.1", "itertools", "libc", "log", @@ -7161,7 +7161,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.0.0", + "indexmap 2.0.1", "indicatif", "pickledb", "serde", @@ -7190,7 +7190,7 @@ dependencies = [ "async-trait", "bincode", 
"futures-util", - "indexmap 2.0.0", + "indexmap 2.0.1", "indicatif", "log", "rayon", diff --git a/Cargo.toml b/Cargo.toml index ef02247eedb735..6e31e785e6c177 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -215,7 +215,7 @@ hyper = "0.14.27" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.7" -indexmap = "2.0.0" +indexmap = "2.0.1" indicatif = "0.17.7" Inflector = "0.11.4" itertools = "0.10.5" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 623b88fad38af1..b321cf18426da4 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2232,9 +2232,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "ad227c3af19d4914570ad36d30409928b75967c298feb9ea1969db3a610bb14e" dependencies = [ "equivalent", "hashbrown 0.14.0", @@ -4202,7 +4202,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.0.1", "itoa", "ryu", "serde", @@ -4695,7 +4695,7 @@ dependencies = [ "dashmap", "futures 0.3.28", "futures-util", - "indexmap 2.0.0", + "indexmap 2.0.1", "indicatif", "log", "quinn", @@ -4745,7 +4745,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.0.0", + "indexmap 2.0.1", "log", "rand 0.8.5", "rayon", @@ -4999,7 +4999,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.0.0", + "indexmap 2.0.1", "itertools", "log", "lru", @@ -6141,7 +6141,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.0.0", + "indexmap 2.0.1", "itertools", "libc", "log", @@ -6224,7 +6224,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.0.0", + "indexmap 2.0.1", "indicatif", "log", "rayon", From 
0b2beba357d74254710c5714b118071e75924a40 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 28 Sep 2023 19:16:16 +0800 Subject: [PATCH 203/407] chore(bpf): remove unused deps (#33435) chore: remove unused deps --- Cargo.lock | 7 ------- sdk/cargo-build-bpf/Cargo.toml | 3 --- sdk/cargo-test-bpf/Cargo.toml | 4 ---- 3 files changed, 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0de9f872c59c42..6e998ef754667b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5412,11 +5412,8 @@ dependencies = [ name = "solana-cargo-build-bpf" version = "1.17.0" dependencies = [ - "cargo_metadata", - "clap 3.2.23", "log", "solana-logger", - "solana-sdk", ] [[package]] @@ -5443,10 +5440,6 @@ dependencies = [ [[package]] name = "solana-cargo-test-bpf" version = "1.17.0" -dependencies = [ - "cargo_metadata", - "clap 3.2.23", -] [[package]] name = "solana-cargo-test-sbf" diff --git a/sdk/cargo-build-bpf/Cargo.toml b/sdk/cargo-build-bpf/Cargo.toml index 6a75de262dc8e5..a609ee13c9c3d2 100644 --- a/sdk/cargo-build-bpf/Cargo.toml +++ b/sdk/cargo-build-bpf/Cargo.toml @@ -10,11 +10,8 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -cargo_metadata = { workspace = true } -clap = { version = "3.1.5", features = ["cargo", "env"] } log = { workspace = true, features = ["std"] } solana-logger = { workspace = true } -solana-sdk = { workspace = true } [features] program = [] diff --git a/sdk/cargo-test-bpf/Cargo.toml b/sdk/cargo-test-bpf/Cargo.toml index 722a666477aadf..a639225b3a2539 100644 --- a/sdk/cargo-test-bpf/Cargo.toml +++ b/sdk/cargo-test-bpf/Cargo.toml @@ -9,10 +9,6 @@ homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } -[dependencies] -cargo_metadata = { workspace = true } -clap = { version = "3.1.5", features = ["cargo"] } - [[bin]] name = "cargo-test-bpf" path = "src/main.rs" From fa968da32e104b4152c2029858458e4cb20f3e59 Mon Sep 17 00:00:00 2001 From: Jon Cinque Date: Thu, 28 Sep 2023 15:08:11 +0200 Subject: 
[PATCH 204/407] cli: Don't skip preflight when interacting with programs (#33426) * cli: Don't skip preflight when closing a program * Don't skip preflight anywhere for program deploys, fix test --- cli/src/program.rs | 4 ---- cli/src/test_utils.rs | 21 +++++++++++++-------- cli/tests/program.rs | 4 ++++ 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/cli/src/program.rs b/cli/src/program.rs index 1c6bf988615add..4222c732e07fde 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -1175,7 +1175,6 @@ fn process_set_authority( &tx, config.commitment, RpcSendTransactionConfig { - skip_preflight: true, preflight_commitment: Some(config.commitment.commitment), ..RpcSendTransactionConfig::default() }, @@ -1226,7 +1225,6 @@ fn process_set_authority_checked( &tx, config.commitment, RpcSendTransactionConfig { - skip_preflight: false, preflight_commitment: Some(config.commitment.commitment), ..RpcSendTransactionConfig::default() }, @@ -1562,7 +1560,6 @@ fn close( &tx, config.commitment, RpcSendTransactionConfig { - skip_preflight: true, preflight_commitment: Some(config.commitment.commitment), ..RpcSendTransactionConfig::default() }, @@ -2232,7 +2229,6 @@ fn send_deploy_messages( &final_tx, config.commitment, RpcSendTransactionConfig { - skip_preflight: true, preflight_commitment: Some(config.commitment.commitment), ..RpcSendTransactionConfig::default() }, diff --git a/cli/src/test_utils.rs b/cli/src/test_utils.rs index 5526022591b724..68b37636583f8a 100644 --- a/cli/src/test_utils.rs +++ b/cli/src/test_utils.rs @@ -39,6 +39,17 @@ pub fn check_ready(rpc_client: &RpcClient) { } } +pub fn wait_n_slots(rpc_client: &RpcClient, n: u64) -> u64 { + let slot = rpc_client.get_slot().unwrap(); + loop { + sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)); + let new_slot = rpc_client.get_slot().unwrap(); + if new_slot - slot > n { + return new_slot; + } + } +} + pub fn wait_for_next_epoch_plus_n_slots(rpc_client: &RpcClient, n: u64) -> (Epoch, u64) { let 
current_epoch = rpc_client.get_epoch_info().unwrap().epoch; let next_epoch = current_epoch + 1; @@ -48,14 +59,8 @@ pub fn wait_for_next_epoch_plus_n_slots(rpc_client: &RpcClient, n: u64) -> (Epoc let next_epoch = rpc_client.get_epoch_info().unwrap().epoch; if next_epoch > current_epoch { - let slot = rpc_client.get_slot().unwrap(); - loop { - sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)); - let new_slot = rpc_client.get_slot().unwrap(); - if new_slot - slot > n { - return (next_epoch, new_slot); - } - } + let new_slot = wait_n_slots(rpc_client, n); + return (next_epoch, new_slot); } } } diff --git a/cli/tests/program.rs b/cli/tests/program.rs index ec28e4600081b9..de5ef8cd01319f 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -5,6 +5,7 @@ use { solana_cli::{ cli::{process_command, CliCommand, CliConfig}, program::{ProgramCliCommand, CLOSE_PROGRAM_WARNING}, + test_utils::wait_n_slots, }, solana_cli_output::OutputFormat, solana_faucet::faucet::run_local_faucet, @@ -688,6 +689,9 @@ fn test_cli_program_close_program() { &bpf_loader_upgradeable::id(), ); + // Wait one slot to avoid "Program was deployed in this block already" error + wait_n_slots(&rpc_client, 1); + // Close program let close_account = rpc_client.get_account(&programdata_pubkey).unwrap(); let programdata_lamports = close_account.lamports; From 5b9a167c51fc2ccd825b5dedbe30de9ce254d596 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 28 Sep 2023 15:13:51 +0200 Subject: [PATCH 205/407] Add uniform start/stop log lines for background hash verification (#33441) Add uniform start/stop log lines for background hash verification --- runtime/src/bank.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 3c8cba7471f51f..adc9da3a4b07bb 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7254,9 +7254,7 @@ impl Bank { Builder::new() .name("solBgHashVerify".into()) .spawn(move || { - info!( - "running initial 
verification accounts hash calculation in background" - ); + info!("Initial background accounts hash verification has started"); let result = accounts_.verify_accounts_hash_and_lamports( slot, cap, @@ -7276,6 +7274,7 @@ impl Bank { .accounts_db .verify_accounts_hash_in_bg .background_finished(); + info!("Initial background accounts hash verification has stopped"); result }) .unwrap() From 25c27d452cc389ef4cf43a10e47b71c3000283df Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Thu, 28 Sep 2023 08:58:08 -0500 Subject: [PATCH 206/407] hash dedup vec (#33246) * hash_dedup vec algo * reviews * reviews * more reviews * simplify working_set init with add_next_item * refactor to remove special case "new" from add_item. The new change is that, even the new item is the new min, it will still be added to working_set. This change will make init working_set code simpler and the loop loop check simpler. Since the item is inserted in at the end of the vector, the cost of push into and pop from the working will be O(1), shouldn't affect performance much. * comments * refactor unnamed tuple in working set to SlotGroupPointer type * use SlotGroupPointer in ItemLocation * Add Copy traits to avoid explicty call of clone on SlotGroupPointer * consume next in add_next_item fn (credit to jeff). note that the old code is still correct, since before call to add_next_item, we will have already overwritten `next` to correct value. --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_hash.rs | 299 +++++++++++++++++++------------ 1 file changed, 188 insertions(+), 111 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 74d1cfa8a1681a..f5c0a78ffa2380 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -435,6 +435,24 @@ pub struct AccountsHasher<'a> { pub(crate) active_stats: &'a ActiveStats, } +/// Pointer to a specific item in chunked accounts hash slices. 
+#[derive(Debug, Clone, Copy)] +struct SlotGroupPointer { + /// slot group index + slot_group_index: usize, + /// offset within a slot group + offset: usize, +} + +/// A struct for the location of an account hash item inside chunked accounts hash slices. +#[derive(Debug)] +struct ItemLocation<'a> { + /// account's pubkey + key: &'a Pubkey, + /// pointer to the item in slot group slices + pointer: SlotGroupPointer, +} + impl<'a> AccountsHasher<'a> { /// true if it is possible that there are filler accounts present pub fn filler_accounts_enabled(&self) -> bool { @@ -813,59 +831,45 @@ impl<'a> AccountsHasher<'a> { (hashes, lamports_total) } - /// returns the item referenced by `min_index` - /// updates `indexes` to skip over the pubkey and its duplicates - /// updates `first_items` to point to the next pubkey - /// or removes the entire pubkey division entries (for `min_index`) if the referenced pubkey is the last entry in the same `bin` - /// removed from: `first_items`, `indexes`, and `first_item_pubkey_division` + /// Given the item location, return the item in the `CalculatedHashIntermediate` slices and the next item location in the same bin. + /// If the end of the `CalculatedHashIntermediate` slice is reached or all the accounts in current bin have been exhausted, return `None` for next item location. 
fn get_item<'b>( - min_index: usize, - bin: usize, - first_items: &mut Vec, sorted_data_by_pubkey: &[&'b [CalculateHashIntermediate]], - indexes: &mut Vec, - first_item_to_pubkey_division: &mut Vec, + bin: usize, binner: &PubkeyBinCalculator24, - ) -> &'b CalculateHashIntermediate { - let first_item = first_items[min_index]; - let key = &first_item; - let division_index = first_item_to_pubkey_division[min_index]; - let division_data = &sorted_data_by_pubkey[division_index]; - let mut index = indexes[min_index]; + item_loc: &ItemLocation<'b>, + ) -> (&'b CalculateHashIntermediate, Option>) { + let division_data = &sorted_data_by_pubkey[item_loc.pointer.slot_group_index]; + let mut index = item_loc.pointer.offset; index += 1; - let mut end; - loop { - end = index >= division_data.len(); - if end { - break; - } + let mut next = None; + + while index < division_data.len() { // still more items where we found the previous key, so just increment the index for that slot group, skipping all pubkeys that are equal let next_key = &division_data[index].pubkey; - if next_key == key { + if next_key == item_loc.key { index += 1; continue; // duplicate entries of same pubkey, so keep skipping } if binner.bin_from_pubkey(next_key) > bin { // the next pubkey is not in our bin - end = true; break; } // point to the next pubkey > key - first_items[min_index] = *next_key; - indexes[min_index] = index; + next = Some(ItemLocation { + key: next_key, + pointer: SlotGroupPointer { + slot_group_index: item_loc.pointer.slot_group_index, + offset: index, + }, + }); break; } - if end { - // stop looking in this vector - we exhausted it - first_items.remove(min_index); - first_item_to_pubkey_division.remove(min_index); - indexes.remove(min_index); - } // this is the previous first item that was requested - &division_data[index - 1] + (&division_data[index - 1], next) } /// `hash_data` must be sorted by `binner.bin_from_pubkey()` @@ -944,41 +948,55 @@ impl<'a> AccountsHasher<'a> { result } - // 
go through: [..][pubkey_bin][..] and return hashes and lamport sum - // slot groups^ ^accounts found in a slot group, sorted by pubkey, higher slot, write_version - // 1. handle zero lamport accounts - // 2. pick the highest slot or (slot = and highest version) of each pubkey - // 3. produce this output: - // a. AccountHashesFile: individual account hashes in pubkey order - // b. lamport sum - fn de_dup_accounts_in_parallel( - &self, + /// Return the working_set and max number of pubkeys for hash dedup. + /// `working_set` holds SlotGroupPointer {slot_group_index, offset} for items in account's pubkey descending order. + fn initialize_dedup_working_set( sorted_data_by_pubkey: &[&[CalculateHashIntermediate]], pubkey_bin: usize, bins: usize, + binner: &PubkeyBinCalculator24, stats: &HashStats, - ) -> (AccountHashesFile, u64) { - let binner = PubkeyBinCalculator24::new(bins); - - let len = sorted_data_by_pubkey.len(); - let mut indexes = Vec::with_capacity(len); - let mut first_items = Vec::with_capacity(len); - // map from index of an item in first_items[] to index of the corresponding item in sorted_data_by_pubkey[] - // this will change as items in sorted_data_by_pubkey[] are exhausted - let mut first_item_to_pubkey_division = Vec::with_capacity(len); - - // initialize 'first_items', which holds the current lowest item in each slot group + ) -> ( + Vec, /* working_set */ + usize, /* max_inclusive_num_pubkeys */ + ) { + // working_set holds the lowest items for each slot_group sorted by pubkey descending (min_key is the last) + let mut working_set: Vec = Vec::default(); + + // Initialize 'working_set', which holds the current lowest item in each slot group. + // `working_set` should be initialized in reverse order of slot_groups. Later slot_groups are + // processed first. For each slot_group, if the lowest item for current slot group is + // already in working_set (i.e. 
inserted by a later slot group), the next lowest item + // in this slot group is searched and checked, until either one that is `not` in the + // working_set is found, which will then be inserted, or no next lowest item is found. + // Iterating in reverse order of slot_group will guarantee that each slot group will be + // scanned only once and scanned continuously. Therefore, it can achieve better data + // locality during the scan. let max_inclusive_num_pubkeys = sorted_data_by_pubkey .iter() .enumerate() + .rev() .map(|(i, hash_data)| { let first_pubkey_in_bin = - Self::find_first_pubkey_in_bin(hash_data, pubkey_bin, bins, &binner, stats); + Self::find_first_pubkey_in_bin(hash_data, pubkey_bin, bins, binner, stats); + if let Some(first_pubkey_in_bin) = first_pubkey_in_bin { - let k = hash_data[first_pubkey_in_bin].pubkey; - first_items.push(k); - first_item_to_pubkey_division.push(i); - indexes.push(first_pubkey_in_bin); + let mut next = Some(ItemLocation { + key: &hash_data[first_pubkey_in_bin].pubkey, + pointer: SlotGroupPointer { + slot_group_index: i, + offset: first_pubkey_in_bin, + }, + }); + + Self::add_next_item( + &mut next, + &mut working_set, + sorted_data_by_pubkey, + pubkey_bin, + binner, + ); + let mut first_pubkey_in_next_bin = first_pubkey_in_bin + 1; while first_pubkey_in_next_bin < hash_data.len() { if binner.bin_from_pubkey(&hash_data[first_pubkey_in_next_bin].pubkey) @@ -994,6 +1012,105 @@ impl<'a> AccountsHasher<'a> { } }) .sum::(); + + (working_set, max_inclusive_num_pubkeys) + } + + /// Add next item into hash dedup working set + fn add_next_item<'b>( + next: &mut Option>, + working_set: &mut Vec, + sorted_data_by_pubkey: &[&'b [CalculateHashIntermediate]], + pubkey_bin: usize, + binner: &PubkeyBinCalculator24, + ) { + // looping to add next item to working set + while let Some(ItemLocation { key, pointer }) = std::mem::take(next) { + // if `new key` is less than the min key in the working set, skip binary search and + // insert item to 
the end vec directly + if let Some(SlotGroupPointer { + slot_group_index: current_min_slot_group_index, + offset: current_min_offset, + }) = working_set.last() + { + let current_min_key = &sorted_data_by_pubkey[*current_min_slot_group_index] + [*current_min_offset] + .pubkey; + if key < current_min_key { + working_set.push(pointer); + break; + } + } + + let found = working_set.binary_search_by(|pointer| { + let prob = &sorted_data_by_pubkey[pointer.slot_group_index][pointer.offset].pubkey; + (*key).cmp(prob) + }); + + match found { + Err(index) => { + // found a new new key, insert into the working_set. This is O(n/2) on + // average. Theoretically, this operation could be expensive and may be further + // optimized in future. + working_set.insert(index, pointer); + break; + } + Ok(index) => { + let found = &mut working_set[index]; + if found.slot_group_index > pointer.slot_group_index { + // There is already a later slot group that contains this key in the working_set, + // look up again. + let (_item, new_next) = Self::get_item( + sorted_data_by_pubkey, + pubkey_bin, + binner, + &ItemLocation { key, pointer }, + ); + *next = new_next; + } else { + // A previous slot contains this key, replace it, and look for next item in the previous slot group. + let (_item, new_next) = Self::get_item( + sorted_data_by_pubkey, + pubkey_bin, + binner, + &ItemLocation { + key, + pointer: *found, + }, + ); + *found = pointer; + *next = new_next; + } + } + } + } + } + + // go through: [..][pubkey_bin][..] and return hashes and lamport sum + // slot groups^ ^accounts found in a slot group, sorted by pubkey, higher slot, write_version + // 1. handle zero lamport accounts + // 2. pick the highest slot or (slot = and highest version) of each pubkey + // 3. produce this output: + // a. AccountHashesFile: individual account hashes in pubkey order + // b. 
lamport sum + fn de_dup_accounts_in_parallel( + &self, + sorted_data_by_pubkey: &[&[CalculateHashIntermediate]], + pubkey_bin: usize, + bins: usize, + stats: &HashStats, + ) -> (AccountHashesFile, u64) { + let binner = PubkeyBinCalculator24::new(bins); + + // working_set hold the lowest items for each slot_group sorted by pubkey descending (min_key is the last) + let (mut working_set, max_inclusive_num_pubkeys) = Self::initialize_dedup_working_set( + sorted_data_by_pubkey, + pubkey_bin, + bins, + &binner, + stats, + ); + let mut hashes = AccountHashesFile { writer: None, dir_for_temp_cache_files: self.dir_for_temp_cache_files.clone(), @@ -1001,47 +1118,17 @@ impl<'a> AccountsHasher<'a> { }; let mut overall_sum = 0; - let mut duplicate_pubkey_indexes = Vec::with_capacity(len); let filler_accounts_enabled = self.filler_accounts_enabled(); - // this loop runs once per unique pubkey contained in any slot group - while !first_items.is_empty() { - let loop_stop = { first_items.len() - 1 }; // we increment at the beginning of the loop - let mut min_index = 0; - let mut min_pubkey = first_items[min_index]; - let mut first_item_index = 0; // we will start iterating at item 1. 
+=1 is first instruction in loop - - // this loop iterates over each slot group to find the minimum pubkey at the maximum slot - // it also identifies duplicate pubkey entries at lower slots and remembers those to skip them after - while first_item_index < loop_stop { - first_item_index += 1; - let key = &first_items[first_item_index]; - let cmp = min_pubkey.cmp(key); - match cmp { - std::cmp::Ordering::Less => { - continue; // we still have the min item - } - std::cmp::Ordering::Equal => { - // we found the same pubkey in a later slot, so remember the lower slot as a duplicate - duplicate_pubkey_indexes.push(min_index); - } - std::cmp::Ordering::Greater => { - // this is the new min pubkey - min_pubkey = *key; - } - } - // this is the new index of the min entry - min_index = first_item_index; - } + while let Some(pointer) = working_set.pop() { + let key = &sorted_data_by_pubkey[pointer.slot_group_index][pointer.offset].pubkey; + // get the min item, add lamports, get hash - let item = Self::get_item( - min_index, - pubkey_bin, - &mut first_items, + let (item, mut next) = Self::get_item( sorted_data_by_pubkey, - &mut indexes, - &mut first_item_to_pubkey_division, + pubkey_bin, &binner, + &ItemLocation { key, pointer }, ); // add lamports and get hash @@ -1064,23 +1151,13 @@ impl<'a> AccountsHasher<'a> { } } - if !duplicate_pubkey_indexes.is_empty() { - // skip past duplicate keys in earlier slots - // reverse this list because get_item can remove first_items[*i] when *i is exhausted - // and that would mess up subsequent *i values - duplicate_pubkey_indexes.iter().rev().for_each(|i| { - Self::get_item( - *i, - pubkey_bin, - &mut first_items, - sorted_data_by_pubkey, - &mut indexes, - &mut first_item_to_pubkey_division, - &binner, - ); - }); - duplicate_pubkey_indexes.clear(); - } + Self::add_next_item( + &mut next, + &mut working_set, + sorted_data_by_pubkey, + pubkey_bin, + &binner, + ); } (hashes, overall_sum) From ec36369e479185f4a9758e3e2e2530a5ca20f2e3 Mon Sep 
17 00:00:00 2001 From: samkim-crypto Date: Thu, 28 Sep 2023 07:03:41 -0700 Subject: [PATCH 207/407] [clap-v3-utils] Deprecate input validators and add parsers to replace them (#33276) * add tests for validating `Pubkey` and `Hash` * add pubkey signature parser * add parsers for straightforward validators * add parser for token amounts * add parser for derivation and seeds * resolve warnings from deprecations in clap-v3-utils * remove some deprecated functions from `solana_keygen` * refactor signer related input parsers into a submodule * fix deprecation notice for utl * refactor parsers in `input_validators` to `input_parsers` * cargo fmt * Apply suggestions from code review Co-authored-by: Trent Nelson * Update clap-v3-utils/src/input_parsers/mod.rs Co-authored-by: Trent Nelson * mionr fixes to build * add deprecation notice for old `input_parsers::signer` functions * update `UiTokenAmount` to `Amount` * refactor to-be-deprecated functions back to `input_parsers/mod.rs * fmt --------- Co-authored-by: Trent Nelson --- clap-v3-utils/src/input_parsers/mod.rs | 548 ++++++++++++++++++ .../signer.rs} | 205 ++----- clap-v3-utils/src/input_validators.rs | 59 +- clap-v3-utils/src/offline.rs | 9 +- keygen/src/keygen.rs | 8 +- 5 files changed, 658 insertions(+), 171 deletions(-) create mode 100644 clap-v3-utils/src/input_parsers/mod.rs rename clap-v3-utils/src/{input_parsers.rs => input_parsers/signer.rs} (59%) diff --git a/clap-v3-utils/src/input_parsers/mod.rs b/clap-v3-utils/src/input_parsers/mod.rs new file mode 100644 index 00000000000000..d96af9516b9e5d --- /dev/null +++ b/clap-v3-utils/src/input_parsers/mod.rs @@ -0,0 +1,548 @@ +use { + crate::{ + input_validators::normalize_to_url_if_moniker, + keypair::{keypair_from_seed_phrase, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG}, + }, + chrono::DateTime, + clap::ArgMatches, + solana_sdk::{ + clock::UnixTimestamp, + commitment_config::CommitmentConfig, + genesis_config::ClusterType, + native_token::sol_to_lamports, + 
pubkey::{Pubkey, MAX_SEED_LEN}, + signature::{read_keypair_file, Keypair, Signer}, + }, + std::str::FromStr, +}; + +pub mod signer; +#[deprecated( + since = "1.17.0", + note = "Please use the functions in `solana_clap_v3_utils::input_parsers::signer` directly instead" +)] +pub use signer::{ + pubkey_of_signer, pubkeys_of_multiple_signers, pubkeys_sigs_of, resolve_signer, signer_of, + STDOUT_OUTFILE_TOKEN, +}; + +// Return parsed values from matches at `name` +pub fn values_of(matches: &ArgMatches, name: &str) -> Option> +where + T: std::str::FromStr, + ::Err: std::fmt::Debug, +{ + matches + .values_of(name) + .map(|xs| xs.map(|x| x.parse::().unwrap()).collect()) +} + +// Return a parsed value from matches at `name` +pub fn value_of(matches: &ArgMatches, name: &str) -> Option +where + T: std::str::FromStr, + ::Err: std::fmt::Debug, +{ + matches + .value_of(name) + .and_then(|value| value.parse::().ok()) +} + +pub fn unix_timestamp_from_rfc3339_datetime( + matches: &ArgMatches, + name: &str, +) -> Option { + matches.value_of(name).and_then(|value| { + DateTime::parse_from_rfc3339(value) + .ok() + .map(|date_time| date_time.timestamp()) + }) +} + +#[deprecated( + since = "1.17.0", + note = "please use `Amount::parse_decimal` and `Amount::sol_to_lamport` instead" +)] +pub fn lamports_of_sol(matches: &ArgMatches, name: &str) -> Option { + value_of(matches, name).map(sol_to_lamports) +} + +pub fn cluster_type_of(matches: &ArgMatches, name: &str) -> Option { + value_of(matches, name) +} + +pub fn commitment_of(matches: &ArgMatches, name: &str) -> Option { + matches + .value_of(name) + .map(|value| CommitmentConfig::from_str(value).unwrap_or_default()) +} + +pub fn parse_url(arg: &str) -> Result { + url::Url::parse(arg) + .map_err(|err| err.to_string()) + .and_then(|url| { + url.has_host() + .then_some(arg.to_string()) + .ok_or("no host provided".to_string()) + }) +} + +pub fn parse_url_or_moniker(arg: &str) -> Result { + parse_url(&normalize_to_url_if_moniker(arg)) +} + 
+pub fn parse_pow2(arg: &str) -> Result { + arg.parse::() + .map_err(|e| format!("Unable to parse, provided: {arg}, err: {e}")) + .and_then(|v| { + v.is_power_of_two() + .then_some(v) + .ok_or(format!("Must be a power of 2: {v}")) + }) +} + +pub fn parse_percentage(arg: &str) -> Result { + arg.parse::() + .map_err(|e| format!("Unable to parse input percentage, provided: {arg}, err: {e}")) + .and_then(|v| { + (v <= 100).then_some(v).ok_or(format!( + "Percentage must be in range of 0 to 100, provided: {v}" + )) + }) +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Amount { + Decimal(f64), + Raw(u64), + All, +} +impl Amount { + pub fn parse(arg: &str) -> Result { + if arg == "ALL" { + Ok(Amount::All) + } else { + Self::parse_decimal(arg).or(Self::parse_raw(arg) + .map_err(|_| format!("Unable to parse input amount, provided: {arg}"))) + } + } + + pub fn parse_decimal(arg: &str) -> Result { + arg.parse::() + .map(Amount::Decimal) + .map_err(|_| format!("Unable to parse input amount, provided: {arg}")) + } + + pub fn parse_raw(arg: &str) -> Result { + arg.parse::() + .map(Amount::Raw) + .map_err(|_| format!("Unable to parse input amount, provided: {arg}")) + } + + pub fn parse_decimal_or_all(arg: &str) -> Result { + if arg == "ALL" { + Ok(Amount::All) + } else { + Self::parse_decimal(arg).map_err(|_| { + format!("Unable to parse input amount as float or 'ALL' keyword, provided: {arg}") + }) + } + } + + pub fn to_raw_amount(&self, decimals: u8) -> Self { + match self { + Amount::Decimal(amount) => { + Amount::Raw((amount * 10_usize.pow(decimals as u32) as f64) as u64) + } + Amount::Raw(amount) => Amount::Raw(*amount), + Amount::All => Amount::All, + } + } + + pub fn sol_to_lamport(&self) -> Amount { + const NATIVE_SOL_DECIMALS: u8 = 9; + self.to_raw_amount(NATIVE_SOL_DECIMALS) + } +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum RawTokenAmount { + Amount(u64), + All, +} + +pub fn parse_rfc3339_datetime(arg: &str) -> Result { + 
DateTime::parse_from_rfc3339(arg) + .map(|_| arg.to_string()) + .map_err(|e| format!("{e}")) +} + +pub fn parse_derivation(arg: &str) -> Result { + let value = arg.replace('\'', ""); + let mut parts = value.split('/'); + let account = parts.next().unwrap(); + account + .parse::() + .map_err(|e| format!("Unable to parse derivation, provided: {account}, err: {e}")) + .and_then(|_| { + if let Some(change) = parts.next() { + change.parse::().map_err(|e| { + format!("Unable to parse derivation, provided: {change}, err: {e}") + }) + } else { + Ok(0) + } + })?; + Ok(arg.to_string()) +} + +pub fn parse_structured_seed(arg: &str) -> Result { + let (prefix, value) = arg + .split_once(':') + .ok_or("Seed must contain ':' as delimiter") + .unwrap(); + if prefix.is_empty() || value.is_empty() { + Err(String::from("Seed prefix or value is empty")) + } else { + match prefix { + "string" | "pubkey" | "hex" | "u8" => Ok(arg.to_string()), + _ => { + let len = prefix.len(); + if len != 5 && len != 6 { + Err(format!("Wrong prefix length {len} {prefix}:{value}")) + } else { + let sign = &prefix[0..1]; + let type_size = &prefix[1..len.saturating_sub(2)]; + let byte_order = &prefix[len.saturating_sub(2)..len]; + if sign != "u" && sign != "i" { + Err(format!("Wrong prefix sign {sign} {prefix}:{value}")) + } else if type_size != "16" + && type_size != "32" + && type_size != "64" + && type_size != "128" + { + Err(format!( + "Wrong prefix type size {type_size} {prefix}:{value}" + )) + } else if byte_order != "le" && byte_order != "be" { + Err(format!( + "Wrong prefix byte order {byte_order} {prefix}:{value}" + )) + } else { + Ok(arg.to_string()) + } + } + } + } + } +} + +pub fn parse_derived_address_seed(arg: &str) -> Result { + (arg.len() <= MAX_SEED_LEN) + .then_some(arg.to_string()) + .ok_or(format!( + "Address seed must not be longer than {MAX_SEED_LEN} bytes" + )) +} + +// Return the keypair for an argument with filename `name` or None if not present. 
+pub fn keypair_of(matches: &ArgMatches, name: &str) -> Option { + if let Some(value) = matches.value_of(name) { + if value == ASK_KEYWORD { + let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); + keypair_from_seed_phrase(name, skip_validation, true, None, true).ok() + } else { + read_keypair_file(value).ok() + } + } else { + None + } +} + +pub fn keypairs_of(matches: &ArgMatches, name: &str) -> Option> { + matches.values_of(name).map(|values| { + values + .filter_map(|value| { + if value == ASK_KEYWORD { + let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); + keypair_from_seed_phrase(name, skip_validation, true, None, true).ok() + } else { + read_keypair_file(value).ok() + } + }) + .collect() + }) +} + +// Return a pubkey for an argument that can itself be parsed into a pubkey, +// or is a filename that can be read as a keypair +pub fn pubkey_of(matches: &ArgMatches, name: &str) -> Option { + value_of(matches, name).or_else(|| keypair_of(matches, name).map(|keypair| keypair.pubkey())) +} + +pub fn pubkeys_of(matches: &ArgMatches, name: &str) -> Option> { + matches.values_of(name).map(|values| { + values + .map(|value| { + value.parse::().unwrap_or_else(|_| { + read_keypair_file(value) + .expect("read_keypair_file failed") + .pubkey() + }) + }) + .collect() + }) +} + +#[cfg(test)] +mod tests { + use { + super::*, + clap::{Arg, Command}, + solana_sdk::{hash::Hash, pubkey::Pubkey}, + }; + + fn app<'ab>() -> Command<'ab> { + Command::new("test") + .arg( + Arg::new("multiple") + .long("multiple") + .takes_value(true) + .multiple_occurrences(true) + .multiple_values(true), + ) + .arg(Arg::new("single").takes_value(true).long("single")) + .arg(Arg::new("unit").takes_value(true).long("unit")) + } + + #[test] + fn test_values_of() { + let matches = app().get_matches_from(vec!["test", "--multiple", "50", "--multiple", "39"]); + assert_eq!(values_of(&matches, "multiple"), Some(vec![50, 39])); + 
assert_eq!(values_of::(&matches, "single"), None); + + let pubkey0 = solana_sdk::pubkey::new_rand(); + let pubkey1 = solana_sdk::pubkey::new_rand(); + let matches = app().get_matches_from(vec![ + "test", + "--multiple", + &pubkey0.to_string(), + "--multiple", + &pubkey1.to_string(), + ]); + assert_eq!( + values_of(&matches, "multiple"), + Some(vec![pubkey0, pubkey1]) + ); + } + + #[test] + fn test_value_of() { + let matches = app().get_matches_from(vec!["test", "--single", "50"]); + assert_eq!(value_of(&matches, "single"), Some(50)); + assert_eq!(value_of::(&matches, "multiple"), None); + + let pubkey = solana_sdk::pubkey::new_rand(); + let matches = app().get_matches_from(vec!["test", "--single", &pubkey.to_string()]); + assert_eq!(value_of(&matches, "single"), Some(pubkey)); + } + + #[test] + fn test_parse_pubkey() { + let command = Command::new("test").arg( + Arg::new("pubkey") + .long("pubkey") + .takes_value(true) + .value_parser(clap::value_parser!(Pubkey)), + ); + + // success case + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--pubkey", "11111111111111111111111111111111"]) + .unwrap(); + assert_eq!( + *matches.get_one::("pubkey").unwrap(), + Pubkey::from_str("11111111111111111111111111111111").unwrap(), + ); + + // validation fails + let matches_error = command + .clone() + .try_get_matches_from(vec!["test", "--pubkey", "this_is_an_invalid_arg"]) + .unwrap_err(); + assert_eq!(matches_error.kind, clap::error::ErrorKind::ValueValidation); + } + + #[test] + fn test_parse_hash() { + let command = Command::new("test").arg( + Arg::new("hash") + .long("hash") + .takes_value(true) + .value_parser(clap::value_parser!(Hash)), + ); + + // success case + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--hash", "11111111111111111111111111111111"]) + .unwrap(); + assert_eq!( + *matches.get_one::("hash").unwrap(), + Hash::from_str("11111111111111111111111111111111").unwrap(), + ); + + // validation fails + let 
matches_error = command + .clone() + .try_get_matches_from(vec!["test", "--hash", "this_is_an_invalid_arg"]) + .unwrap_err(); + assert_eq!(matches_error.kind, clap::error::ErrorKind::ValueValidation); + } + + #[test] + fn test_parse_token_decimal() { + let command = Command::new("test").arg( + Arg::new("amount") + .long("amount") + .takes_value(true) + .value_parser(Amount::parse_decimal), + ); + + // success cases + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--amount", "11223344"]) + .unwrap(); + assert_eq!( + *matches.get_one::("amount").unwrap(), + Amount::Decimal(11223344_f64), + ); + + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--amount", "0.11223344"]) + .unwrap(); + assert_eq!( + *matches.get_one::("amount").unwrap(), + Amount::Decimal(0.11223344), + ); + + // validation fail cases + let matches_error = command + .clone() + .try_get_matches_from(vec!["test", "--amount", "this_is_an_invalid_arg"]) + .unwrap_err(); + assert_eq!(matches_error.kind, clap::error::ErrorKind::ValueValidation); + + let matches_error = command + .clone() + .try_get_matches_from(vec!["test", "--amount", "all"]) + .unwrap_err(); + assert_eq!(matches_error.kind, clap::error::ErrorKind::ValueValidation); + } + + #[test] + fn test_parse_token_decimal_or_all() { + let command = Command::new("test").arg( + Arg::new("amount") + .long("amount") + .takes_value(true) + .value_parser(Amount::parse_decimal_or_all), + ); + + // success cases + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--amount", "11223344"]) + .unwrap(); + assert_eq!( + *matches.get_one::("amount").unwrap(), + Amount::Decimal(11223344_f64), + ); + + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--amount", "0.11223344"]) + .unwrap(); + assert_eq!( + *matches.get_one::("amount").unwrap(), + Amount::Decimal(0.11223344), + ); + + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--amount", "ALL"]) + 
.unwrap(); + assert_eq!(*matches.get_one::("amount").unwrap(), Amount::All,); + + // validation fail cases + let matches_error = command + .clone() + .try_get_matches_from(vec!["test", "--amount", "this_is_an_invalid_arg"]) + .unwrap_err(); + assert_eq!(matches_error.kind, clap::error::ErrorKind::ValueValidation); + } + + #[test] + fn test_sol_to_lamports() { + let command = Command::new("test").arg( + Arg::new("amount") + .long("amount") + .takes_value(true) + .value_parser(Amount::parse_decimal_or_all), + ); + + let test_cases = vec![ + ("50", 50_000_000_000), + ("1.5", 1_500_000_000), + ("0.03", 30_000_000), + ]; + + for (arg, expected_lamport) in test_cases { + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--amount", arg]) + .unwrap(); + assert_eq!( + matches + .get_one::("amount") + .unwrap() + .sol_to_lamport(), + Amount::Raw(expected_lamport), + ); + } + } + + #[test] + fn test_derivation() { + let command = Command::new("test").arg( + Arg::new("derivation") + .long("derivation") + .takes_value(true) + .value_parser(parse_derivation), + ); + + let test_arguments = vec![ + ("2", true), + ("0", true), + ("65537", true), + ("0/2", true), + ("a", false), + ("4294967296", false), + ("a/b", false), + ("0/4294967296", false), + ]; + + for (arg, should_accept) in test_arguments { + if should_accept { + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--derivation", arg]) + .unwrap(); + assert_eq!(matches.get_one::("derivation").unwrap(), arg); + } + } + } +} diff --git a/clap-v3-utils/src/input_parsers.rs b/clap-v3-utils/src/input_parsers/signer.rs similarity index 59% rename from clap-v3-utils/src/input_parsers.rs rename to clap-v3-utils/src/input_parsers/signer.rs index 03b3ba3be1f35d..28425a95a05465 100644 --- a/clap-v3-utils/src/input_parsers.rs +++ b/clap-v3-utils/src/input_parsers/signer.rs @@ -1,18 +1,13 @@ use { - crate::keypair::{ - keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, 
signer_from_path, - ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG, + crate::{ + input_parsers::{keypair_of, keypairs_of, pubkey_of, pubkeys_of}, + keypair::{pubkey_from_path, resolve_signer_from_path, signer_from_path}, }, - chrono::DateTime, clap::ArgMatches, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_sdk::{ - clock::UnixTimestamp, - commitment_config::CommitmentConfig, - genesis_config::ClusterType, - native_token::sol_to_lamports, pubkey::Pubkey, - signature::{read_keypair_file, Keypair, Signature, Signer}, + signature::{Keypair, Signature, Signer}, }, std::{error, rc::Rc, str::FromStr}, }; @@ -20,55 +15,6 @@ use { // Sentinel value used to indicate to write to screen instead of file pub const STDOUT_OUTFILE_TOKEN: &str = "-"; -// Return parsed values from matches at `name` -pub fn values_of(matches: &ArgMatches, name: &str) -> Option> -where - T: std::str::FromStr, - ::Err: std::fmt::Debug, -{ - matches - .values_of(name) - .map(|xs| xs.map(|x| x.parse::().unwrap()).collect()) -} - -// Return a parsed value from matches at `name` -pub fn value_of(matches: &ArgMatches, name: &str) -> Option -where - T: std::str::FromStr, - ::Err: std::fmt::Debug, -{ - if let Some(value) = matches.value_of(name) { - value.parse::().ok() - } else { - None - } -} - -pub fn unix_timestamp_from_rfc3339_datetime( - matches: &ArgMatches, - name: &str, -) -> Option { - matches.value_of(name).and_then(|value| { - DateTime::parse_from_rfc3339(value) - .ok() - .map(|date_time| date_time.timestamp()) - }) -} - -// Return the keypair for an argument with filename `name` or None if not present. 
-pub fn keypair_of(matches: &ArgMatches, name: &str) -> Option { - if let Some(value) = matches.value_of(name) { - if value == ASK_KEYWORD { - let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); - keypair_from_seed_phrase(name, skip_validation, true, None, true).ok() - } else { - read_keypair_file(value).ok() - } - } else { - None - } -} - // Return the keypair for an argument with filename `name` or `None` if not present wrapped inside `Result`. pub fn try_keypair_of( matches: &ArgMatches, @@ -78,21 +24,6 @@ pub fn try_keypair_of( Ok(keypair_of(matches, name)) } -pub fn keypairs_of(matches: &ArgMatches, name: &str) -> Option> { - matches.values_of(name).map(|values| { - values - .filter_map(|value| { - if value == ASK_KEYWORD { - let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); - keypair_from_seed_phrase(name, skip_validation, true, None, true).ok() - } else { - read_keypair_file(value).ok() - } - }) - .collect() - }) -} - pub fn try_keypairs_of( matches: &ArgMatches, name: &str, @@ -101,12 +32,6 @@ pub fn try_keypairs_of( Ok(keypairs_of(matches, name)) } -// Return a pubkey for an argument that can itself be parsed into a pubkey, -// or is a filename that can be read as a keypair -pub fn pubkey_of(matches: &ArgMatches, name: &str) -> Option { - value_of(matches, name).or_else(|| keypair_of(matches, name).map(|keypair| keypair.pubkey())) -} - // Return a `Result` wrapped pubkey for an argument that can itself be parsed into a pubkey, // or is a filename that can be read as a keypair pub fn try_pubkey_of( @@ -117,20 +42,6 @@ pub fn try_pubkey_of( Ok(pubkey_of(matches, name)) } -pub fn pubkeys_of(matches: &ArgMatches, name: &str) -> Option> { - matches.values_of(name).map(|values| { - values - .map(|value| { - value.parse::().unwrap_or_else(|_| { - read_keypair_file(value) - .expect("read_keypair_file failed") - .pubkey() - }) - }) - .collect() - }) -} - pub fn try_pubkeys_of( matches: &ArgMatches, name: 
&str, @@ -225,18 +136,28 @@ pub fn resolve_signer( ) } -pub fn lamports_of_sol(matches: &ArgMatches, name: &str) -> Option { - value_of(matches, name).map(sol_to_lamports) -} - -pub fn cluster_type_of(matches: &ArgMatches, name: &str) -> Option { - value_of(matches, name) +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct PubkeySignature { + pubkey: Pubkey, + signature: Signature, } - -pub fn commitment_of(matches: &ArgMatches, name: &str) -> Option { - matches - .value_of(name) - .map(|value| CommitmentConfig::from_str(value).unwrap_or_default()) +impl FromStr for PubkeySignature { + type Err = String; + + fn from_str(s: &str) -> Result { + let mut signer = s.split('='); + let pubkey = signer + .next() + .ok_or_else(|| String::from("Malformed signer string"))?; + let pubkey = Pubkey::from_str(pubkey).map_err(|err| format!("{err}"))?; + + let signature = signer + .next() + .ok_or_else(|| String::from("Malformed signer string"))?; + let signature = Signature::from_str(signature).map_err(|err| format!("{err}"))?; + + Ok(Self { pubkey, signature }) + } } #[cfg(test)] @@ -268,38 +189,6 @@ mod tests { format!("{out_dir}/tmp/{name}-{pubkey}") } - #[test] - fn test_values_of() { - let matches = app().get_matches_from(vec!["test", "--multiple", "50", "--multiple", "39"]); - assert_eq!(values_of(&matches, "multiple"), Some(vec![50, 39])); - assert_eq!(values_of::(&matches, "single"), None); - - let pubkey0 = solana_sdk::pubkey::new_rand(); - let pubkey1 = solana_sdk::pubkey::new_rand(); - let matches = app().get_matches_from(vec![ - "test", - "--multiple", - &pubkey0.to_string(), - "--multiple", - &pubkey1.to_string(), - ]); - assert_eq!( - values_of(&matches, "multiple"), - Some(vec![pubkey0, pubkey1]) - ); - } - - #[test] - fn test_value_of() { - let matches = app().get_matches_from(vec!["test", "--single", "50"]); - assert_eq!(value_of(&matches, "single"), Some(50)); - assert_eq!(value_of::(&matches, "multiple"), None); - - let pubkey = 
solana_sdk::pubkey::new_rand(); - let matches = app().get_matches_from(vec!["test", "--single", &pubkey.to_string()]); - assert_eq!(value_of(&matches, "single"), Some(pubkey)); - } - #[test] fn test_keypair_of() { let keypair = Keypair::new(); @@ -376,14 +265,40 @@ mod tests { } #[test] - fn test_lamports_of_sol() { - let matches = app().get_matches_from(vec!["test", "--single", "50"]); - assert_eq!(lamports_of_sol(&matches, "single"), Some(50_000_000_000)); - assert_eq!(lamports_of_sol(&matches, "multiple"), None); - let matches = app().get_matches_from(vec!["test", "--single", "1.5"]); - assert_eq!(lamports_of_sol(&matches, "single"), Some(1_500_000_000)); - assert_eq!(lamports_of_sol(&matches, "multiple"), None); - let matches = app().get_matches_from(vec!["test", "--single", "0.03"]); - assert_eq!(lamports_of_sol(&matches, "single"), Some(30_000_000)); + fn test_parse_pubkey_signature() { + let command = Command::new("test").arg( + Arg::new("pubkeysig") + .long("pubkeysig") + .takes_value(true) + .value_parser(clap::value_parser!(PubkeySignature)), + ); + + // success case + let matches = command + .clone() + .try_get_matches_from(vec![ + "test", + "--pubkeysig", + "11111111111111111111111111111111=4TpFuec1u4BZfxgHg2VQXwvBHANZuNSJHmgrU34GViLAM5uYZ8t7uuhWMHN4k9r41B2p9mwnHjPGwTmTxyvCZw63" + ] + ) + .unwrap(); + + let expected = PubkeySignature { + pubkey: Pubkey::from_str("11111111111111111111111111111111").unwrap(), + signature: Signature::from_str("4TpFuec1u4BZfxgHg2VQXwvBHANZuNSJHmgrU34GViLAM5uYZ8t7uuhWMHN4k9r41B2p9mwnHjPGwTmTxyvCZw63").unwrap(), + }; + + assert_eq!( + *matches.get_one::("pubkeysig").unwrap(), + expected, + ); + + // validation fails + let matches_error = command + .clone() + .try_get_matches_from(vec!["test", "--pubkeysig", "this_is_an_invalid_arg"]) + .unwrap_err(); + assert_eq!(matches_error.kind, clap::error::ErrorKind::ValueValidation); } } diff --git a/clap-v3-utils/src/input_validators.rs b/clap-v3-utils/src/input_validators.rs index 
76c780d3f349d6..4bb40b0cd130af 100644 --- a/clap-v3-utils/src/input_validators.rs +++ b/clap-v3-utils/src/input_validators.rs @@ -25,6 +25,7 @@ where // Return an error if string cannot be parsed as type T. // Takes a String to avoid second type parameter when used as a clap validator +#[deprecated(since = "1.17.0", note = "please use `clap::value_parser!` instead")] pub fn is_parsable(string: &str) -> Result<(), String> where T: FromStr, @@ -35,6 +36,10 @@ where // Return an error if string cannot be parsed as numeric type T, and value not within specified // range +#[deprecated( + since = "1.17.0", + note = "please use `clap::builder::RangedI64ValueParser` instead" +)] pub fn is_within_range(string: String, range: R) -> Result<(), String> where T: FromStr + Copy + std::fmt::Debug + PartialOrd + std::ops::Add + From, @@ -59,6 +64,10 @@ pub fn is_pubkey(string: &str) -> Result<(), String> { } // Return an error if a hash cannot be parsed. +#[deprecated( + since = "1.17.0", + note = "please use `clap::value_parser!(Hash)` instead" +)] pub fn is_hash(string: T) -> Result<(), String> where T: AsRef + Display, @@ -144,6 +153,10 @@ where } // Return an error if string cannot be parsed as pubkey=signature string +#[deprecated( + since = "1.17.0", + note = "please use `clap::value_parser!(PubkeySignature)` instead" +)] pub fn is_pubkey_sig(string: T) -> Result<(), String> where T: AsRef + Display, @@ -169,6 +182,7 @@ where } // Return an error if a url cannot be parsed. 
+#[deprecated(since = "1.17.0", note = "please use `parse_url` instead")] pub fn is_url(string: T) -> Result<(), String> where T: AsRef + Display, @@ -185,6 +199,7 @@ where } } +#[deprecated(since = "1.17.0", note = "please use `parse_url_or_moniker` instead")] pub fn is_url_or_moniker(string: T) -> Result<(), String> where T: AsRef + Display, @@ -212,6 +227,10 @@ pub fn normalize_to_url_if_moniker>(url_or_moniker: T) -> String { .to_string() } +#[deprecated( + since = "1.17.0", + note = "please use `clap::value_parser!(Epoch)` instead" +)] pub fn is_epoch(epoch: T) -> Result<(), String> where T: AsRef + Display, @@ -219,6 +238,10 @@ where is_parsable_generic::(epoch) } +#[deprecated( + since = "1.17.0", + note = "please use `clap::value_parser!(Slot)` instead" +)] pub fn is_slot(slot: T) -> Result<(), String> where T: AsRef + Display, @@ -226,6 +249,7 @@ where is_parsable_generic::(slot) } +#[deprecated(since = "1.17.0", note = "please use `parse_pow2` instead")] pub fn is_pow2(bins: T) -> Result<(), String> where T: AsRef + Display, @@ -242,6 +266,10 @@ where }) } +#[deprecated( + since = "1.17.0", + note = "please use `clap_value_parser!(u16)` instead" +)] pub fn is_port(port: T) -> Result<(), String> where T: AsRef + Display, @@ -249,6 +277,7 @@ where is_parsable_generic::(port) } +#[deprecated(since = "1.17.0", note = "please use `parse_percentage` instead")] pub fn is_valid_percentage(percentage: T) -> Result<(), String> where T: AsRef + Display, @@ -268,6 +297,7 @@ where }) } +#[deprecated(since = "1.17.0", note = "please use `Amount::parse_decimal` instead")] pub fn is_amount(amount: T) -> Result<(), String> where T: AsRef + Display, @@ -281,6 +311,10 @@ where } } +#[deprecated( + since = "1.17.0", + note = "please use `TokenAmount::parse_decimal` instead" +)] pub fn is_amount_or_all(amount: T) -> Result<(), String> where T: AsRef + Display, @@ -297,6 +331,7 @@ where } } +#[deprecated(since = "1.17.0", note = "please use `parse_rfc3339_datetime` instead")] 
pub fn is_rfc3339_datetime(value: T) -> Result<(), String> where T: AsRef + Display, @@ -306,6 +341,7 @@ where .map_err(|e| format!("{e}")) } +#[deprecated(since = "1.17.0", note = "please use `parse_derivation` instead")] pub fn is_derivation(value: T) -> Result<(), String> where T: AsRef + Display, @@ -328,6 +364,7 @@ where .map(|_| ()) } +#[deprecated(since = "1.17.0", note = "please use `parse_structured_seed` instead")] pub fn is_structured_seed(value: T) -> Result<(), String> where T: AsRef + Display, @@ -373,6 +410,10 @@ where } } +#[deprecated( + since = "1.17.0", + note = "please use `parse_derived_address_seed` instead" +)] pub fn is_derived_address_seed(value: T) -> Result<(), String> where T: AsRef + Display, @@ -386,21 +427,3 @@ where Ok(()) } } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_is_derivation() { - assert_eq!(is_derivation("2"), Ok(())); - assert_eq!(is_derivation("0"), Ok(())); - assert_eq!(is_derivation("65537"), Ok(())); - assert_eq!(is_derivation("0/2"), Ok(())); - assert_eq!(is_derivation("0'/2'"), Ok(())); - assert!(is_derivation("a").is_err()); - assert!(is_derivation("4294967296").is_err()); - assert!(is_derivation("a/b").is_err()); - assert!(is_derivation("0/4294967296").is_err()); - } -} diff --git a/clap-v3-utils/src/offline.rs b/clap-v3-utils/src/offline.rs index 83c951dffc15e1..b9ccd2d4ec4ea3 100644 --- a/clap-v3-utils/src/offline.rs +++ b/clap-v3-utils/src/offline.rs @@ -1,6 +1,7 @@ use { - crate::{input_validators::*, ArgConstant}, - clap::{Arg, Command}, + crate::{input_parsers::signer::PubkeySignature, ArgConstant}, + clap::{value_parser, Arg, Command}, + solana_sdk::hash::Hash, }; pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant { @@ -32,7 +33,7 @@ pub fn blockhash_arg<'a>() -> Arg<'a> { .long(BLOCKHASH_ARG.long) .takes_value(true) .value_name("BLOCKHASH") - .validator(|s| is_hash(s)) + .value_parser(value_parser!(Hash)) .help(BLOCKHASH_ARG.help) } @@ -49,7 +50,7 @@ fn signer_arg<'a>() -> 
Arg<'a> { .long(SIGNER_ARG.long) .takes_value(true) .value_name("PUBKEY=SIGNATURE") - .validator(|s| is_pubkey_sig(s)) + .value_parser(value_parser!(PubkeySignature)) .requires(BLOCKHASH_ARG.name) .multiple_occurrences(true) .multiple_values(true) diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 20e218d88604cf..e6b5289c38e7cb 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -1,10 +1,10 @@ #![allow(clippy::arithmetic_side_effects)] use { bip39::{Mnemonic, MnemonicType, Seed}, - clap::{crate_description, crate_name, Arg, ArgMatches, Command}, + clap::{crate_description, crate_name, value_parser, Arg, ArgMatches, Command}, solana_clap_v3_utils::{ input_parsers::STDOUT_OUTFILE_TOKEN, - input_validators::{is_parsable, is_prompt_signer_source}, + input_validators::is_prompt_signer_source, keygen::{ check_for_overwrite, derivation_path::{acquire_derivation_path, derivation_path_arg}, @@ -339,7 +339,7 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .long("num-threads") .value_name("NUMBER") .takes_value(true) - .validator(is_parsable::) + .value_parser(value_parser!(usize)) .default_value(num_threads) .help("Specify the number of grind threads"), ) @@ -571,7 +571,7 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { ); } - let num_threads: usize = matches.value_of_t_or_exit("num_threads"); + let num_threads = *matches.get_one::("num_threads").unwrap(); let grind_matches = grind_parse_args( ignore_case, From cc4e9283dbb9e85aaa058e3f46d8fc96dfc5ab93 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Thu, 28 Sep 2023 09:53:17 -0500 Subject: [PATCH 208/407] reduce WARN logging to only necessary scenario (#33408) --- runtime/src/prioritization_fee_cache.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index e1005bd634de0b..5f8e3d9220d459 100644 --- 
a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -322,14 +322,17 @@ impl PrioritizationFeeCache { // block minimum fee. let (result, slot_finalize_time) = measure!( { + // Only retain priority fee reported from optimistically confirmed bank let pre_purge_bank_count = slot_prioritization_fee.len() as u64; slot_prioritization_fee.retain(|id, _| id == bank_id); let post_purge_bank_count = slot_prioritization_fee.len() as u64; metrics.accumulate_total_purged_duplicated_bank_count( pre_purge_bank_count.saturating_sub(post_purge_bank_count), ); - if post_purge_bank_count == 0 { - warn!("Prioritization fee cache unexpected finalized on non-existing bank. slot {slot} bank id {bank_id}"); + // It should be rare that optimistically confirmed bank had no prioritized + // transactions, but duplicated and unconfirmed bank had. + if pre_purge_bank_count > 0 && post_purge_bank_count == 0 { + warn!("Finalized bank has empty prioritization fee cache. slot {slot} bank id {bank_id}"); } let mut block_prioritization_fee = slot_prioritization_fee From e3cd13e49d48285dbb4c92ba3c41b0766fc66aed Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 28 Sep 2023 09:25:10 -0700 Subject: [PATCH 209/407] Add new received forwarded packets metric to banking stage (#33414) --- core/src/banking_stage.rs | 47 +++++++++++++---------- core/src/banking_stage/packet_receiver.rs | 7 ++++ 2 files changed, 34 insertions(+), 20 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 2dfb1e32b1d688..e8b61de94dce2d 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -85,6 +85,7 @@ pub struct BankingStageStats { pub(crate) dropped_duplicated_packets_count: AtomicUsize, dropped_forward_packets_count: AtomicUsize, newly_buffered_packets_count: AtomicUsize, + newly_buffered_forwarded_packets_count: AtomicUsize, current_buffered_packets_count: AtomicUsize, rebuffered_packets_count: AtomicUsize, 
consumed_buffered_packets_count: AtomicUsize, @@ -147,109 +148,115 @@ impl BankingStageStats { if self.last_report.should_update(report_interval_ms) { datapoint_info!( "banking_stage-loop-stats", - ("id", self.id as i64, i64), + ("id", self.id, i64), ( "receive_and_buffer_packets_count", self.receive_and_buffer_packets_count - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "dropped_packets_count", - self.dropped_packets_count.swap(0, Ordering::Relaxed) as i64, + self.dropped_packets_count.swap(0, Ordering::Relaxed), i64 ), ( "dropped_duplicated_packets_count", self.dropped_duplicated_packets_count - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "dropped_forward_packets_count", self.dropped_forward_packets_count - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "newly_buffered_packets_count", - self.newly_buffered_packets_count.swap(0, Ordering::Relaxed) as i64, + self.newly_buffered_packets_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "newly_buffered_forwarded_packets_count", + self.newly_buffered_forwarded_packets_count + .swap(0, Ordering::Relaxed), i64 ), ( "current_buffered_packets_count", self.current_buffered_packets_count - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "rebuffered_packets_count", - self.rebuffered_packets_count.swap(0, Ordering::Relaxed) as i64, + self.rebuffered_packets_count.swap(0, Ordering::Relaxed), i64 ), ( "consumed_buffered_packets_count", self.consumed_buffered_packets_count - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "forwarded_transaction_count", - self.forwarded_transaction_count.swap(0, Ordering::Relaxed) as i64, + self.forwarded_transaction_count.swap(0, Ordering::Relaxed), i64 ), ( "forwarded_vote_count", - self.forwarded_vote_count.swap(0, Ordering::Relaxed) as i64, + self.forwarded_vote_count.swap(0, Ordering::Relaxed), i64 ), ( "consume_buffered_packets_elapsed", 
self.consume_buffered_packets_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "receive_and_buffer_packets_elapsed", self.receive_and_buffer_packets_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "filter_pending_packets_elapsed", self.filter_pending_packets_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "packet_conversion_elapsed", - self.packet_conversion_elapsed.swap(0, Ordering::Relaxed) as i64, + self.packet_conversion_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "transaction_processing_elapsed", self.transaction_processing_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "packet_batch_indices_len_min", - self.batch_packet_indexes_len.minimum().unwrap_or(0) as i64, + self.batch_packet_indexes_len.minimum().unwrap_or(0), i64 ), ( "packet_batch_indices_len_max", - self.batch_packet_indexes_len.maximum().unwrap_or(0) as i64, + self.batch_packet_indexes_len.maximum().unwrap_or(0), i64 ), ( "packet_batch_indices_len_mean", - self.batch_packet_indexes_len.mean().unwrap_or(0) as i64, + self.batch_packet_indexes_len.mean().unwrap_or(0), i64 ), ( "packet_batch_indices_len_90pct", - self.batch_packet_indexes_len.percentile(90.0).unwrap_or(0) as i64, + self.batch_packet_indexes_len.percentile(90.0).unwrap_or(0), i64 ) ); diff --git a/core/src/banking_stage/packet_receiver.rs b/core/src/banking_stage/packet_receiver.rs index 00d3f1549d4339..a566ef7cf3e4c1 100644 --- a/core/src/banking_stage/packet_receiver.rs +++ b/core/src/banking_stage/packet_receiver.rs @@ -115,11 +115,13 @@ impl PacketReceiver { let mut dropped_packets_count = 0; let mut newly_buffered_packets_count = 0; + let mut newly_buffered_forwarded_packets_count = 0; Self::push_unprocessed( unprocessed_transaction_storage, deserialized_packets, &mut dropped_packets_count, &mut newly_buffered_packets_count, + &mut newly_buffered_forwarded_packets_count, 
banking_stage_stats, slot_metrics_tracker, tracer_packet_stats, @@ -144,6 +146,7 @@ impl PacketReceiver { deserialized_packets: Vec, dropped_packets_count: &mut usize, newly_buffered_packets_count: &mut usize, + newly_buffered_forwarded_packets_count: &mut usize, banking_stage_stats: &mut BankingStageStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, tracer_packet_stats: &mut TracerPacketStats, @@ -154,6 +157,10 @@ impl PacketReceiver { .increment(deserialized_packets.len() as u64); *newly_buffered_packets_count += deserialized_packets.len(); + *newly_buffered_forwarded_packets_count += deserialized_packets + .iter() + .filter(|p| p.original_packet().meta().forwarded()) + .count(); slot_metrics_tracker .increment_newly_buffered_packets_count(deserialized_packets.len() as u64); From 6ea51280ddc235ed93e16906c3427efd20cd7ce4 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Thu, 28 Sep 2023 20:10:41 -0500 Subject: [PATCH 210/407] Move sort timer out of loop (#33448) move sort timer out of loop Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index ee54d2a22c844b..38b36cd5a392c5 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7626,16 +7626,11 @@ impl AccountsDb { } fn sort_slot_storage_scan(accum: &mut BinnedHashData) -> u64 { - let time = AtomicU64::new(0); - accum.iter_mut().for_each(|items| { - let mut sort_time = Measure::start("sort"); + let (_, sort_time) = measure_us!(accum.iter_mut().for_each(|items| { // sort_by vs unstable because slot and write_version are already in order items.sort_by(AccountsHasher::compare_two_hash_entries); - sort_time.stop(); - time.fetch_add(sort_time.as_us(), Ordering::Relaxed); - }); - - time.load(Ordering::Relaxed) + })); + sort_time } /// normal code path returns the common cache path From 1261b3d4969db546f5a5f073152e39fc31d8fb3d Mon Sep 
17 00:00:00 2001 From: Greg Cusack Date: Fri, 29 Sep 2023 08:57:32 -0700 Subject: [PATCH 211/407] gossip test update (#33431) fix bug in gossip test --- gossip/tests/gossip.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/gossip/tests/gossip.rs b/gossip/tests/gossip.rs index 9240c2b3ef9145..d9abeec31b5569 100644 --- a/gossip/tests/gossip.rs +++ b/gossip/tests/gossip.rs @@ -99,9 +99,8 @@ where let exit = Arc::new(AtomicBool::new(false)); let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect(); topo(&listen); - let mut done = true; + let mut done = false; for i in 0..(num * 32) { - done = true; let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum(); if (total + num) * 10 > num * num * 9 { done = true; From b81ff5d654c0d942bbef9277bffa3c18fc44fc27 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 29 Sep 2023 12:04:33 -0400 Subject: [PATCH 212/407] Fixup the metrics for remove_dead_accounts_shrink_us (#33458) --- accounts-db/src/accounts_db.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 38b36cd5a392c5..1ce9c0f52d8c39 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8228,11 +8228,11 @@ impl AccountsDb { let mut shrink_candidate_slots = self.shrink_candidate_slots.lock().unwrap(); for slot in new_shrink_candidates { shrink_candidate_slots.insert(slot); - measure.stop(); - self.clean_accounts_stats - .remove_dead_accounts_shrink_us - .fetch_add(measure.as_us(), Ordering::Relaxed); } + measure.stop(); + self.clean_accounts_stats + .remove_dead_accounts_shrink_us + .fetch_add(measure.as_us(), Ordering::Relaxed); dead_slots.retain(|slot| { if let Some(slot_store) = self.storage.get_slot_storage_entry(*slot) { From 0e9e91c65ed8bc43c4922afdcb4f41b27fc9e585 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 29 Sep 2023 12:58:55 -0400 Subject: [PATCH 213/407] Drops the shrink_candidate_slots 
lock after done inserting (#33459) --- accounts-db/src/accounts_db.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1ce9c0f52d8c39..dad1f152f36de2 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8229,6 +8229,7 @@ impl AccountsDb { for slot in new_shrink_candidates { shrink_candidate_slots.insert(slot); } + drop(shrink_candidate_slots); measure.stop(); self.clean_accounts_stats .remove_dead_accounts_shrink_us From de38b05ad11db89bcef532223b2cb69c1ba10435 Mon Sep 17 00:00:00 2001 From: Jon Cinque Date: Fri, 29 Sep 2023 19:12:06 +0200 Subject: [PATCH 214/407] spl: Bump token-2022 and friends (#33453) * token: Update to 4.0.0 * token-2022: Bump and support new account and instruction types * Update token-2022 in fetch_spl / program-test * Fixup downstream uses * Mint and destination were flipped in 0.9.0 * Don't use `convert_pubkey` * Bump spl dependencies to versions which avoid recompilations --- Cargo.lock | 222 ++++++++++++++++-- Cargo.toml | 12 +- account-decoder/Cargo.toml | 2 + account-decoder/src/parse_token.rs | 18 +- account-decoder/src/parse_token_extension.rs | 183 +++++++++++++-- fetch-spl.sh | 2 +- ledger/Cargo.toml | 1 + ledger/src/token_balances.rs | 15 +- program-test/src/programs.rs | 2 +- .../src/programs/spl_token_2022-0.6.0.so | Bin 395648 -> 0 bytes .../src/programs/spl_token_2022-0.9.0.so | Bin 0 -> 518392 bytes programs/sbf/Cargo.lock | 220 ++++++++++++++--- programs/sbf/Cargo.toml | 2 + rpc/Cargo.toml | 1 + rpc/src/rpc.rs | 24 +- transaction-status/Cargo.toml | 3 +- .../src/parse_associated_token.rs | 2 +- transaction-status/src/parse_token.rs | 92 +++++++- .../extension/confidential_transfer.rs | 98 ++------ .../extension/confidential_transfer_fee.rs | 159 +++++++++++++ .../parse_token/extension/metadata_pointer.rs | 192 +++++++++++++++ .../src/parse_token/extension/mod.rs | 3 + .../parse_token/extension/transfer_hook.rs | 186 
+++++++++++++++ 23 files changed, 1247 insertions(+), 192 deletions(-) delete mode 100644 program-test/src/programs/spl_token_2022-0.6.0.so create mode 100644 program-test/src/programs/spl_token_2022-0.9.0.so create mode 100644 transaction-status/src/parse_token/extension/confidential_transfer_fee.rs create mode 100644 transaction-status/src/parse_token/extension/metadata_pointer.rs create mode 100644 transaction-status/src/parse_token/extension/transfer_hook.rs diff --git a/Cargo.lock b/Cargo.lock index 6e998ef754667b..3737cdc37424a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -709,7 +709,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" dependencies = [ "borsh-derive 0.10.3", - "hashbrown 0.13.2", + "hashbrown 0.12.3", ] [[package]] @@ -3299,6 +3299,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "num-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.37", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -3370,6 +3381,15 @@ dependencies = [ "num_enum_derive 0.6.1", ] +[[package]] +name = "num_enum" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" +dependencies = [ + "num_enum_derive 0.7.0", +] + [[package]] name = "num_enum_derive" version = "0.5.11" @@ -3394,6 +3414,18 @@ dependencies = [ "syn 2.0.37", ] +[[package]] +name = "num_enum_derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" +dependencies = [ + "proc-macro-crate 1.1.0", + "proc-macro2", + "quote", + "syn 2.0.37", +] + [[package]] name = "num_threads" version = "0.1.3" @@ 
-5077,8 +5109,10 @@ dependencies = [ "serde_json", "solana-config-program", "solana-sdk", + "spl-pod", "spl-token", "spl-token-2022", + "spl-token-metadata-interface", "thiserror", "zstd", ] @@ -5155,7 +5189,7 @@ dependencies = [ "memmap2", "memoffset 0.9.0", "modular-bitfield", - "num-derive", + "num-derive 0.3.3", "num-traits", "num_cpus", "num_enum 0.6.1", @@ -5198,7 +5232,7 @@ dependencies = [ "bincode", "bytemuck", "log", - "num-derive", + "num-derive 0.3.3", "num-traits", "rustc_version 0.4.0", "serde", @@ -6160,6 +6194,7 @@ dependencies = [ "solana-transaction-status", "solana-vote", "solana-vote-program", + "spl-pod", "spl-token", "spl-token-2022", "static_assertions", @@ -6483,7 +6518,7 @@ dependencies = [ "log", "memoffset 0.9.0", "num-bigint 0.4.4", - "num-derive", + "num-derive 0.3.3", "num-traits", "parking_lot 0.12.1", "rand 0.8.5", @@ -6519,7 +6554,7 @@ dependencies = [ "libc", "libsecp256k1", "log", - "num-derive", + "num-derive 0.3.3", "num-traits", "percentage", "rand 0.8.5", @@ -6632,7 +6667,7 @@ dependencies = [ "dialoguer", "hidapi", "log", - "num-derive", + "num-derive 0.3.3", "num-traits", "parking_lot 0.12.1", "qstring", @@ -6691,6 +6726,7 @@ dependencies = [ "solana-version", "solana-vote", "solana-vote-program", + "spl-pod", "spl-token", "spl-token-2022", "stream-cancel", @@ -6824,7 +6860,7 @@ dependencies = [ "memmap2", "memoffset 0.9.0", "modular-bitfield", - "num-derive", + "num-derive 0.3.3", "num-traits", "num_cpus", "num_enum 0.6.1", @@ -6904,7 +6940,7 @@ dependencies = [ "libsecp256k1", "log", "memmap2", - "num-derive", + "num-derive 0.3.3", "num-traits", "num_enum 0.6.1", "pbkdf2 0.11.0", @@ -7232,7 +7268,7 @@ dependencies = [ "Inflector", "base64 0.21.4", "bincode", - "borsh 0.9.3", + "borsh 0.10.3", "bs58", "lazy_static", "log", @@ -7410,7 +7446,7 @@ dependencies = [ "assert_matches", "bincode", "log", - "num-derive", + "num-derive 0.3.3", "num-traits", "rustc_version 0.4.0", "serde", @@ -7471,7 +7507,7 @@ dependencies = [ 
"bytemuck", "criterion", "curve25519-dalek", - "num-derive", + "num-derive 0.3.3", "num-traits", "solana-program-runtime", "solana-sdk", @@ -7504,7 +7540,7 @@ dependencies = [ "itertools", "lazy_static", "merlin", - "num-derive", + "num-derive 0.3.3", "num-traits", "rand 0.7.3", "serde", @@ -7562,13 +7598,13 @@ dependencies = [ [[package]] name = "spl-associated-token-account" -version = "1.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978dba3bcbe88d0c2c58366c254d9ea41c5f73357e72fc0bdee4d6b5fc99c8f4" +checksum = "385e31c29981488f2820b2022d8e731aae3b02e6e18e2fd854e4c9a94dc44fc3" dependencies = [ "assert_matches", - "borsh 0.9.3", - "num-derive", + "borsh 0.10.3", + "num-derive 0.4.0", "num-traits", "solana-program", "spl-token", @@ -7576,6 +7612,41 @@ dependencies = [ "thiserror", ] +[[package]] +name = "spl-discriminator" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cce5d563b58ef1bb2cdbbfe0dfb9ffdc24903b10ae6a4df2d8f425ece375033f" +dependencies = [ + "bytemuck", + "solana-program", + "spl-discriminator-derive", +] + +[[package]] +name = "spl-discriminator-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" +dependencies = [ + "quote", + "spl-discriminator-syn", + "syn 2.0.37", +] + +[[package]] +name = "spl-discriminator-syn" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e5f2044ca42c8938d54d1255ce599c79a1ffd86b677dfab695caa20f9ffc3f2" +dependencies = [ + "proc-macro2", + "quote", + "sha2 0.10.7", + "syn 2.0.37", + "thiserror", +] + [[package]] name = "spl-instruction-padding" version = "0.1.0" @@ -7588,46 +7659,145 @@ dependencies = [ [[package]] name = "spl-memo" -version = "3.0.1" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f0f180b03318c3dbab3ef4e1e4d46d5211ae3c780940dd0a28695aba4b59a75a" +dependencies = [ + "solana-program", +] + +[[package]] +name = "spl-pod" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" +checksum = "2881dddfca792737c0706fa0175345ab282b1b0879c7d877bad129645737c079" dependencies = [ + "borsh 0.10.3", + "bytemuck", "solana-program", + "solana-zk-token-sdk", + "spl-program-error", +] + +[[package]] +name = "spl-program-error" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "249e0318493b6bcf27ae9902600566c689b7dfba9f1bdff5893e92253374e78c" +dependencies = [ + "num-derive 0.4.0", + "num-traits", + "solana-program", + "spl-program-error-derive", + "thiserror", +] + +[[package]] +name = "spl-program-error-derive" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5269c8e868da17b6552ef35a51355a017bd8e0eae269c201fef830d35fa52c" +dependencies = [ + "proc-macro2", + "quote", + "sha2 0.10.7", + "syn 2.0.37", +] + +[[package]] +name = "spl-tlv-account-resolution" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "062e148d3eab7b165582757453632ffeef490c02c86a48bfdb4988f63eefb3b9" +dependencies = [ + "bytemuck", + "solana-program", + "spl-discriminator", + "spl-pod", + "spl-program-error", + "spl-type-length-value", ] [[package]] name = "spl-token" -version = "3.5.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e85e168a785e82564160dcb87b2a8e04cee9bfd1f4d488c729d53d6a4bd300d" +checksum = "08459ba1b8f7c1020b4582c4edf0f5c7511a5e099a7a97570c9698d4f2337060" dependencies = [ "arrayref", "bytemuck", - "num-derive", + "num-derive 0.3.3", "num-traits", - "num_enum 0.5.11", + "num_enum 0.6.1", "solana-program", "thiserror", ] [[package]] name = "spl-token-2022" -version = "0.6.1" 
+version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0043b590232c400bad5ee9eb983ced003d15163c4c5d56b090ac6d9a57457b47" +checksum = "e4abf34a65ba420584a0c35f3903f8d727d1f13ababbdc3f714c6b065a686e86" dependencies = [ "arrayref", "bytemuck", - "num-derive", + "num-derive 0.4.0", "num-traits", - "num_enum 0.5.11", + "num_enum 0.7.0", "solana-program", "solana-zk-token-sdk", "spl-memo", + "spl-pod", "spl-token", + "spl-token-metadata-interface", + "spl-transfer-hook-interface", + "spl-type-length-value", "thiserror", ] +[[package]] +name = "spl-token-metadata-interface" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c16ce3ba6979645fb7627aa1e435576172dd63088dc7848cb09aa331fa1fe4f" +dependencies = [ + "borsh 0.10.3", + "solana-program", + "spl-discriminator", + "spl-pod", + "spl-program-error", + "spl-type-length-value", +] + +[[package]] +name = "spl-transfer-hook-interface" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "051d31803f873cabe71aec3c1b849f35248beae5d19a347d93a5c9cccc5d5a9b" +dependencies = [ + "arrayref", + "bytemuck", + "solana-program", + "spl-discriminator", + "spl-pod", + "spl-program-error", + "spl-tlv-account-resolution", + "spl-type-length-value", +] + +[[package]] +name = "spl-type-length-value" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a468e6f6371f9c69aae760186ea9f1a01c2908351b06a5e0026d21cfc4d7ecac" +dependencies = [ + "bytemuck", + "solana-program", + "spl-discriminator", + "spl-pod", + "spl-program-error", +] + [[package]] name = "static_assertions" version = "1.1.0" diff --git a/Cargo.toml b/Cargo.toml index 6e31e785e6c177..c0f31b9e377840 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -373,11 +373,13 @@ solana-vote-program = { path = "programs/vote", version = "=1.17.0" } solana-zk-keygen = { path = "zk-keygen", version = "=1.17.0" } 
solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.17.0" } solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.17.0" } -spl-associated-token-account = "=1.1.3" +spl-associated-token-account = "=2.2.0" spl-instruction-padding = "0.1" -spl-memo = "=3.0.1" -spl-token = "=3.5.0" -spl-token-2022 = "=0.6.1" +spl-memo = "=4.0.0" +spl-pod = "=0.1.0" +spl-token = "=4.0.0" +spl-token-2022 = "=0.9.0" +spl-token-metadata-interface = "=0.2.0" static_assertions = "1.1.0" stream-cancel = "0.8.1" strum = "0.24" @@ -423,8 +425,10 @@ crossbeam-epoch = { git = "https://github.com/solana-labs/crossbeam", rev = "fd2 # * spl-associated-token-account # * spl-instruction-padding # * spl-memo +# * spl-pod # * spl-token # * spl-token-2022 +# * spl-token-metadata-interface # # They, in turn, depend on a number of crates that we also include directly using `path` # specifications. For example, `spl-token` depends on `solana-program`. And we explicitly specify diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index bb82b077dcf911..3f883ddc23f9f5 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -23,11 +23,13 @@ solana-config-program = { workspace = true } solana-sdk = { workspace = true } spl-token = { workspace = true, features = ["no-entrypoint"] } spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } +spl-token-metadata-interface = { workspace = true } thiserror = { workspace = true } zstd = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } +spl-pod = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/account-decoder/src/parse_token.rs b/account-decoder/src/parse_token.rs index 6f03a306cde372..42633e9d8ec462 100644 --- a/account-decoder/src/parse_token.rs +++ b/account-decoder/src/parse_token.rs @@ -290,12 +290,10 @@ mod test { use { super::*, crate::parse_token_extension::{UiMemoTransfer, UiMintCloseAuthority}, - 
spl_token_2022::{ - extension::{ - immutable_owner::ImmutableOwner, memo_transfer::MemoTransfer, - mint_close_authority::MintCloseAuthority, ExtensionType, StateWithExtensionsMut, - }, - pod::OptionalNonZeroPubkey, + spl_pod::optional_keys::OptionalNonZeroPubkey, + spl_token_2022::extension::{ + immutable_owner::ImmutableOwner, memo_transfer::MemoTransfer, + mint_close_authority::MintCloseAuthority, ExtensionType, StateWithExtensionsMut, }, }; @@ -506,10 +504,11 @@ mod test { delegate: COption::None, delegated_amount: 0, }; - let account_size = ExtensionType::get_account_len::(&[ + let account_size = ExtensionType::try_calculate_account_len::(&[ ExtensionType::ImmutableOwner, ExtensionType::MemoTransfer, - ]); + ]) + .unwrap(); let mut account_data = vec![0; account_size]; let mut account_state = StateWithExtensionsMut::::unpack_uninitialized(&mut account_data).unwrap(); @@ -586,7 +585,8 @@ mod test { fn test_parse_token_mint_with_extensions() { let owner_pubkey = SplTokenPubkey::new_from_array([3; 32]); let mint_size = - ExtensionType::get_account_len::(&[ExtensionType::MintCloseAuthority]); + ExtensionType::try_calculate_account_len::(&[ExtensionType::MintCloseAuthority]) + .unwrap(); let mint_base = Mint { mint_authority: COption::Some(owner_pubkey), supply: 42, diff --git a/account-decoder/src/parse_token_extension.rs b/account-decoder/src/parse_token_extension.rs index 0df45a6b6dab3c..39d26d83a20b99 100644 --- a/account-decoder/src/parse_token_extension.rs +++ b/account-decoder/src/parse_token_extension.rs @@ -6,6 +6,7 @@ use { solana_program::pubkey::Pubkey, solana_zk_token_sdk::zk_token_elgamal::pod::ElGamalPubkey, }, + spl_token_metadata_interface::state::TokenMetadata, }; #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] @@ -24,15 +25,21 @@ pub enum UiExtension { InterestBearingConfig(UiInterestBearingConfig), CpiGuard(UiCpiGuard), PermanentDelegate(UiPermanentDelegate), - UnparseableExtension, NonTransferableAccount, + 
ConfidentialTransferFeeConfig(UiConfidentialTransferFeeConfig), + ConfidentialTransferFeeAmount(UiConfidentialTransferFeeAmount), + TransferHook(UiTransferHook), + TransferHookAccount(UiTransferHookAccount), + MetadataPointer(UiMetadataPointer), + TokenMetadata(UiTokenMetadata), + UnparseableExtension, } pub fn parse_extension( extension_type: &ExtensionType, account: &StateWithExtensions, ) -> UiExtension { - match &extension_type { + match extension_type { ExtensionType::Uninitialized => UiExtension::Uninitialized, ExtensionType::TransferFeeConfig => account .get_extension::() @@ -50,10 +57,18 @@ pub fn parse_extension( .get_extension::() .map(|&extension| UiExtension::ConfidentialTransferMint(extension.into())) .unwrap_or(UiExtension::UnparseableExtension), + ExtensionType::ConfidentialTransferFeeConfig => account + .get_extension::() + .map(|&extension| UiExtension::ConfidentialTransferFeeConfig(extension.into())) + .unwrap_or(UiExtension::UnparseableExtension), ExtensionType::ConfidentialTransferAccount => account .get_extension::() .map(|&extension| UiExtension::ConfidentialTransferAccount(extension.into())) .unwrap_or(UiExtension::UnparseableExtension), + ExtensionType::ConfidentialTransferFeeAmount => account + .get_extension::() + .map(|&extension| UiExtension::ConfidentialTransferFeeAmount(extension.into())) + .unwrap_or(UiExtension::UnparseableExtension), ExtensionType::DefaultAccountState => account .get_extension::() .map(|&extension| UiExtension::DefaultAccountState(extension.into())) @@ -77,6 +92,22 @@ pub fn parse_extension( .map(|&extension| UiExtension::PermanentDelegate(extension.into())) .unwrap_or(UiExtension::UnparseableExtension), ExtensionType::NonTransferableAccount => UiExtension::NonTransferableAccount, + ExtensionType::MetadataPointer => account + .get_extension::() + .map(|&extension| UiExtension::MetadataPointer(extension.into())) + .unwrap_or(UiExtension::UnparseableExtension), + ExtensionType::TokenMetadata => account + 
.get_variable_len_extension::() + .map(|extension| UiExtension::TokenMetadata(extension.into())) + .unwrap_or(UiExtension::UnparseableExtension), + ExtensionType::TransferHook => account + .get_extension::() + .map(|&extension| UiExtension::TransferHook(extension.into())) + .unwrap_or(UiExtension::UnparseableExtension), + ExtensionType::TransferHookAccount => account + .get_extension::() + .map(|&extension| UiExtension::TransferHookAccount(extension.into())) + .unwrap_or(UiExtension::UnparseableExtension), } } @@ -251,9 +282,7 @@ impl From for UiPermanentDeleg pub struct UiConfidentialTransferMint { pub authority: Option, pub auto_approve_new_accounts: bool, - pub auditor_encryption_pubkey: Option, - pub withdraw_withheld_authority_encryption_pubkey: Option, - pub withheld_amount: String, + pub auditor_elgamal_pubkey: Option, } impl From @@ -263,19 +292,44 @@ impl From confidential_transfer_mint: extension::confidential_transfer::ConfidentialTransferMint, ) -> Self { let authority: Option = confidential_transfer_mint.authority.into(); - let auditor_encryption_pubkey: Option = - confidential_transfer_mint.auditor_encryption_pubkey.into(); - let withdraw_withheld_authority_encryption_pubkey: Option = - confidential_transfer_mint - .withdraw_withheld_authority_encryption_pubkey - .into(); + let auditor_elgamal_pubkey: Option = + confidential_transfer_mint.auditor_elgamal_pubkey.into(); Self { authority: authority.map(|pubkey| pubkey.to_string()), auto_approve_new_accounts: confidential_transfer_mint.auto_approve_new_accounts.into(), - auditor_encryption_pubkey: auditor_encryption_pubkey.map(|pubkey| pubkey.to_string()), - withdraw_withheld_authority_encryption_pubkey: - withdraw_withheld_authority_encryption_pubkey.map(|pubkey| pubkey.to_string()), - withheld_amount: format!("{}", confidential_transfer_mint.withheld_amount), + auditor_elgamal_pubkey: auditor_elgamal_pubkey.map(|pubkey| pubkey.to_string()), + } + } +} + +#[derive(Debug, Serialize, Deserialize, 
PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct UiConfidentialTransferFeeConfig { + pub authority: Option, + pub withdraw_withheld_authority_elgamal_pubkey: Option, + pub harvest_to_mint_enabled: bool, + pub withheld_amount: String, +} + +impl From + for UiConfidentialTransferFeeConfig +{ + fn from( + confidential_transfer_fee_config: extension::confidential_transfer_fee::ConfidentialTransferFeeConfig, + ) -> Self { + let authority: Option = confidential_transfer_fee_config.authority.into(); + let withdraw_withheld_authority_elgamal_pubkey: Option = + confidential_transfer_fee_config + .withdraw_withheld_authority_elgamal_pubkey + .into(); + Self { + authority: authority.map(|pubkey| pubkey.to_string()), + withdraw_withheld_authority_elgamal_pubkey: withdraw_withheld_authority_elgamal_pubkey + .map(|pubkey| pubkey.to_string()), + harvest_to_mint_enabled: confidential_transfer_fee_config + .harvest_to_mint_enabled + .into(), + withheld_amount: format!("{}", confidential_transfer_fee_config.withheld_amount), } } } @@ -284,7 +338,7 @@ impl From #[serde(rename_all = "camelCase")] pub struct UiConfidentialTransferAccount { pub approved: bool, - pub encryption_pubkey: String, + pub elgamal_pubkey: String, pub pending_balance_lo: String, pub pending_balance_hi: String, pub available_balance: String, @@ -295,7 +349,6 @@ pub struct UiConfidentialTransferAccount { pub maximum_pending_balance_credit_counter: u64, pub expected_pending_balance_credit_counter: u64, pub actual_pending_balance_credit_counter: u64, - pub withheld_amount: String, } impl From @@ -306,7 +359,7 @@ impl From ) -> Self { Self { approved: confidential_transfer_account.approved.into(), - encryption_pubkey: format!("{}", confidential_transfer_account.encryption_pubkey), + elgamal_pubkey: format!("{}", confidential_transfer_account.elgamal_pubkey), pending_balance_lo: format!("{}", confidential_transfer_account.pending_balance_lo), pending_balance_hi: format!("{}", 
confidential_transfer_account.pending_balance_hi), available_balance: format!("{}", confidential_transfer_account.available_balance), @@ -332,7 +385,99 @@ impl From actual_pending_balance_credit_counter: confidential_transfer_account .actual_pending_balance_credit_counter .into(), - withheld_amount: format!("{}", confidential_transfer_account.withheld_amount), + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct UiConfidentialTransferFeeAmount { + pub withheld_amount: String, +} + +impl From + for UiConfidentialTransferFeeAmount +{ + fn from( + confidential_transfer_fee_amount: extension::confidential_transfer_fee::ConfidentialTransferFeeAmount, + ) -> Self { + Self { + withheld_amount: format!("{}", confidential_transfer_fee_amount.withheld_amount), + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct UiMetadataPointer { + pub authority: Option, + pub metadata_address: Option, +} + +impl From for UiMetadataPointer { + fn from(metadata_pointer: extension::metadata_pointer::MetadataPointer) -> Self { + let authority: Option = metadata_pointer.authority.into(); + let metadata_address: Option = metadata_pointer.metadata_address.into(); + Self { + authority: authority.map(|pubkey| pubkey.to_string()), + metadata_address: metadata_address.map(|pubkey| pubkey.to_string()), + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct UiTokenMetadata { + pub update_authority: Option, + pub mint: String, + pub name: String, + pub symbol: String, + pub uri: String, + pub additional_metadata: Vec<(String, String)>, +} + +impl From for UiTokenMetadata { + fn from(token_metadata: TokenMetadata) -> Self { + let update_authority: Option = token_metadata.update_authority.into(); + Self { + update_authority: update_authority.map(|pubkey| pubkey.to_string()), + mint: token_metadata.mint.to_string(), 
+ name: token_metadata.name, + symbol: token_metadata.symbol, + uri: token_metadata.uri, + additional_metadata: token_metadata.additional_metadata, + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct UiTransferHook { + pub authority: Option, + pub program_id: Option, +} + +impl From for UiTransferHook { + fn from(transfer_hook: extension::transfer_hook::TransferHook) -> Self { + let authority: Option = transfer_hook.authority.into(); + let program_id: Option = transfer_hook.program_id.into(); + Self { + authority: authority.map(|pubkey| pubkey.to_string()), + program_id: program_id.map(|pubkey| pubkey.to_string()), + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct UiTransferHookAccount { + pub transferring: bool, +} + +impl From for UiTransferHookAccount { + fn from(transfer_hook: extension::transfer_hook::TransferHookAccount) -> Self { + Self { + transferring: transfer_hook.transferring.into(), } } } diff --git a/fetch-spl.sh b/fetch-spl.sh index 2d651488948af8..bb8e84ebb2f6a7 100755 --- a/fetch-spl.sh +++ b/fetch-spl.sh @@ -45,7 +45,7 @@ fetch_program() { } fetch_program token 3.5.0 TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA BPFLoader2111111111111111111111111111111111 -fetch_program token-2022 0.6.0 TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb BPFLoaderUpgradeab1e11111111111111111111111 +fetch_program token-2022 0.9.0 TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb BPFLoaderUpgradeab1e11111111111111111111111 fetch_program memo 1.0.0 Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo BPFLoader1111111111111111111111111111111111 fetch_program memo 3.0.0 MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr BPFLoader2111111111111111111111111111111111 fetch_program associated-token-account 1.1.2 ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL BPFLoader2111111111111111111111111111111111 diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 
f6fbb140e55691..df52fb3462eaf1 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -78,6 +78,7 @@ features = ["lz4"] bs58 = { workspace = true } solana-account-decoder = { workspace = true } solana-logger = { workspace = true } +spl-pod = { workspace = true } test-case = { workspace = true } [build-dependencies] diff --git a/ledger/src/token_balances.rs b/ledger/src/token_balances.rs index 41f3a38ac3315a..204bd4335972aa 100644 --- a/ledger/src/token_balances.rs +++ b/ledger/src/token_balances.rs @@ -121,12 +121,12 @@ mod test { use { super::*, solana_sdk::{account::Account, genesis_config::create_genesis_config}, + spl_pod::optional_keys::OptionalNonZeroPubkey, spl_token_2022::{ extension::{ immutable_owner::ImmutableOwner, memo_transfer::MemoTransfer, mint_close_authority::MintCloseAuthority, ExtensionType, StateWithExtensionsMut, }, - pod::OptionalNonZeroPubkey, solana_program::{program_option::COption, program_pack::Pack}, }, std::collections::BTreeMap, @@ -291,7 +291,8 @@ mod test { let mint_authority = Pubkey::new_unique(); let mint_size = - ExtensionType::get_account_len::(&[ExtensionType::MintCloseAuthority]); + ExtensionType::try_calculate_account_len::(&[ExtensionType::MintCloseAuthority]) + .unwrap(); let mint_base = Mint { mint_authority: COption::None, supply: 4242, @@ -339,10 +340,11 @@ mod test { delegated_amount: 0, close_authority: COption::None, }; - let account_size = ExtensionType::get_account_len::(&[ + let account_size = ExtensionType::try_calculate_account_len::(&[ ExtensionType::ImmutableOwner, ExtensionType::MemoTransfer, - ]); + ]) + .unwrap(); let mut account_data = vec![0; account_size]; let mut account_state = StateWithExtensionsMut::::unpack_uninitialized(&mut account_data) @@ -381,10 +383,11 @@ mod test { delegated_amount: 0, close_authority: COption::None, }; - let account_size = ExtensionType::get_account_len::(&[ + let account_size = ExtensionType::try_calculate_account_len::(&[ ExtensionType::ImmutableOwner, 
ExtensionType::MemoTransfer, - ]); + ]) + .unwrap(); let mut account_data = vec![0; account_size]; let mut account_state = StateWithExtensionsMut::::unpack_uninitialized(&mut account_data) diff --git a/program-test/src/programs.rs b/program-test/src/programs.rs index 2224252da77d19..ed96be7644f277 100644 --- a/program-test/src/programs.rs +++ b/program-test/src/programs.rs @@ -30,7 +30,7 @@ static SPL_PROGRAMS: &[(Pubkey, Pubkey, &[u8])] = &[ ( spl_token_2022::ID, solana_sdk::bpf_loader_upgradeable::ID, - include_bytes!("programs/spl_token_2022-0.6.0.so"), + include_bytes!("programs/spl_token_2022-0.9.0.so"), ), ( spl_memo_1_0::ID, diff --git a/program-test/src/programs/spl_token_2022-0.6.0.so b/program-test/src/programs/spl_token_2022-0.6.0.so deleted file mode 100644 index 0638fee19500c94582b129ff24b882f6aa4251e6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 395648 zcmeFa3xJ(fbua#%Ne+4V7zjDTFamRiJQ+C>B9D+tNx*~pyM1Emp0mwH9mDXnn-0u)gK*(Q;Y;X)PAHKib08Df*+@Vt$YM??gLZ_r@ryTDuv3(efxNScP$n2)Ouh z^;?0!Avngtd!RI~nLndyzj)E85m9sy39W$l>{z1-l6Ik7EBsb8dcl3Q_*VSf(pw9& z{*fD^h(7Dt1vo@4F}%B5v_<*oHB#XyPe0f2`~YhKzMG2T6VA)fo@zyc5%WI>xYc_# zTzyb|<~s-Z;>rPO8}VgLrExEo429f_gdz)3p6x5-$TiIWoaB$kDV+Fh#zs-kS&C0@ zi{g`9q2Xk*rn6mi`REnmlU^X5xJmd+d`2$^-lH_G$NH;k^^CH9*o8vOQku{%84Ee3 zWqhyJS7& zzg~IcJxFDKhfBHx%%S)}%RgP~Ei@^BUd8eO&ArG+JrD(QTFWsB7mz<%jJmk751$E&m~qg84tf{3eG<+YT*f{F1aiT~{C58R{{w z_bI@sJ}GEK)n4^TYaXJbdBOUzBG!b+%$>@DI3NY zgZS-02IMaT`85})dF4Y#kc^o8oP-akgY)wgN5S=DJ&-Js!RfM3K<(!<>$)ZOgk2jGuE zLiHw1eV|MIXRI9MpGEJPP?7}pH^%`6=y`j*pR3~~TlChu)Ia}Lt=RK<`a-|~=qTQ* z^eT+ZDSsP3lfLYNdotyh=5x_6P_e^fKA;+vp3Kx^{KNiaO0$JWkv@ct$^pjOaZOaZ z)XGmqI@~hkE5d)q{5ts^eSY*jcxe}j2N&v*Lx%vbL&r1nD{%UHAv>2#~DTwK{LwCe660m1i=i3BF!(+f@P-P*Y6d_;cz^rB!~ zFncau;^ofLa?8XUB!4nKDA~<$OFPo&&%$UQ(2NX?wa#CA2Swy#eN16}j}xyh{D3+b|f5r)eU(CRd1^%oP20x4cJ*T)o z?mWr;amz{WkLyozfBc5PUw*>+y9h`jzg{BnSDi5US$>)T`1Sft6;Csj5L~JsRDB74 z5M7bimj&gkmrM32o}hkVgbpB$=FPh^^JYps$KO?fy-j`4zE10*ej!~%&rDbNNyljU 
zLH)vCu7A8eCj+0CoIrfoUbcsgfcHS@vZ3iUoOK4$>*o*D=tVn><3%!DdQBrH(7U&c zJ|KQM62Ek-4w$99;@3V$vPH>FO8@3-Eo}NfrT=V=uimUa@%x|es^RB$S(VHL$Mo%C zx{sd**ZaAgdzs{qEj}Kn<5tpXcJDX|)nu)X zqsiLmq;Hb7`^69EXM50L)jlmpy!InZ{wyMj?T9wP&VtAAYV&vHv_HzyE0A-9>3UoA zM_#N|(7(gl(^8c5xWn3G>nySHebS=wPKU(Sjo2PS?7d%#r}AAjxg=k(=i-T)ULHAC ze#A0!!Mcw5lSVWPM)5?e=R(8F@tdvjy_YHm!M^ zFE&2Si`A=kr4+;;LUeg&J#1fA-h0Mr?y{}7^qi%1b$hU!({{T3=j#w2?lOCY=NxIB zqU)iWey?7k<*HNFkIymR*FSCjtljM2tGjd@aJ#Ykgq81BpY6XNIHc>RJsRI^<$azO zuA7_<{F_&3zT^W&PYtGY$*$ve8Qik1`=#qQ#&7J84!|HjZvp&#G(jMaf*e4_Msbhv zVVmiP9<4WNGyP!g_I`-1zEnRbdq>G*rXL+&G=}dZYWp%XVcnFho^3gQ>MSm?(If-q)cgHw16wc+3?flT7e3BeFeR@rf zI6e}?WU|7`N0&=R$FotpS)$gimq`AFz)#u(e;s0a*FG=p@pb&wsE6ak_3-yI-RMMm zvb{smgX1MzmXB1v>g!P(Ev$z_4eL1&)DxyleMlF4;jfc?iZAgYo`=I{B+}#MgX!PC z9+7ZY_n2Pp(y;TLujjH~{v1iQ@|v&N%7=CfmSGNsaRW`(MV_S)bOdUlVFsb zQNx!pzW$_!WCeuq-YJuvNk}aA9 z1A&;5ZuAGQ(;aH}e!p{q=I6Q-o!eJ;^eQ~jGmKa9$NG-NBg_{2J|p!cv!wfTcOd{r z`DT35X5;339XDAY%O~@7Tn+6S*{2y`Y3%(WANMoaub+e;S9IJi=a2vwS9DzW{&KoI z-^P79K9`$-AROVmWvTiA2Y6bZ@ox{}(|zO#QcU)J*>dN{q5J84DR4CXL^`ma#8)&y zM{C~_tv{t-r_m4jh)>cvJo`+7#o z=XsNxDaOwT)1`cl^>`1nOR{|3tm>*EUo-#F@b#@Hs2|P_=pFV$M-ZR(gOW9BzD$}~ zz`vo?L_G7-2uHrzWzcOKY`qa@xb`m_p7<@PtGLW=$GzNK4qA$r+AV2lctLA>mo_F z1fwzxSeD@U0Jf5Gh<;^uR=KPnWpi! 
zUX#q+A?Ryw zOV45T+&fF-D^E*4=SR0I&!2IIq`Mv3J4FeZwzJRr&G@A{QQ;@+Rlr<6obR~~LcZ|x zU?;nOy}okvjYFizk^7m}0_my!e0A3a60Yy3ztjH7@O^3C7%hL8e&hNy`)#iDo7#^x zyZCpT(V64c2!H*m$v>gdH4Q`t^e~_#bgQRd-gje zisu-1WcGvlfxqiv*JtVYyF=^cIS97b&*d@y&u)V}8h$WKWQD`$pS1UiD9U_ez%)dk z;nn7s?L1n%W-4NU=W1=3!*e(+|IesrA@Z<6Wvu6slX*VnmKG=lb8O#v`0eeWej*3> zxjmm>WalyE=%2HQfr*A!Ze02KUNv0F& zLOOjO{;=t!-5V7~T*2ZP5BEMV{`@`S$G4q%fZ=$d?T@`q_s3!ze>uN?ZD-KW0bhNt zA-yIMT$J3sTkBc-wAS+|7zr+3uYSSKdE}$dNId%&?0qO{wR{g2B|hF@d|8p8oCChy zi1cFO;+F9O${eHfSdY-7#9^MMSbmb3(UZtC;0ZDHyUz)U5!qr>E zi&8iUvV3{ObBCzM;(qPtW~*m~*;A8p-#WyVT=paN`$|hM&3PL_C|9w1#>WzN`>8!+ zhm~_*)q480o@$3=k5YRL$W#!u(&}|Mqz~iMb|W96qSAQ&MLZh;mn_;S^rAh;ek+gA z^lHJ=RqmE6xC z&*8eU=!GsqQ8>hp`9JVJv~ysOwqv#=N4^dmUxy5E@vBXa=9-_mH{j!5T-hOTdRvNG zZs6%*!c_rFu22R@Cab}ov~(Dp-NS_QI?110p$wQzzCpt++A!MfU5N4hkVbU<8t6fO zACG+Xdh1c?&+vQ<=}NvD&iNSLE7_B=k3OB$>5kZJh@5MeozfRL<9uztAa~8F7XJ*P7oV1+5vHG!&DsU3NXJqRx$rU#K z&eeX$JXGb{`YRm|jcz_3_RQA&xQ`?Gd)+Rub%Uhs=L%=`7^QPtPVt?6mW9Vy*z{MM zwuk*hm$Vr@+KdiuO2?#4`yr(FL5>f7T8`@;>^Gl}B=glCVL#sAfKLn2%cNl3ulaml z>+O!mog?W{Th|+?=iJdAaJ`?8z9997>9ing>3=5aq&G~KKwhefcc0RucS8}W;Nu?gqGVi$ zg^h2hGFdeoE@`b?)@Mgz2dQI)F_pbHNKPYnM777DoM2 z?3lu+U+~%j=zidr%pc#R^9=B(h1IUWc@+z*{ekr`3Hy2w=e-+IKAiX70$rMneNM}D zX}b=m4=-xaU*+g*q6IyoaFWOND7=c{S24UQicflOAk~}o_b2y)?~i7jJCMrWdO_giI0p z;kjQyuGedr^1L5Elg_6lKjGf{1mIRIKky%o{J1wJ>As%e<(U7?$R976CIxU`LGrnt zVET`co*l;uk;;E4N8~f;NhzPC^CvxDk)Atji&Xzj0${jg(ZOHQk8OQDnR!6Nm{Zb0 zPiZ)XYw~lmmG^Ug*J^&)K~le;Yjl0({gaPAq;gzQdbxeYe0x}*+1XPd{GKXuWc$XA-&nZ;W$2({k2u`^Z6+3SizI@y>lM#-|ZE;&>p8g@b%@m3yE-t zldI1)=$~?=_m1Kvvshf#y-BeU&x4pB&|Vt-w!GxwAL4WWe}WHpMoRa}L8YhN%k_H` zAspxD=h`X9-i{7|3AvVbCEo9Ss`s(4tnmAFUXu3vfX+YCe&1^Rbj11bWT~&K?^{w| zcc<}L|JStsnO5Jxmo+@!_-yMJHN439Y>U>{T|7_VCDYg&;EP#2RRezkM-%945)&>T z{kK#PG}aTJP=4#5rhea4@f*?a_b&kbwrl%qbTqpvrK7bYrK7bYrK7cD!00yKf9}czjOjwN1msNAzEphT}ekPkprjQN-hK(56sdK8=RCzV!RN zVg4^Nf5qT~4kEuBG)#EE3Hhz{hv_q$PbY1zpPa9~KaB5FzO#OHe!kr3G0Esqu=dZ< zdh?MIAh!8<9e%;MLH$lVw@e28cVziT`L-s9Cy0L@ay&1QA3Z%q@q4vdQSAL=_tRn% 
znD{Ph_dI2^WX4%iE}3ffunj2KZ&JOZ$yj%4M$e$1)=PF-f9$|d-)C(!KR(EQ4f1y} zf0LGH|Iflt((OMo`6@%?`+g?hKOtY-r*;Y9O*D8LkdJl)msVG{=J6Nuw&oo3DQOJ1 zi_h_V3Vu1Bjqv-1!J{Vf@zw2+kT1yRY|k$b9>QAwLn=2@SR3Hmyst2#N%jMdG|zQA z($6;^Zu}u#mZ6R!{$Fi=+1D%A03kSEM{G3DrQ$gnd##umueQbY_FB!J{_`#TCrTdRy4@y=%je%v!p+IcuS{bfTsy4 zEQ#m5jT8*~xk&pK{5B&)hu8m{c+hZoe^S;*KV7+Yy6-Kt+B1k$9;>X zT_Js)E+0KVq>CD~amCu*rtOSVx)>e$jczlIPEMCAOz-*ng6+3EzTr3wR*e!Hhh1){ zH6-5lW%AMQYJY4Vt@Pii^zQ+W(Yd|V>v%rS(u}`U^FMC-w(TklpJnN@Eo}H&|C5e% z;Ctf>r-Su-o61Q@$7!^KgK>&P!s<3#RmQ}`&+d6~up}nJ%v^kzO zZl>)(A-J@il}Os-bm00tO!=s%=TXU5$l3g}XR(Ase2IUE?@bNxEl0ZE5Vo7`jQdQU z`&FLV-p4UO)bxqlbxJ?#eYUr^L(q_Y1nT?lr!*Y*SwCBP$`9#0>od|X;d~72o33lb zJGFd1()EQJA3rDMYJ7Y^!zmvh)bMcmIIU-^l=E{^es0*$w-nB}T^d$6W1+OWaK>#i zE+vc1Zf(=~M|F+1pX20W{Pb~>^t`T&_Kmgqi5;Lhoc=wxVgJpS3UJ;;?2Y6xTX(pZ zS{MD5t~0QAXzzbR{B%6Ab+33rQR0(Mowvt_nfL$l0<SEcjr8{Gi;1`@(KF`M4AJDLl8=@_2=V1?$#BiXa3W_*vU~r1g-k zhNsVq^HzR{edFVw7h5}qIWLa;Ob$$bYWhm?%t!wqbm02RG_*V0uWsl1`9AW=-`+v~ z+$;TD$k{w>z}9OAXYCw{zwc5!^O4T$s?eEq-hUzfMZ^=ox1hbt z@S9<`|NXup$D>bW&L=_rNjaz!vHKAe*M`3?FSYeU<_3K}AfBwpQT0(D6&c@vuCowN zyk6fBFBF6$Ui&h5v3`~#UX)*mHsoCC?DaI7pS>LQWckpZK!>|tu=*KJ{mwg?kVsM7E3ziC= zL)!&26psdWK`Z!;bolxCz(24HiinRZYHycwR|tIS%f)PGMS>!iGtdu7+cbe6j{7|s zy>h#Z`}O(WjC{#j-3OWi`MHO-fZtWtaWu+`|8%c`=;u6-mmklY&|SL1w(Lb^HjHA3r#woj{9Z^zk**Jj)v2D z*;EbB*Lhnq+2%WbUSzJ#E7JXP7#VQv-@)erCZ+MzEVX&&g%V#I=NBj*6M#5eGSl#y zX!Xpu`JxuXevaYg`_fswCJPwor?ccoSKnpQp6*Vor~hJWuhla!(b{YEY&}oIi?p8Z z$>#~2+R5WE!SV(ZqQx%e28)1?W2%S$Tvr$N0w*w z$eT{G9{H17j!uuf8}jt))FZ1-upW8ty@U15Pc+MW1^bLXn3jYna=b7i=AwH<*aVgVd_upur z6JI|MR#o#z;51`CcD#{?7|pRAGx0lZhrLtn_f&A+LfbW<>s>sb3lNh3PG71BK))1Mzkd&zH0q}+7`AN6#1i0|`K?w4GMn50wXCXByU z;%(h(rpgcb6jetxJ9RKUt9J=_q5Ct~U&%*LOS_5hrl7s#ugj1%AI*^hOrM)+4^L!d z`;YBGQIym5uAZgD2KED3WCr|a%pZzR3GWuz+4<`}Ly zS?}abzmS8yUg|sIi*+x7S6eS!BH_c`vswrG`#8jQ({UV_1)uQGq5amwSY5{wlQ1}z`rHKk#FI=&rEeZP3m;PzF9sPEi{ z^}SROJDU2whx#}VpMpMfx;ARxiyG8dkWoX&0i_7~2S5D$sn3td51fg)J(Q319i_T; 
z5)L+Sg)utL7seDspCyZQ9+tG}xM>k1DY(MpipWCbLujo2Lqx}_MN7;v?%R{ZB`1yn5 zotMX3m9B}ezh4eQ!Xh-SQPn&|PkGFAQUdQQOCt;$Wx zFjudkHC(S(c-~{$n!Hwmzl8gJn2hu(Cth!RuV&%7C=?On0lS%N_^xFj$!d3-^4Gy=XR~emnPHt zME(u4#cy9CUiywkA-(Sp0C4`UV;-DES5>weBq!1jeUdD82IYqIxw!-M-)Zzt*Bf=6 zE3Vi&p`S14(|$_Qb1vFnwR0}Uw?plpJgN0B109R_x!U~ls81athZoM!^P)apJN=!0 z#P^rKO}Y8F@JT%!y$M=4L+3+>gYzGNgZ9aKT(8Zqd|Yw)tJxWUpmv7!1Nm)UhTr}% zef_lE2D-xvGW_JwYfVpuQP^E`i5y&udz@%QAMUk}$0-+FhWewZK?B#-SD z`0);{2k#pRy}5rtilyhvmS{ipbV#viQtkq2XU}A7A2L%#m6m8YA944u_8uPL(dF$P z6zM$jWrOKV%h>5yI77$z!syGTT)YW_4Cm*!3OUXWL6?5<#E#;53c?-keaxpbcGM&H z)#|5QhkO$Dzt*VymSp&d_5WjrKPWo2cB!KEek)-8h>rL5P&D%WVZ84n{X+JchWMmy zi-Za1OABaUZqk0MXgrr&aL!Z4F`g8~MVLRQu1V8f8KZ@QU-DrR2qpzi5 zj#qxqJ$|q##TvbT!2V*KH`22r}q(`m2h&7 z(mk1i3gD6}&91y&g24A0`7yMeyhGtO*l%>aJD~__c&osMcFh{J&)=y|?}MIg`1pGU z;|(u=&)_V@59gG$9aF3wS6aF2Ex++$+-GgPGYEubsKf#Kva-wU^MvAvT(dH%#~&To`2YWvc+qBiKKig+?FBV6>m;@_`&jpcuGekfN9r9hnWoAOCw zauK^;FgcI=ls^+T7%m@e)_&Qn@SVTY_c72fbR74|dP%493;Ca;Rq3)nlJcdSUc+pd zcPX6GBp8Qq+4nZeBmU6d*-*OFzq9et^qmckH|aYY`S}75?=A^FN@p5;)4S!ZmOTA!rTeUd+! 
zV)m!$<$Uy7!)J~7L*sM3q#y1*h=qZktQvit52-J+??+5;kiQDPp=18+`wp)O^DBF# zeBLPk6~x7zq|rPkx;l+$_U;~IZByY)T4+sJO^t{e4%!cQJk`G}Wi<`1LQwv6AT ze8+yL>sg}yq;x*4dZ9LdPxT|g5BCiw0771NFKhHl-vg+-M|76d7wUEDW7acVdwPGyUOXRb zC)D56Cu|2yWMorRoE$WI-em4Q5cazh1 zZXQ>?4*ON*+Qu8_10N4g)cY^ww_T6bk5>ix>IUR}^G2bo?_0zbeYYlQQ~Nxw=({yZ zo7wLbeYYlQGy6WhCwD;L^cX*-cDp@`D*d2Duz+Jnz8bJEX$Qg+Pi=@e`~h% z6|=6rd`0I;_O4($ zuYl2T8PZpF2pIH>^0&j|_{#LO{WcHrcWeE-G`{{yJCEaES&!SM-<8LESO-|caONj_ z2F-U>kCCrm@*|$x*em_y?c4>JY#;49;_2@KR#zYZ$8`Xv$J=$j5^~d zIa9@_o*a)~KEBlUH*_5-J8$xQwQ-JI0{vj~sMzLn-4inNsR^uw_1JuBLS{advB!_R z&h>8XhwVD9h2xpr58R>p*!5X>FKQxazapJa>Vm+E%HOEIT%+_{*{bwe(Z$~5y97$N zq3QJBBgF{&k*&Woo9U&88>dH){`oAXUlgP>e9J?~KU$&q`wf1MmFX*wP+xlhkMsZ3 zS$!`H(g|-4>szn6eBFZapLtgekAsC<_|jr0WmyEjo_$d_bb30uE5V96$NaTnWDKBe~w^`YXqy!sxkD zAep)9m;8~mtx>;oo%n8VI~<1TLcMkX=TzN(_VWvDck~9VdsL!dF@JLRqXLiP5lNrk zyTxy?a8z3NEdC<@6XHew-K*_LpUDE|!v3L{nDZ%1)T^mcTHl#NUC4wApb7(2P5*)8zeqyTQ2nK8ORC!uwO0V zc>4kIeY~a}(t>i6a?_*VfIpZIi#R`i31ZP-v&946h=`tKlE5lexL^=zkeUX@n?S#vAAF1#RH08IcMX%y-zquA4H&=eLt7=yfH%; z(wp&jGQLa55rz8U3h0M?^r$2xPLGP_V}I>q`_?Yd_T8)T-d@6;`2*N<+xAI%*Y;<{ z_wysqK;INmB(7*X3b}m(z}tII&zB_Z?HvW;qsl?@x(D^++=#$seHXL7${q>d(V`51 zd#)08J+=(}R|K5Z=HHQiPtxR|BGt-x^EBYQ9ZC3Kyd8Gf_T2(6-uAS@4f1cX{D;ss z=KlioS9VA~?0ads$`%XHt1Ca0{<&Zg>2yHL-L3tftaZA5Oc3s}_aexrY~M5H6aUY` z-j25!-)z@%_lz5ma`86BBi^p~EzMP;%TbncOvmyFM)!<6U&9vfdboL$LF`i2j{xhF^v7 zBU+7oC$Na{iKL72+d;w0>F#_`-_LhNflnLhkumQ?yy;`?CkniHV2%3ytJJS77r&5uqre@k|D$!Eq_Tp`N2mmD z8ThaWKg>rvpjp{3ddAY1j>h?8$Ym?=ZGz9Tba^>tF<74npDodJwx<((*1TN9ZudA{ zz28dbINek*_k0KaAI|mhX6Sys zD0&`k4c~{5c6Ff=ILZT01hIYVZm;zh!*vJtT~a(+201F?=O8>_12vzk>9hw~&iRn# z&qeuXtX!QOj2R*H%18gK{h`f??&g~!#lZ$BrH}2ac;jn!leyE0r$NM=s!r6^4m3&0Ua{t<(+&!b)CEe>|zKt8|@eJYYSdVnjn=Wf7gmBiQ zK3}i8XN`7P75z)+`PjZ1AOAqwcWVPYDUZyzRJaD^ZJpcoUSjW}`FQ7gFip4bjVBLl z{l1@>&F9}G&qx0$e3ibVJyFZ0-($bl=KXVul0f{}LfVb=OLiYCwIdNH@ss!$`Q$I= zDPy0Odi*>g;n3mTWv$=z8ujEgi1&37>JhDtxtxY5 z-`{u!_!17~j`7LZJyP$DtF&Apx>$br{K)hBchCGh3HEuVT>5=2JI~|k?UNN=`_1aF 
zm@`#=EQ4tw_}IMZnUNZ9v<~3w3h#Xv@ITUe!4j><=ZE3?!NVvY_wN^ceI3EqIo!VS z{wz(f{<40FGxt54uhV+`yi2Gb_JLpH0i|d4ZJLkrb&k~ktk%DCpZGqX9YuLvr~AmP z(&xjaiwqCYhtBKe{{Epu&&$3u_hA@qAsKI*s_WZ!k0xw~)UyPQgmXFf^&-OkDvqke+ozeFPc?Zf zs&DUamYcYWf$~emgB`7We)T4FI^%7ealE;{oV`!;(3%?m9sfNf<%8kgL-PCL-u`ym zH{-YqRK<>@w3Mb_z~)4yo{j21@}&4a-%juGpd2+wxv5p=Nxok1dNIAnbDtEC>hAGe z&(f%umD7j3fE{Y*?5@@L(iGJ|r41jHi1vFm-sfYrdpv4rRAC#^HG|f4y*4gVuE!%B z+xs%LeVZ=Hm$WIL9}6F`CFG+g$VUUlM_Y}LDuekbS*!Bk=Yf;ETSX49ZZ&w6%4)dK?Xl!jJLhyO{D-u|d|fcz7X?0~Gr#`_ zR!P?lMI-O^7OOcN25J8h|kb(bhsxc{owmPzTdjCN7IkyyyiUUv1HMi3U8~H zce(R*bMniI@1wsS()4(%=J)rn_--umk1HzYiS3&wvlZcVzx;l~uS-}Gb+kN4<3 zwI)4B+6a%Szys@)qJLfQkk7OC#aaV-@OfOQf3x}jV|7jcy1pXZ?0(@(Lbx{xxdz=A z%jSPBglqglxY>JUO(EQldbrvAUk~9r9;O!wKf53K(=h++didG=e;wpMk@wtYz&==p z>WlavRfzw8!#%g?JZd_19FH!-PoF2!?z||IJ{9Sd8|stex##vI)&>0i!?^WXK_EV7 zpZK92^DOHz{p@n#dL^`HK94PgxT5oha;{C`**iD*PJ_r*>C%@=eCa{khtX+Yz7LLbp6WlO6MW=K9J*;j_)HuNH|}QEp5PIIk>B*pC!KU`;fn%TV3l9 z(s^^hufETk9pB11y}ugUJ)WAr({}s3oA5q#J@(B(PhwPEEY+v;opz%~x#<`3ed*>a zHND5=o9$(}a!$Wf?CsA-KT*A9<=KvXnRcuX+TraD^;S0j8^ZkOC>|kS@O&ESOUHKo z@o%I5x2+O9%65-8Twj}re11j0;X)G%!iDWxz;*#H9r@@&`orMR?gR#2=8@hXJNGF^Y(kccsv!NP%y{M4mY@PEzT_>r@`9AAkML4$a%ZK${ zfcjE9CEa)E5P0!+h3DTP!FgDLmws2T?SO?xOMZNh$HEfC`rCd9`~F(;nBtQ>wpZfu zy*cxDiyyXA_@htZaJ_)-Cf-FvvHdqTQiv3-}v@2xjec;QmM=+ZFT zONeamcjngcD2)DzoPR8g{;_^v(9UVNzNB5c5#`czAv1Mdop4@z1LDW}K9t7CXCeVE z&Z$3mJ^9cBrE$7160S#ZKbms)Y;Zn+dbSmCS|JV#g75nAX{M=}^`>vUS=)tfJ(Swf zh6l$Ba%|jG#9z|o)~FkGQNL|PxHRcziBCFpe>WMc`)M)e+jJox%>(?z&&2>J%aab{ zucrLW6TU0wW=pxaV*7QK<;s_B(|*YW?Au#-hJ|mp@GJ>;Rc^KL0t>5N#rInz9O5(7 zcR^okc-{RF&)KkGmEBynnZEK>NSe@0uUy zGo)XKnJmviqbrc4@oa?(qEvANgpt z;6XW|2FpkKU3cmk{7c*CdY$y3%SZnvy&a#VP3awP zQ~kqtvGxGJ5HC4T)HYT5KfsTC67s{y41T2yKaf8{et7;tk!OJm{V3}Y&Zqc}uZ2~9 zdfP6xu<8N-{s-Q5vGV)G$2x?C_gYx>fcH1vIgCAE>^g9CE$S-`BuBd~IjH(jyC>cw0{n5F@`&IF3ds@SWAMQbEJNpjb-gt?i6Zv=k3L|X2 z^iqu{AL3u+=cQN7(fCnlMiYK6%wtU${x1B_d~aQVKX422|G-7uPiHLol=EaeY+wHi{|@ 
zy>lW9M|kXSKR4*_0b~9o@O>UZxUZOp@^)@KA0Yr2rgtKW`sz{bQc3rFU?IO#FFJqL z^z3t5U(jxjr~ZBCn!ej7`50ysu>UP#);lY(i&DL?S;rfnAGISN9rUK~QHYnwf1jq) zPNrSPc7F>dQoPf~ryVAz_U=R2e$ugk^-nsFcRfPA$*KwOgW#F4Ue<&8jI<-(VfaF~ z()qdu%S`}%F)xvPHNB&Dc@`e?|Nc#b>)9*ySox+Y0>7qT)Gmut{i5S@oaz^Af2v=U z4*vc_l6e=!-+f}gxP#gY)&q1shW)7eQ}sbt-`Avl-JOO{|Cc3<^PwWw@xT`~Jm2uy z`WG5rr1%V5&)D}?3r(wq4{GylbJ>J zJ9WP>>>sfMHz-vI_dQrgsh#UrK4AP#%qN|XoNt{UQh8pl1om~Ac&EwjCWX&)P1Epm z6y$s{{8Y|W4P*Vr_^w;?`}ea$e8uiJJCAskZXC>4D$i@AAAB8y`9AeMjHf#kF7co| zdwarjMxS85iseIdinKeP(6G>f={qeQ?V!B4JY!u#>SaH%oy`AT&^Cl8=h%$iVZLu? z^3fi0d(+ROklp|-aK8t`9_NB(e6Pu$%28OK>y4-XZE(9tN8;Co_}I>8`TJ4SH%JEd1dr=NiRenm3Vw-UNRPPDnPVr=&G^$)GM@KT&~G4 zr~>1w;@OkYPn18pLarcwIk#HsBmWfx&KY)`i%4gnulWhuB)E|OZ)AW zn!hmOW%8puVwuv<=u$d+sl;dL=X6WvuaSEF{6aEzhJ;Da?VxA8MZ?v1YCV|uNP1jR zy1=fou+l3%e^^mECT;s9o$W$XWZd}%+hhI5c61=VYlqUmtMYZpkNKkbP6x-U`Z~qK z@At+13Lo~o;xVvS%g=vW!&?-;WUSIXnfaWh?^l29v*ME;1W z?}JtF?v$R#i6<~O_v;dZ|~)FVrjK5O1IB+w|QXq_N)Kr*vH_xgP=t*L{W3 zZRNw-uHL!Yj@~m!70AiJCTVB)T($GN29&?L=bk6&aliI&_Z2fNyg>6`ahZjcFS@Uo zCgJ3^sTQyFORgwtctH6wnfodWD_-9Y6N4Xg8`Os%G_S<(>==cRNa!!{1aNb*Dz0}h$ z!T)i3-jmeeI9L?=@O!&lvA{VP2^X5KlJvaX-hlktJ+N?{x5)Z8FBbbOw6kY`v0Wci zU+xDa*8jkI@HrP1;JDs+Gwt;*4ZGbQ=8u_wr>4_hSRUB#J`V`<_cH(En%~#MnO;N` z>;E~@m*F?VtIhZC5s2I(FWzO8_W1c?e;3#Hv$!ww+D@e%K*;`X5kBF6uK|3&AEsX= zAP?%gP4iHz5Kp?oz*y zdy%v&+*c$ajz&*y%;>3G13eXwULf_Y7^C{XYlIr2T_bfo>B{SRP8SzK;kt54pYAqY z*Xzou9_?=XuJ*TmuO9ZIh1H%-_XToAft{!HcjdOGX4dOUPqlcDpJfX@C-fb&<+ zXzS-Q)NYXe%<%my&{F5K#G{bGgB;;_)aZMaOCGh9>~z~`Q^3x*>38Aq{Zmh zU6lIbB}&IoPctL&UIm+fjF#XHq*%EI$;(BHS=e1ZbX zdgenk;(=DFC*FFd`u2`p+^6-In?5G^`na4tcDwSitrK5;&R$9P{^B}O_8j+v_Z}ZT zsu1aVdOV_Z^mpg%o!@Nx+0HqbQ^ozGt$kU#ZrAqBG+3eEhvMxTrrj|Z-ZcU!294-K{AK;|eHzbpdO_g+iKaX-XIylGY`zMUw{}~-? 
z6wVA;4-4yij{M#>P2!nvpwo zaL!G_KJJq~sP<6djK7g~czkHr0TtoqvIyTtD+r|aB) z&eKS+5I*6Sn{+()b=zc|!mICZ=4Sn$Py1s$!oKg~_cVMx+1~}_ILwlME_H8-(Td6m>Tjix*pZ=cIcRc!Z zM@FCCy8QU`$CZQhM~C1Sn?0JeDgWX=gz|r<@@?F&`Y@Sk_pAm~e_=moJsr7#({ivhtSdcJ?>Nar2ceAe)jj0@0_8r%fJLh{9J9m-*548bI5xW zYqWf_Uj2OZ@5=wf(4QaXyrE8i-XnmVj^2(WeGk>>>HAauex<)ZI^FTm?`kG%m&>@~ z<5YS-2YQz-y`KZVUWIVHMaRL?Rr{qKrJK$(dMcdKq;?5s@9Q|6@(6oxuwUn!_4juE zcls_nNFChUY14Y_zInJoPD^x1ajy-ya}c?bmYd$CBQ?`DLPy zyEp57rDU_+58ABbL9+R`Bt0MLLpaA`Z~b>nq20bH=j{DK8^2+HYC8^NUtC4`*sl7) z;qd#kbjK_oW)Jweu=rB77gK$8sp%UVXX8tiQPS~sruJWKe1-8}#@*WZZ}MsH=lZzs z<9s+S7rFk@C(TL6W%gSUQS7&8(QnJ}o8j6z$OeRTeQYuOxYD8He%oy6uefrNg>SI% zr53))!m5|?eGLi2?veCfyJzk9aoxV30{))H`rvPY99(H(*f|Ffjr!HDYjiJ{f)kEc zVuzR<`1)x)pyM6ed&}iqH>uf?9KZ9?3RZ)9lz-#x8-?#DK_1S8iIx|wHF%x(0_6Ae zf%#~r@EPUQovxoLoCG{Z7jM&c;69`BWseRFDPQ!E3beCq9P62CVH?MKiWWY3{P0vm zewZco`u;2XX(M9dKI4l{wO`{t=KVJ{xhx~m$K-KI9g^Tx4Mb5kW&Qt#GSN^ZP7du|!lk~mV7hCwn0wC@i zX67Sk zP?VleI!Eb?cYq~-JYahIT74I)G>)r)*f+TLZt+)6yj}eG+9l#A6Bk%m^CuJMN!ag~ zk+1MCy(d)aP=!K$P3PldYA1APJkE;>U#*;|^(IeRedDzrmZP5Ya~b()K>Mkp>uB9= z6D<6k!0C26ZdZNPy>^PWx3W*tleU*z_*o0T(!#pV=K3tzY~$2sv&T1^9llxhZ`hwl z8mC4)&}f{xNeIxDwnOcWu8Oxq?T$Fb+vcAY$J^$m6~|lc5ub;}Hg3cfjZfN2s>dp3 zzqILmv#VluPTSd%j&~o#?`}KO@|)db_x96!ITbCR_`RKq!cW@t{AO20%dhAgFK~ML zTg303p?tP-s>z9(7g6^n7;=|1JaC?UO!1K2te{FnHc)o5B+Ib&pNEfv~LcLB^gYPE^Ah+|oJC$DD zoyIqv#?PHfm!wna0XY=9cDJQ+XknMb!Ez91DqQ$^wLS%&RuXlL8{FZYb;Xs;4NqGSbq{zCA;zL?_McR>CAgNk28ib*-jALa5w z*aAbf+x0-)Q7b3)ncbg{{zT#4tm#}Q%D&6+<+s+>$^1PuoOhD`Xq5kx4f0p7t-;T} zpRhN~KU?#K@U!{*gZ%H;>`{-M+b8|!W%g}mA`j^VGZp8~RPXt^W&yjDq(^y#j#s23 zPpUZo`1}ap$q;@njQEADyG#L{rXe5CQ;Gd=;i}0=`;$n7w*9#9ov+16PtP;YVQ%DKr1OeumxkG{b3j{v=Wkg1|L>?DhsyF*FTl6`THj}g zSEB+rzZYA7-&5O@kMx7$DL6K3k zU&NH3Y$(<{DyWApTi&C}avh~DZ+*Kdjv_W8N*mt5VU_NcFqx;~`d znicFrQI1*zy@GcYL@wQ4@pT1X2S_^4JV5-DyX)`Sv17t@pGNchcwm*lXT9TL3e>Or zOd|LweqxsRjpp?{htGE0o@qz(zi?i^SK!BXpC%s0y#9@l!=dN(CQtr;lJ9Svw0Zp{ z((Z6PJ)iWj{o$Hk{;Z zJH(Uw^r0T{dCK3v2K=^7=lk(ao7dX+WL(dZE+~%u_PxSq9$(dNP1m28@7=#ie%Jd7 
z7{7JA>u(i5#K-IZB-Sn6&M4UVEzGl}o^tNxcCJ+C`-P^9q(6MUj`9c+NdLb>>NWlB z=UU93WBZZC(-E zJR_u=&o7?3T;mZiUN*$gfX6v`=O+m_umS8N4Lt?} z#k)c%+TH`eJKRrYyxaZc(>MK2jqjb0S${J?a=yWO(|aWrE=^{vrqlQDlKQYK1E8ad1I9(IpM_e!2(sy!_`Kk~6y`X}BcWadOhk^yas2vyb5t@7G z&T|yr^_IW%pw{o}gnrLw6wu`i&(EoqZr6Ma(}m*%NG12inWJPC@*X)I-_c;byYQQS z+2)=8ea7-ggIFvb5Sg|iUJ&G<=tne(l7?sp-=i!jDHzUjRUw4eD3`6B+(pD2{}cH|@F z|Ete1J6fxcoR4XLKAMrkFX0G>>v6Yd{C;n39q2--&*iTf{YST=M}2><*uN9$a#GT1 zrqF{q$zReBxzDOx&eQVw=ra;#IkI$Y<4m`m6LENMNBTNKZ0phGoC-q7Z_@xLFSmJW z@De+3nb^B)$p&Ru!l%ne{~>sUa8!>Tlz9J+Z~AVb;hEg4aME{J)+@X!nG=q5UzMTz zM?$)LKTRJx|Jopby&Nhb{hpiax1*QCYXpH`zZ||o@F>~$&`Q%_jKI~)p}xC2d^wyT zaE?X}UmwV!>!VQqI6fYa{Jk;g*Hb5de=7X!@15nN{~_W3m*nq~knY2fzp+97dilFS z`RepID0KLBjDuVUJ{~#z-kVR4gTIb(@N*&Ehara_4D#2@;puUZ0(fM5^u&*YoM9Y~ z{JkUS*VE(Rsk1|WJEZ$C#38!)9a(Z z8`6Ck^7p17f4%&jo)->J{!ZS!kX?8@a(F|~zo$+PPtOb6Lb?w_4u7(uHg2AlLrNX^ z{slBId@=E{a|g$=U;k{-zo+H!)Y+puL%I(`4j&5g*URB)dz5nd>##?8bme&DZ+XzK zr%wJ(+oM;7bRUNNjSuplmOn}x$H8C!c^MvwIv)8uxV&cHoR+^+H~#%)NcUmL-$0Q6 zwET(qy&&Z8MaL6c_jB&ae7*nS=SuyYP%=}`*XQN5HqNE3Fb&i3T|B|#>J@ely-U-pPpHrH z(7QAA_|>vtTz_2iwOhH`IdnZBAMY@_fQED{)~oODFgadn|KC8Ylpn-LP|l&}cj`IV z*^-R&-`g|iV3j|II){#XIFf(p?+=;WpuLnQ()+u(|L^D5uODsnbUCo|=zf2_s@H?l zdoPW?8;$EBa*u>9ApFncoq^cyiTL-U>+e#JW;(0)qh7Fj$>uFwm1$z%Xx z*uIxRej^3bck8`;Md6h$ohtcD4_++3e-K;xzTxoE%ftM~z(?zF5B4-4oqRqj1pO59 z5%t{3;G3eHK=De{IfaC ze+>N79N0yt`KM-=kYD`!xZaP)V;4OX^wY`YpVdJl--yzJXSeZ%3S&xiSsfsY;^mK}7OkB-+4`dH9UAs-b_5+8jy zsBbuYv?k1d41Cl%tnutLA000rEeZN5sZIrQaK4XL79C)3r z2LQUppQquoMv9N`d+5crDEMaS;QQT-M;Bwg@!zB!_-@0X zdi^{R<&HIna>9NN`Ct$FH?Ay@^1UrrNxfnDnVItB!;tTGqdebvJ1{0MW%(y*SuDT# zjv5{DrStww?q4q9eDp6Wm+K`cdg5mM4Ed1!L_WG4{F;n4xiP*bp9#N#IamerJ@-ns zNAneOiU8}s73oP!EB-1zEa%@5W;*%H*K6~5eV6%q)^DYU_j6H^_#>sB0`fD)%Xxq2 zqrW`v{&??k_Q%60Prb7Z_$3c7#$SX%hDDH%eDo!y_Yn3#Vf3q0Jy_N92e*vGI zLij7uuAc37kEvq$(GQnr%Ks_K6R+#6++8Ra;=hLZ13l;P??k?2?M8WjwlK=ZukBV3 z=(iX99wD9r2W9A3pToH=Q{NwBy)rCEd_(*eFn>Ug#jNLXg&We@;m^;&KS=lu+A|0F 
z3Zth<|HRw19}4)aF8LxKy+$e^|Ki^uxnRQ3a?z0~C&31CaDgO*bYFmeIkJAdj0&Y z07zQ)icdbE949S%Bplm!q>`51nm$@Q=#zaKMt-_fpFg}`!ujYMs$Zb1>1fvwZcm2< zMNX(UOJ~1aB5LQ+E|Rdk7rG{|neo@ZJCO2eTm5$j!tboI-9(M&FX^Z^Ui}rWt85Xu z#{1~v$2)hZZ||NTOTT<&)$#SqU!%N#SDXDP`ntvXix8JFC`CSLM+ikqA zNVJUawA)jAKG6HL-wD_4>Q8_^Jv)!yZr?__2KCXdj{DR;3gK6fFWIc|g)yoRleN>t z4ywM(@EtRh{XRqU1^8#}_xES;IFkMTd(0n03V6*93hnj3m<~M9Zo2ZAdE&F2*jW%>{1mzR={4$uyoXZx#2c@4 z_54>q9{j&;ud7@PNAG0q_4(*exA%|5UcVOQ>&Jt2(r-x%65-f?l<%ZvxrF12%0+6| zL*LLP53doQ`RQuo!J`uP@gUx=(UOO~3Q=oQBiy_wLv5g;EUX z^q-Y*(o&R&^!)c+Ff?2;Sp_B^y-dPK)|ahXZtZA+8_o}&CBL{_Kpx(VoRo)W&>xNF zN0XR8Xm@sgw1@pzk?Lf8Ue5eaXqe-T!~Y-1R~U7MBnY2t7I`<{=grx6z2EW_Mvt+1 zh_ISP+wa@-K73^Rne<}$>j0N@ z{e6_L*O$)<-b2}C_%56A!*0!YM7#MPl<)7A>~(f?^=yj=UNYW7KVT@vcl9op1lrMb z`RJA66W`XOw4={tzXy7V5JNlqtACGlySFn6_5tnHc%W6~p#MzqU5>rKO6!&HR~Wxe z%C$!?LAgRMFL*9QCC#h{jbVLnRWX$B8p(GXn$DAaN9ylnW+e4{zbEq#$~=$#cNhDg zz~P27f6Dg%8&E!ZO8Xo4tpsk;vQOZ}6|HwDK5+XyAL;nuba8#1k3Lu%Hv}SR13w?V zf7p8PX5??A2ZyrPe>{1xyoeoU_Ite^Tqk(MsUBRT;ZzT<(r~H=mrMA_dhm79TGE4E z<90(mee_;hvR3tRs0Wur5JEjj3zz*p8U59>y?)*>miYrcm|gF0N4ZcBPGkN+4?6rd zmfRILlW)FT=-qYvI?@xak@XB8Yusa=>=nf(mPPc>9p~ z-uau_Zd7yVaB{--P(HjJIkcmu7mvK|OZu@q*hlF6^S3n8i|Yi>p~idMqmy>K9eHH? 
zQOEaqyOtY{{kRJ4O2+DV?fen%)bfqii!TI0a9>aGD&%zHLp~IHZJ6uDSw8l0ujg@9 zsNuaz_7mxdu8vCAk^89+m7nVQ|HmRvhtuan*_8(aeeU)x>&@DiMdh-zOr>4)pb4c~M+64>IoMyCVXnn5j%}3`-yN|@LRoicLzQz4n!He>-6Z}Vd(G6!F zl;z(F$|p~)lk(pGNy{1uC-<)spYW+)YxAJN{F{$-og>r(s=sZWdWikfP|sxV9lnqI z?sgy2?~%s+`?X&N^nPq%q|&?Y-r?j{#lHmvBHmZH-N{?7QFQf{S}xu9y;q{;{O7d5 zULbw%UmWNaHJhUL%QQdxgBs2G*T27*{HtN{fiLCG-^*}&sx)*U*GBo5 zG|I2Ti1g#)4E!X_Z*&UbXY=0{=067hUG&oz2LE0Vwxa?6PK^F*^KTLDI~DwUNl52m z@bB10`47Xt6To{kUK^E`#?zNVes#wM)?oJ zzrA7pVes#7hxw0zfB)&nFAV-|3ERp?=ugt$(_dON*cS=a-VeoHLqx^^A-4S(0tqY-I{##&xcs9e%_|@?nd*leIb2^!M7i4l>ab%`xjyUVesuo!u-deci;K~ zv3r+=?P$Qar{`@oKJ|UKe022d4r@X>4}*ViX_Wsk{QKrG|1kLX`Y``7@bC5)h~3*9 zwxa?6uFM^e-o1co3VK&NY-qjv>svQlhV~7ufAze#>)GSs-(pDTVes!S?g++p&wm*H z9UsCU2LJvbgntbDTX})#-(6ul8t^Z7H2y2wy%n_YRPgWKkj~DJ_fopCuhw9lxW7^U z!|?BC!~DbG-w%ZO)vSu*DbET%@zj0d*X>(iJ1B<~zaLcXm2BxcJogd4!};960kN-Q ze>c(JQMC6y3nOelqu<_9yrL+8lDW2@k$FckFQ*8wpK*?!BM8^C*JRee?+Dvv`1wA= zE)FB}CDG}7lckPn5>dMMmEvdhaCyYf4srjYZr{TCf%S4n zro@XRaQWzmsr|`!g;%Qjj!M_^6esfCryX6LEy1Y0L;c$MX|;cS-?`b+eIIV6g}pq_ zYvEs18qdFIFOj8>M$0}4-kGHHeG;6nIKVLXx%LF~_4C;wU+e_{KSz?~3-ZGge+z!7 z?34DEb9$c!_n@@>y1f|nKF-=mm(Qp1#3*^n_gVgz#8XZ-2KW&Ep#JU-z0ub zu69c}Ie~KZqc;TOX#=?uJLjn7>QzF}(!JM+f1>2d_t(RHT6RS~()k_j5$b`O++D7C z+{fE*kOS-In%s@h`(M^iHNG5tpDmO_+C`1zknt|3Cqo|PT=-ULMxCCfyc9(qHwu50 zbN_OP_{7^Em2h>3mg6}9@}=t+mU{&Ez0z~M8#EvF2g@`4kMX^w^xamIYnSIna{KL& zpE?ZRU_8vq?dKciKaAY|LzsVdJ^ZZP{&A4MFk+mZJH_xoQkE`51L1lma+M6{H%99I zdSS#%B%S-$+Yq0=p9}p8vP4OTmUDfZbf`XK7^Fiw{=ntsR3+d~(2gsNAa|l3z$F}p zQAGHL<+`n$@2|5TnBF^G%M<>1g!9pVir#d&^7p&>PB&e0w~o`v-8v3aE{GxS&8i+I zP`JW~$>J|VJ;cZFJ2!Jv3eMZ-<@0h|HHyx^PU%!FXujq->hryE*0W-xhU2pOE6l%R zf#P%iBdS3C9HO^_c#sY}A0%|qVI1w%an;|qyu;rkt|j(5SLD}=zszw-+nK)C zJwfw1d_Tw2e3`~4PZ}IlN;>#^-RD2T5b5pra~19~F(of$}i}TiO9iY zW~58=T*)ryLJ#sBLz8wy;fxETKSfeLx==Dy@6vpPOA^GFYuM?y5NyQrIZN}(VN3I) zg8|bkMup|rTJ0PgN6L}4snI8kbjZjm5;tFd_g#@uaJN45ZlM{ zHGZw!doTbuPYxSOjgTBUroat zZ}O3z`(yqp()}Dw0q;rE)#$JEqnwbw?EfxwNp&%E)!r5N^GDT-G`@;_bcIo$6#(RK 
zI=6e-U$md9n>3z!lUaIQBKelM*b=qhj_xFX_=1( z3{HOvhw(k{!XLPN^k))JIP9OLBY#)=4d;&JgdE{$KYl1Jui^JwxpSfmtlW9CDe?KGJm3g)J>j?-0r}%=9?L`x_F!bmUcHUl&IH zRL>g&4mD3cdS_ZMhosVUwFn4D)4yuv-_cY=dST>`qIU6f5yScD9cg(Dua!3yI!L}6 z9aah)Z~xMfOJw9IjQq5mC?x&J`T3{|e^oDQ_%i~>&zBZPekI~et1$AL|A)MH0ki6? z?#A~44wq;JF$c&o2?q$5j2Sz`#NcRjOiaWv8ge;;sLh;AC=e3mFfn1X^%Ig95$z|r zn9!Jha?T7evD$}vMU4*Ds95b+>aEmzq4kEfwzOKsTlufw`mMG1JNujg6661AzwQU- zJ?q`?WxZ>ycdhkax9-dv*{;cz{*b3)^aol`=;v>ToZrrL=s$~oAo_D~ui_gI4Kv)c zli@JiA?H{XE)_nY^(W7%XaUJfP`k7@W(P~&N_j3u-_rQfgu|-0N)wJmNP%{tAIUjp z#S8Qxe3`{-o6f?8A(kY{zTnlYTAyy4vzX_~ilL-Yfo}^V|6o?;;&WEIsrlTI_ae>O9RJ ztuw!WlF$+FoqnZ>lhJ(j1MtJ`Qs?#mp!~-^J;b|yFHIPseGq=p9!yK~v!4#4I4zIw z|4M>`Nb?aMuW)I?EA=Lb(u9=-PzL?%MZtX)kROl~e(BNNcyH2G z**K>({x@2$`@QfV#4SFK{1wUHIsF&<-urcZi$uzklp}4o?1z$Pr@y>j+vWSz`^z_3yV?H!@_@ptMTrYa(hk!3O1h;{|Ba+iyytbAZtXDh z*B^eZ!XbU0D|pF#*ngr$AwEwLd{zrxl@8`dpuUQvyUjw-39!=%@`tAw-7A!r^5kzt z!p_Be6b|c!oP~5ix)CWC59n7Dxf|(IFse5w2dkwN;Ip<#`M46!ggmv~lt17H4SwYU zjn;mOB*p0cT0iiUCJB6Z3d0{LM(;>rkgyoh4tn~|^T8$dd+0auCsTN~!f#3QLzRnt zS2%2M-cD6t70REoWz;ziwYz74yC~1bD-8uRxxSq+=(SI3lzA8}DBd;9(Cyw}-tPTM zGbS&oFnnds?mdU~f`4d8!fo<`$9n)OT|vjr^A(i%M-iQ_TPI+WJS~32G;dFX<;dTP zr0ZPVuW(4OlgY92IVL5L^Mn09*nS(<)q`*()Ee2Yb$b$NHd$@FeB!T7IY}^7a^d7W(i@2ag}GHR_t){e)WM7K3R= z<3VpH+gE?s+SOotN`+NgZ`|0b`C@C|z&qKF+E5?MyA0Y; zF4EBF+mdpZ!l8UELAlqnTrUsh{1eLA#d6Uuj89!3El0DE?=I$pzoc~8PT22tDeCMj z=>`dj&0goriOqidxT~c+9-9<8lWuDpAJulOD=*Qo-uSqN1LbZFfiFyY3w`UvFU8-+ zd1KX=^peivFKNb*Zr%@f$(u0#LqeDO&<7Qdc<{XpcMUPz!*NAzH}yi?WPdM>!>C&D zh0`w2K*x=mel4S;bv7=Eo6|IY;1c?4$@yn$`sC`<8E#;|?7yD*frSZuo!RPUePap{`{mba>{W*S^ z3b`1auk@CC>y-ZS!<0wxkMlW{gLdyRACRx7RnINZ@{1AWuQXwDG)H1e6Q<~Rwlv|i zXr91h`O&F-iax?qjM!m;AId34KTYLruELyON4|={os0QCl!KG`-G3$|c_PMTXjk0e z_d1g@^+>#{q4?Gx=KMjthw~13Ixe3i>Q(YzY5FQ2Wd9iG>XKL~caY^JU5gY>mQ^%# zJDNycvd9reA+J*|4=UE`npl`LN|*X#0{=%W#5fW@5)}UB9BEf_ zE9C>_`VEfp|26*(BxK-cBgOFO~eeWw)}DSy%bkrUr< zUVlp#KTCd2jx!kb&AXC*o)n|^ry)%2d(G7;Sk;jF7HTlcUZ7s&fBJ>;zQOm9%Z(Br 
zH`gnFO5-rQBKS|ArD4*c-A(RAX1fx*_c%Tsy-(WR%lX6D?5W$M#LtsI+wxt`^u?%K z^MRjO;cz|`qvvUSY0~*B|0({rYqYM9bji;UZ*R{Jz9{`_%=Sn{^7y&y*uQK2G~$!t zy;S*{bT4Drv7&{~&k;U{^(@7{2H_i?c(6y)gT5C?KdWjhP_NN*prYw~{W;!1`iA9s zdzQ*M{!nize;URg|L)=(h91L*4Wmf?Kz%Z@_sx0;^Y%&@VM;h@o5&hz7m2e)f{dOw%%D>S)I z+kX%7^Ysbw0~H^vTU;%##BM5F8priLwDYUK68tEpYPWVvI_LvLbxz|8z<5xOG)LC) z8SncF;s(o&o0P+|&f$Aszst*6J9>Qdn9w7YvlnQ6HKRvW?p?x+rqio5j{KKoHo2=hH{GHPu)%UoU z2r&2|MazCy`6EvzhmOy!n!c#mQm#vqu73du(to*T3-D*Z^Z9eX2e4*%R0|x3hW-&S z_x{&f{~ucYtPkx_d|T*N({vf#u2VRq+c`ovUxzD3oHrU-LHtK-JYSl^eOA>esDq6O(55H7(QoiZ*4Z?4y9d44C-JU-OH+QP`+_}PJO^))kNLxV+a=!bHyN>Z zg6|tU#4b%fkNHkJ_G|nI`^_xTD@ZrD(yj*cp`VWC!$IXo=BHV^Qa{bwmHKJcF6&3BU+e7}W8ODGXO+kAD*}HY z&vDb}F-S4?e)itK61m^AgmT;HV(5OOwL8Se z<#Fy$U`Jj4=4rjPMn(CR+0icRPg1cwaU&ad3W=3Y_b(hh4n$L1>5ko3)WZl{-?xrG zfahZwe^hdGx%Xwn5B=2pajPASy`j`?i0T_rruSo>ckULfGXC>E0!L4aaZa7PxBmNyeqbWBO9f znCGwT2W7oCWc6NmCg@In#)ITzyodbG84N4fmDh7% zcPPi`5iZC5*3V+2gZNpBZ)qI;fXrUMpLA`oy`kOilzvo{uPI5f(Q0u6P-8sIbiQxa z_cO1Smlb(`n}yZFS5z_}Pw0QxZ!Bu77w5}70wl=k9o~1kQGBoJxF2czlHLQPPvPt_({C2r z6b~_7b=>7r1ItB=I;V3#t(TLR|B>^NTI92PpVl+7ny`jf-&Y#m7enlq#zYV^Z{ZR1TW&L=k^<&f9 z?*CNBQIIqIYKoulXO?jX`OvY0^kKKw{b8gO`^9x^mY#X2$P1S@mqX{zm6Mn&*|d}N z+C@5g|BQE%j>TvXJ{Nr}IMY(t=iuhCg7p4AvF&)~7rvqQM?#N5ZvFg? 
zoM$q*BAq&?KUwROb#jgGtZ_i?u&4L)IoQ964Bj4Zhqtp@_$%Ty2Uwt;tS>yT0}bxy zb>gP+%hE${;ywC5>Phb#H2$D}gFjUp2aXv3WFGFQB-eT2lbxfR2EPc(T^`5Z_lL@U zBc%iS=djG1iG8A9WBS0`<8;W{{b$F4cifn!>47Jn#M)JiUe#VsL?39nUdkEDRk@LP zyz0DQk-qONJi@T=6B>>fJzPGjYJn(*K$OH7G%R}d~53$@;A7i;!X%Ni@ z@#XP$Rtqy(zO{!^D(?+{@C#4U&33GNAz|3#cKk?Il3$@dst8|_l_n3ypFCfZ(^>*sa!S|DIZFO^Vshj^e1G$f$}%;`N6BCAmAYxKLGzMZXVV8Gy=MBgD)R}n zpY9hko!blNllL2xd%lcqeLr+;<4_GX8g888uoA7so}t;M>U)!{QCU2Uvdbwz54mD`h%o{^g{e+Kfa(B@$cLI%QKP= z+AqYv=M58v5P$0r&)$}F(B2~cj^F$ByAl7b>%PA)>6k@+HW4HIl=nWKIde`po+80Q zxyCrm{YdcT!rvEpJB@TnPCKZ0JOBDO9Z)zN*S%a+zzE4E`i1#slWb9^e0K~F^5a=< z`nfu(qOk8{>EFh3W0T)x+YYTKZca9TYMO>2-WYcR-^y9gYZZN8Dm+!o6@Ok~=o57P z&N)8MN53qa=Q~67P08o^i0(<}`5e#cc&o3$`7-EL`4r8hfpeTGlIoi_#PN0u8tODsIsLYsHR_xkW8z2=wS z&G(S+Uo4dP2dR&-PY)@plS-N(InD5BFg^T}?KTg7ztAA;FF405`+JoCrE&8$LpJ}k zhVqrpKlKq#=bu&(ek$up%F7fUYn{;NbBhrhUYdNm`b%Ts>+`Z`@2bzk-WZ-@*YSwI z$NI{Kr=5G3@A?t1!JT~HZFmmtAlz$s+I{Zn_rdLc?BsH`rx;zLKhR%cxib7;pz&kR zlO9sJNxBa+?4W(}eSz{lJ;R&n*LMMM;WBMIg~NvaW=kx1-|DCRAC+O{t37rzX?E?qdB4?dg#ywCBs$gO6Xa{6}fm z8s9MZn8DvQIMOg~{IkK64gRUYw8wGd*9Ozih#M1B7LqdkkGSzCr!X;MC5@D=uc*x(l#ywl+48EkgDae=|x?fta|uQ&MB25&U@27@;ne3QXj z4Zg+TWd^^&;1venX>gyxZ#Q_2!S6MAslh`A_Za+;!AlIj&)_bDzi99xgC8}xV(=pd z&ocOf22V5ipADXD@OKT44E~0}$7qKdUoqJH@dovte?Kv9Q12z>9+MC1y>wl`zUwUe zYwi73lb1yXZ#KAM@J54Y8NA-$X@pJh6#38e=>6KBcn|fN&wrqQ zOceVT_NPPmuC4SVJaVp7>49{GXNG<(4dg=cJG&&^)VFBzX!ZjPz5I)$KjXcxvo9Ak zfe7XS(a+IOp*gdAt=_5y#Jl!sy(8Bu80GV2=~;1U67_Fe$KYcs=SlhR z4L)k{mkmB>@J|drWbm&HK5X!K%2DHp!A~-Hufb0&$UuqrseKk$oBlUt{Ul82mPa`wYIr;1vcZ z1}`(1=Mv+_QiK20-uD(8_zw@%|&lTzNZl>iB z=ufcJu%`&2PuAr0N%KOXhXHQaYB`)l=}Z@i5A zkK=1NkL>ebehw)e$4A;c(NFZBN)zU2%!vCjbVnkj@00rFo+d4Sp(O7T*v~upJwa!! 
z#VI3+zfS}8bCB+=8#vJ*`@}WBj8_?!k1*^#q@mw;27l|HWQrg;Z9m^{JE&p2RbI#w zZ(&#}VAe|N8`MDeM`PX*dhJyANAJkd|4jk?!6zM$&tdsK9s?iYza_H_pF1j=AikY` zSLjdrJm^mBFK8N`Qf@`^XY<-@N6S8=3d>JUTT1;p&-7Av58-oIUgCb-E~BT!7JxF6m#(Yl(=bln|+LjiI?0jt;yoXR?4}>%RWlQ%h$jBd}3(N1T~TC zL(Jc_em= zUcPRP^bbmZ$mE6Tv-NTFo%)3-{Ya@Z{|{8q5#)pvOdB3Q$N=1Mq`b}%ed6u_Ze(< zqp`wZ+6`GRNad6AB8p8OxramLOXe9h{aMrT7VTf7q1?Bo@5B4H`s7V~kG_`trmt+i z68!tsSW_*o@m>>HvNk1$1g<5Hq`I2q4Ul)nG=#r@9M%p)-k2Jotu8*qT3hcld=%rmOFK*KA zfS&h>DXOL4Z!hTjNHO}TCOnZ`qkQCF{v*h>^*7T;rNUoo{#t|mu3EUx)+3jxo-U1h z4a;v(kAuIU7s_jhd>5lv>ie+2NfL?gX^Ee$@fT}+Je=ES;_HxY#u={XX8cRoFRx=c zwy(_R)qOu&(qVST_N66mcZR4J$Fw{1rD5_UEp}(8#)t9`y2`wf%4a%`DYIRGrAx>@ z#N4D01(;*a$E9$?=_3U8;+UxoNA(tPA^7_a+Gn^wLc z<@9`ytXIansb}In)Gu+9ddTO2z-JNvD3pim=8m?H6{)htKAQiNjt|Wb zk$preZ@+av%Jm!HzvKNC?RiGfkE(^MxX$SIIn{skhp=97MMsgulfP&$+J$o7+9&(! zG%e#b@y4UrU*utQJbPj%@QADWA;`n{$x@RQEbj4_|)>~_G-KD={{cliEJa~oTmvpmV z#zQY6+-v=I*YgOkwEnx3{%C*YOybdnRdmrCJv^_PEMq_J7dqj|{8i5Thk6{U*yS%> zSKp%9R8K)KBoyTpQD=eYr^9+XB)%x8Gf|WKIn}}i z0L4!s;_c?S7Kz=&xb$G4Us2!tKPPr$PZ#;oSj6xY>`h^my}6Y24{=-(zmN@geH>px zyi&V!sf}CQ?z~8|%R05$U-xHyyffeOx7eLGv0k&!a!(!mYlG!MkGej8cPHp&_O0xG zBJGjbDU&Deo5}oJn}0VuD*HH@Zpg+pc0NSxs>UbhSUY_Gw#`2kBd+5>t|4*aZ?Yc+ zcrFqCxW0;q*ss8!E5DPte`+i?nEkE3hx>uzEsDmNa_;Srbq4K!;e9Xa&()Y;_C+Y3&=ZFx3hiBT-RNcwb@77)f`WL}J(q4>4N->K~m?SYPmn_7(S zgYtbs$(iAJ&*n)2zIwX*<=}mG{+Bj0EZ^nytCG(B@M_`Zrk@unp7CH8=^2#o`96$& z%IUa_a$7TbKj}U?)6bxP?;Owt)^)VMB1t43yUw$IRDbHk0^m|%DFV^Q9MAjrz@(v1TtGWMf?qBAc8q!JOmBacbhG~C_v*^IAeFn>=c9J|EsEDA z+8p&`5TBIUp3HvzSmh!f+@by5+Zpcj)pG3o0_K_UWbNa9{&Ej5^+w|cW|5&51yxR z(rx30p>qiPy5p`h39mH${TS_RG5W0L5Bnipiu8VevPE&lKHj_>WODwAp!|;z?>Snuj32o^TPo0@@$o+T#q;KgUShWpdYj*4`>deXmGA5h>Yt*- z33Pxf2K8Z@_>+k3LbiRHK?NG957V&xu z-(z|R^_;a@1_AOUE2ruEq+>F}ZeLHWFrVpH(Dx^ECB5z6jsI98U@?F0*jBX-t&gT#6e4g|5 zX@&>=*L44q-7_ci&g9$RWZHvnghdG z?62?_fJio9agC;nO&*}%(J%ZyU+0sbU&VWrJlpiTe^;w@-j#Y)?h81I3POH@j^X+n z^k+8jN_{bM3v#iaepg6bOZ$<}zj8oyqW$*HuRZeNl&l|~r{&ffq<^+fGfnxE-7`Xd zLGGo=sxSUsM9#-2z7N^&Df0V?+pDeoiP 
zJi>7BsD>f^&@a&+aI`2GY&mM~%Tbk2fdoK&@nBb#!%J?%3->{d5rs@isG^03FW>^ z^hQ#qy$J2m#jhEi4)geO&!_SwOo#91)f_i;&;ugV68s^p}54 zIUXX6eWK#Ff?rw>+Fy}Z$PYR2b2haG^OXwdg+jkU4R~K1>V44mgV=dZ*WP!V z0l(3{*Ng1CKJ@ebwI;`x{pq@tjW2zEaLC4&>HHw|WwMfT+P~mTB~0>c4uJjs!Q>T` z8{b#w{XEq(*5BNp9C-RU7Cee>BO`4^4BI!89@k;rKhS`092ME8??C2Kblki{Z zUOuev(f)07pnncB{aq3vPh4ZzPX8RkcuLASRX@5fr++G;{t+<>{pe!U6v>l)%OW3r z5`r&~zhs8t)nAcV!S8y`=Z5?QUT9Ab{VZ*d>?6{!RJelqPLThw$-~bz|6!G%YJmf& zu%7+!Lz-Hg_O~OF5B+7UM4|rozgotpQ|K?PnzCH_x}5cy@*UmZSenw57Rh+b?q4cK ztF*mwbE@{oxG_n?+93B$`ggFxeg^-fc7bM#YR^JxLXTa?Rlbvz@6i19T{ATtSjqGD zNyobtPRb8x*x$XCVK4XL`a1YR8;@aJrv235WE1v z*rWNw_Cp>(pLMz}vsd4vop_4TnabaIH|wph)#zy8LCR%ZW4fgLR^nBY4kb@g=K51? z7p)~_-s2nBe0^u9rqApF+ntozUh$Llz1;JmVTiBFYtXNOe`p8sJE3KG^CjJ}!bwM+ zVR@~F0}rktZ2MWB$bPK%;qM}Tey>f~k1q=Pan^tTnsiyieBpY_A?O98SG)dvn$S7q zi|Uo@Sq|js=a0%g?=5=c8m&+4hbDX?^qK1ULDma>h6{Bvzob>4%}_e**Z!2z;d5Gm ztSf1JJh)H8uzjcp?Rtvr+mwArOqc4zFKI`pH4bQeHtsy6a5nBdsBoy4=A8}wb@F=u z^7}!(!_=>(!uyq;S-qba$9Zh8_v89r>_JPtr%SyukFD=R{}?MZu6KQXx)|-$bfqah zda?@o1@^KS-OloDpW%u0%R8W7OpnOYvO>&0u^>P0)JkoE(=f64m+{lYly2lNl)A+wXc z^e2;6^h?#Am_38uy-xWwTJKg1vu&OGib&r8~VOKz4!hrq+fdP{TB)M8a;PCN_ZvpO1zW$ z>{Ry`6{CO2)w|R5q}OYDbja&9J(||LRnzw`Jd^Tqk&S;}{A9v7U@A{P-6Lt|VbY=L z=OuYgvS#-lm&U(N--rBrwC(tZ=QFeM56@%%S04XN3-m+l_~$c9r_gUwf25|-Vn-=g zkQ*NdeP<=?>n!6}MMKxWkn=8?H<5eojowQvT@TYWST6Lm`|a1X_1oET+}{3o@Y{Pu zZ?^jFw<;Z+KK~Q#(|(bOR{OMqba@>1>0Xh)Q19mb_RXJe=eK`D1yA-pQ65jhZ@NnN zyXO6-tF@f}PICEkp;xP1&ifxPmp8YS%i`mf%bt_SWoKKte5=VN?R9(#a``izH*~pt zH|4QGzeo0MnS8t7Wp;D$pvEWNZddm)-7Xu4TxfpRd?AWFNy+@K%M=uSepG+dnqSm> z{T)Y{fACQ&_Xy#kM+o;GCcNu|gjXKY@YL<~KWcs9xNfe*_k#}j)NT>+@XGu z-vbruC7lhV*b4(s=WA0^yv zdSU3#3HMTOB(qEpEHe3AVtQaHYD2pt&QquJ*X&omo|(-PO>UXL{+8^If!x7U&;0V6 zwcJzb7pN)g7k|Kf|BdD^CbsP#=W9E{{sGQr{p0`rdEGa(?N1!fjXaM2bmd9<)AQQ) zC-x7yFJ9#~)W_}T6ERCE{UNo7*56WlX#Fj3`{cv~$?M zeLRx?9+lWr)hqRyHdE{+R!_MO^~`zf$FP_4WmO3Bihlk364uR5`geEwXL20w_9pB1 z|Dt-QwnZ|@gLx~IlkT(MV&nSqEX5<HF=ob@lD`o{m>iUZb#|vjpGzB!5!gsPXW3{*TP(B&V%c 
zIN7v=`EToyR8mj6&xhwD!g-GM;@_a%r@GF*F}KeCtQS%)WVt2#{+{hGhzBaHzs-Ig z)C;_q|3Lh!VNIs%@f#&x?sFoad_T_?vrFY+?VtW#!K7m+;XBMub=!W7J6T@RyItR> z`+3H=udo=sNi!sy`bg(JhR-B9zmE0QI^lNrLVRjP&VI}O06%9AfAO$nMLQpnboIuW ziVx%+eyQKr9Lk+c*QEP<;yuJi=KCcU_Sg2QSbV#C;zGGAMmK1_xMA{u!sQ9qi}%`o zhJwg)whw!@rc3jgf7WjQ>Q3y_kalj*$;D^)gO9_M$MOowV`C@76P&Yix%jfkMJ5MC zJpP@_LASO$;9{AK$@wygk-0v2jBl`5vD3(`>zPtLaxzP1@7gtZ-QGpGdtWUFaNr&T4_A&+d7p z{wo!v!SdAo`}NJODnHpiVcLO^Uuco91n6RP92o zu|h*HuT+4wllBd>UxoXRw6OHOkXu`GT8K0^9F-`|JbVyHhop*{qRRMD}1W^>WUHPH)8(|(qe6{(3kf5 z)X&FB2bZVay+80YeZO%$#PM==&dusMU$bZ5(a4>1OUg&tZ=_qIrt0{gXgkFZVf&kV z84m8#Fzo-(SC9{U;w>IrLp{;#Yta*KM^=ccPRDu1my`CtChgqV=ZyYV`T4~8U#1UI z|BH0Y{4e&aIQ`Bs`-%Gjng2z+L7&&23IA)E(mB~={p&V)A&={qcHck7cXJ<$|23Sm zv*UXnpZ|q=f%igLR}B5HW$J$|r5c!LNs|?-ku1)?9cJ#;0xOD#r&vvUPbFq zI(mrD+*t~T{m%LGeqHw^KjJ;4PrA=N^@n~}d^t$G_Gl2n?2p9{_hYWh@uO=De`lqn zt2chC?LT#YnC&k{pHPJ4+zsiM(q)3ihyK85fB3`tUgnRPugQL$-MiMBAA;%rM0#J? zu*uPuE-f$gry+mvcPjD*{p)km&b)mr+5Lneo}>NDhL$V-uhD0+!u3X<>FYHbpZP(p zc!z#aF?xXIHHS^lQ{RX3w$JqZ^S>nefYqy?G%W2&%ESZhxqLpx3CSq@5Z|rw8%M>D zyhTBko9*&5EEn<}((Rzp?Hoym@@C}Z=%!qH;0rp>vGmhap19AWR1iUxCvNnS&pz%M z)@U7P-i-2YCye@P;_y_Xmup1L?y1SsO#M9AYQIrF@_+RoFuw5o^E99Mg_^Hg z=(h0%^-XEq5|(TCF^7JBMUD^@qYL!?@2dSHcWCi)?^@RWf0rSFx4&BGp&y%m56b&{ zOuViC`#lZ*Jz>xdkL(}T(C3r-%Li4Sijg!<9_gP-7hi`eVGu13==k{x{UD6BK#P~18$#-(GN|}($4sO8F5bD zuzMAfcJg~cMA69;$`8i{JRcMfaeSJM3n=F&zvqMaEtlefM>a07@vh(dGuWf_2=zDY z9OV4#GC>{>ZfAR&TQv;R!TuxN`vl?EaluLW+FW`q5Gq9mVJu94{;d)x_>R zn_+1(PH)QmgM9ZJ=}MEIq4RL^9We6S?nesOWBMeW+{dTy(T;zU`HSSVX$r?{yX393 z|9TBZ**DjxwKL`Y9_rJOE_$x`9_B;2uSPo4&zZjAeD}x@)8YKwG}xIs;n2?zGRyrr zOkXP8pb7Hd7e{|aI=qxXzHGyMBDm&{} zXncHstADL@G`@k3NDn^zA0bJtY3mL~&uT#$B~S9;Vd81`M#w#;Dra);L*wJdB7@nl zV*8GooEOyhwZ@tD{)ob%Jm@~ArdFfpy_N_cfEQW-ynb30fB9j`dzeqpO$7PgZ22y= zd_R_a$%BWqJ~X`i6Df281M+|{hs6x(;tNV1@)nRr1$#1E$K^>-p}tTKTLX+ zCOx1xqSkA4U^0LIz7~A;W4srX5Bx%WzMRA7jk)x|FU04sBt7J5x7393%StKF{dkun zKey`Vw2IMtsLxVvW%S!i1$fD#FVcNKvrPXU(9pkY7^nMw7V*99w~lvpX?v2B?E8uL 
zEYfs-UKR9`DM-;%%ywA5hWs;FA=o^8NLhD3^4uilRgQ zEIsKRrvD1k-z|iZ2l_$CB=^$l~+3028!%WI&Fnt}ZgI?kN zaIradN%>rRPyWh18Vb9f_I>E5a{g59nUvpTc(L8G z9%wMfi!yE_yg@R_lXR>hJji_VeH??CFX?7Gz5Pjt(WSwDoYKwqf%yFz;koPe!iW61 zYd`PZZr{A>y+PuKWdWrR)r80c?Q%Sn+231b3A1(oO-T!Ic`Z%csBq{PoGCFbw?6M* zjJW^B zCtUFuD03Yk%>Tw*e%P1Lo{X3L$)*E}UvlGq4QsZ}Rn^n0D1Vv8SJQE%2(~<-d|)Ns z?;8sD-vfVH&$ImO$AT~Qb=l_Y8wU-x{@yrXu=V%Ge$(4NZ}CCmo92J7!JdD}V9)=M z!PFP-4?{mqoQHW5aYW?_<9|69@#qz37yZh3H~qS}rqQb3{zKDoofZ8Gl<|J)_M{lS zNaZ!npW21xW*08uePcd|v5#jn?+flB`wrdPnp7JV_$ooz;YL-=5Z&t(($rXZCKtz7Or)&t{-r z`oq5qy*v8+ZL8k>oH+8WdiPdsPsXQ{)4wO6f3|+@^!PpHZ*1o$OXJW>(C$`xd`0My zeh0J3{2{+#N5H?Yem1*T6n+Wk!`86eQ;?VY4rlUGDp1ZI5C1+X_=f%EsWOE$!tN>U zJF5B!_&x>kG+)tFc7Gi=qi1!#h5gIz(Q*M1lj#}i-cZj#!o7cGcF*j}9@ZOgSfcc3 zx4&bl=s)c5IGP*hJ@g3ZvQ+hq@6U~!%czI8XmPT>!Z01bv0Fv)1~#l+eYigg^-WGP zI|91=Z>?|1sQP&NSK4Ru6N%LqFV59#`>DtB7el$32K~Q*`BCo?^*3l|GWijMBoFvN zdAd#YrQGMDVRnx09O}{Z9G#8-(sOjD^L=`b?y=hSlID+1pEw`nT(#f`lewrDv| z+&VDjKUqnA?(-^;*FK?FV)xxZ4&E=We{vfA;CO>X$aA9g!$TrRp#Q1vtE=SXed+T! 
z9uPH_?W=29Kcqc~+pHg=e&D@M#*N|n?qP`+e?sZlx{gRW&-T|%Cg0Na-H7m=+I+bm z@EH61Nxuhusb&cK*)i$oNtu3_ufxFJ9hQ{9`)k*T9&E54@MD&w_w%}8y1$Wh^#KgHBBFU7gt!GejmFixBQ@h*zWoEadzCG ze)93R%)e3Jln+{Z>Nk-yg=-DopH(W5e;7}1*K+w@_Q{YQ={a`OYYqBmGJjz2>5oXc z3ZMG=4fRL6aWMKrI1dZ{jqF0JrQJ=l({^tJRUzx&w1+u*R}0hCF2O!Q?(_Wu z@u2U3-TNs17RwprxFX;0P~SIY-6*qnG~^*aC5h-u;)8T}QofoT)S9$+**eUVNxyU* zW&+`K9p=}R`^UNtvxW6TZ$N+g_bB8$U@8YA3;901PjUQ(8WGn;$;)%1b+5k?eY0B9 zROJ6{7FMGU`6ca*K?MnIUJ7~#PcgbuL(oO>v3V-_o~6dOn>Td-^#js>L;X^b0=yqo z3v)DosNa`BZnkK&?gu+8Kf%BAa{6=ybPFY&+$*4T2mWY5F=9ROu*rwbr=i?^ zqEbeXNS@hVuLvEYI<5Wgr(wSW;xqkx7P5^R7sFpIj=BF&oM?GKM_pGSDCwW3@!7qP z%h*r-oSom_p59;F!}oJFo6L(Xq1?G08}r^rXsyxsqGsP8;(Sm2UE}jRH5dBf4y9|h z-=wuX+N1w4J;wAWxY^@3h+)cN?yN@w3^oY{|I%16`eOUF*a zgY3q92CbZzsNfKdQ))mRlVshEJYk(;BVs;u-V{d|eLXuZf7Xng19`%RU8*y2?Yz-^o+|< z+OLey-mj7y*K0eR9iOXX=Xnc4H%Y!_A;$tEgT;>!1Z7bz_7x937sYp`U z&qsORvzho2E%EC(6?_RoARJ(@+?C-c&@$>rnJJ|30Z+w95I$w(ud-5)W4d0GI zg_Ca5Cq5t2BJ$lqJ>%a2^zZ(OUv7A^zlL<*4!Sp455^JD2jFi-qCoGDUft@Sl*~V= z-EHkK`qWI$oKD^k;yEne=@!qib{PHQIfj3kbo6%8!VfxFNeq@n_rq@g3*m}aLxk7$8pFlkq z4;^DUgGU**`Tq7A$qzjKUAknxJ~?-y`NpcZG!D`$`KFs*?dW4YPm==WNxD}MKHd6z z?=r$q)z{L_r7VA;<-f@Cz1Y&5oW$1?E{!7(r<$Lg(zV9&Kv&QY?fJTNIk)Gr`*G7* zj?ZiQy1V%6#A92JhO=*HdGQSvt|eX<-^2VN{kw#IKJIb7@AvEjkEh6d%gCKp&M?dO z^s5C~uwM54uzvfltDp1n@>WZ`kS^)1XgcqwC_jqGjV z;Cu!OlLvCOTSAw|&S`uB7!S&m=4E!BVlH-OJMm~R9m+w!KzdLJ^&FS;;jZ8OoVMRb zGbSHxz9r;iL-?5S|1(-|#{W+%?DICoXoPsBWZ#zgFkfW-0!qg0BmjT-v#FS$EO*avv}Kf%Ln>%LtpFp7buIf8gWK z-1wm=C++dLzVPv8FE;cOZ5H4v)JKXUSWswq3=*uQ@{gZ=zU%b%G2a`}P0%fHC^>w3n|C(Jgz1-jsYe~c~#xkgmy0O>00 zOxo_w#l)lYjx&)w7$^Kl{r7ke=`+>JU0c^=(fq;>(s2SPxz65?%;S4+Pv>I3=)6NZ zr93_k@$a@&$A63U46+{2=i?H{2_7GZ00xP(d4X?GPR`e4ZT*w80>1hF<&e*Lf9BXF z8J|aja`S%1^pljEPygev+!qty)GtZ(2K$H4YoBQTt}6O>wG>m4|F>CKjbu}=JSX>S zNZ4ZZN18q_S0~aN73qi2vzxyKKZO0NRG?w4nI6sj5cZSQUYfmey^nH!D!RSJ4@u|4 zC~)BWM8P!Udlo->{_M5dp8D_#>XE^%44a!NCk=hg^FY>)v$Y-OFWEW4(9XiX@7H=t<6f>6`Fdv9-mgh}i&3w=->d0mT-t)4(|P~du+xhy-9ZgA 
zyM4%D(%Hv_(vK|t5qs}^GC!ojct6i5-$P`2|87KsTu;QPescWq>{OP+Xm7W0#hYPUapwamY6(bv(4*mFF{f9_SH?@puMa(&?K z^l=^H<)8R2K>kSS4iJ2MXlq4W0DSX zd{ek@Oxkgg;5AE>Z`UnZJ+K=`a`g@_$@Gr%!^e@n|FtxpSKvO^+1)VI#}dSi{^EBhfel=}o3HYtapp4e&ae4VzF@>wc8Q``9w`3nk` za1oq5-cRA*BOdfQPui94+p~TgzF##v>qkD`@Ode>`y&Hdyq5QmvZR%4Iz;?8QGd8y z^Yz#efAA^PCkF(M%^wDSxVl;VCbmE2qs=!oma!h2&x@PnW0KxONj@gsN0@F5KRUUU z_}3c4nyy;-jD0tEpTebaK0ev2a3~iOg?>I>k$Y`ePr0IDzMW{#Q4#&v$l7ZM3%g2Tw)2 z{k-D};$`#yNr&H0#q+IsI%MnDMyESS|M2^Xdt`rJE8l%QyiDtJdzJCs=x5)9NxIoi zr%RF>KRMrv(OX#FrY;Th_=e-*vqazL^|~nv|ev-?tAa? zU?1sg`Ot1qq!|4>`BN9?L!P9(Leo9g@{?Z9o74wh$#U#_we^7&1}|c{M&G2{#>WE{ zd(U~6`ap-lU6$VD%D=leNc@tPcKjXNG0>y=;|ny1!u2?`H*AGr6 z-AmMy*HE`N2(sAW9h8>d`mLrC}$4FQE zu3EOwi|rgJ|C7D1XgE;*p}~}=f$|Z9NhcXc8Eka%ePG=OE#Bm(e8Au(8p^&kgDH0d z<&PP>Ov8clM-8St43zg7yi`N^eu=?71`iwDXYfvgXBoW1;AsYL*D%z32c_SKei`&n zXy2gUp~s+FpeOc9SEr^uDq_`5D&ZcB1}aDZgRqgz2vD=1P2g zJLB{3T^~WeK@|dzCy9Ox)1e)%SKMDky30N~x;}j0B6*PX_H}Ed>p{AQ4KLCAy3Tqx zVL6wrbVL1JR{!lXRROzyB(UecuMc)p{)wJz^^5j`zPr3!^3B%`Kxe25;PW=6^InzT zjLz#cW7uw|>*-_A_2(bT=n8!Q^Ao>Nqj0G4wqLtniuezgfBiBPe#b|DZ=vt6MEtBv zUh@dzzkAbPeciunhxm!#{MkPs{$F1Ij&J#Qbwa$rSLa(OPiQ~d_d?j$fZi&HM(>b+ zzt@I;vEAc}a;D(@Ffftzk;%rlErgxFA>Us5IQiDoj&FCco{(?XD&Kl?e7jf^hV*g1 zeN*@o#(RI*FY%Bgxmq?_XLNZcs65s26Sd#Pa}^xb@6e90U#^z{Fv^2o@Oj6$&V24a`2LMz z^dYUM*8FFsS9WgVyVTq1xe3l!C+WEfuJb2$ZsJt$>kH}Z^#8Wd*~d58xd5 zQ+(p|T;@{3>A48z1@^q`V z^DRp6Y<}k)WlX$K1VJA6|Jtp$trz)6zdzOU4Ab6_jYA-3&wMuZniP|rXRv)eC%h+E z(j)&{z9o9B!Er*iUvE34Yq#Du{DO?mGXB!?kj@{_e?)W?p6iw{TrEgd}70&y_rQm~wY7)2HiMEH3(p*ju28c`VGZc6O1TomV|WGsI2i@0`#1 zuKfNy&+q&6B&4Z+cig% zN9le&YuCChj7Ps|x1TQQ<@`w~H%H)~tUSna-A@=KUP&)Q$j_IgKgUCa<6TTI=Lj_) z`~^J4=yS^dygn{QpV#;k?N9oXK#sEgNvsd@lwbeb*e1R2dqDd9y58Mk`WKQ;&gXXN zZwk}PS^}OHJ$4HFj#r5MKNdZ3f;qRDQ;TYY2BAAw1Ma*yQ2ldThSp1$;;AF~-M(hqb);;X@jR z@(8?L-^e_h)(?LN5{C9%_EpFs@r`#8Pq}xGNs|YGs9 z0({T!A9%_OGJ4BCJ1rOK`~m$>y6>3kXl#9kfvP+{Pk6HZ#=c&k>M_d?{RPyZk6>9l zpYv~uZ|7C76G-fi;c?jyj=ny>SBUUsvV>)%J} 
zJe`hLiyo6y;9s@yP1a-QpnZNR)nkH8Mbe#g-?5}ad9C}6_flTO6#$=poBhTIMK7k~ zCO_Z*5~)-k$YB@!p8E*<`Nnvc)i2S&FWYa-bFlHAgTxOn$(Z|%DYvzI$?$An zFuwzt?F*)!^#0P{{R7JTJ)GB0I(}p@Czz6spBPNTEcan4oY=lX`Tm2-Q92J=X1@yi zDb96<<9PI2_~jL87wGeB8Qrax-+*tk(C^#x`ApbvU>M^PNqLeEosnAF*R>Z@LfH&coP#@K$^F zR!xt1`InA6ijm!Wf=L5;pf~b%Yya~z{gT-^>ZNw+-_<7liR|15!}KpC9y= zBS@e0c99O@?b-XldlS}oAOlJA(ao*N^E z4^|j%TBM<`WBGbiJzf7Yd*%D4-4FMD(|*s=!`ro-+V0IPZ_ic@L;1Z5_%?|r#;*^X z{4SIz;C<~CBA4m8%pSw*kcPf5)z?LC6LSNLvr6`hJypYV4ZcG4tDM)=e5)pYTID6) z!*-S?PK5aAyw3hPqO|P6eD2xu8+Zs>={^YG9~lplPq2%4iqS&J0er2!HZDo;NgUci zykBhn-S^QhG&^bhb^m$Q#JRddY1PC_^kiIy|08UglRDeKNY7 zFESzaXX0B55E0O0RZhNO37ju2^$o~NdBXbMAm7a`MjzL9x!h;L+H3c{Wwr&fA{@d zNtyc4&++!OH{Gy(ZDouPt0bLRFdVY@IkUOWBm!Fp_yuVWWale&& zFq$LzN)zuW%(ZgAmgDcDQRPmvcAbQdn~aV_l35;X-wNB^PChOWKdKll)ABRD`l!y! zhkEtxIXS@y#@n5j4>{L$6!pu!4TiUk`;8yIUnkWsX0Ow8*QtH_q|U2??%$m~TK;DT z^^YZ2S^e$llHsw0?KiuIcD?Zzpi8I5mnME${Y2pLsT}{$4Dj&&oza7IXwd`K|5N|o z>_JM8Hu~>pKLkEcH+;SrL7%ni`5Og8iMMmFPo%!z_8HXwB&{EG_*PCHV0T;|eE-00 zlCDeQ2g={qd|`Y=;@6cq@7=%cu)fdq$8m#Q?tgCZEDe(~&$A{E{y^dIJ~_yHs5hRs>siNyYI8sxJuLYm+gMZ2G<+<&F;8= z6Z+eb3%6g7rQiKhzKdRrcz+<;`{voB_0VJSD@M%01;766mngT9^7WRSyuq$IUtK;O zf7df^pC3zm{^Q?K?>#yGe?|%I^}zqf1OBhb@&98v{x@>`pDpGjPVy+ZjpnO+F>&4!<$+?S{2UN(EQypQ>pw?>{%ii6`yso~TJb}qh|mv7_0t`E zANu*9R6FzhwEm4b`NBG@%a_YP;G-~=TGH(_r>V!wBAKI{M+gq)0d&&{}t6Qye|)W>p3|(hU2fX==fOlO8373 z?@1}%ug>9p?kKvuUD%;=li7un__sDrSZn%!UHOkSf4y;yh6CkyX&8P#Ae6gye7@}G zz?=KSK&NVs&$CD2?f%HA;d!cfp#QlZyimS(S&Y8*8>u$%@4)r;_PHmgpC<eiG%6{Lad+egfrxK+E_38rr$7IXZsxxtX45mwtUN{oe-Zlae|*>}Wn4(*nllQo!rv+O@JnD#3vTe}}1 ze(k}CY?HMJulq%1$slDoa#RLeK~ph{^CsjP>w&CPs{cbB^q6?W3~Wt zKzvGt^ArZ1bYIeR1*Ltj*B|+P3vs=o@wopHbU;3F{lwqisdD7|eBw6WwMIK3Psj&; zu9x3+yU)8+I9toDHCRsE9MbZVj;%W2h?{mVcgJ>v?cVN=9SYZ)k7)Y-juqN(`#W}8 zJj4EurN2N0a*jyjlkOhELo6@pT|#&l>q}O4DcrvZ-lWiR38*9CQq1Z~xB}Fbw!nFk z$N8Sllg^|(6!j}5q!Z@RvU^atKQ0)^=Ub6 z>U$yT8#X*;`o2s2B-bnL>V0kwzb~U-o4*p`_iRZg>n)03fBCO9lyw+`zhE%&A1MDN z;X%@EUH1XP!F`(0Yrd}OdcMEo-6nsgAEl4si63TvFHN~d;oF3^(zIx~gm5V47ha?D 
zKmL7U*&nO*`MUVjdo`QfCqn!~J@5r-uk6d$^svWWk`(R6lXSB^GOwaA`rQ=yRg4DJ zkMZ>or%OCYJkMGzMqVDb7hZ0?@p-LB?lsYHe&KR0Iq6`&)l$B=8l%34en9Y>YWm^D zf*(+e2Z`@l^NuPW9|18C|1jf=y6PeAkn~byG%!bB3;)=jRKJ)#z`arxNrL^VNFO)I zH>AV(r&Rck)+6h^ig(-~{xQ|@DEZz9`MHPnLEk(Lc45y(%@_8AHAr`TmhLHPKh|hE z&>2sEnR+3#liCmJd=EZV=7{atsKF1-(Ni4-c2?@#i18`#USzJq;<^!L@0ry~Dv zv#@I4XAkLlNa*hR!q>@w=R4)Qo$U--*4yMy`Ls_uNU4+(`7tLJU~x8axR5+VQ5SJJus<jmwoJ4N!4iI)e^eBK>weWyKn3df`0AakMeuNO0v4=@+nzGZ|*lb zAD}$jJp*x*^pSi1_Un>(d-v0jbBg_}b z(S5mma*bCiSCEr)q)3$Wq@|<#`BJ32o%Mz3CQ3T&ug zV%G6<{s30_Fumye15mEKl5)78xl|y(O0sJL@x|za+P-@I_qCpC;Wq|%8T>1SGrrPs zD~+Qbcm0;-=l-d>-B({N{KU#*zG{K{6mcK=`BH=5gDv-YYdZJ)<=mXXlt=kKg29TG z@RRyH)T4QRYJb0o>A{a@318igrS$9W(Q;Eh?_ql2*Ot$9z7P5Le_t&9WZM!gC-l#C zU;Y-R5A~MI$LA!U>mSG?S{lkDfdMeL#2X#e=g;^926s4FUslL6Y0IKoZcG|6j82JbG*KR@==e8 zSG;kN;$Jg*S7m&R$Lsg=K7O7i={ThAbiVp~r#tKw^hZd)qf#&G|Cs1;xwl^P&;A8}xgxKM(YKWzZojkk$>NGU zS^F+A*xLswlJ`&5K||>HX$OezfyJ&!V;yM_i-Gz{@v20R0Oi+&s8 z`#`Q;uoz)_oR35L9*GCN@7DGiU1w~SShSb&p3K;6F!|6wW23?Y_urynau@Zw%U?0# zzM1@Z75>v$_OLH!4<8W24!opU$K#mw*Xz_fZhzdagnAEtkMj@tAC#_nBL8P&uHMgv z_1gJ~R4?0jGV>3l0(mn32UHd~^$!$GyWQ%ak?%#_B^}t6 z8H{&6pxswUNnyJWTD$*H+kHso`&9hXdxCcRIfjfsHz>Xte_m}c+m|f3mT<}+U?udj z@BR(UxJluf*`L<-(ysaU&s-n%&tQC9->CVM8B{dM{j?Lc`c{o^EpLm)xB9n?hyH!L zqzLJzc720p)Bf>%r62vm%)k9x(;xd8`aR_f{zNlo^x@1$Ccl5g_bGj#{ZH)QUSA;J zu}&BIx5GJkJuBdw&s)}SkxcT`2Q(17FM=YE?;9FxUSPoH??Hc2Vd;FS?5j`nqvT}e zQq{xZeJV&V=fadOe(zmk^QJzJ+CuN97OQ?O`m3yiXt{om68i6@LbGsujC4L9=j#g4 zw-qT0=`WY`vM-f*lsB5aIBM{EgQ*YX+?v6M?R}rZVgH&Z<$8T#Jlvo}}IaIT)6E%o$|QP1pA^&HC8L;ID^)28RFMiXIZYErMY@~TI!LdhPHD3tx@%C%+<3# zSI?SUJ$)_p%p0SgzZq4}j$A$4bMW;^*r*q{`GnDc7EPi zc42?cF07O}Oq4I`^0==Lc=grBCBYj1M86^CUjR6Ltc4UZ!{+Q9f43{pff_ z(s@76{2SQ}hWNDpXBq7EKZ9^uKWdZu%wGVVE(P4sXdUmkA9kVEf5__pd#nE-whWXw0_h!y8eE^LH+x!{qwc{16KbRtp5GE`nfMDtDlZT=D)1d`ZD^X zw$b(90XV3Cuhn1D`q}TQ<34WnTfa-&|1K>rtN&huz5Q<^oVFjejjn$Xa8UnFtG}rA zvtL)o{i)S&{W`7x<62%;{|5~A`u~)0T0d$VUH_*62la2a`u{`L8RI7PTXo#KtbWsP 
zY5jk%hH_d|J+>t&obEC{|v%u{iu!eIG4(=knYQ*Ue^oIKOfTiS6Kb8vig_h z>VK8hCmG~P7F=Vn*Z)$&Y5k}*7(dv0mG9^9b)Ke~$ltGj&*IT^*qEb3a@DJLh;} zT?_>B`HYZ1Yk*f^k9|J<&4NGh*!KV|HlM`S$;13;S7PUuKsQ)SUuO){p9T^|EPaP zWG)cTY_nsfaZgdWbLK=1W4j-~zaLdM`CBdc$*?o}oQ11o9(qK|2i{MAQieCiqrx6t z58EDmCjof;-7_-2feu)C@_F5sb_^;y5xy^gc3?k+WTF2gOJ&|3muvZsqteUwi+DR+ zzEl6|*(}%heE_ffrTpd6eC;>9ZXw;T5x-s%$dhb3N`BnN{Aefe@_m8W*Yg5-wOamz zl2`V~A9+gZO+7W5{d&TFZ*fUh8M`DZ*>p_H_4RjrFB4BOdaj0k9}nn-r!=1X$wqD^ zJk{D$zk%m5N5r?pvszf9{IYOWu@niq$46H2J1>H}`Mp`zaq$$R zZz^5em{dr5xq6gvz&bP@BX_Im&0FM=|30W4}kp?ej=K(}NEWT?##7 z{o2+8{W~0PS1_&wg^>P`)ZyuqnncKx)F{u%-3kgF>G$||3)B5xv-mz9u+a8FWB(;r z;29;Zm|T-~P@*e%fM%s|ik_oChIm8MWWoG;!^`4@89{T z>a8RK8(qV6|0p&L=^;1Cwqb?bT%=*DXI3c~rF!Ok!mFiS zVLx9r&g{n3d=EK!EPCeV^nRP~Yy13uN0g)bMLrgfl2=PN(t0iQdtO|>+`NVTY%IOv z`$}8%iQ(<<-R_L3PwupKiY%%=N%K=rxP1n_g;i12czD?|syK&s zOt1R>N59whWb`dfkq#n{?_=}!hx)rBJoR;BA7_@vJSLdt1vDEPmSc!c`zB5`f%Ztr$eU;6$LpU=+Zm;DWN$K(6l^L%jmc6pC&z4ZU1?ri|{ zs;YD0^G@=TAGH%g4#Y@0?<5%}#J(m$equBd0vJfYN&v?oH6@cFcM>R)aggNXXqkWl zVznTGk+%2FOacjCdv9rbZ=q_xp{8%x0n7JdwYB7k4mk2vDM~#p0(C<-t(TB zAz;V$b3wXHSLa|g z-&DKkJ_GYN4zdJy_2hE&P?q=ocJ6pr?Va{c*{<4s$?FiG)?TzPD&gF3jVE1ZqWt^s zD3^ahl)u#9zpLHr?{w8p+Iwv{XI#A3^)IdWj*)V?=R~<@+ZX3|)$X?UsnXhMwH~YY zf*M6@ChGfa$qvFkew2HW-TO&v-)HaOrM1O4*)%idC2jSb7v)`;G_77|)@YxlwT9g^ zor(JGZL8luQT!oKd+aehtJE1lcdS zThk`5Ux@Ms?Zz>_YgbEWsGJjR<^0zu=ik}LgL}a>T6Qy0pRH}}^?55Nt-an(NT;<6 z%)*|Da$aa_r>CNv^J`pzOlxngEuEqI>}ccL&qO&J>`T{aZ8J3N%+gA_w=k)#rmzpX+MNtUfzy%V(&Zq^*7S zMLGR7PRV9M&Yo_=e{|7SYZ~e_qgr z_wQ#6@BM9j_^TPiJJN>tPi747i8ekwJ7ajCZNvNd8N<7^tsj4S#_(=z!~1hHhWCXw zyhmmX?~XQk_|%NyUC^f2KQ?1{lQz6}%^2R1HvaCPF}!=*@ZK_Gc(=Coi=8uucVin~ zu2jy9o_xAZ&i=0%!}~-V-b-c-6wJ1;f zIb1JZ7I^O1SgKd7=a<(NpNe(IHCS%S9y@O9xOx{PJNmST$DeXP)_d_CFbn7F^&Z>g z?_?0K-gVSHbnSuh~7jrXP4e>zM1ROG{%fUhUJ@E7a&-dssK_x%>GC%2iuYu>AE{chL1vur&xop-*y zvqO9n+=K~7d|XegCqHa4gtv7fluszaf2{I72ue&HU-YFTJgS_e^VZltGvT*X@N;Xm zo?Jz6z^(T6RN&{zve8q)@AO<5@N0ej5%j+%;6G5o&-J>xo%SUDH`@MlI`2YT_agqU 
zRp7avT2FBJ2`-&?fvtC@^VZqRIfS>(YD@Kf5^9Eea*@UCsvU@R;t$3;@#9G2I@@o> zIGpkta05LEC!4p&>;S=r|tZA`dj+)kEW#8Hw>8nXoX+SU#=23KX*=925;*?D75rRB(!CzicYh__>&H_Fp8wVgep04$I?TVilHRa% z-$!HmMN{IZG&%mXk}iFLZi{`pXQThUXg{kJT>yE3rF`|ZFZ z(XZn>qdE^z=;JXX!}O-~&)FCPbPgqr{lH%AajXHT&IwW9u^;K(U51&?_im`y`{sV1 zAZSjA3HMcwS$)%vE`uwdqd-xn=YI}7mONzr z!Kcu#+!rPv^KQ$YXy34Iy%YHRJzn4So(%8r)oXvRmCn+!U)yoQ=%Zd*i7@LUx_-*& z`cq4vKEIzUVg5!X|0gWJ-v`#Yr-uMTx2v`&_FdM+zRPv7@A6RWyBsn-$^Pv6V=l0{ zy#0sM+xs(nN!90`pXC0e2l9>&*`XYtqZ zyx7@J&vllb1)hszf8jp+juP|x_TPZNjvX-PBxt1SHx{(Am>mHy)LbV-qa8Q_?Ix%oBg=k?@%X7~P4 zY5#X1zMj0P$j|=Fa=l(3W;V+=ztZlUN#R^iDai2W4(@6 z^vy0y|F$Vyre9y_Kh&f394KO|AU7Ae--J!S)q^TyX(mpi}e3gN%!&O&x-VIC3#@`Tw(eDs7OD%lKy5(|L;Zm ziAwvdv-ICD(toj%?&HDl7U_RmX}=Yg|2K>DU#p}$|36=(|7}Gs-Jbo`BK^~qbnlP9 zQl$SurT_O>`M+GG4^-0kTDpISp7OJ?(tggrpD*%1HD!PQk41Voj~Qga~*K|c#P{w6&-Z#2O{15S39{E5`#$v-)(9|sq>#jtIXear}Yc1 zLv&qY(e@p1{^Gg@A)EaUuXPTc7whxsS%K#s77BL*l{XyeX{%p9M;(v1ANXVS>-SJ| zlb56O)}l-2DAf+MzdPOv`geG~f)AV=!A;ADYCDS$G|u3P@AOir;huD_)y|g$A7V%; z_@H*CF#}h$GnF6QWInX!Uk=gE2i2SUgO2t6!71vSPjP>T=i$odq?ljxsn)qEpO2mD zJ`U-QC&-Bgczpg+o+pi3c|PAO$8-KvK2Q8K!mP*JU&HmICu}^)j{5v5i*+N-p9lx= zlj&*R9~7^1&gFBClpnP}^WBAf>vRU;FYmM)Pi3 zEoe`~-|~g;{~7-NnP(q6E$>FbaE$-MMUTFdzu&w6zH{?#mwv|Aw_LlG@h@&aey8Sp z<#PVA>)1x7UsAv9qvdkmf5CtGGsb`R@BV-3Xe{ugH@Txq0KT>pL z$9x@x>x&%En_RSo%Z_^po%iUhl5+U1$`a zOzaMP6b85|z4?+6(*xyo_xn+g;(lAaPs{ZepGSE+@SYs;XK{aTd)P!8cdPiApAO$o zG++0XHtZo5#vd|2o@d4V0bgGe{W`}z^j-r?2GM+^q|=)*nP_-Cc}O}ZVtnX@&v4f@ zo<7{`KHqg<$y|>@bx)?g`~f=rJvqs9s{3bA&eA=@hG4_P{pO#lT%C*V+UDg>e8~OI zUG5LR&wZWmZm#%m)*kqts`*RzIN*kdf5ZSKU!6N6|HAhC{JtmamCl}>G~nx9pUzDV zH?++CwBu`rf5RRZ49SzuvGbnIq0;^>B&#hEw+?XG<#;nc$kduCg--mcYYMko4Gw=YI zbnCmt@oZ@_)Z^*H*M`5T9bV;APg%V~zv_MA?5OL9a=)_k-%)P)KEzevpWYcyu|7xF zdhh00v*O7e<+nd6}?+4@V|%!E%8_5 zv(|q_x8A`h)BT5_TksPcq~N4qG_Is`-M-H*@%GX?oY^MV+kG9Lp3dPa3-X%<13%=I zbUlM|Qd{L^KXrpTzuoi8e$_mrZmX)0<4X)(qW-1#ABpc*4IdOPoW2K;a)5%Hs{AJn z9s&%f?_N~PpSZ`8kv`jgh@OcNkIzqQ(aRX!%hvZD3O=Ik=%hErZ_R@krrQRe6!pz= 
z#%GHizvvi@@ua!g>DGIgx;NPa1wu+mC;hU%cpCjxc9ZTysobgdJw69}QSG4p9_?E& zo=*FyeX<9^Zc?0$yFHT54ZS&L;I_YG_RBdQmOQ)^9lr=Vq-V3Sm`9xM`So6b?4II2 z35FZek>B*9>6J1co~3?167+ycyk}FsPc(+G_Pbj5h+H3PA1HsXw~OZMYG>)yz78lJ zILhV2uWI!}>1D;H}9<@NtRlb3XcB(dy?EhXxik40k~}u-)Fa6(7*db&z$Ay)9D$GKZNA?W4^0F>;rK$0-l}bVSVWZ zxA)>*P1|;I_7886U@hc_%|5OTY5Xni|+|(T-W&gQu_J?Oc&xqGXbCL zeTKhWir06E^v;v)D8l8~NGR|lOjm?YcRnf~?NXLQ7`2n{v&&oc5#5?^Zi9(K=0w?tqJT<-Qwqk5=e@ zs6=0nOjyGATxE`;-sgG#JnYvwkUw%j>rh(fDDDR% zoh>sE>wJa0b~}F2rGAz*9RCFR0Ug%?7?agJzOUx?F6}6geKKF*@f=S%W6SCd{7sUj z7hsVRE{k&dPyd$HQS=g4w65?ei10zU5Z58O>? z#C!JA_dX%0aOy`)CqE|jr<%)AzSixod`*Mdl6-;3r?b62O@9^)c6G@6c?MRjpU@s{ zs|Uh#vJ3j~9t54_x$|lZT7CFS{g-r-pZXq3Hso-eXA?u~d#3-}_?n#_{Xp+xulITs z?_#fay_M-*Y@K&n=6Ll!u-=o@I%qZ?`mWpCS>HF4yvc6NyB*IYc{(@n-*m6h**DMW zEZRx>G~NNBU5+NkJ$;|gtKGGZCw-M2j`|#Ny!rW2|HIE)Ir;fPZ+yQkKc843&w;<- z|AQ8tic4zHHAI61}MsQ=xVH2k`HW8-G8sn zQwqB@#-(im-&cFi2-5jh&1F;rIK8yW})Jkj38>fO_SspL;dcBoDf8>z! zx6)pd+H=0QpVl=c|Mi5YQy~ABINn~=hkU87b7f!ME)2af6c{ zmKxT@Uh-GO|);$8deKJ4}K^hx|$H z*Bk9{o99!#I9{=2*+0z{L5Hj9B=2?jKJ0HZrJj7%`H6gV>W|s-hJ}Z^T|TtWn8p5= z))lmVn8kNTwlDJfl>6-ojANoNzuwy=Uw^-q%ldr*594HqJ)Ax2>DlNBOV5tHV7|U% zGV!pHm2{rt{!o96Kd}!w)bAA$eM9|Tf9=z-AF*Z}-`J8YKSTfRv(;SJ7fjcA?4sXk z97cX(%on&ENrLWLHGL zD&EbF?;U7fqIG|*7wh{6vKw^nfb)_cT?Jtbr*~Q=y}LT@0F)c9gKIsObYG7CKs+b@ z0f_}3w+o~<%k9c_BI&83-^TZ?)IX#b@+J2g--v%lX&fl_BTICRkgk3wfOI{GCZ2#? 
z(`Dm6-W2QRW3Io{UkLvde*b)jffx3T)@S*xKRUfzrhPx=;}YIb*g5%4t`B#0oCe*5 z^UocxF^$FbuJaL2JGzMm{%c$hApX?|*X?vF;b&PtOgrYq{{37#2V777(fF{dW3HbE z#rizN#q1{DyptWyd=z!=`%{BypOWR%bkjNe6nJR4Gv48N`pWUW1HONy)5GsZSnI1C zKiTh#c@^4)^(v=tL^$JSHC(4plk9vBK#O@vdKwCbQ+r9jk)NMMJEX~%O_;>j0*;k~ z%fqfO)*-^ZTB4KkSZO9jBuXCjy@Nv@Xnje1`kxz9wnFFWM>c73VMxQOc42$t%q7slCeH z532HCsB&tG2(KM@iCkG0`jp6I5)lDB%juSA{~zeKyr-YDd` z#Fw%@|IYdHcS!^FnhCy~wDk+=m47*3zD|5EF;1{6eaCz`h|F-5FShGsdHKz$`Qp=w zLN9NB8S?U{Gt8I&dJ4W!pMCfE^4N^=<(H@83p+cU`qL}vM?Y8QON?)wI}DxaVcMgU z_yQqnAFtgYEc8pi=P&e2=#$Cgb*J%V(s&)`6tdnJua{}O{$Eq-%L!M{#d(CX{(KPi z(|Ly0`2D9wV9|dMI=z$hTEBk}xT@F6_CXCqw*Svx91ptTi|>>4UBAPhs-4ogZ%-NkJvG0k z9tWo47oO5D@#{-I9{daB_?@SaW7^r0W7)68c=>N8&!2n6^8BVL<@qz#kf&~+moLvh z(AGXK&)>;7y}aBmVGow)Ez~1wmr3)jSeMT9ozwA{{~U>VU+iaOYpwc8zNle8ve*~N z;`^!jBG(^T?5pOBHacA3%@?8bz_swK@o>DK(TcZdz`3C2vA)&9i;f7FukZoyAqW4C z^zyebUoOV+DfBY!=z?z*`K{`c?2s!+wjU{PF8m9ltg2UDu71i6ZS?$~bUyXqHP)%- z1Lc0ta;c}j$a+P6zSHtI97KiTrqj!1z4!+&QQlN~bP#(1Q|VDE;+7uu_OH@E{D6^V z^Vu@I=BN6eX*&1F$p-2Fq{DZ9*!`hTxv%!y-XG`D$2^|vvvw|Sr-wPtu^jUCmrC`l zCx18f_*0rcvj(b9_PEbir9Zm*9sh3SI~~tc2ob5oM!vI z;nkk6L?2yM;`byzhnyF36!uh=p6^_Kmb)GZy)hH=b3v&-^~B|;swXv$vtM!irT%yW z^Ht`J_2k{(X8(CP`OhimFYMrvm!1Fn%lV6SqhFaiKj>yZCf#Q-UjhHSQ^IQ;Dvt{b zn6IGo!GE5rp2}C&Kl9r1on?Bq%Ae|dq^uun$Y0DyLhrWdxb_v}`#=BqQuzG+kcocQ z4)ep9x07#_Vft8ZJ~?r&r|X=Q&UxeqMvR{P!2a+@!#@=M{o&th{_ueqAC5Ubj(f+5 zU;PN>(~+Oc;IsS_7ecpAnaFPmQ#KF7Jnq4z^L$;u+}=C~DgB^+rF|39_3LOK zoqx=Z`ns<0b%q{K=gl#?IUfPKV*S62=QH3_XV?q+4v_K>2Y)%v)yQ8g=F-ojo}w>% z+}ksqceXLKTo3YfD*AUB{lS-GFFIeB>@Yu__bMw#^4yE|WxHy9QT0(hdZXP{4*4^R zdS&tb8QDP-hm5@9UP=|O@|Wf4lf?6=(B#Xj+p zr!8IQVv75y@x4Qxb7}x%6jM3#%HG|KXZ-2YhOG+b6td=GJ5kfH%8d&o1b|_groh>1i^3` zhg;u4bUD%a!~FJ@2AAL7aQ|*M%yb@1`?j*5*q-9YI`BjDC)HPBo~Ld60Cz0h<=6ZB zhB{}FUmx!rKI-YrPrS;n^GW&osIR_nw(1FQU!4cM#_{B&7eha6yEmnZe zX=J@#9?vhaA83A|b3Mg=ee6>cjxC{k{km^VJD>aU+6UD=tSt1N&I9OtQ(uR-kG@yK zc{E!{=N|J-PFMDE2qK)$O{Q@^rJiiIc%2*Mez4_V|D4OiCWbI(054mR^wH1ty|^XE zojT9_OEF%7jt1h{es6{RvR_l*Qm?E* 
zd|&c^EZ1TA_%!)kG%5h6?=R^*PpjUeh9_TcV4$m=Z#>rwG7JXZ9nJHF z?*}ZyWIp;hIf;MqZf3sfUc*&S{)f{a_r-NSCyRIdvLh~c*`qFx>KA=x#X;|bj76Xz z8~1)bu+ZtR8}|_(_iuEbjrrdX5S>#{K7B7LkMo5*ry<9B>%3sGPaEZBV<9io=JWBl zcweu`7yO%|tG&e!SbbiUpY=Yuvwo~VKI^Sn0F2jZi`{lPb$_kgHj-}_a@ z;awezOpm1TUZK6~5{yCD03Tf+?7}$?p1j!AafX#gcrX9=ueNtzKrJZhYexE4qSQh6WR@nK1a=FA`PcE=L#6vn*E>D1$;dnz( z-;ql@R@gi}?KsEm?lK(fh4;Sj*YS`J)|V_S!%;5l_NrfdJVmSj40I^CbQZM)^d|Z` zT|H@-U&h1stJ~{u?Rdxvje}))@(bTbnuG_UfOt~Xj)#=!JWUx7`BS%6ZHH$&>zqy* zo*F~nZ!5=Bp6d9It{hK()#DswIi8~1*Wo2g?`z!5;@(Pm+`Rh#z>e5~OmLEC+4aZ7zqEFqpLfDJ9B~tem~6mMePm7F}^?N?YyFZ_wW@3y!DUTD(fGF z_xAt(0^Y-$ES%PQy`R)p`uQrrd;7bc!uojlEdk#X?_Hi#dGFE{2Io!euhhflLn)9C z)GueFUHYP*OqJe;f~1F1e9egYQ{3~S9I+l{xglL1J{PG>U*hln)?OFyUY>3DNCwU! z_0CuNb$d0i6&|?I)78(2*ZXseU#>5FzQAR?0T=oFGQi7Gg+L?^IXq)KO?;N zqu>|9tS8>rE8xA~P+B~{>osEWu=f*E>0$N{^)vGIJ-6+cm0KeFHP zJ`((W#M?2~x!`e!r@p!l?N-*W`x!nG>CmUPZrJZ(^6z4-YYoNwHnM9{*ePIJX}tAu zs&&rX8B)Asg0JUM0y#dr-Qwev)&tUbZ$S?P#$4#|+ zigqvLX0w4M#d|R7my#c3x(py?xp`GZZmQ{*mE|V%L^1z;(CeRl!rRr>87UWA8%9XJ zG1_~L!{rUhmDAaht7R6^lDkzAj(iKeqqXGL=QY-USl)t725({fI+Yx19h>mv^zf39 zt9U<2>nt-NS6}<)_oiI^*4ImN^&RM=r;-1q^wBrLc#sE?^^KQDgIBIw|RS0 zUlUIs)>rsGfaamPr${?tam7x!xu+#Rde2w;414@Jt3=;HpcP_t?hX4O=+wREJx&Jh z3(Nzgk8kt~J#wFqXOtJ#L*p9W!6%rHhwyI>zDOVE>(}{sb^S{7C+;@zq!(&_8Gg!s zE$icVmHN$8=^sZ$)NgXp#md3utM+<%^LLo9^Pk!m%MYxvc(n`n{H$FLdj4%uzkTQw zjNcZ%)-QX}4n{W{4^HpeWQR8z-eK52blKr&osL)s&Enite!lZp=dtqhPY-y%7omIY z!yoLn_~Atj^SQpu7|IFhQa!~V(oO5XnKG4f=i`I+nP|V0MY7wtUX3WoZIj?h^CGuX zSxy;F>-Chs=PUEAF-&>Y4&`!|6W{O(!^?5{1mw9H?_pNUXL*Fz{!HDzqCoz++!S*C zfT2ry|+Vl#_J*+?GiQJ1f5)a5T5cX_NQzu@w){yOtz-?09qJb%RF z)xWZF?=LsIxrO#zX$2JRbAOcgsKbe0`FzKtc^KuT0np}qWBmf{Zta147Ic~)+B#T8 zzkC5rsP@fQ1zz#p#Vi>LyDD20cFvZcaQ-fGxz4xT8(~xwF5fcZ;nN*K9(D(%ZyEkI z_d49fb%b`kLVeim_Vt!RFS;1mz4@wMJYJ$({FGkG#&=u!X~)e@$79}3<@UV-RX5^JwME#;=HSe#HEvzz4J^x4~!4)o+RKSH5=gWIgfk zaTIdhZN(>hLZ24xI_B~0zufmJ@8g=if19Th@9V+1q4*wB8tpwa_WMSU^zE>VRWckK z8ucXhi?nYM`!U7-N8D@IxggzN&Bnal_5NRe)>b2=*!Ml_$_RV==Vx6K;r&KWewJT6 
z)xLMNAL_oY_W5OTQ4|V~#)UTFrCGKdAP$a~5`P0qwH}@#NnXFu8{Z z*BPCZBf8vK@0pg^p`V{WZJG^@!dL(mwH?ZpW+d4Rr4HFxP`fhvfMd z^~2B)YPVbB+@{u#e(v1B7jVzOXAR5ydI*mETodSo~(8nI_&R`sNUMQW|$-noa5!GJvEDpF!Ix>e8P!7 z$`jk+V`chLE|$R!A50PRF^w+x&iv$*%9CHlPrQns&ewYndap!sP55iUw?3;q+M^$Z zMS1VAc(w-R+3TtPp>v|l|KLid`}|_6^Z`qc_KyBl&VL=2!c-r<>p{Fk`^0DZ{6r zzjKen<)8F&t9+53(|U{M>Czv?`?D^--;?i%UPQiyecz{dOJ%p;?)_2Yx!OTfWQ6efXK+Cs$Oa;^&)7 z@zc#4^se8(c`ye!)-q)6vTHD8pxYCg&Q&zJO`=AC8zNIgpZ_+kl; z?YkXQ6}2<{%R`UDGkty>d?=^SE~RU{)&9Ed&y0g1T-I+M-XZpWEIl>|BY<{5I@|d# zy{GT3@zco6k;VE+r)RfMUQ<(Y-T4L3F)@MwT;FVeZz-L1x`k7$Khx{=h1K&)4_{RyE^Xgx+-$zHPw6dGu!ZBk+fpeB1cA z5B#HKZHE6e;|~NrgxM52SDEdkR8juDJ+?1&K`GmvZ$LY?;0(ODUs2rK2U3ub+0Qm%cRv-8y2oE3lXP1kGOapLAkcrK1L z)49sY<@nl<&abl^7Rhp6D3w!AxAKr~^uix8IH#BNkj|H)XExHa_)eMl-*pg?a2tkP z#?%h#@8$axzmN5fR({W?`+H@38q>Iw<$QmFr&x7fCcoR;tB~8bIUc=(R@}=v%j2{C zAunwV6CVmF>vSfL_frI>Ro_Z;Exp+a15^t9`Z3ha>(j_q%j1W~YbQ@BR|? zQ9jLov&S57x%|(v-n%_teov*|QNOZ1&-u3Saz3)D+3NSa_dtnd`HwbA^O_@$pL(D3 zm?A&qV_i8v@DUsJ$;yg8`2*+^>OXj``O-_^-7pQ_D=K)81EJ_4-B)7IQti(0FLbx` z7{d=#!VfQRrE4BUI8?K3+&x~(C;a92(aE74Fmy`SxSFLNpVoaptzYkVZu0m+@UelP z+8=xdd}LVrj<;M$t&K4LkuLV*27dQ+tg-&A?}rXXKCOFlJ(6(yB0imU4nmHHzU*<% z^Vsejhv+m9@0rz;oCJ=EL67ge$o-*x5#H;*?0CJyu63ziBy(QYyx<$A@ANKXv!`Ym z>n07rf2rf?nYGl*JHx}B%iSM(qwCQ=*cX&9**)5KUHVSpbGV_{m+PBFZPvn{<DF zFQIpby4_BoJ|l({=dkG@rWP*jN{dB3di?&s-h1qs^^YgfC5raezKe}-;3fOlNJU;} zoz;>ztv8k74^`m54bw>S*pj0=jG}F58|rDD-eAk5@ zzYQMoFUG%rEYTRRJ)l1JI&dPkJ^mDtit*LZs`z0lin=>^`t>8zbLFzS0*nzs*K z>3AnT=)U45zeAUK{L)YOy8;_NK`ufbP%rj7-3u$mkI>`UQP<0QpMiRg6w!`mKFTS> zW}kz%dS6@f8tJdj;P3DQQ7+p7PUWSu7FoEbzR~;11Edc8j(ii*A3KA8Lng%-k01B- zsax+4_>JyQc(W!K!6)Y?fvV)4K^?U+tR=Kj8eYCx2~l zln>&|jyYVj!xdn8f8Y24{?LG-e6Ys4zahI@^(ySK;71ephUg|Xn%AO#vxj@Zf1@8h zo#d8lUs!fM*-ZWYk93Z&vkmVS{T|FaK6&v zbWTzC<0)q>N%Y7b&^zy%CrRE~cw8QIpHJh9@Tgq&?Cz7aaUWq2d}tu)ned;)?-~rFTh@p^XPogFX~Qwj$X<}0P){yLD&!@;XF+?O{r%Qn zi!Rn_k4F54hn(>&k52c5Dc9tX)+zLT5a}7|dDCB@xplAdErv(!E4U#yH;Q|un*UGS 
zXyC~mE8HKx!u=a=cVB$%ywJn!e&k4%j^>+u98FJiyN^ahB`nJG~tCXY2P-ZYRp4o7C?S$M$dGcfe}DpQ;}puk_<@fVHw4rqUB9 zD)29q;e*byp4eT1|C=)WI;&1$muY`o@~D2mc76^1k**)dhEzKDS1qH?#T4_w0ms)F z{cq?^9`2jxGkoEfo>aeHdY$(SeMeSu)AbI|Z+yc~y${84+SX(Cdb-&q_}P>6n&fF7 zC;cLNCPJSb1+2B#eZK$LIpTO^C#XD*`_wmD=U{(j%j$g+txGQ5_#Q)`{gKYMIX=$2 z2tO3xb)H0j^uKM;Yn0<=d>_0zUljdan;dWFrS5B8Q12IMpRK3%Hz#TTi_VEXkuUWB zaOV9`>x}i}FKztSIlQ!^X8VrymHON#`knAw0^r7cFzbLGt(y> zOjm!XC+9LL$wp(}kBSJc^DOuC!Qe}bZ?f;$?!=?-Ymi=sNiXNs&3AwhhLI0lBx&yS z^J?NlT0g_mRd3R5^4qZJMDjZk^3L1GG~xIjvbCe=4Ze0R@_fT9f?hTPTp^$F{*S)< zDLSfr?yJXpNSi#r+DG39&mMI?4fL?Klqa_fb$>X!$n)u41FjJ~`+L9ldyIwmd$JQ4 z|28{UW=lv*GoV9hG8!1vQF&^^kwa z5t~qYk^PTO`mpP23l?(QkMA1LbzbA~nimf=JYI4-5&fO-&(pq@olgD660|-wywJx> z;|t*Ok2~IUHUe-%F;CfyObvun^z&Wtd*;oxbqtOVlyI$w>Hb$|l+W!VN~+HPYFr^d ze+^yzA*9i<-Rbmw6UsCBnBu;(#b01vVRsH~^|0(%;$^*Pe*&%d`%<_MD+I4+JpR$K zKd|RgPxHK*)+Z*S9MO@^cD<(cLgn{)ZmrOFvU@bHiGIm}=CztHY!kiEUv&4_U$zq% z12?qIBQ)M|otPb>XU>Z!sUIe+M58iFK55KivIEdfW47NVAxkUdth=j{_x<4`IKHL8Vl;?5+eusY7 zyIay1isPBqvjc2j`}OVx5OB(>Q=yZ0J=GPt}eJ~UZQvONwYzlh7Gt$!&@2+eEy$tu_A2m|v zM*M+OyQa0dMvuOuM*QTf_L+5lSN9dF{7Y*;ZsU^lcYfem7MFB|-03?vlAH9jMXY|Y z->dh(H15idPV3%qjAsUY?`AXV*C^#rJKk#JioVB3xuL0(9rbp-Wv72wLcRr5W#CseiLPV%`?@>+AS`m>qh;;fLO9 zL9uGnj}rXI?A~*(l>7E zOjbuZ`+#Qw;%_M7aZV$?7w?Tkd}q{)@D0Et2?t@nUhhPhuFO|7CG@82Z^|8>+p+g7 zji1gw+zT@d$+Uc2VQ7ncj??lnOL*G&81bD^uYG`T06*K&`1lU^aAm%I1$?94I34lYw-BGD5A=?u^b_sJvVPQf zt8tuMH@Rux2b}Oo4@gh+#Xd*ZI~{IfxBK_75#cny6TR5~_5L67)_G^Zt#n`gr}Z7v zg$92QqLeZQdVI)@Yeom^0WlRcq(&iR(7tUg*_Pwi`AfJ0@_B+2j@l#bRsMej=vpTX8e zyT|(yg3D()qw~8g8TNyZgS7L=GU|ImG?#n+p*XK}6op-h_@Q{m?I_^hj__5>@ds{b zp?6rpT@`v@=nNPxaD=BQ7VxKoL5xQ|s7kL|Ye75D^9B6W`Zwt!Jr8+uVKrYokw!E<6I@V!}+(Ld(i$$b|m;=xnYmj?wCc+43JrH>bHW=&-Zw( zA8i9#(wEk{EMYP~RqlT`zPI@2f-by+;Ggm1ZH~X#uL1qW50AEb89#!by{vy}oEV^R z!Lk0v52QC327cp5@KN{5rsYSSxyg_1&X3SjFNYrx8Q5`KUCwGBF+An@>kjAvw;!19^%5QBQux@N2vwUB)MX5wdxnGg9+5+ULJhl24t- z84UP*)kedocMZrdwp_lx;q5k$rK5br(-rMC?qQA#tXF6BcfrZt=ekI_e8Q97r7LzA 
z0^P@FJUL&r@B6PXu%cblxvoF+M^-w0A?Mk+>T!kFXUQ7#HJ(skt^}&qcg`DDqy7g1 zKkCWyS0D6n|NGqUjP}<&abPgu(N733e(eJt4Tez<%3Eu_^97?LJG##4d9UZs#+<$^ z?z^X_Iis_~PJd_2dwOQQ%H;4=d>_~uc*nin3Ovy+P1(8EIGpDD_2hF#m+j5=Pb50i(`SL6rSadPS9%M5_&#y7-M&iZ2;rrQ;r@_k!S|w) zU%eY|uHJ)0xPiUyQ*VLPMETM=p0TIn&pUbyj_lWfl zaQPgm=?5SgMHcFXkFVnR?Rq*7InBmhzm)ZG1Mq3>5&T7YW9FIuHhd%=j`bLYPrP3y z-q8Qql}trC$UPT+^8wEwdl2uiy4+rBU?%UB2g0-FdY_{Cz~u%|);s^LtapO`LF8iz zx%#L2AL8*p;zRE+U}5hM(kq3$je9!f%jE4P^iP=%!h;3SKQ|kKnbSYspQT5$$6X&y z);rGNe>J`Hxa&!+yJn9%KU@07^^)Gh>zVZd>n8=D$AZsj599M%4-fRapGn`01)hZ- zU)4AHDlfO5{D$irs5{C*=O-Lq`lhT$c0+Fz`bX>ARk`T){F1AQx`#Vc_Y3`VC0A9T zpON0Ce;y7v(8Kujw5tC3Vb?!YLU6L{*`G|W%-QDYyFH!ryn|o)*$;UL<4M@{{a3$e z^_HDK@F5TPbX;Y6WFIe;*<@yq}Y$Iv=KYKy=PbdPx1Kp8SsMi%S2@#-cx< zm{H)P9{L6pj`W-G=Fy&0>A8)_m)8Ch{=PgtcXe6MZM1AHJy)aj0T1{}S5HR%nf_FE z6^EE+$7UuM^IFT!k)U;&=%{bw5Q7D zQogw}zjVb14N&dCc(!0M|3xE`j%n?wEv_f>`QDH7o4mhjKBDsp`6JSE(Vi%T{h)P@ z>+8nR{sto9M91n83nu-yxv%pi+D|{FT?M|HT@~+E)UgXiOxdW@D}HB>JAEzv=5*>j zS1ng`OMqU;Xb4Js07D_jx$&_yx-^JE|Wa^JI?BfYUxL_0^4ETK68s z54_LedOH5^8d)GX9)tbTCy)NImx}W5_Fo15uq$QHweUGy+A%r_-`Bo*@n27mk6Z)2 z_c%lgpU3xfJY4HWQc?c3AP}ywFY$ZURf`jy6B&4)r<1>PkfwausQ0^;o%v9$f%Jj* z#d!w6GpFp#?5MY&_B~`LQ6AV4b$+X!JZb!XDSPv>b6fg2??-01sr2v&;?vq6G5KZL zo1?@ld(*|UrH9v=x`6&&KCS-!K9tiuZ?l!B^*P&jx_~2-JN3Ik)SLb6l=E8Y;k!J4 z(XYlmO#Zdml>t|t*CwU%3BL|mm#+9ZLoi7XGZ9Yv%>7qJ7~*67abC=8w}gHT{VTmI zJ&+yo_SSkpzRL9v*IQQtW%B~@^D_I<--vQRAIn)CdSKH0Hu!N$Jq#qKhl_Qk(8EVU z4{!4Fq<^)}qw`BWv(|n``nNbw5%c5e^ldig{K$?tAM44lyZ%ML0Sk(C|3d$cI=$?J z?|&uwH$+eSF>HTiKe_bETMgkP{d*|NnHTLH^W1#?bo%$z75zKk_0@>u>4jH&>qBqo zf2Z@mwJzx4v}3lxYhAGaoqoPpaOHO^=Y8r6vlKs2cetL8ZzbI*qm@7O`VKF@od4VF z(#}?X>HDu$@`t{kBLC{Yylr_azx4gz+4&lk!}~dZ_x0cV11}wNe7pZD=-cLDy`$B_ z=kaOBUrfR`_fKbkZhCy|Es&SJ4$;Er@jV^S*%wO6^!@P@zx!E-o{GL04<_OB__X8y zorLeTcf9ej>G8eVk|xvV@jV@%OSm&trf=3cU4K12z6I0Z^Z2ym(}u6$_qB^0evM<~ z`pMhT2RcVQJW%&^j&Ic8%9lMB<6D9RxSo#B*vsz&^~Dw@d?$Vtm8>@+C12Fks=W&W~7s zI_3DLbt)QpaGYn9%O}5o6!A+}xPH?NLvItF?La#BVccsljCxGD 
zZd;vS9CLa#zGd-FZfkzgXM5NMzsG=__)*x@>nGlBnjaVAT)e}n`}?v3w7*cS+X6R8 zFWzA;<`*m!u2?^bc>?)KmoEwVU}m^M_=cxSZ?~sQ{&Y`w>E&;*1g%f7ekKPa4$twF zMfG!Y4^HD@I?wl^@-5!4MUUG5NoIu8yra3CSU5kw$oy)*(7Eyc%Pcur9qUQ`10I(B zt$ntE7&mwx!{h*!W(l&tRS&-NM|rO&U$%av{+bz%G8Pk)nmv43XeQ;tt(c*_C{H!lr3?;;f2 zDd;@I!_@y9@ss+A{7z?K5eshOeDe~X?;r(NUFzX-JXk$tJPDf$p5xF|@#N@K@r_da z(V&n0u@4SO=4-uaANWQ(Z@8Q(IG=c}`-FcZ97FUqZrpICr|$z^*5ih#U+0@VUi&Ca zXMGgU{r65dLeak=;2B?oV?6!U5x)=onNT{6_Y#K1Am zT07?JpEiHszT@U=KGD2~xu8$L+y;EzE?dJ9ghbc z#Frg*Ic;9Vkkd!J&jeoOZ(bC1#yZ(P1RH=GSatTd;3Wg=7n`3iSzvydjs^x`;VJXu z5a=kweXaucQ@~5UJxI8nECurRareo#O+U2Yhqx|3vh|G17|X7vaaQ~qSn_nx^_2Od z8|SaBKNF7bmi||nU#912w4=%;9S@z|PVa7h4?O5%`U92p1*nho={?{lo!U$MEtmHc z;LG^#0lw^U*Du*)POtI{?iuib?Y|4>f2mhg&Q$m()ra(t8Chn>OsY>Mo%K<g zaI&9zI^JF5&_em5OFMjk#W`>9p9ei&<3sr!tHq$3=d$n7={x%peqjf&E-?_-wTn@p8lU}SR zK0nD0yPhVW)*=0w@bmQ^Uu{=>Pt5vxIestX6ZbAA#Zw;h^@+>NVJqI+``zGwzW$iU zKNj)e-)A8=gCLWX<~QxO^tzpv^maiq#(8h2D!uq_g2CT%9s~=H`qApGcj^vWH2AmG ze(;<)T}!_G{&+U#dV%&WTMX~3TD|JlGSu%*x8~jbVQ*-@UBrjH$R65$IkBNU`|IVR zI=DX?_iw>>x_a^#lk8XOnS#!_XBwQ|r;vPWA8ZclO%AD?>`~``|2r&O(u;mJA3rt! 
z83?;W^A4ShodbHvZ=HLcbRO(3$Cu7#??u0x`)9VlJFv;)wSFr)Wd~&AKCZUp()m(P ze%9+5;S}>b=78529GgN#~_e9_Y1td$aPsq_tZ-L)$v9z02VTTnx+xXANkF zHQ;j|X!U-l!%aFD80Dur|JL!8*E{Ss@}D)*eS~~V)EDt4k8w_v^0^Xl)XUc_cAxT4 z*5@1_igvF>{W&PZox0t+f;LOF3pc z<*S~X6n~?|-#ZBp`JJyiF$w0{^qNmpM>W>Tl}F(@So#2vnuS8 z68*G~@>M$w9`l25IluNt*yRfAL=kp*!1rz}jQg#|h@$^>xS^ikw9eA8A8fwX=d#05 z&JpK3$9+0|uXEro4_~#{ed<^09qJqN0Z)0>zHRdww>!2W&f=GQKG{E#2kdJ){f{|a zX?zd7r-L_GfiLn&UzEQC`5oCo8=a2+H@UC(ru6+&uDdpnqF zzGjTh-ArdsZNK|}*!ZAzuX^I=r3*Xgh_|cmi|Cw1)eg!YeZu&q{#8%@%=(|~j(WTg zL-|LyI4O9&L#Opz!kfJmKF&87-dk4uzEx1=R^e`0@w=8TyOQr<7(XBnbZLjL{|a9{ z`7MK=*6z5=^SyGrBkl=Cx%K3W&R^8W#;Kk70jG6Kyr1FY^SGBs{Z!NM*Hd6T^+yM+4G>iyoRcg&+;e@?1*n_V&|>U~?(8_h`!ots*G zLYFA+D}2}Nk|@8hOXmCsulF@UNAyFzLonGcF)@R_`a2(w##{889pYk&^9)hnMV?;R zVbj((?2yohz3|Qc+~xVdV>>L$FX~&nx4pio4r`?QBv@B8{jYUgoTF>g0~%+0YQOW( z9AC5Xy_TM@8ZlpZRA0RVD0@+KaK1``NwKfT@--%-n#JZEn)NBhzdp7i1TiItx{ z>UN{%`vW%;3hV1+9vth5|BL-G+E2_sF@isE&9#nr8{iq=7xQF&f3g3a29_K}Jxp%k zHxQ-un*Pn6uJdZs%1z)em>P zd#@!wTPyP8Va}J^<>$Qbvi$fw;NM<;e4aA%@^h2*@9#DFp?$_4KfV0yugH&wC(F;@ z`2717>z{=t=k?^x6mN`s-amB?rKN{xMT5={nU~C*9Qk{ddSADm{EfBasr8W0gJ)ij zy1!@Ti26$B8Kj4_Z>{#s4!fPS^uf!lA+xcDk1MgRL3vz*l;Lh~=j>?gul0Ml8RM|_ zFF0f9;io)4 z*2{Dsn*EUV+aBjSv<_-`InJIJ^S2ncHGW{;Z}w|FdEWTNd02T}{{xr4n`GL(Y>Nlo z^gnRWYSp#*%nyn8G8(n`LSm3*`m$?wrhzVEB#`;kh% ziz44sm3(t5`5vm|;|(;H_iQB}p9GdXaDCVMcYH5k8{paT(pj&wev=*EYx23_P2S;T zH`Q&Hf$=ErHKnx_x!6y?Y5i8~#oG6twx6DF*;@O1D?kvO^Z@k=?abyTA9r+*3H{Q_ z$;SQsi}sstUWg6`m!IbbrSxa@UA63}>%V43>k$2M%7l zx9phb>%8006#HP>N7TAa+TrilOYbS3_BZP{w9NCT9WS2bOha_59K8oW6nb2Gd$`;A zRV`0;OY^8yY6;rPB$dfH(N7xvRG3)*@U>sg%Rius$?Tl*^c zy0l*jIxU~hZ;@`ee9IxDkNu44eY3_bp7Xzr_@(nd>OX8pwpZHW>kC?k-WU0=I?u8f z>(h)UAE%0^q)!!3j!YE~)=nAUD8(NQ`q(aQ=dxA+2yS1Dzc)lb*ZZ)9Z#$RO>+m;F z*}?4tUe@D=s9$HKOU{|j`Y4{~huh9&1w7;1&SgbB*B>}u=Sy~2d*t)?n$PtOijK({ zd^)xh<(#g0m;X@vkd6l8pNV|Md4;HV^DgE_euSBG;J!Hbv&F~%fmNS#c^xw_+iyD# zg{<--||{@|@CQ zPcN6l^E1UhMDXF3on9}U!z{ydU-6#kC*r60qjNf}&p)39{@*v^^}o~09Rz;zUvmBo z>c#M`S$M}8?w%muN9P{$IkGccpl=lH`pY@=p 
z=d12FddcrmL=oT3%c!?TEj~Zsb}#iF*JsP;jNVPWI~`B6pVCJ?>-nTtdpa&LyNvj0 zw+lWSb2zPkzLXubxS}6BO8Sxcrn7_ARPz1Pyp~>NzUl0sD=PWER>?P=9kip8?@N_@ z)7e4$EBXGaluz%3={`Q!nH%7v)_t#{(tw-TXr7<){;fyeJP<>QsRx9+0#5c(U(N3+ z&4JwU9k?Da6~z|kuF0PxrTf{szuvEX{#pLulg>%Af1iu|&4rW_j=zY2Q~FfzLtTie z6XgxX<2~m%CmwVTqI_bgCtt8}lX{3Qt=Uo4LVpQP`xiRT&VJVbeE;Rl1-*Zb`+C>v z;uzQT?$PQ9>z(r|pR&g_|8qUpd@Trso50+Mu4mqh9V_8e&gs&5KEcfoJmLMzquajg z$o+NA--`2ayB)AN4~PCsme8&;AAeY~&cjV)4&S^IjD*{UcDM8gJ-($EoQ|~q{g%)( z@3`*+f8627FY>#5e#`cOmwP(fmv{%l-q1Z2ohz=}sZiwqq{A2I;_mdY-X+()ZtBqn zlC;iRj59(1_H$So_!)A%4Zj&vtfTH^s>^EwzQ(QXJ7YgMty z`Sr&P?v^uLP`g6j^xmED$o?LLm>9nBn`e>=(6P(Bq_4ig!<#*<@mAxk-tDd@SGhco zIvmW##Li8DPCCtdwLV<5BMXNkytO0fpzHsH`@P^3qdMQ=;ejjNXMaJG+0703l6%#2 z()mG+FFHS1dyBWW6Xfoc)rH zPwQQ%_Ijbdet^Dk<`^}fVMLvuoe}4T|i|4rehHj3}uK)5S z)1P}IzX?B%Id66>?8`Ak!m&NJ&N z!+fdX_wl*R4~}2#uWU)e-Dmi{{-lqt3~%S!mLy(IseX1&ZOOfp;_ckolHhX*-o}F^ z4TEp3i#p%(CC=|`EY6iJS?O?N&L6z%6=APWzGR(+IS!r+dYe5aX2rS%#y8u?!#!Ao zliaJHDxyBqU&08e!;qMkfTG)e!pUd~alll7q9ZXjx3`2BD5biEI%b52#b z!Y*)eD8}n;QQywH9FO*QCfRWTxBbGf+YzS2c{se8kUNd-Y%K@JCU&$rbztqV|Pf$W^R|YCV~HrVOY4vb`S( za17H)-t`?B?I%u>E5I`6_A~t+gvQBwa>C_my~`Emo5q*T0A>yIB_T&)7i%3R8(V2` zEqU@V*E!nd>Bqjy@^r7!@oz6rbHB^-H26I&PaGGi5vG?XAJ<#*t%b`=JyT;yML?UPiOmry3W&U-AL<$>?c(1 z($B>_KkSoUjQd<6)O#NT(GR#CM|~}Og>+al zJkIO7f7tUiS9%9q$`4So+@0nX_olm?jfEZ0a@kJ07svU^7rR>Z!#NtmfAd91fXnak z0W=@9AncnTvLAgNQQkq1XM3>!W`~`=4Qn~#qMQdkUGr3Z&u<&x$)~io-T<<($am2B zs_%KLzNGU5r?=K|Cms&Gce^ipPVXTvUFQ>Hoty6bpu>sIIgk&QKXlni>(}Y*Z`%H` z-uEbfPlIrhYwcfk#W+v-VfuzUyR87?XPEfO5AmDy|7Y;I9_NoqFGZ$L-)}@FhKFOF zm+5N|PUp@se5#l5tNo@b=P1fie(n3Q{}3PT`ae;ee)0!;PQhZZx2|o^}UD)Yh6?J zpX8nUj`vsgH~tiBzbudKApOJkSP9gGub%V%!xFscC)}|L-rq05k)J=j8A>G_@xN{w z{7;qgsXZ~i8NVf0r1#>CZ}1i7nIKV1!H`+()pFnWy$Er%bkzutj}7w z*w+dD$MqhHK$9I1j$t~kKQX3nc6+|A4_UIoS-$tZbUxw(`Ix6T-OYDAZFK0Je7-Bf z_8?YxUCm2>-O~3g3w^)B%j3M_1OP&PS)`R8^J&>57|#rs-ZP<{(*Sf+a8Yl4kCFZ5 z1>!|pv>d+K8Ry9br+p8qJ#w^2-|D&ao(t)Gk@#97K!94a?tBuQ-uoOx`E0Qw9bvk1 
zJrP`>bxV#{9G{B%TyEgSJ2RYj;{R5CV_Yxq!y|d4>ce$aTw^?`bN`B0dE_4(q)1nJ zvH#QcVI;!gyomGtY7bLST#TPb;RF52P8Y)@f8;5HyLr{q4u6irYrVfK+JW=1HAs3U zeA=^gxAfm_^1*W>#Lsx*zoXxhi}l$XVw}m>AGdOvuXV&KH$UKXXdU2&_j-PPw{gRe z?D!-pds?a1#gvHxioOIMPqKqxwHyIzOj#YuN8M{$(o+JdqyKcVDab z5@aW^e>9L~U-%loR>yctx-1>$2mSn!d%c~85P)0#fcyE9LmnUF>0rR`3%~PRhtqe} zBo`bf8%R<)qI-3Z!|wxoExi`y>wUJ{J$-eoPvHFsD?h())alsm>DBwGddFiQm`S|) zuI}n{9A4vFm9EZ}k$#E$tJk`pFWKwmx%p%F^czSLKlL6{Ss#_ti~5G%Eb0p!gv;l< zysV}ofz$bR$*1J8lfumO3Vqs7skU<`I|E!c?sBsy_J6ZSoqu}2HNW1&q2ER<6aFZ_ z>Zk8LW{++)e(YK9{af!AO3!EGE^YxDJjtc<{H|_c``UAf|m|s8F@;A?iB7~!T!S5I9ezojro&T5? zANYx^mVXp);fPPd!*hhxoID@Ea+~KnfeaJAp7_AZ@^1uO^Zcdw3we$2zPwbqP$^dK z^Wfc7<&w25$MK@@2ot`ZxPVQIpOwP*wW@gQi3^12WBu5qq<`e$Lk0gBkN>TF=j&Mr zd6;mRms(hOrlkY)v;B#WzUb+hRcrfxG0SI9Y@Q$EE%Nafz8{FcEq)RYV+fz%Tv555 zkj1}0i+UIFj07C!uO=^;N1LyCXTCn-54#-7Ud%>aK5t;-!(~TyM1Jo#+DFxQ8#RxV zov;mbGK0pc?AS)j-*u;&pR8JEaC6Yc7GCe=-9TZ0lb#Y^*q;ce zaZKZ-^t#^XyD0eiB+4;9pXUc$^c~iv3;jTm@>lin#159;vNxm`E)M;yabtDZTV2tQ zvt!4sKKh;sWr|sby;+lq8;KQo(Vh+X`@+XLKFfFO3Z{aO!6$v65A!ffZ=N?`>D$lp z@3?B6Uwl#hgfENth&4~1pnSk(N1iZz8!oZsmSkd`$Mak#`K9kU_To3gy^srw_hlBm z^AhqB^|`~lC%)Sm_4n>m=xypnU+;`hggka#?)jdCM@Wq` z8!G2%^7+n3+)uLlWk+4kyI2vpi4pUX)qCC7cR2K&PTVuMd|d&ja`O4Xui4%}&1;q$ z0_;N>yv_^e*LyonG#tO?e_dNWoX>Z@sXqDjPM^-nY5%l0M9UC}N|-_R>Q;}Qp)>?h@Gj`+H^_|~~ic%5$DLuS7sCcIZ2~Uth<2EWV!HW}mt`+HG(e53;dFC45C&Snru+<7+Iw%f%je7I--9Yt47EvBjQ0 z=;(^{e7&~=&SA8rYyC?5aoOP=o*wx)zL2%zd%k49rE9*@$~V%MPj;HdyL#g9%;z`V z?{E(}UnJ+*W8U5yhAy%M@eS|YI(#vo^040P+z{W*tDAO1|GdQU<~KRM&i*JT#@pr@ z5KK6Y&*HD5F!G68(rdHar;nw%#3)|AfIw z-%79Pd)CdZKVbN~q;EG`Fxjy6T>BwAf#bj;#ANXIeEbQ?R0bz#Y==I@*r9=azQ6QI;{uLB9vcd`=*_e2Hm z%n}^ebvM4Hg@<~MDv|j{D*3*RIN@i$>F!xRS6PSoMkU{L_X}RA4!WPYyOQsBD*4`8x>vyV zK92p&*0Y7oe5_|{{kz)}v<^-Cn?0*Y z_i1f$K8p0&dNz1ca)J8@pgm#h+Pc>n@!GfIdLvNcJs=Par+C>#&mj6@#2;r!r2(NxGW08M_`_=iL-hJIyi*gn?oaQAezY}!OttNBe^j%}EN9vvj>)AlM z@M*rjXRVJLYR8b95rCQ#+>;1B&Gy~l@tT+FJ*+)zIr~HTF@M|!pM2K5<&LnYbicYf 
zpQfE|>sa?4H~G%zKWl#T{J39)bgsK(kGcG7Jwxv{YJWy@Bz|*!gzZcGT%T&jyIzdv zsP!b;-PW;YQ%XJT>sW8_F!J4qU#)W8;D4#|*d#2E<4CzY1g$*cYtCMVzaIAWtA>TA z#S45~zY;#(?_oV?!m&JCuPW6KNmib%SIwqX=3!s2az!z1c_0=dQ>;_ry4U3=f%6st@xMZsHU0;V5q`|JIH81E+gclq(7b-?*UT zyo@c^uJ5p4AL-9Ty5@(bwhdEY?BP zF9rc`_`}b}e4J}u!rIU-_HZ`l^1Thw938cfmW_EiH-sLiUZvXxLP$V9|Af;)zQWCc zPx@%rlmEiMbiqxQuJbyitAT&&7ZcZ6a#G!Ax!Bb~YaN7@fs@^zjrw@6eakt3C&=&! zpT>@q?>}=uKdYerE_w<&*zRx_h5R&6^9t9Kzp{F=U8l1z7u?&jJE`w~9vx45 zKcC-t!SEuEe92cWurS^ecK?yYe9^TJ!r*E`1Ok|UH63Wu7J^#jeGrNznFXj zzMklOj>`?c`)&ENvF9uw?sL3kx!IV@-L1uZ*vqeuE4TVGbE0`!e!bUA>))h@92DQP zqb_G$@1UGejxI)eHtKX`k9oOzH&FH3uy!S&Fm3}M`PUnKls?Z6hn!wuV96aj-Omn( zd_+CwAm17!4KMd}(WUoUv%@aWOJm-vdD$Jq4xi5Q^VsSK*|@huYhL8@cHB1#K0Dv? zEzW=OC!gQW|5LSVcEstTJavPV7{0Ks@uHO@d*J_L?@i$2zN&li-$)vb6iDSPlLQx| zpJFR^mcg>K*=`-nah${{K~Ca{5(<^Zqp_^S+T|!ePkEBF5=zS!AWi!zaTW}H?Q^<3 zOjnt121-9K?JI4%P)xT6eJvPxg_M%$eZOb<&2L5{Z$SI!|G0^K=WgeobMCq4p1c0U zPOgzdK27R!7(K1|nXkJh-*6%y<$SUE&?jo__Z>Cm_SM+^D}XQSJ62Qf)|zs6*OVij zEca4Pxf>JZeBagAgXb>QcAC3T{pfa+``fe~Hu}1n$%C&C<(8u&IB(aom5=#+hst%d z!`kf@>vwtWw|-u&-R$y>ThvZS+EeTY<$cy(ACT&TxyQ^m{pafz>}SkBhxVpx->$yD z*WvqsJ`P0(74F9FTO{MgwW_b2K3|X87~gNx^_GphSqt^W4dVNF(l#JL8L#=v&pj=> zhNq+uw(+2Crv#;cvp;j5K#1cP>l`cFVCVA+ns3qbc?QSVDcDa5XXDOly-oP6hxRx7 z@yYl}x&O_j!1rOTe=)wLrB|W-0hcCrwpkS$<$>Y&e4o{uZPxoB&X-t^6t$kk@GL*6 zdTK$t-=yV;?}vb|)93edYw?dhqHw(3DKF&50=q-W2##8MbU@{H5OgvBQS;w=8@Ubp zX}7MkZ``d5alTHqaW@n%oZBtQb8njy@2uY23j*NE9LVXLoTr2Li=LXKGJxZ}k2R(P zefKQih=Bl(_%Egh?yAvOzmL4G*TzFBF7MZ{y>iXk52EebZ)uN^f?V?(C1bQ*`*CFb zm-8LrhxxuPIeg;`nc--dgBT;RQ+6q?<@KH zd$stMyNrI*t6q=4&&T`_gPR+Qj=V0PEcdrHaV~0GZW0yx%`>|haYu>j0&QQjSnj- z{c2sJUy&Ya(x1d1<>^$)Z>cH&19i&JX|?gYnjD`f*K?2JANmig&+#C{PhWQ_r8Bal zM0>mo@!9k*NXBgXY?()A(pxaNQHhtzA5-=dA&))WO6!qlF7*bKDn2{cSzohaIeWl|>7bE^$Husq*c<9w6vs*$fS&m?{m{DTNn!qdU^_!sSta{q9BMIL;fO?a#Emz1ke z!i&q*-pv&_l&e*Q4e+zcz&V7&MXM5oP_&ycKng9OJ-&$}lp66Kp`@ftO_t)Ij zZ{a0vtvu6QG*4!)Y1@i-Ch zUtJPPdbE{KPB-xuOMHwypHz_14Y#HqLgDj>mIAfwXZWep5-TB zQuxm(93TIkUf(~V-2WNCb?PA6r-T2== 
z_Le2OKwEyY!jG4p{NCcnuL?gxl7x0Px?#)FaXw86EZcn!%FVkzkf86fPYB;}?@{@m zC`XAYU-rDVAId$El#|O;PR>Lw&@~_z+TKa|_(IY?^!psAAP1q(y^{DO`zhuBW6Ac9+d=t|Ecao-NBNbE ze;)Bk`p7S*kK+}?lWzZCru-={Rrsv`qAES0@EKp#PNp}K_43>f>-o?>R^~;19|+&U z5Pr;k2$jQeoq_X*Sbtc15Wg;@6V5s4D!vZS`nwb741Bz1{QXt<+PM9+R3rUM+WT1x zLw@Am(Q_L1cI5mg35WFqbj-K%hIJ~3mvyOsOSkyZ?OGqc^QYlEZ9VdKUB~DJ5GG_( zuagS1sZih&??Z|4p=|eFh!5=+zlRrfNU_AZqIQW$zUSFbKAPQ|E-%&gWtd&AUcY~$ioY)ioy7lzbu6I{-&K|P@;+^6KQGUGA4|~b@3b-gqlx$= z{ZM6f=(q2gp&Xhj#`E;)dojlU#X58);dKkYo{#g-R*gH_zspNMEa}N|T~h9;xLlQ9 zwsAd~f4k&=*8(`wKXxjADUWYMKKk&Wr=F1fer~ONi{jz;L6Y!~34Cq0O1o=2F|7Pn zz9jY5MQ?}VHdkJ{7$06*fcH~U-|m}axuJfJxm(I`E_&vMvyvAqK7t4AM+l3OcZ0&kU@b48Y>$q0JweO=jJU@r; z=dL-Qqx1dpwornBi38Rp&ei&mkM+gvW%;u5Y7*u28`>{Bw7kEw>gQMe{F1kOOy_pZ z@Am~O_39UQVmjCSioka|{oN6lQ-24}&(&oh@5GmKO5e}Jx!m?zyq~w-X!AAiAO603 zZa~`)-GPJG{QmME<{$;D6S>C&Kt|CF02+{11DM z*g!rJK$akScgSE6J+6dAH|RLx?dSNrJmFmz!9O2t(Q;n?JyzbhZos>0zb#@_*LMqR{XA1dgk6EK`Bo@c>XyZ{?LCel?>U2E5vs= zll$gW?%xwieDq$WGdX{wJl4L?5#I~4dZYKYX!*w>KycCf6tBX&bR%l~rz!2|ceY+p zJWJn8FPhy;yK`NYU8DNm&&!r=p6Tx%_`E!6m#BSX_R0fN9qwUhds43d84IC#8Fis= z4`@2a8|7MF-xd{(08^Piy^Lw>ScNTy6-L@biI!OZO;_K}%HC$J*lU*Og_b^USE>%;G_;~wFHfwvQgTGfkXm^SA<9GoA1D7?s z)7R-D)0dO)*L2Dq;g&C0M1BS|J{lBoL1A3!_j4q@XdLpv^=-bv*5lr;`jqY2oS-9X z-z8;ymEMFH6A$u-zTZFe@7FrtqVW)^kh>`*xK@w)dU-z3@1^;B^d3g~3^L%vb@>-X~f zdlvDy>*Lu;Ac~8~A&A-)rOa8Ox{h>uKdzW&F}{`#UC=#&2)WbfDioi{+%oKJ)gboV3*F7wXAa zzgT_kpj`$$^Tffrj^5Wy&xSd1EH<~@@I2eXQA**;5BX$yn6IcV_GLA&JVx(@60atxCam!Il)(2mb6 zyPZ%ler^#D-=k`h^28q^g71sicj^%)9RIFqw((2zOsBoa`nc~>aZ7?_H!57W2e}?z zaotE~Db!ok*g>ST1ovV85cI|65&ks7Z4YZcr#D`&yG!HmeY=iJzAoqWmM_!sz}KCK zM{?hR=h)~bm+JRZvasKX8Sc4?ocTMNESE{znTmce{2u*L3wIi7b`#+EJD7eyg)bIOM<$FFM>E#Va;&Lbo`FH^TLT*ym8XvVE^O$HC;*Xk7J@tP`4-3H! 
z@7EuUr}ew~QHc&ZGw%rIuB+&QX{1xsc>XI3-}Su<+^kfBc?LbU^u)PpuLF$$E~QVLc!J3cvfd zLg3YFzq7w9?YAwiFDVDn)7p;Nh98o2e~*OnNO$jL_lq2eU7N(~GL@?&pUL;~WiP1w z)W}gG}-_XVQ^HXix;u!J0Kr<^<|;AMTQ_)XAfbVUbLk2s#uf$gGKydU9Sy@b>9Drsfh z@c!p~&o}6KIQHAtB!8{nE~KNHzqNQ!p2!*32Y$ZqOySq+0f+1T(D(h5av=R;e}!*n zf#LD^Z|~iFr{v4^xv#%Ce(6BpGbi0k5`1O7oj{HH`l-ZzJNy6F(BAYR##mokrEo3SsvRh)PG{9z!y7(3){q#jTt-GDUWF2M0QFEYr2#JcHuL& zQ(6|WQ-I(9lXl8p<@*9Vg$qh>_uBmVq;?AUcWQQu%3W-ypxg=Vlp45;+9{_1=i9SW zn$1p80k1c{CG8V@Z%g=>O@$(_wfZipSB`PKO^o9`2-6Pc76$w6Ct-N__~H9KX*u?zUbPg@xEpR67J`G50W9vz2$T=sE13AdkmcF}R!^lzPUc^%~-F)ok& zTV-7Kb0ruTtL=XoCU)V^!bi}u?D5I4b} zg+Fi}N(xGbsNjf;QCY<95ILUOO2kG_rF%7)QhsE$D zU2KY^e&YFjw(~E+j_`JNyLPejY=qMVI4s9cCnWm^&!5rmep_N6%IDS0KZ0^zPx8Ld zfkgct&wA*-FK^Xy`~L@dE7oc6(~-L`qNALS+_giFT<)lkC|Z-7*HOHx){8=f%bO|P z@a_J8xg%sy)|cp)J0Qs7ixuRLC#@^h20yXeaMZ2o5B zZM4ql-|#Wfqxlv+{~2AQ^kkdwm2& z2jD$mx_G~_c0c7ky3Q8azKX9W=S9l^?tV)*z9jV?$G6J5_-?6(>o}~Dc#*;k)@j5C zzC1UUZQd^V{ClU-dbJm`t+z_N)5r2%h*^p9l(3i&woc3CRH3sd#X#o|$d5SRxuJv$ zhJSIX?(>hEJWMWCx(fSsyz>43X#a6(kLaN~ius^kPF@D}4 z_&D8^LxrR82N95QWrbu5{Jo@TE2%{P!e6pqyv4o~=5V6Tk}Yt(?|Q)J@zMTcM%Rl* z7Z8B+@gdvPX!vmS)A;W$eu{_RBaQZJ|8PF|eXgiO(7|_Bln;x`pUP2E{wOz;6PH7d ztNGSn6Z(B$Z4l!u`5ry|CRoQiV08tr92sYg!#+$&fK+{!S`>XyY<|7*7j$#{5e6Pz}^CVX7$ldA{^fj z$=W#x=D!?&e$JZV9RA|+Paput{>SoBe12W&b9w1SBKhX$oP1rg{G|1t0-;pSN6hWk zuSV?Is|~@$@-a6zQ^R*&Rh9=7%unz&XL~- z8Q)2%&~{{f+*e^w@N-`64>J|%&^vUjhw0@vseEQrcT0RWr6)+{G#GH+O?a8K`nx>%CZAllN4K2M zr_fR1+GfQ=e|$;%se+B~@kLK*KS~F`Ea~H>FS$<4mOW~|>%~&jSL7U{%WXRNvck1? 
zE7%8-ef^g0!=6;UfU9yiCs|N=aeeB1oxD%u>&bJ3a``~zGI~b+@wF$ByT_zDd53|& zV!3-#>CL9PB||p#!y;F{4>b+80c9Cy>AM`3t(?C{M!M*nPM2Gb&rI_7r3&XN-DSHU z6VY4Z==Q%MR(bG0w zJ#$3LMbCaheD;G|1>P@c{L{wgju5e+qvn2pte0!#J0Ix%*25-WPJc}Ag$C~<+V6c| zc-doGAf~rQ&YfP@pIJN4%P{%n{BinouX(S~!7yDqc(;b*_EJ80dviQvQelm?li4xR zI%}u(A;DDI3Hha+nysB4)O4q3VLR=Y`WJ7fw;~bnr)#KYpj`T0tZ5`@KG1 z&|V3VG;O|~w*!CnGnRN8T~}(@4-h`bWp6*n zH+l#i2QJz#o{Zb9A>X9qn6F=#H@MxK5;;o;s^AYRdic(qj_>~dX7sG)_w)IFZav$i zbk~-1Id}cDY`yM3&KaRTevaEdi=R^&1g>nUqvn@j5leqclp~7d-!c5<`p?I|7@rd9 z)I&@^j=x;L@?-pc%4fcNu?g{fe}fkA_`8(gUi_AL_!QZm?f3_7{C$K5J#FPYeh`^x zg7u1gzc%RUXRc~I*gjeLF6BA0?@bKa-@{xO|I|zfa#MQ#aM{%tO8od8;#GZzPWWNt zU8!lC@PqU6W5}Aj>Lw|`u+V$eRtb;qFnSR$bio&Tw=Y6(w-gJ;cNo1^uE*lJ{wVZH zGU)Bn{CQc|J_m9(BN5RfE#h+=3h^txNAnStr<}bz?t01Pn)?1i)R_)!ez%{cQT`$8 zS2tWF;k3L+S?NDnDqJ|WH(hL38J~TBba8tmBjq;spDq6Q1F9!netkU6HndgQkNzIg z$?cNkHFn9D=2_9A{oL^Odq3$w4H*BPUoZH|Ch&7HS$p?~=MdTAbAyBqJq5BDc75sg zgO~H~7f^2Kq7P~Rjt*)&`FVldB|1@ZyUOovEZcdll=FRm=aZikbv%5YR(MSHZM0w8 z)9>9yZwmzu&+`brz+1*K7KC%VAv!OVu#c;L?#lISKDb8G{oanlb2-bV)*_vDhuWKd z9^K(Z@jIQ$2iLE8nMFaqq@M$ygEmf^J+N%h#w~|?5hpqqnxlflff4|4yn=0R97ueH1De;)^ZA0eI(8-Mbt+eA+Me7oDZ5t|n-TBrIT zS`UT7@U^NpeE#M0*7DnxzVc<-o^Jma22@T9MOARU=xl@-zb9~u+MzyA$Tn&_MTHQ# zP+w8yJF;`X{+*qk=yC10*`|LL{mJ_U zq$GMm0jGnnN&L{`dLA%$y%I+J`75_o=YtFbMf}-2u?P!aw)}4?&vcHEq-TKTwiv$< zPfVT8bnp+7Kb{ww{Ba!HB=AuHF1e0%jQy}6(Xt=30YCknU~gZ4*VpHzy~slhykD?C zGn~e2QSbt9Umq7rxCg>?AHV#(RyL*WSCOw56o0f8ov&|korm^WEX+&i#betLTAWe^ABGSZ~?=jC|uGl9O=gqH*>D zuB-io_|g3k9JrNWE>rxzpRbR)o|^lJWRrgM4e&K8D8K#P4%b8ezJuFS+0^T$-4?84 zKdALa4;Y^gsD5{SwQ&BjR}i4tMLc7AY~6F&ejONx_A8#1bwllc(W6=q<#02yB4 zj4@spC|~^EJ@-dHQ*-XT8zqPzhmeK%6%Y&kg!bdO*O~eHd^f_Knf1~ysdtm`+H4$O z{2uGtU=DC<%TezUuIo`hZ_56^7m9*Dip>u_fQ-zCdg&&Y`aXo}GsZJ%=s`)wxoyp# zZS2y1j6}w8{z?7ggt`}>0XaI6H*^;9-B zA?4$71Cu1EcT zxyv!rZ^OX~hhJF2QkYlc4@olHBcJP~hnAki&+S)#FV*#9WOO+neV^FxPdJ@%KfO@x z`bnE#mIqN8oa3=*J$`?!5Hh>GciUQ{zt~hRkEdMU3IP}I`^4`tY|(h~aqi^`ALI3) zj!-hh>!r&z9NU|aiC*v^~^u>Sskou427 
zi0Ze3&4=7hjy|OQyYOMPYYPYT{I1*Yde8oxxL$vU#qsdS5yV>+g=g z?`xz3Tta|zJJ!eVczm{TtG2wZYk;Q(DIS0K!sT`vbcaYPVSYqD`FUj4!!YrBGkmWn z+xXRaLK_Yd1Yq$tM8Ly5}$lWi05|r-Vamh`*)n%Y(LQFjfZw?d7qEC zJ$Ml9O!@V5{L#BL-yxHuxz}j8EmHp=Fp~V1uV=r;_ia``Z|{h3-Fpe5Hg4DXXS^Qu ztm?;&JJ@?6muePQ?F(Oucb^5z$$#ZxgMmzdEjq{6BeedxpDW~gJNqEmAp7!sHiF!%j?_gYZ zI_Y2eU8ehZqW=F#x|jTf2@{%{bUsr<_p86lbgxR({~t;B&53$R_d^M~YvtPK9r3-c z1KKaB?~jRGy`cRSVLHG6#&R47lkWodC+e+7uga_IO7KqLG8QyaSuAq0Fb5ie*nJ?(bYc)!LE!dr*G zqvluOAC&b9|K0%CLD+7t&o^HC3_^e(iWjtvsqc2zxCZ6>n)TuTjKKHxG9MRe`H*{1 z`Re!EK067UR!wO6%uzk(B2i)b$-7-^*(V9q`T* zFDTnNN0+0tSihhtF8>!cz9~MOm49XvgwUL~o8!ax{W#R~9Y1MrphG-dF7jAqNXl7K zJ_O(RyWo9NL(E5$53i@TofgV>ugFI__zl57Dd&z4E7;hj3rn?fzL$fD zyE=6`UKbogxoqa*;9ubTyA<39Y)7~kzSG0~`ka3L_yqdo(gpoJ+0GlURfSdCo}{1o z7HwDHK?+N_?=uI!qyydO1DgVh1pA-k>Fp{$GISv=mnj+_vj}PaV|DnR#V*iO0$=+_ zy>?;xaK`rqbRUE5{tDpM=FdYgzEmf^gx&!#CusLS7kvFZ;^ceJA=<%YQSmB2sEVEa zm2{S^AGrIqP@`(6xyzq5l|Ndug#eRYfI-6!?|7_02llH9=E$hjwDR;+@ON5`l z;ryI%4{J7=FY{;Czo*K^g!B1vyUmLnFW>L??`pXI_WkI*=}CXTG~1~3yWVAc3q5M~ z2mZZb$H&)e#!b<3TU^Eu^L<`jYW=dFhfsDxu5}*N)=%s~qs=$CEy(uDW4(iJ8Xomp zTgVXRfz*?Ud7z&M9=}P`2f-|<-+b4DrOa2gej8WQxUIkv(cN0k&)-cWommb7f8sF> zpZxC`(tLf7so%CueSCLNe)u`ExIE~Q^6;hnfR%s9%0F)9?^ZcG^ZGeLp#5)Dc&A%G z=cOTvz9suN!Vmv$VK3+=-+jK;Zu2klSK>RgKA$&x{hX)Ii-u6Y&*X6$d?2U#rmURt zgYWd}N}BqsEuQ(QPh8%7-5@@X`kaolJkLdZ*gucJ&#h5@97ZI={tmhKCtp{|HeQR= zN;yC05Zj5M;|SA%2c5rvOMCEJ2$$bfU4J(HqVmuAa+?&Zl<(AVIzH;>VevOUCH_NG1SX>$JVb_=F{FU%D1eO zb}zrFOMJ>5SE+K#O%91H!sF*D@oc|!h&pP1sqs}Z&a(XpVEjA*Bkf$BuQ|@g@o)S* z{;Tp(oi7#Vdl~6-sE01sz}822xp={{P#c)|(#@Hilg__3ddY9kPil^Ln^53)ZmkYH zj8C3tIo!-X$8>EchUv;1ZdN`m?Nr}q^es1|qV+-J%jDHk~ zdi;CWDe+JFKz>*APxYhk3nl4Q{w>C*k1fD|>?Hg-lE9PvDJ1xlO>=hx@_D<;@3Pye ztthkV`#jCpQOB>8-)8?he_`NR6rlq%)4FKb|ayppPT*KX8pdsXz|ET=i>p# z9n!~rH#*`?c)os2JQyBCvgjZ4hfDt^^!s`7Y@=3`TYH7TS+-p1OpDgXdhYUzCBAKq zc)_?%8v_6C1NW~4zWF7*D=m1Ot8jV|zX?U>c4+xtu!S6Vdtng2NkLDDf8aW??(;eL zX>gd1@R>fo1V7+<@ke+)8jS5K(#d(5&!>9&5rCt9I}Sg0=Z{D@hIfwA-3v4Yo{f(j 
zr}&e2a!0P`dMOse%{MUO92wsyYu-1y94Wj%-Br{7|2#P^*?8#RaUH)w;n$9jrKT5T zT=erGl$T{-tjkUDO?&29lKPaK_V+Nnoytq^m-J*mKAB#j)~cshZcflsUOKw~?snjn zTraQID=$fYKL?e>r(4P$lXU8%7gEA>k_9|eA0_jvUa9D>mb;SqRbMCb?-BT?uDALn z|KfW6i3A_m4xdW2tG7e(ENd6Y=Pc;RHGEp=V>(5a`*c)gxrR^1@svb~M@!}!{veJg zb?M;uB<%ZI(QW$uBIkE>r`<kRT>SdZ{mnk=vuLr=w?8?jk$) zP@zNdPY3^~?O^5o98)%>`1*MnkN5L{#FG#>KjZl=x8MB!f4iDX0q5Z%@*OpQ_`Uxm z^&PbG%PvuSX0C(?IL-qJcYHvCfsZSLh!p*6{_uI^F8Hso4C&_4|R0|6nrx zH3B~9Z1{RGk2-xlhU*O!t>|5vp7ry{93r z(f55?%1MYKx#d441zm26>&b15i?v{qS?kA7wC>x1yc1=lpmU z@wpWbGC3%&*9If}rx4C9pT)n)a-1s{{+%YM)IS%tjaecprNgrV4R&11Tas7bCBQ9k8@(ppAKZ@&jYZ&FarQ9_V zF0S9B@rdh}a%!ko_%arko0W2_t(%N>z&DnG^bDyJ-WOv-g? zKK!ODuAdd3^1_auThSrm;`)a)9{EDXFMnMe=8xj~w`mw@-BM1+x#IeF#QFNA+=X$N zKZ@&BL5X}yIW_Ey>mQHvJtgIq#9=M>gocrJM9RHd!o~GJ8|OPFqP~hx2=$Bf=7{$&g*f#Jad!A`~1i64|b;3sNTIm(_OFoyBcjOenIj6 z7Rlh{IUfRvu#1n&_~+lp<$Q$n&Gjlg>V4+hc;WdH;pa1X4vqBqzC)XmFZ+Nbf%_Ze zU$Q>FC!JeQXymI7VBEhapGHH84hoOxIZVGVJhMgPdw?%pF;Nf4i@AP@#(Dg&3LRWe z6}_Kn*?-cAL3_hfXrmMhx(O>U=@ZCv$sitVD!nm-*VB7TlFeiy~)pq)y(InP=0 zlPY5@$?^LQ*x2{+o%Z~7H&xPEj+SG+a#tqG`8uzkH}d&$o(qg{lv}!Vp!&(*6UwHp zly_9}sYbaO$aW>)vl*SZ`u;7;pM&zbaY_N;DOkW$Txn0TihQa|(!E>-p4!>TdB$dLOF7H{Gvux*1P={hrZE)W`53+BbwhZN8r`UC0mZcjFHtGaUOJ z@z1T%?^(F~zw;uHI|S|R6}{-`DFI%$+Jvf*mo5jpUI_aNBcb1?=2?f;27wxy+1*DUscmjz6rZ} z-0!v8dtkm#olX6q5W;l}a>&2u6CKp~NWFhZ#{vDfM6e1m2JNIy+@8w*b zT)zMq*5`cieTuY5A>?*EL@N9V_I~*GevRYFbA#F~XI`K8 zU*E4_yKBGDg&{u=Iro$lm%W-Pm{mb zg3kX;{_d&6-zaPCgT_($MMJ=ysr*huj?kny7jZIqWo?kdguFhI9BXzk~udi1j_-h(6w`@r&#GwCa1)p9}SU zy8uYbS6IN0Zi5G}TEG9Z^b6{DZBNzri}vqO;ha9c*54P!cj5Tdl9bDCt#@HRUu1l; zdQLh%)!T{jtFW6o;X_)JMs8Y_A zo}HLy_`2WFBbx8t!R7K}5dShi#~prQf#(;i${|cQ)GuCeZ>Cdza6OajZJp`Aou5DU zuW5!?BBe8RR#1dbzEJbzvV_ilrR%`VC*%Kf5-t05g8x+@ybw;Mz} zllWc3D)@#!={+I*yjF0p&9@iB!^dsTvq@|FW-S-bOP|xQkLMi6tIn|j4?61oKTOQy zeH^c6A3g)Va7?E2d<8E4z$M4+pR1Fv-uV90iF{EW`1BpHJ!fT2u%nBHCHK{zCYpHS0KJjj!YQJ8&mj4;wr|`BPmFW51=`oX)z~ z?!4Khe7;AH}+$(-Y%o`zCQcrjLl%O$ogG-7O#gqH)91*O|u^ 
zU-rxEYWh8AOwMQOJvv`kj{Dg@JI8vD?e84aeC%I**CY8IaybWXdV=-cR8t?e9Yl}c z^;ktdNY5i*#(4zrh>}qDdY-jD7jWJ>YWh`w1s1m#Qg8e7s%Q&p5$m z`v{)$uZ9sFI6W>uK5t|FA=8yi*$*ZEX*V$5@$!91->>m=r|rv;3C_<4`}Z}c0hf5R zS-QU$Fo-zzbJ{m=PPC8TOD*&x5svwY2mKI#eh-9p6~oi`6W_wQW}P4T`T1r2teyC4 zd@cT+4O?|TwNtXmJC}kNf=Rq+NB%AnvJFj|{|?RX=UfX{vDJYOyexblpM5=@dY_K> z0f|x0&Z#m?$9r9j&u4VL%ytP8&aKsgZbx`L?TMFa&O80;zf|UtgGd)T@K2Hs-v=vP z0^o3J-?qo+atj(Qd=)wf1nwYwbl-V@A2i#f8`|wwfcX4%vKthMNCi91Y!~CD9$^T#?KX-M=H+bTxcRUP##`oX*OrF>uD(?79 zu1E;r#|n+@v-N>mxVZrxAbYOTeAH8P*(OzBE{}aSj`Cd~;!z7{xWm>bP?mgJ)}aFG z`>PA`pR023dU06?OR>MH?|2L%F2vta^JD%SK0V8k0LOC)blJvV7vIk_JgUQfeBWt! zg~IoA>MzE``_c&0`FPiMt%e^}a)P-0N{ugHX6J8hJffTup1F8TdDA3b-{3-qpJJU&z$5YuDeJ{$1@K4oc;QXrP5A{_yKv4c?;I|Jcyu&E}BlwvsXuo8> zs}ZK&X1?V7l;=47eNUHLe^;A!(*=-yKSz_6S29&U&jL<+3ID+Pe7aU&XHyr;dOg3h z!X_UCENQ>}QcU#vCj9KVP{V_045s_{X3B3mB07-c=r+W+4`{w#glp+IR)-Ga&$k^& z|FtMukA9xpI1&9{N7>5v8}uB0kD756d&%zqk?(ZThmQ%~Jyd)QA5{A`dQRy{zVk&n z=tciv(VhpH8~j|WK9Bz}o^a{M<39s%ZN@imH;&)eA>PLu$`RrB*nEfmu;OMbp*&^-RLe`a5o}7kJ*ApsVv8-_&-gmg-rrKGWHc7=pDB z^mSPJV&}Gu|JC#avwaCVEAT$I$?2y&>;q4Hoi5qmh^ODf?Ik{-y%YBPTF9E*ACB>x zpW(Q{hthJ(kqDOo%(LP5!rug+_8IG+@%FLtp|8N)7^m!WX9=*%XX$+C zpuy`K)bf6w%HM17^Kw2uvOJ1c>=n}^{{5zD^fTf~`3T_?Z=&0Y`Nw6;AL;JIIj!qo z|7VzC??x)u16KvV^z`vfbB{?-?x7QYyM@!i8zjN$@pTL0@p)wQ_0PN>tBBt*B;8+L z|IJre`1;pB(#`x&AV0|r@pshxQt*5D1vmxqM4tZSH}`#(q05&)a)jmD0IRl~+sVUh zAUNF(LVIdZ;)39*k0$Z`1*FYcI^kY}-^H`+U9o}|A>Oxj3u?wt9x`+@808H9Xa6^YfqEOnCh$vj86O zpnR1A1?cBk9ZvGQ8$W|AZqLQ{4NcDc+^3H-N0B!KeXRFaNM{EB(6|1k^d2+$`o}ru zJ0EQIcJO%w3iYX-jmcg5dy9%8wB?mS22^M5W+|{({z* zO9d|XrBtVkL;g<3AmleBJj28DalC54Mvv1u9KXv=s|fk4`in&2pZ-Sj;lGhC$Itjl zyX2FANjp76PV#dd7SOz8?t&}ir?;Qe^6K5FTrCsBU5gH{0S0pQgD zek?}+VU!d5!R`x_ZiegAt@yol5&XoyQuayRxcR%u@pE{jZ*Q{vm}Yf*!R4ivZ!EV1 zS=|n)pHJnu2l-r1+&)PMdVc?~VU|s8lV2{^1#FY?NR9u zYb1g5NDO1KJo!3a-&QR*4V_FD_1vLh=I6erh5%nQJm#~B{6MWLKj%p&$`6oL`8nX7 zC_hzE5+9C`C(4hei1RNtZ|A&->jm+=ox%afc&-C*UQW6=-(cA9^ZC2uOeY;4&vKl% 
zuY}Key4l+?o_z*?8tI(sSo$n}igSD1`#K#(7o4l{J)!z8zm!v6@$oS6sFcHcnU(7^IjWvF96##oWKMT=`jU^$b!vTWCMUVA z=&*3r`LOP0?~A16bUozC=$b~}W%%pG-~9Y0`?}o`=p{wS@uOTbpZ5p06Xq{1gf%*W?6PF*kq@j{K=&fnxXh2&Z0p^N9G z)~+sZY!71W--)SR@BIGyt+t!etB5s6XWL+JztXS50p2`%RHD&dDMA#7O@E9 zkMiW})ET66eD1`1ag?uGIqlobQW~$n_%Gzs;txYSSYOWeWhfVa^zSOK)Iy^jTFT!A zAU;>3jQ0q%JJ9;oQ0B zN%7cje?Xy84t9bkZkIb8*2{9$_DG^$E1$G)?j{_=lj}|)$~!#o&;FjoG}1{|XX;-l zPF40D?Wd$&|MO_5!^{jfT+ko>POS4eZa3H&$@XdAK#~Oc;B=8GnWWi z!693BsvdW+PRGSyxZ3{d{kU(7rn}ts>9i-9yHw-ZexeuPQ!n`aw`u&Ac=!@OrTJ-B zuw6Qv27{N8M*h;xJ+6Kwy(sDNchc>CXXAy46OH}13I=V9rfFfLuMGCLbzJq+v9t&hXIf2u|7wbj&?)Eh4DGexE{d18W|${KE0pQ z^l_~H16sN*qkj9f>Sqw9;wxoR!7KRf?@?CE@9~))>%VEaWsEV~`a{yb0UKugTxXx* z%lA+#4tDXw9TJT79Pp;|_jYm*k$U2BvG~<^v;@B2;^Ud`5Bd9-E*D2pe+UT9@8LU? zfap10*BRX^;DZ@OCtmMg{eZ@|cdAc##3=W$;^qA2ctkm5uH-%i_erRCd>_mAje7=} z6aC^o_1$i$9@jvh<751;UC$@KEAIHrGQ1Z?mzLAHkP}5&Rw^&W8aw3Tnr`>WZsHfXj%-_bs-71fNJOR(>ye9qklhQQ@Ien@=0YbHFa zZr7J)vJIb?c(+UZJ3{O~-6)xDv2~<}1&ruHQttb8J*MA$enx$RCJO4sQ{?K!%RIae z9IA|Z`NKr~L6ZmeQ_j1Qr2F;E1V_C=eEUp~v=ccvwkPM`{7v8S_x?k@#H@XmzxMnJ z(@BrVQ=T|aCp`Lno&1G*5MQ;wZt>o}omkq+R5~AEZ-(?PG7xvX{Qd>hIUTmmv*WXFYQz(K16wke-Oy9 z-;y6y?Q}kCK|7&RWC{9KNH9K^O?_R}PKHn4^_q|43qS@wuZY*(3?HVG9*?K|SGAMD zV|-ORCFo}R_`WODwYCdvHxTc(;q=aEF4T9%DN(ME&aLQu+oo zJ-MDj)O(o^s>as`DOVB?pYfIZlWPS)tXG<3Jth9WB>MsB@$W^}>-TKudhu-Edhy>C zy|Plsh`*=aM+U&PPpHp%T9v$ZYJA^b^}XNqS$@|aZdWp0@eh@8&TP`wbPxN&iw{yC(~0s{TlE4c1)LcsuM5mR3~2AsZPA; zl{)cZfcCcdgZNE(rQBBO6|@5#=Oe_|_wD`O&w_SBdAj&ra{MM8ePfJ;&aia13plPw zJ6Sx(lPbNEh_BKs7EeA^>HU5HfJ>ee68&QBa>(>J?IN1E)%M10&|~>HKVrI{pKCWh zxSgKQ=mJB}0BhqJ2Y(OlG6}bJi&tY;1Swx9^mA$1rXQ7zzOP0-OFpwZ(2v)X!9<4r zUDay62D|j?3z3R(?JDMCKCViEJ`Kump1AY({^qu7R62M_ejLWX%-Wgx2dwIXZfmbc zB~|f}cpE=QDQs|FF5jXRF-%80GK5dLp+95&w)Wf_-rW-&xd6{B3c;o2s~FG=6cRY< zJ-Teuu=p%j(X%u0z3bfhU6Sr}#(HtHL$sqg)k>C)fo#Ix#p`K7mR*nG?7;YANYKU^GTM^+q{Eey7mtBdy&X;Gu95i-hY@(g-iBR(H|&Z zkw1*{-X2>uKi4rhj{CUUvsUAK;Zdb@+V_RrZq5QbqU8H7#E1C0-IbQpS>P)xj~(Vw 
zS2GvOakL5opAS{fyI-o=Hwk9l4q3KV0s4D8w7&?^@p8TQK7o$-P=74D>SgJ_*|e5- zc-bcX&OP=e#BTHa`6m7DQs22+-oJa`_i@}#q8&?!e!plXkoh+-x^0b?>r7pw?~L`9G4*Mm5+3E8pmIAD@3*c|_;UlAukC#G zoloB1{JV>j)*lFuaE8y*b8z~#S>RRB@?2k`966pbpM4%}bWa0_@Wtv~2|fv5C}eP* zO&!9QjXPIJ*!ju+!2bHytE|6tN^-m&<@Hh~1&?br-Pg0*?ziyW>ihRO$A4PGlTWBG z>oEW@e3R12{w3{lm3FA){TkB2{z|;suGjMZuAaYd?tF`soS?kL>VbT+9QR4Y2VaPH zDfP{H^7&Nx>;3;Q+PO-8fEOL(>6|}p-4cxL0>9_HKz@wwSblCZ`MK)L(qFvY{kuT1 z{A^YDvHaLLLOzkM;Vmvd|EO?WKCJyu-fu2kq~BD_1@$>A;P_B)9Ekguq8p!!^!`+) zf`g0u5q=Z@oau$PuEBygTx~y^w08D(A3pEjg-+K?y=ZqvIR6&yH&+Yz`1?lwe%HeO zvP}7*+2y@?%7M#8?i$l4CJ(WmTB!vh>le|GhTrm-W~BWo`U4$Gd~3hlR(-#J5Mj#y zTd&fA&gIqH`K{hxtbOC}3v_BdZ8dT-zDwhm-KlccZu1P6Tfe{NbQN#0cPyXPa;%r< zNSvRAr!;*K$szuZn(un4di)7y581fJ@~=wVA7z;B?&W-a&inZx8|OJcOSsv-=#eP@ibJ2kz}><`x~zVGDcbQkQS(13#DxnXvhUidQaepsSoeb|ZbCd>YcifR0P zBKBwMna*^Fq;q_ygjCzF!EBF>Z`Jb)?9UEpJ*TqX{!?IF+oPJl?E~sZPiz18b%?&A z#oN8j%J+nL`a<_n$bVm_n1;}8LfGw)K_oJrdYk*r(NoIr%6j^RNUfz0_L-`4=-Hb! zo%>gC48W_bqg%rp+a)jdC1-MJ9l%@;`rfNamGA#$C8C(~5#Kc4vWTwn$N zFV54xjMvX66$8p`1y7vMH@W3}h4Rp|(&2{kqgpN#>)SE?t2Cb$vwxH8+S*~O>Bo9; zV@DnMu#YcM5We47>AycH3GI`b-r4vXX-6M#dx1aO)%!uu3e7hp8w`qup=5M$n1RX;;hJk|1}RDyCfFYRTx| zYdnllH~zAVt-o!N?D07_pT|stErjUj<&yDG!FA)IqUy$z#CmYTMEpUM_XX?vUC0DC zjX$EPKPP%RkpYlG+O5k#Ko*^CFDmsY$z8mq`hRc~8-$y21B6lV)exD|(AEkY)eaWK9;}D z(&r3Mrc3)+JmtQseG>6i?PKxeBipBay`UrS1{1|z#7n!FKH>RcwjbWx(RR`7Rr?__ z4g*<^!)!M_7u7B~eVj|4hq?|a)%(w)`n1Qp5tqfgvvjoA zj-W!?ks-pg`|106Q|FtnTciWMSMTd#gMcUH%=hnhxn67U1~9nMJJcV97vhilg1)b} z%%EO6zjr-)yN3N7Zn;ClWqZdwrpw0hqZmHOKeq$i9;AFSK9<{+3g;-um;5%a9a^FB zT<@3s7Ir!JeGs-U9qrM#!9PTT=#Q!I>)EyV`8)G*y^-cWv`ziwx|-nixW@asKs--^ zk^o8ZdTDMOLmH;<-vvG7^J~+0&;xRJ2WC+>Dy-2W}#H2PJDtaD~ZtyXg|Xe(*?TK(kvs-BPYCzfab zrN7$yZ#M#PUSC_k1mk;c-hN)s>gAeV%MZ6dYR^geeqs4G6-+Nzi*HZ2!Y!LT#O-hV zVSfve#qDm_!}%s1SMuqP2msfYweWp>@cNzJP2Exq=Ml~ZX8_;ytn;URv!ssNMoekOQ%*HiI&JKBM+c3dmF+`D|3yvA~C{Kz&Q!ww$c zTRz$w9M2DeYxI4d=;y5c?0SEi- zH%V^%{t){gBRsvQpQ#92|7SYst`|=_>cz7s#_@L|0O$4izO0|$h~b-_(RCJGe_*+} 
z4FARagaQV~bs#o%&zJ<`@5eA+@Y%aCUhuK_gGi%)CXVcGkN#*0j7(5Sb4Wwh@aGh{8A5msizkO;QZVn z^_Y~mc$O3SBAt{kt*;_qo)7g&KKRVnmuNT29r;AReUP
  • + the default is finalized +
  • +
  • + processed is not supported. +
  • - - whether to populate the `rewards` array. + + +encoding format for each returned Transaction + + + +
    + +- `jsonParsed` attempts to use program-specific instruction parsers to return + more human-readable and explicit data in the `transaction.message.instructions` list. +- If `jsonParsed` is requested but a parser cannot be found, the instruction + falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). + +
    +
    - + + +level of transaction detail to return -Encoding format for Account data + - +
    + +- If `accounts` are requested, transaction details only include signatures and + an annotated list of accounts in each transaction. +- Transaction metadata is limited to only: fee, err, pre_balances, + post_balances, pre_token_balances, and post_token_balances. + +
    + +
    + + + +the max transaction version to return in responses.
    -- `base58` is slow -- `jsonParsed` encoding attempts to use program-specific state parsers to return - more human-readable and explicit account state data. -- If `jsonParsed` is requested but a parser cannot be found, the field falls back - to `base64` encoding, detectable when the `data` field is type `string`. +- If the requested block contains a transaction with a higher version, an + error will be returned. +- If this parameter is omitted, only legacy transactions will be returned, and + a block containing any versioned transaction will prompt the error.
    + + whether to populate the `rewards` array. If parameter not provided, the + default includes rewards. + + ### Result: diff --git a/docs/src/api/websocket/_slotsUpdatesSubscribe.mdx b/docs/src/api/websocket/_slotsUpdatesSubscribe.mdx index efb639b2034958..cbd6a6aec13303 100644 --- a/docs/src/api/websocket/_slotsUpdatesSubscribe.mdx +++ b/docs/src/api/websocket/_slotsUpdatesSubscribe.mdx @@ -55,8 +55,14 @@ This subscription is unstable The notification will be an object with the following fields: -- `parent: ` - The parent slot +- `err: ` - The error message. Only present if the update is of type "dead". +- `parent: ` - The parent slot. Only present if the update is of type "createdBank". - `slot: ` - The newly updated slot +- `stats: ` - The error message. Only present if the update is of type "frozen". An object with the following fields: + - `maxTransactionsPerEntry: `, + - `numFailedTransactions: `, + - `numSuccessfulTransactions: `, + - `numTransactionEntries: `, - `timestamp: ` - The Unix timestamp of the update - `type: ` - The update type, one of: - "firstShredReceived" diff --git a/docs/src/api/websocket/_voteSubscribe.mdx b/docs/src/api/websocket/_voteSubscribe.mdx index 4f6f9cc87be982..d100035d93658e 100644 --- a/docs/src/api/websocket/_voteSubscribe.mdx +++ b/docs/src/api/websocket/_voteSubscribe.mdx @@ -60,6 +60,7 @@ The notification will be an object with the following fields: - `slots: ` - The slots covered by the vote, as an array of u64 integers - `timestamp: ` - The timestamp of the vote - `signature: ` - The signature of the transaction that contained this vote +- `votePubkey: ` - The public key of the vote account, as base-58 encoded string ```json { From c7f2c331c2a254db07f471c7d68d3f7fcb3e39ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:53:44 +0000 Subject: [PATCH 340/407] build(deps): bump regex from 1.10.0 to 1.10.1 (#33705) * build(deps): bump regex from 
1.10.0 to 1.10.1 Bumps [regex](https://github.com/rust-lang/regex) from 1.10.0 to 1.10.1. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.10.0...1.10.1) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 033095542093de..f496d624e2384a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4372,14 +4372,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" +checksum = "aaac441002f822bc9705a681810a4dd2963094b9ca0ddc41cb963a4c189189ea" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-automata 0.4.1", - "regex-syntax 0.8.0", + "regex-automata 0.4.2", + "regex-syntax 0.8.2", ] [[package]] @@ -4390,13 +4390,13 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" +checksum = "5011c7e263a695dc8ca064cddb722af1be54e517a280b12a5356f98366899e5d" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-syntax 0.8.0", + "regex-syntax 0.8.2", ] [[package]] @@ -4407,9 +4407,9 @@ checksum = 
"dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regex-syntax" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3cbb081b9784b07cceb8824c8583f86db4814d172ab043f3c23f7dc600bf83d" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" diff --git a/Cargo.toml b/Cargo.toml index c5c55a09cc58af..03fd7cdd50ebad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -277,7 +277,7 @@ raptorq = "1.7.0" rayon = "1.8.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" -regex = "1.10.0" +regex = "1.10.1" rolling-file = "0.2.0" reqwest = { version = "0.11.22", default-features = false } rpassword = "7.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f34ae041826e77..4cd781992cc7c5 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3814,9 +3814,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" +checksum = "aaac441002f822bc9705a681810a4dd2963094b9ca0ddc41cb963a4c189189ea" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -3826,9 +3826,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" +checksum = "5011c7e263a695dc8ca064cddb722af1be54e517a280b12a5356f98366899e5d" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -3837,9 +3837,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3cbb081b9784b07cceb8824c8583f86db4814d172ab043f3c23f7dc600bf83d" +checksum = 
"c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" From d548993cb49cdbc62a894fb08f48c4cc35e568a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:54:13 +0000 Subject: [PATCH 341/407] build(deps): bump async-trait from 0.1.73 to 0.1.74 (#33707) * build(deps): bump async-trait from 0.1.73 to 0.1.74 Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.73 to 0.1.74. - [Release notes](https://github.com/dtolnay/async-trait/releases) - [Commits](https://github.com/dtolnay/async-trait/compare/0.1.73...0.1.74) --- updated-dependencies: - dependency-name: async-trait dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f496d624e2384a..c894e481e2081a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -430,9 +430,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 03fd7cdd50ebad..2b19f7f42a2443 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -146,7 +146,7 @@ assert_cmd = "2.0" assert_matches = "1.5.0" async-channel = "1.9.0" async-mutex = "1.4.0" -async-trait = "0.1.73" +async-trait = "0.1.74" atty = "0.2.11" backoff = "0.4.0" base64 = "0.21.4" diff --git a/programs/sbf/Cargo.lock 
b/programs/sbf/Cargo.lock index 4cd781992cc7c5..8bab847630c73a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -404,9 +404,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", From bec4a47886928d31b0747944caf188e67476bfd2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:54:44 +0000 Subject: [PATCH 342/407] build(deps): bump flate2 from 1.0.27 to 1.0.28 (#33708) * build(deps): bump flate2 from 1.0.27 to 1.0.28 Bumps [flate2](https://github.com/rust-lang/flate2-rs) from 1.0.27 to 1.0.28. - [Release notes](https://github.com/rust-lang/flate2-rs/releases) - [Commits](https://github.com/rust-lang/flate2-rs/compare/1.0.27...1.0.28) --- updated-dependencies: - dependency-name: flate2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c894e481e2081a..f41c6deeae40cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1946,9 +1946,9 @@ checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", diff --git a/Cargo.toml b/Cargo.toml index 2b19f7f42a2443..2ab74f105eadc2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -196,7 +196,7 @@ env_logger = "0.9.3" etcd-client = "0.11.1" fast-math = "0.1" fd-lock = "3.0.13" -flate2 = "1.0.27" +flate2 = "1.0.28" fnv = "1.0.7" fs_extra = "1.3.0" fs-err = "2.9.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 8bab847630c73a..9cc774c2a48c37 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1631,9 +1631,9 @@ checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", From 167dac204fe64e4c9c68e6de5ae8a097eb595dea Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 16 Oct 2023 09:21:08 -0500 Subject: [PATCH 343/407] Retry hash file allocation 
(#33565) * retry hash file allocation * add sleep * submit a datapoint for retry * typo * more typos * Update accounts-db/src/accounts_hash.rs Co-authored-by: Brooks * fmt --------- Co-authored-by: HaoranYi Co-authored-by: Brooks --- accounts-db/src/accounts_hash.rs | 68 +++++++++++++++++++++++++------- 1 file changed, 53 insertions(+), 15 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index cc6db20fbfbb00..c6a1458a4bb263 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -26,6 +26,7 @@ use { atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, }, + thread, time, }, tempfile::tempfile_in, }; @@ -87,22 +88,59 @@ impl AccountHashesFile { if self.writer.is_none() { // we have hashes to write but no file yet, so create a file that will auto-delete on drop - let mut data = tempfile_in(&self.dir_for_temp_cache_files).unwrap_or_else(|err| { - panic!( - "Unable to create file within {}: {err}", - self.dir_for_temp_cache_files.display() - ) - }); + let get_file = || -> Result<_, std::io::Error> { + let mut data = tempfile_in(&self.dir_for_temp_cache_files).unwrap_or_else(|err| { + panic!( + "Unable to create file within {}: {err}", + self.dir_for_temp_cache_files.display() + ) + }); + + // Theoretical performance optimization: write a zero to the end of + // the file so that we won't have to resize it later, which may be + // expensive. + assert!(self.capacity > 0); + data.seek(SeekFrom::Start((self.capacity - 1) as u64))?; + data.write_all(&[0])?; + data.rewind()?; + data.flush()?; + Ok(data) + }; + + // Retry 5 times to allocate the AccountHashesFile. The memory might be fragmented and + // causes memory allocation failure. Therefore, let's retry after failure. Hoping that the + // kernel has the chance to defrag the memory between the retries, and retries succeed. 
+ let mut num_retries = 0; + let data = loop { + num_retries += 1; + + match get_file() { + Ok(data) => { + break data; + } + Err(err) => { + info!( + "Unable to create account hashes file within {}: {}, retry counter {}", + self.dir_for_temp_cache_files.display(), + err, + num_retries + ); - // Theoretical performance optimization: write a zero to the end of - // the file so that we won't have to resize it later, which may be - // expensive. - assert!(self.capacity > 0); - data.seek(SeekFrom::Start((self.capacity - 1) as u64)) - .unwrap(); - data.write_all(&[0]).unwrap(); - data.rewind().unwrap(); - data.flush().unwrap(); + if num_retries > 5 { + panic!( + "Unable to create account hashes file within {}: after {} retries", + self.dir_for_temp_cache_files.display(), + num_retries + ); + } + datapoint_info!( + "retry_account_hashes_file_allocation", + ("retry", num_retries, i64) + ); + thread::sleep(time::Duration::from_millis(num_retries * 100)); + } + } + }; //UNSAFE: Required to create a Mmap let map = unsafe { MmapMut::map_mut(&data) }; From f4fb957a38e00902b1997094e255980a1e07adb6 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Mon, 16 Oct 2023 08:58:53 -0700 Subject: [PATCH 344/407] Code cleanup in cargo-registry (#33711) --- cargo-registry/src/client.rs | 22 ++--- cargo-registry/src/main.rs | 110 +++++-------------------- cargo-registry/src/publisher.rs | 6 +- cargo-registry/src/response_builder.rs | 29 ++++++- 4 files changed, 63 insertions(+), 104 deletions(-) diff --git a/cargo-registry/src/client.rs b/cargo-registry/src/client.rs index 7edad1e0aa6599..109b1a8b1975eb 100644 --- a/cargo-registry/src/client.rs +++ b/cargo-registry/src/client.rs @@ -19,9 +19,9 @@ use { std::{error, sync::Arc, time::Duration}, }; -pub struct ClientConfig<'a>(pub ProgramV4CommandConfig<'a>); +pub(crate) struct RPCCommandConfig<'a>(pub ProgramV4CommandConfig<'a>); -impl<'a> ClientConfig<'a> { +impl<'a> RPCCommandConfig<'a> { pub fn new(client: &'a Client) -> Self { 
Self(ProgramV4CommandConfig { websocket_url: &client.websocket_url, @@ -34,7 +34,7 @@ impl<'a> ClientConfig<'a> { } } -pub struct Client { +pub(crate) struct Client { pub rpc_client: Arc, pub port: u16, pub server_url: String, @@ -161,7 +161,7 @@ impl Client { ) } - pub fn new() -> Result> { + pub(crate) fn new() -> Result> { let matches = Self::get_clap_app( crate_name!(), crate_description!(), @@ -169,7 +169,7 @@ impl Client { ) .get_matches(); - let config = if let Some(config_file) = matches.value_of("config_file") { + let cli_config = if let Some(config_file) = matches.value_of("config_file") { Config::load(config_file).unwrap_or_default() } else { Config::default() @@ -177,19 +177,19 @@ impl Client { let (_, json_rpc_url) = ConfigInput::compute_json_rpc_url_setting( matches.value_of("json_rpc_url").unwrap_or(""), - &config.json_rpc_url, + &cli_config.json_rpc_url, ); let (_, websocket_url) = ConfigInput::compute_websocket_url_setting( matches.value_of("websocket_url").unwrap_or(""), - &config.websocket_url, + &cli_config.websocket_url, matches.value_of("json_rpc_url").unwrap_or(""), - &config.json_rpc_url, + &cli_config.json_rpc_url, ); let (_, commitment) = ConfigInput::compute_commitment_config( matches.value_of("commitment").unwrap_or(""), - &config.commitment, + &cli_config.commitment, ); let rpc_timeout = value_t_or_exit!(matches, "rpc_timeout", u64); @@ -200,8 +200,8 @@ impl Client { let confirm_transaction_initial_timeout = Duration::from_secs(confirm_transaction_initial_timeout); - let payer_keypair = Self::get_keypair(&matches, &config.keypair_path, "keypair")?; - let authority_keypair = Self::get_keypair(&matches, &config.keypair_path, "authority")?; + let payer_keypair = Self::get_keypair(&matches, &cli_config.keypair_path, "keypair")?; + let authority_keypair = Self::get_keypair(&matches, &cli_config.keypair_path, "authority")?; let port = value_t_or_exit!(matches, "port", u16); diff --git a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs 
index 4ba61c917969b7..419e8cf434202d 100644 --- a/cargo-registry/src/main.rs +++ b/cargo-registry/src/main.rs @@ -79,23 +79,14 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name, _version)) = Self::get_crate_name_and_version(path) else { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Failed to parse the request.", - ); + return response_builder::error_in_parsing(); }; if path.len() != PATH_PREFIX.len() { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Request length is incorrect", - ); + return response_builder::error_incorrect_length(); } - response_builder::error_response( - hyper::StatusCode::NOT_IMPLEMENTED, - "This command is not implemented yet", - ) + response_builder::error_not_implemented() } fn handle_unyank_request( @@ -103,23 +94,14 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name, _version)) = Self::get_crate_name_and_version(path) else { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Failed to parse the request.", - ); + return response_builder::error_in_parsing(); }; if path.len() != PATH_PREFIX.len() { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Request length is incorrect", - ); + return response_builder::error_incorrect_length(); } - response_builder::error_response( - hyper::StatusCode::NOT_IMPLEMENTED, - "This command is not implemented yet", - ) + response_builder::error_not_implemented() } fn get_crate_name(path: &str) -> Option<(&str, &str)> { @@ -131,23 +113,14 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name)) = Self::get_crate_name(path) else { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Failed to parse the request.", - ); + return response_builder::error_in_parsing(); }; if path.len() != 
PATH_PREFIX.len() { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Request length is incorrect", - ); + return response_builder::error_incorrect_length(); } - response_builder::error_response( - hyper::StatusCode::NOT_IMPLEMENTED, - "This command is not implemented yet", - ) + response_builder::error_not_implemented() } fn handle_add_owners_request( @@ -155,23 +128,14 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name)) = Self::get_crate_name(path) else { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Failed to parse the request.", - ); + return response_builder::error_in_parsing(); }; if path.len() != PATH_PREFIX.len() { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Request length is incorrect", - ); + return response_builder::error_incorrect_length(); } - response_builder::error_response( - hyper::StatusCode::NOT_IMPLEMENTED, - "This command is not implemented yet", - ) + response_builder::error_not_implemented() } fn handle_delete_owners_request( @@ -179,23 +143,14 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name)) = Self::get_crate_name(path) else { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Failed to parse the request.", - ); + return response_builder::error_in_parsing(); }; if path.len() != PATH_PREFIX.len() { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Request length is incorrect", - ); + return response_builder::error_incorrect_length(); } - response_builder::error_response( - hyper::StatusCode::NOT_IMPLEMENTED, - "This command is not implemented yet", - ) + response_builder::error_not_implemented() } fn handle_get_crates_request( @@ -208,16 +163,10 @@ impl CargoRegistryService { // full path started with PATH_PREFIX. 
So it's sufficient to check that provided // path is smaller than PATH_PREFIX. if path.len() >= PATH_PREFIX.len() { - return response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Request length is incorrect", - ); + return response_builder::error_incorrect_length(); } - response_builder::error_response( - hyper::StatusCode::NOT_IMPLEMENTED, - "This command is not implemented yet", - ) + response_builder::error_not_implemented() } async fn handler( @@ -255,41 +204,26 @@ impl CargoRegistryService { Method::PUT => match endpoint { "new" => { if path.len() != PATH_PREFIX.len() { - response_builder::error_response( - hyper::StatusCode::BAD_REQUEST, - "Invalid length of the request.", - ) + response_builder::error_incorrect_length() } else { Self::handle_publish_request(request, client.clone(), index.clone()).await } } "unyank" => Self::handle_unyank_request(path, &request), "owners" => Self::handle_add_owners_request(path, &request), - _ => response_builder::error_response( - hyper::StatusCode::METHOD_NOT_ALLOWED, - "Unknown request", - ), + _ => response_builder::error_not_allowed(), }, Method::GET => match endpoint { "crates" => Self::handle_get_crates_request(path, &request), "owners" => Self::handle_get_owners_request(path, &request), - _ => response_builder::error_response( - hyper::StatusCode::METHOD_NOT_ALLOWED, - "Unknown request", - ), + _ => response_builder::error_not_allowed(), }, Method::DELETE => match endpoint { "yank" => Self::handle_yank_request(path, &request), "owners" => Self::handle_delete_owners_request(path, &request), - _ => response_builder::error_response( - hyper::StatusCode::METHOD_NOT_ALLOWED, - "Unknown request", - ), + _ => response_builder::error_not_allowed(), }, - _ => response_builder::error_response( - hyper::StatusCode::METHOD_NOT_ALLOWED, - "Unknown request", - ), + _ => response_builder::error_not_allowed(), }) } } diff --git a/cargo-registry/src/publisher.rs b/cargo-registry/src/publisher.rs index 
5940191b46dcaf..ea4c74a7251b67 100644 --- a/cargo-registry/src/publisher.rs +++ b/cargo-registry/src/publisher.rs @@ -1,6 +1,6 @@ use { crate::{ - client::{Client, ClientConfig}, + client::{Client, RPCCommandConfig}, sparse_index::{IndexEntry, RegistryIndex}, }, flate2::read::GzDecoder, @@ -129,7 +129,7 @@ impl Publisher { let tempdir = tempdir()?; archive.unpack(tempdir.path())?; - let config = ClientConfig::new(client.as_ref()); + let command_config = RPCCommandConfig::new(client.as_ref()); let lib_name = Self::program_library_name(&tempdir, &meta_data)?; @@ -152,7 +152,7 @@ impl Publisher { process_deploy_program( client.rpc_client.clone(), - &config.0, + &command_config.0, &program_data, program_data.len() as u32, &program_keypair.pubkey(), diff --git a/cargo-registry/src/response_builder.rs b/cargo-registry/src/response_builder.rs index 8a56e298f713ae..a8da2d9d6fdbba 100644 --- a/cargo-registry/src/response_builder.rs +++ b/cargo-registry/src/response_builder.rs @@ -1,4 +1,4 @@ -use {crate::response_builder, log::error}; +use log::error; pub(crate) fn error_response(status: hyper::StatusCode, msg: &str) -> hyper::Response { error!("{}", msg); @@ -23,5 +23,30 @@ pub(crate) fn success_response_str(value: &str) -> hyper::Response } pub(crate) fn success_response() -> hyper::Response { - response_builder::success_response_str("") + success_response_str("") +} + +pub(crate) fn error_not_allowed() -> hyper::Response { + error_response(hyper::StatusCode::METHOD_NOT_ALLOWED, "Unknown request") +} + +pub(crate) fn error_not_implemented() -> hyper::Response { + error_response( + hyper::StatusCode::NOT_IMPLEMENTED, + "This command is not implemented yet", + ) +} + +pub(crate) fn error_in_parsing() -> hyper::Response { + error_response( + hyper::StatusCode::BAD_REQUEST, + "Failed to parse the request", + ) +} + +pub(crate) fn error_incorrect_length() -> hyper::Response { + error_response( + hyper::StatusCode::BAD_REQUEST, + "Request length is incorrect", + ) } From 
8bd0e4cd959f21f2423148aa51378b7b848cc719 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 16 Oct 2023 11:21:33 -0500 Subject: [PATCH 345/407] Change getHealth to compare optimistically confirmed slots (#33651) The current getHealth mechanism checks a local accounts hash slot vs. those of other nodes as specified by --known-validator. This is a very coarse comparison given that the default for this value is 100 slots. More so, any nodes using a value larger than the default (ie --incremental-snapshot-interval 500) will likely see getHealth return status behind at some point. Change the underlying mechanism of how health is computed. Instead of using the accounts hash slots published in gossip, use the latest optimistically confirmed slot from the cluster. Even when a node is behind, it is able to observe cluster optimistically confirmed by slots by viewing votes published in gossip. Thus, the latest cluster optimistically confirmed slot can be compared against the latest optimistically confirmed bank from replay to determine health. This new comparison is much more granular, and not needing to depend on individual known validators is also a plus. 
--- core/src/validator.rs | 4 +- docs/src/api/http.md | 10 +- docs/src/api/methods/_getHealth.mdx | 9 +- rpc/src/rpc.rs | 47 ++++-- rpc/src/rpc_health.rs | 231 ++++++++++++++++++---------- rpc/src/rpc_service.rs | 141 +++-------------- validator/src/cli.rs | 8 +- validator/src/main.rs | 1 + 8 files changed, 213 insertions(+), 238 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 011d63924328c0..3075fb5261a180 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -943,7 +943,8 @@ impl Validator { // (by both replay stage and banking stage) let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::default()); - let rpc_override_health_check = Arc::new(AtomicBool::new(false)); + let rpc_override_health_check = + Arc::new(AtomicBool::new(config.rpc_config.disable_health_check)); let ( json_rpc_service, pubsub_service, @@ -980,7 +981,6 @@ impl Validator { ledger_path, config.validator_exit.clone(), exit.clone(), - config.known_validators.clone(), rpc_override_health_check.clone(), startup_verification_complete, optimistically_confirmed_bank.clone(), diff --git a/docs/src/api/http.md b/docs/src/api/http.md index 63163fbfa4ccdf..9edf2c792cd9c5 100644 --- a/docs/src/api/http.md +++ b/docs/src/api/http.md @@ -154,13 +154,11 @@ Some methods support providing a `filters` object to enable pre-filtering the da Although not a JSON RPC API, a `GET /health` at the RPC HTTP Endpoint provides a health-check mechanism for use by load balancers or other network infrastructure. This request will always return a HTTP 200 OK response with a body of -"ok", "behind" or "unknown" based on the following conditions: +"ok", "behind" or "unknown": -1. If one or more `--known-validator` arguments are provided to `solana-validator` - "ok" is returned - when the node has within `HEALTH_CHECK_SLOT_DISTANCE` slots of the highest - known validator, otherwise "behind". 
"unknown" is returned when no slot - information from known validators is not yet available. -2. "ok" is always returned if no known validators are provided. +- `ok`: The node is within `HEALTH_CHECK_SLOT_DISTANCE` slots from the latest cluster confirmed slot +- `behind { distance }`: The node is behind `distance` slots from the latest cluster confirmed slot where `distance > HEALTH_CHECK_SLOT_DISTANCE` +- `unknown`: The node is unable to determine where it stands in relation to the cluster ## JSON RPC API Reference diff --git a/docs/src/api/methods/_getHealth.mdx b/docs/src/api/methods/_getHealth.mdx index 482a2ff62a076e..ceb30cc40fa04c 100644 --- a/docs/src/api/methods/_getHealth.mdx +++ b/docs/src/api/methods/_getHealth.mdx @@ -12,13 +12,8 @@ import { ## getHealth -Returns the current health of the node. - -:::caution -If one or more `--known-validator` arguments are provided to `solana-validator` - "ok" is returned -when the node has within `HEALTH_CHECK_SLOT_DISTANCE` slots of the highest known validator, -otherwise an error is returned. "ok" is always returned if no known validators are provided. -::: +Returns the current health of the node. A healthy node is one that is within +`HEALTH_CHECK_SLOT_DISTANCE` slots of the latest cluster confirmed slot. 
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 7ff2ffa42b5f3e..709c186889995e 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -149,12 +149,15 @@ pub struct JsonRpcConfig { pub obsolete_v1_7_api: bool, pub rpc_scan_and_fix_roots: bool, pub max_request_body_size: Option, + /// Disable the health check, used for tests and TestValidator + pub disable_health_check: bool, } impl JsonRpcConfig { pub fn default_for_test() -> Self { Self { full_api: true, + disable_health_check: true, ..Self::default() } } @@ -374,6 +377,10 @@ impl JsonRpcRequestProcessor { ); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); + let startup_verification_complete = Arc::clone(bank.get_startup_verification_complete()); + let slot = bank.slot(); + let optimistically_confirmed_bank = + Arc::new(RwLock::new(OptimisticallyConfirmedBank { bank })); Self { config: JsonRpcConfig::default(), snapshot_config: None, @@ -381,24 +388,22 @@ impl JsonRpcRequestProcessor { block_commitment_cache: Arc::new(RwLock::new(BlockCommitmentCache::new( HashMap::new(), 0, - CommitmentSlots::new_from_slot(bank.slot()), + CommitmentSlots::new_from_slot(slot), ))), - blockstore, + blockstore: Arc::clone(&blockstore), validator_exit: create_validator_exit(exit.clone()), health: Arc::new(RpcHealth::new( - cluster_info.clone(), - None, + Arc::clone(&optimistically_confirmed_bank), + blockstore, 0, exit, - Arc::clone(bank.get_startup_verification_complete()), + startup_verification_complete, )), cluster_info, genesis_hash, transaction_sender: Arc::new(Mutex::new(sender)), bigtable_ledger_storage: None, - optimistically_confirmed_bank: Arc::new(RwLock::new(OptimisticallyConfirmedBank { - bank, - })), + optimistically_confirmed_bank, largest_accounts_cache: Arc::new(RwLock::new(LargestAccountsCache::new(30))), max_slots: Arc::new(MaxSlots::default()), leader_schedule_cache, @@ -4787,6 +4792,8 @@ pub mod tests { // note that this means that slot 0 will always be considered complete let 
max_complete_transaction_status_slot = Arc::new(AtomicU64::new(0)); let max_complete_rewards_slot = Arc::new(AtomicU64::new(0)); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let meta = JsonRpcRequestProcessor::new( config, @@ -4795,11 +4802,11 @@ pub mod tests { block_commitment_cache.clone(), blockstore.clone(), validator_exit, - RpcHealth::stub(), + RpcHealth::stub(optimistically_confirmed_bank.clone(), blockstore.clone()), cluster_info, Hash::default(), None, - OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), + optimistically_confirmed_bank, Arc::new(RwLock::new(LargestAccountsCache::new(30))), max_slots.clone(), Arc::new(LeaderScheduleCache::new_from_bank(&bank)), @@ -6398,7 +6405,11 @@ pub mod tests { let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let (bank_forks, mint_keypair, ..) = new_bank_forks(); - let health = RpcHealth::stub(); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let health = RpcHealth::stub(optimistically_confirmed_bank.clone(), blockstore.clone()); + // Mark the node as healthy to start + health.stub_set_health_status(Some(RpcHealthStatus::Ok)); // Freeze bank 0 to prevent a panic in `run_transaction_simulation()` bank_forks.write().unwrap().get(0).unwrap().freeze(); @@ -6429,7 +6440,7 @@ pub mod tests { cluster_info, Hash::default(), None, - OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), + optimistically_confirmed_bank, Arc::new(RwLock::new(LargestAccountsCache::new(30))), Arc::new(MaxSlots::default()), Arc::new(LeaderScheduleCache::default()), @@ -6690,18 +6701,20 @@ pub mod tests { .my_contact_info() .tpu(connection_cache.protocol()) .unwrap(); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let 
(request_processor, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, bank_forks.clone(), block_commitment_cache, - blockstore, + blockstore.clone(), validator_exit, - RpcHealth::stub(), + RpcHealth::stub(optimistically_confirmed_bank.clone(), blockstore), cluster_info, Hash::default(), None, - OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), + optimistically_confirmed_bank, Arc::new(RwLock::new(LargestAccountsCache::new(30))), Arc::new(MaxSlots::default()), Arc::new(LeaderScheduleCache::default()), @@ -8327,9 +8340,9 @@ pub mod tests { None, bank_forks.clone(), block_commitment_cache, - blockstore, + blockstore.clone(), validator_exit, - RpcHealth::stub(), + RpcHealth::stub(optimistically_confirmed_bank.clone(), blockstore.clone()), cluster_info, Hash::default(), None, diff --git a/rpc/src/rpc_health.rs b/rpc/src/rpc_health.rs index 022b2e03d1eae2..8a6347cc7a6bd0 100644 --- a/rpc/src/rpc_health.rs +++ b/rpc/src/rpc_health.rs @@ -1,12 +1,10 @@ use { - solana_gossip::cluster_info::ClusterInfo, - solana_sdk::{clock::Slot, pubkey::Pubkey}, - std::{ - collections::HashSet, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, + crate::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, + solana_ledger::blockstore::Blockstore, + solana_sdk::clock::Slot, + std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, }, }; @@ -18,8 +16,8 @@ pub enum RpcHealthStatus { } pub struct RpcHealth { - cluster_info: Arc, - known_validators: Option>, + optimistically_confirmed_bank: Arc>, + blockstore: Arc, health_check_slot_distance: u64, override_health_check: Arc, startup_verification_complete: Arc, @@ -29,15 +27,15 @@ pub struct RpcHealth { impl RpcHealth { pub fn new( - cluster_info: Arc, - known_validators: Option>, + optimistically_confirmed_bank: Arc>, + blockstore: Arc, health_check_slot_distance: u64, override_health_check: Arc, startup_verification_complete: Arc, ) -> Self { Self { - cluster_info, - 
known_validators, + optimistically_confirmed_bank, + blockstore, health_check_slot_distance, override_health_check, startup_verification_complete, @@ -54,84 +52,74 @@ impl RpcHealth { } } + if self.override_health_check.load(Ordering::Relaxed) { + return RpcHealthStatus::Ok; + } if !self.startup_verification_complete.load(Ordering::Acquire) { return RpcHealthStatus::Unknown; } - if self.override_health_check.load(Ordering::Relaxed) { - RpcHealthStatus::Ok - } else if let Some(known_validators) = &self.known_validators { - match ( - self.cluster_info - .get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| { - hashes - .iter() - .max_by(|a, b| a.0.cmp(&b.0)) - .map(|slot_hash| slot_hash.0) - }) - .flatten(), - known_validators - .iter() - .filter_map(|known_validator| { - self.cluster_info - .get_accounts_hash_for_node(known_validator, |hashes| { - hashes - .iter() - .max_by(|a, b| a.0.cmp(&b.0)) - .map(|slot_hash| slot_hash.0) - }) - .flatten() - }) - .max(), - ) { - ( - Some(latest_account_hash_slot), - Some(latest_known_validator_account_hash_slot), - ) => { - // The validator is considered healthy if its latest account hash slot is within - // `health_check_slot_distance` of the latest known validator's account hash slot - if latest_account_hash_slot - > latest_known_validator_account_hash_slot - .saturating_sub(self.health_check_slot_distance) - { - RpcHealthStatus::Ok - } else { - let num_slots = latest_known_validator_account_hash_slot - .saturating_sub(latest_account_hash_slot); - warn!( - "health check: behind by {} slots: me={}, latest known_validator={}", - num_slots, - latest_account_hash_slot, - latest_known_validator_account_hash_slot - ); - RpcHealthStatus::Behind { num_slots } - } - } - (latest_account_hash_slot, latest_known_validator_account_hash_slot) => { - if latest_account_hash_slot.is_none() { - warn!("health check: latest_account_hash_slot not available"); - } - if latest_known_validator_account_hash_slot.is_none() { - warn!( - "health 
check: latest_known_validator_account_hash_slot not available" - ); - } - RpcHealthStatus::Unknown - } + // A node can observe votes by both replaying blocks and observing gossip. + // + // ClusterInfoVoteListener receives votes from both of these sources and then records + // optimistically confirmed slots in the Blockstore via OptimisticConfirmationVerifier. + // Thus, it is possible for a node to record an optimistically confirmed slot before the + // node has replayed and validated the slot for itself. + // + // OptimisticallyConfirmedBank holds a bank for the latest optimistically confirmed slot + // that the node has replayed. It is true that the node will have replayed that slot by + // virtue of having a bank available. Observing that the cluster has optimistically + // confirmed a slot through gossip is not enough to reconstruct the bank. + // + // So, comparing the latest optimistic slot from the Blockstore vs. the slot from the + // OptimisticallyConfirmedBank bank allows a node to see where it stands in relation to the + // tip of the cluster. 
+ let my_latest_optimistically_confirmed_slot = self + .optimistically_confirmed_bank + .read() + .unwrap() + .bank + .slot(); + + let mut optimistic_slot_infos = match self.blockstore.get_latest_optimistic_slots(1) { + Ok(infos) => infos, + Err(err) => { + warn!("health check: blockstore error: {err}"); + return RpcHealthStatus::Unknown; } - } else { - // No known validator point of reference available, so this validator is healthy - // because it's running + }; + let Some((cluster_latest_optimistically_confirmed_slot, _, _)) = + optimistic_slot_infos.pop() + else { + warn!("health check: blockstore does not contain any optimistically confirmed slots"); + return RpcHealthStatus::Unknown; + }; + + if my_latest_optimistically_confirmed_slot + >= cluster_latest_optimistically_confirmed_slot + .saturating_sub(self.health_check_slot_distance) + { RpcHealthStatus::Ok + } else { + let num_slots = cluster_latest_optimistically_confirmed_slot + .saturating_sub(my_latest_optimistically_confirmed_slot); + warn!( + "health check: behind by {num_slots} \ + slots: me={my_latest_optimistically_confirmed_slot}, \ + latest cluster={cluster_latest_optimistically_confirmed_slot}", + ); + RpcHealthStatus::Behind { num_slots } } } #[cfg(test)] - pub(crate) fn stub() -> Arc { - use crate::rpc::tests::new_test_cluster_info; + pub(crate) fn stub( + optimistically_confirmed_bank: Arc>, + blockstore: Arc, + ) -> Arc { Arc::new(Self::new( - Arc::new(new_test_cluster_info()), - None, + optimistically_confirmed_bank, + blockstore, 42, Arc::new(AtomicBool::new(false)), Arc::new(AtomicBool::new(true)), @@ -143,3 +131,84 @@ impl RpcHealth { *self.stub_health_status.write().unwrap() = stub_health_status; } } + +#[cfg(test)] +pub mod tests { + use { + super::*, + solana_ledger::{ + genesis_utils::{create_genesis_config, GenesisConfigInfo}, + get_tmp_ledger_path_auto_delete, + }, + solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_sdk::{clock::UnixTimestamp, hash::Hash, pubkey::Pubkey}, 
+ }; + + #[test] + fn test_get_health() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let bank0 = bank_forks.read().unwrap().root_bank(); + assert!(bank0.slot() == 0); + + let health_check_slot_distance = 10; + let override_health_check = Arc::new(AtomicBool::new(true)); + let startup_verification_complete = Arc::clone(bank0.get_startup_verification_complete()); + let health = RpcHealth::new( + optimistically_confirmed_bank.clone(), + blockstore.clone(), + health_check_slot_distance, + override_health_check.clone(), + startup_verification_complete, + ); + + // Override health check set to true - status is ok + assert_eq!(health.check(), RpcHealthStatus::Ok); + + // Remove the override - status now unknown with incomplete startup verification + override_health_check.store(false, Ordering::Relaxed); + assert_eq!(health.check(), RpcHealthStatus::Unknown); + + // Mark startup verification complete - status still unknown as no slots have been + // optimistically confirmed yet + bank0.set_startup_verification_complete(); + assert_eq!(health.check(), RpcHealthStatus::Unknown); + + // Mark slot 15 as being optimistically confirmed in the Blockstore, this could + // happen if the cluster confirmed the slot and this node became aware through gossip, + // but this node has not yet replayed slot 15. 
The local view of the latest optimistic + // slot is still slot 0 so status will be behind + blockstore + .insert_optimistic_slot(15, &Hash::default(), UnixTimestamp::default()) + .unwrap(); + assert_eq!(health.check(), RpcHealthStatus::Behind { num_slots: 15 }); + + // Simulate this node observing slot 4 as optimistically confirmed - status still behind + let bank4 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 4)); + optimistically_confirmed_bank.write().unwrap().bank = bank4.clone(); + assert_eq!(health.check(), RpcHealthStatus::Behind { num_slots: 11 }); + + // Simulate this node observing slot 5 as optimistically confirmed - status now ok + // as distance is <= health_check_slot_distance + let bank5 = Arc::new(Bank::new_from_parent(bank4, &Pubkey::default(), 5)); + optimistically_confirmed_bank.write().unwrap().bank = bank5.clone(); + assert_eq!(health.check(), RpcHealthStatus::Ok); + + // Node now up with tip of cluster + let bank15 = Arc::new(Bank::new_from_parent(bank5, &Pubkey::default(), 15)); + optimistically_confirmed_bank.write().unwrap().bank = bank15.clone(); + assert_eq!(health.check(), RpcHealthStatus::Ok); + + // Node "beyond" tip of cluster - this technically isn't possible but could be + // observed locally due to a race between updates to Blockstore and + // OptimisticallyConfirmedBank. Either way, not a problem and status is ok. 
+ let bank16 = Arc::new(Bank::new_from_parent(bank15, &Pubkey::default(), 16)); + optimistically_confirmed_bank.write().unwrap().bank = bank16.clone(); + assert_eq!(health.check(), RpcHealthStatus::Ok); + } +} diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index c38e3b7444b6a4..92822b342eb69d 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -37,12 +37,11 @@ use { }, solana_sdk::{ exit::Exit, genesis_config::DEFAULT_GENESIS_DOWNLOAD_PATH, hash::Hash, - native_token::lamports_to_sol, pubkey::Pubkey, + native_token::lamports_to_sol, }, solana_send_transaction_service::send_transaction_service::{self, SendTransactionService}, solana_storage_bigtable::CredentialType, std::{ - collections::HashSet, net::SocketAddr, path::{Path, PathBuf}, sync::{ @@ -350,7 +349,6 @@ impl JsonRpcService { ledger_path: &Path, validator_exit: Arc>, exit: Arc, - known_validators: Option>, override_health_check: Arc, startup_verification_complete: Arc, optimistically_confirmed_bank: Arc>, @@ -368,8 +366,8 @@ impl JsonRpcService { let rpc_niceness_adj = config.rpc_niceness_adj; let health = Arc::new(RpcHealth::new( - cluster_info.clone(), - known_validators, + Arc::clone(&optimistically_confirmed_bank), + Arc::clone(&blockstore), config.health_check_slot_distance, override_health_check, startup_verification_complete, @@ -586,10 +584,6 @@ mod tests { use { super::*, crate::rpc::{create_validator_exit, tests::new_test_cluster_info}, - solana_gossip::{ - crds::GossipRoute, - crds_value::{AccountsHashes, CrdsData, CrdsValue}, - }, solana_ledger::{ genesis_utils::{create_genesis_config, GenesisConfigInfo}, get_tmp_ledger_path, @@ -643,7 +637,6 @@ mod tests { &PathBuf::from("farf"), validator_exit, exit, - None, Arc::new(AtomicBool::new(false)), Arc::new(AtomicBool::new(true)), optimistically_confirmed_bank, @@ -726,18 +719,25 @@ mod tests { #[test] fn test_is_file_get_path() { + let ledger_path = get_tmp_ledger_path!(); + let blockstore = 
Arc::new(Blockstore::open(&ledger_path).unwrap()); + let bank_forks = create_bank_forks(); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let health = RpcHealth::stub(optimistically_confirmed_bank, blockstore); + let bank_forks = create_bank_forks(); let rrm = RpcRequestMiddleware::new( - PathBuf::from("/"), + ledger_path.clone(), None, bank_forks.clone(), - RpcHealth::stub(), + health.clone(), ); let rrm_with_snapshot_config = RpcRequestMiddleware::new( - PathBuf::from("/"), + ledger_path.clone(), Some(SnapshotConfig::default()), bank_forks, - RpcHealth::stub(), + health, ); assert!(rrm.is_file_get_path(DEFAULT_GENESIS_DOWNLOAD_PATH)); @@ -830,14 +830,17 @@ mod tests { let runtime = Runtime::new().unwrap(); let ledger_path = get_tmp_ledger_path!(); - std::fs::create_dir(&ledger_path).unwrap(); - + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let genesis_path = ledger_path.join(DEFAULT_GENESIS_ARCHIVE); + let bank_forks = create_bank_forks(); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let rrm = RpcRequestMiddleware::new( ledger_path.clone(), None, - create_bank_forks(), - RpcHealth::stub(), + bank_forks, + RpcHealth::stub(optimistically_confirmed_bank, blockstore), ); // File does not exist => request should fail. 
@@ -885,106 +888,4 @@ mod tests { } } } - - #[test] - fn test_health_check_with_no_known_validators() { - let rm = RpcRequestMiddleware::new( - PathBuf::from("/"), - None, - create_bank_forks(), - RpcHealth::stub(), - ); - assert_eq!(rm.health_check(), "ok"); - } - - #[test] - fn test_health_check_with_known_validators() { - let cluster_info = Arc::new(new_test_cluster_info()); - let health_check_slot_distance = 123; - let override_health_check = Arc::new(AtomicBool::new(false)); - let startup_verification_complete = Arc::new(AtomicBool::new(true)); - let known_validators = vec![ - solana_sdk::pubkey::new_rand(), - solana_sdk::pubkey::new_rand(), - solana_sdk::pubkey::new_rand(), - ]; - - let health = Arc::new(RpcHealth::new( - cluster_info.clone(), - Some(known_validators.clone().into_iter().collect()), - health_check_slot_distance, - override_health_check.clone(), - startup_verification_complete, - )); - - let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, create_bank_forks(), health); - - // No account hashes for this node or any known validators - assert_eq!(rm.health_check(), "unknown"); - - // No account hashes for any known validators - cluster_info.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]); - cluster_info.flush_push_queue(); - assert_eq!(rm.health_check(), "unknown"); - - // Override health check - override_health_check.store(true, Ordering::Relaxed); - assert_eq!(rm.health_check(), "ok"); - override_health_check.store(false, Ordering::Relaxed); - - // This node is ahead of the known validators - cluster_info - .gossip - .crds - .write() - .unwrap() - .insert( - CrdsValue::new_unsigned(CrdsData::AccountsHashes(AccountsHashes::new( - known_validators[0], - vec![ - (1, Hash::default()), - (1001, Hash::default()), - (2, Hash::default()), - ], - ))), - 1, - GossipRoute::LocalMessage, - ) - .unwrap(); - assert_eq!(rm.health_check(), "ok"); - - // Node is slightly behind the known validators - cluster_info - .gossip - 
.crds - .write() - .unwrap() - .insert( - CrdsValue::new_unsigned(CrdsData::AccountsHashes(AccountsHashes::new( - known_validators[1], - vec![(1000 + health_check_slot_distance - 1, Hash::default())], - ))), - 1, - GossipRoute::LocalMessage, - ) - .unwrap(); - assert_eq!(rm.health_check(), "ok"); - - // Node is far behind the known validators - cluster_info - .gossip - .crds - .write() - .unwrap() - .insert( - CrdsValue::new_unsigned(CrdsData::AccountsHashes(AccountsHashes::new( - known_validators[2], - vec![(1000 + health_check_slot_distance, Hash::default())], - ))), - 1, - GossipRoute::LocalMessage, - ) - .unwrap(); - assert_eq!(rm.health_check(), "behind"); - } } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 0dcafe309eb5d9..bd82c0a4ac2727 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -258,11 +258,9 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("SLOT_DISTANCE") .takes_value(true) .default_value(&default_args.health_check_slot_distance) - .help("If --known-validators are specified, report this validator healthy \ - if its latest account hash is no further behind than this number of \ - slots from the latest known validator account hash. 
\ - If no --known-validators are specified, the validator will always \ - report itself to be healthy") + .help("Report this validator healthy if its latest optimistically confirmed slot \ + that has been replayed is no further behind than this number of slots from \ + the cluster latest optimistically confirmed slot") ) .arg( Arg::with_name("rpc_faucet_addr") diff --git a/validator/src/main.rs b/validator/src/main.rs index abfe6e2009bc45..38bb9813ab3a70 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1299,6 +1299,7 @@ pub fn main() { "health_check_slot_distance", u64 ), + disable_health_check: false, rpc_threads: value_t_or_exit!(matches, "rpc_threads", usize), rpc_niceness_adj: value_t_or_exit!(matches, "rpc_niceness_adj", i8), account_indexes: account_indexes.clone(), From d948e5bf69a0b8093b5e509273d6bb9ecb5acbaf Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 16 Oct 2023 10:06:20 -0700 Subject: [PATCH 346/407] ancient shrink on its own cadence (#33712) --- accounts-db/src/accounts_db.rs | 7 ++----- runtime/src/accounts_background_service.rs | 6 +++++- runtime/src/bank.rs | 7 +++++++ 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index b4162eeecabaa3..6ccd88349d533d 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4403,11 +4403,12 @@ impl AccountsDb { /// get a sorted list of slots older than an epoch /// squash those slots into ancient append vecs - fn shrink_ancient_slots(&self, oldest_non_ancient_slot: Slot) { + pub fn shrink_ancient_slots(&self, epoch_schedule: &EpochSchedule) { if self.ancient_append_vec_offset.is_none() { return; } + let oldest_non_ancient_slot = self.get_oldest_non_ancient_slot(epoch_schedule); let can_randomly_shrink = true; let sorted_slots = self.get_sorted_potential_ancient_slots(oldest_non_ancient_slot); if self.create_ancient_storage == CreateAncientStorage::Append { @@ -4752,10 
+4753,6 @@ impl AccountsDb { pub fn shrink_candidate_slots(&self, epoch_schedule: &EpochSchedule) -> usize { let oldest_non_ancient_slot = self.get_oldest_non_ancient_slot(epoch_schedule); - if !self.shrink_candidate_slots.lock().unwrap().is_empty() { - // this can affect 'shrink_candidate_slots', so don't 'take' it until after this completes - self.shrink_ancient_slots(oldest_non_ancient_slot); - } let shrink_candidates_slots = std::mem::take(&mut *self.shrink_candidate_slots.lock().unwrap()); diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 627ccbf76adaa5..b826f8eddc5724 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -21,7 +21,7 @@ use { solana_accounts_db::{ accounts_db::CalcAccountsHashDataSource, accounts_hash::CalcAccountsHashConfig, }, - solana_measure::measure::Measure, + solana_measure::{measure::Measure, measure_us}, solana_sdk::clock::{BankId, Slot}, stats::StatsManager, std::{ @@ -383,6 +383,8 @@ impl SnapshotRequestHandler { snapshot_root_bank.clean_accounts(*last_full_snapshot_slot); clean_time.stop(); + let (_, shrink_ancient_time_us) = measure_us!(snapshot_root_bank.shrink_ancient_slots()); + let mut shrink_time = Measure::start("shrink_time"); snapshot_root_bank.shrink_candidate_slots(); shrink_time.stop(); @@ -464,6 +466,7 @@ impl SnapshotRequestHandler { ("snapshot_time", snapshot_time.as_us(), i64), ("total_us", total_time.as_us(), i64), ("non_snapshot_time_us", non_snapshot_time_us, i64), + ("shrink_ancient_time_us", shrink_ancient_time_us, i64), ); Ok(snapshot_root_bank.block_height()) } @@ -705,6 +708,7 @@ impl AccountsBackgroundService { bank.force_flush_accounts_cache(); bank.clean_accounts(last_full_snapshot_slot); last_cleaned_block_height = bank.block_height(); + bank.shrink_ancient_slots(); } bank.shrink_candidate_slots(); } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 5227db287c9f10..1eb555358151fd 
100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7952,6 +7952,13 @@ impl Bank { .shrink_candidate_slots(self.epoch_schedule()) } + pub(crate) fn shrink_ancient_slots(&self) { + self.rc + .accounts + .accounts_db + .shrink_ancient_slots(self.epoch_schedule()) + } + pub fn no_overflow_rent_distribution_enabled(&self) -> bool { self.feature_set .is_active(&feature_set::no_overflow_rent_distribution::id()) From 69495f4c13e025d8dc473438b31cc0f1788dc352 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 16 Oct 2023 12:47:07 -0700 Subject: [PATCH 347/407] Fix non-determinism in account_hash_ignore_slot on genesis (#33692) --- runtime/src/bank.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 1eb555358151fd..852ba85e353a2d 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3837,6 +3837,16 @@ impl Bank { // Bootstrap validator collects fees until `new_from_parent` is called. self.fee_rate_governor = genesis_config.fee_rate_governor.clone(); + // Make sure to activate the account_hash_ignore_slot feature + // before calculating any account hashes. 
+ if genesis_config + .accounts + .iter() + .any(|(pubkey, _)| pubkey == &feature_set::account_hash_ignore_slot::id()) + { + self.activate_feature(&feature_set::account_hash_ignore_slot::id()); + } + for (pubkey, account) in genesis_config.accounts.iter() { assert!( self.get_account(pubkey).is_none(), From 8a20e7f8deead592dd783d6a0b8fef3dc8b45ddf Mon Sep 17 00:00:00 2001 From: Pierre Date: Tue, 17 Oct 2023 08:53:03 +1100 Subject: [PATCH 348/407] sdk: fix broken C examples (#33701) fix C broken example --- programs/sbf/c/src/move_funds/move_funds.c | 2 +- sdk/bpf/c/README.md | 14 ++++++-------- sdk/sbf/c/README.md | 14 ++++++-------- 3 files changed, 13 insertions(+), 17 deletions(-) diff --git a/programs/sbf/c/src/move_funds/move_funds.c b/programs/sbf/c/src/move_funds/move_funds.c index cab323af560f60..9d26a025026d04 100644 --- a/programs/sbf/c/src/move_funds/move_funds.c +++ b/programs/sbf/c/src/move_funds/move_funds.c @@ -5,7 +5,7 @@ #include /** - * Number of SolKeyedAccount expected. The program should bail if an + * Number of SolAccountInfo expected. 
The program should bail if an * unexpected number of accounts are passed to the program's entrypoint */ #define NUM_KA 3 diff --git a/sdk/bpf/c/README.md b/sdk/bpf/c/README.md index 8158d80c6779ac..f2c3a1022b0125 100644 --- a/sdk/bpf/c/README.md +++ b/sdk/bpf/c/README.md @@ -9,16 +9,14 @@ and `src/program.c` containing: ```c #include -bool entrypoint(const uint8_t *input) { - SolKeyedAccount ka[1]; - uint8_t *data; - uint64_t data_len; +extern uint64_t entrypoint(const uint8_t *input) { + SolAccountInfo ka[1]; + SolParameters params = (SolParameters) { .ka = ka }; - if (!sol_deserialize(buf, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) { - return false; + if (!sol_deserialize(input, ¶ms, SOL_ARRAY_SIZE(ka))) { + return ERROR_INVALID_ARGUMENT; } - print_params(1, ka, data, data_len); - return true; + return SUCCESS; } ``` diff --git a/sdk/sbf/c/README.md b/sdk/sbf/c/README.md index ede109f36da9cc..888f0fbe65c3f9 100644 --- a/sdk/sbf/c/README.md +++ b/sdk/sbf/c/README.md @@ -9,16 +9,14 @@ and `src/program.c` containing: ```c #include -bool entrypoint(const uint8_t *input) { - SolKeyedAccount ka[1]; - uint8_t *data; - uint64_t data_len; +extern uint64_t entrypoint(const uint8_t *input) { + SolAccountInfo ka[1]; + SolParameters params = (SolParameters) { .ka = ka }; - if (!sol_deserialize(buf, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) { - return false; + if (!sol_deserialize(input, ¶ms, SOL_ARRAY_SIZE(ka))) { + return ERROR_INVALID_ARGUMENT; } - print_params(1, ka, data, data_len); - return true; + return SUCCESS; } ``` From b5b48eeb72a1824b0d48f1cca3ba59b518932153 Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Tue, 17 Oct 2023 01:12:15 -0700 Subject: [PATCH 349/407] Cargo.toml: Sort dependency and feature lists (#33670) Keeping these sorted reduces merge conflicts. 
--- Cargo.toml | 18 +++++++++--------- frozen-abi/Cargo.toml | 2 +- geyser-plugin-manager/Cargo.toml | 1 - programs/sbf/Cargo.toml | 2 +- remote-wallet/Cargo.toml | 6 +++--- sdk/program/Cargo.toml | 4 ++-- 6 files changed, 16 insertions(+), 17 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2ab74f105eadc2..32b3f9df7c9f46 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -133,6 +133,7 @@ license = "Apache-2.0" edition = "2021" [workspace.dependencies] +Inflector = "0.11.4" aes-gcm-siv = "0.10.3" ahash = "0.8.3" anyhow = "1.0.75" @@ -198,8 +199,8 @@ fast-math = "0.1" fd-lock = "3.0.13" flate2 = "1.0.28" fnv = "1.0.7" -fs_extra = "1.3.0" fs-err = "2.9.0" +fs_extra = "1.3.0" futures = "0.3.28" futures-util = "0.3.28" gag = "1.0.0" @@ -219,7 +220,6 @@ im = "15.1.0" index_list = "0.2.7" indexmap = "2.0.2" indicatif = "0.17.7" -Inflector = "0.11.4" itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] } js-sys = "0.3.64" @@ -246,10 +246,10 @@ min-max-heap = "1.3.0" modular-bitfield = "0.11.2" nix = "0.26.4" num-bigint = "0.4.4" -num_cpus = "1.16.0" -num_enum = "0.7.0" num-derive = "0.4" num-traits = "0.2" +num_cpus = "1.16.0" +num_enum = "0.7.0" openssl = "0.10" ouroboros = "0.15.6" parking_lot = "0.12" @@ -278,8 +278,8 @@ rayon = "1.8.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" regex = "1.10.1" -rolling-file = "0.2.0" reqwest = { version = "0.11.22", default-features = false } +rolling-file = "0.2.0" rpassword = "7.2" rustc_version = "0.4" rustls = { version = "0.21.7", default-features = false, features = ["quic"] } @@ -290,9 +290,9 @@ serde = "1.0.189" serde_bytes = "0.11.12" serde_derive = "1.0.103" serde_json = "1.0.107" +serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.25" serial_test = "2.0.0" -serde_with = { version = "2.3.3", default-features = false } sha2 = "0.10.8" sha3 = "0.10.4" signal-hook = "0.3.17" @@ -300,7 +300,6 @@ siphasher 
= "0.3.11" smpl_jwt = "0.7.1" socket2 = "0.5.4" soketto = "0.7" -solana_rbpf = "=0.7.2" solana-account-decoder = { path = "account-decoder", version = "=1.18.0" } solana-accounts-db = { path = "accounts-db", version = "=1.18.0" } solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=1.18.0" } @@ -312,7 +311,6 @@ solana-bloom = { path = "bloom", version = "=1.18.0" } solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.18.0" } solana-bucket-map = { path = "bucket_map", version = "=1.18.0" } solana-cargo-registry = { path = "cargo-registry", version = "=1.18.0" } -solana-connection-cache = { path = "connection-cache", version = "=1.18.0", default-features = false } solana-clap-utils = { path = "clap-utils", version = "=1.18.0" } solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.18.0" } solana-cli = { path = "cli", version = "=1.18.0" } @@ -321,6 +319,7 @@ solana-cli-output = { path = "cli-output", version = "=1.18.0" } solana-client = { path = "client", version = "=1.18.0" } solana-compute-budget-program = { path = "programs/compute-budget", version = "=1.18.0" } solana-config-program = { path = "programs/config", version = "=1.18.0" } +solana-connection-cache = { path = "connection-cache", version = "=1.18.0", default-features = false } solana-core = { path = "core", version = "=1.18.0" } solana-cost-model = { path = "cost-model", version = "=1.18.0" } solana-download-utils = { path = "download-utils", version = "=1.18.0" } @@ -333,8 +332,8 @@ solana-genesis-utils = { path = "genesis-utils", version = "=1.18.0" } solana-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.18.0" } solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=1.18.0" } solana-gossip = { path = "gossip", version = "=1.18.0" } -solana-loader-v4-program = { path = "programs/loader-v4", version = "=1.18.0" } solana-ledger = { path = "ledger", version = "=1.18.0" } 
+solana-loader-v4-program = { path = "programs/loader-v4", version = "=1.18.0" } solana-local-cluster = { path = "local-cluster", version = "=1.18.0" } solana-logger = { path = "logger", version = "=1.18.0" } solana-measure = { path = "measure", version = "=1.18.0" } @@ -378,6 +377,7 @@ solana-wen-restart = { path = "wen-restart", version = "=1.18.0" } solana-zk-keygen = { path = "zk-keygen", version = "=1.18.0" } solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.18.0" } solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.18.0" } +solana_rbpf = "=0.7.2" spl-associated-token-account = "=2.2.0" spl-instruction-padding = "0.1" spl-memo = "=4.0.0" diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 4a4029ceb843d4..25272a04b80b7b 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -30,8 +30,8 @@ memmap2 = { workspace = true } subtle = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dev-dependencies] -solana-logger = { workspace = true } bitflags = { workspace = true } +solana-logger = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/geyser-plugin-manager/Cargo.toml b/geyser-plugin-manager/Cargo.toml index 9b4468eddaea9b..d905248150b717 100644 --- a/geyser-plugin-manager/Cargo.toml +++ b/geyser-plugin-manager/Cargo.toml @@ -10,7 +10,6 @@ license = { workspace = true } edition = { workspace = true } [dependencies] - bs58 = { workspace = true } crossbeam-channel = { workspace = true } json5 = { workspace = true } diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 509d750cd41d29..507bf01385e44c 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -25,7 +25,6 @@ rand = "0.8" rustversion = "1.0.14" serde = "1.0.112" serde_json = "1.0.56" -solana_rbpf = "=0.7.2" solana-account-decoder = { path = "../../account-decoder", version = "=1.18.0" } solana-accounts-db = { path = "../../accounts-db", version = "=1.18.0" } 
solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.18.0" } @@ -49,6 +48,7 @@ solana-sdk = { path = "../../sdk", version = "=1.18.0" } solana-transaction-status = { path = "../../transaction-status", version = "=1.18.0" } solana-validator = { path = "../../validator", version = "=1.18.0" } solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.18.0" } +solana_rbpf = "=0.7.2" static_assertions = "1.1.0" thiserror = "1.0" diff --git a/remote-wallet/Cargo.toml b/remote-wallet/Cargo.toml index 21fecb564b21fd..8cea360d7c14ca 100644 --- a/remote-wallet/Cargo.toml +++ b/remote-wallet/Cargo.toml @@ -28,10 +28,10 @@ assert_matches = { workspace = true } [features] default = ["linux-static-hidraw", "hidapi"] -linux-static-libusb = ["hidapi/linux-static-libusb"] -linux-static-hidraw = ["hidapi/linux-static-hidraw"] -linux-shared-libusb = ["hidapi/linux-shared-libusb"] linux-shared-hidraw = ["hidapi/linux-shared-hidraw"] +linux-shared-libusb = ["hidapi/linux-shared-libusb"] +linux-static-hidraw = ["hidapi/linux-static-hidraw"] +linux-static-libusb = ["hidapi/linux-static-libusb"] [[bin]] name = "solana-ledger-udev" diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 8d178ec86f525f..3f73eaf1d76c27 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -51,8 +51,8 @@ ark-bn254 = { workspace = true } ark-ec = { workspace = true } ark-ff = { workspace = true } ark-serialize = { workspace = true } -bitflags = { workspace = true } base64 = { workspace = true, features = ["alloc", "std"] } +bitflags = { workspace = true } curve25519-dalek = { workspace = true, features = ["serde"] } itertools = { workspace = true } libc = { workspace = true, features = ["extra_traits"] } @@ -70,8 +70,8 @@ solana-logger = { workspace = true } [target.'cfg(target_arch = "wasm32")'.dependencies] console_error_panic_hook = { workspace = true } console_log = { workspace = true } -js-sys = { workspace = true } getrandom = { workspace = true, features 
= ["js", "wasm-bindgen"] } +js-sys = { workspace = true } [target.'cfg(not(target_pointer_width = "64"))'.dependencies] parking_lot = { workspace = true } From 9faae48d1969f440d67e547614f8dc8b711eb9f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:03:27 +0000 Subject: [PATCH 350/407] build(deps): bump @babel/traverse from 7.19.6 to 7.23.2 in /docs (#33726) Bumps [@babel/traverse](https://github.com/babel/babel/tree/HEAD/packages/babel-traverse) from 7.19.6 to 7.23.2. - [Release notes](https://github.com/babel/babel/releases) - [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md) - [Commits](https://github.com/babel/babel/commits/v7.23.2/packages/babel-traverse) --- updated-dependencies: - dependency-name: "@babel/traverse" dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/package-lock.json | 363 +++++++++++++++++++++++++++-------------- 1 file changed, 241 insertions(+), 122 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 8f577415e69674..976f65828db4bf 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -184,16 +184,81 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dependencies": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": 
"3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + 
"node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/@babel/compat-data": { "version": "7.19.4", "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.19.4.tgz", @@ -240,12 +305,13 @@ } }, "node_modules/@babel/generator": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.6.tgz", - "integrity": "sha512-oHGRUQeoX1QrKeJIKVe0hwjGqNnVYsM5Nep5zo0uE0m42sLH+Fsd2pStJ5sRM1bNyTUUoz0pe2lTeMJrb/taTA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", "dependencies": { - "@babel/types": "^7.19.4", + "@babel/types": "^7.23.0", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "engines": { @@ -373,9 +439,9 @@ } }, "node_modules/@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "engines": { "node": ">=6.9.0" } @@ -392,23 +458,23 @@ } }, "node_modules/@babel/helper-function-name": { - "version": "7.19.0", - "resolved": 
"https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dependencies": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -528,28 +594,28 @@ } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" 
} }, "node_modules/@babel/helper-string-parser": { - "version": "7.19.4", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.19.4.tgz", - "integrity": "sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "engines": { "node": ">=6.9.0" } @@ -590,12 +656,12 @@ } }, "node_modules/@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "dependencies": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "engines": { @@ -667,9 +733,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.19.6", - "resolved": 
"https://registry.npmjs.org/@babel/parser/-/parser-7.19.6.tgz", - "integrity": "sha512-h1IUp81s2JYJ3mRkdxJgs4UvmSsRvDrx5ICSJbPvtWYv5i1nTBGcBpnog+89rAFMwvvru6E5NUHdBe01UeSzYA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", "bin": { "parser": "bin/babel-parser.js" }, @@ -1908,31 +1974,31 @@ } }, "node_modules/@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.6.tgz", - "integrity": "sha512-6l5HrUCzFM04mfbG09AagtYyR2P0B71B1wN7PfSPiksDPz2k5H9CBC1tcZpz2M8OxbKTPccByoOJ22rUKbpmQQ==", - "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.6", - "@babel/types": "^7.19.4", + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": 
"sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -1941,12 +2007,12 @@ } }, "node_modules/@babel/types": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.20.7.tgz", - "integrity": "sha512-69OnhBxSSgK0OzTJai4kyPDiKTIe3j+ctaHdIGVbRahTLAT7L3R9oeXHC2aVSuGYt3cVnoAMDmOCgJ2yaiLMvg==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", "dependencies": { - "@babel/helper-string-parser": "^7.19.4", - "@babel/helper-validator-identifier": "^7.19.1", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" }, "engines": { @@ -14439,11 +14505,63 @@ } }, "@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "requires": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": 
"sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "requires": { + "has-flag": "^3.0.0" + } + } } }, "@babel/compat-data": { @@ -14481,12 +14599,13 @@ } }, "@babel/generator": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.6.tgz", - 
"integrity": "sha512-oHGRUQeoX1QrKeJIKVe0hwjGqNnVYsM5Nep5zo0uE0m42sLH+Fsd2pStJ5sRM1bNyTUUoz0pe2lTeMJrb/taTA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", "requires": { - "@babel/types": "^7.19.4", + "@babel/types": "^7.23.0", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "dependencies": { @@ -14581,9 +14700,9 @@ } }, "@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==" + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==" }, "@babel/helper-explode-assignable-expression": { "version": "7.18.6", @@ -14594,20 +14713,20 @@ } }, "@babel/helper-function-name": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "requires": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" } }, "@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": 
"https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-member-expression-to-functions": { @@ -14694,22 +14813,22 @@ } }, "@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-string-parser": { - "version": "7.19.4", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.19.4.tgz", - "integrity": "sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw==" + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==" }, "@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": 
"sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==" + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==" }, "@babel/helper-validator-option": { "version": "7.18.6", @@ -14738,12 +14857,12 @@ } }, "@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "requires": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "dependencies": { @@ -14799,9 +14918,9 @@ } }, "@babel/parser": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.6.tgz", - "integrity": "sha512-h1IUp81s2JYJ3mRkdxJgs4UvmSsRvDrx5ICSJbPvtWYv5i1nTBGcBpnog+89rAFMwvvru6E5NUHdBe01UeSzYA==" + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==" }, "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { "version": "7.18.6", @@ -15600,39 +15719,39 @@ } }, "@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": "7.22.15", + 
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" } }, "@babel/traverse": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.6.tgz", - "integrity": "sha512-6l5HrUCzFM04mfbG09AagtYyR2P0B71B1wN7PfSPiksDPz2k5H9CBC1tcZpz2M8OxbKTPccByoOJ22rUKbpmQQ==", - "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.6", - "@babel/types": "^7.19.4", + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "requires": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", "debug": "^4.1.0", "globals": "^11.1.0" } }, "@babel/types": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.20.7.tgz", - "integrity": "sha512-69OnhBxSSgK0OzTJai4kyPDiKTIe3j+ctaHdIGVbRahTLAT7L3R9oeXHC2aVSuGYt3cVnoAMDmOCgJ2yaiLMvg==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": 
"sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", "requires": { - "@babel/helper-string-parser": "^7.19.4", - "@babel/helper-validator-identifier": "^7.19.1", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" } }, From 122ec75e4b3e48f740f038bd90e8572eae1e1986 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:07:11 +0000 Subject: [PATCH 351/407] build(deps): bump const_format from 0.2.31 to 0.2.32 (#33727) Bumps [const_format](https://github.com/rodrimati1992/const_format_crates) from 0.2.31 to 0.2.32. - [Release notes](https://github.com/rodrimati1992/const_format_crates/releases) - [Changelog](https://github.com/rodrimati1992/const_format_crates/blob/master/Changelog.md) - [Commits](https://github.com/rodrimati1992/const_format_crates/commits) --- updated-dependencies: - dependency-name: const_format dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f41c6deeae40cb..a03acc727878bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1228,18 +1228,18 @@ checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" [[package]] name = "const_format" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c990efc7a285731f9a4378d81aff2f0e85a2c8781a05ef0f8baa8dac54d0ff48" +checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e026b6ce194a874cb9cf32cd5772d1ef9767cc8fcb5765948d74f37a9d8b2bf6" +checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 32b3f9df7c9f46..8661c35f3d1bdf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -173,7 +173,7 @@ clap = "2.33.1" console = "0.15.7" console_error_panic_hook = "0.1.7" console_log = "0.2.2" -const_format = "0.2.31" +const_format = "0.2.32" core_affinity = "0.5.10" criterion = "0.5.1" criterion-stats = "0.3.0" From b241cef813f4f2d72c7465b3f8259da65bf6d81d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 15:21:28 +0000 Subject: [PATCH 352/407] build(deps): bump regex from 1.10.1 to 1.10.2 (#33725) * build(deps): bump regex from 1.10.1 to 1.10.2 Bumps [regex](https://github.com/rust-lang/regex) from 1.10.1 to 1.10.2. 
- [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.10.1...1.10.2) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 10 +++++----- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a03acc727878bc..6cf391a085f449 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4372,13 +4372,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaac441002f822bc9705a681810a4dd2963094b9ca0ddc41cb963a4c189189ea" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-automata 0.4.2", + "regex-automata 0.4.3", "regex-syntax 0.8.2", ] @@ -4390,9 +4390,9 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5011c7e263a695dc8ca064cddb722af1be54e517a280b12a5356f98366899e5d" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick 1.0.1", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 8661c35f3d1bdf..ceb0e67c1c1ce6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -277,7 +277,7 @@ raptorq = "1.7.0" rayon = "1.8.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" -regex = "1.10.1" +regex = "1.10.2" reqwest = { 
version = "0.11.22", default-features = false } rolling-file = "0.2.0" rpassword = "7.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9cc774c2a48c37..d4a60cd04493bc 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3814,9 +3814,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaac441002f822bc9705a681810a4dd2963094b9ca0ddc41cb963a4c189189ea" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -3826,9 +3826,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5011c7e263a695dc8ca064cddb722af1be54e517a280b12a5356f98366899e5d" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick 1.0.1", "memchr", From 5de91636259a78f7f72f20ec8132dd7e9a547b6d Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 17 Oct 2023 08:34:18 -0700 Subject: [PATCH 353/407] add metric for ancient can't move slots (#33713) * add metric for ancient can't move slots * rename * fix erors in replacing text * rename --- accounts-db/src/accounts_db.rs | 14 +++++++++++++- accounts-db/src/ancient_append_vecs.rs | 7 +++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 6ccd88349d533d..fa4cebc298589b 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2014,10 +2014,10 @@ pub(crate) struct ShrinkStatsSub { pub(crate) store_accounts_timing: StoreAccountsTiming, pub(crate) rewrite_elapsed_us: u64, pub(crate) create_and_insert_store_elapsed_us: u64, + pub(crate) unpackable_slots_count: usize, } impl ShrinkStatsSub { - #[allow(dead_code)] pub(crate) fn accumulate(&mut 
self, other: &Self) { self.store_accounts_timing .accumulate(&other.store_accounts_timing); @@ -2026,6 +2026,7 @@ impl ShrinkStatsSub { self.create_and_insert_store_elapsed_us, other.create_and_insert_store_elapsed_us ); + saturating_add_assign!(self.unpackable_slots_count, other.unpackable_slots_count); } } @@ -2041,6 +2042,7 @@ pub struct ShrinkStats { handle_reclaims_elapsed: AtomicU64, remove_old_stores_shrink_us: AtomicU64, rewrite_elapsed: AtomicU64, + unpackable_slots_count: AtomicU64, drop_storage_entries_elapsed: AtomicU64, recycle_stores_write_elapsed: AtomicU64, accounts_removed: AtomicUsize, @@ -2219,6 +2221,13 @@ impl ShrinkAncientStats { self.shrink_stats.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "unpackable_slots_count", + self.shrink_stats + .unpackable_slots_count + .swap(0, Ordering::Relaxed) as i64, + i64 + ), ( "drop_storage_entries_elapsed", self.shrink_stats @@ -4177,6 +4186,9 @@ impl AccountsDb { shrink_stats .rewrite_elapsed .fetch_add(stats_sub.rewrite_elapsed_us, Ordering::Relaxed); + shrink_stats + .unpackable_slots_count + .fetch_add(stats_sub.unpackable_slots_count as u64, Ordering::Relaxed); } /// get stores for 'slot' diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 770eb0be73a741..f4da2f20d7944e 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -299,6 +299,7 @@ impl AccountsDb { ); let accounts_to_combine = self.calc_accounts_to_combine(&accounts_per_storage); + metrics.unpackable_slots_count += accounts_to_combine.unpackable_slots_count; // pack the accounts with 1 ref let pack = PackedAncientStorage::pack( @@ -385,6 +386,7 @@ impl AccountsDb { store_accounts_timing, rewrite_elapsed_us, create_and_insert_store_elapsed_us, + unpackable_slots_count: 0, }); write_ancient_accounts .shrinks_in_progress @@ -584,6 +586,7 @@ impl AccountsDb { target_slots_sorted.push(info.slot); } } + let unpackable_slots_count = 
remove.len(); remove.into_iter().rev().for_each(|i| { accounts_to_combine.remove(i); }); @@ -591,6 +594,7 @@ impl AccountsDb { accounts_to_combine, accounts_keep_slots, target_slots_sorted, + unpackable_slots_count, } } @@ -718,6 +722,8 @@ struct AccountsToCombine<'a> { /// Some of these slots will have ancient append vecs created at them to contain everything in 'accounts_to_combine' /// The rest will become dead slots with no accounts in them. target_slots_sorted: Vec, + /// when scanning, this many slots contained accounts that could not be packed because accounts with ref_count > 1 existed. + unpackable_slots_count: usize, } #[derive(Default)] @@ -3135,6 +3141,7 @@ pub mod tests { accounts_keep_slots: HashMap::default(), accounts_to_combine: vec![shrink_collect], target_slots_sorted: Vec::default(), + unpackable_slots_count: 0, }; db.addref_accounts_failed_to_shrink_ancient(accounts_to_combine); db.accounts_index.scan( From 6efc7ec61dbee1608b2fd142ceea34f939ab5bb8 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Tue, 17 Oct 2023 10:34:12 -0700 Subject: [PATCH 354/407] remove redundant pubkey update record (#33722) * remove redundant pubkey update record * from became unused, so removed from all process_pull_response() calls --- gossip/src/cluster_info.rs | 1 - gossip/src/crds_gossip.rs | 2 -- gossip/src/crds_gossip_pull.rs | 19 ++----------------- gossip/tests/crds_gossip.rs | 1 - 4 files changed, 2 insertions(+), 21 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 8bfe628da8c441..0e72efd8a5cde2 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2184,7 +2184,6 @@ impl ClusterInfo { { let _st = ScopedTimer::from(&self.stats.process_pull_response); self.gossip.process_pull_responses( - from, filtered_pulls, filtered_pulls_expired_timeout, failed_inserts, diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index 41a0e4c9ab4a7f..015deed1d2a472 100644 --- a/gossip/src/crds_gossip.rs +++ 
b/gossip/src/crds_gossip.rs @@ -274,7 +274,6 @@ impl CrdsGossip { /// Process a pull response. pub fn process_pull_responses( &self, - from: &Pubkey, responses: Vec, responses_expired_timeout: Vec, failed_inserts: Vec, @@ -283,7 +282,6 @@ impl CrdsGossip { ) { self.pull.process_pull_responses( &self.crds, - from, responses, responses_expired_timeout, failed_inserts, diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index 3e69192f2ac7e9..dae1eb3d79a7d3 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -360,7 +360,6 @@ impl CrdsGossipPull { pub(crate) fn process_pull_responses( &self, crds: &RwLock, - from: &Pubkey, responses: Vec, responses_expired_timeout: Vec, failed_inserts: Vec, @@ -382,7 +381,6 @@ impl CrdsGossipPull { } stats.success += num_inserts; self.num_pulls.fetch_add(num_inserts, Ordering::Relaxed); - owners.insert(*from); for owner in owners { crds.update_record_timestamp(&owner, now); } @@ -543,7 +541,6 @@ impl CrdsGossipPull { fn process_pull_response( &self, crds: &RwLock, - from: &Pubkey, timeouts: &CrdsTimeouts, response: Vec, now: u64, @@ -553,7 +550,6 @@ impl CrdsGossipPull { self.filter_pull_responses(crds, timeouts, response, now, &mut stats); self.process_pull_responses( crds, - from, versioned, versioned_expired_timeout, failed_inserts, @@ -1196,7 +1192,6 @@ pub(crate) mod tests { let failed = node .process_pull_response( &node_crds, - &node_pubkey, &node.make_timeouts(node_pubkey, &HashMap::new(), Duration::default()), rsp.into_iter().flatten().collect(), 1, @@ -1375,14 +1370,8 @@ pub(crate) mod tests { ); // inserting a fresh value should be fine. 
assert_eq!( - node.process_pull_response( - &node_crds, - &peer_pubkey, - &timeouts, - vec![peer_entry.clone()], - 1, - ) - .0, + node.process_pull_response(&node_crds, &timeouts, vec![peer_entry.clone()], 1,) + .0, 0 ); @@ -1394,7 +1383,6 @@ pub(crate) mod tests { assert_eq!( node.process_pull_response( &node_crds, - &peer_pubkey, &timeouts, vec![peer_entry.clone(), unstaked_peer_entry], node.crds_timeout + 100, @@ -1408,7 +1396,6 @@ pub(crate) mod tests { assert_eq!( node.process_pull_response( &node_crds, - &peer_pubkey, &timeouts, vec![peer_entry], node.crds_timeout + 1, @@ -1425,7 +1412,6 @@ pub(crate) mod tests { assert_eq!( node.process_pull_response( &node_crds, - &peer_pubkey, &timeouts, vec![peer_vote.clone()], node.crds_timeout + 1, @@ -1439,7 +1425,6 @@ pub(crate) mod tests { assert_eq!( node.process_pull_response( &node_crds, - &peer_pubkey, &timeouts, vec![peer_vote], node.crds_timeout + 2, diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs index 827da50390c305..74415ec3c8ff33 100644 --- a/gossip/tests/crds_gossip.rs +++ b/gossip/tests/crds_gossip.rs @@ -575,7 +575,6 @@ fn network_run_pull( .gossip .filter_pull_responses(&timeouts, rsp, now, &mut stats); node.gossip.process_pull_responses( - &from, vers, vers_expired_timeout, failed_inserts, From f178975185de868eaff5592213a7a65d11af1857 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Tue, 17 Oct 2023 14:45:17 -0500 Subject: [PATCH 355/407] typo (#33734) Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index fa4cebc298589b..f04933b47df0a8 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4347,7 +4347,7 @@ impl AccountsDb { for usage in &store_usage { let store = &usage.store; let alive_ratio = (total_alive_bytes as f64) / (total_bytes as f64); - debug!("alive_ratio: {:?} store_id: {:?}, store_ratio: {:?} 
requirment: {:?}, total_bytes: {:?} total_alive_bytes: {:?}", + debug!("alive_ratio: {:?} store_id: {:?}, store_ratio: {:?} requirement: {:?}, total_bytes: {:?} total_alive_bytes: {:?}", alive_ratio, usage.store.append_vec_id(), usage.alive_ratio, shrink_ratio, total_bytes, total_alive_bytes); if alive_ratio > shrink_ratio { // we have reached our goal, stop From 2c6cc4dd7dcf994229ef450b70a865ce30b853e6 Mon Sep 17 00:00:00 2001 From: Jacob Creech <82475023+jacobcreech@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:55:29 -0500 Subject: [PATCH 356/407] docs: move rpc info to rpc docs (#33723) docs: link fixes docs: link fixes docs: link fixes --- docs/src/running-validator/validator-start.md | 15 --------------- .../validator/get-started/setup-an-rpc-node.md | 17 ++++++++++++++++- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/src/running-validator/validator-start.md b/docs/src/running-validator/validator-start.md index 16940dc7030d45..cfa3aa152f15a6 100644 --- a/docs/src/running-validator/validator-start.md +++ b/docs/src/running-validator/validator-start.md @@ -430,18 +430,3 @@ which starts the solana validator process uses "exec" to do so (example: "exec solana-validator ..."); otherwise, when logrotate sends its signal to the validator, the enclosing script will die and take the validator process with it. - -### Account indexing - -As the number of populated accounts on the cluster grows, account-data RPC -requests that scan the entire account set -- like -[`getProgramAccounts`](../api/http#getprogramaccounts) and -[SPL-token-specific requests](../api/http#gettokenaccountsbydelegate) -- -may perform poorly. If your validator needs to support any of these requests, -you can use the `--account-index` parameter to activate one or more in-memory -account indexes that significantly improve RPC performance by indexing accounts -by the key field. 
Currently supports the following parameter values: - -- `program-id`: each account indexed by its owning program; used by [getProgramAccounts](../api/http#getprogramaccounts) -- `spl-token-mint`: each SPL token account indexed by its token Mint; used by [getTokenAccountsByDelegate](../api/http#gettokenaccountsbydelegate), and [getTokenLargestAccounts](../api/http#gettokenlargestaccounts) -- `spl-token-owner`: each SPL token account indexed by the token-owner address; used by [getTokenAccountsByOwner](../api/http#gettokenaccountsbyowner), and [getProgramAccounts](../api/http#getprogramaccounts) requests that include an spl-token-owner filter. diff --git a/docs/src/validator/get-started/setup-an-rpc-node.md b/docs/src/validator/get-started/setup-an-rpc-node.md index fc2038e67783c9..2c3f35031042e7 100644 --- a/docs/src/validator/get-started/setup-an-rpc-node.md +++ b/docs/src/validator/get-started/setup-an-rpc-node.md @@ -67,4 +67,19 @@ The identities of the [known validators](../../running-validator/validator-start Additional examples of other Solana cluster specific validator commands can be found on the [Clusters](../../clusters.md) page. -Keep in mind, you will still need to customize these commands to operate as an RPC node, as well other operator specific configuration settings. \ No newline at end of file +Keep in mind, you will still need to customize these commands to operate as an RPC node, as well other operator specific configuration settings. + +## Account indexing + +As the number of populated accounts on the cluster grows, account-data RPC +requests that scan the entire account set -- like +[`getProgramAccounts`](../../api/http#getprogramaccounts) and +[SPL-token-specific requests](../../api/http#gettokenaccountsbydelegate) -- +may perform poorly. 
If your validator needs to support any of these requests, +you can use the `--account-index` parameter to activate one or more in-memory +account indexes that significantly improve RPC performance by indexing accounts +by the key field. Currently supports the following parameter values: + +- `program-id`: each account indexed by its owning program; used by [getProgramAccounts](../../api/http#getprogramaccounts) +- `spl-token-mint`: each SPL token account indexed by its token Mint; used by [getTokenAccountsByDelegate](../../api/http#gettokenaccountsbydelegate), and [getTokenLargestAccounts](../../api/http#gettokenlargestaccounts) +- `spl-token-owner`: each SPL token account indexed by the token-owner address; used by [getTokenAccountsByOwner](../../api/http#gettokenaccountsbyowner), and [getProgramAccounts](../../api/http#getprogramaccounts) requests that include an spl-token-owner filter. From 673a38c89219f90b9ed213df40df6e0cafd17f69 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Tue, 17 Oct 2023 15:20:29 -0500 Subject: [PATCH 357/407] move timer after early exit (#33732) Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f04933b47df0a8..f4d4b665c3689c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4554,10 +4554,11 @@ impl AccountsDb { /// Combine all account data from storages in 'sorted_slots' into ancient append vecs. /// This keeps us from accumulating append vecs for each slot older than an epoch. 
fn combine_ancient_slots(&self, sorted_slots: Vec, can_randomly_shrink: bool) { - let mut total = Measure::start("combine_ancient_slots"); if sorted_slots.is_empty() { return; } + + let mut total = Measure::start("combine_ancient_slots"); let mut guard = None; // the ancient append vec currently being written to From 56a74859982250a25ecf56b83784bbb61a3544fb Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 17 Oct 2023 14:41:23 -0700 Subject: [PATCH 358/407] log ancient stats each time we run (#33730) --- accounts-db/src/accounts_db.rs | 296 ++++++++++++------------- accounts-db/src/ancient_append_vecs.rs | 6 +- 2 files changed, 148 insertions(+), 154 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f4d4b665c3689c..22f81f68550519 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2157,155 +2157,153 @@ impl ShrinkStats { impl ShrinkAncientStats { pub(crate) fn report(&self) { - if self.shrink_stats.last_report.should_update(1000) { - datapoint_info!( - "shrink_ancient_stats", - ( - "num_slots_shrunk", - self.shrink_stats - .num_slots_shrunk - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "storage_read_elapsed", - self.shrink_stats - .storage_read_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "index_read_elapsed", - self.shrink_stats - .index_read_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "create_and_insert_store_elapsed", - self.shrink_stats - .create_and_insert_store_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "store_accounts_elapsed", - self.shrink_stats - .store_accounts_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "update_index_elapsed", - self.shrink_stats - .update_index_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "handle_reclaims_elapsed", - self.shrink_stats - .handle_reclaims_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - 
"remove_old_stores_shrink_us", - self.shrink_stats - .remove_old_stores_shrink_us - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "rewrite_elapsed", - self.shrink_stats.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "unpackable_slots_count", - self.shrink_stats - .unpackable_slots_count - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "drop_storage_entries_elapsed", - self.shrink_stats - .drop_storage_entries_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "recycle_stores_write_time", - self.shrink_stats - .recycle_stores_write_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "accounts_removed", - self.shrink_stats - .accounts_removed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_removed", - self.shrink_stats.bytes_removed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_written", - self.shrink_stats.bytes_written.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "alive_accounts", - self.shrink_stats.alive_accounts.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "dead_accounts", - self.shrink_stats.dead_accounts.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "accounts_loaded", - self.shrink_stats.accounts_loaded.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "ancient_append_vecs_shrunk", - self.ancient_append_vecs_shrunk.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "random", - self.random_shrink.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "slots_considered", - self.slots_considered.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "ancient_scanned", - self.ancient_scanned.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "total_us", - self.total_us.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "second_pass_one_ref", - self.second_pass_one_ref.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ); - } + datapoint_info!( + "shrink_ancient_stats", + ( + "num_slots_shrunk", + self.shrink_stats + .num_slots_shrunk + .swap(0, Ordering::Relaxed) as 
i64, + i64 + ), + ( + "storage_read_elapsed", + self.shrink_stats + .storage_read_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "index_read_elapsed", + self.shrink_stats + .index_read_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "create_and_insert_store_elapsed", + self.shrink_stats + .create_and_insert_store_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "store_accounts_elapsed", + self.shrink_stats + .store_accounts_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "update_index_elapsed", + self.shrink_stats + .update_index_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "handle_reclaims_elapsed", + self.shrink_stats + .handle_reclaims_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "remove_old_stores_shrink_us", + self.shrink_stats + .remove_old_stores_shrink_us + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "rewrite_elapsed", + self.shrink_stats.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "unpackable_slots_count", + self.shrink_stats + .unpackable_slots_count + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "drop_storage_entries_elapsed", + self.shrink_stats + .drop_storage_entries_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "recycle_stores_write_time", + self.shrink_stats + .recycle_stores_write_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "accounts_removed", + self.shrink_stats + .accounts_removed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_removed", + self.shrink_stats.bytes_removed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_written", + self.shrink_stats.bytes_written.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "alive_accounts", + self.shrink_stats.alive_accounts.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "dead_accounts", + self.shrink_stats.dead_accounts.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "accounts_loaded", + 
self.shrink_stats.accounts_loaded.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "ancient_append_vecs_shrunk", + self.ancient_append_vecs_shrunk.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "random", + self.random_shrink.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "slots_considered", + self.slots_considered.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "ancient_scanned", + self.ancient_scanned.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "total_us", + self.total_us.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "second_pass_one_ref", + self.second_pass_one_ref.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ); } } diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index f4da2f20d7944e..46e299d3eefac3 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -272,11 +272,7 @@ impl AccountsDb { .total_us .fetch_add(total_us, Ordering::Relaxed); - // only log when we've spent 1s total - // results will continue to accumulate otherwise - if self.shrink_ancient_stats.total_us.load(Ordering::Relaxed) > 1_000_000 { - self.shrink_ancient_stats.report(); - } + self.shrink_ancient_stats.report(); } fn combine_ancient_slots_packed_internal( From c09cbbb778a9491cec34f77920a8096a4de496af Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 17 Oct 2023 14:49:46 -0700 Subject: [PATCH 359/407] sort ancient append vec target_slots_sorted (#33729) --- accounts-db/src/ancient_append_vecs.rs | 328 ++++++++++++++----------- 1 file changed, 182 insertions(+), 146 deletions(-) diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 46e299d3eefac3..c4ba88c3cc6434 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -586,6 +586,7 @@ impl AccountsDb { remove.into_iter().rev().for_each(|i| { accounts_to_combine.remove(i); }); + target_slots_sorted.sort_unstable(); 
AccountsToCombine { accounts_to_combine, accounts_keep_slots, @@ -717,6 +718,7 @@ struct AccountsToCombine<'a> { /// these slots will NOT be in 'accounts_keep_slots' /// Some of these slots will have ancient append vecs created at them to contain everything in 'accounts_to_combine' /// The rest will become dead slots with no accounts in them. + /// Sort order is lowest to highest. target_slots_sorted: Vec, /// when scanning, this many slots contained accounts that could not be packed because accounts with ref_count > 1 existed. unpackable_slots_count: usize, @@ -1419,167 +1421,201 @@ pub mod tests { for add_dead_account in [true, false] { for method in TestWriteMultipleRefs::iter() { for num_slots in 0..3 { - for two_refs in [false, true] { - let (db, storages, slots, infos) = get_sample_storages(num_slots, None); - let original_results = storages - .iter() - .map(|store| db.get_unique_accounts_from_storage(store)) - .collect::>(); - if two_refs { - original_results.iter().for_each(|results| { - results.stored_accounts.iter().for_each(|account| { - let entry = db - .accounts_index - .get_account_read_entry(account.pubkey()) - .unwrap(); - entry.addref(); - }) - }); - } + for unsorted_slots in [false, true] { + for two_refs in [false, true] { + let (db, mut storages, slots, mut infos) = + get_sample_storages(num_slots, None); + let slots_vec; + if unsorted_slots { + slots_vec = slots.rev().collect::>(); + storages = storages.into_iter().rev().collect(); + infos = infos.into_iter().rev().collect(); + } else { + slots_vec = slots.collect::>() + } - if add_dead_account { - storages.iter().for_each(|storage| { - let pk = solana_sdk::pubkey::new_rand(); - let alive = false; - let write_version = 0; - append_single_account_with_default_hash( - storage, - &pk, - &AccountSharedData::default(), - write_version, - alive, - Some(&db.accounts_index), - ); - assert!(db.accounts_index.purge_exact( - &pk, - &[storage.slot()] - .into_iter() - .collect::>(), - &mut Vec::default() - 
)); - }); - } - let original_results = storages - .iter() - .map(|store| db.get_unique_accounts_from_storage(store)) - .collect::>(); + let original_results = storages + .iter() + .map(|store| db.get_unique_accounts_from_storage(store)) + .collect::>(); + if two_refs { + original_results.iter().for_each(|results| { + results.stored_accounts.iter().for_each(|account| { + let entry = db + .accounts_index + .get_account_read_entry(account.pubkey()) + .unwrap(); + entry.addref(); + }) + }); + } - let accounts_per_storage = infos - .iter() - .zip(original_results.into_iter()) - .collect::>(); + if add_dead_account { + storages.iter().for_each(|storage| { + let pk = solana_sdk::pubkey::new_rand(); + let alive = false; + let write_version = 0; + append_single_account_with_default_hash( + storage, + &pk, + &AccountSharedData::default(), + write_version, + alive, + Some(&db.accounts_index), + ); + assert!(db.accounts_index.purge_exact( + &pk, + &[storage.slot()] + .into_iter() + .collect::>(), + &mut Vec::default() + )); + }); + } + let original_results = storages + .iter() + .map(|store| db.get_unique_accounts_from_storage(store)) + .collect::>(); - let accounts_to_combine = - db.calc_accounts_to_combine(&accounts_per_storage); - let slots_vec = slots.collect::>(); - if !add_dead_account && two_refs { - assert!(accounts_to_combine.accounts_to_combine.is_empty()); - continue; - } else { - assert_eq!( + let accounts_per_storage = infos + .iter() + .zip(original_results.into_iter()) + .collect::>(); + + let accounts_to_combine = + db.calc_accounts_to_combine(&accounts_per_storage); + if !add_dead_account && two_refs { + assert!(accounts_to_combine.accounts_to_combine.is_empty()); + continue; + } else { + assert_eq!( accounts_to_combine.accounts_to_combine.len(), num_slots, "method: {method:?}, num_slots: {num_slots}, two_refs: {two_refs}" ); - } - if two_refs { - // all accounts should be in many_refs - let mut accounts_keep = accounts_to_combine - .accounts_keep_slots - 
.keys() - .cloned() - .collect::>(); - assert!(!accounts_to_combine - .accounts_to_combine - .iter() - .any(|a| a.unrefed_pubkeys.is_empty())); - accounts_keep.sort_unstable(); - assert_eq!(accounts_keep, slots_vec); - assert!(accounts_to_combine.target_slots_sorted.is_empty()); - assert_eq!(accounts_to_combine.accounts_keep_slots.len(), num_slots); - assert!(accounts_to_combine.accounts_to_combine.iter().all( - |shrink_collect| shrink_collect - .alive_accounts - .one_ref - .accounts - .is_empty() - )); - assert!(accounts_to_combine.accounts_to_combine.iter().all( - |shrink_collect| shrink_collect - .alive_accounts - .many_refs - .accounts - .is_empty() - )); - } else { - if add_dead_account { + } + if two_refs { + // all accounts should be in many_refs + let mut accounts_keep = accounts_to_combine + .accounts_keep_slots + .keys() + .cloned() + .collect::>(); assert!(!accounts_to_combine .accounts_to_combine .iter() .any(|a| a.unrefed_pubkeys.is_empty())); + // sort because accounts_keep_slots is a hashmap, with non-deterministic ordering + accounts_keep.sort_unstable(); + if unsorted_slots { + accounts_keep = accounts_keep.into_iter().rev().collect(); + } + assert_eq!(accounts_keep, slots_vec); + assert!(accounts_to_combine.target_slots_sorted.is_empty()); + assert_eq!( + accounts_to_combine.accounts_keep_slots.len(), + num_slots + ); + assert!(accounts_to_combine.accounts_to_combine.iter().all( + |shrink_collect| shrink_collect + .alive_accounts + .one_ref + .accounts + .is_empty() + )); + assert!(accounts_to_combine.accounts_to_combine.iter().all( + |shrink_collect| shrink_collect + .alive_accounts + .many_refs + .accounts + .is_empty() + )); + } else { + if add_dead_account { + assert!(!accounts_to_combine + .accounts_to_combine + .iter() + .any(|a| a.unrefed_pubkeys.is_empty())); + } + // all accounts should be in one_ref and all slots are available as target slots + assert_eq!( + accounts_to_combine.target_slots_sorted, + if unsorted_slots { + 
slots_vec.iter().cloned().rev().collect::>() + } else { + slots_vec.clone() + } + ); + assert!(accounts_to_combine.accounts_keep_slots.is_empty()); + assert!(accounts_to_combine.accounts_to_combine.iter().all( + |shrink_collect| !shrink_collect + .alive_accounts + .one_ref + .accounts + .is_empty() + )); + assert!(accounts_to_combine.accounts_to_combine.iter().all( + |shrink_collect| shrink_collect + .alive_accounts + .many_refs + .accounts + .is_empty() + )); } - // all accounts should be in one_ref and all slots are available as target slots - assert_eq!(accounts_to_combine.target_slots_sorted, slots_vec); - assert!(accounts_to_combine.accounts_keep_slots.is_empty()); - assert!(accounts_to_combine.accounts_to_combine.iter().all( - |shrink_collect| !shrink_collect - .alive_accounts - .one_ref - .accounts - .is_empty() - )); - assert!(accounts_to_combine.accounts_to_combine.iter().all( - |shrink_collect| shrink_collect - .alive_accounts - .many_refs - .accounts - .is_empty() - )); - } - // test write_ancient_accounts_to_same_slot_multiple_refs since we built interesting 'AccountsToCombine' - let write_ancient_accounts = match method { - TestWriteMultipleRefs::MultipleRefs => { - let mut write_ancient_accounts = WriteAncientAccounts::default(); - db.write_ancient_accounts_to_same_slot_multiple_refs( - accounts_to_combine.accounts_keep_slots.values(), - &mut write_ancient_accounts, + // test write_ancient_accounts_to_same_slot_multiple_refs since we built interesting 'AccountsToCombine' + let write_ancient_accounts = match method { + TestWriteMultipleRefs::MultipleRefs => { + let mut write_ancient_accounts = + WriteAncientAccounts::default(); + db.write_ancient_accounts_to_same_slot_multiple_refs( + accounts_to_combine.accounts_keep_slots.values(), + &mut write_ancient_accounts, + ); + write_ancient_accounts + } + TestWriteMultipleRefs::PackedStorages => { + let packed_contents = Vec::default(); + db.write_packed_storages(&accounts_to_combine, packed_contents) + } + 
}; + if two_refs { + assert_eq!( + write_ancient_accounts.shrinks_in_progress.len(), + num_slots ); - write_ancient_accounts - } - TestWriteMultipleRefs::PackedStorages => { - let packed_contents = Vec::default(); - db.write_packed_storages(&accounts_to_combine, packed_contents) - } - }; - if two_refs { - assert_eq!(write_ancient_accounts.shrinks_in_progress.len(), num_slots); - let mut shrinks_in_progress = write_ancient_accounts - .shrinks_in_progress - .iter() - .collect::>(); - shrinks_in_progress.sort_unstable_by(|a, b| a.0.cmp(b.0)); - assert_eq!( - shrinks_in_progress - .iter() - .map(|(slot, _)| **slot) - .collect::>(), - slots_vec - ); - assert_eq!( - shrinks_in_progress + let mut shrinks_in_progress = write_ancient_accounts + .shrinks_in_progress .iter() - .map(|(_, shrink_in_progress)| shrink_in_progress - .old_storage() - .append_vec_id()) - .collect::>(), - storages - .iter() - .map(|storage| storage.append_vec_id()) - .collect::>() - ); - } else { - assert!(write_ancient_accounts.shrinks_in_progress.is_empty()); + .collect::>(); + // sort because shrinks_in_progress is a HashMap with non-deterministic order + shrinks_in_progress.sort_unstable_by(|a, b| a.0.cmp(b.0)); + if unsorted_slots { + shrinks_in_progress = + shrinks_in_progress.into_iter().rev().collect(); + } + assert_eq!( + shrinks_in_progress + .iter() + .map(|(slot, _)| **slot) + .collect::>(), + slots_vec + ); + assert_eq!( + shrinks_in_progress + .iter() + .map(|(_, shrink_in_progress)| shrink_in_progress + .old_storage() + .append_vec_id()) + .collect::>(), + storages + .iter() + .map(|storage| storage.append_vec_id()) + .collect::>() + ); + } else { + assert!(write_ancient_accounts.shrinks_in_progress.is_empty()); + } } } } From 0b05e8db11b9d988a68cc0a01c9bd47e30dfab2f Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Tue, 17 Oct 2023 15:24:38 -0700 Subject: [PATCH 360/407] [TieredStorage] Footer test for HotStorageReader (#33718) 
#### Problem HotStorageReader currently doesn't have a test that covers its footer. #### Summary of Changes This PR includes a test for HotStorageReader that verifies the footer. --- accounts-db/src/tiered_storage/hot.rs | 48 +++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 68c0e705976385..a7dbcd38930722 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -223,11 +223,18 @@ pub mod tests { super::*, crate::tiered_storage::{ byte_block::ByteBlockWriter, - footer::AccountBlockFormat, + file::TieredStorageFile, + footer::{ + AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter, + FOOTER_SIZE, + }, + hot::{HotAccountMeta, HotStorageReader}, + index::AccountIndexFormat, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, }, - ::solana_sdk::{hash::Hash, stake_history::Epoch}, + ::solana_sdk::{hash::Hash, pubkey::Pubkey, stake_history::Epoch}, memoffset::offset_of, + tempfile::TempDir, }; #[test] @@ -354,4 +361,41 @@ pub mod tests { optional_fields.account_hash.unwrap() ); } + + #[test] + fn test_hot_storage_footer() { + // Generate a new temp path that is guaranteed to NOT already have a file. 
+ let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_hot_storage_footer"); + let expected_footer = TieredStorageFooter { + account_meta_format: AccountMetaFormat::Hot, + owners_block_format: OwnersBlockFormat::LocalIndex, + account_index_format: AccountIndexFormat::AddressAndOffset, + account_block_format: AccountBlockFormat::AlignedRaw, + account_entry_count: 300, + account_meta_entry_size: 16, + account_block_size: 4096, + owner_count: 250, + owner_entry_size: 32, + account_index_offset: 1069600, + owners_offset: 1081200, + hash: Hash::new_unique(), + min_account_address: Pubkey::default(), + max_account_address: Pubkey::new_unique(), + footer_size: FOOTER_SIZE as u64, + format_version: 1, + }; + + { + let file = TieredStorageFile::new_writable(&path).unwrap(); + expected_footer.write_footer_block(&file).unwrap(); + } + + // Reopen the same storage, and expect the persisted footer is + // the same as what we have written. + { + let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + assert_eq!(expected_footer, *hot_storage.footer()); + } + } } From 9baa0b1c02fcea0d80e6c9866720e229677d9798 Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Tue, 17 Oct 2023 20:20:59 -0700 Subject: [PATCH 361/407] ci/env.sh: Log `CI_BASE_BRANCH` (#33744) When debugging CI failures it helps when I can see all the variables set by `env.sh`. Order environment variables in the output to match the order they are set a bit better. `CI_BASE_BRANCH` was not set at all for unknown environment. Does not matter much, but it seems consistent to include it in the list next to all the other variables. 
--- ci/env.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ci/env.sh b/ci/env.sh index 131bfb88f7f44b..24e2fb9a0c3ba0 100644 --- a/ci/env.sh +++ b/ci/env.sh @@ -116,6 +116,7 @@ if [[ -n $CI ]]; then else export CI= export CI_BRANCH= + export CI_BASE_BRANCH= export CI_BUILD_ID= export CI_COMMIT= export CI_JOB_ID= @@ -131,10 +132,12 @@ fi cat < Date: Tue, 17 Oct 2023 23:35:49 -0400 Subject: [PATCH 362/407] Upgrades lock_api to v0.4.10 (#33736) --- Cargo.lock | 5 +++-- programs/sbf/Cargo.lock | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6cf391a085f449..c9995e72c250a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2987,10 +2987,11 @@ checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" [[package]] name = "lock_api" -version = "0.4.6" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ + "autocfg", "scopeguard", ] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d4a60cd04493bc..e1a4c686ed1367 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2621,10 +2621,11 @@ checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" [[package]] name = "lock_api" -version = "0.4.6" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ + "autocfg", "scopeguard", ] From c699bc9cab1e017476dbf34f6f43acd78927b2cf Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 18 Oct 2023 13:41:42 +0000 Subject: [PATCH 363/407] down samples outgoing gossip pull requests (#33719) Push message propagation has improved in recent 
versions of the gossip code and we don't rely on pull requests as much as before. Handling pull requests is also inefficient and expensive. The commit reduces number of outgoing pull requests by down sampling. --- gossip/benches/crds_gossip_pull.rs | 2 +- gossip/src/crds_gossip_pull.rs | 96 ++++++++++++++++++------------ 2 files changed, 60 insertions(+), 38 deletions(-) diff --git a/gossip/benches/crds_gossip_pull.rs b/gossip/benches/crds_gossip_pull.rs index eaed9b671166ef..35be66b4bad97c 100644 --- a/gossip/benches/crds_gossip_pull.rs +++ b/gossip/benches/crds_gossip_pull.rs @@ -52,6 +52,6 @@ fn bench_build_crds_filters(bencher: &mut Bencher) { let crds = RwLock::new(crds); bencher.iter(|| { let filters = crds_gossip_pull.build_crds_filters(&thread_pool, &crds, MAX_BLOOM_SIZE); - assert_eq!(filters.len(), 128); + assert_eq!(filters.len(), 16); }); } diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index dae1eb3d79a7d3..c3044dbba8cc43 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -53,8 +53,6 @@ use { pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000; // Retention period of hashes of received outdated values. const FAILED_INSERTS_RETENTION_MS: u64 = 20_000; -// Maximum number of pull requests to send out each time around. -const MAX_NUM_PULL_REQUESTS: usize = 1024; pub const FALSE_RATE: f64 = 0.1f64; pub const KEYS: f64 = 8f64; @@ -143,19 +141,26 @@ impl CrdsFilter { /// A vector of crds filters that together hold a complete set of Hashes. 
struct CrdsFilterSet { - filters: Vec>, + filters: Vec>>, mask_bits: u32, } impl CrdsFilterSet { - fn new(num_items: usize, max_bytes: usize) -> Self { + fn new(rng: &mut R, num_items: usize, max_bytes: usize) -> Self { + const SAMPLE_RATE: usize = 8; + const MAX_NUM_FILTERS: usize = 1024; let max_bits = (max_bytes * 8) as f64; let max_items = CrdsFilter::max_items(max_bits, FALSE_RATE, KEYS); let mask_bits = CrdsFilter::mask_bits(num_items as f64, max_items); - let filters = - repeat_with(|| Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize).into()) - .take(1 << mask_bits) - .collect(); + let mut filters: Vec<_> = repeat_with(|| None).take(1usize << mask_bits).collect(); + let mut indices: Vec<_> = (0..filters.len()).collect(); + let size = (filters.len() + SAMPLE_RATE - 1) / SAMPLE_RATE; + for _ in 0..MAX_NUM_FILTERS.min(size) { + let k = rng.gen_range(0..indices.len()); + let k = indices.swap_remove(k); + let filter = Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize); + filters[k] = Some(AtomicBloom::::from(filter)); + } Self { filters, mask_bits } } @@ -167,7 +172,9 @@ impl CrdsFilterSet { .unwrap_or_default(), ) .unwrap(); - self.filters[index].add(&hash_value); + if let Some(filter) = &self.filters[index] { + filter.add(&hash_value); + } } } @@ -177,10 +184,12 @@ impl From for Vec { cfs.filters .into_iter() .enumerate() - .map(|(seed, filter)| CrdsFilter { - filter: filter.into(), - mask: CrdsFilter::compute_mask(seed as u64, mask_bits), - mask_bits, + .filter_map(|(seed, filter)| { + Some(CrdsFilter { + filter: Bloom::::from(filter?), + mask: CrdsFilter::compute_mask(seed as u64, mask_bits), + mask_bits, + }) }) .collect() } @@ -269,14 +278,7 @@ impl CrdsGossipPull { if nodes.is_empty() { return Err(CrdsGossipError::NoPeers); } - let mut filters = self.build_crds_filters(thread_pool, crds, bloom_size); - if filters.len() > MAX_NUM_PULL_REQUESTS { - for i in 0..MAX_NUM_PULL_REQUESTS { - let j = rng.gen_range(i..filters.len()); - 
filters.swap(i, j); - } - filters.truncate(MAX_NUM_PULL_REQUESTS); - } + let filters = self.build_crds_filters(thread_pool, crds, bloom_size); // Associate each pull-request filter with a randomly selected peer. let dist = WeightedIndex::new(weights).unwrap(); let nodes = repeat_with(|| nodes[dist.sample(&mut rng)].clone()); @@ -425,7 +427,7 @@ impl CrdsGossipPull { let crds = crds.read().unwrap(); let num_items = crds.len() + crds.num_purged() + failed_inserts.len(); let num_items = MIN_NUM_BLOOM_ITEMS.max(num_items); - let filters = CrdsFilterSet::new(num_items, bloom_size); + let filters = CrdsFilterSet::new(&mut rand::thread_rng(), num_items, bloom_size); thread_pool.install(|| { crds.par_values() .with_min_len(PAR_MIN_LENGTH) @@ -669,45 +671,61 @@ pub(crate) mod tests { #[test] fn test_crds_filter_set_add() { - let crds_filter_set = - CrdsFilterSet::new(/*num_items=*/ 9672788, /*max_bytes=*/ 8196); - let hash_values: Vec<_> = repeat_with(Hash::new_unique).take(1024).collect(); + let mut rng = rand::thread_rng(); + let crds_filter_set = CrdsFilterSet::new( + &mut rng, /*num_items=*/ 59672788, /*max_bytes=*/ 8196, + ); + let hash_values: Vec<_> = repeat_with(|| { + let buf: [u8; 32] = rng.gen(); + solana_sdk::hash::hashv(&[&buf]) + }) + .take(1024) + .collect(); + assert_eq!(crds_filter_set.filters.len(), 8192); for hash_value in &hash_values { crds_filter_set.add(*hash_value); } let filters: Vec = crds_filter_set.into(); + let mut num_hits = 0; assert_eq!(filters.len(), 1024); for hash_value in hash_values { - let mut num_hits = 0; + let mut hit = false; let mut false_positives = 0; for filter in &filters { if filter.test_mask(&hash_value) { num_hits += 1; + assert!(!hit); + hit = true; assert!(filter.contains(&hash_value)); assert!(filter.filter.contains(&hash_value)); } else if filter.filter.contains(&hash_value) { false_positives += 1; } } - assert_eq!(num_hits, 1); assert!(false_positives < 5); } + assert!(num_hits > 96, "num_hits: {num_hits}"); } #[test] 
fn test_crds_filter_set_new() { // Validates invariances required by CrdsFilterSet::get in the // vector of filters generated by CrdsFilterSet::new. - let filters: Vec = - CrdsFilterSet::new(/*num_items=*/ 55345017, /*max_bytes=*/ 4098).into(); - assert_eq!(filters.len(), 16384); + let filters = CrdsFilterSet::new( + &mut rand::thread_rng(), + 55345017, // num_items + 4098, // max_bytes + ); + assert_eq!(filters.filters.len(), 16384); + let filters = Vec::::from(filters); + assert_eq!(filters.len(), 1024); let mask_bits = filters[0].mask_bits; let right_shift = 64 - mask_bits; let ones = !0u64 >> mask_bits; - for (i, filter) in filters.iter().enumerate() { + for filter in &filters { // Check that all mask_bits are equal. assert_eq!(mask_bits, filter.mask_bits); - assert_eq!(i as u64, filter.mask >> right_shift); + assert!((0..16384).contains(&(filter.mask >> right_shift))); assert_eq!(ones, ones & filter.mask); } } @@ -740,7 +758,7 @@ pub(crate) mod tests { let crds = RwLock::new(crds); assert!(num_inserts > 30_000, "num inserts: {num_inserts}"); let filters = crds_gossip_pull.build_crds_filters(&thread_pool, &crds, MAX_BLOOM_SIZE); - assert_eq!(filters.len(), MIN_NUM_BLOOM_FILTERS.max(32)); + assert_eq!(filters.len(), MIN_NUM_BLOOM_FILTERS.max(4)); let crds = crds.read().unwrap(); let purged: Vec<_> = thread_pool.install(|| crds.purged().collect()); let hash_values: Vec<_> = crds.values().map(|v| v.value_hash).chain(purged).collect(); @@ -751,21 +769,24 @@ pub(crate) mod tests { "hash_values.len(): {}", hash_values.len() ); + let mut num_hits = 0; let mut false_positives = 0; for hash_value in hash_values { - let mut num_hits = 0; + let mut hit = false; for filter in &filters { if filter.test_mask(&hash_value) { num_hits += 1; + assert!(!hit); + hit = true; assert!(filter.contains(&hash_value)); assert!(filter.filter.contains(&hash_value)); } else if filter.filter.contains(&hash_value) { false_positives += 1; } } - assert_eq!(num_hits, 1); } - 
assert!(false_positives < 150_000, "fp: {false_positives}"); + assert!(num_hits > 4000, "num_hits: {num_hits}"); + assert!(false_positives < 20_000, "fp: {false_positives}"); } #[test] @@ -1308,7 +1329,8 @@ pub(crate) mod tests { } #[test] fn test_crds_filter_complete_set_add_mask() { - let mut filters: Vec = CrdsFilterSet::new(1000, 10).into(); + let mut filters = + Vec::::from(CrdsFilterSet::new(&mut rand::thread_rng(), 1000, 10)); assert!(filters.iter().all(|f| f.mask_bits > 0)); let mut h: Hash = Hash::default(); // rev to make the hash::default() miss on the first few test_masks From e96678b302d4f1eb8cc495df6e5885045d4de55a Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Oct 2023 10:43:35 -0400 Subject: [PATCH 364/407] Uses SeqLock for CachedAccountInner::hash (#33696) --- Cargo.lock | 10 ++++++++++ Cargo.toml | 1 + accounts-db/Cargo.toml | 1 + accounts-db/src/accounts_cache.rs | 12 ++++++------ programs/sbf/Cargo.lock | 10 ++++++++++ 5 files changed, 28 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9995e72c250a1..00f0bd0165ccd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4745,6 +4745,15 @@ dependencies = [ "pest", ] +[[package]] +name = "seqlock" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5c67b6f14ecc5b86c66fa63d76b5092352678545a8a3cdae80aef5128371910" +dependencies = [ + "parking_lot 0.12.1", +] + [[package]] name = "serde" version = "1.0.189" @@ -5210,6 +5219,7 @@ dependencies = [ "rayon", "regex", "rustc_version 0.4.0", + "seqlock", "serde", "serde_derive", "solana-accounts-db", diff --git a/Cargo.toml b/Cargo.toml index ceb0e67c1c1ce6..9d09f953f4ad5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -286,6 +286,7 @@ rustls = { version = "0.21.7", default-features = false, features = ["quic"] } rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.20" +seqlock = "0.2.0" serde = "1.0.189" serde_bytes = "0.11.12" serde_derive = "1.0.103" diff --git a/accounts-db/Cargo.toml 
b/accounts-db/Cargo.toml index 36ddf7d0e75f10..0cafcfe3e474dc 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -40,6 +40,7 @@ qualifier_attr = { workspace = true } rand = { workspace = true } rayon = { workspace = true } regex = { workspace = true } +seqlock = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } solana-bucket-map = { workspace = true } diff --git a/accounts-db/src/accounts_cache.rs b/accounts-db/src/accounts_cache.rs index 04d1ef9d736bcc..cb38243fd222dc 100644 --- a/accounts-db/src/accounts_cache.rs +++ b/accounts-db/src/accounts_cache.rs @@ -4,6 +4,7 @@ use { accounts_hash::AccountHash, }, dashmap::DashMap, + seqlock::SeqLock, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, clock::Slot, @@ -77,7 +78,7 @@ impl SlotCacheInner { let data_len = account.data().len() as u64; let item = Arc::new(CachedAccountInner { account, - hash: RwLock::new(None), + hash: SeqLock::new(None), slot, pubkey: *pubkey, include_slot_in_hash, @@ -143,7 +144,7 @@ pub type CachedAccount = Arc; #[derive(Debug)] pub struct CachedAccountInner { pub account: AccountSharedData, - hash: RwLock>, + hash: SeqLock>, slot: Slot, pubkey: Pubkey, /// temporarily here during feature activation @@ -153,18 +154,17 @@ pub struct CachedAccountInner { impl CachedAccountInner { pub fn hash(&self) -> AccountHash { - let hash = self.hash.read().unwrap(); - match *hash { + let hash = self.hash.read(); + match hash { Some(hash) => hash, None => { - drop(hash); let hash = AccountsDb::hash_account( self.slot, &self.account, &self.pubkey, self.include_slot_in_hash, ); - *self.hash.write().unwrap() = Some(hash); + *self.hash.lock_write() = Some(hash); hash } } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index e1a4c686ed1367..915b4fe0f47c8c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4133,6 +4133,15 @@ version = "1.0.20" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +[[package]] +name = "seqlock" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5c67b6f14ecc5b86c66fa63d76b5092352678545a8a3cdae80aef5128371910" +dependencies = [ + "parking_lot 0.12.1", +] + [[package]] name = "serde" version = "1.0.189" @@ -4501,6 +4510,7 @@ dependencies = [ "rayon", "regex", "rustc_version", + "seqlock", "serde", "serde_derive", "solana-bucket-map", From 84c2f9de55b026cf2affad1bb8194890ea0df3d6 Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Wed, 18 Oct 2023 10:22:33 -0700 Subject: [PATCH 365/407] program::message::AccountKeys: `Clone`, `Default`, `Debug`, `Eq` (#33749) It is a pretty standard set of traits to implement on most types. Both `Pubkey` and `LoadedAddresses` contained within the `AccountKeys` already implement them. Doing the same for `AccountKeys` could simplify unit tests and/or some common value manipulation logic. --- sdk/program/src/message/account_keys.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/sdk/program/src/message/account_keys.rs b/sdk/program/src/message/account_keys.rs index 8662f59cae566d..f0ab7deeef0987 100644 --- a/sdk/program/src/message/account_keys.rs +++ b/sdk/program/src/message/account_keys.rs @@ -4,11 +4,12 @@ use { message::{v0::LoadedAddresses, CompileError}, pubkey::Pubkey, }, - std::{collections::BTreeMap, ops::Index}, + std::{collections::BTreeMap, iter::zip, ops::Index}, }; /// Collection of static and dynamically loaded keys used to load accounts /// during transaction processing. 
+#[derive(Clone, Default, Debug, Eq)] pub struct AccountKeys<'a> { static_keys: &'a [Pubkey], dynamic_keys: Option<&'a LoadedAddresses>, @@ -138,6 +139,12 @@ impl<'a> AccountKeys<'a> { } } +impl PartialEq for AccountKeys<'_> { + fn eq(&self, other: &Self) -> bool { + zip(self.iter(), other.iter()).all(|(a, b)| a == b) + } +} + #[cfg(test)] mod tests { use {super::*, crate::instruction::AccountMeta}; From 2465abce5c48e667dfc67cc3ec459f399932043b Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 18 Oct 2023 18:06:14 +0000 Subject: [PATCH 366/407] simplifies pull-responses handling (#33743) Following: https://github.com/solana-labs/solana/pull/33722 from pubkey in PullResponse is no longer used in processing pull-responses and so the code can be simplified. --- gossip/src/cluster_info.rs | 87 +++++--------------------------------- 1 file changed, 11 insertions(+), 76 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 0e72efd8a5cde2..e7f405f06dc4b0 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -80,7 +80,7 @@ use { solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY, std::{ borrow::Cow, - collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + collections::{HashMap, HashSet, VecDeque}, fmt::Debug, fs::{self, File}, io::BufReader, @@ -2101,77 +2101,27 @@ impl ClusterInfo { fn handle_batch_pull_responses( &self, - responses: Vec<(Pubkey, Vec)>, - thread_pool: &ThreadPool, + responses: Vec, stakes: &HashMap, epoch_duration: Duration, ) { let _st = ScopedTimer::from(&self.stats.handle_batch_pull_responses_time); - if responses.is_empty() { - return; - } - fn extend(hash_map: &mut HashMap>, (key, mut value): (K, Vec)) - where - K: Eq + std::hash::Hash, - { - match hash_map.entry(key) { - Entry::Occupied(mut entry) => { - let entry_value = entry.get_mut(); - if entry_value.len() < value.len() { - std::mem::swap(entry_value, &mut value); - } - entry_value.extend(value); - } - Entry::Vacant(entry) => 
{ - entry.insert(value); - } - } - } - fn merge( - mut hash_map: HashMap>, - other: HashMap>, - ) -> HashMap> - where - K: Eq + std::hash::Hash, - { - if hash_map.len() < other.len() { - return merge(other, hash_map); - } - for kv in other { - extend(&mut hash_map, kv); - } - hash_map - } - let responses = thread_pool.install(|| { - responses - .into_par_iter() - .with_min_len(1024) - .fold(HashMap::new, |mut hash_map, kv| { - extend(&mut hash_map, kv); - hash_map - }) - .reduce(HashMap::new, merge) - }); if !responses.is_empty() { let self_pubkey = self.id(); let timeouts = self .gossip .make_timeouts(self_pubkey, stakes, epoch_duration); - for (from, data) in responses { - self.handle_pull_response(&from, data, &timeouts); - } + self.handle_pull_response(responses, &timeouts); } } // Returns (failed, timeout, success) fn handle_pull_response( &self, - from: &Pubkey, crds_values: Vec, timeouts: &CrdsTimeouts, ) -> (usize, usize, usize) { let len = crds_values.len(); - trace!("PullResponse me: {} from: {} len={}", self.id(), from, len); let mut pull_stats = ProcessPullStats::default(); let (filtered_pulls, filtered_pulls_expired_timeout, failed_inserts) = { let _st = ScopedTimer::from(&self.stats.filter_pull_response); @@ -2446,9 +2396,9 @@ impl ClusterInfo { Protocol::PullRequest(filter, caller) => { pull_requests.push((from_addr, filter, caller)) } - Protocol::PullResponse(from, data) => { + Protocol::PullResponse(_, mut data) => { check_duplicate_instance(&data)?; - pull_responses.push((from, data)); + pull_responses.append(&mut data); } Protocol::PushMessage(from, data) => { check_duplicate_instance(&data)?; @@ -2460,13 +2410,10 @@ impl ClusterInfo { } } if self.require_stake_for_gossip(stakes) { - for (_, data) in &mut pull_responses { - retain_staked(data, stakes); - } + retain_staked(&mut pull_responses, stakes); for (_, data) in &mut push_messages { retain_staked(data, stakes); } - pull_responses.retain(|(_, data)| !data.is_empty()); 
push_messages.retain(|(_, data)| !data.is_empty()); } self.handle_batch_ping_messages(ping_messages, recycler, response_sender); @@ -2478,7 +2425,7 @@ impl ClusterInfo { stakes, response_sender, ); - self.handle_batch_pull_responses(pull_responses, thread_pool, stakes, epoch_duration); + self.handle_batch_pull_responses(pull_responses, stakes, epoch_duration); self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, stakes); self.handle_batch_pong_messages(pong_messages, Instant::now()); self.handle_batch_pull_requests( @@ -3212,18 +3159,11 @@ mod tests { ); assert_eq!( (0, 0, 1), - ClusterInfo::handle_pull_response( - &cluster_info, - &entrypoint_pubkey, - data.clone(), - &timeouts - ) + cluster_info.handle_pull_response(data.clone(), &timeouts) ); - - let entrypoint_pubkey2 = solana_sdk::pubkey::new_rand(); assert_eq!( (1, 0, 0), - ClusterInfo::handle_pull_response(&cluster_info, &entrypoint_pubkey2, data, &timeouts) + cluster_info.handle_pull_response(data, &timeouts) ); } @@ -3981,12 +3921,7 @@ mod tests { &stakes, Duration::from_millis(cluster_info.gossip.pull.crds_timeout), ); - ClusterInfo::handle_pull_response( - &cluster_info, - &entrypoint_pubkey, - vec![entrypoint_crdsvalue], - &timeouts, - ); + cluster_info.handle_pull_response(vec![entrypoint_crdsvalue], &timeouts); let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &HashMap::new()); assert_eq!(pings.len(), 1); assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS); @@ -4495,7 +4430,7 @@ mod tests { ); assert_eq!( (0, 0, NO_ENTRIES), - cluster_info.handle_pull_response(&entrypoint_pubkey, data, &timeouts) + cluster_info.handle_pull_response(data, &timeouts) ); } From afd044e2961f6c3c0569a0b35428f30e98b29391 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 18 Oct 2023 18:53:58 +0000 Subject: [PATCH 367/407] removes redundant ClusterInfo::drain_push_queue (#33753) --- gossip/src/cluster_info.rs | 40 ++++++++++++++++--------------------- gossip/src/crds_gossip.rs | 7 ------- 
gossip/tests/crds_gossip.rs | 4 +--- 3 files changed, 18 insertions(+), 33 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index e7f405f06dc4b0..67f713676d5d2f 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -889,10 +889,7 @@ impl ClusterInfo { CrdsData::LowestSlot(0, LowestSlot::new(self_pubkey, min, now)), &self.keypair(), ); - self.local_message_pending_push_queue - .lock() - .unwrap() - .push(entry); + self.push_message(entry); } } @@ -973,7 +970,7 @@ impl ClusterInfo { TimedGuard::new(self.gossip.crds.read().unwrap(), label, counter) } - pub fn push_message(&self, message: CrdsValue) { + fn push_message(&self, message: CrdsValue) { self.local_message_pending_push_queue .lock() .unwrap() @@ -1515,25 +1512,23 @@ impl ClusterInfo { (pings, pulls.collect()) } - fn drain_push_queue(&self) -> Vec { - let mut push_queue = self.local_message_pending_push_queue.lock().unwrap(); - std::mem::take(&mut *push_queue) - } - // Used in tests pub fn flush_push_queue(&self) { - let pending_push_messages = self.drain_push_queue(); - let mut gossip_crds = self.gossip.crds.write().unwrap(); - let now = timestamp(); - for entry in pending_push_messages { - let _ = gossip_crds.insert(entry, now, GossipRoute::LocalMessage); + let entries: Vec = + std::mem::take(&mut *self.local_message_pending_push_queue.lock().unwrap()); + if !entries.is_empty() { + let mut gossip_crds = self.gossip.crds.write().unwrap(); + let now = timestamp(); + for entry in entries { + let _ = gossip_crds.insert(entry, now, GossipRoute::LocalMessage); + } } } fn new_push_requests(&self, stakes: &HashMap) -> Vec<(SocketAddr, Protocol)> { let self_id = self.id(); let (mut push_messages, num_entries, num_nodes) = { let _st = ScopedTimer::from(&self.stats.new_push_requests); - self.gossip - .new_push_messages(&self_id, self.drain_push_queue(), timestamp(), stakes) + self.flush_push_queue(); + self.gossip.new_push_messages(&self_id, timestamp(), stakes) }; 
self.stats .push_fanout_num_entries @@ -3596,12 +3591,11 @@ mod tests { &SocketAddrSpace::Unspecified, ); //check that all types of gossip messages are signed correctly - let (push_messages, _, _) = cluster_info.gossip.new_push_messages( - &cluster_info.id(), - cluster_info.drain_push_queue(), - timestamp(), - &stakes, - ); + cluster_info.flush_push_queue(); + let (push_messages, _, _) = + cluster_info + .gossip + .new_push_messages(&cluster_info.id(), timestamp(), &stakes); // there should be some pushes ready assert!(!push_messages.is_empty()); push_messages diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index 015deed1d2a472..977db716e5cef1 100644 --- a/gossip/src/crds_gossip.rs +++ b/gossip/src/crds_gossip.rs @@ -72,7 +72,6 @@ impl CrdsGossip { pub fn new_push_messages( &self, pubkey: &Pubkey, // This node. - pending_push_messages: Vec, now: u64, stakes: &HashMap, ) -> ( @@ -80,12 +79,6 @@ impl CrdsGossip { usize, // number of values usize, // number of push messages ) { - { - let mut crds = self.crds.write().unwrap(); - for entry in pending_push_messages { - let _ = crds.insert(entry, now, GossipRoute::LocalMessage); - } - } self.push.new_push_messages(pubkey, &self.crds, now, stakes) } diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs index 74415ec3c8ff33..ff9e36ba2cc644 100644 --- a/gossip/tests/crds_gossip.rs +++ b/gossip/tests/crds_gossip.rs @@ -351,9 +351,7 @@ fn network_run_push( node.gossip.purge(&node_pubkey, thread_pool, now, &timeouts); ( node_pubkey, - node.gossip - .new_push_messages(&node_pubkey, vec![], now, &stakes) - .0, + node.gossip.new_push_messages(&node_pubkey, now, &stakes).0, ) }) .collect(); From 94273434ce03e8e6f93d8ee156ecc7f971aa2770 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Wed, 18 Oct 2023 12:27:39 -0700 Subject: [PATCH 368/407] [TieredStorage] HotStorageReader::get_account_meta_from_offset() (#33724) #### Problem HotStorageReader 
currently only implements get_footer(). It does not have a function to obtain the account meta. #### Summary of Changes This PR implements HotStorageReader::get_account_meta_from_offset(). A function that returns the account meta located at the specified offset. This will be the helper function that will be later used to obtain the account meta when the offset is available from the index block of a hot storage file. #### Test Plan A new test is included in this PR. --- accounts-db/src/tiered_storage/hot.rs | 57 +++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index a7dbcd38930722..0ae2a597ab0b43 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -7,6 +7,7 @@ use { footer::{AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter}, index::AccountIndexFormat, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, + mmap_utils::get_type, TieredStorageFormat, TieredStorageResult, }, memmap2::{Mmap, MmapOptions}, @@ -215,6 +216,12 @@ impl HotStorageReader { pub fn num_accounts(&self) -> usize { self.footer.account_entry_count as usize } + + /// Returns the account meta located at the specified offset. + fn get_account_meta_from_offset(&self, offset: usize) -> TieredStorageResult<&HotAccountMeta> { + let (meta, _) = get_type::(&self.mmap, offset)?; + Ok(meta) + } } #[cfg(test)] @@ -234,6 +241,7 @@ pub mod tests { }, ::solana_sdk::{hash::Hash, pubkey::Pubkey, stake_history::Epoch}, memoffset::offset_of, + rand::Rng, tempfile::TempDir, }; @@ -398,4 +406,53 @@ pub mod tests { assert_eq!(expected_footer, *hot_storage.footer()); } } + + #[test] + fn test_hot_storage_get_account_meta_from_offset() { + // Generate a new temp path that is guaranteed to NOT already have a file. 
+ let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_hot_storage_footer"); + + const NUM_ACCOUNTS: u32 = 10; + let mut rng = rand::thread_rng(); + + let hot_account_metas: Vec<_> = (0..NUM_ACCOUNTS) + .map(|_| { + HotAccountMeta::new() + .with_lamports(rng.gen_range(0..u64::MAX)) + .with_owner_index(rng.gen_range(0..NUM_ACCOUNTS)) + }) + .collect(); + + let account_offsets: Vec<_>; + let footer = TieredStorageFooter { + account_meta_format: AccountMetaFormat::Hot, + account_entry_count: NUM_ACCOUNTS, + ..TieredStorageFooter::default() + }; + { + let file = TieredStorageFile::new_writable(&path).unwrap(); + let mut current_offset = 0; + + account_offsets = hot_account_metas + .iter() + .map(|meta| { + let prev_offset = current_offset; + current_offset += file.write_type(meta).unwrap(); + prev_offset + }) + .collect(); + // while the test only focuses on account metas, writing a footer + // here is necessary to make it a valid tiered-storage file. + footer.write_footer_block(&file).unwrap(); + } + + let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + + for (offset, expected_meta) in account_offsets.iter().zip(hot_account_metas.iter()) { + let meta = hot_storage.get_account_meta_from_offset(*offset).unwrap(); + assert_eq!(meta, expected_meta); + } + assert_eq!(&footer, hot_storage.footer()); + } } From d33758171be87236992b2832cc5a81ceb7247934 Mon Sep 17 00:00:00 2001 From: Jon Cinque Date: Wed, 18 Oct 2023 21:39:01 +0200 Subject: [PATCH 369/407] token-2022: Update to use program build with zk-ops enabled (#33747) token-2022: Update to use version with zk-ops enabled --- .../src/programs/spl_token_2022-0.9.0.so | Bin 518392 -> 599088 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/program-test/src/programs/spl_token_2022-0.9.0.so b/program-test/src/programs/spl_token_2022-0.9.0.so index 805d3ed1a4131fe44c9bb00b6e865db27c593195..704fce119087717dafb64963b1c15faac125f6df 100644 GIT binary patch delta 148738 
zcmeFa3w#vS^*BB|nV+;h%-%dqrk9ZC#RvR|)-J(^_-euRMKVFy)WQRG{u!7k=)7 zTRZqy_$ohl-nVaLJaxC%!-m$*Ng)L1z02cVPu_cLs-h0%*x-QxSG3)Q+f}2V`;l1e z%G1+65?fq(sVlc>S|joZ*ImGJryXLytJHmgdrxb=iizOkOb`6e1|#}Yd&FeD zRKI?Q*rMm@BX@}1`f`294l&uiTR;DPQJj>cpL@R;54WG)FBT>h>to&$JCf4%H=eQ< z{w*m_f22hm1@O@pYs4-9*Z*A4sHQ?FQ-k7cYt3DYNvwFKsJrzfspOTP+TrQmkLN zTJ-Ojrw?8&Ms*yd4_Gah19Zk}@eJJht`>Vc&eb#F*{S{Y)YW3~sd@UTtHqX6=O!H$ znr4-M==NLw58Y>oTRU}0{udM(6@$gAr;bhDPk)z)(>smT|0cvUr_M@xExtyRt;SQ) z8p#dx_f(?vApJd?D6J4;P1p7M?LrJnE!C@pSeQClpDecRPfgeL@4ggA(+27N_H7&F z-Qv=}Xc7C<7pG>9i1rfp%D8=EVa81&+?{jb#&UXzlu(ob5$Rw+WQ!5fA&|g zC3BRo^$W0!mDurcpTKu~yy^v!lht3J`hpmdHAr8vN-WOG({Fh}Y=PVEFU0PwHTrF< z#4O(|{pl}aI&S#iM5pe{^@qL?>$~sJ@A^WF@3BL_<%?}O+0W>D`WM#tBu{ti?bqEm z=+CYcBmAXFPeSWfb+h}7qz3%U{||RaZ0_q9q28}rkAChhPd&ehjrJH^$lCCxXUBI; zn(RvL=y=e*$?9K~v{*cv+aPB4DNo(g7+dj^#%+81%yOkRILe=G+%~@N8dqwSqkK{0 zws!qSx!f(UC5xL2y!w^@5_1dY>fe=ZJ6O=krB_`hj-K8>bx&Drx76?5R(!^Km;UdK zV(*!y`oWFc^7_wpsct&W-c2~x3LU-L?k}Q$VR~w-gX+}1;^v@rw3}g;3q^sD7Qjx-<@~?!+tHD|QtwkLYLSKgHYubM>3A69)(E z(k~e+cAULjPv5w0#LseE`lU9-Mb>_2z8KKHt9a|gF`>H7|eh3ECx-#H|(c#%z{(4}9NWW;1{`N1$h>IrcKOZBWxv0Nx9uoU6 z+SsNp%N@Er&}_}>;qD4;n#}YL%xob76ajnN4Oh6_;WA0O5c37 zSbWJ2{eMP?(n}ZX`|Gyty0pJbf9Ie$dReJHe3U3H&C~x{CuWtl&C3c0FB#2Z#LzYR z3!TOOp}X~ee=c?m%hSL7T8+)wU> zNO#p``YWG{y%+ezimQ_JH+zURSM6$3iA@g1&wSmQd_eD&JC{2$8G>&i{ON7)q7L_H z7{;MTE3981(0lsk6Qm`>xGaZ15e-8&povokWn2Dt^$ukt$(&7}H0x_pTCtMpo62T> zjV9qS1UJ^;Q#PTmHsf$8XS_h15s%#Pxu#74TtEDqC+}riPxuRlPe{@a$@(FtZ_Fv- z=eb?z?sc)vdbA>!cyvHd>(#@0?&#DJnpP2NJ&Gu0u2d4r2z5Dzu<5I3j>0Jn7d*&t zoyKs{YKH3%Q+;#zGKM$I7N4Epceo#k&=A7R#t5@sDpRQ#RQD=T355J~H^MB(XwWMb zWalb=yJ^<*ll5Hdu?c!|{=h4c;TFkovet=6A0XJPW3t04w{D-IXZE}Q-l&350qOA6 zqje;#Or}qN_pu(kSI@{q))b$nzzq@zpMcxLBhH*q!h1P*%@$SeScWL#BH#4Xa0^D zM@N)cyRQ?0WXNAv0A^!|(6wa1pL+DzG1E7gT5k-?a&*2}UQ|Nm&jQzI^XT9n{)gD| zR{y~d{3Djk5)jS^GZ{PjYQQr>Os3MO9kt&5K=1D=w@&#`_XVMe#yZ&~G_eP0G;Uzn z2=f4H`~}0<^pNUkZ%6y@PC^KTdULP4I+EEoZsK|<@^<8H0>g~~z9Wy;vR}GWbp3Oy 
z^b0-D<)&W0s4F~o_PR$~NVO_L`LYqV7LT@`%8k%r(vuM$V_ngr=N6$IdbBS{={<7^ z2L_NrTEfV6D_H50Bb4yfJH^qDbF4=v=qdhj*CCO5e0z)|bVO5I=a@9v4PMiLg< zH0D(Z;QxU=YVpieNu`eA(Vz=_?sWq(xEklsO-lwd$h97tpl3|%2i+2o1I_E2M9%{o z_mMUl8zefoL$oHl6@7qk4L?VfUj%=mpy#6bfSn0Bnlc6VieTVE`ahO<~ok zK0|Xo4JtIQqI#O0;OT(Rao81=GdG(M4Jqe5U;x^Vk*@;b5Yq$t)*}=2_7mI3NP8JQ z{ZFLLXhYf}C2d!d7I^_~q&=XtXtar>J>?(P*OhwC$hQ*qKA1dQs15XR%9xd_Rj$>@ zB{XsZfC~27>zH-vEWOk<+q!p_epbOSX{Iuw42VTHyD{w1x*oHZP16UtW{d83_BqF+ zeGaAsQ%$5PuY97f|_>W7f=p?o1V|$2pVeJq~~QyFalFp=o0)ukB@lv9V{t zWRX$Ihpz^Mlo#4u_Maf@Jjb6&VtJF$QG#~wWQ=66jY9a-n`9L|t`EL8hl71LeJqD}V>E|#W%AgElM z0BRu_N`0HSm9Pw@1rN)v;LvOqSq#^6q%()#$M6PCd^V_maNPlBcKRN^FKD5=-6f*C zwvW?&I8LSE;jk(Pc96|`KZ_Y2T0Yj3?!k_?81Re`t9G=p(NFwjr?btNzGh}%3Q7qB z9{Q^g?&boDM-c-|Dwr{?5*id+|C+!2$!*_1aF!V zm-L*CNl%Wx^N97@VckFa=U1W=Re4XwPn7pSXPn>mM5+HA(@q!XZ1@ zF=UT^jj2%=vL@5VA{(I;PK}n10C^f@YLwzkjgpKF+V|p&x6T;Vw(fTk+x9r*j9-`q zsTCBoca;Tw`*+NOMt%AB*x^CL$`+jz`XRupT&hvHG(c(;zo#5-<^uK|=R%j$sPKtD z&i9R_mg`7Q8ak)gfQIaHtNNGjH2sRtM9x<}t7MR?%k>e{wHXDn4JJr(8teojaV8o# z7Loenld5V>9QoK!Sy1VgYZHkjjl6-4TzOkmLs2bEU1-vV0&HUK) zz(RU#@b*qZyY^`J#dY2VRKASLPbN2i0(e7L!Z}r}8#lU#rLpHC8_?^32>(IDDY4m2 z(3#!%9WL***G?|G8R@9E=+$Vw#&d}Yt+9mfec@IJ|2n(1tEt3XP;wamFpCD$U1dYw zq$Fb7o21-nGT~#yE{DH7a%4p!c);rLuHG}~D`av7UFdFHli!NhQ0d%5Hy&Kyz>i`NEZ-3)e)hB8y2O9lE*UcVY*q&AP5j771<@IKnbM7H8!&mMHwT98ZTj( zB?QkOVdPUABhmWLQOky0!!v!sX6`$O%21NVt*Bz|L$B);S&>cA^zD%*$6aC;G)t3X z6Eo_i>7}PoJu{CbHcK-Z9s~`*0>{UW~lMGR5}P4$KugtSwS!yd*B%iOKFi6 zq&gnsjjO3GGQ6FgG0-Y)@ifB-NiFyeNo`5t3Oxz=bcFfiZG@oh44K4>;;5pFE57@v!I0NW}iKmp5sEz3ivDg zyjuxw98B^9=aN%J&@7N{Rub^KI4m1dLue1VK*HX2_xziE>8tBuI-+GWqakW1>LZ*Z z=zLJ!H$$UiIXZ+!Qcb0qM(M1)F4hD(DXYhA?#$q3XciUh2Yl>^=Rm zLb^j7ZKZM<#vpD=nDyL~(Y147JYfxUsfw?19IZ=fwO+f#omois2&-E}EzHpx#hwhT zSA^I-M3qd~Gc zJbOOsgAyamQQVXx)uVk$E~FyNNkV18VRk8XoFu?H6v08Gp0fmV_yI=P@FqDWb6g9- zkZ)3h72#Wn@ydb&?6m4;5)46*;rfXT50@cg0}E=7lOdv3rWyVoM9(Zt!@KzT1>LjX zrIzdq_tynGe~fmGsbWf;iGEHIn5bNSnX7=c_9HxX z2;@U^3a&X2JF=awS|P3kJY(}nIE2e> 
zr%{6U9!QiS@XndJP9wK!1fnNtO=-o@;U!%3JI;mR+KRiSdt)GbH6LQ3()hR&?FHs5K zq|pXDk(?bB9N_M$o6m4jE5Q}@6A3mpFn8v;nbfM;myOLF%^s$5C>8j^ zC#Q}^9ect?o?4RRS3iuSi3RYwSk-}z6|6}cLwI<=&d`LT$AU4tnHNFB$WUis$j-p0 z4j-cMIsjhPbZMk&7|SJToa5qL$4&=bm%YoDeG=pqI~+w*;GZA<&6D?tKaevf82|7D z2-REAwFKwVKtucBSDfgHmxy_@fvw_YHBfF21$A-CKhzjr+*?2PWPQnn+ty$g$`=T$|mrhm58y`BaQWmsz!U>!)>KgEE&M zK4M+ERv)Zy*-w#1Z`f~bS*z!DsRJEWF5pOlPTe}=O}%%%5oWg@ow~_Imk?_Oy9FJ# zXd0h3s3#aweE%~6ua-qTO|*`xwGDVqi(&-RcKW` zp!W;jF(O(RjYm9nGaA?@DBq3t(f&a;1JsyTeJXJOb!f9v9`W`I@tAjnZ zWuLr%YlO$K7lbwLpE$0(c+Bb%(k~UwU*`|-XixEQxQDpXj0PFkQx;Nz(l2VM9R4|C z=I^+gBgM`7?XUTLTqE$&Bc!w?lvdr+~3B71b?x2Q6#_xZ0#(n;hUy)FzNSn!{Vx*HkV zMC2%Eh?BHAMpEP#D%d2QfN%myUqQl7$z%?J65a1jmmLVVTB2}1mjnvQ-3Uh$`z zj^6w+Pe;c(>@dZVO3sJwKzqC%@c!%5QMZxE=>NB;qmO)iyy|(I(quYL5_Jp^>lO%+QLapM^^>`F+S-2$m!@*2-Y@;z{yUsJ0gF` z_?^9ZqMdEsbgd`(oJIg)5tX~ebq*hqYRG2;+FVF?`_M!ES=H~j(}L-+Kc;F%r{p=< z^WcSAs3**&2iO!ExFgjR5*ev|fQL*b;3t}*hvf4tdd^Eowkdj0+X4u-IXcEinv&!F z$4t@dWrKa8{)wmP;BoNsXn*8HH)6WdG$W_p*mPm(m#6L zt^<-v!I=GkiG6m%NA@{BA8j>OVh>^v=D}>N1Vn=6A*_`t5@yK_Ir?DDnKiPqL_V*U zF#A22DNA^ye7-`$WBSN~wNzj?&k~pi!l^JaI;;9xPg)R(BkNCn2>xc}T3J)ExCo6fIg3mz4Vgv`*9$-cqS6S=cc6Ep-VG`g*m4G6|DgmeYkzPnJ-MK=06z>o|M@fk@Txq$d(fYYcv&dIdtW7M{; z&B-7~MlA$nnC=JDh@P>DU5ydgBeUZE$A*3G`t32^-$b4dAX9?(@aG+WPGWI3}2rDmiY9kaPsL z6iHIlz6AWZF;FKPaKjxMfbHSPxStv$Q57MAsj*{F1C_!V=u%~)jhK+Zg-3GHx}uIk zsw^a*!!uMx#B+sZInY9tpou8P-$TYDnh3HgR6I4pEU~@9TTsCWu_UUms&csjqYl=$-~+9b{W1Qy16S6m~hkSpo@I2Y!NrS5P$9W){Rf; zerp*d@)7%c4tWZ*12S?QIY2HL9#xg&Bp0+!upB2+<|u3B4%VhhSPhfe5?;}ZWed!g z1@YtLz>(O%TK7S`c^P4unAUNkiUTF(@q$;LH5Mc@QV|^+ z7#9<-c=i-EL)K)}oWM+xU|13aaoY;iccXx|c!g`xjR9-b+pg5!%7SVDM}>WdO(?k?7MhxJKrkG4|WFmk}z{56O%+?nC1Zd)+9C@QCN`}8gE1AGtiTt6tq z-uFwu6R0;)?C9MAPrcRvUxN_e15tM2qO|)=si&- zje=qOsfFJ{5)Ci&1tn{+>#UGstK_E`&1j0g5DUcs>y>kCu&ZvckUp z8oj{P*}8X{K2VfgRv=QBq;|dl5Y)>`aNb)yazRghX6#FOao_D~ajfqw`n^~`Vomq! 
z)&r>yO(ch!*V)INo5r1}zIvkm-lbyy)w}f(@EJGwI@Pd!;^4?U{gQnm{hGyk>#bt_ zHO2ZFmy5mE^w*zRv#ry$>1}^Q?68BEzrRSl{Pv;a^D+Zkp6(-FE@NJvX(3*=wh%Ay z**@fD4D+&j81a&`X4|e&QC=#Y$1u8#Z7NxY0oJ{_qJ8UzMV zvDOFjeLP3)(fEBRWnxAh)353V!x^p_-IxwO$KfKnjLj8UqH}PPt2>>3G&V9LjRxUs zli&~%G2mKB1zuOZhQ_Mix5SCVRUa^ML_o%nCsY-x$O~_&_XpUc@ezkW;7ml*j5&Si zZWfTB;_j64{;&nBWLqgWu?~AfN1$i$5M8&&tf`;)VGct_$i@-H{w7(Doli+>mBx>*lI^JT} z)j#(oi~Gi91yL`kR}Z-gaG$p!C0Nx1nuFo~vb>*&2Wv|TLk2QqaeaY6%*SfQ=Lg~(ehaTDk)3e)ys2c zKq{jJ8U#McrWBm}A314XoeF6Lj0h+a{d`I*Mabhj9SL+9A zl`}Y{Mvpd>IJDdS<`0rZ;|;xn>{e9r5Ezn6n2`<|LLem@rx8o+2Jw{(WHGJ-CosNI z7j*?YKgW(MW0joC&#}K2qL&h5Z_F*5M1_rzPT>`}9w|eD1J(@Y2T@qMn*_9197Nis zakS^z4IjJTqu!sh88!(J!lpQzTH8h^&wC%3nh%|9-BRMtNJ3k%%HMXUi5|r%!QJrC zoSsh|8|^6WXw{*rBd0*e)9{e@A37`=c_M9hv_Wm)SEz!XaVogP=1I}XoC8uSAA{}y z-M)vM0(7HPJ*phF5>dG{q(mP66PST}v{SoK!`PoWWKmgXMJgxguaT`-DLGPSS4*(LX2u{weW*eNI*yCK-aZ&*gkf2&kIt91g$O))x_Vwb7tm_+0Y$e~ZxV?w~tZ z6C-ZvkfwNwo}&HBQE}z?srvc{#FmLU`n~Uq-Q(x#hu4Y`H}BAc8@6>SAK_9T@{Rjg zbG{?tdZ0-xoKmcZ_KF=-X6XVx5IYsVfBb-$H6ce&dmoQGxZn6!vUq&LB>nU|#8G&d zcZVph?5}HpQ#nX4UoAFPF4WuKA@U|J*56-CEWHL_dY!mX9}Fx_8lmU!73(LB*I%gL zc63r{+nkj+ID5B|IQ!^5;%xVO#M$(<#M$V*F`Ui1oj99zJ8^c&S}|(cApH>`mQTym zH@qkIR~GA6-!8tM)=OM7K1)oPI9%_;jHV)^vr@6km738k7h1<oMlV>H@f)mV+UwMYF&A**QoHGx)x$s`Cj+GGWygh09hDkVu>j!;PWkDB7aJGaqB|KlkehF7gIA6j` zBwQ%r6%rmS;k6Pjk#H@+s*BoT7nL8iI!$+{cc=zQP{Nh(TBD}uJuiV^fP$;ydqHr! 
zf@+~3?CWI1;F|=9UoOeq1^CvAXFWZF!{h>PiF^`7N?-?g*^LQpTCLpKRc-)2b|Cij zj0~)W#@Pc)G-EOTAMW?C5oS-jjN#Gp{oau&!B#?$pT<@1TVc9W2oKC*vf{)I*uJLL zUg%Xfd_Gt{uME+1b3wy5h-Y@F<@@jzd@owU_k!(oR|#9P9^_Y9&4RUzkk168+ehys zJ=i8?(=~ltnZPD0CgUVy3^0scG^Y-YPU6{&)DI~5{5{av6!;tAdC)%nQKsDtfAJ?* zaz2DaOtGhi*N*CW+1MN|PkC=(6`Ni^wP^N{ni~i$!ew5@8kWBL zYOTskjf66PZvu=X$ELiv0N!D<-dC@g`unAd#ePCGjvSU}ZfybF>nn97J7i7yR5wneJ0 zU>NZpSz{gj(w)<5E#OD`93K1OpI0j&L`+yXfamr$!mPcjRC*K%d6lWGH6vX9Z}E=_ z`N1s^Y00f()>)AH!&Y0T#b8;?dp^_f}>F|GX0X1LvACu8_K9715>d zq}4*>D9P*ZppE=QvBEbsNWMGz;llr=SP^2ul3l$qR^X&rIv$(E)qsN%Kk-h&*zpJ` z!rs*_l$mxV<-(NvS z5KsO}%&=zL4~*b1SJXQ4;j4egc7t?L!x&l77IP?T z)R-f))ha*XmATsoX~PIB;3BhGz!r^yoe$KEFvFwa8TLaU*AM@P?`pV^4K`9#p&`2r z8$^Xf8myC%vllZ^*^S+@Vz8=Pkm!KNJ3#$hkqd(!u1eqU$p|ZQb0yrwMwy#WFf61= zxKP5g3C4xD!BFBHCCZ1O@lS*2C}0*2Z7VcS&j{Ml-}(kO{h}jVp?5{}5I1A`UgzdR z%!cWEnc8jQDBn7GAZ zA(tG+@~DOI7hR11*g{|r1=BJ#kvINkt)B;9__y1~X!x9q;ug(G;lA=WyM0_u%)!xT zvJ2BUlwmO+c?M@bo9=eH%9;NX+*TmO87OWE!xaSSGr7Vhx>1r&1_b#oUTF89;?!3< zWn>H%=tziXfH8YRnE6Evat_zG<8O7~ANVd9o02xc2(cSb32LOCM~tz7rz@zv5=dpD zlC-Cm;IqdvCFPxEBP?_Cf_bzg1)oO!4iU|wpEFz!c^EngbNCc0@M!$*S=>-MjwV!o zbiYF~oI!feWhe&s;In+PEC}9`1a_~IbGt^}4vnJb#q6xK;-jB{YHeLds*a2gc8{p(e1FRm^nfjii9Mp`X;j%rB@)N0fS|T-z0bh0NsLXtW zRBZ5E_yc}diPH)>hc_nx|8RL=ZyfIx;n!&VR2DSQ-KhHm-OZv}hU=eac=%d|H$27g zxD^c7ZYKk*^mE#07WP20npu)|A^$NFPc%S2*adAoIxCW2J&VzmlHgt?K{iRiaU`;s zEK_A;B!9Xpx=787&+5Ge!{P?Z+|N_P(M9U#2+q#fP4`BKad?>p4PW40jVGM|va@UaNF0`zXSg1L zP>%ry(ggBj%LHs2^axIaBFJq6ylRL|EE2#D`f0d~Kpj-$3N=s;tBJ(!vh%7W#MSmC-tQN$}0X!qbbZ!cE`8grrC$)2-vfYSDc3Yyc6j(t5 zGr@Ra>xwMN^syztf{%n{J`tKPVfN&ZflHXfGaRlVI0z?s$&?_*N`##fu9a}Jgc~Hh zN5Wer+#=!a5C4!B3c@}rxfW(o7R@$9ak z6%&#QAP1N7_L!Jd1B@Cgbqx2}=sLoP4Ji`lfURI=_6jAA3aya}j9&|>oE^W&fj=7@ zy?r&Z6`UR7mq>=rQTXRU0XnR$jtYek4Y5MGtk7RokoaA`xB7perw7&Ze0XkbiW^Hb zQCD7=7?w748sJCLtqdRvmq$B|Y|(&K3B1%;&Yp5L!`I8T`s?^4f^m(;YxefjaHrxf@YI9j($RXKEbx1o+D~>mJz6<{!zhLKRge|@KkzU=6XA_7Mgg$|7q%% z?2H~@u5ca4c}m~O?8iBiv#GkK(mG`17Z0Q1aF{7e=GhWB7ZLTd8-xPlpvn$)^;)Ad9+!iI%6}71RwYN 
z8dubVJ%oMB`XfkQct!m)XxyXihiJxvVQrPzq2(-u75YYIP@7g$4UUCPd(kQ=SMjrE zQKY;E$`iy-Shj#BkYF|t91TQb1Lz${7dDu=CB(}Cyhs4;V$kzAQ(r{IH0D&?yoC(G z88?GJE^z*2aWfeDQGxTv$4%SR5r+4tyCW zZ=lX1FYE}E105Ak6erJe;LA99S-BLjA3RsXOJxigf6+=~G6yI?h4cdaNSqXbx^BU> z@S{}tWZ4IF8j3hqZcDi}HDEnNmnmQi3NPx;(z*VeVRVmCy^J*LAx7CEO;B#LoqVYI z?OYr#D%5m~gqjf_LCz^J+*`&V@6mQq39j6$D;NGyjGL*Qc^oPUt3;@@o9Hp6QxFsd zS6oHu%1r6XJlbe-D8`)mgr7*_J7Oh1Ie~+=;JPkG;-rYg4>=?r2rPKC$A}SQPD7gv z%{U2$AdzMjCyMq<+F?9!59=XU08U$`Ik+xSr&!RM9PlN-GM0?v?vGulofvZofNy=A&G>*0{fM-XoEGAuv~|lpuy1lAex^#P?{)G62Ttw$D7?kQ4;!#Cr?yRgIxs&eiXY27_5-vt z&%w=F2RBMPbBG(ZUSm#ko7^m9ZusYQqSka;Tm@EFIpsu&j7hE7S2`I4SFMZ?Hj}A7 z8kNGo2^kHY#*+_qQW$2&gs4ssqwBOfc6Vncp}eBHD2*loc0SN(?F;*8Hnw*iwL++< z4!NS4g@A|XD9R^!p;?)6c9uD}%)twiWy)R6&x?-9^9AbXdxn3*Irj!m8c*`HjqR{a z1~G8*o3&?hL&w{##UvOB%H7(b&fVH2j;*ou-|zs$HDC-aZS2-sGyd)N1=5kobuB6(WWtB8Y^BV)}T*S=GB?Wqj}M*oaX3JuT&?z3nJ(hMKs65q#6}paj~6 zeHCibsAJOhCG$xSU_wJrO;2IbHbLCyPdY$7!jNFf4m zXt0u^8yeijk!AHotP6X?&d&P#isN+Fq8uiG4gMiPZ&}qFlG2JA@d?KVnxscs|o*Ayh|njOTaT|;O!$nNAe zy5?{Tbxo3XMwAd^meL#3##6xw{qxvtNd+boUWms+|QWeP@GhC>I6r5^Y^r$5xmJ!ej-I5T$^YNuq`H1bU_=6eqJg1 z9#HhvH=Tr{=O@V zP^g8e(P4D$zU-zi_Xwhe&_ zNT7AZc;n#C?DHcZ@wW!Fb9YMxlt}o;zQ1Y(y%|)r+v?QK-TfxH*%kkCM_;0bX$<@q zeYs<5CY2i#{B*}3A`HP_9xaVvOq1|96}GhLK6nwTN3a_`z~$3i_)BFNim?4E?fSF9h}D^q2~yZ>+Z$~n%sqFuuw3)m;Y$Jlzs9JlremRqwkb$`%U zIUfPoINDC7#>&|e-YQ|#0X}b#aJhtQ84i?4xJkmpB)n6?BPHA{;V}~4gYeaGn5I@1 zv`B;o2_KN~RtdLCc)NrT6Kr4XsXbzK8skpaUw_k@breq;hY6Jqb2Q;uiW&)nFZIKp zxZs0#w2habi>JWf2+x!EGOa881*wOPPK$$12|Tyc`CXKb_01l=Ye}F6;~*Y4YNsJ` zpaz$G5l+&eU4S7IB3P%!12z1ruNQti9iNM>Q~g26BM-_Ymb9ac03(e$HJ9 zhFs3@W*!FWj2gV=$K{c4@A957eVZ5Bhl+eSD*7tVTB=}Ic{$+OCu+5hjmyve68oG! 
zQ0rR~XUKS*l2#*u9vWxxBdBQmnjv#qBPBlBU#eT_2XZKgk~Ck!O433JD?tZKcr70< z$Kz;CP~?et{K$1vPDl<)A9(zL<|pXjA~qr``vZbb|@PC(^GR~{Tl ze>em;b&$eb(9F*R<(CjeM5z6xrrp#&mEguHbOY1jLt5bFxx!+3SnD4C=ms47fO1M% zyT2)NzVhXx2=HIbYzcgdZMFo*vEYjFPUiHjOg_ZTG~P)#^r}`Z{E1V!61d|_{CMG_ za4p=;b^JajeHs_5e@}J?|o6 zMIeRe;6=5v9-#ppjqxI{b;~(@_%25^k38 zA_><>c&UW%kuc+#zLgSYI`An~f&-x%!msjG^Yehx=pMOz5>P6Y4F;4-cghMV4{E51 zV7qt1!Ebj4_SIR`%o#Z7LR=r$A7V8M`mY9+s#uE^4lJ>&@?TUFzQ@ZdgK<<2B}PbT z%;K0F(U^?UA*Hce79gZF)*xZVGlRaZvVaMhzGn$mDl>XXRmoqlH{=V1@RL+rub0$R zk<`>_EGc}dl%Fq>^$QM5SZcbUmEfSUND@%LEWbz*XyFQVa(JDs2xap2<=Ki*7YQpm z@`&NtijF*CI7h*L#-Bs@a9p?`Ulu5Wg%VZ`4kj3khl~ggiV>A`J6Qh z#wcOdD1O%$S+GZ_lJA1C#zV#tU{28%K3;;6OZtUeDS@w?8H|4ASHe;N-)sp>0etfr z4)ZWHB6O<1)2;xTMugB32cZ=XLTd?jTD!54$GO#IperqSJp?ff9e(&1O@{dkGxnM- zX!aF=1Q+dF!X0CyAMPmn6$$BZRIt<0=&cQ?1Q*Z=Qk1Y0x;P=B1ysRK$RA6H#+jYY zc?s!oXGI0U0S%<>d5s@nlxRRg0@L6r;D_l($#=Pfky?0UXCyx%opK`37_u?yzocnS@c12$6*&_W5z`AaBY!Zd#Ye7}S>9>5!^A<95B$0>UX1{D-lxzDFl zgJT>Gj&w9Q%+X*;Y=fI+gYCh#5;Z6j1{Cl{XsuSl5j%rf5_vVaMxpJY%`rAqxyMTi+gMw=X6hFxg4`kjBa z^7iXFw*V^G2xk)ZQ{6h0M~^}ByClv`6i<+->jL=0jI7`Pnx;hxKSJMn;Vw_NAfd`R zp+~FZ8PrBLQnU<3zqA;TPb~ib&SwqEmO#51s6xltImul#ey)IhAOUwIMtB8 z*tj^T0%0cS8W*!H8=V1FukQJ-al1r6q~LP;PcufXOIUrJu% zM-X5oalV9=#Dx-85)YOz3yib6rfyK=l+-HxLQfn149aNpj^uvBM1dkH$+(hXyNr)x zy#|Q~5>$UrUqTC3LlNp=Gd>T==PtOnu`p2Yz^g_)Tl*Lu?Cq|1;A^xlz~c_^ICnt? z9&jL*5F+P+NU&Toy4rzPOL!~D+U$7u0iLmuon|yw;c0qwGZZ?y8Y~MLxCbhziKBW; z#G|>$^;Lu)q|UD_kO8!AHNlYu!1`qj4^Lqu+3*X7$7u}LG9z)D)xnZ9()o#Yt3B{~ z9U?|HoinJK_ADDCe2jPNF)T5cHpqJ|-B}9Ezzu~MLxHdKIrj@M_=t?z2;3y2@V75(Pc|bxk$s!VqaIbrLG=|ud&-dDa|%JJ2iY;clj|6J1K2P2 zM5ev1e{fwKuedlb7su|`hO;E_)xcK#%UB{8v3^8{iX)yZ>+@dl`v8wVpVTFY{fGiL zEk=83rIF%wNxN9Olaq!}dny0b+&dv%!owu|FTyi@Z%F)aCH%I88PD|XlQ7dUeFu1% zH4yqIJ*e_E(>*#O3b~=B1@g{6L1ixW5j$uOHhjY!+Kw%(PL z7TgWGIGzs2V2vuKfVz8xbf;7o<`yHicp}w3An}#zLOi;RkkZ{|iJ)}1mEfrE))IU? 
zhSxeeeK5etxEaIvy|SI4a*HS8{5P_K;+(&)A7%U%`CKvHOmLL(?fg6}hm~@Qm-%F; zp3p0{-Knidt&MjjeLM|dB(=vZ*P?9?=*{-0Qz?`{O)Y2{-`EH-$z3p|mV1wZ08hD*%}=4cOhy&n90czdz)1m%@ijbG zFJIzXJ3c^m@$OBr7JLOiNHu4yD>VxvpgQkl_p_PSEn{67eX{6rJNQ4+?;N53A<#b) zPb$FW5drwEjK1)fDQYh6CLWyO4I%_rB+l7b+YMD=w8gR{ML`zv8Vz^+6L(7SYLw+D z`fE_%OlDEzdjPr;E|IyUEU1(mv;T6rf|NAc?9zCy3)YyI<;q@JR1N}4+8 zc82pnLisf^t&^;kKfx@*csypcq`EG3%b!(_|JfZ!&8wfV#&>fS>#v?|t?%Y4)wNmH zeuPV3xAM|ldHSobTO-n3lil>YS7Xv$uUos)0BzMkE8XiVzVOetpdjqBjD~z3xdGq` zF0?M}plCA9+f}H`EbMXXn!b8BkyhO03KPI8i)sNm?q_ox&y^cXx%>zG*iCR1(3=9) z{P0gXpfc??vTJjJT#;FKm=|Q^SH6sXod2L*p@Z_a=M>uL%lQzN;}wt&uaDjhc!jTc z>zRzNGoxO8c1Mz%j9`DVI5ZPJ+Yh#drs5%XKqnPQ3Rn8p(_Q_HU1Ki-X^XFrp2JsH z;qKAirw8f_%*YHLj`C<@Nho}`6tAj$^a}8GuY6<;bPVP2m6}3$z-gdIyN)V&^-Ia< zjm@kDuRd8r3}m_&Fb%&na&?r+8;F?tPBl)QnK?d-p-xR;$2Qyxj->udALqgl<@yUwW`J^(O6elk7y^Z-w%LTrlg z=>Z8B%IA9|JXpdl1XqMc5?tmQfNx)dT0f+gQ5$1FNfr;^(yM?sPrU8M(Gx@d@Trj{ zd6e@7uWOtpt{jo624HZjiuf-H5Jj@^aY0i{aoUWFL!nshcln@pEE9$d&tL#n;*sbznvPy58W?AyDU5OmV zUx}d;o8>JrU0&|&@-ElLdh1e~n$7wAEUJ~?%SL%zs?Y94pEv4aW}r~KdwKgHnB-4o z`HhQF_n0gUB5wv?3Zju=rb`nG@yLn%xIn1+d+Ja1(zy}hp{ITa&ZAuj*38|Do~n}R z0K+U8Mu$QuR|0yphe>efdY;#nOAH10BTw2Ca2120-pg(%9xQGeoDoccSyw=MfT)k? 
zO3j-KTF95E`8PJZ0-N>8=&}g_xX`EVbZlGa!jMx|gz0S);Gq?!gR$5q2th3cE6T87 zN9>A-6-XXEy^9>KN8{hyRaWKEeoJPo)-Mqc72(kiQ9V3GV6LZ(ZkNgJ~6@^ zw~ShjS6(Qc3FRJ5XyV-qdISyma{x$i_0JL!T#j^rK@|X*fk4+1ET&Gs^iKP&2xJ}N&~mP>QD-apIZ0ei#uNt=0Gg@`U$8;%!>z11eVy z86jp&{U8gP|5P9aha?CO$LSD+eR~LgmZJ>(7S#d%RJe!I6_8XWFvdns8dP$UJ@*xo zKn?uP1~Y1;%KPeJ^oJoz=x!U(`yTsRzr4-&3nER&-cg- z-T?L+b@yMARO-jMD{_GfT$R2@XdqVP5(sbu4@~*<pcbe?^Mu3-`eZQN zA!KvJ?)){_f3yn^i!X+jTA{o3+-vz)Dd9K*^Wx)N!Pp>qjd(R~sG^>ERyl=AR?Jb% zJH}Y+A8vQH_-9p?KG9|MNYee{j9WXUGQBf%A0?sAobz~+xboHMlzwTQl}=lK++%ns~-%;DUz7{{@;hkpALJu*L^#=(rteC{IA z>9#?cw=9Z2p24bvNYMJ7xbe2qAX7Ut_kPLg1KhTpDyewTc>u8pW1BV86H-IGPzI!^ z(mQ-jum)iGjS{wo2ALjVAzbKobK$Ojvp%^u1bQg5=Q}hSO&>Exei?r?cz9>Wn5~d5 zqz*F6ss-iT9M}vtqp~WL5@BQ$Il!_1@M|LHjf;b7MjRQb8Ggi!p3KU~moRD>IL_d1 ziky+jK$GMA69quhIiTpO+l@jEiZDB%8xNbTDxW(>;B$J7)hcn7qk2F_>@ z3nfEK!^@++qgl(&cID`$lEWbhIb>6bIIf?D!=U2u`Gg$u3K53Pa^Muc2P4OKdkS$z zNN&OSx=Q$L`8-R)=Si4DX&{tKaFvfkq`5AoUn{CWWf#eiw$y_Csq{ttfGau#aeGlF zackeWp@6gjNnqcxQ5y26mQe2pZ^pjnOaS%G$=e?YUnOrI?RkPL!qO|%$z)E31alaB zPVl_ecl})1*2qFvhkkC7Yrl-N2zeTYJN!Jc@4 z0j_6(VY&ljXvJq%vMsC82N~Q1|B#hS-F2ocXpC3aS6$EyzJOm5~2}0Chy%c!^)k z9GnuI)*mgJbE^Mz0SavkQ~_i8UHCuJJ>fKbEDe#ir{SAOqnWH%B8raDWLt!WHj+i5 z5tEyf_2|1s87%$~^N>>5f`~_Gqa$Mk?^QI`=3> z@7_V>`rxG@k^yB(JVn2gS(3S@k&43y22g>F=&6MgmJvOb^Aey>A{kx%{9GQm9EnwIg2lARE88WSyOrluxYby3wG0WHNROIJ~S zSggpPD5#Q3CA*Pot(&C#3R9$$80K&ipYH}I5$ZxtLS?=N{D_Nk&7BQ!78M)U67jNj z+pY8Kb)IOr_FBIxbM*l$*iOA~7Bxkz;OvA8HD@SRiP(R`3a+ZQTY=hUP?^%@(iN&q z=^6>EOlhoyRi;!`^!yU^umRlv4KKjr6=Cd;ytP z)K8v6uo>}_XGmE2NjYhc^ONO2jNbYACn7kE2-*`vbQ!ojCvFe=^~yhrh3EAjeA6Fe z-2o<_Mwoj6o%4L4f~GVE@G7U5u&08q7BkP!6knd##rqCCaOP-U>zbN&-3p*OD6BX0 zv59Upi;G{ovUW|>)AdW4rQ!rEskLl7OXs!0lK%W3t^R9tpLO^Fy<-V8!P#pd#6R~G zP1^9JM{FpneBzruGOHV)o!a9(!R<6^t()ou@?0qs7ELU_X zP7BTfX;iX()55f61-sY} zPqFR5AFZRmarda;Nr&PJCq>x2UHp7U%_)h@QtL)s+XppzRgbn+oPJ@yo^QfLJ6_Vi z0S0uFJMLtdMa;QJD*(ng#6oj5=OYH0tFl@rL|qyjL8R2{|ToiU+@OYK)i_D`&?16|z;TY+Spzb3*^Wg;_cRI(5l 
zX3^18vWY7H94tQ|*Qvd(fz(bQ#QANN?+{6X78$#p(nRI$^t^uuV4z?0U#>T^Z4{PLx8!Na1 zA_9KIYkJ%N57~-(t|mE%w;y!x(fFvST_bI|@3rVXPm_xkUf1el*t=z}>*e>~*?)WW zYpqwixw4A(5cLtnmVNsM)uyjjVKF7_P|3k_R5lHb$0NFpql8qy3T^YCE}YFRedjx$kuf zm2FpwuXBo;LjP2$;cq1iCrdAQqSUaF2--u(si08!=s%UKWE?}&5Gc2s=`aYhWS>DcDCEdUd-gUSX#MH2m<-tDO)3kImDV{%WRg3#K8!gZ4z_ zHCe$W#O-0?7R|hIp*>>!oM@`uOq3S>g9Lyx^vH&!T)K{INQTtrt=eQANoED}9oAlG zjD)wd9RxzWl>$4F5<#vq*UHcm2{jyo)J7zD8)Ce26sEAinA(sWMIih*?pFH4Ja;?a zlbiUS(Lnb=sFWM5%4ng2$OdG_9tq10$c$zQ%Y}xFodn~?{66^0YMJIvYrhl<=?nnw z9rgPhAm<9CN1MUD0X`EiT+fJH@(2ua~{eS}hpXrA-^?$k4|9MjX z?VCstGfU}Tsz0l^g&suopXJbhmP7wpQvdCx{!8|{(h9FjsPO{QxQc|)RkyxJ?PnUt zIS;tyI}W)0?AOr+gG9uTe_Uv2wI3tu0_$&2TO&rga#Ee;KYv<`SecWWGAe~6$N%B@ zyZdQt3!oP{%PXIbHm}Z^U~_&){o7@8&hjx&i{*E$(OZ5YmOnE{9iEIkNnPR)`ru}9 z^qEq9+Ua8N!X5g1FXMbzf9qv&aAcnT=aS3 zyh9wVnU$L9(8Zpo#KJ|R^kD)boyB^`$>Nzii(MayukIY3syS*r_>>s` zi=4RP`A>=EzbNJd+y!2J$IGI)X_xyQIMw>>UD@t$K7pSmz1p26TJO3&g0u8xv3T(= zw|M~00k6&yv+usLO)ZRZXyL^M@%7!M?lP>p-U=5itpRyiZiC9ZDRZ#@ix}-WI`J|{G?cz!1UgHQfyh7p6axm&z~gP&f-^| z6zx_;yP)$+V*IMz?gLW2BgIVp%tc~{h}hcCoCB=Zp{MgVMJ*iTZtADlQ#ZA#=OvE% zd7H%U)ywsn{lvk7E5(!duX71;)tZ;}&dWr)wO;@1O!17hTt9b#C|$c(pFT$9JftQbjQ067nC&}Cn+&mh{p7eF8~TLU@o;>P^?D*#1vH%d4)o5&DnNf+` znKN`!HzCRneaUlTOK6nuj^`-sQR8sJf$oSkZ->YU=jg>dL~%GLwP**>f^NkNpMLo} zF$Ujq|94_$94y9dp{H>({*~_STJ0&9t3K zNBV!*EJoDl=yyINX4U7ozuyJl^RM@wHq3^V39l1OwZT{`e}j0b{w4i4_lxBl#_JaADaFUD``ug?R-O$+tsgy_^z>TUpp(G6GWn|~uV zHcZynLFwjn{a1q6-t>Yf-dwDYcug$aJWDTmO&r`jUiYmOBYxdqPg^M#{<>5jze>FH z>)q}pK<42`2I`F~#lc5=h<`ouVx%Y)i$>_bUoDCs8>Dw-%5Od|mOr*bpAEFO>`#4V z1t_{As_5VfQTq5Ay?CYA`*^WFafQg)I!b?hm6*GAu|D~Eadc~rt~ZLDC-U?Qn?xzx z#=ah*q_dpKP9K7sghwczNp6IXVH;IE!Y|;D96YHN$*L%+s&pf$H zPnstdZ|m1F){hKi*?V<+YbIV&!zwQHj)0jLHaAVi4nhhNpBi2(svZ=Yi|>yc8rLN zNY^|jp4pLewljX6`B-#-Cdv+mK89|ft1~(1C9Yc8GcGqc|1t6cXFGF)3`Zl*VLpHw ziF53yJ{A*1uWOOjHM3{Wo)fa)-=ok#v}dy|&Ix+iW?Sa|5I%6GEq{MB_lmQ*9th&! 
z7udoMgt14qst1Dkkpf%RfjBWWI1tRv+58T6VMVrtgTd^CZNb5q(xH7-0VMaaxgXjg zP5sqsJA5dC7o^%k4p%((Ts64Y(Hw_nLXcVoHd_ymu5j!rG?dTFvBe(AfYfJ?+~;-c z*h-%3W()j!6)*C#<$oPs&d=t3Y;*l4gs*8}3;!mAA0BTz^i2tSuzlyZZk(?|>R1S0 zGJgBLW0{PHj<;nLrtyIB+bt(DnPh=r^pDQvhLf*9aykLn@R8HuB>y8Z0VwB3D;%b{ z&PI_PS|vvJTJ(|a*mprZ$;Wo@yG*`yoGs(~F!tN_z2Cbr{#Ae5v6JEa7mS`o(e_XO z>J~agT@i&%uRZNmCE2#DqHy-L?PSp@{^K57?x_%da*wU}R1h!RW3!yj-yA)g4*$_VbZQHQ3nLgjoD2s4-`>(Eqh_T??DcfC(Eu7EjXY)Fr%ROh>4xJC; z2fnhE;8yUJ&Fg|c-~E*>>_P}%x7#-CLJ(iG+m?w`vou@&g&?2Nhp?iw7TVzwE0de> zs4e(DIcR%)AtCWVbEoQW`%q*|q_nB;d9&NU&Hym&uu}!wP!xIVZViOx4yRQ-*S1MWLxz0EIx9y?eg_V>E++7+e2?) zpJt)0^UVVO$^o0@Ry5x|$rgJnnjN&QzqOV3>0^t%9mHSjV@ta|iD!D+?%mGdJ>ImX z{eFxO|I8MBCxj3B%$9a1h`;ff&GoOgwx92e<2#dV`F|ba%4ax_G?w=nWt((2pRY}{ z`9JWn^|)7qUAHCN+sH@mx4GWW;C=VovhL$~r8niT<6gEV58Qb^g4%; z4O{F#!};iu+q3?O0cz?lwbBW@)Jl_gq0(G_WVBl0*SpjT-+a?n{4kuqjtU<|^UA@t zq(|XA>q}eiqsu(yOIy}sf8Oj(Ti)YnerbZu@+6wwwDo#&lOL$ADwi>0=`nVUuU6E0 zNKxx~PeDD+sS#(X>^IxqzYg;oidt9ZNIE333?3Rp(K0LH>%O3HGrnp4SfMmCi{sBs zpkgzN<~3?khz0rAJjmb5()g(1lx@ZK$>gb&Z)N$?!{4lw?!uzDl1Il}ShVyW5CN6g zYUvDs0s(9T5dRE-I@hiA5tE;mv-155i0dDu5 z+6TBN@Q_;i0WO~3zSyaKf%}uM`UNgKO}#u>fX}xh(J}SNARf8JB|6aIk(BAl;!0h0 z#ZIz#u}PB04d~{@)=TNv#gkesituJ_`OdMF=FLjSf^yHii3ZvCXHkhai>y%B9tQgD zS(H?hc}b^XtvNN>Y^f{6aI3{8R(~NxtQbyGp?-aSg#6MA2G0=mPJ86ES7ym z(|y=+c88*T*%R(NmZEE;LiO`ySnr!1lvW!pS<&awGYnhaZ47_Na|0ny~u_8dis8^PywNQkVIc>Qa3Nh1X@> z+1vDPUG@T-NeAn)iF^a2w)I$RjwYQ{k7c?QB#K4KTk67&CmT%F>$5YhZbyn0F+0+} zoi$3WGG${k?RCUhwJ}T$uq9EP7GgGyR_Zli!K@M!3u>mOZ^`sp5bN#njFz0`rrrw% zVjX8yra$XM7-nAJ?8e;3Idl=~MD;%sTLDo)p)Zt+Ff=yiK&v zpVjw%uck8{mr$E$S&G|NPqF=o_CL!mc>meiS&2;A-GnvrSZ44Hk9@kgS^y)6?LvJ@@U7kOsYHNR&Wq)!uo8D2$9A74A zdbD~X#ExZ$NovmO@W|m5)SPAdJP*6r3p*|)G#XCFnlmrnWH?=J&g%1ZPv~MZHid_D zrsAf|ho(Hons~SyCf}X+Qr>ec-0cK*C6=}2izGb{t(NMW>ocCv8!gbmUiyTxTEKJL zenN*@pyRgsgi2blu+Y!3&H$lIf%voUGWIkzQO)D1mgp^J(_s4edDgj#*)%}4N9xFC zk2LnRboSLX_UW!*J7lW6<}3D>?Q2!8>Wt}#csZeeIwq_dh%6C37CRNEqgt@MIOqYm z#;$FlHs(`OHLF`(v}?*fnh!MYl$P^QD!oS(jQ@Rhi1m=0hKcu_*Z4?Z?!j 
z9SdTMsDC@wh}Y~&xs6cx!v(yudgzMT)J!wq`#tEd)+{9b1MLeuH+?gDK^;|mQ|aCX z9dG|thhT&*v8BMQe#i@%-_q#CyPjqMJ-KLi`1%HoZ$U#fX>tqF?kQ^nRlC?RmLl7=l7E zS<9w-d@brDKC$ryFfp=V!F{iK#v=kEv5z7E8aUa}TTFYBr6b z?QPM!v$}|;t4+VRWf4v8sPC=pOLoGizjWYlcQ%3lEP;yKq0{e4pnL7u>n(m&pHCdU zTzfIuO*0I7iE%NZ-)g7NhGDBIHylH+{!_c<9?H%eBJj^xUUIT(+;O$jXUTxo4o}pg z>l)ruyr2nXM0>8bWk>tciT2EwrzcQJdltsWB~VZV8^&`tP-X-Sm5P{pU6kV5GUSd0>`X!juzQhvvOQR^LW7#aQ+-h`$A#C>xk9ts@w@ z;hh5Q>&Pm8bI{Ph<;BGm*@|n|+})&Z z)Uy{G$Cl8+UM!ez9ITY|LPw=eQ&=;PxrVZJR!~wr4jD=qtYr3K;ml)C6nvInqyR!+{gI>Q;tf7%(?R8mMPIwSzdWDdut5&%7Kdi8_vvGzXFb?Ws3iJ zXNq4Wt$`X2te}BDA?GqKAYJnsW1VO|R%g32z5=%46|nUoC26KJK_BCJQ+U!#6>`gO=ku!8z9+cCKT7mw<#rWI&<~=?`igHqw^_O z^1yq}1p|6Zj=d}5cz&^SLH8_H(pKP1 z7>^voaIk4HxvpYa{IySL!zz~KvCEJ@docyA#uCeszBFkyyT_H6C@CA`>4d*1Hyh*s zdws=t+MUL&VJpfFszHm%KZiw^dr^b8k3* zywBd#c=NyGomBzvdCqwc(|Nn;Hu=2I-`FI&0^Tn<=k2fa{^71B_qe~~ZK;6wp9`JZ zq-ddL?^C+mjsK2!{zB*W{?s||OpQ15q%9knuwptYS~VISDP)TqaR~QawDzY3^wmLXVV0g-DkPw{H%s^*=}X3FOu#(qGKQ9 zk=I03x3i5r@lVR#$dXuy^Eysss-l*w#&?{PGbwWmOQRh(5ldhZ@j$9x;^8CSc{Ob^DeWF>uUdyMs(^9xOgi?76Z7~w z=XrmDYMuqUc}5BIOrTFb#-mcchNGW$LZ1b?KK>O{`^5sGPx)5)aDh{c&0e5+W7=KK zOh5E;Zl>uPTh&d{7qdFHG5f2OGgk+l*?KcCCQ-C-a?)yxck4i8|ZEx>>(|-MLvu>mpCs z=S1XQ&UrsKpB{h7;?PM4>}SXMKeCmO1MD*6OS38bA1sv?A7b@*t9F!k2y1+ENZ7Wi$eDf}o)s{TG! 
z3XJs@9A!)S0*!#DuckxCST6T!PLqy$||o#L3MelmkSk~ zWWD&N_2gBAR}~)3DX)kX^MqBDbqe~%uF~{~@9~^oe`d$nyL97c zcAx*aLbK4fD=6+F_#a+D85ddj(*kL0>!zA$7SR^xF0!3!RnV|09k>JmazCKUmr%>} z6;yN?hD}~Ukyp?V+e);KcsoYxF~(h{OXg@D(YP09+8nii8_|8{XtAT!EsgD#3fT6T z<1h>unY4D!yN1qt?`@6uhzfXDnM2W>=a$=5^hbtMkqa|4ksJM?v29QR+anpyEwRlx z+xa@%;+q=V3(?LEJS#)9gc_2JmKf!ncUPUad|l(cx&q!2pc7LDBin|~*~&WGK;6K7 zD`5L@wsT8dobA-W`(|qfo~2vDvjVoeW@|&KQQ92mY^UjLC*IJEa3adNfhSbJ_6_H3 zTkC9x|E95BPyyR!D%&(#asxw&QRJG=dH*p>v&3-Sz%N(8`#R{HwcGczoEmtW&h}MZ zWOD^co+M(cQU~htASwjz+do&e=B7*}CbLNUMNtomtwf z&d9csbGDb>rrgRroQL10lFGdNp76Z4m9Q#&7zVoTlwFl)^FBQ&+?D(D*er^3<&ovq zbKig4>2iPC+w}MuUjBoFERfGCzZY!W+YamBPS)?|Yqc1cbravFa0%LsxJhY}6K!62 zTiD!Jxghap8RjQd&Ab)fZFM&DE9DdkdRu#DHoni`Z#!L>_JJYs>jl&&>g^pv zC6&0B&+CT#&-DB#1n*~f2;Z@kqMzYGyz5fYuC?jCXL#dsHd*qP)5ab1mh;A)3vwro zJM>?eBu%TuXpw$ziN+0~18&@%MbJq%9?xf8r!aRO%V*r8Ox)Rr$|ZLm%GgB3(}Pq&mq`F-hDlnVtcmpsKrW(uvr+j&eeeCcs?BBj;fS$xS7>;d8>9*Ye5n`V+1HmCEC zmQaQlk7hs6NiW{sW1xX7VxT51Nl&$+=!2uNtrDqADU5Y8wlp;SK!uYAKeU9#x^!r_h@Z+KR9Fpg$JU!k0FQ$!oMC|Q!I7R?0mG&Y+PM1OHgLl<=AH0Sx0_a zkNdE;qK0$j?D1W?)2_2Mol5E>j2i9nCCHt0o$T~7e{mK~c4@U3yUvnynf@&I=6n*Ku9^ZdGMnN;`18~- zgty=WvK1uq7{4DyUd<4r9!F7ZGxR>IvZWdKW}J7TL(id9)O983d5$N)%-=X zcUuc{f47hwmW-y{KCt!+uTe=K_~fRp6xkON@9#lLeR((wqAh*-DSmBWX*iSbxwheV6X_2VG^Q;?=PjhW!}uI#qv^wWI`5dJJRZ)ynDop|tKvO^?__Kg z9UTeB9eIuJjf5<(DNEnv?Zx1Caug5cEu+aY8kIJVRKiBXaoAP5k;E5P_-b{AL8z*4 zQeHBTV{w1(J&GKI$YyyZ3aHaMk={u(VuvSE=tij9%kmpJCW-Y znCI8-O4t-0XYn|1C=y^(Hl* z)ExMOTLuNqg>5QjQ0!btf@hYrxftnsUZn7M(GaUB>Kz`>rqPynV9BE|QSm$IlBX_H z$UMG*H@-wg^FTF}s=kZte%G}^y_JdY!v1{UMar8GU$k{6*9E+UTk6x}1@O6XCn+ow zY;GK;j7(H&JEi1i@*>98((Fax^Jo_3E#et$9<^J{d$V*}x|kp4qkf^d_js63zZV^k zQy-!Zi()SG8uqXtUvls5LRa471EeFvtklKENAnLBQJxL)xwB$k!UGtqLk*Vljiq}s zF3h6KOL<6X`t&Su{!p<;v))fHr@CXMoS^M$r5EvF#^qU*whXp=jLF?HsPWIYsc0Fx z&_eQD4sVP)K;g@Qe&+xUTMn&{U!ZK<*=RbsoKI%cl&BRDhQIzRC9Q;4yh2M?@`cqp z)D@cuzEw972(4OD%T-|3IfCL=@o2ASfpioFUZZWck7)jY{#pdr5_BHC(hK|DE0 
znc0XBr%%$cY(9y%#qN(akgnBYN)q?y7gLtFH(gBmYv2G?8d7Wyr2ZV`#C`ioSjP|NBV1pQgl=EcSg!Fo9w+TZ~b|;G6#P6|HO4Nt2DPI^)hd%ln;WioA zuO5O=x9U^Bk9l2Ilcs-+@npeF3fl|;F)+J1&rsKs!4$L&gU?hNy$!ML9oo5#x94|fDEGEu_~Kv9px95*lKC?z_ft53 zJhj{oulK)3kGCWFyE1!+aJo2T@8te$1s&aq{#*4LZODT?ex?I|9FGg-2m`>RR=)l2OsiXk>CMXm434Z6(wCZZp-Y=lrp-c4m3uHg9%>Ghj zXY1KN(9!*f4`EdG0BWn>!m&0?Z}r;zI#ApJXzEAP4?y&v~+$|L0ZtV*c z_K&}Dh|>m#*cwM0{sF#0RP{SJ-pXlo=sP&r(rI+>J2>8=Y2^PsMl&DU`#o>vam!Hp zwYg+Di4J|gm?BRixJFH*Ff8=)$Z3>RgyJ2hQI<&0`hkuWAu3&@s;77>e(DTGo`S;^ zDbr6Oww4CH8PllbG!H6GPno8KoZ-=!r?jTDpZ-QGmp)HN$zcPi5vFbcZ{LBm_quxn88h1QgL?Qfrb)2V)U}n`d{I?`+lIyzrwyd zPEp8jh)Y#4T^9HGDN6Wtp^_hUz5#a{7D111z+U|#$P26CJT^iJE8$6u?>|KwZt@VH zPeUAS6I^PWkK0fQ@>wqV-QvgDCc1G8ec^rbyN&tX;UX%&4X=ybME<{HIyjD2|Bf!w z_6!~R9gX6hLjHH4W6KnZyn}R4Z%Vs~!EGQNyaOifl)HD(QyK?gr>o=lx(yLzwiuG>QIG~Dd{n+ zzM0NG=I40r$?C_!UX!Wh37A%!ywpny+cpaFf`eAHefxpIEFrO!>m`*P*s*tlsN0w7cuW0ZtLfgT@~8(L zs}39d>@AICl_;vF)SlOCLzy+DY@XbKLTX82j3}X&6u>8Tpp05l2tPZX@@h#XY%Hz! zfeMLq!$*qe)3;HyFC4oSg!YxPD-_bb@6(<#sTU>HmU5+>Z*k%RK5e@{9>01UL~r;> z4fzJVt@MN9HCt1OpX9~en~}v|O5*RgRwnsNMKbTulA`NLA@cO;I4?BtS`F?tow9)7 zORe0n|2rx(B4f?|Wf;@4@qD7n)pJ4lN4=wnd0dKy`R z5iYJz!zW|XF*(OxU?}Da4W){|LL6gYRP-7pHGLI_&$C-l{tM_TSxupZ5mF3)oJ6@1QV3@LXCtIgY0oWu zSkXZ`iHEgI9i;XM#|>TrdgpDtCwL9tTa0;0dP6dS==e+00IB}(RwcBfG@J41NtD+~ z!dKs)k!xouNNS1#;Nwd@J4?UwkMk)q64)LPBRLWU2M7+4Qi3$}HqNz(@}usrNlj=* z7ij`NwSn$+k^H4AqRNio&^Sh_ORsm88cKdBmEKjF&hxj?v)& z6={@Ibkj=hyGsM=bu|p0%lkQa94e{?{srA7{O&J+R#DObk41(OQ~S||C@HAk6a#*| zfs+>mxEUpNmhM9~&2-Vyih3qPu^2!AOI9N$lXR$P!AaA$6GixV{k1R z*hAXPe;-AzuS!YMQPh(Dsx-RZKm*^KeVn_ z8m~uAye92;`F(>h{U?uT{>xGi?ul(zuS>BmgVqb|N*x=$nzDNV`_^WGo&AWWMM*Vl z*EEPQ(m@285hc~A-ONBS)JWlR)!_MYeJLXrJ!4U0DvHJU{BC1<94qDX@r^0JH_}Os z>2hxjX%&srn+!Gn8l@cTBVqM;U}L4YuTQ+1Pl|m3>;_m zRw9Q<8H~T*kd6&U$f-7xyb>Wpm61w#qJ*WC#D(t_`L4YwX`%!%-;|Ek+Ed9*gvu)h~YPk{cXgETRF81irRqC*p;Nqm10g-?VU ziH7_iF`=0#9kQ;&=aM)xo<4q4s#Q+cJt~oxGHsINf`cc5C}T2|onc@V*^9!aNO8Q6 zF3K`P{uqcd1)_9>C@E4JZyrSX;$9Ewskm21THGb1Q*po3fbzxtmj)C*4fpSm7We%~ 
zPse>b($l4)atCYeiLxDg)`tRHaoH+iIP^62pjRqMwf^lhwgO)!|8yy`^42JA_9fPk zlhYB=j`YW>a=O%1a=&9G?-^1{zO@BK&yeCI&l6VKIz#gGFhxJ@Mm@Sx(G0Xt`<9CT zObE^Ev{16&ma-WJvUamEt$)y*l4hfU2MwdF*_aDfHPfNl5|%v1Qf`LiB|R!eu<^Ed zdYm?xb3qp>$w0wRo0HcZ%zJ;pVm_wf3N|6m0XlFPWzT_!4I4|vNQ?K{igm8!$+`bn z@}Gx*Fm600%|p*9NT3b#q!Rw?ILdk#U8f7(eOHQM?Wps7^vaYzlr|p&Cl*QOOZ9EF zM*7+w?sw;No7lqN^s1YnWiqXHl%0n7l+aRIm^%eq(q$Z9WuTgaL$026?%i_xgtkabP~ z%|Pt-o&W@~0D7Q`WC4sp)*2N+)^Pz0MDZsAxCKn8O#twccmcSh)ItFSf~G(Kg#c~} z;5_&QERh;W+d$Jr06C%x0Td#O1h5Z4fdI~f^A#0?;%-ZU*#@AM00x34UIjoiM*tDv zvsC~+Kyyj}Gk|#_fE*A9X9)nrF#=cwn#lqPMAjM=K-OUam{97b0Ng?2O#t>GtGxgs zKs;OpfLSPjfgn}{FaufVQ~+7lWx$L<@lXMnkkwlSkd-cgK$O}jfM%dME`aj@?hD`+ zh#j|MG^R?c%R?3Mod)IcBBFE8sk#m;x275G@PL zeF3ca2IKTfn7s!u@d79TW}yHQ1hD{`0dsRDCSrHaS}A6gW;R0BF z1~l)(jCm+^P5`?=?EL|NYaoshKwDHgM*u}2E)YN;RC-?kRZ(f^Ism_bX1D-8hhS?2 z-~~RX1h5`@xvdAV9F=wvfDibjuZLv<5dYQ)zyhE^0Ow(ia{`zG;E4bRqjun~X_nzk3f2*F1H6CrB4 z0QN!UH3E1DQ40jH1EQW2z!{W!B7kck4%h^sDk^O+fRUn61mJ>7(*=-*O4kTr5GpMY zK!7l>0B(Wii2$Axd_Dy53@U9efSsr`UI4v>eFTtq%&M&UP`bqUdxI$BW5n?v2hpU> znC)(pO>}&dxiNj_FE>}mu7y5wXR(zcd<)h~E;dz)w@9%vc4@c#R9b`8@{^z9@g`F7 z+%Dle?IC`&VFxB8cbY1$JMlEcoB1i>c|h=zc-pWF{qI`5a%`7)%8MDOlzb-UT+8Dr zdN&5ScaXDN+Q9j{e#)eLDci#D)+Mil(&JiZaJqwik$3D^yynCu7Wj&kribtV>2W&# z>EW}(E6SlGQn=Y;pCN;~k=HTIXN>sq-SmY41O4c3$|TUs9?vx{o7Ib&BN{6qK@o-$J5ykx^ZIB1}VXx7c(NcGM{VY~_qWviCET&8C{qXJt zMh*3&NyWGaAYF`S=fM8tbq>wo+n>V4-3zZ8#JyU7$~*@*=vIr;&SL^`|1T@ic`1QU z382U4r6S(5KNVfTVs4v;%CVoNY?*uZqogY+>E4gB#JzGq$`^M_Ke~KH%HtPtJndC* zOsuVBUX#*fk1r9V98Gm1i2O@1r#V1-OC)dhg>te)I>mW>kdk{F&jb9UKFXor#Vq$) zAM(G8g{q=HO5R;5*Wx?HAJfjYx70V?z1TCVKZqUo%LZbMcqldXm@)8ahsfwe`42HK zzFJR7dL%u@_K67FW7&%rMo{!IIfz&4W(zm_^HVR|;&5x;-Iit!<7u(BVjnN5>mV1~ zakD3{-i~5r`7-bSH2PLstkTGtyko5L*eqi^&u_gb-$mZQi+b7OYS&ZVsU+{`9)H4V z9d)l%Pl>A{4`UwR7&3mSt9i&pS9zevN<-#Hbt%?OKIXB_px%dm*sUl>^4WDM&0S98 z6YJ7rcRAdnp@D9407X}mi_4Wy^rLVOIgWRzOW7XsBT($Os!R8(%hBaHdj(Kz4LPzLzH)$)TSLBw4IOo8gO_}WZ>&Q}-cCgO&|k^-h6?qJ zx{aDw5GUcWq-~B 
z)uV_1cxY9R9(H%AS{!=W%pom2eoAvHdRE?0PARwAcrk(QblIOKHFe?;SAFSFQ+V1D zgBTm1SAs(1Of#R~gYufADX-U{lIC&|AK61W)B^JHZ9SCBVR9JbPhu4R)^ZW!(LI!V zFUZl1*Y81*ZRBHoMvUUsR*qwp#qMPxwHi6v$+LZ50s_CFq@BdEMD@B-f`D*&Z{=Wz z5-}07FY)$rzv|v%7xaXu$&=g5CoBJn!*=WyfP7;0Ux6@Eo{ErsuDG#M=a;^Nsc*uWqn+Jr4@&CYO|R&X>KEuvg>^3xB?vlK-k4B6(cK$yAO( z@NO-7{5m+8x#HhT&fsi^vc9)`n0XW$e5mmY+9qBj-hwx4Vsr0R1HJzXSYwj2n1^Zb z)8oLk)=EZy;oLP183$TZ?i&z+GbL<*94qtwuPB*=M2~Iq3IXt^`a|R+-0c-A86uCf zB{r^Ne$ovlwJqN4O=G^611YfGrWA1%)YI?ci58drTeZ}w+F9bMQ?&ss zb*koVsZ-BuUbkMOx56#YV>7+|^f~+QEZZ&E%T!4v#7{M;$+kulXvockq4 zO1%OvdjYMYR4=qDE+LqJ#%o79nwF|qDDa;tl5YBUG!Yg4D^310MbiJbY4UF=!i|c& z3WTvu{xePNicnlFvk%=GD)*qA47n?v@6Bt{K#5mL-zWS7`<^0nA#S{kx@T(V{-wNy z=!+}mZLJTa%u2jMn_Lea6* zZ;Kq5{=ZF~f1%B3gEoa)I(@BOog)Ty^k4na+auMdk7k~kTDzqB^w85<``CZXEM=zF zPN_Z-TE71C=&5$|jb>9tt(;Xj zl(73cJ&kW#XNa^zU{W0qeHj1JP30p~yjn74t;RUj=e%wiy+R-De83d_I6VA5tDdh5 z=+jyg%tt?ObFJn`r50SfO{Lc?m@c}T=1HX{T&sCfsR`F=ZC7f-wOZSy`e-J^FVpG_ zbqlW5nl9Bxvta7fzp)(ZFAbspp$W@b&{IFQ(0(G{czp~po3PypSD9r^PelDd*iGxA zny$>W?t0p8+M0S=Gi{kkwI(Qa%C%Y(m0474!ZM4RR6eOb0~_iLwTe@H67{rJajMT4 zJzbMho6C*Tg=aa2tRA8#ueD2~Nr`Z&w@0B~*e=ZxJ#CjpbFh@PIVv8Grsr?c(^vKM zHa&ey`}OLw5cNRKC>Uh>^A#Uz+mXAczX*ZK4TZpos~}9jH`MO%+(!b^+KJ%@2_I-_ z45{Ku)lUIUo$8_~G}WZu9YfqQ{1T@9_f26KYIBEIN^c~rvRDt~@9BZurU!C;TAH$6 z_u_t<5-IC-FYb3s(|Wz`#r9iKuOGmNNeyA}n z>#rJP{4S-c6;7Gg8EDO8S0L3#ce=&)$mOmHxY!=JT=cYVHJ@~ohA8!m)_U6R7tQpv z-7gyIX}ezps_E3J+jO&U((gI?eVKZ9?5+=RC~IlN>nA?rH-tRVPL6Z2rw4wmsZvMk zKhrz!yy^!jrID(SF5o75x2F`4uGMevP5-f+5Hs-qfta2qp%*+W{e4Hjg}-~K_|mCg zncP$Ca%+Yw?ajI&OMAOrp@BNRrf*rm?V&Y@9&lezDg7FFs?Y!A&fY1MD&!ZQpp=CiIM#@I#Dd;*7&+BvGKl}V(jma^6^k)~Ri zbMMmWuj=LZ>FHZ~`bd8r@jypt-L&9C&BoSB_)Trm=ik@Ua3nPb;Al>9IR?k~A$Y+? 
z4KIF_ujXUQJj1-G?H%sYdX>mrt7l>$7EDs!*Br25yoMKZr4N7~yH?Y}lT|U%lG$?L z8_R_VV>K$rDl=7EY3_kv=2P9+ky{qZVVi=gntgb}??$S0_#H_VPdRS9e!0nMy5RhO z!{l_ZQ1pK$r^2`XdrVHzIUF7~<5j0Gn?jw}yXz6XyV?h{LOrdGIry1oJ>6QrKhVo* z@t|xbrH?u4^gdk#9)0lf>8cjQWJN7#5B^}Rr0|7UXgKa*22)}*ZcyUFP*YK-Zl z`b^mp+5ZP7r!63Og_F||k@;^-PFv!!tK8%?y`sr!plYlC+T^q#SRMaO1^Svw>WkWV zthi|S)Oq^Pcc;$NNAh|406tF}08;1aGlO~hjA7nBwecLECUeB>+WC5!y4WKg9&6%Z zY~N9Ki>_a<#-;n)uQ#N#5q>kyaY zwEu1-{8V3QOrb$rJu*m9494anxavMhd%Jq#J+`>)PfcZudSZ5k zaXmx5*&o8w5jQl)O7#iUG zzOuYloBCoAPsQU!kvk?rY)BiU6FhiK)0)c-%dO&V6z;QDueNleq=}X4Q>d5L z#7^}&qNg>rQhhWv9U55A-=yBLyrieM>FIxC6>l*FvQL!ErlV@Xf1~GX`v2EEe!AL} z;uW6gaK>e$ihF*9TCS|&|I?1&#(%v?2Y^fHOzS_|BQ&TML{rqykC);3sG^Gex}Xzd&f?DafmYCnG%Y&AYS0_!B$xF0KsyE`JG&R4aW8#=*jc_eHeuR_ zveF?;ThLee?M!)CHPuU72N(NAit6=lhnh_(*DAM6_0kA|Pd9mYGk+s8-cM`XB-KkB zq(uRjA)hZ0_%jE-{43QyIkQ9vlqof%tA1~--c#0Uc1!hYsHTNLP~DX3)l8%F3RLg* zy$lLcZus-$^ye`72mWej3faM*CznL|B3r85NR;DwwJC;8A+?kNZ^}3fsvE5yC7+Xi zd5q1oqh-$~?n5-8MGfkfCx^7K(CnZxOHs~Zvu8B};|KT##l`GJ4@b)`wHFyOe{bMe z`Ut+>8fc)nZltKz%s`Px6O-gnxzR89PPfpzMycp^HaQRq(T_N!h3Et5Zj#)J*ZhS- zlCe4Kub(OCvE*;P|1*}B2bpY@U2DmA08`;{X{dY=kgZ2xCw}f{N-M-yC?|hbato!) ztl9|dJhA(VxKB}tXoZ(+~Sy7Nl@Ti8U}Xo`V@SG_XnMQjmXBeZEtRcFhS zrP}bPIkV-?Qed{3j?b2{)qe=So|r9<#sL}wGUQO+(^c6AfT|& zHixtjj0e)Owb&=e$nSmGS2?{_-XRVJS?~ecqK)FWPL5&xrJt0f^)hyL`~O7Q8)Sc} z@QGC^*dQO3dD@Rk=Eq{sRLqZxWs3mn{zz%LFpb}jg4Rdbmn%Qy{PQ1_{7>Xu&IkWM zNjv0X$yD7!F*~u}R2X3EPWdRW`vaxrVQZNG50srJ=SoMAp?=wc+Gw~Ux0Fr~aZ%PN z@=`9f9*WOeK9^VXrDv$0?I*K;!gUAkmfP~0 zXO#SW*_FYRM+@XRQXqKr-Y4JShfgb!UtsH)bPN2?eJOh~_95NfkM8dRl2Zrdy@FPu zgEBtWbv;e~hq1HmJU-EgISl!ASFCPy#Y9o|usmDpfwJ*O){i)96^8;S zo4=MDGAU3?ZM^=C%v96f{8q--S8k`sa$L@mdYSOW&2iaNIuDM@ak;4^0dwWJ+)FBk zW!e`aYlhIJP;MZFA}hU6?(CbWx}FGE+6V$)R0EeO0pHye!twq@*(ajNMF7Dk`E0Okl{0SpCAvH*@0;**J=p<8PJhXqgz zG&e=oY|sS!0$>;T#0cOdFzEv51DdS@xB=kYFR)P&fF}aj2I7E=0KNiodjZJEiWk5? 
zk(GWCM)HO{BrrM1QUov=#HR$%7+LoL_-=Ag+-%Zz76`?^K#|Z(pi}_F2p|+dvH<2n zg@u>E|3h%wC@@O^92P(ks=gwCZva>?19*U}U;+FJpo;)fz-PDsMgf>3fN;S_02aaL zvfPVT`JVi*VE^ZoVc>TK)|w(rdlhx|0p_OJ>_?rh%FTSoNo9kem~~i$PeJ-=sD9y9 zd9*YOz>}+JtxfQOR@dNDQQ$P^8Zd&U;2KzptW(!eeNSN&0UQCw`&Sh23rs;%izlu6 zRSuM9*h^d$C4PlhJjEmQH@PK$`#T!;o1E{qi)Dod4umLwgmmHq0qSmvIpX8&h|Iq4>h@kl5ffr z`Q3le<(u*_DF>p(+=4P)(Ky3zL9}QYM!f|+&qIfEw@~OOVBBs4vjUhdw}G*k;mZQ3 zDToEI62K_|G(wj3cK|N}(@FrJqSSB!>;i@aFddk~0+@nI?+c&=nBY4A5 zz+?cv%N)AD*%Yf9N1RV}83bDe6cSct6Q0+?xGF$Xc(-jzWnqI!oSjq|s zuM|ITD*B7bf)T-Dj^a`dNVmF}(aHeMxtOD>M>v!bv`AC?O6Dl3Ya&cv$=pu*5~Yt< zLK&$Lg`QLaG%(Rc39V|LElF2_Qrv+uf&7X9=0Z~MYUZl4R1;Lep5~@ZY6hscm${`R zov#e8walxf!ax|s$GlesZu*#yN>e~o;A>v}jAW^bSQlhID9sp)mIy|W+_u4#GI9=0{MA^-W+(t#clj_5xFCod>{7 z7MN1&xP#$Vfho04Ltw55%nlg9ItqZJC8+%Nz+rX46(hQj40#n+|U4dC6Fhw==h93ybDS@es22ovV5q#Qh zmU%tSb&y zOVI0>7lc@9ULZXIr^8FlCsoXvEHuX+JMSYP4j|Na7MS*g+Rg*ho6NnWnus7<1;%|e ziW7Qvb1ou8zQ`gDb%(LMv8Re1$zkT7;Aua)k_YRzW)fTv=tFCO!8VG&sx`nJc zXs#*93eADelLSo;6l<0QE;%6H1yS$k=rV_{g(zc?1yNqLirPf>leKVusHB9xZ$2wY zV=&gaZ7}cj&DU2hgk+vf%%~wDZhKJ4xebV&JH7CLvQb3XMc|1~Y6l3Eh>r=90)Rd= zFO@zQn2*p5UjlgY5hVEnK+INiE$Z>Hd9!p#pg)G}g01&v5FP*kwzaahn7_AkZj_a4 ze%;Q?hsNfbf0UYOe0oWT0kpqr_N6bknm-bJl;myZUZqksqFbMsyQ_uUe+si5(aP1L zZJ(N_Nne9Jbh~*v*F0w@1mqwH&4YJ|=CkI(z@->Tl+A;FW&rWK%#D=udFE+Ma?z5b zh0`j>cbWZ|Bm;HxGZfZ=TGZ@w^DNb?mCw!Xd@DPcS3Em65avZsdVo&iwp+w+)s6P* z0u}7l1vWZN%l z0VKZuq@NC&6D3nqi_+x~T0=k-{}qNxfqL?ld5gP{N7?#=+0QI}v@q2bu+v*|qVyFo z!bg7q%mNR@L5aQ_q%zIMVc!z{%M|$fd&~*{?vO|@?WTTpCcyz zAu#71F)@#TF^z#DkIT2S(o(C;&2m=q^?@(CTTV)?F>+h0SZ_S`~ zvk(vb`S;vsM(&9oyJ^;kgASTrjqmeI<&o>(I=dimL*LHl7c`xuG}~yo?*81M@#7Q5 zO&F1!6gX_qh*1ecUkt1>A^FXOr1(KYh9pl+nh-L0(D;Oq5#!^N22B_-DWPE_$~j_r zHoaqV(u9O569UIi7&IYad|*=Ygur1FM~zAe9Fa6)!iYhmMobl~#wARcI4)^K((u5^ zBPJw*DlmCWo4^U<1|^LjmM|`G#Q4DInAZbS5+*zo=x`-WNfUg?}w*NsL zfp)&M_A^TzYPZYc-MYiX3CT@|j7lD#5STnHaEPN)fw}>NltMj789098m@&!YAezSF z)-H=xnYPRF0e9&^UfDc!J6! 
z(3sT`QVL5ZIM(EA#}28WhN^Y~*j(pa?{&NV2aOgj99U7=T1F8f6A}W)j~G6BkSbxY zv+Rm^wJ{RgwwC) zhG|QREso=S93Z5K0tg7m32zUb@ZOg*w3K-3-fRT9dW?-aNmV`N*_LlT%LAL<^niGMp zbG$5cRv-j1+gSku-a%uEhY6o*rC%HfPXCN`Q(svj1*IluiTIqZOao4|OH+fve(MS#8C;&?Ph#<5xUk`#9 zK>(nXK$yf%(DoW$0)xPO@JQgAqKBU&501fO-G~KDrE7KoBDc0GR6O z$aiqpUL2WBf*{>Ua0vvjIB?i%BzTBSyTb?$=RnXV;igsHEuj}QO+Z@Ao9&mQ2p^&; z?9op!2yJYTu^0r;=_Lh0ypf;*w6wRjAXsQ5NCv_44jc-M1d|c!9XZ@I666~Q0-iMp zIUbXXrVdra7zy45L4*TAx{*LJ5NtIP{0V~g4jj&b;1x$p`kPIrhB`sI!Q%6fh8;_< zhB&SlRw=!N-P$)XC=~~W_Q)p)l8pp!fxyvjYm5XtK+r(1L~uB6Bp7FT+C!fh2@*h{ zy>Ze!G_? z0HQ#`9cyX$5b145M+!i{aN=QUED|W3_4rg8XB(Qr?Zmj>LwWl&TcErB;m99zhowM_ zb|}E_TJ^KPG~^JZ6CCo7AZ;IU1sX?7md1UNzK8tf0?;pXFJF5HRR21qr zExcznML7`sg!Bl9G!DZo?GXZv!yiq39P)AKrm3+*8mCQ~5*^Z=?^{jH9nw>fw)as{ zziBfN9UKUjVV8irL;42teI3#+53HtV9n#H_whwFqJs4?6ejgxhk8~oxu#8@<-}D5C z7etPJwf^VRCLV^geUd0h-bdO#P>S?+q=N*YUuTfE$A5u(=1;4shCLH^f26hUtJQCc z1j61M1zlgH9V=Xlw4;d&koIz5coONl4(a=bbgjRvCRd02V5F;vv|hidIS`H#c0t-6 z1qIzGdjXNaH3eyV-xle4NIN#kPNeMtY60@?A=eTA9`N+-^Hvw%v!J#Q`l6J5f_+*x z)0(3Vt<$;K)B4@}q1Dt=rNkyl@UEl7 zBOYQ#(Hx<0HVEuK=V*u=5Nrj3WEaJ^M{5r@?KV3L+TZMFhLwdI%{R7f=Qwp*_khV93 zh>LbF0G^Jc0$)dhK?Z^+An+@CY}FTc@e5e2Z0u9<#2P{iN0LO4R1!7g0-oYqq7p{} z@wjd8-3^zjHu8Grj%G7aU@HnpA|w8))vLCC1}gTzX)N{sn=Lh)8qz1Ntu^X4(Oko^ z3K8z?-WY`KU7=x+0YGUV>kk5Ze|rH0@gOh{1W&D5ns?3Co^buv;v21;9zgSY-HLWF@UES5b&;dsknpH zyUHJ$h+Xg6rLMe8w<1s!^P<(OJW17v@}>Ids4 z=yWNI)T5@oN_j!Fa}oOVb1b+U^yHUjezcD~h-Gem1=!780p>E({8Ic*`Cit^o0h*U zm;J79>HbzlTh`4GXE#v3p z=*=Ki!gn~~7D{-Q6KKKmXghyn;q4Jfehiaj#60Visb}^qs+$RHO46?j`J10Cq2|tnv z-G7jO<(Dm&kD>hMuudr<4wG?n%TdMZWUoHnF5~jWmk;)t6hBK^eiUFHn~9qIJHC2f zvVQ)6zO?OWu6kQW?<#JyDo5Qxy4vrTjTBRD7VTUmT^;02()s&I-OU}?sraAP{|{MT zy~y%iJ8uQ~wG!Bha=`N5pxzJc__g9yjh-x?*S#ze<&L`M8?cmRlnnjWxi=twzs&j>zKO*>#?k}sruorH>~Z^buTPN zWzMDG)}kwOH#P5*|NQK;=c>pJ`dzR-B*ozTqQ2SK=FNq2A@EydH9i>F76 zr(YIN&*o3hQMX0Af5&I!A36W@#u9SM$T9NkyXqK zs-01-FVBZ&yILR<&qGhoM`z>(d3q7%2kLE-e3${$-&e6ZA6wM|X^-5#q*6itI3;J~ z1-boUtl#*^%Bp|iRtl7Ta(J~WUA=;oJSQ*7*-cn~d`s!Pf03>V@<>J=>QmHg!cI~) 
zkpB>qhveW6%x`|txaePaWkC@L$O$>U(=<>#5AQJ7epm;n#S;%*)9zwfBEmv$HT zY8FqTt={Iyhp9fiA8o65Ir3rRK6y-TKZyAgwA=_^!Gs#QPWB$c{P`3uzJ|8R&97JE zvj6e7s|E7oLUKm7Qp~p=MK@;X;0I{$@rq^tqFpVJ?vQ8X`VT7&$RAr|k37pTe^IgO zUx**)|1bCH_-E+o33M<=*M5U;{|@af(f*%G7yZvy1$m^H{AG2J)t)UYJ!~c2TQVh4#pc7RjiJ8 zw^|_WlVkFPydXR8t#qK;CwD5&&o{q=6+&`EZvQjpyBDK79mty@z>7svOsn3n%!<_v!^58OzTOUR@$!+q0JTcb&i-jr3havA_ zAHfb9A4M<7*2gejBR9zd?ecn;P{M>9T#X$p259G6v`3D}qr%;Clg}uj`3Y>WM;?+B zvOmQ73$phq6_?xJ?DN4*SRwfgdXk`LBXaG_nBO3` z$jgT?KmI0NcxK=Sk;h~|#rDE36-4Cp`&hw#6y10X-TVo%0b$9R+MqUDDU8?eHBYjnp(JKLZW@{~LyH#S!5%j3_5 z0(tu-M`TN`EL+_|F?miF>dU|KVYaYcwL1QT?J;3W&d3Y$Q2issN(b2vXj^?XvJwv} z&d)z*Crrq8M_YTK-96F6SEEz1vp2^5X6fSm@2v{*k&ekJIV0Qqlnv(fTjYQomoEAj z<9)G#(SB%me{?_&$uW6Jwhz$l`T2N-a7=;rw1pi`-i)pvg7(S1{wGgc+ohXPv3)33 zYaE8IACC6Pog*+FlZWW8aE`_b<73csa&j!j-Q&<5vU@znvlFV8d)Gd(S|BH5Q-Ao9 z4|6tl-i{uT!vN#fDd;BIQh(Ss?W?0}cv_`EIwcR!z@u|VLG z`6(5b+uyum|3<9PBR6lt_<%emFF%9%$!F1-vfBS<0MpN5Lhokum^{1{#ydNDrfR4$N2QeN^&pOb69#kl*siu3dTzbggu^lOsazsGp}4``p<`yaUohVLD|$?x{|)1#zn3n~|JkZQUw}5%&-s&>=r8D)>%@ZWY=Ze2*_M-Y zsE4UPC|&d~ze9uxF1bSi@bE|mgl+d;>>q9^3qZWwpTZStVv{AlkTf(;DG zzK`*c9Fd2oVg6FiiBrB}`9pAS>EdkbtP1kP9@#&ytdPe;azyT2jQNer(D4V2Uz)ck zMrA?aF?rg*>R~P};f?4Xc|cy?g!$RcV0q49 zfv|3Y1d-m3o|Bhk@4J}aCU?McpUu996~;64l00|};|X~}PJWL0i>lQzW6v?6PHvLL zFEGDGu9GLf$Na`0R?d%EZ@DT^`?^i;ki}mxzxj9ay5bvU=9uxw!OF!sGg%d={>6+u zCwDf*4mxUK68SJ4j>t2zWtseTRfxxF{$N933s z?2q}*0h{4($g=}6VbMZ2Ux!Y}_UkcTI~eV~5uLnov*P$STcGwPOjwcw7vtj1=#*SP z1mkUTa!3~w#G&W`**Og35qU_i9gg`Oa?+)Olw3Ok8%W6ua_dOUuY2g=D0DZaf(5yC zG*+;WK@Z5wV=-QTE80hQMaRd4m^>uUPR0D%JISY`7w<-o&aC%~OWAT)nkr!l-4$UcfNsc4vPk!`(1@e6%B!~%l{vnKaK8Bu> z?aMLlgXM`mfC70}C)Z+XfE<&jWN`)7Z&s}?EMc`kdPL61wJWhfm)s$b$g_&`3!8Nn zHrOP$$pi9~yd>AJ#`gV{iwkRSRUlu#oXvndBa4q?gLSe;?vRJ2i~hy5EXey83v%rm z?65^{lVkFjoEhu><-fte4jN>i9Fa%ll)NN6gQCCkjg_kl%snn756BbpoNQl;D@akO$-`d6`>ofAfm#*JFb|c}$*>m*m>NV|z`qudMdJ89;{; z2IMh$MqZL@pP&Jded+x8GXwB$zzRe1g4`Tpeng& ztD#S9I-f=#id#L|%9D{>4PkCt@x{ 
zv0ePrK;zLi=)p#Gv@JR#C);7%dY*C7zo@AxgXP0?819IU$xE`k6Xqx6XlIPijdlNG zqUIQ@3}8w2ccJ);(DN6ggI&>s-O%-y6)x}p=apDLBG32Ff(bn70UW6X}D>}RaJ-iA%md=kqGk_^2EXnn&u>qglBWL6#+4}f%UI?#v zq$jj71Df<7zG7{cqkO1@b$!UX_HQBdAuTymQnk%#0lc}8C3mfPQKLit-B~gO=~?ut50a4ml<_Z^I4(viJhVYhZc&jlYNq z8Cl$k@fx{F_Q)Z*S9N~;kFkR$xpNQ3XXFKWd@ts^_o1EpSI+zIRSMJ@p zcD{{q?>o6y_y43QDDQ@~6e}d;=wXbH$b>Gv^Se-ynS zyEBYW$h9BHxZ3|_0EsH7xbYaeP41Bsa!PJKj_qYXr2D`3BTNW0^nl#_F~&ROm~8zF z^IJco`+q&I0K>>?vP{hh&;(Hx4+pZGfG&Hts`+CI^-tVBe%(svfBS<00T-$$WwAgUXpDO2V5t& zr1Rs?48W&^klZ5=$zyU#o|DBO?Zj&SOfSi!0=&s1HKrG4jTksXs z$t|)^4#_>RJpP7IAV1|Hr{p_x~{^q~tkSoPYzaksD-}9ORbU-@M`uCB)fb5VP$UiBN#mTe<$PT$dZjrsJ)%h1x3#3DGM2^WrazdVvXBFq?|DsYL z`20eM88bg5uU&d77JIK3FUc>xyLA=k;>CKb43pB$1qk2QlRcXBl3hiBQMC-yKuk`xw&%QzwlNCs(;ZYN8|xHAy3H}d0D#XU)b-)5!cBr zvQG}lJ@Sw|HrD-%)D+~eUmgQwaV8G1MsAQ@azO5Am-qje5=P_+c}8B4t+Q~z4!K!) zb^rG$p-qm+19C#1k~8u$x7_~4IOPmwXVZwu4RVX@kppr_j+E8@Hv@<%VMI>IQ*uh4 zlNV%h4&DE4S)d+fKn}S`l;EtOpL~*=WRKh?N8|xHsd{&Lx{a#^(ka>6dGotY9j~q5r0eL#T-8#y zYBoZjygXeN%7=+J&Olr5LAT$Bwk}17dSO!CURT&3$AqO`IMj4F)(e3ecl0`*#;vbl z{kmSK(!`f~*-7JAuL41hu3jN9 zcYd+fEB2X$m^^qrHrUcj=b8MOUJ1{5u2;e{Zk&SkL%r^sig(pMA5{vJXJkt+{AL<# zk^^!~9&~U3kzNN*w%5%My5M3=h{;R6E}I!ZLoc&toaklNj2*oynz2WYO3Ur9UU5Q&54hvE+Gu&x&s zGx4@wRIF`)p15XO7|wsL;|A6E+0&8YX`Rf4kfQM5;{U5@c2 zMrY*Fm1VrR|F^C#3-Y^RNEX*%yw*qC*P%o5knCM=^5yo=@Bd{%;fx%A!c-{Y3$k+q z#?v9Xp_dslyCg7H`(F*f*2@Z+gpfQWdpBVR2{|L%pT+#Cc6t9_QbP7QtT4P8os!45 zVm!PJJtX&T&*Q86znfr%4taD3#@nAq56Dfu%#hilOLF~ARbOs@li=Tl4c5kJm)yA< zDJm0uzRM)gIG8N*2Gu{0VtMcJv}WroHT$>a|#IdV7Hl zw)Cn!rh@n_#v9~7ui|6!o&Uysj~sRNDn6z{ORw5v9Fm8B#0G;UIwnv4gz@Ol=x#y< z*ziVJLT+r1@c=B( zzZeRHn2XD7-h zN2TTVSE)%^PrkwdMS1|J_J4R z(Zh4m1HD>KdH;7W!-VvM=tQqiV_uP?mzy!R6U>kGaxo@8dko`Vu{g`>{@?lqCd7L2 z6w^RwT`^~;vDgG1Z%ST6Psoeha{HSG9SbY8$RWA61?G=8pqn<@*@*U()&4gF=upCl zJlGx^sO^9b$^K3lAH5KrNUQ$K0r-1h!t7<}sa^`i40uU)_QZUTJR`UF($g)Q1_!Ug z2BzdCx&CU*cgY>{Xm89P9HysRHUo$c$Al?)egwwnN1^SvqFdyUJns5fVR0(DaXQ-4 zt1y@WPsrZcnBTkroshf!g_z)7gmyaUgly3koJ5%4`xmr-8Cc%`KL`avdinDjUrd;h!yjNg 
zBBzgGy!BJ`{0a1;;{5&}{TdTuvh!PvH}v$$W|K~~D(09pPRY#;7$kT<*Pf5gO3Ur9ZacFVU_x_ebhZoH*Rvg)0VH}hW8+yJ^Ibg| zv5C+3)N!@{)c^*s#0qUacdw}s9E! zPtVzFV&DezpE3 zkIR3(KGbvfnitahG`1sdLT8^r&&iIS>({i`BnSTo>dSAh+ztg|sb~E)4J6;dxbsc) z>|wO~UG(G;bWP71T(mC_$flkJ*mRg?SfT!7bdT)*1mn}6qT4@5FZ8U!d4KAJSUiad z9X-vk8BnOF88&V_jrrlP(MxjjdyLQizH)v+4E3bBri1DFVx|padm~!t`8Q2G+8*OE z+1?rBwUvttVq;a1Umlv|Hn~F{ya+oOk?ogY+$pUui1u!jKyHv{yJPs~J8b z08|gtei%!u{cj3VRZy{g1-eOYlZWIP*}f9nbEQ@P`eXU&3aBEQxHNB~ucC#XtIX6F zU&HuF&r4ln$+)d2K{9q2K!@a%96eagmq*VG3gkDd9;zk?;e8$Lq-b}BUR1s> zsJ3AGGmHqPX1MRFKsA`P z(4&LUR^|JJDt}B4D&IF$@fo>uc-dZ8r{U0 zUX0Hs#&Y|sgh}PQValV*cfXX=M@@wy-}?!=^&~ppWc5cbRll(*dSJK(z+}RdABBwhR@%-JZY9QVlD@^u7k1Ai?Qu(ttW88k%>eF}?pIn7r z9H>fH*BR@e+{L?B^`rMXOqi1Eug7@fV08P9=qb7JCXCyzvfTdalBj;{KFnirJbhWX2#D(_xZD7o>T+sA1Op%TUFRX+O2$;q}4sPvVH(&dwEk-X=TS9J|{v`*mZQDO< IzP8o>2TL_XSpWb4 delta 82320 zcma%k2V54%^Y^oRcMn#;M=8>*U(B$N0H)&4b^j^sP6YSL=%ysc&pQJk8b^?Zgz8gvcG& zS@T(#)Ml628o*uZ_N%8e5!vpF$uzI-%V$A;`=B+Nhw=B0qiki1WH1TXQSK@Twml46B&NdF$!cg37@>V7k@6iXU%0-5OCPh}WBLO~RAUbZZ8Z zn$xX$RbqK1?iWTK0=<=T{dTW)Hd?&$1r2dKCaZLD8c zeaqpB28UXkIKIWdnP$zX8Y5?xEpeb!iL{p`9M)^_7>!es22a*FEtqCasg}xPrdfk5 zVLWP@HNg_i2TwbgZ*k?k@kMK4jUYbwvx7k{>5SL?%$o0-XjxmTsYKH(`OKQ&7A;jk zOKu8qu>=~j{Xexjy2lnbw54=Ii_#vSTGQPlJZfEld#n>W=d{5Z zUNeXBl#0u%;tXE?9GGrtr~ItuY=k{7%j*4HtLV>gbuozsa$tdS>!NIo3$8 zOuixKppSP3=UsEGxwT^XCsVB9wZr6XQ(*W<7v-k%dI2T>Ech@Q6J&|a<5t^R?omWu|ibktfH;8O-Y z?Wjh5-$1?mD8;x*t`;MH>``k%pldr*qgO>GBe+M3YTpI7?Y zn%));`n5F|PfNb0gd*l)t=(=Lj~hvO2bibh{O)Dgjs4ncZXYZ?L*(h!zAJwicrdAb zEaMXctmcm4yj@#sM8`CK@0_);qaVL@&gvKJ$FHBWh6npKo^sA)QW0B4cojZ+`~#C| znuDT6TY(pU0-y7NH8t4P<7;HcJGhm}KKp?+C)kzm`@otX?9V3!SY1Pc_}3Gy;UUp{ zUR!HMh(F(T&YB;RR<1#B8Vu^ThkWXpy{xxF$CWhN#&eYSHuJR3e|-R7)!dr<`je8Z ziRY}r!@R7#b91R-eFtmn&VBf!0Bd6BOuo?H8rCI|UwQvvP8Wa1E5Qm~!}#Hr*03-? 
zp838tCaip+oa?{pY+<{t;oVaBa><(CEtfa^%9`2TkJtOkn%_N)|Fh8Q-@{cC=t65( z_LViVM*0#6LTJwAQ@w~HE$6kqi*a&M%uV5~h8_#wL} zX(oKhdCzR#cA+(Ck1ni;5ZBLIGxsF$3xU?)z0gaSw(eQViNh%DebyS2p2$;&Tc4x{ z^G+?SLHqo9&jkmg_qmoI8U`CG|MHWUm2dgrmCC#7mABR_PsUi<)}Jk<^Kq;JUwB5V z+pO2UtGTG#;lO<1qvnI-_f;zqpp7v$O*RP7^pjF?Vvz3G@jagvSv$<~b_7H5SGtED zKV!`~>=k;yNMyvXf25^xTb}9ZtO}0=UPYG{a~YQkh=%x!i!+tr#q*R^*2WpN*&}PO zjNqV}0%J0*(Gyb?bJylTQOsB8!Fp>sE-X^(jqd1V;m6F@8`o>jjr&-a!=(Z<`GZO^ zE!1eItZJsEaaLa~jc=#RH5z5x0CUEc#(OwxayeAgdwTqOUCylSS}{}H7G1xrk+$b; zx}Kuv&-k}!ZjoZj*A$OpJ|^%mxv+sI(KTGyU{eL8LfA@2_1U9fAq>+FsbC<6zv2pt z6){$%!hT?((Jv}2LVZ_k^?93QQ#DahUz3RHqM|>KnkhtP9&>F{7c&*s5v`$^V(>4l zzecb$nMQ-TW(`|vtXv4W{Dr9WjIGjYrphSi!n&C%Ar*D5KJuz^9f7y*KT>I~%`t2Z zWMo}hF9tg(rej(aE-VaU>a4dI?A8c)#ncu53K4pksvs32co{?xDk+YpP$W9RE_lnJ zj+K2@b8bV8c*=*!#Gj}`v`n!Z6i?Gic-uOK;Qhfrg7?Z2-m~nQ$5Z1g z>h5C5Z;2cw!sJ#$P^+|XkRkv%M!z%)B=V;|7lI^|2yy`tcvz-q$N#&!b8S3Z2!X=`M%kJR+6*&1>#iuWID&HU1j_Ze%=Ip@zO z&$GH_2l3bDS;Mmvcp#A3iM;PTO|HPx*4Wbtyuv(d=64Y5Omn_G+ZuKzlKU<{n06+t zeCakDqzl!h>z=Ji*G`x2r@5MR$FXoLCEc1ansmuyH0ch4=3EeuolAx7m`|nQw$`ke zZB5D!=CNa}sbANlKbFg$^kz#|n-9sR*p?V4-^-@dmMn~iXH&j-o{&wBt?(S0O%bi| z9GOiCt=NFdC4*035$%~IdC~s%yh_PLre`)qE@xi7YHRHnKdzyc#-#=7X`F4~v3Zb7 zMG-~Nv1Zx^9^l2_l&N~^%b7``fhXCU)+NoI8btAm`dGvDDX9w&)WU9DnqH0QPc=R> z(}hr0^Bpn$6JDyA{-@RL!s}}x*o6lmCS$ENR<9##k)Dde(Z@>#0u&Z8s-&-dtXJYv zak%w(crEYYTCw-3?4By?WHuLo=XO4L%>|obkqat+mNGg7gm#opTcPKrDg?DF4a)3HOduh zX|#$<#n*()rmR>kF*(3MfdJ!5tnpnDSemj%==p+A$b7MSaMxRWir!T2W|-5qeCU4r zON?ruuZ{m_+R}lhPJl#=)eFXtbJiKmv{`LRvDHL%LQ!Hgq!SCIAuCx0{-BAqpm9&n zxHP@=KvrOj*pqcGA2*~gRF~4SuG=kFdSG6bqOYuWKwJW|&%>PsT zj;}O&2VK)icOD*^jSX^#9Zn48CIwELP*-I_9MN}$2!wybt-E9(`-Q%M$gZ%;Kq98 zD%25d#s})^HR~iM8Yurk6)lb14CWi&U_Kpw(S(n$ve-mO)TY5wnW2Y`o)_k3t5MMyTi~=R-CxB0!t^3Ej>unWh>&5ZzAi!VT z6x}W3-SsqXifOKv5wFw3E;gsAqoGal?zJ@_u1;~mhPG#&VZ3{w?b+rW?rpSZ(RJ{j zCfIhNNQF=h?*KNh11eTTt@qy}gDmhBUOc#V zGRE19hB&)vn_eB9y-371<1QfIPd>|7*=@kQeH1S6#{Ir0`zQhUc1HYn`(D;lJl5IU z@cV6_UY~Iz@XS836OqE-nM+CgSx~WlWi~fWzG8u^{|Pe=-A0A`F`awVnDS<^Oy2vH 
zwx<(zO5Y`U{S-ZkV+s6=w<-2L=EF~YKq>FB0&el7#CR6VBWF`#Jd3eBYw#-DZcuYJ zi{OrdlslWH@~|k1ox_58a1>=AWmW0%95%>u(@?DPK8j3$)r|Oi>6Dbff=i~8%}!D9 zF_t6woG_C*mw8L=0Bq_ddr*(#tUWcI$3jbJ|Nfj3=Ycj8Bq2$d_Dr77dY5Y-cY}Ri z+iG=O<3*MQtg*CKa9F@*@QMkPu>j)OcKFK}IncnQ(pK_+pT)2VwDEoRkgsk?NguFW z{>+P_7qXx-`_*^%s_6?^7-KEyaUx>Ju%>FzB9_T`)_e41F&gX8dus3!mczS89@Jvw6Q2Yy#go zn=)6t^vxE=zBk-pwq09Z_6adT9jJ2~TCUx@8yKwFLb*vSiT~9QQ=#5;U?p46Jg84H zJHrza$#oSR^Vg=7u?i$(<0x!3%j0_ok)M_M^PohEu;RIm2aXiHsQ-t|Ql_1%>``Ms zM6S*6jqDze`ZlIF<1U{mKs6Khk2Zzg&9OeeMBlAh(+hSIE#|jvgk6+ zn6aBO*Vzdwt9RRP#$CI~$6YELR1$Z;G+=S8v=>x1+3GVkYy561SjU3+x>@A69&5ez zvnYH$6r)*Gu%0!hQNm@c8|Z2-uIbxE%NK zGc6T#`A5iS147H*BNU9(^2rhKDHZ5P9-$bKf5k?($wv2)fo|Xt%G&_pZhS^hHlPJj zulBqG^cz_szkZC;*EzWIQ=d`ZMiyyVYdBDCv5mr0QO}?IDF&(3XQ!D~rZShhXG;-^ z^P&5to`nLmiB6}o)4X9aC2eAn{6rEJY+^CeZ(uNNGg~7S=)mjLW(%v&zgtO>TOeQl zN=n*-(Ap@896!dv?=w&h`k3|M?uRJtV;0RTB4aBQs+6QF;`$i{Z$%7E`i&B{vKT(^ zH!9eQ`j#xC@NHoD{xV9~hUjp9Ii22y&UXb2H*IIFxc_o0NMl~~=XQ4LC3gNYfeq)) zS5oi}H1fO^dNrL@O4@;u;0D<4Wcijh%g`Pp^!63%7A&Q(PcYK#T}FAIK%DQFlFu%L zdY=^(y9;){ltG2NV8>*^q4vIbu^R$js3l1|s#{Zp8t}5*Q=X45J;XZ#3 zCD$Xc=jYQY;s^^bGX|dAK^aGwPjlngxM9c3PHu08lCh(lV6vl}V6sCCCe4jAlte>4 z#$d78R*w-sd&jF|_o-w0*nJGq-03*R?%M#cf4H45aHHJgaQukr6#N-XeAX~K2tf^> z!D10Z>4`|2O(*{oh#30~bWL_pq&>U?Bsl?3yAV&AC*b+rhmvC^n!58b3d#h(0NZox zW8^cH`O#;YtPK{8k26^VR*k%~ST|OIqO$NL(TXhAw`7V@SyRb_oKLbt(upiHojJ*( zc)M8g|NK9OyPXDy?C?>khC~3o0Y>{iXA{dE?vf1*d_SUyQ!J5vK-W*Pr~H%JI05cS zwZCK=xW7APe#wH%3}Jt7C&z4&IXsy;c( zk{BN`l@iaPTf8w_)!L9Fd|`*RVO?e_hy*>-I%QpWEpZZBmP%)pKQ zRnJzB5nr^8a?Yc5Z`PzI=P|@x4ET9XF&^DZjoeT&*cR5E{u+Oqmm%6BY+wNM{?#|ClU;68<2 zLbsP_;w9Fa@190!c;so*DCZJ_{}o>ca?53F`22g6p3B0S3%$q%-H~Y&bQ$)~okP)= zS&~QRcm!kHR6B0V&F3O4i=fo&E_G$iGm5NQ>^1>s3VE zfA3S!RWPicLD5%%cg&z9@w{mkW#ZXm@7*e2%jPC0xY+@YjASUv49<>aB>&4)1xK#q?Nliv;a)tgf(O{6E@ zpzt3tSqr_%y6`)L)x?_&7d%c4rj(ySePS?WiuA}}%KsU)pFT`Jw=ibEaT4>*TdYq> z&pdosToR#^x0o0Aol1GPAYwbU)os?7@rQ>f@eV}ztAj(}9f|Ww_ zUGRQ=Fs0swMVH1Fo1yIx@wuA4wMUH=f@48!* z@&_z{0VlmT`1x&Xu4^vn(~(zJAXN#%W(UEE<@e{ 
zAwxRYs5M=?+FoclASKV|v+=+Bh$3|{u`ILdg2 z{Na`twrtA1n*<@^KHtJtbF+p1++wJoU8a~99NZFLVhpx1qJES-J< z{iga-cp;XccWzQ}5$g6p;UY|f7RFFs5$gO1yB)lMe^H&FSOQOtrc}m#c;0l%WIR&} z`P@u%Iq$>$KBXMa!=+2`8C*-)D0v|#O8|cXGfW0B3Ya1pw7L7$aE0eOlpBgC`@OpU z{c;s0b%b!34dishY^BWrat+4w@qH8#j7ARKPYJ;+y2MWd_fugo=mvODSO|hkB^v0& zYk54zVMLa`_o3}O6?gYXV=2jrN0t981-j6k3gU;DHk; zp&BoLQ=p+i`sS;spc)S=S6gQTece?QW8tn+3sk?z!WT+g0XS9XGpf(VNxou=6>@8* zy(VN(bw03M){70Se@iC68a#saqd7JB1@<2Gap6TgWjuwuqC#CSs_$$_HXdD)TjgO4b~o%{3dUX<_73#F4Q zB)V6VH)H$B#e?^iHv2iyXb;|1%r4SBcsO741{Hd6T+JLqexAH#m8064M#Rz?V<_5_ zr}7))$n3>^N?LJ2It6?2-fST)^WtqW*U0wbGx$tS9lUuPp5aNc-aN_akI`ZW@sYN% z=E8y}|+JFpm8y!Y5st1@m!;0s~@;+3bb*pqG`+GVs_uNH5>DoSElH8opc zrUAA2AWIef@q=nwnl&6t9I?gCg6V2)-j*L9O|ErdiG!mltPUSi@o$`gh|DV>wIku zimcB!nDqm6I_b{qyPd?rLs=D9QF~t==k#xMX`|%Q7ry*ww~k%ymB^&ye!P)ou)!z# zU#BO2Jjl(@1(u2v8lmuxG1@Y9pu=t2-Hdmo#SM64K7vv2F;Z=s?Zzuaq+lbztW=$~ z;<#6%wNjj=jrJVhc)!*Vah5>a^G@5dzBLfvc(eXoje0iZ&7HQtqSX7etRWxW^mVOh zRvj(Dj%+`zh?w|aF=(`Q9r-lkUhF!xZ^Q%G4>Z0Jp9BkEZN$6rAtu^Wi}#ik)Auyq zjn{PQFYdsWcu+q&*qC?o`015h)Z$e>?lBb7@}_*2TUr-;+7BSlX8bkFBZEB0u2Ez& z9_+NDMrq5A6PBIiu|m^F?D7=_>Pm5a#BA6~S?L`|C@4tz-OH{o@7NHnE2!G-50 z(d7SFa^-cSDZD9n<-XAr-IUkoQ;KLpBi>t9Oan~R!5`gcFZMVB;FdeErxAc|`SEVb z!8VVJ@1C-UjxMw+kXNrD+#i=0Of)-yhw>FR28(PA7U&GPvd5b6ZsT`~PbL(Tqo%=> zEiG+bwD|gte)3jYMR5qEm|#Qv;b2p1$+G3Tk+nJRLq}X>cj%M-oc09rSsweNP{tgp zm5tk?dx0-IfS!$urrgzOEY8Ybw&w{N`5N!*m9G2OC$;r-m0n!Lq>WLO^BN4eHj46J zn5_}_rMH^%IyJB8ZoH$7Ud-lmaa(KC_!iv%?QQNF-U;n0{)&CG0UA>{dNDq} zr%u-Z@!}%3(-rN!9rU)NL-5o1I3Qn?bK%kSTMO<}p}F7+ZKzINR=vS$Y-%Bdm}=wy z;!|Q5?fMHyM!5rvbfhyi`9<=ejd8MrljpxhMN_t1)O;DYpJ-W2UQ>(TJ6dY-`%+6= z{BF<+@f-VAF|Bx0c9yPs@H!5?!$li?L0`1ujYcd&aCq6<9ke5!fw(LtnjCvtdV})_ zU4|{DS9*`IauuOD|Rq}zd4F>TEkOrkD~n6{LPm4wM9s2X@uOO=ZdoSb|lV_{gvTX zqSQ7R56gQ6;-n#<^j&U$CApvMuOye#f3GCdY;qY_k_$+?_8AxG|d= z4@lY=8k3g1^agRUq9toWkHdKu8$yTs@&-I-gqqhEU6qE!@jz)z0|&}V;2t#gE#96l zAE74n=fTX<&%pGZ5A_x>;#+^HCN1C@OykpcfsGIIP^(7pkg{xkvD#;oZB>&7@l0lc 
zIC^lwzrv+IL4mh;7~e6x9KmAC;n?4*7Gj^aEXUS1j;)5PjuG}$mCNCHbG48$iP51U zHoG~kD7M?FVdaRoYPEfZC$3ft2cXB%h8P>OP07W~`VA{bvRdUZ`*)R1vM$SbU>OtM zSf!irdSp2=?g4@5UB*t)Dz!{4>n)_*7Kf<7Huj-aPO zU{BDGlWqF{?+rVGCfeA3P#)U><**G%Rx=~*xlj!oN6XN18tP}9=GYVT#7Z?U*&fG_ zQ9!u)<{{;9oKz0SUMtmvvG$A^VB^?qNIAxQo@8IfD@iubY&zABF>#Eg33~jUas*vk z4%_G?HGG9V)qB}E_9%~|dpR8cx1zY!XWJ3<$_ic3+_%foe>({5wbA?)s!t*;szo%1 zBfJhxev7x{Bg^B{yc|A`D}){G1fai`>yrKPRyig-2?BePZCGwo_{$k~6rN{eoA6dS z3im08ZKLIC%!fRIS>_pHugh{OT*FiN|1MHW3Xid@HRS)fj9dwO8V?6kH1Q|=-s=>( z7Q>t4i+Qhd zn1__Z+|$N<>OVU3M*qcJDu?;grS>JqQK&YH2(+2y=RyBrmg7t9n`K`)%$MoRS#$DE z!#aLFtw2)7*zbeu4xU!V*b~a(+TF(WDm~Znv}bS)|K5#Ab)T-n4IrYl}p)mUZ$k z7uh%VW*gt8k9B$XmBV-KA{=1Y;XAJ!zC&z$&p+0kV|+P$-&|zhIXafZwH9gqz>lpX#~j{SZF`DuWqi&$%KHKv)3-WM!k5@1dxBlIFL_G!H`d|Wt8KO} zZsdxaC4IB`LVk7~&)wbHJ}%OFrN5nCeT{maTWu*L)41p%d`Y zYbog)p3c8(PQmBFxb<2U>O5im83|3|Y5qlCj}4{m7qJ?ef-SC#Jf+?@*aj%RNh;or zgf=A$5p5q8+AaE)XY;meDf~O^ozzlie8(MF)7o;UFqWaU|Y()j)SI# zx(NpI3SUxM9**10DU@;pGlb`BDE|h}!7kh3oBS#N0_$F3j6-Y4?-t7M!!rIB57sJL zdCOMgj$8b&#u3u@;iJEx^xNPVg(dkNIK!YdvD&R(E(GYR)& zsFssdlg6B2HOkzg30CJV&fbAr!McAaQoWt2jBa4_^vI7?{pjA!8WVDUk=|1^YyXWDDRu) z@C~%_U1Z}MQV!pS^YvhB4&Nvn-^4%k)|y@p-=TOtQOtOZ+IKC7Zyg)ob`NyE zf#vY62|6)PGxBBS@V!1)x9TmMyubIh?_HNbXOHiRx%Tb5&c>H)^6o5$@2a__W>Q9> zHk|L2AOV zuYSFP)SCT8E)}JFWwtRdBUPU_NF|8Li`?u)Bpm;^7EjF0NrrhLSlv!1B z^_XbLFMjzax)&8xmHhe8RpjF+`SCxJDa=s{D`St23HG=AYeIgrRCfFJHQ@0gKpA^f z{Xgu{z*djZ9+GAce|l4roO#Maik9pc<%cB*F<*wO9e9Q(R$m}6hJL%`e9EqTs=xAiVtJx1LU=TL%^6pVX7C!C~V{4ye>vxGg_ zKPVO{TdSUTmI4@iM}1LEa$u|tDb=N?(&Ty$R8(Ek$Na4N=8l%ZhELY~J(;3wNGbf} zN-C@&6<8)4@_)d)A1+c1&s|9|u2MMricYvnA(l`B*@4;Q=!PaX;*(}mu)rIuO`ENM zDaVL^e>SBE`ppLZAI;Y8tCqpfoUNSzEH;oQ+sKXh39~8OT?%70XpOtnmuEi2H4mxm zHQpApRlk~25NFM3sHe1&)uRWVQmr!n?mXLm|Nk9VPsPoCZHkz9appJD>zLz?&uH?jU^f0F$0%Bdk&WjpV zl-g3I`qI)erYU;Qes{V0o(PQ|^r*hnw2XZU-m~9l&b{~Feddm>GNYkRzNe=93M2eP z*Zrghyx?szH<0|5o4ALNMW2V@PG9c^k~2H44s9UqV4P>E=0;K^XI<5(CXyTHxtWyE z42W}?l-Ep(<|nkYISub2Ra2MvOVe-|)`JQI5QBqyP;ekRVlQ<~pybB*ADt=vH59G# 
zKy_>`@9??Mo-C6;vX!g^nuc8b$Pfn zM!`MV#{(pl@kNVNU}HIV??QfqrSk8rbvN+!7)vRGVeP1J${h?hz1f-k--7xNdr`z& zh>qiE&0ErCo_|Pgqf1TNL4N6%DIz;jaFl4%o)i;>A!(LDs~#DY0X*AG52B=Q>^Eu^ z4R`#3u18CY*aC_gEzRYj8EXD$312OW`qHcxjgbx`*3;o}@an1eDQ_HP>!i*ZFNH91 z=sQ6Q;3Ila!30$L?(3@0L^u+^+<=l+;I_o07-@R(;bWPX+J+lMYm1AOlcX?S`*q3} zss91;pDd+vObK!(OG)LP;wKyU{1c_7Op)+HL|i8|bE*`iaEDNOG83kC?XLRFk|NEP zlZGN*2i2qmDa~xzX~=l-t{S!wrr>V`Q)J?Qc+V9>!2u&FClN#cEo!w0UebO(IWC5U zTFs~6#gGK6n&`zC3ctBcg^SS~%c#{7X&4(%YnH&CN-&*Xf{yBWk6f2ZX}tLz%3KPn z0rYq&vhlu-Rwzszx(uzrkKd)#iuGi)*MXl;*5PV4B;BQt&7}YUDr{ zR!c*r&``WxVU;HG&HE|Uin!fUePESpF`PA5T_f!%xt~yV9_6f&{7atYdE}ZRML+Ut`6Y11kHGQ*`#QEWNcopkE+-tBw z0(BUrZ-sk>k;^ve0^d9buNu{*3frX`d>v+W+fnC-b0}=P6wC7I#CFW0CL1^er;}qE z47Wz@nkELM!9P>M4k^fES!=u(slDSYOjcW)!3&d}3!&hh(i65!ZS@HZ&Cj%@^j-fU z@nS=LUq#~WEsPdJC~7wbnWLCB?STa_nTyy1Y4>8jvPVkfMeWGE7aisAc52XG41>H; zTT0#cAN6cD)YBxA9QUK1Od7Z!0q#6)+%JXjIC6L_ruBi zAe{bKKII?8fQ2%Fd98-?~SUDyXNbcU3{{ zX`}X_W=An$9&?|Pjv~9M>T*nEUwThx_Jca~I84Wm)1}X(*1Vu4IiA4C*Q}%JcLKvV z@BJPnW}&~{zDK!PC_hpidQz0HVk=*nwto&?DnZuIkzGffcuHiSi__VEMTfsYIJiuY zzreJq5;gl0-M2>v3i}d@NEG!YL|bx?PJfBA+0-Xnn#9N5qs(kPrm0;|3s!w?tiox< z8O&MV&ZoRH$bMIiIxDg(+p;Us-mf6Y{5w?e6|!rqGtPUc!TL zC6P8}AlsX^e}g`^b(Wg*jaVrDrnbrv(}s?=QXR9Y96kOH{&i+1rGF1U`g|rfQ{i8q%_Q>=7`ZCa_8+9ymWzhc{dQ8p59rA4 zZ;<~bgwVK|ePi{fc*ZxDx`vy5z4;Jqj)~A22Np-pJ z4f45;Nz(jS%DWC-<73G&54y(2QcxcHPSre0&BH{ke=J4ZKz?t$_;>^P-C`;IhLpz- z$5PTwDWCVqq}ZP&9}lmNqU+Dsq>bCsP`khDvgA%$?5Pk?xfkrE07&ieJgI6072Sj%Hr& zEWy;|)#0MvAPT>a`Ox35kCbk8I{{p3^w4m5u{$rNlZs2n_f`Wg==yjP^{0d)| zuTsvhm~;gGM6SOf4*iQcvq+CjSN(n$dQ_oi4~T4Wa>e6DQXWYc7}2FiSn8}IBn43{}YDqJe|yc;dun!Py7qduWR`}bZxX$fsLTOe}Pp!_4;4v zHx@@XEI3AJ@@coAh6m~wnui8b*kdWd5@@gk-crteEJa$n8}Po{)UYR*Eby9v=Dwwc(z6Io z;^HbL7fLPhm9xi=@>AaPJQX;}Wp`WO7^((2%NTe@)5dCY0A6%CUri1vvzyRwDEV0A zvfp26I+VgKa@p@KcO9yKVZ^urQ)eipS>&J+?;SIl3S99mylU0uci_DftIOSZ`|6bL zCVSC$)#Y*cNKeNaz?~1G#2RuAbD)7Pasytk1tqx1e*A@n(p=aN=y>9X%R+(D4%OSjP5XHL7sr;i3Vu{qJ zrd*3}=|C|xWq)3A5~bFZ3)pa4;eoP)=#qy#jJNxgf;{2dBXL~mDW{ez*&~BwANSL~ 
zTuf8F?d#AEOP$Ddz+g#(-Y)WJ~B~1=WN*2 zfdcBw19`7NN~{kiJ|#?UZJ?a`atc4uoT7cfWMT=Ev70H~7fj~A%A^_9@RJ+z8i5q- z2Tu1k6pOQa110*&5tcShU+p!{sZ`)67xG^=P(cGZN8VUcV#Ef@Z77%j(p*0SC*GGL z8p*-(xLmA}9yRVsw;IX2_{Ou8)YwjUc=48++gNmm>eR8R+?rQvKsnXmi>sT;b<1>$ zf8Vm-DK5Wdzf(MV%YLW$2K4_GXU7;+ni{UfS^m={3g$4(N@ZI; z#vpqS^|)iFB)O%0t=z6N&tR5WeJHJ!9KpuYi&oHZ4RsBY7x9tfDJKXWv0Xz-Z727l z5v}F#xvTAAej7QSuXePi4|L^2o0Dr>be`WEP-0uz$KxD6^_o>t?_9<2^oh`#(||JD zB4B^jKm|kWZMLEF?d2?1n^tr{w3q;o=>QKx@aQN9Sk@YZ4Ecy+JEAN1X+Wnt%8{1+ zhWrK}sX@Ug&U=ib)DVQIzs{KHMu;56j#1Z8xi4>wcY`}4VtyJbSI6j(9V!P%Zm02X zZC4;Iugk~SEIR+X9Kt43)lSHBM){CV@(k&nY%{5yX2(bKZume;6>kQD9IQN*)m`!sI9^OH|lRKEXfSL3!O|AE}eh zGfJ8TlF{AeIXwPT%I}Vxz%(;C^pHwB@Krn)y`_NAUvn}QqCYbz1+UO*iesa5AmrlSNdkK{EV;l zRZqVq=T_jo>tmA`+`83QProa>GRtN|v1k3MV5IzV_boJ9d?W=g_>Yq7vn-l1N`A^@ zniwUw_So9=<%*&Bl@rnZuhpf@DAcj3uGQb$BPCk)vRpLOvcJFTA1x;`{!=~57>(G| zF^URDqfDD9)o+Z9{g%!3C~++M9E+m7v2qaq2X8rz1Ht1_6fDw5qbPbD^6!qK%yAf| z?i*@w=}+eIaw7MyM@i$+49Whl4xjD%Q|5TYs4Dd+b%LB~F&Xl|!Dk;PBG%QcN0}2b zf2vYX^@|Y_U#&~&ljL9?GD9uQw=tx*W^@twVk@ph0&-zQY?r16P_bL*CD?)ghl) zJ8ItVr-lR1_<=eUGZT6aH*lGX+0-mKk|)>ErI=;N-_VcJXF-a1ND(I|@bPshO{DMO z`8}k)@GR2yc#cQf1kWP%#Pe*VmGLane`}N99HhV1rnEV7Zkdl>-S18XL0AU|45su1 zboc-7k6!ieLgu-0Sf#7jEfGO8G?db-n|)~dTzRmx(;u79bLD2z7{rvKxpFIhHGqQV z$&rntcD=l7C%)zlU)Ej`oLBEvKbhT`GUuU{#s{kA`H-LY2~ZQ?ms1&rFW-f70Iwe) z4otDSTqsxJD@IVlLd+eX%ap!Q&i7oh3d_9kR~^Gpt(s1naP1LViH36{s0GZTM1y^YYsGQB}Sn@WNp#_WZe*eGfLG; z22fkjB+IR&e!!3bCJR0q0Ahz#zzhdXhyWe|GgAZ5VEY7c5n1;Ha1*6!tws|jgQl+l zoPn9E0l@4NKp?8PA%K3!avgQil7=TRzSdOeL0mKPD0+^fzJ_2|IOs!M^k5Ore0M3irQ{{yf zD&j0iyoOBIeq-)@L=u@dVLhGtDQ13~&`U<|#nedvzZ}9;V-vKE$N-DY0Dj$vd%PO3 z8%hcw>7be1wxE|h2hAV>)Bzt7z~McZI0@hb@NxSX!0yAK5x~K8&rvmUpr2*Rj>;%nd0Sw&_iwK~Fs8j%zP^r^)0MV$l zn*bhz^IQS^vI`SU0gOYX54XcKlh7VcX#gSs1PCAyS$zd?7V1nAzyTCrDFA;k+$R7A z;DP|=LX3w3Xb&-*b^zcIBR~MT5VfxW+`xH~0K&j}Z4(HqYx@Ct6YEG_4`RL2QC)aY#>>om z+{yJYW*mWy)P%!WU-3A1H9Z3e-sBx}J&F$K_l_ETR1Eve-%%rviTPCS+mv}6LDWK0NC zgT9i370YHr#t&WbeKX8OjQIUsZJPlG`dwYse9+657~hwx5mECTU;J)cqPpRGxdOAa 
zGi3Gs#}DyQm&l_IU<%ej1I1clv_ zpYU1r)Zp84s?7Tjq`Z45*=Hb`?<4I#ko-m3c_2mHmos=X3pxG*j@#YT!e8VB*-}*J zRc#$HyDY$T_*s!I8_M?vd+E6+(2hnp`+0WO$n%Ari z^bSHV! zNf~ar;{9r=d){=~Ny)J!8+6Qfr!;3JjGywR0%rx^HuI*KYD%!Bf`RUk2W3`M^2(Io z?oR0zC6Xt2lVf!yR+=1wfyYM~Dot*Q5mm&1iQeR21HX1Tz#EqymGCm0@nNphHI%S2 z`0gI6kBjnz@fTj?>ZY9Lcf2Ui&5mgKHC2Cis8G+S+b)~{DW(y6Lt~OEWb5mtL`n0C zWn1bcWOJnwp%?UMi*HNY)1z%oO7T`A%IMLwCKY-s zr^{HbHonQ}qu>Bgl68x1E|#j$Qp zkFnX7yQ_tD75s#x(KZuY)!6!qAF~W@`Kr?&Y=`@MD8aw$L#_>!2tK@z8q+`^vOUz? zhDtb9J@pR&&-@N!jraiYqf&(81HjG( zVGjFKSW_jfj9R_jsIaM$&+BFZl{FfE1kV4qrH;D zPxMq1J1CK?@+#b97NToWq@%LXb1o2ZTkq>1uJ}-Mz7m3!!ODrsqe_*?3ROI5YKZc7 z^*4ddn*J*JjS%Hh<<_r~zO4rRC3mGhM=m!hoINtsdo-yUUo?&t){8^|aYdTwWB zPUW0Z2!}4pi^_XTA)LA@6RW2RUL#+XZ)I1=H@_6+q%fr!|GBD~8m63<>RMf2#vrjs zH2UDKYLwU$_8wl9(t0WdWqdTTnwr*2Ni_3GRaF1}ioay3=kk)HXv*d;6f+PUo-;LR zppwYhZq;S5a)w#<8ysnLOZ^tG5#Je?E8bRoEN2b$@3d6I-&RtX<)$ISuci9r9pU58 z3>gfZhC+nDO={XuB|_$F!_>kNqT`MYBLF`1;Yj5y?;S>wqm-%S9;JLs>!OrebaH}2 zl}Z@3wYkYZT`6WU6nfHCO<$~xW8}UwMv<>KhNRjDO4Y$aBSiU-yFP^oU2T&j?;alS9* zSz)431OBFtujXvP@1l(#DgpA%dpLZ#=vL#UG>c0S3TbwhdKMMELEdYW*8IXfidcib zec&Dir@$2HxMPG=-UWsfdQl!FeUx!7P4Wy3WZ`Y6&$P8jS8vcT<(Q<&uXQ$eFrvlX6eykDVxS8%DXjyOgs{!AIDg z)WU5_q{JuOrO2Idym#)Z`8)Ab)71tWI8-9<-SD13YPDOLDQ)^5)BD{@SLxYInV#-e zyrr9H{#q>^T&U?DWib1g7Vc33`0YFRMmXjJPb*N+UZs$KdWVA2Fx_WdkP`Vd;Ty>*^c1IP%3Y3LXAj;n>P#!Sp zk$wtIzdup}rTuAUwbma>Mqmj9)2{@qnfwUrd<60k*26;}?9 zAy1TM()&P6dZG;UjJ#B8cM_L|#FKD?;X=J9im$W}lnzhfmiGbleX6)fH<2~^Dg5(2 zU=}?E%_so-o+^E$Qvgtpi?kUuZqJmCo*NDPEjIr1pwgsgim$@I|A`7^7y#t^m_o029GybR{!(EdVSMKp?Vq z2%rau&j{cHVD16%jLUu55Q_FOKs1DmO(0O#stn57U=kvLrvQctU>CCH3g8*U+f*5f zzm6N${H30M#Km>|R5T+E*RzISV;As+(W%`=X(zybN z0nI)E+y!u>CT#g5fFc37!-TawV3ueUZ|wn017r;nz&-GpDF8wi31A!AQD z0dVpH@E)=P1TY0aUjZx^d;~C0@DV^o!AAi90&~I3JVUwy3_jZIq7LykcW2U0wC4^V z^9gBITWym-on6c9EOSdPW%kEXx)R=27=Y!}-)}+E%PF7;z|QR))PRO& z7umCWyO%lm1;9s|#V~7Mh+4It)Uch_lf3D}rf|chKwl6YY~EF~dao%QS(*oe)&XeW zwE#v3nEO;eSxO2~JD%wbGNgv>5x4`PN~ddPbx*n*) zU=VfdWZqawO1yr5x1d2E^J6A?-G*cKg;@0g)anPIC4gZ9=m!8J 
z9)bmc%K|`o)#(lMg9=i;J8j| z8dC>^AAv~+=EmE=xC8T4VD19rv4<=8VADtmDdoC=!Sg<2(YU^=e>Bj8g$MOcI!uXxx#=2F=6OkOF{O z%WCc;OKU|jGDk`7cg^Z$GM}g{Ee>{2X{-6JEKLqZ|Jeyvn*nSRK=QBXY&&7x!N6P= zm=ZnqgOk%Iz?A557MOsV4!Gg*iCHdI22&WI=L*6SZJq(63QUPM&S3vgU`m?0AutVg z0TTfcw8jqwW|+V%L!)SJH5u+qyUg3UrdG&as4UdNnaxJ!rRb&*D1Xqr5%bazw9{cw z1p>Ht7_3UPIRfUjGJtuE>OwN0@kL+;3Ct(J+&BizeHdzGhIs~)hNHa05qN)h@D4eG zh$GDc3P0#Q^d)&RFi(%6i_QNHMm%m_A3l8tHhEoBtuL`wCv_>&xU z)|Sz_q!lGX?-Pd|rIBRH>U!x~yt)boK9`t1N<>1V*#P5S!j0@zbDd*dr;@aTuP9g0i zr&}+qrH=Qa)ZupUhQLC4vhEMTt*jT@!QTWHlSa-9?Gf0Io~(K?xO&z>>j#T>`UE;8 z$;C?#veOkW(b7ABo=~SB=L_|BtkK$*Qu;v1zXc z_i=~0q(d*=8_Js5VO@M|=}L+J?(W#(A>wczQr<kXP^ms0DaZ_xTKBg@~Q zt;CC5Mn$i_K}+e1_k&|2e-7R`E>MyS;GN)3)@JL1i{*+vcHFw)>~Vq1DZauE&a}#Q zaI)5|&YC~+Casg#P}nSzFHur32OfBVqD(l+gS?626oPwwpj7 z!OjgGDqA0;ZI^aw2YwhF+qiw`{t1Eb{-LpzJB8*2oxWe)HALU7(+RkH%Dr1CH7maV z;Iv6!pkGljVy-cLqNFgtnED(7v-?`*^Sw%X`2&KQp2;yIm<^`ABGb;NFkuy{F9Fr^hp+ za(k!DYkD+qM?w=faZX4=hteF~k`Cm~VNIT*j2ueql{}+ab!h0AiO#bvQ?$+nnzR>M zaTE>aOxlj=jn{PRq@zQ1Cp`5SfB1!CLJcQ;)v+o(|G3Z^C$jN4f8wL*P}K=9Jw8N# zV=0?GA@raVdG-Vyp^nCWbYiGI&dog8*TC4;{}rmc@wH3c{INntE)+S|rKjp9P7AFL zWU2L6Poo7YpG-ZS7NLCd-sv>E<&%TY2wf}Jkg-?K2<{n6P|)30G^ zm%Z++&>4X$HGOb4bOYPG#^#?LT0Jf>k*fXk97@`ho-93=7Dt_qF5f;kwDYW)!-Mry zF1R7NI408f7BtIOol8BjJvME?hsaN+9|+8)mcG3#bi?NKMDDVkNmi5v!{>({3)JYz zy7TEJ)~TtxM`^XG(9(N#H1w_Q1NnP`V@qxfy|(*U?DI+_mp=3XD=9(5OtLvwdUs zf_#MrPg9D1FgM+K=Ke>S;%MbBDMij8IbC-z9X`HcOrLezSWxWm#+;iX?4X`g+**PO z=Tjm5g`6|VgaRhq0|_fJVJ#$VTmobGD=kIc#cL1KAu&Y>^h6xwcI(_`o4yiqrr}5E zRUAPH^h6x2g4facgnkt=Vg8Yn@ZO_AL4W$}OeX!myZcle^BA2|$pm`h%?FtnQwd#4 zSS}OjiI?y-UqXRyN4Sa-CQhQC9WAo1JJQ%ZKu@g=6x(NFGbnaZEp1Pad#L9NDDK{R z!VMfo9_(1?3*IK3XdFe2mYxWTh4d?EYLuQ@qZFIv#8g9<(4!uzOjv;lcTmDYl`Iq1 zQo=q@QmgbemN#|C?zxK_^7pWgLUMm7C)vP8?BN3{&$}XlJ3xoWa67<$B z6Gmjh?5Bfbp6d5$N}&55In&~&X?>9i^h9i@fGANY|BD*bdq$bC z91||31pj9buuB@3JUtV&Y9$brQg{(6UD8mCqjbUjQj~BnD9$VkYCrxNwzSG2_I2` zzl9Z)u#n539Bt}fZ1zG>9IgZOxRiXh&-9aFZ;Oy&`iZQdzb5o1 z`f0M*(UTDLn_Q9go7$6~bL;5~vff5z|6=}{pmS2B6T0+eRak=_b5uUd=WED2`TP)B 
zAJ}C09cXX+-v_-toh(0!tn!`y#ibP3Tb8Kl5wbq%k)gkl{p-lQwL$MdDa-dHZ!e#y z=_In=|I5&|EukH($4Z@jD9WZAs?6LH2hTY4cgUnCw6ONt3;0O7*`BzJ@Yp?`A9PMAP{9cawh1MVxt@AsDiHgsc(IP8*i(}x++Lvytp0B4L%C5-JuF`mEu*&)8!*=u&xwVuKP(zzO%QoCD&7k|( z54evR8vDvqY3vDVpQc|@O?t;OJs=B~o@vb3YNDIiRbw)guca{*PBEsRIR53Jpr4C+ z(=d716Wv=4d@@{YJ-6t^}|2ebSMa*yLr7(UgQKKuAT1jQ8D6ivs_ z-dwlt#I0?QF*cv&Psviskee+iZWX8vmavQxybXJle%?XzW0L)XJ0E0?wNxV@RLN}b zEursM1hRQ!bb^RlFP>>^F=ygLr$7HFKx?RuUdJD&k4goy?|j>aCJ~1nkW^iql66 zkRbPn+o~32!iY>*KrQH*{4*-l?=L8BjZ#9O`Fomcrx-IPuW`pnR>PhW{$HU<5h|6F ztW}^@?tj;s)K;~+f8?8*^7LbHQxox!(`M{dx9yaA)SFsFIjJ7$Gi@hd{}=ny1ToG! zf@buTaL;W*hQiLgpJcr5g{%!{(ArT_fjE7kPP>BX`(`OwPT20BI{bwFT{0~+Z<3uD z)-+maCcozOq32yp^^)!D**Q!6%gxGBNKR$-NGaCF^J(+_%#F{bZ$GAaz0k@@A69QX zQ|=)sQSJcds2xC-67`wL>2xA%s!B4?Svdc68yt*bQdNbV55Ui&^@E;x>%=KkU$1R4 zVHGCGch-76mkAp%p+O0H=Oz<|v(UwQXkiH^97}UpcaeFEHYJ%*dn@Rz*=Fa(k7NOjwHvlPJMI$c>mV1rp|;1FzyRO3*uZ*~L;!xPTJoxxago4nZViLCaBg~{y@i@Heb&NIgjj|Nve!p$-Yj-|8M=>sXo2P z!Pz&?1^Nz@%|$uph;uTq_3iF@VLstL8c(^8gI6`LruM~$Q6u^t#Thhmed;?%ot0Cy zOX@Md+HEaH-@45or4@pncx@a?Gizrx2V}xhOju4UoZeZt>5o8{Q$pbyYQew2Zza$1 z<}|&-zn8xc!tzy=uN#%sULot#HR-p=dfzJjPqIEUA{GRFf6LaWIUf$zaf>qaW^~{N z{`O;(tWQ&9!#^bZUr3caNfxN-KC*wH?n2h@Jt*l3O3-JYvQD2)8r-9y+d>205vsMs zEu{ak!?o1j_FgHSbkfhx44*%x@}N9L(-SuM1%076ase&n8NI<9A^EmsYgL2d)+i;+ zUiWEG=$3v#Yp&ix2dmPgmRjq^KBc`f)d|QauU=s6P!LJ?WJ=GxoIL5HNgq@7$@1qL zH;&4<`qO^lZkzNi(SdG#J+t-sg}bWKd_2f4*KepMs?n*1VHGYk_MTPxg}cFqRiRuE zo6+ki|1@B|e4(+-%FP*?V7+~zu?5{&=*WHoL?c;d;KG;aJ@`6b7 zfXE!r5`=jIcL^Mp?;~i^1IU3Z;080T{Hc?815L0E7AN!kB)AH;!Rb?c_2tHw^9RU9 zaP3rI1$vq|i#rX@g3DQ+Z=XA`v+;M&8wBLD_%iMaxDO7W&+|=i0-PDx-T71p0jH&i zvy}~&X)VV)h+M!O11G@k3weI>``T{*BI5G`$4k6^&vn7uZjB%Egb28D z4KFvYnS20ca2G7L;`w1^ z@1U01nkTftHn<0_Zo})_!`xw$JL7tAP&4Q71pBMpy6JVEi`HFUK(;TUcXOw~r5Z0!$Y06Pq}QPwxOy)y&)lc&_Akm_ zz-dS1@7D>g{UFW+T?D7#gn|e0q%l}&sv+5Xm3}0 z8S0^5;pOeuxoxoV1}`_)amTp}qGn{8!V>s@)$S;F1ogcuQp~DqFeCFS+JPF%fnl8C%`#y1>E9Z zAo@HYO!+sMH0c#61&+?N?!B7!o~f;QUJ+aecfiJMUcU(ro4h>AeX6L<87;=ZDYc&X01J!By^p$X&oIl)x2m`3F2dc?Eav 
zO714O&3piz1OJ#;NL@sz2xDb#VADUf#c#JA5Cv z3GS}s<)Y495Mdq=*_T*?D1yu2-XC~=yTcuRjl2Ga-?ZOru>kD?!L4`v6=++0mpj_! zPJmP3$lrbWa=#n%2gnI<=Tlz=Do=jKo$YfM!DVpdi$T7#-%tH#5TN!&YMlF3u-@xr z!Fg~8+yjg8yuEV3vwPerhrEF7Uo^ozuxar81ULuI4DtND=dU>ZiINv^UEY*es9E2s z(C#N5=3#R)?i4r=u7P{t+zj4ciMt>wJRrK@*w(y(BsdL@%;x!tx!iraF1wuEHGvjZ zZ+m}$*4-Vrli)PCvm?)MeU-ZpHtBl#-T>svlJW=0b#M#Z0cYp?8l?J_1>7xg-)CoK zjqc7XB*A%bkuE_cC%rLMz%_7wU!EUdpXR?_i)&6` z;0CyN8t))>I`~X(<6Q10IJJz*J zLxNG@6|!I(94_+w8n}tRGT^TNdH|g(c>|)vZTt^+8e9c;!O`Wses(#$f*K@r!Dg8^ zkOr5*EwH$b*N+z)o4;%?jp?ypJR}2d;pd;4V09@%H1~1(D$a zQ3BV&9k8hI4x-=`IL}NQWR(Tz-2}J?j{KB&7z5{S;^hr+&u_Uyh~CUAq`@U{1Kb5C zZ{h7_!6l!a9a7B~kS8Q9a2IUc$~%aHli)14G_bovtPKLPf6)SW!S>Ji0Aee+i{KVG zT-Et*|03-Loc=`>Y=h0)bOUaF23!T(VDolWU-d6CI^eb^s^AVd{Bu5lB)AA}fP2c` z2~qS8-e3k?0k^@%oxFYmoCjB2&+<+O8j#Sr(>nAP`gkL97jHNYPJ{E{3b+C8fcxA9 z5xE-%08WGR;0m|_?tuHuw1PxxFaU5GoCjCH4R8nC_gk)@k$YeO;50Z7u7JzG;O+Im z_12)=-9Uan2*?d22F`%XHqXz0$z21-^`GvltuLkigqKd#tZ%NMzwn4po8T1bGq>nw z+~Li+bKt}lyu1RgZOO~UR?OQ8lLtfz+?i$VSf#BcJeyS%d2nhQUS0xs!KH0^etr&j zZ#(AGM06eth{g`w$(^_>;5xVo&VSX~c`Nb=^Ti`CY z_3C}bv_kDRUa5h;i|tIZ)#UCx$=!aMyS$n^`7C!0++?P` zahnC`D_y!bot*SyNj}G22DhK*j|Lz&UUU z+y}@0#ycp28{oFIT>g3hJxB=u9U25Tz-@5&eO^Bb7V9VQ&krLMkdq#e`2lwd-2IT3 z$2V|i!O0#k&-5m^>%Z=>3JD#s@eywz0nUM|;5Jx%T;L7HKIYDV%it!s2R1+9?WMrE z0tCw78n^}Sf{lOh4x`{CxR8ZF30wm=!8TZY$_HSA6WsK184rjexC(B9ZLm<6Khf)y z2~IH6`qy9qdUL3*r=lC^fFu9muP_GAfqTOJqi5Y-ew@#8{V)3iS^x9^T99Ca#d$Cwa2%Wgm%(;!qPza<0VFTr9T=Bz_x_tZEH9esn}acM5}XAW zFD>u}s*unCcfdVx_%hyJ6r2QS3J@rQE8qsW4eo)B%lUxnS8^9R5Xk?CCzQdl<-9x% z&V$R~Iye128V`srSd@8(`Rlmr;NJDTJoOXqEHkZtu^V_o6I{8GmkWzK0*-@I;F{lZ z{crjMM2_@^b-_QqSi4{Ct1B)szkAf5661WGB-p1N1h$;_=J~(id4OiD{R$)^SZYF{*0b0?nRPXCU(53aq=%UkQXBerL! 
ze~}slWd9-$7H@hLobn>L_6{$%!QJac|Ha+^oICpk zcNd(NcL|}%yBa`82i*Qd^549H7C81FUY=8z+tLRLb7Ii#K({xEJF7gTD|DXJY6^E5EH>rkDKPzI#BYt^-I7yzS~Scprg7VwSyw+yUnnKF;PE234z|JREqVPM zxD6H&mOoz5Ppcm>hR2JU*42;Dc+xXjxyXU@+wt-WxDJlZ<@u!@xWhX$)6c4SKooaj z2_ib5yAQ6$czJGD?#OQ3ZD#AC$LVKP@!fe&7u?>Hm*@B5PVCLy1Go3#<;AZt)3<#4 zvVbUp(~Ed{1DqV;<;4WI*pEBKOuLK*3((KL)wL=0##9Ao58w^9!Ci3VK%O7-Tkf*z z{s6g)K8QC^Jea!-P9=GH`I0Mdu%itQg3GQgC{zXp*oYznP?Z^id21mgO za2lKgmz2HFeyfns0Jp(iu-J(YI0BA=ldgC24jdUs$b-u}SqopFGqEPbJK!Ez-kC{G z`VbufN5OG$ikprMJRpkT3b+n#fo*Uf9NyXg0%(PavH>5z_&;e~|#Ez!`83Tm+ZFRb_97 zRfmKoxDB?!J+Rmn<^b4qJ@~RJ1_=pp3Y-Dwz(sHwTy^Zbid`KNn!8$u(jIc0Z5>Z% zc1H?z+abN<3xiE?44eR`cC*frJK!9|i{LW23T}YgV12tJJ#MjpEf`t=JRnSP6dVJ` z!3l5@oMJwj&JbBZhzvLnE`w{}<^t;`IcIH%cfmcdu{)bHA`Fg#6W}CwL8N#9k31VgY~_l^nNCy?j5CVg5v|b2f@T3Ao~|da0;9TXTVu-4xIPw_Ag3a!0BI9 z!8LFb+ydL+E?DfX)(O?WFm%A}UqrxBa2%Whr@=XJUfDYdC__R8Tm{#_O>hg`0ry-V z=DnLR7V^y`432^m;N(JU;TqbZWg%V!SHN{}3v7e?;P5_d+;mk<9uNs|8k_@{z*TSq z+-9Z~s>=ejLVb;mP(;8na1xvW=fP#ahU?hjH0H?qiaE_V!FR=g}qro+B9oz!9!8W+(w_K&g zelQ2XCO8I8fK%WMIOns|f5{h+<0*qH;3~KQZh|}D-oQ@(qCW`8{)Je~=ZFC|!Eta3 zoCOy>yZwu@7jXNpg6rTWxDB?!J+Rncy+GB!2mwuV|EOj1ZTi`a2Z?y*Bwt8eB@aCY{J++8;mLA3mN)7 zkDTN!ujF6vn(8)B+Lcpe>7cxL7I$3T5=iH#FW}`}bxR*zUQ&19Q7(uY4~P!fP`BXG z4aC72a2Z_r0UuEDQom`P%v|md&>jKYzlxW~)D3U+fa>aoH`=DUKaF+@T=ZLR6Lo)p z+yNWv1~Oz1a2s4vcTLe9WHxs%xU1b$_W;q(Mcnd&4n()&6^z;3jcvG% z`P{`t+;y-q!pq~Xck*^TSx6{>8za{1AIp=t19(F{bt?+}LQ?9^5!yv?9c+V*BY699 zZn_cPkvt)F6n7EaI+~X!)7-YYlY|~{_yk_wIni%gVR9$?142~6xuv|^PJ}4vj^#Y79aci! 
zazZC`F6RvxS8%u0Ehlt-w#3UT;EcNEgwF3@qw@6*E2?fe;U!$l8!Rv9Zi36#@$%^P z+-b0RgIn(Huo^e=3SDrc!pl=PaTmeiTX=c&R&KNExZv%uif+Jj9h_0Ol+bgi3GRWb zckm8{y2XU9A9GCeUk7Y;%L(m-y2XTc4qUDA4$|rt6FR>HZm3&M=<@u1R9`NC9jM8G z&*l5MQ|cBII==|6ujKi;I(HS^{?$15PlI%Wpf2UY=36nDFX@^Xirpx;)PAIgHT)D)j?M%f9_Rq%A4Sxx~qfEuYbw&ZE*NMx}4^}Jj}MefNQZ)C%BFY z_j1{~gC5vacXiO^DRmPE?XbFOgLXn&E`K?ImJTQY*(LS1!N2b~a^$_JDLXTcS4QQgSFYfs(C zK|AW)cY)@=4y4tM7_>{^I@kt>BYePda2A}HN%?a5>ki}U4h`B_a0T1~_rRvQql0d* z15TLZ`9Y|u?%<#+^uS4VhX!4q2Uox?u=y1}huVjZxBm1`V@jc(<^_#W?z+0gg6=qT z1uySj$=y6+`*?X&-GD%q(?>Vz4g<-<_XPhxLe@d4!pdlt|qVNy!};P9^RQd&Pq$fBCd}nGU$l zUBTVEk~?x0cOD#mke6p4=FVx$hij} z5~X(LT;5<~8Fv>PQP+jk^^@nT%Teh-9uoRs^P9Yb7`S;6FK>er7xVIzwZqWR)Kl$m z^KcK`R2Q!FhN&)0sh#-&uipVTiq;83bg=JV&hx@oaK}sBHn@EoFOS~IT>+==W90?W z}HK4A>rCkQs zz;&<b;2HE>F}f3&UhGvl;r{>x)T!wa}B z26ck#61b_Z)}Ie?bB`j&364~|ab4aBE&XJ>G?XK}~X#j|w# zl^xx3?_gh57pKw*9dKA(TS}KF!9{Qb9NUWzsDFs#!NER$7*A+|i|SfZdH{8>4Gybo zP3imuIH#{Yr4wRD^8tjj+}*Relj>4Yx_${<2ixF?yt0#A{O5XQ z53YeTId$zRJ%Bz1`~xsA;WpHjmUMm;oB>zBZLo2fy7rWAFrluTq@4%X!CkPauC%1< zx31(bp!Or~e48>nXhjv9>=R>=!F3X|aP?zPMmS zuFIhln(8_n+HG)MU4}!KH^EhP84g`uQkUV-j!$&|EZ#M3RW)@z4V_R?7sAl)f+Ole z7`oip+}|L5=?X5X3s~s;}I-#qsD50HFSC7!{s;ft6r`6RVw7cr+5ZWUvwxGv^Y{teRdDn~Ufu?0PvYf~lesh8bj=PP5Pfj-RNg=>%Pp31r@(b^-uLS; z=Y3Rz1?V@{SMUb2z8_o3Ig~2#^5pg0Dc^6OWPaiaUfy}eXSotKeZSn16>7d;=18~S z@KvCvsV;Zo1Madh_|8Mtj}38me0JVPK4FqZI<;q>IS;nMq=<;f-E=pBnWUUUZE(`} znWXzB{~Nr)B-r$QXek@afzv5ozkCF@>HCn=sqeG1awd-A6gU1qXieH~Xpd?61LeG+i=C;*qIJY}L-VKg4|2o$aiVn{ cg*OvPH>@9XKeJxiZfMUy?p Date: Wed, 18 Oct 2023 15:41:26 -0400 Subject: [PATCH 370/407] StorableAccounts::hash() returns &AccountHash (#33748) --- accounts-db/src/account_storage/meta.rs | 2 +- accounts-db/src/storable_accounts.rs | 29 +++++++++++++++---------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index 57a5e556aaa93f..d01f7b55fbfb73 100644 --- a/accounts-db/src/account_storage/meta.rs +++ 
b/accounts-db/src/account_storage/meta.rs @@ -76,7 +76,7 @@ impl<'a: 'b, 'b, T: ReadableAccount + Sync + 'b, U: StorableAccounts<'a, T>, V: let pubkey = self.accounts.pubkey(index); let (hash, write_version) = if self.accounts.has_hash_and_write_version() { ( - self.accounts.hash(index), + &self.accounts.hash(index).0, self.accounts.write_version(index), ) } else { diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index c54c3ad716facb..8da6885dce6947 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -1,7 +1,10 @@ //! trait for abstracting underlying storage of pubkey and account pairs to be written use { - crate::{account_storage::meta::StoredAccountMeta, accounts_db::IncludeSlotInHash}, - solana_sdk::{account::ReadableAccount, clock::Slot, hash::Hash, pubkey::Pubkey}, + crate::{ + account_storage::meta::StoredAccountMeta, accounts_db::IncludeSlotInHash, + accounts_hash::AccountHash, + }, + solana_sdk::{account::ReadableAccount, clock::Slot, pubkey::Pubkey}, }; /// abstract access to pubkey, account, slot, target_slot of either: @@ -45,7 +48,7 @@ pub trait StorableAccounts<'a, T: ReadableAccount + Sync>: Sync { /// return hash for account at 'index' /// Should only be called if 'has_hash_and_write_version' = true - fn hash(&self, _index: usize) -> &Hash { + fn hash(&self, _index: usize) -> &AccountHash { // this should never be called if has_hash_and_write_version returns false unimplemented!(); } @@ -174,8 +177,8 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> fn has_hash_and_write_version(&self) -> bool { true } - fn hash(&self, index: usize) -> &Hash { - self.account(index).hash() + fn hash(&self, index: usize) -> &AccountHash { + bytemuck::cast_ref(self.account(index).hash()) } fn write_version(&self, index: usize) -> u64 { self.account(index).write_version() @@ -278,8 +281,8 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> for StorableAccountsBySlot< fn 
has_hash_and_write_version(&self) -> bool { true } - fn hash(&self, index: usize) -> &Hash { - self.account(index).hash() + fn hash(&self, index: usize) -> &AccountHash { + bytemuck::cast_ref(self.account(index).hash()) } fn write_version(&self, index: usize) -> u64 { self.account(index).write_version() @@ -318,8 +321,8 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> fn has_hash_and_write_version(&self) -> bool { true } - fn hash(&self, index: usize) -> &Hash { - self.account(index).hash() + fn hash(&self, index: usize) -> &AccountHash { + bytemuck::cast_ref(self.account(index).hash()) } fn write_version(&self, index: usize) -> u64 { self.account(index).write_version() @@ -522,7 +525,9 @@ pub mod tests { // each one containing a subset of the overall # of entries (0..4) for entries in 0..6 { let data = Vec::default(); - let hashes = (0..entries).map(|_| Hash::new_unique()).collect::>(); + let hashes = (0..entries) + .map(|_| AccountHash(Hash::new_unique())) + .collect::>(); let mut raw = Vec::new(); let mut raw2 = Vec::new(); for entry in 0..entries { @@ -559,7 +564,7 @@ pub mod tests { data: &data, offset, stored_size, - hash: &hashes[entry as usize], + hash: &hashes[entry as usize].0, })); } let raw2_refs = raw2.iter().collect::>(); @@ -601,7 +606,7 @@ pub mod tests { let index = index as usize; assert_eq!(storable.account(index), &raw2[index]); assert_eq!(storable.pubkey(index), raw2[index].pubkey()); - assert_eq!(storable.hash(index), raw2[index].hash()); + assert_eq!(&storable.hash(index).0, raw2[index].hash()); assert_eq!(storable.slot(index), expected_slots[index]); assert_eq!(storable.write_version(index), raw2[index].write_version()); }) From 93d882f15847fe9658b3d70906d4ec461878ef97 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Oct 2023 16:16:38 -0400 Subject: [PATCH 371/407] Moves solana-store-tool into the accounts-db crate (#33755) --- Cargo.toml | 2 +- {runtime => accounts-db}/store-tool/Cargo.toml | 0 {runtime => 
accounts-db}/store-tool/src/main.rs | 0 3 files changed, 1 insertion(+), 1 deletion(-) rename {runtime => accounts-db}/store-tool/Cargo.toml (100%) rename {runtime => accounts-db}/store-tool/src/main.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 9d09f953f4ad5b..095a9a005bc2e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "accounts-bench", "accounts-cluster-bench", "accounts-db", + "accounts-db/store-tool", "banking-bench", "banks-client", "banks-interface", @@ -84,7 +85,6 @@ members = [ "rpc-client-nonce-utils", "rpc-test", "runtime", - "runtime/store-tool", "sdk", "sdk/cargo-build-bpf", "sdk/cargo-build-sbf", diff --git a/runtime/store-tool/Cargo.toml b/accounts-db/store-tool/Cargo.toml similarity index 100% rename from runtime/store-tool/Cargo.toml rename to accounts-db/store-tool/Cargo.toml diff --git a/runtime/store-tool/src/main.rs b/accounts-db/store-tool/src/main.rs similarity index 100% rename from runtime/store-tool/src/main.rs rename to accounts-db/store-tool/src/main.rs From 1045548606a8e786a602c1bc710b6690b2ed0d07 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Oct 2023 18:58:19 -0400 Subject: [PATCH 372/407] Uses AccountHash in StorableAccountsWithHashesAndWriteVersions (#33751) --- accounts-db/benches/append_vec.rs | 12 ++++--- accounts-db/src/account_storage/meta.rs | 27 ++++++++------ accounts-db/src/accounts_db.rs | 46 +++++++++++++----------- accounts-db/src/accounts_file.rs | 5 +-- accounts-db/src/accounts_hash.rs | 6 ---- accounts-db/src/ancient_append_vecs.rs | 7 ++-- accounts-db/src/append_vec.rs | 20 ++++++----- accounts-db/src/storable_accounts.rs | 8 ++--- accounts-db/src/tiered_storage.rs | 10 +++--- accounts-db/src/tiered_storage/writer.rs | 5 +-- accounts-db/store-tool/src/main.rs | 6 ++-- 11 files changed, 85 insertions(+), 67 deletions(-) diff --git a/accounts-db/benches/append_vec.rs b/accounts-db/benches/append_vec.rs index 650757d1cc03ce..9f287eeed1fb55 100644 --- a/accounts-db/benches/append_vec.rs 
+++ b/accounts-db/benches/append_vec.rs @@ -8,6 +8,7 @@ use { StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredMeta, }, accounts_db::INCLUDE_SLOT_IN_HASH_TESTS, + accounts_hash::AccountHash, append_vec::{ test_utils::{create_test_account, get_append_vec_path}, AppendVec, @@ -33,7 +34,7 @@ fn append_account( vec: &AppendVec, storage_meta: StoredMeta, account: &AccountSharedData, - hash: Hash, + hash: AccountHash, ) -> Option { let slot_ignored = Slot::MAX; let accounts = [(&storage_meta.pubkey, account)]; @@ -55,7 +56,7 @@ fn append_vec_append(bencher: &mut Bencher) { let vec = AppendVec::new(&path.path, true, 64 * 1024); bencher.iter(|| { let (meta, account) = create_test_account(0); - if append_account(&vec, meta, &account, Hash::default()).is_none() { + if append_account(&vec, meta, &account, AccountHash(Hash::default())).is_none() { vec.reset(); } }); @@ -65,7 +66,8 @@ fn add_test_accounts(vec: &AppendVec, size: usize) -> Vec<(usize, usize)> { (0..size) .filter_map(|sample| { let (meta, account) = create_test_account(sample); - append_account(vec, meta, &account, Hash::default()).map(|info| (sample, info.offset)) + append_account(vec, meta, &account, AccountHash(Hash::default())) + .map(|info| (sample, info.offset)) }) .collect() } @@ -110,7 +112,7 @@ fn append_vec_concurrent_append_read(bencher: &mut Bencher) { spawn(move || loop { let sample = indexes1.lock().unwrap().len(); let (meta, account) = create_test_account(sample); - if let Some(info) = append_account(&vec1, meta, &account, Hash::default()) { + if let Some(info) = append_account(&vec1, meta, &account, AccountHash(Hash::default())) { indexes1.lock().unwrap().push((sample, info.offset)) } else { break; @@ -150,7 +152,7 @@ fn append_vec_concurrent_read_append(bencher: &mut Bencher) { bencher.iter(|| { let sample: usize = thread_rng().gen_range(0..256); let (meta, account) = create_test_account(sample); - if let Some(info) = append_account(&vec, meta, &account, Hash::default()) { + if 
let Some(info) = append_account(&vec, meta, &account, AccountHash(Hash::default())) { indexes.lock().unwrap().push((sample, info.offset)) } }); diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index d01f7b55fbfb73..4f6a40a92d6d86 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -1,5 +1,6 @@ use { crate::{ + accounts_hash::AccountHash, append_vec::AppendVecStoredAccountMeta, storable_accounts::StorableAccounts, tiered_storage::{hot::HotAccountMeta, readable::TieredReadableAccount}, @@ -17,7 +18,7 @@ pub struct StoredAccountInfo { } lazy_static! { - static ref DEFAULT_ACCOUNT_HASH: Hash = Hash::default(); + static ref DEFAULT_ACCOUNT_HASH: AccountHash = AccountHash(Hash::default()); } /// Goal is to eliminate copies and data reshaping given various code paths that store accounts. @@ -30,7 +31,7 @@ pub struct StorableAccountsWithHashesAndWriteVersions< 'b, T: ReadableAccount + Sync + 'b, U: StorableAccounts<'a, T>, - V: Borrow, + V: Borrow, > { /// accounts to store /// always has pubkey and account @@ -41,8 +42,13 @@ pub struct StorableAccountsWithHashesAndWriteVersions< _phantom: PhantomData<&'a T>, } -impl<'a: 'b, 'b, T: ReadableAccount + Sync + 'b, U: StorableAccounts<'a, T>, V: Borrow> - StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V> +impl< + 'a: 'b, + 'b, + T: ReadableAccount + Sync + 'b, + U: StorableAccounts<'a, T>, + V: Borrow, + > StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V> { /// used when accounts contains hash and write version already pub fn new(accounts: &'b U) -> Self { @@ -71,12 +77,12 @@ impl<'a: 'b, 'b, T: ReadableAccount + Sync + 'b, U: StorableAccounts<'a, T>, V: } /// get all account fields at 'index' - pub fn get(&self, index: usize) -> (Option<&T>, &Pubkey, &Hash, StoredMetaWriteVersion) { + pub fn get(&self, index: usize) -> (Option<&T>, &Pubkey, &AccountHash, StoredMetaWriteVersion) { let account = 
self.accounts.account_default_if_zero_lamport(index); let pubkey = self.accounts.pubkey(index); let (hash, write_version) = if self.accounts.has_hash_and_write_version() { ( - &self.accounts.hash(index).0, + self.accounts.hash(index), self.accounts.write_version(index), ) } else { @@ -119,11 +125,12 @@ impl<'storage> StoredAccountMeta<'storage> { } } - pub fn hash(&self) -> &'storage Hash { - match self { + pub fn hash(&self) -> &'storage AccountHash { + let hash = match self { Self::AppendVec(av) => av.hash(), - Self::Hot(hot) => hot.hash().unwrap_or(&DEFAULT_ACCOUNT_HASH), - } + Self::Hot(hot) => hot.hash().unwrap_or(&DEFAULT_ACCOUNT_HASH.0), + }; + bytemuck::cast_ref(hash) } pub fn stored_size(&self) -> usize { diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 22f81f68550519..9806b186fa34cc 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -384,7 +384,7 @@ impl CurrentAncientAppendVec { INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, accounts_to_store.slot, ), - None::>, + None::>, self.append_vec(), None, StoreReclaims::Ignore, @@ -896,7 +896,7 @@ pub enum LoadedAccount<'a> { impl<'a> LoadedAccount<'a> { pub fn loaded_hash(&self) -> AccountHash { match self { - LoadedAccount::Stored(stored_account_meta) => AccountHash(*stored_account_meta.hash()), + LoadedAccount::Stored(stored_account_meta) => *stored_account_meta.hash(), LoadedAccount::Cached(cached_account) => cached_account.hash(), } } @@ -4133,7 +4133,7 @@ impl AccountsDb { &shrink_collect.alive_accounts.alive_accounts()[..], INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, ), - None::>, + None::>, shrink_in_progress.new_storage(), None, StoreReclaims::Ignore, @@ -6256,7 +6256,7 @@ impl AccountsDb { 'b, T: ReadableAccount + Sync, U: StorableAccounts<'a, T>, - V: Borrow, + V: Borrow, >( &self, slot: Slot, @@ -6623,7 +6623,7 @@ impl AccountsDb { let pubkeys = self.get_filler_account_pubkeys(filler_accounts as usize); 
pubkeys.iter().for_each(|key| { accounts.push((key, &account)); - hashes.push(hash); + hashes.push(AccountHash(hash)); }); self.store_accounts_frozen( (slot, &accounts[..], include_slot_in_hash), @@ -6695,7 +6695,9 @@ impl AccountsDb { INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, ); let storable = - StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &Hash>::new(&to_store); + StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &AccountHash>::new( + &to_store, + ); storage.accounts.append_accounts(&storable, 0); Arc::new(storage) @@ -6807,7 +6809,7 @@ impl AccountsDb { >( &self, accounts: &'c impl StorableAccounts<'b, T>, - hashes: Option>>, + hashes: Option>>, mut write_version_producer: P, store_to: &StoreTo, transactions: Option<&[Option<&'a SanitizedTransaction>]>, @@ -6847,7 +6849,7 @@ impl AccountsDb { self.write_accounts_to_storage( slot, storage, - &StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &Hash>::new( + &StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &AccountHash>::new( accounts, ), ) @@ -8555,7 +8557,7 @@ impl AccountsDb { // we use default hashes for now since the same account may be stored to the cache multiple times self.store_accounts_unfrozen( accounts, - None::>, + None::>, store_to, transactions, reclaim, @@ -8709,7 +8711,7 @@ impl AccountsDb { fn store_accounts_unfrozen<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( &self, accounts: impl StorableAccounts<'a, T>, - hashes: Option>>, + hashes: Option>>, store_to: &StoreTo, transactions: Option<&'a [Option<&'a SanitizedTransaction>]>, reclaim: StoreReclaims, @@ -8738,7 +8740,7 @@ impl AccountsDb { pub fn store_accounts_frozen<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( &self, accounts: impl StorableAccounts<'a, T>, - hashes: Option>>, + hashes: Option>>, storage: &Arc, write_version_producer: Option>>, reclaim: StoreReclaims, @@ -8762,7 +8764,7 @@ impl AccountsDb { fn store_accounts_custom<'a, T: ReadableAccount + Sync + ZeroLamport + 
'a>( &self, accounts: impl StorableAccounts<'a, T>, - hashes: Option>>, + hashes: Option>>, write_version_producer: Option>>, store_to: &StoreTo, reset_accounts: bool, @@ -10125,7 +10127,10 @@ pub mod tests { let expected_accounts_data_len = data.last().unwrap().1.data().len(); let expected_alive_bytes = aligned_stored_size(expected_accounts_data_len); let storable = (slot0, &data[..], INCLUDE_SLOT_IN_HASH_TESTS); - let hashes = data.iter().map(|_| Hash::default()).collect::>(); + let hashes = data + .iter() + .map(|_| AccountHash(Hash::default())) + .collect::>(); let write_versions = data.iter().map(|_| 0).collect::>(); let append = StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( @@ -10619,7 +10624,7 @@ pub mod tests { accounts_db.storage.remove(&storage.slot(), false); }); - let hash = Hash::default(); + let hash = AccountHash(Hash::default()); // replace the sample storages, storing default hash values so that we rehash during scan let storages = storages @@ -10666,7 +10671,8 @@ pub mod tests { sample_storages_and_accounts(&accounts_db, INCLUDE_SLOT_IN_HASH_TESTS); let max_slot = storages.iter().map(|storage| storage.slot()).max().unwrap(); - let hash = Hash::from_str("7JcmM6TFZMkcDkZe6RKVkGaWwN5dXciGC4fa3RxvqQc9").unwrap(); + let hash = + AccountHash(Hash::from_str("7JcmM6TFZMkcDkZe6RKVkGaWwN5dXciGC4fa3RxvqQc9").unwrap()); // replace the sample storages, storing bogus hash values so that we trigger the hash mismatch let storages = storages @@ -11169,7 +11175,7 @@ pub mod tests { let accounts = [(pubkey, account)]; let slice = &accounts[..]; let account_data = (slot, slice); - let hash = Hash::default(); + let hash = AccountHash(Hash::default()); let storable_accounts = StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &account_data, @@ -12769,7 +12775,7 @@ pub mod tests { // put wrong hash value in store so we get a mismatch db.store_accounts_unfrozen( (some_slot, &[(&key, &account)][..]), - 
Some(vec![&Hash::default()]), + Some(vec![&AccountHash(Hash::default())]), &StoreTo::Storage(&db.find_storage_candidate(some_slot, 1)), None, StoreReclaims::Default, @@ -13003,7 +13009,7 @@ pub mod tests { db.update_accounts_hash_for_tests(some_slot, &ancestors, false, false); // provide bogus account hashes - let some_hash = Hash::new(&[0xca; HASH_BYTES]); + let some_hash = AccountHash(Hash::new(&[0xca; HASH_BYTES])); db.store_accounts_unfrozen( (some_slot, accounts), Some(vec![&some_hash]), @@ -15841,7 +15847,7 @@ pub mod tests { accounts.accounts_index.set_startup(Startup::Startup); let storage = accounts.create_and_insert_store(slot0, 4_000, "flush_slot_cache"); - let hashes = vec![Hash::default(); 1]; + let hashes = vec![AccountHash(Hash::default()); 1]; let write_version = vec![0; 1]; storage.accounts.append_accounts( &StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( @@ -15906,7 +15912,7 @@ pub mod tests { let account_big = AccountSharedData::new(1, 1000, AccountSharedData::default().owner()); let slot0 = 0; let storage = accounts.create_and_insert_store(slot0, 4_000, "flush_slot_cache"); - let hashes = vec![Hash::default(); 2]; + let hashes = vec![AccountHash(Hash::default()); 2]; let write_version = vec![0; 2]; storage.accounts.append_accounts( &StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index dedec30af24e88..77f1717a9ca259 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -3,11 +3,12 @@ use { account_storage::meta::{ StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta, }, + accounts_hash::AccountHash, append_vec::{AppendVec, AppendVecError, MatchAccountOwnerError}, storable_accounts::StorableAccounts, tiered_storage::error::TieredStorageError, }, - solana_sdk::{account::ReadableAccount, clock::Slot, hash::Hash, pubkey::Pubkey}, + 
solana_sdk::{account::ReadableAccount, clock::Slot, pubkey::Pubkey}, std::{ borrow::Borrow, mem, @@ -154,7 +155,7 @@ impl AccountsFile { 'b, T: ReadableAccount + Sync, U: StorableAccounts<'a, T>, - V: Borrow, + V: Borrow, >( &self, accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index c6a1458a4bb263..315c73289645a1 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1248,12 +1248,6 @@ pub struct AccountHash(pub Hash); // This also ensures there are no padding bytes, which is requried to safely implement Pod const _: () = assert!(std::mem::size_of::() == std::mem::size_of::()); -impl Borrow for AccountHash { - fn borrow(&self) -> &Hash { - &self.0 - } -} - /// Hash of accounts #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum AccountsHashKind { diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index c4ba88c3cc6434..09df6d5df561c0 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -12,6 +12,7 @@ use { INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, }, accounts_file::AccountsFile, + accounts_hash::AccountHash, accounts_index::{AccountsIndexScanResult, ZeroLamport}, active_stats::ActiveStatItem, append_vec::aligned_stored_size, @@ -20,9 +21,7 @@ use { rand::{thread_rng, Rng}, rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, solana_measure::measure_us, - solana_sdk::{ - account::ReadableAccount, clock::Slot, hash::Hash, pubkey::Pubkey, saturating_add_assign, - }, + solana_sdk::{account::ReadableAccount, clock::Slot, pubkey::Pubkey, saturating_add_assign}, std::{ collections::HashMap, num::NonZeroU64, @@ -372,7 +371,7 @@ impl AccountsDb { measure_us!(self.get_store_for_shrink(target_slot, bytes)); let (store_accounts_timing, rewrite_elapsed_us) = measure_us!(self.store_accounts_frozen( 
accounts_to_write, - None::>, + None::>, shrink_in_progress.new_storage(), None, StoreReclaims::Ignore, diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index fce45672f2a9bd..2cd8612828c254 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -11,6 +11,7 @@ use { StoredAccountMeta, StoredMeta, StoredMetaWriteVersion, }, accounts_file::{AccountsFileError, Result, ALIGN_BOUNDARY_OFFSET}, + accounts_hash::AccountHash, storable_accounts::StorableAccounts, u64_align, }, @@ -575,7 +576,7 @@ impl AppendVec { 'b, T: ReadableAccount + Sync, U: StorableAccounts<'a, T>, - V: Borrow, + V: Borrow, >( &self, accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, @@ -611,7 +612,7 @@ impl AppendVec { .map(|account| account.data()) .unwrap_or_default() .as_ptr(); - let hash_ptr = hash.as_ref().as_ptr(); + let hash_ptr = hash.0.as_ref().as_ptr(); let ptrs = [ (meta_ptr as *const u8, mem::size_of::()), (account_meta_ptr as *const u8, mem::size_of::()), @@ -669,7 +670,7 @@ pub mod tests { let accounts = [(&data.0.pubkey, &data.1)]; let slice = &accounts[..]; let account_data = (slot_ignored, slice); - let hash = Hash::default(); + let hash = AccountHash(Hash::default()); let storable_accounts = StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &account_data, @@ -740,7 +741,7 @@ pub mod tests { // for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash) let slot = 0 as Slot; let pubkey = Pubkey::default(); - StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &Hash>::new(&( + StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &AccountHash>::new(&( slot, &[(&pubkey, &account)][..], INCLUDE_SLOT_IN_HASH_TESTS, @@ -755,7 +756,7 @@ pub mod tests { // mismatch between lens of accounts, hashes, write_versions let mut hashes = Vec::default(); if correct_hashes { - hashes.push(Hash::default()); + hashes.push(AccountHash(Hash::default())); } let mut write_versions = 
Vec::default(); if correct_write_versions { @@ -798,7 +799,7 @@ pub mod tests { let account = AccountSharedData::default(); let slot = 0 as Slot; let pubkeys = [Pubkey::default()]; - let hashes = Vec::::default(); + let hashes = Vec::::default(); let write_versions = Vec::default(); let mut accounts = vec![(&pubkeys[0], &account)]; accounts.clear(); @@ -819,7 +820,10 @@ pub mod tests { let account = AccountSharedData::default(); let slot = 0 as Slot; let pubkeys = [Pubkey::from([5; 32]), Pubkey::from([6; 32])]; - let hashes = vec![Hash::new(&[3; 32]), Hash::new(&[4; 32])]; + let hashes = vec![ + AccountHash(Hash::new(&[3; 32])), + AccountHash(Hash::new(&[4; 32])), + ]; let write_versions = vec![42, 43]; let accounts = [(&pubkeys[0], &account), (&pubkeys[1], &account)]; let accounts2 = (slot, &accounts[..], INCLUDE_SLOT_IN_HASH_TESTS); @@ -850,7 +854,7 @@ pub mod tests { // for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash) let slot = 0 as Slot; let pubkey = Pubkey::default(); - let hashes = vec![Hash::default()]; + let hashes = vec![AccountHash(Hash::default())]; let write_versions = vec![0]; let accounts = [(&pubkey, &account)]; let accounts2 = (slot, &accounts[..], INCLUDE_SLOT_IN_HASH_TESTS); diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index 8da6885dce6947..900b4b5ba2fd80 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -178,7 +178,7 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> true } fn hash(&self, index: usize) -> &AccountHash { - bytemuck::cast_ref(self.account(index).hash()) + self.account(index).hash() } fn write_version(&self, index: usize) -> u64 { self.account(index).write_version() @@ -282,7 +282,7 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> for StorableAccountsBySlot< true } fn hash(&self, index: usize) -> &AccountHash { - bytemuck::cast_ref(self.account(index).hash()) + self.account(index).hash() } fn write_version(&self, index: 
usize) -> u64 { self.account(index).write_version() @@ -322,7 +322,7 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> true } fn hash(&self, index: usize) -> &AccountHash { - bytemuck::cast_ref(self.account(index).hash()) + self.account(index).hash() } fn write_version(&self, index: usize) -> u64 { self.account(index).write_version() @@ -606,7 +606,7 @@ pub mod tests { let index = index as usize; assert_eq!(storable.account(index), &raw2[index]); assert_eq!(storable.pubkey(index), raw2[index].pubkey()); - assert_eq!(&storable.hash(index).0, raw2[index].hash()); + assert_eq!(storable.hash(index), raw2[index].hash()); assert_eq!(storable.slot(index), expected_slots[index]); assert_eq!(storable.write_version(index), raw2[index].write_version()); }) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 43d34f1561cca4..6a9f0193fd0fee 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -14,13 +14,14 @@ pub mod writer; use { crate::{ account_storage::meta::{StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo}, + accounts_hash::AccountHash, storable_accounts::StorableAccounts, }, error::TieredStorageError, footer::{AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat}, index::AccountIndexFormat, readable::TieredStorageReader, - solana_sdk::{account::ReadableAccount, hash::Hash}, + solana_sdk::account::ReadableAccount, std::{ borrow::Borrow, fs::OpenOptions, @@ -96,7 +97,7 @@ impl TieredStorage { 'b, T: ReadableAccount + Sync, U: StorableAccounts<'a, T>, - V: Borrow, + V: Borrow, >( &self, accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, @@ -157,6 +158,7 @@ mod tests { solana_sdk::{ account::{Account, AccountSharedData}, clock::Slot, + hash::Hash, pubkey::Pubkey, system_instruction::MAX_PERMITTED_DATA_LENGTH, }, @@ -182,7 +184,7 @@ mod tests { let storable_accounts = StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &account_data, 
- Vec::<&Hash>::new(), + Vec::::new(), Vec::::new(), ); @@ -341,7 +343,7 @@ mod tests { // Slot information is not used here let account_data = (Slot::MAX, &account_refs[..]); - let hashes: Vec<_> = std::iter::repeat_with(Hash::new_unique) + let hashes: Vec<_> = std::iter::repeat_with(|| AccountHash(Hash::new_unique())) .take(account_data_sizes.len()) .collect(); let write_versions: Vec<_> = accounts diff --git a/accounts-db/src/tiered_storage/writer.rs b/accounts-db/src/tiered_storage/writer.rs index 839ddca9a94d98..dece0e42732f49 100644 --- a/accounts-db/src/tiered_storage/writer.rs +++ b/accounts-db/src/tiered_storage/writer.rs @@ -3,13 +3,14 @@ use { crate::{ account_storage::meta::{StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo}, + accounts_hash::AccountHash, storable_accounts::StorableAccounts, tiered_storage::{ error::TieredStorageError, file::TieredStorageFile, footer::TieredStorageFooter, TieredStorageFormat, TieredStorageResult, }, }, - solana_sdk::{account::ReadableAccount, hash::Hash}, + solana_sdk::account::ReadableAccount, std::{borrow::Borrow, path::Path}, }; @@ -35,7 +36,7 @@ impl<'format> TieredStorageWriter<'format> { 'b, T: ReadableAccount + Sync, U: StorableAccounts<'a, T>, - V: Borrow, + V: Borrow, >( &self, accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, diff --git a/accounts-db/store-tool/src/main.rs b/accounts-db/store-tool/src/main.rs index 98140ed59ba0ea..cb5838af4f21ad 100644 --- a/accounts-db/store-tool/src/main.rs +++ b/accounts-db/store-tool/src/main.rs @@ -1,7 +1,9 @@ use { clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg}, log::*, - solana_accounts_db::{account_storage::meta::StoredAccountMeta, append_vec::AppendVec}, + solana_accounts_db::{ + account_storage::meta::StoredAccountMeta, accounts_hash::AccountHash, append_vec::AppendVec, + }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, hash::Hash, @@ -65,7 +67,7 @@ fn main() { } fn is_account_zeroed(account: 
&StoredAccountMeta) -> bool { - account.hash() == &Hash::default() + account.hash() == &AccountHash(Hash::default()) && account.data_len() == 0 && account.write_version() == 0 && account.pubkey() == &Pubkey::default() From d6aba9dc483a79ab569b47b7f3df19e6535f6722 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Oct 2023 19:00:11 -0400 Subject: [PATCH 373/407] Upgrades dashmap dependency to v5.5.3 (#33659) --- Cargo.lock | 89 +++++++++-------------------------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 62 +++++----------------------- 3 files changed, 31 insertions(+), 122 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00f0bd0165ccd0..9a425d81d3dd08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1512,26 +1512,18 @@ dependencies = [ [[package]] name = "dashmap" -version = "4.0.2" +version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if 1.0.0", - "num_cpus", + "hashbrown 0.14.1", + "lock_api", + "once_cell", + "parking_lot_core 0.9.8", "rayon", ] -[[package]] -name = "dashmap" -version = "5.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8858831f7781322e539ea39e72449c46b059638250c14344fec8d0aa6e539c" -dependencies = [ - "cfg-if 1.0.0", - "num_cpus", - "parking_lot 0.12.1", -] - [[package]] name = "data-encoding" version = "2.3.2" @@ -3618,7 +3610,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.1", + "parking_lot_core 0.9.8", ] [[package]] @@ -3637,15 +3629,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.1" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.2.10", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.32.0", + "windows-targets 0.48.0", ] [[package]] @@ -4868,7 +4860,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" dependencies = [ - "dashmap 5.2.0", + "dashmap", "futures 0.3.28", "lazy_static", "log", @@ -5192,7 +5184,7 @@ dependencies = [ "byteorder", "bzip2", "crossbeam-channel", - "dashmap 4.0.2", + "dashmap", "ed25519-dalek", "flate2", "fnv", @@ -5670,7 +5662,7 @@ dependencies = [ "async-trait", "bincode", "crossbeam-channel", - "dashmap 4.0.2", + "dashmap", "futures 0.3.28", "futures-util", "indexmap 2.0.2", @@ -5781,7 +5773,7 @@ dependencies = [ "bytes", "chrono", "crossbeam-channel", - "dashmap 4.0.2", + "dashmap", "eager", "etcd-client", "fs_extra", @@ -6197,7 +6189,7 @@ dependencies = [ "chrono", "chrono-humanize", "crossbeam-channel", - "dashmap 4.0.2", + "dashmap", "fs_extra", "futures 0.3.28", "itertools", @@ -6264,7 +6256,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "csv", - "dashmap 4.0.2", + "dashmap", "futures 0.3.28", "histogram", "itertools", @@ -6740,7 +6732,7 @@ dependencies = [ "bincode", "bs58", "crossbeam-channel", - "dashmap 4.0.2", + "dashmap", "itertools", "jsonrpc-core", "jsonrpc-core-client", @@ -6898,7 +6890,7 @@ dependencies = [ "byteorder", "bzip2", "crossbeam-channel", - "dashmap 4.0.2", + "dashmap", "dir-diff", "ed25519-dalek", "flate2", @@ -8965,19 +8957,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" -dependencies = [ - "windows_aarch64_msvc 0.32.0", - "windows_i686_gnu 0.32.0", - "windows_i686_msvc 0.32.0", - "windows_x86_64_gnu 0.32.0", - "windows_x86_64_msvc 0.32.0", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -9038,12 +9017,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" -[[package]] -name = "windows_aarch64_msvc" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -9056,12 +9029,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" -[[package]] -name = "windows_i686_gnu" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -9074,12 +9041,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" -[[package]] -name = "windows_i686_msvc" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -9092,12 +9053,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" -[[package]] -name = "windows_x86_64_gnu" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -9122,12 +9077,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" -[[package]] -name = "windows_x86_64_msvc" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" diff --git a/Cargo.toml b/Cargo.toml index 095a9a005bc2e0..abb304ca6d14e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -181,7 +181,7 @@ crossbeam-channel = "0.5.8" csv = "1.3.0" ctrlc = "3.4.1" curve25519-dalek = "3.2.1" -dashmap = "4.0.2" +dashmap = "5.5.3" derivation-path = { version = "0.2.0", default-features = false } dialoguer = "0.10.4" digest = "0.10.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 915b4fe0f47c8c..e4117582ab6dda 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1222,12 +1222,15 @@ dependencies = [ [[package]] name = "dashmap" -version = "4.0.2" +version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if 1.0.0", - "num_cpus", + "hashbrown 0.14.1", + "lock_api", + "once_cell", + "parking_lot_core 0.9.8", "rayon", ] @@ -3212,7 +3215,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.1", + "parking_lot_core 0.9.8", ] [[package]] @@ -3231,15 +3234,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.1" +version = "0.9.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.2.10", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.32.0", + "windows-targets 0.48.0", ] [[package]] @@ -7748,19 +7751,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" -dependencies = [ - "windows_aarch64_msvc 0.32.0", - "windows_i686_gnu 0.32.0", - "windows_i686_msvc 0.32.0", - "windows_x86_64_gnu 0.32.0", - "windows_x86_64_msvc 0.32.0", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -7821,12 +7811,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" -[[package]] -name = "windows_aarch64_msvc" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -7839,12 +7823,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" -[[package]] -name = "windows_i686_gnu" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -7857,12 +7835,6 @@ version = "0.48.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" -[[package]] -name = "windows_i686_msvc" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -7875,12 +7847,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" -[[package]] -name = "windows_x86_64_gnu" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -7905,12 +7871,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" -[[package]] -name = "windows_x86_64_msvc" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" From 3bfa0d291a04867e6ce34ba66d1757598d105fd1 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 19 Oct 2023 08:15:35 +0800 Subject: [PATCH 374/407] feature flag cleanup: cap_bpf_program_instruction_accounts (#33746) --- ledger-tool/src/program.rs | 1 - program-test/src/lib.rs | 1 - programs/bpf_loader/benches/serialization.rs | 18 ++++------ programs/bpf_loader/src/lib.rs | 11 +++--- programs/bpf_loader/src/serialization.rs | 35 +------------------- programs/sbf/benches/bpf_loader.rs | 2 -- 6 files changed, 11 insertions(+), 57 deletions(-) diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 4acad738160be0..7420a1f7a10b4b 100644 --- 
a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -573,7 +573,6 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { .transaction_context .get_current_instruction_context() .unwrap(), - true, // should_cap_ix_accounts true, // copy_account_data ) .unwrap(); diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 4cc8fc9ba21bf5..3c26ac25dad59e 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -122,7 +122,6 @@ pub fn builtin_process_instruction( invoke_context .transaction_context .get_current_instruction_context()?, - true, // should_cap_ix_accounts true, // copy_account_data // There is no VM so direct mapping can not be implemented here )?; diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs index 2acd8d374c1f8c..5d3c55a165e399 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -126,8 +126,7 @@ fn bench_serialize_unaligned(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(); bencher.iter(|| { - let _ = - serialize_parameters(&transaction_context, instruction_context, true, false).unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } @@ -138,8 +137,7 @@ fn bench_serialize_unaligned_copy_account_data(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(); bencher.iter(|| { - let _ = - serialize_parameters(&transaction_context, instruction_context, true, true).unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, true).unwrap(); }); } @@ -151,8 +149,7 @@ fn bench_serialize_aligned(bencher: &mut Bencher) { .unwrap(); bencher.iter(|| { - let _ = - serialize_parameters(&transaction_context, instruction_context, true, false).unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } @@ -164,8 +161,7 @@ fn 
bench_serialize_aligned_copy_account_data(bencher: &mut Bencher) { .unwrap(); bencher.iter(|| { - let _ = - serialize_parameters(&transaction_context, instruction_context, true, true).unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, true).unwrap(); }); } @@ -176,8 +172,7 @@ fn bench_serialize_unaligned_max_accounts(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(); bencher.iter(|| { - let _ = - serialize_parameters(&transaction_context, instruction_context, true, false).unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } @@ -189,7 +184,6 @@ fn bench_serialize_aligned_max_accounts(bencher: &mut Bencher) { .unwrap(); bencher.iter(|| { - let _ = - serialize_parameters(&transaction_context, instruction_context, true, false).unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 82c623746406f2..3849f45faa1bb5 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -33,10 +33,10 @@ use { entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, feature_set::{ bpf_account_data_direct_mapping, cap_accounts_data_allocations_per_transaction, - cap_bpf_program_instruction_accounts, delay_visibility_of_program_deployment, - enable_bpf_loader_extend_program_ix, enable_bpf_loader_set_authority_checked_ix, - enable_program_redeployment_cooldown, limit_max_instruction_trace_length, - native_programs_consume_cu, remove_bpf_loader_incorrect_program_id, + delay_visibility_of_program_deployment, enable_bpf_loader_extend_program_ix, + enable_bpf_loader_set_authority_checked_ix, enable_program_redeployment_cooldown, + limit_max_instruction_trace_length, native_programs_consume_cu, + remove_bpf_loader_incorrect_program_id, }, instruction::{AccountMeta, InstructionError}, loader_instruction::LoaderInstruction, @@ -1543,9 
+1543,6 @@ fn execute<'a, 'b: 'a>( let (parameter_bytes, regions, accounts_metadata) = serialization::serialize_parameters( invoke_context.transaction_context, instruction_context, - invoke_context - .feature_set - .is_active(&cap_bpf_program_instruction_accounts::ID), !direct_mapping, )?; serialize_time.stop(); diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index 628b52d5dd4aa1..f9cbc2e752c54d 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -190,7 +190,6 @@ impl Serializer { pub fn serialize_parameters( transaction_context: &TransactionContext, instruction_context: &InstructionContext, - should_cap_ix_accounts: bool, copy_account_data: bool, ) -> Result< ( @@ -201,7 +200,7 @@ pub fn serialize_parameters( InstructionError, > { let num_ix_accounts = instruction_context.get_number_of_instruction_accounts(); - if should_cap_ix_accounts && num_ix_accounts > MAX_INSTRUCTION_ACCOUNTS as IndexOfAccount { + if num_ix_accounts > MAX_INSTRUCTION_ACCOUNTS as IndexOfAccount { return Err(InstructionError::MaxAccountsExceeded); } @@ -641,7 +640,6 @@ mod tests { struct TestCase { num_ix_accounts: usize, append_dup_account: bool, - should_cap_ix_accounts: bool, expected_err: Option, name: &'static str, } @@ -650,55 +648,27 @@ mod tests { for TestCase { num_ix_accounts, append_dup_account, - should_cap_ix_accounts, expected_err, name, } in [ - TestCase { - name: "serialize max accounts without cap", - num_ix_accounts: usize::from(MAX_INSTRUCTION_ACCOUNTS), - should_cap_ix_accounts: false, - append_dup_account: false, - expected_err: None, - }, - TestCase { - name: "serialize max accounts and append dup without cap", - num_ix_accounts: usize::from(MAX_INSTRUCTION_ACCOUNTS), - should_cap_ix_accounts: false, - append_dup_account: true, - expected_err: None, - }, TestCase { name: "serialize max accounts with cap", num_ix_accounts: usize::from(MAX_INSTRUCTION_ACCOUNTS), - 
should_cap_ix_accounts: true, append_dup_account: false, expected_err: None, }, TestCase { name: "serialize too many accounts with cap", num_ix_accounts: usize::from(MAX_INSTRUCTION_ACCOUNTS) + 1, - should_cap_ix_accounts: true, append_dup_account: false, expected_err: Some(InstructionError::MaxAccountsExceeded), }, TestCase { name: "serialize too many accounts and append dup with cap", num_ix_accounts: usize::from(MAX_INSTRUCTION_ACCOUNTS), - should_cap_ix_accounts: true, append_dup_account: true, expected_err: Some(InstructionError::MaxAccountsExceeded), }, - // This test case breaks parameter deserialization and can be cleaned up - // when should_cap_ix_accounts is enabled. - // - // TestCase { - // name: "serialize too many accounts and append dup without cap", - // num_ix_accounts: usize::from(MAX_INSTRUCTION_ACCOUNTS) + 1, - // should_cap_ix_accounts: false, - // append_dup_account: true, - // expected_err: None, - // }, ] { let program_id = solana_sdk::pubkey::new_rand(); let mut transaction_accounts = vec![( @@ -757,7 +727,6 @@ mod tests { let serialization_result = serialize_parameters( invoke_context.transaction_context, instruction_context, - should_cap_ix_accounts, copy_account_data, ); assert_eq!( @@ -912,7 +881,6 @@ mod tests { let (mut serialized, regions, accounts_metadata) = serialize_parameters( invoke_context.transaction_context, instruction_context, - true, copy_account_data, ) .unwrap(); @@ -1004,7 +972,6 @@ mod tests { let (mut serialized, regions, account_lengths) = serialize_parameters( invoke_context.transaction_context, instruction_context, - true, copy_account_data, ) .unwrap(); diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index cfb20868b81094..7ef6966a80dbe0 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -244,7 +244,6 @@ fn bench_create_vm(bencher: &mut Bencher) { .transaction_context .get_current_instruction_context() .unwrap(), - true, // 
should_cap_ix_accounts !direct_mapping, // copy_account_data ) .unwrap(); @@ -279,7 +278,6 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { .transaction_context .get_current_instruction_context() .unwrap(), - true, // should_cap_ix_accounts !direct_mapping, // copy_account_data ) .unwrap(); From 0fcc0a09415019d25b1ec7b74206cee2b7a01d7c Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Oct 2023 20:29:40 -0400 Subject: [PATCH 375/407] Returns AccountHash from get_filler_account() (#33758) --- accounts-db/src/accounts_db.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 9806b186fa34cc..fc4fe5d58eee84 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6623,7 +6623,7 @@ impl AccountsDb { let pubkeys = self.get_filler_account_pubkeys(filler_accounts as usize); pubkeys.iter().for_each(|key| { accounts.push((key, &account)); - hashes.push(AccountHash(hash)); + hashes.push(hash); }); self.store_accounts_frozen( (slot, &accounts[..], include_slot_in_hash), @@ -9098,9 +9098,9 @@ impl AccountsDb { } /// return 'AccountSharedData' and a hash for a filler account - fn get_filler_account(&self, rent: &Rent) -> (AccountSharedData, Hash) { + fn get_filler_account(&self, rent: &Rent) -> (AccountSharedData, AccountHash) { let string = "FiLLERACCoUNTooooooooooooooooooooooooooooooo"; - let hash = Hash::from_str(string).unwrap(); + let hash = AccountHash(Hash::from_str(string).unwrap()); let owner = Pubkey::from_str(string).unwrap(); let space = self.filler_accounts_config.size; let rent_exempt_reserve = rent.minimum_balance(space); From c1353e172cd29105beb2d02766bf198f3af5adff Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 19 Oct 2023 07:01:15 -0400 Subject: [PATCH 376/407] Removes cap_accounts_data_allocations_per_transaction featurization (#33754) --- program-runtime/src/invoke_context.rs | 1 - programs/bpf_loader/src/lib.rs | 23 
+++++++---------------- runtime/src/bank.rs | 6 ------ runtime/src/bank/tests.rs | 3 +-- sdk/src/transaction_context.rs | 26 ++++++-------------------- 5 files changed, 14 insertions(+), 45 deletions(-) diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index abe49ccd84b270..9fbe42d8d40c07 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -655,7 +655,6 @@ macro_rules! with_mock_invoke_context { compute_budget.max_invoke_stack_height, compute_budget.max_instruction_trace_length, ); - $transaction_context.enable_cap_accounts_data_allocations_per_transaction(); let mut sysvar_cache = SysvarCache::default(); sysvar_cache.fill_missing_entries(|pubkey, callback| { for index in 0..$transaction_context.get_number_of_accounts() { diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 3849f45faa1bb5..8e4ead1bf3fce9 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -32,19 +32,16 @@ use { clock::Slot, entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, feature_set::{ - bpf_account_data_direct_mapping, cap_accounts_data_allocations_per_transaction, - delay_visibility_of_program_deployment, enable_bpf_loader_extend_program_ix, - enable_bpf_loader_set_authority_checked_ix, enable_program_redeployment_cooldown, - limit_max_instruction_trace_length, native_programs_consume_cu, - remove_bpf_loader_incorrect_program_id, + bpf_account_data_direct_mapping, delay_visibility_of_program_deployment, + enable_bpf_loader_extend_program_ix, enable_bpf_loader_set_authority_checked_ix, + enable_program_redeployment_cooldown, limit_max_instruction_trace_length, + native_programs_consume_cu, remove_bpf_loader_incorrect_program_id, }, instruction::{AccountMeta, InstructionError}, loader_instruction::LoaderInstruction, loader_upgradeable_instruction::UpgradeableLoaderInstruction, native_loader, - program_error::{ - 
MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED, MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED, - }, + program_error::MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED, program_utils::limited_deserialize, pubkey::Pubkey, saturating_add_assign, @@ -1595,17 +1592,11 @@ fn execute<'a, 'b: 'a>( } match result { ProgramResult::Ok(status) if status != SUCCESS => { - let error: InstructionError = if (status == MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED + let error: InstructionError = if status == MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED && !invoke_context .feature_set - .is_active(&cap_accounts_data_allocations_per_transaction::id())) - || (status == MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED - && !invoke_context - .feature_set - .is_active(&limit_max_instruction_trace_length::id())) + .is_active(&limit_max_instruction_trace_length::id()) { - // Until the cap_accounts_data_allocations_per_transaction feature is - // enabled, map the `MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED` error to `InvalidError`. // Until the limit_max_instruction_trace_length feature is // enabled, map the `MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED` error to `InvalidError`. 
InstructionError::InvalidError diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 852ba85e353a2d..992d1d0dfcae75 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4864,12 +4864,6 @@ impl Bank { std::usize::MAX }, ); - if self - .feature_set - .is_active(&feature_set::cap_accounts_data_allocations_per_transaction::id()) - { - transaction_context.enable_cap_accounts_data_allocations_per_transaction(); - } #[cfg(debug_assertions)] transaction_context.set_signature(tx.signature()); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 58e44366f1d876..82393ef7161a2b 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -12171,8 +12171,7 @@ fn test_cap_accounts_data_allocations_per_transaction() { / MAX_PERMITTED_DATA_LENGTH as usize; let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::cap_accounts_data_allocations_per_transaction::id()); + let bank = Bank::new_for_tests(&genesis_config); let mut instructions = Vec::new(); let mut keypairs = vec![mint_keypair.insecure_clone()]; diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index 266456f219361d..77cbb831fb0561 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -144,8 +144,6 @@ pub struct TransactionContext { accounts_resize_delta: RefCell, #[cfg(not(target_os = "solana"))] rent: Rent, - #[cfg(not(target_os = "solana"))] - is_cap_accounts_data_allocations_per_transaction_enabled: bool, /// Useful for debugging to filter by or to look it up on the explorer #[cfg(all(not(target_os = "solana"), debug_assertions))] signature: Signature, @@ -174,7 +172,6 @@ impl TransactionContext { return_data: TransactionReturnData::default(), accounts_resize_delta: RefCell::new(0), rent, - is_cap_accounts_data_allocations_per_transaction_enabled: false, #[cfg(all(not(target_os = "solana"), 
debug_assertions))] signature: Signature::default(), } @@ -440,12 +437,6 @@ impl TransactionContext { .map_err(|_| InstructionError::GenericError) .map(|value_ref| *value_ref) } - - /// Enables enforcing a maximum accounts data allocation size per transaction - #[cfg(not(target_os = "solana"))] - pub fn enable_cap_accounts_data_allocations_per_transaction(&mut self) { - self.is_cap_accounts_data_allocations_per_transaction_enabled = true; - } } /// Return data at the end of a transaction @@ -1114,20 +1105,15 @@ impl<'a> BorrowedAccount<'a> { if new_length > MAX_PERMITTED_DATA_LENGTH as usize { return Err(InstructionError::InvalidRealloc); } + // The resize can not exceed the per-transaction maximum + let length_delta = (new_length as i64).saturating_sub(old_length as i64); if self .transaction_context - .is_cap_accounts_data_allocations_per_transaction_enabled + .accounts_resize_delta()? + .saturating_add(length_delta) + > MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION { - // The resize can not exceed the per-transaction maximum - let length_delta = (new_length as i64).saturating_sub(old_length as i64); - if self - .transaction_context - .accounts_resize_delta()? - .saturating_add(length_delta) - > MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION - { - return Err(InstructionError::MaxAccountsDataAllocationsExceeded); - } + return Err(InstructionError::MaxAccountsDataAllocationsExceeded); } Ok(()) } From 7aa0faea96426f541b4e20543a0c418340592c79 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 19 Oct 2023 13:25:53 +0000 Subject: [PATCH 377/407] separates out routing repair requests from establishing connections (#33742) Currently each outgoing repair request will attempt to establish a connection if one does not already exist. This is very wasteful and consumes many tokio tasks if the remote node is down or unresponsive. The commit decouples routing packets from establishing connections by adding a buffering channel for each remote address. 
Outgoing packets are always sent down this channel to be processed once the connection is established. If connecting attempt fails, all packets already pushed to the channel are dropped at once, reducing the number of attempts to make a connection if the remote node is down or unresponsive. --- core/src/repair/quic_endpoint.rs | 280 ++++++++++++++++++------------- 1 file changed, 168 insertions(+), 112 deletions(-) diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index f7b445011c937a..bf3a1802144a42 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -21,16 +21,15 @@ use { collections::{hash_map::Entry, HashMap}, io::{Cursor, Error as IoError}, net::{IpAddr, SocketAddr, UdpSocket}, - ops::Deref, sync::Arc, time::Duration, }, thiserror::Error, tokio::{ sync::{ - mpsc::{Receiver as AsyncReceiver, Sender as AsyncSender}, + mpsc::{error::TrySendError, Receiver as AsyncReceiver, Sender as AsyncSender}, oneshot::Sender as OneShotSender, - RwLock, + Mutex, RwLock as AsyncRwLock, }, task::JoinHandle, }, @@ -39,7 +38,8 @@ use { const ALPN_REPAIR_PROTOCOL_ID: &[u8] = b"solana-repair"; const CONNECT_SERVER_NAME: &str = "solana-repair"; -const CLIENT_CHANNEL_CAPACITY: usize = 1 << 14; +const CLIENT_CHANNEL_BUFFER: usize = 1 << 14; +const ROUTER_CHANNEL_BUFFER: usize = 64; const CONNECTION_CACHE_CAPACITY: usize = 4096; const MAX_CONCURRENT_BIDI_STREAMS: VarInt = VarInt::from_u32(512); @@ -54,7 +54,6 @@ const CONNECTION_CLOSE_REASON_INVALID_IDENTITY: &[u8] = b"INVALID_IDENTITY"; const CONNECTION_CLOSE_REASON_REPLACED: &[u8] = b"REPLACED"; pub(crate) type AsyncTryJoinHandle = TryJoin, JoinHandle<()>>; -type ConnectionCache = HashMap<(SocketAddr, Option), Arc>>>; // Outgoing local requests. pub struct LocalRequest { @@ -125,17 +124,20 @@ pub(crate) fn new_quic_endpoint( )? 
}; endpoint.set_default_client_config(client_config); - let cache = Arc::>::default(); - let (client_sender, client_receiver) = tokio::sync::mpsc::channel(CLIENT_CHANNEL_CAPACITY); + let cache = Arc::>>::default(); + let (client_sender, client_receiver) = tokio::sync::mpsc::channel(CLIENT_CHANNEL_BUFFER); + let router = Arc::>>>::default(); let server_task = runtime.spawn(run_server( endpoint.clone(), remote_request_sender.clone(), + router.clone(), cache.clone(), )); let client_task = runtime.spawn(run_client( endpoint.clone(), client_receiver, remote_request_sender, + router, cache, )); let task = futures::future::try_join(server_task, client_task); @@ -187,13 +189,15 @@ fn new_transport_config() -> TransportConfig { async fn run_server( endpoint: Endpoint, remote_request_sender: Sender, - cache: Arc>, + router: Arc>>>, + cache: Arc>>, ) { while let Some(connecting) = endpoint.accept().await { tokio::task::spawn(handle_connecting_error( endpoint.clone(), connecting, remote_request_sender.clone(), + router.clone(), cache.clone(), )); } @@ -203,26 +207,68 @@ async fn run_client( endpoint: Endpoint, mut receiver: AsyncReceiver, remote_request_sender: Sender, - cache: Arc>, + router: Arc>>>, + cache: Arc>>, ) { while let Some(request) = receiver.recv().await { - tokio::task::spawn(send_request_task( + let Some(request) = try_route_request(request, &*router.read().await) else { + continue; + }; + let remote_address = request.remote_address; + let receiver = { + let mut router = router.write().await; + let Some(request) = try_route_request(request, &router) else { + continue; + }; + let (sender, receiver) = tokio::sync::mpsc::channel(ROUTER_CHANNEL_BUFFER); + sender.try_send(request).unwrap(); + router.insert(remote_address, sender); + receiver + }; + tokio::task::spawn(make_connection_task( endpoint.clone(), - request, + remote_address, remote_request_sender.clone(), + receiver, + router.clone(), cache.clone(), )); } close_quic_endpoint(&endpoint); + // Drop sender 
channels to unblock threads waiting on the receiving end. + router.write().await.clear(); +} + +// Routes the local request to respective channel. Drops the request if the +// channel is full. Bounces the request back if the channel is closed or does +// not exist. +fn try_route_request( + request: LocalRequest, + router: &HashMap>, +) -> Option { + match router.get(&request.remote_address) { + None => Some(request), + Some(sender) => match sender.try_send(request) { + Ok(()) => None, + Err(TrySendError::Full(request)) => { + error!("TrySendError::Full {}", request.remote_address); + None + } + Err(TrySendError::Closed(request)) => Some(request), + }, + } } async fn handle_connecting_error( endpoint: Endpoint, connecting: Connecting, remote_request_sender: Sender, - cache: Arc>, + router: Arc>>>, + cache: Arc>>, ) { - if let Err(err) = handle_connecting(endpoint, connecting, remote_request_sender, cache).await { + if let Err(err) = + handle_connecting(endpoint, connecting, remote_request_sender, router, cache).await + { error!("handle_connecting: {err:?}"); } } @@ -231,52 +277,75 @@ async fn handle_connecting( endpoint: Endpoint, connecting: Connecting, remote_request_sender: Sender, - cache: Arc>, + router: Arc>>>, + cache: Arc>>, ) -> Result<(), Error> { let connection = connecting.await?; let remote_address = connection.remote_address(); let remote_pubkey = get_remote_pubkey(&connection)?; - handle_connection_error( + let receiver = { + let (sender, receiver) = tokio::sync::mpsc::channel(ROUTER_CHANNEL_BUFFER); + router.write().await.insert(remote_address, sender); + receiver + }; + handle_connection( endpoint, remote_address, remote_pubkey, connection, remote_request_sender, + receiver, + router, cache, ) .await; Ok(()) } -async fn handle_connection_error( +async fn handle_connection( endpoint: Endpoint, remote_address: SocketAddr, remote_pubkey: Pubkey, connection: Connection, remote_request_sender: Sender, - cache: Arc>, + receiver: AsyncReceiver, + router: 
Arc>>>, + cache: Arc>>, ) { - cache_connection(remote_address, remote_pubkey, connection.clone(), &cache).await; - if let Err(err) = handle_connection( - &endpoint, + cache_connection(remote_pubkey, connection.clone(), &cache).await; + let send_requests_task = tokio::task::spawn(send_requests_task( + endpoint.clone(), + connection.clone(), + receiver, + )); + let recv_requests_task = tokio::task::spawn(recv_requests_task( + endpoint, remote_address, remote_pubkey, - &connection, - &remote_request_sender, - ) - .await - { - drop_connection(remote_address, remote_pubkey, &connection, &cache).await; - error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"); + connection.clone(), + remote_request_sender, + )); + match futures::future::try_join(send_requests_task, recv_requests_task).await { + Err(err) => error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"), + Ok(((), Err(ref err))) => { + error!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + } + Ok(((), Ok(()))) => (), + } + drop_connection(remote_pubkey, &connection, &cache).await; + if let Entry::Occupied(entry) = router.write().await.entry(remote_address) { + if entry.get().is_closed() { + entry.remove(); + } } } -async fn handle_connection( - endpoint: &Endpoint, +async fn recv_requests_task( + endpoint: Endpoint, remote_address: SocketAddr, remote_pubkey: Pubkey, - connection: &Connection, - remote_request_sender: &Sender, + connection: Connection, + remote_request_sender: Sender, ) -> Result<(), Error> { loop { let (send_stream, recv_stream) = connection.accept_bi().await?; @@ -352,32 +421,39 @@ async fn handle_streams( send_stream.finish().await.map_err(Error::from) } -async fn send_request_task( +async fn send_requests_task( endpoint: Endpoint, - request: LocalRequest, - remote_request_sender: Sender, - cache: Arc>, + connection: Connection, + mut receiver: AsyncReceiver, ) { - if let Err(err) = send_request(&endpoint, request, remote_request_sender, 
cache).await { - error!("send_request_task: {err:?}"); + while let Some(request) = receiver.recv().await { + tokio::task::spawn(send_request_task( + endpoint.clone(), + connection.clone(), + request, + )); + } +} + +async fn send_request_task(endpoint: Endpoint, connection: Connection, request: LocalRequest) { + if let Err(err) = send_request(endpoint, connection, request).await { + error!("send_request: {err:?}") } } async fn send_request( - endpoint: &Endpoint, + endpoint: Endpoint, + connection: Connection, LocalRequest { - remote_address, + remote_address: _, bytes, num_expected_responses, response_sender, }: LocalRequest, - remote_request_sender: Sender, - cache: Arc>, ) -> Result<(), Error> { // Assert that send won't block. debug_assert_eq!(response_sender.capacity(), None); const READ_TIMEOUT_DURATION: Duration = Duration::from_secs(10); - let connection = get_connection(endpoint, remote_address, remote_request_sender, cache).await?; let (mut send_stream, mut recv_stream) = connection.open_bi().await?; send_stream.write_all(&bytes).await?; send_stream.finish().await?; @@ -405,50 +481,57 @@ async fn send_request( response_sender .send((remote_address, chunk)) .map_err(|err| { - close_quic_endpoint(endpoint); + close_quic_endpoint(&endpoint); Error::from(err) }) }) } -async fn get_connection( - endpoint: &Endpoint, +async fn make_connection_task( + endpoint: Endpoint, remote_address: SocketAddr, remote_request_sender: Sender, - cache: Arc>, -) -> Result { - let entry = get_cache_entry(remote_address, &cache).await; + receiver: AsyncReceiver, + router: Arc>>>, + cache: Arc>>, +) { + if let Err(err) = make_connection( + endpoint, + remote_address, + remote_request_sender, + receiver, + router, + cache, + ) + .await { - let connection: Option = entry.read().await.clone(); - if let Some(connection) = connection { - if connection.close_reason().is_none() { - return Ok(connection); - } - } + error!("make_connection: {remote_address}, {err:?}"); } - let connection = 
{ - // Need to write lock here so that only one task initiates - // a new connection to the same remote_address. - let mut entry = entry.write().await; - if let Some(connection) = entry.deref() { - if connection.close_reason().is_none() { - return Ok(connection.clone()); - } - } - let connection = endpoint - .connect(remote_address, CONNECT_SERVER_NAME)? - .await?; - entry.insert(connection).clone() - }; - tokio::task::spawn(handle_connection_error( - endpoint.clone(), +} + +async fn make_connection( + endpoint: Endpoint, + remote_address: SocketAddr, + remote_request_sender: Sender, + receiver: AsyncReceiver, + router: Arc>>>, + cache: Arc>>, +) -> Result<(), Error> { + let connection = endpoint + .connect(remote_address, CONNECT_SERVER_NAME)? + .await?; + handle_connection( + endpoint, connection.remote_address(), get_remote_pubkey(&connection)?, - connection.clone(), + connection, remote_request_sender, + receiver, + router, cache, - )); - Ok(connection) + ) + .await; + Ok(()) } fn get_remote_pubkey(connection: &Connection) -> Result { @@ -464,27 +547,13 @@ fn get_remote_pubkey(connection: &Connection) -> Result { } } -async fn get_cache_entry( - remote_address: SocketAddr, - cache: &RwLock, -) -> Arc>> { - let key = (remote_address, /*remote_pubkey:*/ None); - if let Some(entry) = cache.read().await.get(&key) { - return entry.clone(); - } - cache.write().await.entry(key).or_default().clone() -} - async fn cache_connection( - remote_address: SocketAddr, remote_pubkey: Pubkey, connection: Connection, - cache: &RwLock, + cache: &Mutex>, ) { - // The 2nd cache entry with remote_pubkey == None allows to lookup an entry - // only by SocketAddr when establishing outgoing connections. 
- let entries: [Arc>>; 2] = { - let mut cache = cache.write().await; + let old = { + let mut cache = cache.lock().await; if cache.len() >= CONNECTION_CACHE_CAPACITY { connection.close( CONNECTION_CLOSE_ERROR_CODE_DROPPED, @@ -492,15 +561,9 @@ async fn cache_connection( ); return; } - [Some(remote_pubkey), None].map(|remote_pubkey| { - let key = (remote_address, remote_pubkey); - cache.entry(key).or_default().clone() - }) + cache.insert(remote_pubkey, connection) }; - let mut entry = entries[0].write().await; - *entries[1].write().await = Some(connection.clone()); - if let Some(old) = entry.replace(connection) { - drop(entry); + if let Some(old) = old { old.close( CONNECTION_CLOSE_ERROR_CODE_REPLACED, CONNECTION_CLOSE_REASON_REPLACED, @@ -509,26 +572,19 @@ async fn cache_connection( } async fn drop_connection( - remote_address: SocketAddr, remote_pubkey: Pubkey, connection: &Connection, - cache: &RwLock, + cache: &Mutex>, ) { - if connection.close_reason().is_none() { - connection.close( - CONNECTION_CLOSE_ERROR_CODE_DROPPED, - CONNECTION_CLOSE_REASON_DROPPED, - ); - } - let key = (remote_address, Some(remote_pubkey)); - if let Entry::Occupied(entry) = cache.write().await.entry(key) { - if matches!(entry.get().read().await.deref(), - Some(entry) if entry.stable_id() == connection.stable_id()) - { + connection.close( + CONNECTION_CLOSE_ERROR_CODE_DROPPED, + CONNECTION_CLOSE_REASON_DROPPED, + ); + if let Entry::Occupied(entry) = cache.lock().await.entry(remote_pubkey) { + if entry.get().stable_id() == connection.stable_id() { entry.remove(); } } - // Cache entry for (remote_address, None) will be lazily evicted. 
} impl From> for Error { From 1fb4d3bd61332855aa076e40d93a97c319ead553 Mon Sep 17 00:00:00 2001 From: Surav Shrestha <148448735+suravkshrestha@users.noreply.github.com> Date: Thu, 19 Oct 2023 21:13:04 +0545 Subject: [PATCH 378/407] docs: fix typo in watchtower/README.md (#33771) --- watchtower/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/watchtower/README.md b/watchtower/README.md index 33bad3e458c449..33a13939cd260c 100644 --- a/watchtower/README.md +++ b/watchtower/README.md @@ -9,7 +9,7 @@ If you only care about the health of one specific validator, the notifications to issues only affecting that validator. If you do not want duplicate notifications, for example if you have elected to -recieve notifications by SMS the +receive notifications by SMS the `--no-duplicate-notifications` command-line argument will suppress identical failure notifications. From 8becb72b3eebd932e88cf975642f89b50526559e Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 19 Oct 2023 15:44:15 +0000 Subject: [PATCH 379/407] separates out routing shreds from establishing connections (#33599) Currently each outgoing shred will attempt to establish a connection if one does not already exist. This is very wasteful and consumes many tokio tasks if the remote node is down or unresponsive. The commit decouples routing packets from establishing connections by adding a buffering channel for each remote address. Outgoing packets are always sent down this channel to be processed once the connection is established. If connecting attempt fails, all packets already pushed to the channel are dropped at once, reducing the number of attempts to make a connection if the remote node is down or unresponsive. 
--- turbine/src/quic_endpoint.rs | 276 +++++++++++++++++++---------------- 1 file changed, 153 insertions(+), 123 deletions(-) diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index 0f93391e042b47..9be1dd11294c62 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -18,20 +18,20 @@ use { collections::{hash_map::Entry, HashMap}, io::Error as IoError, net::{IpAddr, SocketAddr, UdpSocket}, - ops::Deref, sync::Arc, }, thiserror::Error, tokio::{ sync::{ - mpsc::{Receiver as AsyncReceiver, Sender as AsyncSender}, - RwLock, + mpsc::{error::TrySendError, Receiver as AsyncReceiver, Sender as AsyncSender}, + Mutex, RwLock as AsyncRwLock, }, task::JoinHandle, }, }; -const CLIENT_CHANNEL_CAPACITY: usize = 1 << 20; +const CLIENT_CHANNEL_BUFFER: usize = 1 << 14; +const ROUTER_CHANNEL_BUFFER: usize = 64; const INITIAL_MAXIMUM_TRANSMISSION_UNIT: u16 = 1280; const ALPN_TURBINE_PROTOCOL_ID: &[u8] = b"solana-turbine"; const CONNECT_SERVER_NAME: &str = "solana-turbine"; @@ -47,7 +47,6 @@ const CONNECTION_CLOSE_REASON_INVALID_IDENTITY: &[u8] = b"INVALID_IDENTITY"; const CONNECTION_CLOSE_REASON_REPLACED: &[u8] = b"REPLACED"; pub type AsyncTryJoinHandle = TryJoin, JoinHandle<()>>; -type ConnectionCache = HashMap<(SocketAddr, Option), Arc>>>; #[derive(Error, Debug)] pub enum Error { @@ -99,10 +98,22 @@ pub fn new_quic_endpoint( )? 
}; endpoint.set_default_client_config(client_config); - let cache = Arc::>::default(); - let (client_sender, client_receiver) = tokio::sync::mpsc::channel(CLIENT_CHANNEL_CAPACITY); - let server_task = runtime.spawn(run_server(endpoint.clone(), sender.clone(), cache.clone())); - let client_task = runtime.spawn(run_client(endpoint.clone(), client_receiver, sender, cache)); + let cache = Arc::>>::default(); + let router = Arc::>>>::default(); + let (client_sender, client_receiver) = tokio::sync::mpsc::channel(CLIENT_CHANNEL_BUFFER); + let server_task = runtime.spawn(run_server( + endpoint.clone(), + sender.clone(), + router.clone(), + cache.clone(), + )); + let client_task = runtime.spawn(run_client( + endpoint.clone(), + client_receiver, + sender, + router, + cache, + )); let task = futures::future::try_join(server_task, client_task); Ok((endpoint, client_sender, task)) } @@ -152,13 +163,15 @@ fn new_transport_config() -> TransportConfig { async fn run_server( endpoint: Endpoint, sender: Sender<(Pubkey, SocketAddr, Bytes)>, - cache: Arc>, + router: Arc>>>, + cache: Arc>>, ) { while let Some(connecting) = endpoint.accept().await { tokio::task::spawn(handle_connecting_error( endpoint.clone(), connecting, sender.clone(), + router.clone(), cache.clone(), )); } @@ -168,27 +181,63 @@ async fn run_client( endpoint: Endpoint, mut receiver: AsyncReceiver<(SocketAddr, Bytes)>, sender: Sender<(Pubkey, SocketAddr, Bytes)>, - cache: Arc>, + router: Arc>>>, + cache: Arc>>, ) { while let Some((remote_address, bytes)) = receiver.recv().await { - tokio::task::spawn(send_datagram_task( + let Some(bytes) = try_route_bytes(&remote_address, bytes, &*router.read().await) else { + continue; + }; + let receiver = { + let mut router = router.write().await; + let Some(bytes) = try_route_bytes(&remote_address, bytes, &router) else { + continue; + }; + let (sender, receiver) = tokio::sync::mpsc::channel(ROUTER_CHANNEL_BUFFER); + sender.try_send(bytes).unwrap(); + router.insert(remote_address, 
sender); + receiver + }; + tokio::task::spawn(make_connection_task( endpoint.clone(), remote_address, - bytes, sender.clone(), + receiver, + router.clone(), cache.clone(), )); } close_quic_endpoint(&endpoint); + // Drop sender channels to unblock threads waiting on the receiving end. + router.write().await.clear(); +} + +fn try_route_bytes( + remote_address: &SocketAddr, + bytes: Bytes, + router: &HashMap>, +) -> Option { + match router.get(remote_address) { + None => Some(bytes), + Some(sender) => match sender.try_send(bytes) { + Ok(()) => None, + Err(TrySendError::Full(_)) => { + error!("TrySendError::Full {remote_address}"); + None + } + Err(TrySendError::Closed(bytes)) => Some(bytes), + }, + } } async fn handle_connecting_error( endpoint: Endpoint, connecting: Connecting, sender: Sender<(Pubkey, SocketAddr, Bytes)>, - cache: Arc>, + router: Arc>>>, + cache: Arc>>, ) { - if let Err(err) = handle_connecting(endpoint, connecting, sender, cache).await { + if let Err(err) = handle_connecting(endpoint, connecting, sender, router, cache).await { error!("handle_connecting: {err:?}"); } } @@ -197,52 +246,75 @@ async fn handle_connecting( endpoint: Endpoint, connecting: Connecting, sender: Sender<(Pubkey, SocketAddr, Bytes)>, - cache: Arc>, + router: Arc>>>, + cache: Arc>>, ) -> Result<(), Error> { let connection = connecting.await?; let remote_address = connection.remote_address(); let remote_pubkey = get_remote_pubkey(&connection)?; - handle_connection_error( + let receiver = { + let (sender, receiver) = tokio::sync::mpsc::channel(ROUTER_CHANNEL_BUFFER); + router.write().await.insert(remote_address, sender); + receiver + }; + handle_connection( endpoint, remote_address, remote_pubkey, connection, sender, + receiver, + router, cache, ) .await; Ok(()) } -async fn handle_connection_error( +async fn handle_connection( endpoint: Endpoint, remote_address: SocketAddr, remote_pubkey: Pubkey, connection: Connection, sender: Sender<(Pubkey, SocketAddr, Bytes)>, - cache: Arc>, + 
receiver: AsyncReceiver, + router: Arc>>>, + cache: Arc>>, ) { - cache_connection(remote_address, remote_pubkey, connection.clone(), &cache).await; - if let Err(err) = handle_connection( - &endpoint, + cache_connection(remote_pubkey, connection.clone(), &cache).await; + let send_datagram_task = tokio::task::spawn(send_datagram_task(connection.clone(), receiver)); + let read_datagram_task = tokio::task::spawn(read_datagram_task( + endpoint, remote_address, remote_pubkey, - &connection, - &sender, - ) - .await - { - drop_connection(remote_address, remote_pubkey, &connection, &cache).await; - error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"); + connection.clone(), + sender, + )); + match futures::future::try_join(send_datagram_task, read_datagram_task).await { + Err(err) => error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"), + Ok(out) => { + if let (Err(ref err), _) = out { + error!("send_datagram_task: {remote_pubkey}, {remote_address}, {err:?}"); + } + if let (_, Err(ref err)) = out { + error!("read_datagram_task: {remote_pubkey}, {remote_address}, {err:?}"); + } + } + } + drop_connection(remote_pubkey, &connection, &cache).await; + if let Entry::Occupied(entry) = router.write().await.entry(remote_address) { + if entry.get().is_closed() { + entry.remove(); + } } } -async fn handle_connection( - endpoint: &Endpoint, +async fn read_datagram_task( + endpoint: Endpoint, remote_address: SocketAddr, remote_pubkey: Pubkey, - connection: &Connection, - sender: &Sender<(Pubkey, SocketAddr, Bytes)>, + connection: Connection, + sender: Sender<(Pubkey, SocketAddr, Bytes)>, ) -> Result<(), Error> { // Assert that send won't block. 
debug_assert_eq!(sender.capacity(), None); @@ -250,7 +322,7 @@ async fn handle_connection( match connection.read_datagram().await { Ok(bytes) => { if let Err(err) = sender.send((remote_pubkey, remote_address, bytes)) { - close_quic_endpoint(endpoint); + close_quic_endpoint(&endpoint); return Err(Error::from(err)); } } @@ -265,67 +337,53 @@ async fn handle_connection( } async fn send_datagram_task( + connection: Connection, + mut receiver: AsyncReceiver, +) -> Result<(), Error> { + while let Some(bytes) = receiver.recv().await { + connection.send_datagram(bytes)?; + } + Ok(()) +} + +async fn make_connection_task( endpoint: Endpoint, remote_address: SocketAddr, - bytes: Bytes, sender: Sender<(Pubkey, SocketAddr, Bytes)>, - cache: Arc>, + receiver: AsyncReceiver, + router: Arc>>>, + cache: Arc>>, ) { - if let Err(err) = send_datagram(&endpoint, remote_address, bytes, sender, cache).await { - error!("send_datagram: {remote_address}, {err:?}"); + if let Err(err) = + make_connection(endpoint, remote_address, sender, receiver, router, cache).await + { + error!("make_connection: {remote_address}, {err:?}"); } } -async fn send_datagram( - endpoint: &Endpoint, +async fn make_connection( + endpoint: Endpoint, remote_address: SocketAddr, - bytes: Bytes, sender: Sender<(Pubkey, SocketAddr, Bytes)>, - cache: Arc>, + receiver: AsyncReceiver, + router: Arc>>>, + cache: Arc>>, ) -> Result<(), Error> { - let connection = get_connection(endpoint, remote_address, sender, cache).await?; - connection.send_datagram(bytes)?; - Ok(()) -} - -async fn get_connection( - endpoint: &Endpoint, - remote_address: SocketAddr, - sender: Sender<(Pubkey, SocketAddr, Bytes)>, - cache: Arc>, -) -> Result { - let entry = get_cache_entry(remote_address, &cache).await; - { - let connection: Option = entry.read().await.clone(); - if let Some(connection) = connection { - if connection.close_reason().is_none() { - return Ok(connection); - } - } - } - let connection = { - // Need to write lock here so that 
only one task initiates - // a new connection to the same remote_address. - let mut entry = entry.write().await; - if let Some(connection) = entry.deref() { - if connection.close_reason().is_none() { - return Ok(connection.clone()); - } - } - let connection = endpoint - .connect(remote_address, CONNECT_SERVER_NAME)? - .await?; - entry.insert(connection).clone() - }; - tokio::task::spawn(handle_connection_error( - endpoint.clone(), + let connection = endpoint + .connect(remote_address, CONNECT_SERVER_NAME)? + .await?; + handle_connection( + endpoint, connection.remote_address(), get_remote_pubkey(&connection)?, - connection.clone(), + connection, sender, + receiver, + router, cache, - )); - Ok(connection) + ) + .await; + Ok(()) } fn get_remote_pubkey(connection: &Connection) -> Result { @@ -341,62 +399,34 @@ fn get_remote_pubkey(connection: &Connection) -> Result { } } -async fn get_cache_entry( - remote_address: SocketAddr, - cache: &RwLock, -) -> Arc>> { - let key = (remote_address, /*remote_pubkey:*/ None); - if let Some(entry) = cache.read().await.get(&key) { - return entry.clone(); - } - cache.write().await.entry(key).or_default().clone() -} - async fn cache_connection( - remote_address: SocketAddr, remote_pubkey: Pubkey, connection: Connection, - cache: &RwLock, + cache: &Mutex>, ) { - let entries: [Arc>>; 2] = { - let mut cache = cache.write().await; - [Some(remote_pubkey), None].map(|remote_pubkey| { - let key = (remote_address, remote_pubkey); - cache.entry(key).or_default().clone() - }) + let Some(old) = cache.lock().await.insert(remote_pubkey, connection) else { + return; }; - let mut entry = entries[0].write().await; - *entries[1].write().await = Some(connection.clone()); - if let Some(old) = entry.replace(connection) { - drop(entry); - old.close( - CONNECTION_CLOSE_ERROR_CODE_REPLACED, - CONNECTION_CLOSE_REASON_REPLACED, - ); - } + old.close( + CONNECTION_CLOSE_ERROR_CODE_REPLACED, + CONNECTION_CLOSE_REASON_REPLACED, + ); } async fn drop_connection( - 
remote_address: SocketAddr, remote_pubkey: Pubkey, connection: &Connection, - cache: &RwLock, + cache: &Mutex>, ) { - if connection.close_reason().is_none() { - connection.close( - CONNECTION_CLOSE_ERROR_CODE_DROPPED, - CONNECTION_CLOSE_REASON_DROPPED, - ); - } - let key = (remote_address, Some(remote_pubkey)); - if let Entry::Occupied(entry) = cache.write().await.entry(key) { - if matches!(entry.get().read().await.deref(), - Some(entry) if entry.stable_id() == connection.stable_id()) - { + connection.close( + CONNECTION_CLOSE_ERROR_CODE_DROPPED, + CONNECTION_CLOSE_REASON_DROPPED, + ); + if let Entry::Occupied(entry) = cache.lock().await.entry(remote_pubkey) { + if entry.get().stable_id() == connection.stable_id() { entry.remove(); } } - // Cache entry for (remote_address, None) will be lazily evicted. } impl From> for Error { From 4e5c545e23fb9a74c2fed5b5ea49edb02f64744e Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 19 Oct 2023 11:52:36 -0400 Subject: [PATCH 380/407] Uses AccountHash in tiered storage (#33763) --- accounts-db/src/account_storage/meta.rs | 9 +++--- accounts-db/src/tiered_storage/byte_block.rs | 7 +++-- accounts-db/src/tiered_storage/hot.rs | 31 ++++++++++++-------- accounts-db/src/tiered_storage/meta.rs | 24 ++++++++------- accounts-db/src/tiered_storage/readable.rs | 17 ++++++----- 5 files changed, 49 insertions(+), 39 deletions(-) diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index 4f6a40a92d6d86..0cdf200f70669e 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -126,11 +126,10 @@ impl<'storage> StoredAccountMeta<'storage> { } pub fn hash(&self) -> &'storage AccountHash { - let hash = match self { - Self::AppendVec(av) => av.hash(), - Self::Hot(hot) => hot.hash().unwrap_or(&DEFAULT_ACCOUNT_HASH.0), - }; - bytemuck::cast_ref(hash) + match self { + Self::AppendVec(av) => bytemuck::cast_ref(av.hash()), + Self::Hot(hot) => 
hot.hash().unwrap_or(&DEFAULT_ACCOUNT_HASH), + } } pub fn stored_size(&self) -> usize { diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs index 53af0a71374c85..e0fa8b4b136b3b 100644 --- a/accounts-db/src/tiered_storage/byte_block.rs +++ b/accounts-db/src/tiered_storage/byte_block.rs @@ -151,6 +151,7 @@ impl ByteBlockReader { mod tests { use { super::*, + crate::accounts_hash::AccountHash, solana_sdk::{hash::Hash, stake_history::Epoch}, }; @@ -311,7 +312,7 @@ mod tests { // prepare a vector of optional fields that contains all combinations // of Some and None. for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(Hash::new_unique())] { + for account_hash in [None, Some(AccountHash(Hash::new_unique()))] { some_count += rent_epoch.iter().count() + account_hash.iter().count(); opt_fields_vec.push(AccountMetaOptionalFields { @@ -351,10 +352,10 @@ mod tests { offset += std::mem::size_of::(); } if let Some(expected_hash) = opt_fields.account_hash { - let hash = read_type::(&decoded_buffer, offset).unwrap(); + let hash = read_type::(&decoded_buffer, offset).unwrap(); assert_eq!(hash, &expected_hash); verified_count += 1; - offset += std::mem::size_of::(); + offset += std::mem::size_of::(); } } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 0ae2a597ab0b43..78271700686dd2 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -2,17 +2,22 @@ //! The account meta and related structs for hot accounts. 
use { - crate::tiered_storage::{ - byte_block, - footer::{AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter}, - index::AccountIndexFormat, - meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, - mmap_utils::get_type, - TieredStorageFormat, TieredStorageResult, + crate::{ + accounts_hash::AccountHash, + tiered_storage::{ + byte_block, + footer::{ + AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter, + }, + index::AccountIndexFormat, + meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, + mmap_utils::get_type, + TieredStorageFormat, TieredStorageResult, + }, }, memmap2::{Mmap, MmapOptions}, modular_bitfield::prelude::*, - solana_sdk::{hash::Hash, stake_history::Epoch}, + solana_sdk::stake_history::Epoch, std::{fs::OpenOptions, option::Option, path::Path}, }; @@ -152,13 +157,13 @@ impl TieredAccountMeta for HotAccountMeta { /// Returns the account hash by parsing the specified account block. None /// will be returned if this account does not persist this optional field. 
- fn account_hash<'a>(&self, account_block: &'a [u8]) -> Option<&'a Hash> { + fn account_hash<'a>(&self, account_block: &'a [u8]) -> Option<&'a AccountHash> { self.flags() .has_account_hash() .then(|| { let offset = self.optional_fields_offset(account_block) + AccountMetaOptionalFields::account_hash_offset(self.flags()); - byte_block::read_type::(account_block, offset) + byte_block::read_type::(account_block, offset) }) .flatten() } @@ -239,9 +244,9 @@ pub mod tests { index::AccountIndexFormat, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, }, - ::solana_sdk::{hash::Hash, pubkey::Pubkey, stake_history::Epoch}, memoffset::offset_of, rand::Rng, + solana_sdk::{hash::Hash, pubkey::Pubkey, stake_history::Epoch}, tempfile::TempDir, }; @@ -304,7 +309,7 @@ pub mod tests { let optional_fields = AccountMetaOptionalFields { rent_epoch: Some(TEST_RENT_EPOCH), - account_hash: Some(Hash::new_unique()), + account_hash: Some(AccountHash(Hash::new_unique())), }; let flags = AccountMetaFlags::new_from(&optional_fields); @@ -331,7 +336,7 @@ pub mod tests { let optional_fields = AccountMetaOptionalFields { rent_epoch: Some(TEST_RENT_EPOCH), - account_hash: Some(Hash::new_unique()), + account_hash: Some(AccountHash(Hash::new_unique())), }; let flags = AccountMetaFlags::new_from(&optional_fields); diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index 20147bdaf141ce..668c6ab93d8310 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ b/accounts-db/src/tiered_storage/meta.rs @@ -1,8 +1,8 @@ #![allow(dead_code)] //! The account meta and related structs for the tiered storage. use { - ::solana_sdk::{hash::Hash, stake_history::Epoch}, - modular_bitfield::prelude::*, + crate::accounts_hash::AccountHash, modular_bitfield::prelude::*, + solana_sdk::stake_history::Epoch, }; /// The struct that handles the account meta flags. 
@@ -65,7 +65,7 @@ pub trait TieredAccountMeta: Sized { /// Returns the account hash by parsing the specified account block. None /// will be returned if this account does not persist this optional field. - fn account_hash<'a>(&self, _account_block: &'a [u8]) -> Option<&'a Hash>; + fn account_hash<'a>(&self, _account_block: &'a [u8]) -> Option<&'a AccountHash>; /// Returns the offset of the optional fields based on the specified account /// block. @@ -98,14 +98,16 @@ pub struct AccountMetaOptionalFields { /// the epoch at which its associated account will next owe rent pub rent_epoch: Option, /// the hash of its associated account - pub account_hash: Option, + pub account_hash: Option, } impl AccountMetaOptionalFields { /// The size of the optional fields in bytes (excluding the boolean flags). pub fn size(&self) -> usize { self.rent_epoch.map_or(0, |_| std::mem::size_of::()) - + self.account_hash.map_or(0, |_| std::mem::size_of::()) + + self + .account_hash + .map_or(0, |_| std::mem::size_of::()) } /// Given the specified AccountMetaFlags, returns the size of its @@ -116,7 +118,7 @@ impl AccountMetaOptionalFields { fields_size += std::mem::size_of::(); } if flags.has_account_hash() { - fields_size += std::mem::size_of::(); + fields_size += std::mem::size_of::(); } fields_size @@ -142,7 +144,7 @@ impl AccountMetaOptionalFields { #[cfg(test)] pub mod tests { - use super::*; + use {super::*, solana_sdk::hash::Hash}; #[test] fn test_account_meta_flags_new() { @@ -194,7 +196,7 @@ pub mod tests { let test_epoch = 5432312; for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(Hash::new_unique())] { + for account_hash in [None, Some(AccountHash(Hash::new_unique()))] { update_and_verify_flags(&AccountMetaOptionalFields { rent_epoch, account_hash, @@ -208,7 +210,7 @@ pub mod tests { let test_epoch = 5432312; for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(Hash::new_unique())] { + for account_hash in [None, 
Some(AccountHash(Hash::new_unique()))] { let opt_fields = AccountMetaOptionalFields { rent_epoch, account_hash, @@ -216,7 +218,7 @@ pub mod tests { assert_eq!( opt_fields.size(), rent_epoch.map_or(0, |_| std::mem::size_of::()) - + account_hash.map_or(0, |_| std::mem::size_of::()) + + account_hash.map_or(0, |_| std::mem::size_of::()) ); assert_eq!( opt_fields.size(), @@ -233,7 +235,7 @@ pub mod tests { let test_epoch = 5432312; for rent_epoch in [None, Some(test_epoch)] { - for account_hash in [None, Some(Hash::new_unique())] { + for account_hash in [None, Some(AccountHash(Hash::new_unique()))] { let rent_epoch_offset = 0; let account_hash_offset = rent_epoch_offset + rent_epoch.as_ref().map(std::mem::size_of_val).unwrap_or(0); diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 426da02ccbd260..629f08fa1d3fe6 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -1,11 +1,14 @@ use { - crate::tiered_storage::{ - footer::{AccountMetaFormat, TieredStorageFooter}, - hot::HotStorageReader, - meta::TieredAccountMeta, - TieredStorageResult, + crate::{ + accounts_hash::AccountHash, + tiered_storage::{ + footer::{AccountMetaFormat, TieredStorageFooter}, + hot::HotStorageReader, + meta::TieredAccountMeta, + TieredStorageResult, + }, }, - solana_sdk::{account::ReadableAccount, hash::Hash, pubkey::Pubkey, stake_history::Epoch}, + solana_sdk::{account::ReadableAccount, pubkey::Pubkey, stake_history::Epoch}, std::path::Path, }; @@ -32,7 +35,7 @@ impl<'accounts_file, M: TieredAccountMeta> TieredReadableAccount<'accounts_file, } /// Returns the hash of this account. 
- pub fn hash(&self) -> Option<&'accounts_file Hash> { + pub fn hash(&self) -> Option<&'accounts_file AccountHash> { self.meta.account_hash(self.account_block) } From c73bebe9847ecd5a1cbffa96bf03e03a7683232f Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Thu, 19 Oct 2023 11:10:42 -0500 Subject: [PATCH 381/407] Split compute budget instructions process from struct itself (#33513) * Split compute budget instruction processing from ComputeBudget struct itself, allow compute_budget_instructions be processed elsewhere without having to instantiate ComputeBudget * updated tests --- accounts-db/src/accounts.rs | 52 +- cost-model/src/cost_model.rs | 69 +- program-runtime/src/compute_budget.rs | 648 +----------------- .../src/compute_budget_processor.rs | 619 +++++++++++++++++ program-runtime/src/invoke_context.rs | 11 +- program-runtime/src/lib.rs | 1 + programs/sbf/tests/programs.rs | 17 +- runtime/src/bank.rs | 67 +- runtime/src/bank/tests.rs | 34 +- runtime/src/transaction_priority_details.rs | 40 +- 10 files changed, 796 insertions(+), 762 deletions(-) create mode 100644 program-runtime/src/compute_budget_processor.rs diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 47b372d981843a..4ff891fc8bd0a8 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -25,7 +25,7 @@ use { itertools::Itertools, log::*, solana_program_runtime::{ - compute_budget::{self, ComputeBudget}, + compute_budget_processor::process_compute_budget_instructions, loaded_programs::LoadedProgramsForTxBatch, }, solana_sdk::{ @@ -35,9 +35,8 @@ use { bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{BankId, Slot}, feature_set::{ - self, add_set_tx_loaded_accounts_data_size_instruction, - include_loaded_accounts_data_size_in_fee_calculation, - remove_congestion_multiplier_from_fee_calculation, remove_deprecated_request_unit_ix, + self, include_loaded_accounts_data_size_in_fee_calculation, + 
remove_congestion_multiplier_from_fee_calculation, simplify_writable_program_account_check, FeatureSet, }, fee::FeeStructure, @@ -247,15 +246,16 @@ impl Accounts { feature_set: &FeatureSet, ) -> Result> { if feature_set.is_active(&feature_set::cap_transaction_accounts_data_size::id()) { - let mut compute_budget = - ComputeBudget::new(compute_budget::MAX_COMPUTE_UNIT_LIMIT as u64); - let _process_transaction_result = compute_budget.process_instructions( + let compute_budget_limits = process_compute_budget_instructions( tx.message().program_instructions_iter(), - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ); + feature_set, + ) + .unwrap_or_default(); // sanitize against setting size limit to zero - NonZeroUsize::new(compute_budget.loaded_accounts_data_size_limit).map_or( + NonZeroUsize::new( + usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap_or_default(), + ) + .map_or( Err(TransactionError::InvalidLoadedAccountsDataSizeLimit), |v| Ok(Some(v)), ) @@ -722,7 +722,7 @@ impl Accounts { fee_structure.calculate_fee( tx.message(), lamports_per_signature, - &ComputeBudget::fee_budget_limits(tx.message().program_instructions_iter(), feature_set), + &process_compute_budget_instructions(tx.message().program_instructions_iter(), feature_set).unwrap_or_default().into(), feature_set.is_active(&remove_congestion_multiplier_from_fee_calculation::id()), feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), ) @@ -1474,8 +1474,9 @@ mod tests { transaction_results::{DurableNonceFee, TransactionExecutionDetails}, }, assert_matches::assert_matches, - solana_program_runtime::prioritization_fee::{ - PrioritizationFeeDetails, PrioritizationFeeType, + solana_program_runtime::{ + compute_budget_processor, + prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, 
@@ -1751,13 +1752,15 @@ mod tests { ); let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set), + &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) + .unwrap_or_default() + .into(), true, false, ); @@ -4253,7 +4256,11 @@ mod tests { let result_no_limit = Ok(None); let result_default_limit = Ok(Some( - NonZeroUsize::new(compute_budget::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES).unwrap(), + NonZeroUsize::new( + usize::try_from(compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) + .unwrap(), + ) + .unwrap(), )); let result_requested_limit: Result> = Ok(Some(NonZeroUsize::new(99).unwrap())); @@ -4281,7 +4288,10 @@ mod tests { // if tx doesn't set limit, then default limit (64MiB) // if tx sets limit, then requested limit // if tx sets limit to zero, then TransactionError::InvalidLoadedAccountsDataSizeLimit - feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); + feature_set.activate( + &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(), + 0, + ); test(tx_not_set_limit, &feature_set, &result_default_limit); test(tx_set_limit_99, &feature_set, &result_requested_limit); test(tx_set_limit_0, &feature_set, &result_invalid_limit); @@ -4316,13 +4326,15 @@ mod tests { ); let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); let fee = 
FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set), + &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) + .unwrap_or_default() + .into(), true, false, ); diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index 0e8d6954202351..bb3e296d6dcbe0 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -8,17 +8,17 @@ use { crate::{block_cost_limits::*, transaction_cost::*}, log::*, - solana_program_runtime::compute_budget::{ - ComputeBudget, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, + solana_program_runtime::{ + compute_budget::DEFAULT_HEAP_COST, + compute_budget_processor::{ + process_compute_budget_instructions, ComputeBudgetLimits, + DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, + }, }, solana_sdk::{ borsh0_10::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - feature_set::{ - add_set_tx_loaded_accounts_data_size_instruction, - include_loaded_accounts_data_size_in_fee_calculation, - remove_deprecated_request_unit_ix, FeatureSet, - }, + feature_set::{include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, fee::FeeStructure, instruction::CompiledInstruction, program_utils::limited_deserialize, @@ -62,10 +62,12 @@ impl CostModel { // to set limit, `compute_budget.loaded_accounts_data_size_limit` is set to default // limit of 64MB; which will convert to (64M/32K)*8CU = 16_000 CUs // - pub fn calculate_loaded_accounts_data_size_cost(compute_budget: &ComputeBudget) -> u64 { + pub fn calculate_loaded_accounts_data_size_cost( + compute_budget_limits: &ComputeBudgetLimits, + ) -> u64 { FeeStructure::calculate_memory_usage_cost( - compute_budget.loaded_accounts_data_size_limit, - compute_budget.heap_cost, + usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap(), + DEFAULT_HEAP_COST, ) } @@ 
-128,32 +130,28 @@ impl CostModel { } // calculate bpf cost based on compute budget instructions - let mut compute_budget = ComputeBudget::default(); - - let result = compute_budget.process_instructions( - transaction.message().program_instructions_iter(), - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ); // if failed to process compute_budget instructions, the transaction will not be executed // by `bank`, therefore it should be considered as no execution cost by cost model. - match result { - Ok(_) => { + match process_compute_budget_instructions( + transaction.message().program_instructions_iter(), + feature_set, + ) { + Ok(compute_budget_limits) => { // if tx contained user-space instructions and a more accurate estimate available correct it, // where "user-space instructions" must be specifically checked by // 'compute_unit_limit_is_set' flag, because compute_budget does not distinguish // builtin and bpf instructions when calculating default compute-unit-limit. 
(see // compute_budget.rs test `test_process_mixed_instructions_without_compute_budget`) if bpf_costs > 0 && compute_unit_limit_is_set { - bpf_costs = compute_budget.compute_unit_limit + bpf_costs = u64::from(compute_budget_limits.compute_unit_limit); } if feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()) { loaded_accounts_data_size_cost = - Self::calculate_loaded_accounts_data_size_cost(&compute_budget); + Self::calculate_loaded_accounts_data_size_cost(&compute_budget_limits); } } Err(_) => { @@ -545,7 +543,8 @@ mod tests { // default loaded_accounts_data_size_limit const DEFAULT_PAGE_COST: u64 = 8; let expected_loaded_accounts_data_size_cost = - solana_program_runtime::compute_budget::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES as u64 + solana_program_runtime::compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + as u64 / ACCOUNT_DATA_COST_PAGE_SIZE * DEFAULT_PAGE_COST; @@ -663,36 +662,36 @@ mod tests { #[allow(clippy::field_reassign_with_default)] #[test] fn test_calculate_loaded_accounts_data_size_cost() { - let mut compute_budget = ComputeBudget::default(); + let mut compute_budget_limits = ComputeBudgetLimits::default(); // accounts data size are priced in block of 32K, ... // ... requesting less than 32K should still be charged as one block - compute_budget.loaded_accounts_data_size_limit = 31_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 31 * 1024; assert_eq!( - compute_budget.heap_cost, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); // ... 
requesting exact 32K should be charged as one block - compute_budget.loaded_accounts_data_size_limit = 32_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 32 * 1024; assert_eq!( - compute_budget.heap_cost, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); // ... requesting slightly above 32K should be charged as 2 block - compute_budget.loaded_accounts_data_size_limit = 33_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 33 * 1024; assert_eq!( - compute_budget.heap_cost * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST * 2, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); // ... requesting exact 64K should be charged as 2 block - compute_budget.loaded_accounts_data_size_limit = 64_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 64 * 1024; assert_eq!( - compute_budget.heap_cost * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST * 2, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); } diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index f9239224b488a0..a568162c139c37 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -1,28 +1,11 @@ use { - crate::prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, + crate::compute_budget_processor::{self, process_compute_budget_instructions}, solana_sdk::{ - borsh0_10::try_from_slice_unchecked, - compute_budget::{self, ComputeBudgetInstruction}, - entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, - feature_set::{ - add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix, - FeatureSet, - }, - fee::FeeBudgetLimits, - instruction::{CompiledInstruction, InstructionError}, - pubkey::Pubkey, - 
transaction::TransactionError, + feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, + transaction::Result, }, }; -/// The total accounts data a transaction can load is limited to 64MiB to not break -/// anyone in Mainnet-beta today. It can be set by set_loaded_accounts_data_size_limit instruction -pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: usize = 64 * 1024 * 1024; - -pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; -pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; -const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; - #[cfg(RUSTC_WITH_SPECIALIZATION)] impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { fn example() -> Self { @@ -31,6 +14,10 @@ impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { } } +/// Roughly 0.5us/page, where page is 32K; given roughly 15CU/us, the +/// default heap page cost = 0.5 * 15 ~= 8CU/page +pub const DEFAULT_HEAP_COST: u64 = 8; + #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct ComputeBudget { /// Number of compute units that a transaction or individual instruction is @@ -118,9 +105,6 @@ pub struct ComputeBudget { pub alt_bn128_pairing_one_pair_cost_other: u64, /// Big integer modular exponentiation cost pub big_modular_exponentiation_cost: u64, - /// Maximum accounts data size, in bytes, that a transaction is allowed to load; The - /// value is capped by MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES to prevent overuse of memory. - pub loaded_accounts_data_size_limit: usize, /// Coefficient `a` of the quadratic function which determines the number /// of compute units consumed to call poseidon syscall for a given number /// of inputs. 
@@ -143,7 +127,7 @@ pub struct ComputeBudget { impl Default for ComputeBudget { fn default() -> Self { - Self::new(MAX_COMPUTE_UNIT_LIMIT as u64) + Self::new(compute_budget_processor::MAX_COMPUTE_UNIT_LIMIT as u64) } } @@ -180,14 +164,13 @@ impl ComputeBudget { curve25519_ristretto_msm_base_cost: 2303, curve25519_ristretto_msm_incremental_cost: 788, heap_size: u32::try_from(solana_sdk::entrypoint::HEAP_LENGTH).unwrap(), - heap_cost: 8, + heap_cost: DEFAULT_HEAP_COST, mem_op_base_cost: 10, alt_bn128_addition_cost: 334, alt_bn128_multiplication_cost: 3_840, alt_bn128_pairing_one_pair_cost_first: 36_364, alt_bn128_pairing_one_pair_cost_other: 12_121, big_modular_exponentiation_cost: 33, - loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, poseidon_cost_coefficient_a: 61, poseidon_cost_coefficient_c: 542, get_remaining_compute_units_cost: 100, @@ -198,127 +181,16 @@ impl ComputeBudget { } } - pub fn process_instructions<'a>( - &mut self, - instructions: impl Iterator, - support_request_units_deprecated: bool, - support_set_loaded_accounts_data_size_limit_ix: bool, - ) -> Result { - let mut num_non_compute_budget_instructions: u32 = 0; - let mut updated_compute_unit_limit = None; - let mut requested_heap_size = None; - let mut prioritization_fee = None; - let mut updated_loaded_accounts_data_size_limit = None; - - for (i, (program_id, instruction)) in instructions.enumerate() { - if compute_budget::check_id(program_id) { - let invalid_instruction_data_error = TransactionError::InstructionError( - i as u8, - InstructionError::InvalidInstructionData, - ); - let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); - - match try_from_slice_unchecked(&instruction.data) { - Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { - units: compute_unit_limit, - additional_fee, - }) if support_request_units_deprecated => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - if 
prioritization_fee.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - prioritization_fee = - Some(PrioritizationFeeType::Deprecated(additional_fee as u64)); - } - Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { - if requested_heap_size.is_some() { - return Err(duplicate_instruction_error); - } - requested_heap_size = Some((bytes, i as u8)); - } - Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - } - Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { - if prioritization_fee.is_some() { - return Err(duplicate_instruction_error); - } - prioritization_fee = - Some(PrioritizationFeeType::ComputeUnitPrice(micro_lamports)); - } - Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) - if support_set_loaded_accounts_data_size_limit_ix => - { - if updated_loaded_accounts_data_size_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_loaded_accounts_data_size_limit = Some(bytes as usize); - } - _ => return Err(invalid_instruction_data_error), - } - } else { - // only include non-request instructions in default max calc - num_non_compute_budget_instructions = - num_non_compute_budget_instructions.saturating_add(1); - } - } - - if let Some((bytes, i)) = requested_heap_size { - if bytes > MAX_HEAP_FRAME_BYTES - || bytes < MIN_HEAP_FRAME_BYTES as u32 - || bytes % 1024 != 0 - { - return Err(TransactionError::InstructionError( - i, - InstructionError::InvalidInstructionData, - )); - } - self.heap_size = bytes; - } - - let compute_unit_limit = updated_compute_unit_limit - .unwrap_or_else(|| { - num_non_compute_budget_instructions - .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) - }) - .min(MAX_COMPUTE_UNIT_LIMIT); - self.compute_unit_limit = 
u64::from(compute_unit_limit); - - self.loaded_accounts_data_size_limit = updated_loaded_accounts_data_size_limit - .unwrap_or(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) - .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); - - Ok(prioritization_fee - .map(|fee_type| PrioritizationFeeDetails::new(fee_type, self.compute_unit_limit)) - .unwrap_or_default()) - } - - pub fn fee_budget_limits<'a>( + pub fn try_from_instructions<'a>( instructions: impl Iterator, feature_set: &FeatureSet, - ) -> FeeBudgetLimits { - let mut compute_budget = Self::default(); - - let prioritization_fee_details = compute_budget - .process_instructions( - instructions, - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ) - .unwrap_or_default(); - - FeeBudgetLimits { - loaded_accounts_data_size_limit: compute_budget.loaded_accounts_data_size_limit, - heap_cost: compute_budget.heap_cost, - compute_unit_limit: compute_budget.compute_unit_limit, - prioritization_fee: prioritization_fee_details.get_fee(), - } + ) -> Result { + let compute_budget_limits = process_compute_budget_instructions(instructions, feature_set)?; + Ok(ComputeBudget { + compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), + heap_size: compute_budget_limits.updated_heap_bytes, + ..ComputeBudget::default() + }) } /// Returns cost of the Poseidon hash function for the given number of @@ -350,489 +222,3 @@ impl ComputeBudget { Some(final_result) } } - -#[cfg(test)] -mod tests { - use { - super::*, - solana_sdk::{ - hash::Hash, - instruction::Instruction, - message::Message, - pubkey::Pubkey, - signature::Keypair, - signer::Signer, - system_instruction::{self}, - transaction::{SanitizedTransaction, Transaction}, - }, - }; - - macro_rules! 
test { - ( $instructions: expr, $expected_result: expr, $expected_budget: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { - let payer_keypair = Keypair::new(); - let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( - &[&payer_keypair], - Message::new($instructions, Some(&payer_keypair.pubkey())), - Hash::default(), - )); - let mut compute_budget = ComputeBudget::default(); - let result = compute_budget.process_instructions( - tx.message().program_instructions_iter(), - false, /*not support request_units_deprecated*/ - $support_set_loaded_accounts_data_size_limit_ix, - ); - assert_eq!($expected_result, result); - assert_eq!(compute_budget, $expected_budget); - }; - ( $instructions: expr, $expected_result: expr, $expected_budget: expr) => { - test!($instructions, $expected_result, $expected_budget, false); - }; - } - - #[test] - fn test_process_instructions() { - // Units - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - ..ComputeBudget::default() - } - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - ..ComputeBudget::default() - } - ); - test!( - &[ - 
Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - } - ); - - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(1), - ComputeBudgetInstruction::set_compute_unit_price(42) - ], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(42), - 1 - )), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - } - ); - - // HeapFrame - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(40 * 1024), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: 40 * 1024, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(40 * 1024 + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(31 * 1024), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - 
InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: MAX_HEAP_FRAME_BYTES, - ..ComputeBudget::default() - } - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(1), - ], - Err(TransactionError::InstructionError( - 3, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 7, - ..ComputeBudget::default() - } - ); - - // Combined - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Ok(PrioritizationFeeDetails::new( - 
PrioritizationFeeType::ComputeUnitPrice(u64::MAX), - MAX_COMPUTE_UNIT_LIMIT as u64, - )), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - heap_size: MAX_HEAP_FRAME_BYTES, - ..ComputeBudget::default() - } - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(u64::MAX), - 1 - )), - ComputeBudget { - compute_unit_limit: 1, - heap_size: MAX_HEAP_FRAME_BYTES, - ..ComputeBudget::default() - } - ); - - // Duplicates - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT - 1), - ], - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default() - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ], - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default() - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_price(0), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default() - ); - - // deprecated - test!( - &[Instruction::new_with_borsh( - compute_budget::id(), - &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { - units: 1_000, - additional_fee: 10 - }, - vec![] - )], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - 
ComputeBudget::default() - ); - } - - #[test] - fn test_process_loaded_accounts_data_size_limit_instruction() { - // Assert for empty instructions, change value of support_set_loaded_accounts_data_size_limit_ix - // will not change results, which should all be default - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() - }, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents, - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set with data_size - // else - // return InstructionError - let data_size: usize = 1; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = - if support_set_loaded_accounts_data_size_limit_ix { - ( - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - loaded_accounts_data_size_limit: data_size, - ..ComputeBudget::default() - }, - ) - } else { - ( - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - ) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents, with greater than max value - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to max data size - // else - // return InstructionError - let data_size: usize = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = - if 
support_set_loaded_accounts_data_size_limit_ix { - ( - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudget::default() - }, - ) - } else { - ( - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - ) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit is not presented - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to default data size - // else - // return - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = ( - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudget::default() - }, - ); - - test!( - &[Instruction::new_with_bincode( - Pubkey::new_unique(), - &0_u8, - vec![] - ),], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents more than once, - // if support_set_loaded_accounts_data_size_limit_ix then - // return DuplicateInstruction - // else - // return InstructionError - let data_size: usize = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = - if support_set_loaded_accounts_data_size_limit_ix { - ( - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default(), - ) - } else { - ( 
- Err(TransactionError::InstructionError( - 1, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - ) - }; - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - ], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - } - - #[test] - fn test_process_mixed_instructions_without_compute_budget() { - let payer_keypair = Keypair::new(); - - let transaction = - SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - system_instruction::transfer(&payer_keypair.pubkey(), &Pubkey::new_unique(), 2), - ], - Some(&payer_keypair.pubkey()), - &[&payer_keypair], - Hash::default(), - )); - - let mut compute_budget = ComputeBudget::default(); - let result = compute_budget.process_instructions( - transaction.message().program_instructions_iter(), - false, //not support request_units_deprecated - true, //support_set_loaded_accounts_data_size_limit_ix, - ); - - // assert process_instructions will be successful with default, - assert_eq!(Ok(PrioritizationFeeDetails::default()), result); - // assert the default compute_unit_limit is 2 times default: one for bpf ix, one for - // builtin ix. - assert_eq!( - compute_budget, - ComputeBudget { - compute_unit_limit: 2 * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - ..ComputeBudget::default() - } - ); - } -} diff --git a/program-runtime/src/compute_budget_processor.rs b/program-runtime/src/compute_budget_processor.rs new file mode 100644 index 00000000000000..be5e642fadcb5d --- /dev/null +++ b/program-runtime/src/compute_budget_processor.rs @@ -0,0 +1,619 @@ +//! Process compute_budget instructions to extract and sanitize limits. 
+use { + crate::{ + compute_budget::DEFAULT_HEAP_COST, + prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, + }, + solana_sdk::{ + borsh0_10::try_from_slice_unchecked, + compute_budget::{self, ComputeBudgetInstruction}, + entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, + feature_set::{ + add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix, + FeatureSet, + }, + fee::FeeBudgetLimits, + instruction::{CompiledInstruction, InstructionError}, + pubkey::Pubkey, + transaction::TransactionError, + }, +}; + +const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; +pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; +pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; + +/// The total accounts data a transaction can load is limited to 64MiB to not break +/// anyone in Mainnet-beta today. It can be set by set_loaded_accounts_data_size_limit instruction +pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: u32 = 64 * 1024 * 1024; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ComputeBudgetLimits { + pub updated_heap_bytes: u32, + pub compute_unit_limit: u32, + pub compute_unit_price: u64, + pub loaded_accounts_bytes: u32, +} + +impl Default for ComputeBudgetLimits { + fn default() -> Self { + ComputeBudgetLimits { + updated_heap_bytes: u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap(), + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + compute_unit_price: 0, + loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + } + } +} + +impl From for FeeBudgetLimits { + fn from(val: ComputeBudgetLimits) -> Self { + let prioritization_fee_details = PrioritizationFeeDetails::new( + PrioritizationFeeType::ComputeUnitPrice(val.compute_unit_price), + u64::from(val.compute_unit_limit), + ); + FeeBudgetLimits { + // NOTE - usize::from(u32).unwrap() may fail if target is 16-bit and + // `loaded_accounts_bytes` is greater than u16::MAX. In that case, panic is proper. 
+ loaded_accounts_data_size_limit: usize::try_from(val.loaded_accounts_bytes).unwrap(), + heap_cost: DEFAULT_HEAP_COST, + compute_unit_limit: u64::from(val.compute_unit_limit), + prioritization_fee: prioritization_fee_details.get_fee(), + } + } +} + +/// Processing compute_budget could be part of tx sanitizing, failed to process +/// these instructions will drop the transaction eventually without execution, +/// may as well fail it early. +/// If succeeded, the transaction's specific limits/requests (could be default) +/// are retrieved and returned, +pub fn process_compute_budget_instructions<'a>( + instructions: impl Iterator, + feature_set: &FeatureSet, +) -> Result { + let support_request_units_deprecated = + !feature_set.is_active(&remove_deprecated_request_unit_ix::id()); + let support_set_loaded_accounts_data_size_limit_ix = + feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()); + + let mut num_non_compute_budget_instructions: u32 = 0; + let mut updated_compute_unit_limit = None; + let mut updated_compute_unit_price = None; + let mut requested_heap_size = None; + let mut updated_loaded_accounts_data_size_limit = None; + + for (i, (program_id, instruction)) in instructions.enumerate() { + if compute_budget::check_id(program_id) { + let invalid_instruction_data_error = TransactionError::InstructionError( + i as u8, + InstructionError::InvalidInstructionData, + ); + let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); + + match try_from_slice_unchecked(&instruction.data) { + Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { + units: compute_unit_limit, + additional_fee, + }) if support_request_units_deprecated => { + if updated_compute_unit_limit.is_some() { + return Err(duplicate_instruction_error); + } + if updated_compute_unit_price.is_some() { + return Err(duplicate_instruction_error); + } + updated_compute_unit_limit = Some(compute_unit_limit); + updated_compute_unit_price = + 
support_deprecated_requested_units(additional_fee, compute_unit_limit); + } + Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { + if requested_heap_size.is_some() { + return Err(duplicate_instruction_error); + } + if sanitize_requested_heap_size(bytes) { + requested_heap_size = Some(bytes); + } else { + return Err(invalid_instruction_data_error); + } + } + Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { + if updated_compute_unit_limit.is_some() { + return Err(duplicate_instruction_error); + } + updated_compute_unit_limit = Some(compute_unit_limit); + } + Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { + if updated_compute_unit_price.is_some() { + return Err(duplicate_instruction_error); + } + updated_compute_unit_price = Some(micro_lamports); + } + Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) + if support_set_loaded_accounts_data_size_limit_ix => + { + if updated_loaded_accounts_data_size_limit.is_some() { + return Err(duplicate_instruction_error); + } + updated_loaded_accounts_data_size_limit = Some(bytes); + } + _ => return Err(invalid_instruction_data_error), + } + } else { + // only include non-request instructions in default max calc + num_non_compute_budget_instructions = + num_non_compute_budget_instructions.saturating_add(1); + } + } + + // sanitize limits + let updated_heap_bytes = requested_heap_size + .unwrap_or(u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()) // loader's default heap_size + .min(MAX_HEAP_FRAME_BYTES); + + let compute_unit_limit = updated_compute_unit_limit + .unwrap_or_else(|| { + num_non_compute_budget_instructions + .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) + }) + .min(MAX_COMPUTE_UNIT_LIMIT); + + let compute_unit_price = updated_compute_unit_price.unwrap_or(0); + + let loaded_accounts_bytes = updated_loaded_accounts_data_size_limit + .unwrap_or(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) + .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); + + 
Ok(ComputeBudgetLimits { + updated_heap_bytes, + compute_unit_limit, + compute_unit_price, + loaded_accounts_bytes, + }) +} + +fn sanitize_requested_heap_size(bytes: u32) -> bool { + (u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()..=MAX_HEAP_FRAME_BYTES).contains(&bytes) + && bytes % 1024 == 0 +} + +// Supports request_units_derpecated ix, returns cu_price if available. +fn support_deprecated_requested_units(additional_fee: u32, compute_unit_limit: u32) -> Option { + // TODO: remove support of 'Deprecated' after feature remove_deprecated_request_unit_ix::id() is activated + const MICRO_LAMPORTS_PER_LAMPORT: u64 = 1_000_000; + + let micro_lamport_fee = + (additional_fee as u128).saturating_mul(MICRO_LAMPORTS_PER_LAMPORT as u128); + micro_lamport_fee + .checked_div(compute_unit_limit as u128) + .map(|cu_price| u64::try_from(cu_price).unwrap_or(u64::MAX)) +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + hash::Hash, + instruction::Instruction, + message::Message, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + system_instruction::{self}, + transaction::{SanitizedTransaction, Transaction}, + }, + }; + + macro_rules! 
test { + ( $instructions: expr, $expected_result: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { + let payer_keypair = Keypair::new(); + let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( + &[&payer_keypair], + Message::new($instructions, Some(&payer_keypair.pubkey())), + Hash::default(), + )); + let mut feature_set = FeatureSet::default(); + feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0); + if $support_set_loaded_accounts_data_size_limit_ix { + feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); + } + let result = process_compute_budget_instructions( + tx.message().program_instructions_iter(), + &feature_set, + ); + assert_eq!($expected_result, result); + }; + ( $instructions: expr, $expected_result: expr ) => { + test!($instructions, $expected_result, false); + }; + } + + #[test] + fn test_process_instructions() { + // Units + test!( + &[], + Ok(ComputeBudgetLimits { + compute_unit_limit: 0, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: 1, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), 
&0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(1), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: 1, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(1), + ComputeBudgetInstruction::set_compute_unit_price(42) + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: 1, + compute_unit_price: 42, + ..ComputeBudgetLimits::default() + }) + ); + + // HeapFrame + test!( + &[], + Ok(ComputeBudgetLimits { + compute_unit_limit: 0, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(40 * 1024), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + updated_heap_bytes: 40 * 1024, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(40 * 1024 + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(31 * 1024), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + updated_heap_bytes: MAX_HEAP_FRAME_BYTES, + 
..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(1), + ], + Err(TransactionError::InstructionError( + 3, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT * 7, + ..ComputeBudgetLimits::default() + }) + ); + + // Combined + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Ok(ComputeBudgetLimits { + compute_unit_price: u64::MAX, + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + updated_heap_bytes: MAX_HEAP_FRAME_BYTES, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(1), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Ok(ComputeBudgetLimits { + compute_unit_price: u64::MAX, + 
compute_unit_limit: 1, + updated_heap_bytes: MAX_HEAP_FRAME_BYTES, + ..ComputeBudgetLimits::default() + }) + ); + + // Duplicates + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT - 1), + ], + Err(TransactionError::DuplicateInstruction(2)) + ); + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ], + Err(TransactionError::DuplicateInstruction(2)) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_price(0), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Err(TransactionError::DuplicateInstruction(2)) + ); + + // deprecated + test!( + &[Instruction::new_with_borsh( + compute_budget::id(), + &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { + units: 1_000, + additional_fee: 10 + }, + vec![] + )], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + } + + #[test] + fn test_process_loaded_accounts_data_size_limit_instruction() { + // Assert for empty instructions, change value of support_set_loaded_accounts_data_size_limit_ix + // will not change results, which should all be default + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + test!( + &[], + Ok(ComputeBudgetLimits { + compute_unit_limit: 0, + ..ComputeBudgetLimits::default() + }), + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit presents, + // if support_set_loaded_accounts_data_size_limit_ix then + // budget is set with data_size + // else + // return InstructionError + let data_size = 1; + for 
support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let expected_result = if support_set_loaded_accounts_data_size_limit_ix { + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + loaded_accounts_bytes: data_size, + ..ComputeBudgetLimits::default() + }) + } else { + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + }; + + test!( + &[ + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + expected_result, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit presents, with greater than max value + // if support_set_loaded_accounts_data_size_limit_ix then + // budget is set to max data size + // else + // return InstructionError + let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1; + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let expected_result = if support_set_loaded_accounts_data_size_limit_ix { + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + ..ComputeBudgetLimits::default() + }) + } else { + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + }; + + test!( + &[ + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + expected_result, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit is not presented + // if support_set_loaded_accounts_data_size_limit_ix then + // budget is set to default data size + // else + // return + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let expected_result = Ok(ComputeBudgetLimits { + compute_unit_limit: 
DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + ..ComputeBudgetLimits::default() + }); + + test!( + &[Instruction::new_with_bincode( + Pubkey::new_unique(), + &0_u8, + vec![] + ),], + expected_result, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit presents more than once, + // if support_set_loaded_accounts_data_size_limit_ix then + // return DuplicateInstruction + // else + // return InstructionError + let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES; + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let expected_result = if support_set_loaded_accounts_data_size_limit_ix { + Err(TransactionError::DuplicateInstruction(2)) + } else { + Err(TransactionError::InstructionError( + 1, + InstructionError::InvalidInstructionData, + )) + }; + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + ], + expected_result, + support_set_loaded_accounts_data_size_limit_ix + ); + } + } + + #[test] + fn test_process_mixed_instructions_without_compute_budget() { + let payer_keypair = Keypair::new(); + + let transaction = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + system_instruction::transfer(&payer_keypair.pubkey(), &Pubkey::new_unique(), 2), + ], + Some(&payer_keypair.pubkey()), + &[&payer_keypair], + Hash::default(), + )); + + let mut feature_set = FeatureSet::default(); + feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0); + feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); + + let result = process_compute_budget_instructions( + transaction.message().program_instructions_iter(), + 
&feature_set, + ); + + // assert process_instructions will be successful with default, + // and the default compute_unit_limit is 2 times default: one for bpf ix, one for + // builtin ix. + assert_eq!( + result, + Ok(ComputeBudgetLimits { + compute_unit_limit: 2 * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + ..ComputeBudgetLimits::default() + }) + ); + } +} diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 9fbe42d8d40c07..566a98dab9be69 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -756,7 +756,7 @@ pub fn mock_process_instruction TransactionExecutionResult::NotExecuted(e.clone()), (Ok(loaded_transaction), nonce) => { - let compute_budget = if let Some(compute_budget) = - self.runtime_config.compute_budget - { - compute_budget - } else { - let mut compute_budget = - ComputeBudget::new(compute_budget::MAX_COMPUTE_UNIT_LIMIT as u64); - - let mut compute_budget_process_transaction_time = - Measure::start("compute_budget_process_transaction_time"); - let process_transaction_result = compute_budget.process_instructions( - tx.message().program_instructions_iter(), - !self - .feature_set - .is_active(&remove_deprecated_request_unit_ix::id()), - self.feature_set - .is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ); - compute_budget_process_transaction_time.stop(); - saturating_add_assign!( - timings - .execute_accessories - .compute_budget_process_transaction_us, - compute_budget_process_transaction_time.as_us() - ); - if let Err(err) = process_transaction_result { - return TransactionExecutionResult::NotExecuted(err); - } - compute_budget - }; + let compute_budget = + if let Some(compute_budget) = self.runtime_config.compute_budget { + compute_budget + } else { + let mut compute_budget_process_transaction_time = + Measure::start("compute_budget_process_transaction_time"); + let maybe_compute_budget = ComputeBudget::try_from_instructions( + 
tx.message().program_instructions_iter(), + &self.feature_set, + ); + compute_budget_process_transaction_time.stop(); + saturating_add_assign!( + timings + .execute_accessories + .compute_budget_process_transaction_us, + compute_budget_process_transaction_time.as_us() + ); + if let Err(err) = maybe_compute_budget { + return TransactionExecutionResult::NotExecuted(err); + } + maybe_compute_budget.unwrap() + }; let result = self.execute_loaded_transaction( tx, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 82393ef7161a2b..cd1e227591a520 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -46,7 +46,8 @@ use { }, solana_logger, solana_program_runtime::{ - compute_budget::{self, ComputeBudget, MAX_COMPUTE_UNIT_LIMIT}, + compute_budget::ComputeBudget, + compute_budget_processor::{self, MAX_COMPUTE_UNIT_LIMIT}, declare_process_instruction, invoke_context::mock_process_instruction, loaded_programs::{LoadedProgram, LoadedProgramType, DELAY_VISIBILITY_SLOT_OFFSET}, @@ -10120,7 +10121,9 @@ fn test_compute_budget_program_noop() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + compute_unit_limit: u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + ), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10133,7 +10136,7 @@ fn test_compute_budget_program_noop() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -10163,7 +10166,9 @@ fn test_compute_request_instruction() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + compute_unit_limit: u64::from( + 
compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + ), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10176,7 +10181,7 @@ fn test_compute_request_instruction() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -10213,7 +10218,9 @@ fn test_failed_compute_request_instruction() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + compute_unit_limit: u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + ), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10444,14 +10451,19 @@ fn calculate_test_fee( remove_congestion_multiplier: bool, ) -> u64 { let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); if !support_set_accounts_data_size_limit_ix { - feature_set.deactivate(&include_loaded_accounts_data_size_in_fee_calculation::id()); + feature_set.deactivate( + &solana_sdk::feature_set::include_loaded_accounts_data_size_in_fee_calculation::id(), + ); } let budget_limits = - ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set); + process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) + .unwrap_or_default() + .into(); + fee_structure.calculate_fee( message, lamports_per_signature, @@ -11478,7 +11490,9 @@ fn test_rent_state_list_len() { ); let compute_budget = bank.runtime_config.compute_budget.unwrap_or_else(|| { - ComputeBudget::new(compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64) + ComputeBudget::new(u64::from( + 
compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + )) }); let transaction_context = TransactionContext::new( loaded_txs[0].0.as_ref().unwrap().accounts.clone(), diff --git a/runtime/src/transaction_priority_details.rs b/runtime/src/transaction_priority_details.rs index 0d0a94df4ed393..401f3e87893887 100644 --- a/runtime/src/transaction_priority_details.rs +++ b/runtime/src/transaction_priority_details.rs @@ -1,6 +1,7 @@ use { - solana_program_runtime::compute_budget::ComputeBudget, + solana_program_runtime::compute_budget_processor::process_compute_budget_instructions, solana_sdk::{ + feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, transaction::{SanitizedTransaction, SanitizedVersionedTransaction}, @@ -23,18 +24,17 @@ pub trait GetTransactionPriorityDetails { instructions: impl Iterator, _round_compute_unit_price_enabled: bool, ) -> Option { - let mut compute_budget = ComputeBudget::default(); - let prioritization_fee_details = compute_budget - .process_instructions( - instructions, - true, // supports prioritization by request_units_deprecated instruction - true, // enable support set accounts data size instruction - // TODO: round_compute_unit_price_enabled: bool - ) - .ok()?; + let mut feature_set = FeatureSet::default(); + feature_set.activate( + &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(), + 0, + ); + + let compute_budget_limits = + process_compute_budget_instructions(instructions, &feature_set).ok()?; Some(TransactionPriorityDetails { - priority: prioritization_fee_details.get_priority(), - compute_unit_limit: compute_budget.compute_unit_limit, + priority: compute_budget_limits.compute_unit_price, + compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), }) } } @@ -98,8 +98,8 @@ mod tests { Some(TransactionPriorityDetails { priority: 0, compute_unit_limit: - solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64 + 
solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64, }) ); @@ -111,8 +111,8 @@ mod tests { Some(TransactionPriorityDetails { priority: 0, compute_unit_limit: - solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64 + solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64, }) ); } @@ -174,8 +174,8 @@ mod tests { Some(TransactionPriorityDetails { priority: requested_price, compute_unit_limit: - solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64 + solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64, }) ); @@ -187,8 +187,8 @@ mod tests { Some(TransactionPriorityDetails { priority: requested_price, compute_unit_limit: - solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64 + solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64, }) ); } From 4cb5065e0d2fad7249fb79b3c31f64a6608a3721 Mon Sep 17 00:00:00 2001 From: Joe C Date: Thu, 19 Oct 2023 18:15:51 +0200 Subject: [PATCH 382/407] program error: add `InvalidAccountOwner` (#33766) --- sdk/program/src/program_error.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sdk/program/src/program_error.rs b/sdk/program/src/program_error.rs index a0d217a96b84f4..928a12dab84543 100644 --- a/sdk/program/src/program_error.rs +++ b/sdk/program/src/program_error.rs @@ -59,6 +59,8 @@ pub enum ProgramError { MaxInstructionTraceLengthExceeded, #[error("Builtin programs must consume compute units")] BuiltinProgramsMustConsumeComputeUnits, + #[error("Invalid account owner")] + InvalidAccountOwner, } pub trait PrintProgramError { @@ -107,6 +109,7 @@ impl PrintProgramError for ProgramError { Self::BuiltinProgramsMustConsumeComputeUnits => { msg!("Error: BuiltinProgramsMustConsumeComputeUnits") } + Self::InvalidAccountOwner => msg!("Error: 
InvalidAccountOwner"), } } } @@ -141,6 +144,7 @@ pub const MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED: u64 = to_builtin!(19); pub const INVALID_ACCOUNT_DATA_REALLOC: u64 = to_builtin!(20); pub const MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED: u64 = to_builtin!(21); pub const BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS: u64 = to_builtin!(22); +pub const INVALID_ACCOUNT_OWNER: u64 = to_builtin!(23); // Warning: Any new program errors added here must also be: // - Added to the below conversions // - Added as an equivalent to InstructionError @@ -177,6 +181,7 @@ impl From for u64 { ProgramError::BuiltinProgramsMustConsumeComputeUnits => { BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS } + ProgramError::InvalidAccountOwner => INVALID_ACCOUNT_OWNER, ProgramError::Custom(error) => { if error == 0 { CUSTOM_ZERO @@ -215,6 +220,7 @@ impl From for ProgramError { BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS => { Self::BuiltinProgramsMustConsumeComputeUnits } + INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, _ => Self::Custom(error as u32), } } @@ -253,6 +259,7 @@ impl TryFrom for ProgramError { Self::Error::BuiltinProgramsMustConsumeComputeUnits => { Ok(Self::BuiltinProgramsMustConsumeComputeUnits) } + Self::Error::InvalidAccountOwner => Ok(Self::InvalidAccountOwner), _ => Err(error), } } @@ -289,6 +296,7 @@ where BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS => { Self::BuiltinProgramsMustConsumeComputeUnits } + INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, _ => { // A valid custom error has no bits set in the upper 32 if error >> BUILTIN_BIT_SHIFT == 0 { From f13c78b7c8539b9bda8ec6dc0ca036711e759bb4 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 19 Oct 2023 09:50:38 -0700 Subject: [PATCH 383/407] add ancient append vec test (#33762) * add ancient append vec test * Update accounts-db/src/ancient_append_vecs.rs Co-authored-by: Brooks * add some comments --------- Co-authored-by: Brooks --- accounts-db/src/ancient_append_vecs.rs | 169 +++++++++++++++++++++++++ 1 
file changed, 169 insertions(+) diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 09df6d5df561c0..eabc7ef064c506 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -1622,6 +1622,175 @@ pub mod tests { } } + #[test] + fn test_calc_accounts_to_combine_older_dup() { + // looking at 1 storage + // with 2 accounts + // 1 with 1 ref + // 1 with 2 refs (and the other ref is from a newer slot) + // So, the other alive ref will cause the account with 2 refs to have to remain in the slot where it currently is. + for method in TestWriteMultipleRefs::iter() { + let num_slots = 1; + // creating 1 more sample slot/storage, but effectively act like 1 slot + let (db, mut storages, slots, infos) = get_sample_storages(num_slots + 1, None); + let slots = slots.start..slots.start + 1; + let storage = storages.first().unwrap().clone(); + let ignored_storage = storages.pop().unwrap(); + let original_results = storages + .iter() + .map(|store| db.get_unique_accounts_from_storage(store)) + .collect::>(); + let pk_with_1_ref = solana_sdk::pubkey::new_rand(); + let slot1 = slots.start; + let account_with_2_refs = original_results + .first() + .unwrap() + .stored_accounts + .first() + .unwrap(); + let pk_with_2_refs = account_with_2_refs.pubkey(); + let mut account_with_1_ref = account_with_2_refs.to_account_shared_data(); + account_with_1_ref.checked_add_lamports(1).unwrap(); + append_single_account_with_default_hash( + &storage, + &pk_with_1_ref, + &account_with_1_ref, + 0, + true, + Some(&db.accounts_index), + ); + // add the account with 2 refs into the storage we're ignoring. + // The storage we're ignoring has a higher slot. + // The index entry for pk_with_2_refs will have both slots in it. + // The slot of `storage` is lower than the slot of `ignored_storage`. + // But, both are 'alive', aka in the index. 
+ append_single_account_with_default_hash( + &ignored_storage, + pk_with_2_refs, + &account_with_2_refs.to_account_shared_data(), + 0, + true, + Some(&db.accounts_index), + ); + + // update to get both accounts in the storage + let original_results = storages + .iter() + .map(|store| db.get_unique_accounts_from_storage(store)) + .collect::>(); + assert_eq!(original_results.first().unwrap().stored_accounts.len(), 2); + let accounts_per_storage = infos + .iter() + .zip(original_results.into_iter()) + .collect::>(); + + let accounts_to_combine = db.calc_accounts_to_combine(&accounts_per_storage); + let slots_vec = slots.collect::>(); + assert_eq!(accounts_to_combine.accounts_to_combine.len(), num_slots); + // all accounts should be in many_refs + let mut accounts_keep = accounts_to_combine + .accounts_keep_slots + .keys() + .cloned() + .collect::>(); + accounts_keep.sort_unstable(); + assert_eq!(accounts_keep, slots_vec); + assert!(accounts_to_combine.target_slots_sorted.is_empty()); + assert_eq!(accounts_to_combine.accounts_keep_slots.len(), num_slots); + assert_eq!( + accounts_to_combine + .accounts_keep_slots + .get(&slot1) + .unwrap() + .accounts + .iter() + .map(|meta| meta.pubkey()) + .collect::>(), + vec![pk_with_2_refs] + ); + assert_eq!(accounts_to_combine.accounts_to_combine.len(), 1); + let one_ref_accounts = &accounts_to_combine + .accounts_to_combine + .first() + .unwrap() + .alive_accounts + .one_ref + .accounts; + assert_eq!( + one_ref_accounts + .iter() + .map(|meta| meta.pubkey()) + .collect::>(), + vec![&pk_with_1_ref] + ); + assert_eq!( + one_ref_accounts + .iter() + .map(|meta| meta.to_account_shared_data()) + .collect::>(), + vec![account_with_1_ref] + ); + assert!(accounts_to_combine + .accounts_to_combine + .iter() + .all(|shrink_collect| shrink_collect.alive_accounts.many_refs.accounts.is_empty())); + + // test write_ancient_accounts_to_same_slot_multiple_refs since we built interesting 'AccountsToCombine' + let write_ancient_accounts = match 
method { + TestWriteMultipleRefs::MultipleRefs => { + let mut write_ancient_accounts = WriteAncientAccounts::default(); + db.write_ancient_accounts_to_same_slot_multiple_refs( + accounts_to_combine.accounts_keep_slots.values(), + &mut write_ancient_accounts, + ); + write_ancient_accounts + } + TestWriteMultipleRefs::PackedStorages => { + let packed_contents = Vec::default(); + db.write_packed_storages(&accounts_to_combine, packed_contents) + } + }; + assert_eq!(write_ancient_accounts.shrinks_in_progress.len(), num_slots); + let mut shrinks_in_progress = write_ancient_accounts + .shrinks_in_progress + .iter() + .collect::>(); + shrinks_in_progress.sort_unstable_by(|a, b| a.0.cmp(b.0)); + assert_eq!( + shrinks_in_progress + .iter() + .map(|(slot, _)| **slot) + .collect::>(), + slots_vec + ); + assert_eq!( + shrinks_in_progress + .iter() + .map(|(_, shrink_in_progress)| shrink_in_progress.old_storage().append_vec_id()) + .collect::>(), + storages + .iter() + .map(|storage| storage.append_vec_id()) + .collect::>() + ); + // assert that we wrote the 2_ref account to the newly shrunk append vec + let shrink_in_progress = shrinks_in_progress.first().unwrap().1; + let accounts_shrunk_same_slot = shrink_in_progress.new_storage().accounts.accounts(0); + assert_eq!(accounts_shrunk_same_slot.len(), 1); + assert_eq!( + accounts_shrunk_same_slot.first().unwrap().pubkey(), + pk_with_2_refs + ); + assert_eq!( + accounts_shrunk_same_slot + .first() + .unwrap() + .to_account_shared_data(), + account_with_2_refs.to_account_shared_data() + ); + } + } + #[test] fn test_calc_accounts_to_combine_opposite() { // 1 storage From 383aef218dee5a0fa64b126828d06b64d29999ba Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 19 Oct 2023 23:28:53 +0200 Subject: [PATCH 384/407] Make Blockstore populate TransactionStatusIndex entries (#33756) A previous change removed logic that populated the TransactionStatusIndex entries at each of the legacy primary index keys (0 and 1). 
While these entries will not be read or written in the future, these entries are necessary for backwards compatibility. Namely, branches <= v1.17 expect these entries to be present and .unwrap()'s could fail if they are not. So, add the initialization of these entries back into Blockstore logic. We can remove initialization of these entries once our stable and beta branches are both versions that do not expect these entries to be present (should be v1.18). --- ledger/src/blockstore.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 74440cdd0c0a8f..ce9336e1132192 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2109,6 +2109,17 @@ impl Blockstore { if !self.is_primary_access() { return Ok(()); } + + // Initialize TransactionStatusIndexMeta if they are not present already + if self.transaction_status_index_cf.get(0)?.is_none() { + self.transaction_status_index_cf + .put(0, &TransactionStatusIndexMeta::default())?; + } + if self.transaction_status_index_cf.get(1)?.is_none() { + self.transaction_status_index_cf + .put(1, &TransactionStatusIndexMeta::default())?; + } + // If present, delete dummy entries inserted by old software // https://github.com/solana-labs/solana/blob/bc2b372/ledger/src/blockstore.rs#L2130-L2137 let transaction_status_dummy_key = cf::TransactionStatus::as_index(2); @@ -2152,7 +2163,7 @@ impl Blockstore { highest_primary_index_slot = Some(meta.max_slot); } } - if highest_primary_index_slot.is_some() { + if highest_primary_index_slot.is_some_and(|slot| slot != 0) { self.set_highest_primary_index_slot(highest_primary_index_slot); } else { self.db.set_clean_slot_0(true); From ce8ad77373bca151b85c007e53fac4e42a028834 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 19 Oct 2023 17:38:09 -0400 Subject: [PATCH 385/407] Uses AccountHash in AppendVec (#33764) --- accounts-db/src/account_storage/meta.rs | 2 +- accounts-db/src/accounts_db.rs | 10 
+++++----- accounts-db/src/ancient_append_vecs.rs | 6 +++--- accounts-db/src/append_vec.rs | 12 ++++++------ accounts-db/src/storable_accounts.rs | 6 +++--- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index 0cdf200f70669e..1442b4845bf604 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -127,7 +127,7 @@ impl<'storage> StoredAccountMeta<'storage> { pub fn hash(&self) -> &'storage AccountHash { match self { - Self::AppendVec(av) => bytemuck::cast_ref(av.hash()), + Self::AppendVec(av) => av.hash(), Self::Hot(hot) => hot.hash().unwrap_or(&DEFAULT_ACCOUNT_HASH), } } diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index fc4fe5d58eee84..604fed349c114d 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -10186,7 +10186,7 @@ pub mod tests { rent_epoch: 0, }; let offset = 3; - let hash = Hash::new(&[2; 32]); + let hash = AccountHash(Hash::new(&[2; 32])); let stored_meta = StoredMeta { // global write version write_version_obsolete: 0, @@ -10289,7 +10289,7 @@ pub mod tests { }; let offset = 99; let stored_size = 101; - let hash = Hash::new_unique(); + let hash = AccountHash(Hash::new_unique()); let stored_account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { meta: &meta, account_meta: &account_meta, @@ -12649,7 +12649,7 @@ pub mod tests { }; let offset = 99; let stored_size = 101; - let hash = Hash::new_unique(); + let hash = AccountHash(Hash::new_unique()); let stored_account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { meta: &meta, account_meta: &account_meta, @@ -12691,11 +12691,11 @@ pub mod tests { const ACCOUNT_DATA_LEN: usize = 3; let data: [u8; ACCOUNT_DATA_LEN] = [0x69, 0x6a, 0x6b]; let offset: usize = 0x6c_6d_6e_6f_70_71_72_73; - let hash = Hash::from([ + let hash = AccountHash(Hash::from([ 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 
0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, - ]); + ])); let stored_account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { meta: &meta, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index eabc7ef064c506..685b6962b93e8a 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -2026,7 +2026,7 @@ pub mod tests { rent_epoch: 0, }; let offset = 3; - let hash = Hash::new(&[2; 32]); + let hash = AccountHash(Hash::new(&[2; 32])); let stored_meta = StoredMeta { // global write version write_version_obsolete: 0, @@ -3009,7 +3009,7 @@ pub mod tests { }; let offset = 99; let stored_size = 101; - let hash = Hash::new_unique(); + let hash = AccountHash(Hash::new_unique()); let stored_account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { meta: &meta, account_meta: &account_meta, @@ -3091,7 +3091,7 @@ pub mod tests { }; let offset = 99; let stored_size = 1; // size is 1 byte for each entry to test `bytes` later - let hash = Hash::new_unique(); + let hash = AccountHash(Hash::new_unique()); let stored_account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { meta: &meta, account_meta: &account_meta, diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 2cd8612828c254..8cc2a6f5b3358c 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -20,7 +20,6 @@ use { solana_sdk::{ account::{AccountSharedData, ReadableAccount}, clock::Slot, - hash::Hash, pubkey::Pubkey, stake_history::Epoch, }, @@ -115,7 +114,7 @@ pub struct AppendVecStoredAccountMeta<'append_vec> { pub(crate) data: &'append_vec [u8], pub(crate) offset: usize, pub(crate) stored_size: usize, - pub(crate) hash: &'append_vec Hash, + pub(crate) hash: &'append_vec AccountHash, } impl<'append_vec> AppendVecStoredAccountMeta<'append_vec> { @@ 
-123,7 +122,7 @@ impl<'append_vec> AppendVecStoredAccountMeta<'append_vec> { &self.meta.pubkey } - pub fn hash(&self) -> &'append_vec Hash { + pub fn hash(&self) -> &'append_vec AccountHash { self.hash } @@ -488,7 +487,7 @@ impl AppendVec { pub fn get_account(&self, offset: usize) -> Option<(StoredAccountMeta, usize)> { let (meta, next): (&StoredMeta, _) = self.get_type(offset)?; let (account_meta, next): (&AccountMeta, _) = self.get_type(next)?; - let (hash, next): (&Hash, _) = self.get_type(next)?; + let (hash, next): (&AccountHash, _) = self.get_type(next)?; let (data, next) = self.get_slice(next, meta.data_len as usize)?; let stored_size = next - offset; Some(( @@ -612,11 +611,11 @@ impl AppendVec { .map(|account| account.data()) .unwrap_or_default() .as_ptr(); - let hash_ptr = hash.0.as_ref().as_ptr(); + let hash_ptr = bytemuck::bytes_of(hash).as_ptr(); let ptrs = [ (meta_ptr as *const u8, mem::size_of::()), (account_meta_ptr as *const u8, mem::size_of::()), - (hash_ptr, mem::size_of::()), + (hash_ptr, mem::size_of::()), (data_ptr, data_len), ]; if let Some(res) = self.append_ptrs_locked(&mut offset, &ptrs) { @@ -655,6 +654,7 @@ pub mod tests { rand::{thread_rng, Rng}, solana_sdk::{ account::{accounts_equal, Account, AccountSharedData, WritableAccount}, + hash::Hash, timing::duration_as_ms, }, std::{mem::ManuallyDrop, time::Instant}, diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index 900b4b5ba2fd80..7e12063a05aeff 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -384,7 +384,7 @@ pub mod tests { let data = Vec::default(); let offset = 99; let stored_size = 101; - let hash = Hash::new_unique(); + let hash = AccountHash(Hash::new_unique()); let stored_account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { meta: &meta, account_meta: &account_meta, @@ -410,7 +410,7 @@ pub mod tests { for entries in 0..2 { for starting_slot in 0..max_slots { let data = 
Vec::default(); - let hash = Hash::new_unique(); + let hash = AccountHash(Hash::new_unique()); let mut raw = Vec::new(); let mut raw2 = Vec::new(); let mut raw4 = Vec::new(); @@ -564,7 +564,7 @@ pub mod tests { data: &data, offset, stored_size, - hash: &hashes[entry as usize].0, + hash: &hashes[entry as usize], })); } let raw2_refs = raw2.iter().collect::>(); From fb80288f885a62bcd923f4c9579fd0edeafaff9b Mon Sep 17 00:00:00 2001 From: ripatel-fd Date: Fri, 20 Oct 2023 01:14:01 +0200 Subject: [PATCH 386/407] zk-token-sdk: Fix incorrect mention of OsRng in docs (#33774) Co-authored-by: Richard Patel --- .../sigma_proofs/batched_grouped_ciphertext_validity_proof.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zk-token-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity_proof.rs b/zk-token-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity_proof.rs index 7247b3dfb7654b..59b7aceca20978 100644 --- a/zk-token-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity_proof.rs @@ -45,6 +45,8 @@ impl BatchedGroupedCiphertext2HandlesValidityProof { /// /// The function simply batches the input openings and invokes the standard grouped ciphertext /// validity proof constructor. + /// + /// This function is randomized. It uses `OsRng` internally to generate random scalars. pub fn new>( (destination_pubkey, auditor_pubkey): (&ElGamalPubkey, &ElGamalPubkey), (amount_lo, amount_hi): (T, T), @@ -71,8 +73,6 @@ impl BatchedGroupedCiphertext2HandlesValidityProof { /// The function does *not* hash the public keys, commitment, or decryption handles into the /// transcript. For security, the caller (the main protocol) should hash these public /// components prior to invoking this constructor. - /// - /// This function is randomized. It uses `OsRng` internally to generate random scalars. 
pub fn verify( self, (destination_pubkey, auditor_pubkey): (&ElGamalPubkey, &ElGamalPubkey), From 37d093a30ef58bf4999f18a676ee2f86f70ac22e Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 20 Oct 2023 07:33:21 +0200 Subject: [PATCH 387/407] program error: add `ArithmeticOverflow` (#33767) --- sdk/program/src/program_error.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sdk/program/src/program_error.rs b/sdk/program/src/program_error.rs index 928a12dab84543..6eb7e9ecd71981 100644 --- a/sdk/program/src/program_error.rs +++ b/sdk/program/src/program_error.rs @@ -61,6 +61,8 @@ pub enum ProgramError { BuiltinProgramsMustConsumeComputeUnits, #[error("Invalid account owner")] InvalidAccountOwner, + #[error("Program arithmetic overflowed")] + ArithmeticOverflow, } pub trait PrintProgramError { @@ -110,6 +112,7 @@ impl PrintProgramError for ProgramError { msg!("Error: BuiltinProgramsMustConsumeComputeUnits") } Self::InvalidAccountOwner => msg!("Error: InvalidAccountOwner"), + Self::ArithmeticOverflow => msg!("Error: ArithmeticOverflow"), } } } @@ -145,6 +148,7 @@ pub const INVALID_ACCOUNT_DATA_REALLOC: u64 = to_builtin!(20); pub const MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED: u64 = to_builtin!(21); pub const BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS: u64 = to_builtin!(22); pub const INVALID_ACCOUNT_OWNER: u64 = to_builtin!(23); +pub const ARITHMETIC_OVERFLOW: u64 = to_builtin!(24); // Warning: Any new program errors added here must also be: // - Added to the below conversions // - Added as an equivalent to InstructionError @@ -182,6 +186,7 @@ impl From for u64 { BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS } ProgramError::InvalidAccountOwner => INVALID_ACCOUNT_OWNER, + ProgramError::ArithmeticOverflow => ARITHMETIC_OVERFLOW, ProgramError::Custom(error) => { if error == 0 { CUSTOM_ZERO @@ -221,6 +226,7 @@ impl From for ProgramError { Self::BuiltinProgramsMustConsumeComputeUnits } INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, + ARITHMETIC_OVERFLOW => 
Self::ArithmeticOverflow, _ => Self::Custom(error as u32), } } @@ -260,6 +266,7 @@ impl TryFrom for ProgramError { Ok(Self::BuiltinProgramsMustConsumeComputeUnits) } Self::Error::InvalidAccountOwner => Ok(Self::InvalidAccountOwner), + Self::Error::ArithmeticOverflow => Ok(Self::ArithmeticOverflow), _ => Err(error), } } @@ -297,6 +304,7 @@ where Self::BuiltinProgramsMustConsumeComputeUnits } INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, + ARITHMETIC_OVERFLOW => Self::ArithmeticOverflow, _ => { // A valid custom error has no bits set in the upper 32 if error >> BUILTIN_BIT_SHIFT == 0 { From 6b1e9b89749b4323459e51b4d6bc6b2b3092d5f7 Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 20 Oct 2023 07:35:22 +0200 Subject: [PATCH 388/407] SDK: update error variants in `Feature::from_account_info` (#33750) --- sdk/program/src/feature.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sdk/program/src/feature.rs b/sdk/program/src/feature.rs index 25a3ec8a45b73e..b46704ebcb9992 100644 --- a/sdk/program/src/feature.rs +++ b/sdk/program/src/feature.rs @@ -30,9 +30,10 @@ impl Feature { pub fn from_account_info(account_info: &AccountInfo) -> Result { if *account_info.owner != id() { - return Err(ProgramError::InvalidArgument); + return Err(ProgramError::InvalidAccountOwner); } - bincode::deserialize(&account_info.data.borrow()).map_err(|_| ProgramError::InvalidArgument) + bincode::deserialize(&account_info.data.borrow()) + .map_err(|_| ProgramError::InvalidAccountData) } } From 6798e05b0a54b63fa43a258b4cc1207e0082904f Mon Sep 17 00:00:00 2001 From: ripatel-fd Date: Fri, 20 Oct 2023 10:26:13 +0200 Subject: [PATCH 389/407] Fix typo in feature_set.rs (#33777) Co-authored-by: Richard Patel --- sdk/src/feature_set.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index bc81d781a7c176..8682836c2ba247 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -756,8 +756,8 @@ 
lazy_static! { (zk_token_sdk_enabled::id(), "enable Zk Token proof program and syscalls"), (curve25519_syscall_enabled::id(), "enable curve25519 syscalls"), (versioned_tx_message_enabled::id(), "enable versioned transaction message processing"), - (libsecp256k1_fail_on_bad_count::id(), "fail libsec256k1_verify if count appears wrong"), - (libsecp256k1_fail_on_bad_count2::id(), "fail libsec256k1_verify if count appears wrong"), + (libsecp256k1_fail_on_bad_count::id(), "fail libsecp256k1_verify if count appears wrong"), + (libsecp256k1_fail_on_bad_count2::id(), "fail libsecp256k1_verify if count appears wrong"), (instructions_sysvar_owned_by_sysvar::id(), "fix owner for instructions sysvar"), (stake_program_advance_activating_credits_observed::id(), "Enable advancing credits observed for activation epoch #19309"), (credits_auto_rewind::id(), "Auto rewind stake's credits_observed if (accidental) vote recreation is detected #22546"), From 092c2132d65391a6b8b45d3d3aa5fdb189ea9d06 Mon Sep 17 00:00:00 2001 From: Nishit Mehta <80708599+nishitxmehta@users.noreply.github.com> Date: Fri, 20 Oct 2023 15:03:41 +0530 Subject: [PATCH 390/407] Fixed typing errors (#33778) --- CHANGELOG.md | 2 +- CONTRIBUTING.md | 2 +- SECURITY.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f329001e138635..1bc3ceb4dd42f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ All notable changes to this project will be documented in this file. Please follow the [guidance](#adding-to-this-changelog) at the bottom of this file when making changes The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). 
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) -and follows a [Backwards Compatability Policy](https://docs.solana.com/developing/backwards-compatibility) +and follows a [Backwards Compatibility Policy](https://docs.solana.com/developing/backwards-compatibility) Release channels have their own copy of this changelog: * [edge - v1.18](#edge-channel) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5894203afc291a..9204a7e57b63d8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -159,7 +159,7 @@ New feature gates should also always have a corresponding tracking issue and should be updated each time the feature is activated on a cluster. * "automerge": When a PR is labelled with "automerge", the PR will be -automically merged once CI passes. In general, this label should only +automatically merged once CI passes. In general, this label should only be used for small hot-fix (fewer than 100 lines) or automatic generated PRs. If you're uncertain, it's usually the case that the PR is not qualified as "automerge". 
diff --git a/SECURITY.md b/SECURITY.md index 905316c2dc3da4..02b37486a09197 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -114,7 +114,7 @@ $400,000 USD in locked SOL tokens (locked for 12 months) #### DoS Attacks: $100,000 USD in locked SOL tokens (locked for 12 months) -* Remote resource exaustion via Non-RPC protocols +* Remote resource exhaustion via Non-RPC protocols #### Supply Chain Attacks: $100,000 USD in locked SOL tokens (locked for 12 months) From e1a9f8ef1794df51afc02d1e074ab2f831e81461 Mon Sep 17 00:00:00 2001 From: Pierre Date: Fri, 20 Oct 2023 23:27:31 +1100 Subject: [PATCH 391/407] CLI remove unwrap_or_default() on rpc calls (#33782) --- cli/src/cluster_query.rs | 26 ++++++++++---------------- cli/src/feature.rs | 3 +-- 2 files changed, 11 insertions(+), 18 deletions(-) diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 0470cf761ad95d..ee683081ed4790 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -1886,23 +1886,17 @@ pub fn process_show_validators( progress_bar.set_message("Fetching block production..."); let skip_rate: HashMap<_, _> = rpc_client - .get_block_production() - .ok() - .map(|result| { - result - .value - .by_identity - .into_iter() - .map(|(identity, (leader_slots, blocks_produced))| { - ( - identity, - 100. * (leader_slots.saturating_sub(blocks_produced)) as f64 - / leader_slots as f64, - ) - }) - .collect() + .get_block_production()? + .value + .by_identity + .into_iter() + .map(|(identity, (leader_slots, blocks_produced))| { + ( + identity, + 100. 
* (leader_slots.saturating_sub(blocks_produced)) as f64 / leader_slots as f64, + ) }) - .unwrap_or_default(); + .collect(); progress_bar.set_message("Fetching version information..."); let mut node_version = HashMap::new(); diff --git a/cli/src/feature.rs b/cli/src/feature.rs index d55f3dee88a7d0..708ea302b9ac27 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -854,8 +854,7 @@ fn process_status( let mut features = vec![]; for feature_ids in feature_ids.chunks(MAX_MULTIPLE_ACCOUNTS) { let mut feature_chunk = rpc_client - .get_multiple_accounts(feature_ids) - .unwrap_or_default() + .get_multiple_accounts(feature_ids)? .into_iter() .zip(feature_ids) .map(|(account, feature_id)| { From c98c24bd6d3b987cd94db8e90ce356d8f4010d42 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 20 Oct 2023 15:53:45 +0200 Subject: [PATCH 392/407] =?UTF-8?q?Revert=20"Split=20compute=20budget=20in?= =?UTF-8?q?structions=20process=20from=20struct=20itself=20=E2=80=A6=20(#3?= =?UTF-8?q?3784)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Revert "Split compute budget instructions process from struct itself (#33513)" This reverts commit c73bebe9847ecd5a1cbffa96bf03e03a7683232f. This was found to be a consensus breaking change. 
--- accounts-db/src/accounts.rs | 52 +- cost-model/src/cost_model.rs | 69 +- program-runtime/src/compute_budget.rs | 648 +++++++++++++++++- .../src/compute_budget_processor.rs | 619 ----------------- program-runtime/src/invoke_context.rs | 11 +- program-runtime/src/lib.rs | 1 - programs/sbf/tests/programs.rs | 17 +- runtime/src/bank.rs | 67 +- runtime/src/bank/tests.rs | 34 +- runtime/src/transaction_priority_details.rs | 40 +- 10 files changed, 762 insertions(+), 796 deletions(-) delete mode 100644 program-runtime/src/compute_budget_processor.rs diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 4ff891fc8bd0a8..47b372d981843a 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -25,7 +25,7 @@ use { itertools::Itertools, log::*, solana_program_runtime::{ - compute_budget_processor::process_compute_budget_instructions, + compute_budget::{self, ComputeBudget}, loaded_programs::LoadedProgramsForTxBatch, }, solana_sdk::{ @@ -35,8 +35,9 @@ use { bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{BankId, Slot}, feature_set::{ - self, include_loaded_accounts_data_size_in_fee_calculation, - remove_congestion_multiplier_from_fee_calculation, + self, add_set_tx_loaded_accounts_data_size_instruction, + include_loaded_accounts_data_size_in_fee_calculation, + remove_congestion_multiplier_from_fee_calculation, remove_deprecated_request_unit_ix, simplify_writable_program_account_check, FeatureSet, }, fee::FeeStructure, @@ -246,16 +247,15 @@ impl Accounts { feature_set: &FeatureSet, ) -> Result> { if feature_set.is_active(&feature_set::cap_transaction_accounts_data_size::id()) { - let compute_budget_limits = process_compute_budget_instructions( + let mut compute_budget = + ComputeBudget::new(compute_budget::MAX_COMPUTE_UNIT_LIMIT as u64); + let _process_transaction_result = compute_budget.process_instructions( tx.message().program_instructions_iter(), - feature_set, - ) - .unwrap_or_default(); + 
!feature_set.is_active(&remove_deprecated_request_unit_ix::id()), + feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), + ); // sanitize against setting size limit to zero - NonZeroUsize::new( - usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap_or_default(), - ) - .map_or( + NonZeroUsize::new(compute_budget.loaded_accounts_data_size_limit).map_or( Err(TransactionError::InvalidLoadedAccountsDataSizeLimit), |v| Ok(Some(v)), ) @@ -722,7 +722,7 @@ impl Accounts { fee_structure.calculate_fee( tx.message(), lamports_per_signature, - &process_compute_budget_instructions(tx.message().program_instructions_iter(), feature_set).unwrap_or_default().into(), + &ComputeBudget::fee_budget_limits(tx.message().program_instructions_iter(), feature_set), feature_set.is_active(&remove_congestion_multiplier_from_fee_calculation::id()), feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), ) @@ -1474,9 +1474,8 @@ mod tests { transaction_results::{DurableNonceFee, TransactionExecutionDetails}, }, assert_matches::assert_matches, - solana_program_runtime::{ - compute_budget_processor, - prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, + solana_program_runtime::prioritization_fee::{ + PrioritizationFeeDetails, PrioritizationFeeType, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -1752,15 +1751,13 @@ mod tests { ); let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) - .unwrap_or_default() - .into(), + &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), 
&feature_set), true, false, ); @@ -4256,11 +4253,7 @@ mod tests { let result_no_limit = Ok(None); let result_default_limit = Ok(Some( - NonZeroUsize::new( - usize::try_from(compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) - .unwrap(), - ) - .unwrap(), + NonZeroUsize::new(compute_budget::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES).unwrap(), )); let result_requested_limit: Result> = Ok(Some(NonZeroUsize::new(99).unwrap())); @@ -4288,10 +4281,7 @@ mod tests { // if tx doesn't set limit, then default limit (64MiB) // if tx sets limit, then requested limit // if tx sets limit to zero, then TransactionError::InvalidLoadedAccountsDataSizeLimit - feature_set.activate( - &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(), - 0, - ); + feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); test(tx_not_set_limit, &feature_set, &result_default_limit); test(tx_set_limit_99, &feature_set, &result_requested_limit); test(tx_set_limit_0, &feature_set, &result_invalid_limit); @@ -4326,15 +4316,13 @@ mod tests { ); let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) - .unwrap_or_default() - .into(), + &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set), true, false, ); diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index bb3e296d6dcbe0..0e8d6954202351 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -8,17 +8,17 @@ use { crate::{block_cost_limits::*, transaction_cost::*}, log::*, - solana_program_runtime::{ - 
compute_budget::DEFAULT_HEAP_COST, - compute_budget_processor::{ - process_compute_budget_instructions, ComputeBudgetLimits, - DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, - }, + solana_program_runtime::compute_budget::{ + ComputeBudget, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, }, solana_sdk::{ borsh0_10::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - feature_set::{include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, + feature_set::{ + add_set_tx_loaded_accounts_data_size_instruction, + include_loaded_accounts_data_size_in_fee_calculation, + remove_deprecated_request_unit_ix, FeatureSet, + }, fee::FeeStructure, instruction::CompiledInstruction, program_utils::limited_deserialize, @@ -62,12 +62,10 @@ impl CostModel { // to set limit, `compute_budget.loaded_accounts_data_size_limit` is set to default // limit of 64MB; which will convert to (64M/32K)*8CU = 16_000 CUs // - pub fn calculate_loaded_accounts_data_size_cost( - compute_budget_limits: &ComputeBudgetLimits, - ) -> u64 { + pub fn calculate_loaded_accounts_data_size_cost(compute_budget: &ComputeBudget) -> u64 { FeeStructure::calculate_memory_usage_cost( - usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap(), - DEFAULT_HEAP_COST, + compute_budget.loaded_accounts_data_size_limit, + compute_budget.heap_cost, ) } @@ -130,28 +128,32 @@ impl CostModel { } // calculate bpf cost based on compute budget instructions + let mut compute_budget = ComputeBudget::default(); + + let result = compute_budget.process_instructions( + transaction.message().program_instructions_iter(), + !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), + feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), + ); // if failed to process compute_budget instructions, the transaction will not be executed // by `bank`, therefore it should be considered as no execution cost by cost model. 
- match process_compute_budget_instructions( - transaction.message().program_instructions_iter(), - feature_set, - ) { - Ok(compute_budget_limits) => { + match result { + Ok(_) => { // if tx contained user-space instructions and a more accurate estimate available correct it, // where "user-space instructions" must be specifically checked by // 'compute_unit_limit_is_set' flag, because compute_budget does not distinguish // builtin and bpf instructions when calculating default compute-unit-limit. (see // compute_budget.rs test `test_process_mixed_instructions_without_compute_budget`) if bpf_costs > 0 && compute_unit_limit_is_set { - bpf_costs = u64::from(compute_budget_limits.compute_unit_limit); + bpf_costs = compute_budget.compute_unit_limit } if feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()) { loaded_accounts_data_size_cost = - Self::calculate_loaded_accounts_data_size_cost(&compute_budget_limits); + Self::calculate_loaded_accounts_data_size_cost(&compute_budget); } } Err(_) => { @@ -543,8 +545,7 @@ mod tests { // default loaded_accounts_data_size_limit const DEFAULT_PAGE_COST: u64 = 8; let expected_loaded_accounts_data_size_cost = - solana_program_runtime::compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES - as u64 + solana_program_runtime::compute_budget::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES as u64 / ACCOUNT_DATA_COST_PAGE_SIZE * DEFAULT_PAGE_COST; @@ -662,36 +663,36 @@ mod tests { #[allow(clippy::field_reassign_with_default)] #[test] fn test_calculate_loaded_accounts_data_size_cost() { - let mut compute_budget_limits = ComputeBudgetLimits::default(); + let mut compute_budget = ComputeBudget::default(); // accounts data size are priced in block of 32K, ... // ... 
requesting less than 32K should still be charged as one block - compute_budget_limits.loaded_accounts_bytes = 31 * 1024; + compute_budget.loaded_accounts_data_size_limit = 31_usize * 1024; assert_eq!( - DEFAULT_HEAP_COST, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) + compute_budget.heap_cost, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) ); // ... requesting exact 32K should be charged as one block - compute_budget_limits.loaded_accounts_bytes = 32 * 1024; + compute_budget.loaded_accounts_data_size_limit = 32_usize * 1024; assert_eq!( - DEFAULT_HEAP_COST, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) + compute_budget.heap_cost, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) ); // ... requesting slightly above 32K should be charged as 2 block - compute_budget_limits.loaded_accounts_bytes = 33 * 1024; + compute_budget.loaded_accounts_data_size_limit = 33_usize * 1024; assert_eq!( - DEFAULT_HEAP_COST * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) + compute_budget.heap_cost * 2, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) ); // ... 
requesting exact 64K should be charged as 2 block - compute_budget_limits.loaded_accounts_bytes = 64 * 1024; + compute_budget.loaded_accounts_data_size_limit = 64_usize * 1024; assert_eq!( - DEFAULT_HEAP_COST * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) + compute_budget.heap_cost * 2, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) ); } diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index a568162c139c37..f9239224b488a0 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -1,11 +1,28 @@ use { - crate::compute_budget_processor::{self, process_compute_budget_instructions}, + crate::prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, solana_sdk::{ - feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, - transaction::Result, + borsh0_10::try_from_slice_unchecked, + compute_budget::{self, ComputeBudgetInstruction}, + entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, + feature_set::{ + add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix, + FeatureSet, + }, + fee::FeeBudgetLimits, + instruction::{CompiledInstruction, InstructionError}, + pubkey::Pubkey, + transaction::TransactionError, }, }; +/// The total accounts data a transaction can load is limited to 64MiB to not break +/// anyone in Mainnet-beta today. 
It can be set by set_loaded_accounts_data_size_limit instruction +pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: usize = 64 * 1024 * 1024; + +pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; +pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; +const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; + #[cfg(RUSTC_WITH_SPECIALIZATION)] impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { fn example() -> Self { @@ -14,10 +31,6 @@ impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { } } -/// Roughly 0.5us/page, where page is 32K; given roughly 15CU/us, the -/// default heap page cost = 0.5 * 15 ~= 8CU/page -pub const DEFAULT_HEAP_COST: u64 = 8; - #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct ComputeBudget { /// Number of compute units that a transaction or individual instruction is @@ -105,6 +118,9 @@ pub struct ComputeBudget { pub alt_bn128_pairing_one_pair_cost_other: u64, /// Big integer modular exponentiation cost pub big_modular_exponentiation_cost: u64, + /// Maximum accounts data size, in bytes, that a transaction is allowed to load; The + /// value is capped by MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES to prevent overuse of memory. + pub loaded_accounts_data_size_limit: usize, /// Coefficient `a` of the quadratic function which determines the number /// of compute units consumed to call poseidon syscall for a given number /// of inputs. 
@@ -127,7 +143,7 @@ pub struct ComputeBudget { impl Default for ComputeBudget { fn default() -> Self { - Self::new(compute_budget_processor::MAX_COMPUTE_UNIT_LIMIT as u64) + Self::new(MAX_COMPUTE_UNIT_LIMIT as u64) } } @@ -164,13 +180,14 @@ impl ComputeBudget { curve25519_ristretto_msm_base_cost: 2303, curve25519_ristretto_msm_incremental_cost: 788, heap_size: u32::try_from(solana_sdk::entrypoint::HEAP_LENGTH).unwrap(), - heap_cost: DEFAULT_HEAP_COST, + heap_cost: 8, mem_op_base_cost: 10, alt_bn128_addition_cost: 334, alt_bn128_multiplication_cost: 3_840, alt_bn128_pairing_one_pair_cost_first: 36_364, alt_bn128_pairing_one_pair_cost_other: 12_121, big_modular_exponentiation_cost: 33, + loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, poseidon_cost_coefficient_a: 61, poseidon_cost_coefficient_c: 542, get_remaining_compute_units_cost: 100, @@ -181,16 +198,127 @@ impl ComputeBudget { } } - pub fn try_from_instructions<'a>( + pub fn process_instructions<'a>( + &mut self, + instructions: impl Iterator, + support_request_units_deprecated: bool, + support_set_loaded_accounts_data_size_limit_ix: bool, + ) -> Result { + let mut num_non_compute_budget_instructions: u32 = 0; + let mut updated_compute_unit_limit = None; + let mut requested_heap_size = None; + let mut prioritization_fee = None; + let mut updated_loaded_accounts_data_size_limit = None; + + for (i, (program_id, instruction)) in instructions.enumerate() { + if compute_budget::check_id(program_id) { + let invalid_instruction_data_error = TransactionError::InstructionError( + i as u8, + InstructionError::InvalidInstructionData, + ); + let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); + + match try_from_slice_unchecked(&instruction.data) { + Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { + units: compute_unit_limit, + additional_fee, + }) if support_request_units_deprecated => { + if updated_compute_unit_limit.is_some() { + return 
Err(duplicate_instruction_error); + } + if prioritization_fee.is_some() { + return Err(duplicate_instruction_error); + } + updated_compute_unit_limit = Some(compute_unit_limit); + prioritization_fee = + Some(PrioritizationFeeType::Deprecated(additional_fee as u64)); + } + Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { + if requested_heap_size.is_some() { + return Err(duplicate_instruction_error); + } + requested_heap_size = Some((bytes, i as u8)); + } + Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { + if updated_compute_unit_limit.is_some() { + return Err(duplicate_instruction_error); + } + updated_compute_unit_limit = Some(compute_unit_limit); + } + Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { + if prioritization_fee.is_some() { + return Err(duplicate_instruction_error); + } + prioritization_fee = + Some(PrioritizationFeeType::ComputeUnitPrice(micro_lamports)); + } + Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) + if support_set_loaded_accounts_data_size_limit_ix => + { + if updated_loaded_accounts_data_size_limit.is_some() { + return Err(duplicate_instruction_error); + } + updated_loaded_accounts_data_size_limit = Some(bytes as usize); + } + _ => return Err(invalid_instruction_data_error), + } + } else { + // only include non-request instructions in default max calc + num_non_compute_budget_instructions = + num_non_compute_budget_instructions.saturating_add(1); + } + } + + if let Some((bytes, i)) = requested_heap_size { + if bytes > MAX_HEAP_FRAME_BYTES + || bytes < MIN_HEAP_FRAME_BYTES as u32 + || bytes % 1024 != 0 + { + return Err(TransactionError::InstructionError( + i, + InstructionError::InvalidInstructionData, + )); + } + self.heap_size = bytes; + } + + let compute_unit_limit = updated_compute_unit_limit + .unwrap_or_else(|| { + num_non_compute_budget_instructions + .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) + }) + .min(MAX_COMPUTE_UNIT_LIMIT); + 
self.compute_unit_limit = u64::from(compute_unit_limit); + + self.loaded_accounts_data_size_limit = updated_loaded_accounts_data_size_limit + .unwrap_or(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) + .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); + + Ok(prioritization_fee + .map(|fee_type| PrioritizationFeeDetails::new(fee_type, self.compute_unit_limit)) + .unwrap_or_default()) + } + + pub fn fee_budget_limits<'a>( instructions: impl Iterator, feature_set: &FeatureSet, - ) -> Result { - let compute_budget_limits = process_compute_budget_instructions(instructions, feature_set)?; - Ok(ComputeBudget { - compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), - heap_size: compute_budget_limits.updated_heap_bytes, - ..ComputeBudget::default() - }) + ) -> FeeBudgetLimits { + let mut compute_budget = Self::default(); + + let prioritization_fee_details = compute_budget + .process_instructions( + instructions, + !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), + feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), + ) + .unwrap_or_default(); + + FeeBudgetLimits { + loaded_accounts_data_size_limit: compute_budget.loaded_accounts_data_size_limit, + heap_cost: compute_budget.heap_cost, + compute_unit_limit: compute_budget.compute_unit_limit, + prioritization_fee: prioritization_fee_details.get_fee(), + } } /// Returns cost of the Poseidon hash function for the given number of @@ -222,3 +350,489 @@ impl ComputeBudget { Some(final_result) } } + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + hash::Hash, + instruction::Instruction, + message::Message, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + system_instruction::{self}, + transaction::{SanitizedTransaction, Transaction}, + }, + }; + + macro_rules! 
test { + ( $instructions: expr, $expected_result: expr, $expected_budget: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { + let payer_keypair = Keypair::new(); + let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( + &[&payer_keypair], + Message::new($instructions, Some(&payer_keypair.pubkey())), + Hash::default(), + )); + let mut compute_budget = ComputeBudget::default(); + let result = compute_budget.process_instructions( + tx.message().program_instructions_iter(), + false, /*not support request_units_deprecated*/ + $support_set_loaded_accounts_data_size_limit_ix, + ); + assert_eq!($expected_result, result); + assert_eq!(compute_budget, $expected_budget); + }; + ( $instructions: expr, $expected_result: expr, $expected_budget: expr) => { + test!($instructions, $expected_result, $expected_budget, false); + }; + } + + #[test] + fn test_process_instructions() { + // Units + test!( + &[], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: 0, + ..ComputeBudget::default() + } + ); + test!( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: 1, + ..ComputeBudget::default() + } + ); + test!( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, + ..ComputeBudget::default() + } + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, + ..ComputeBudget::default() + } + ); + test!( + &[ + 
Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(1), + ], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: 1, + ..ComputeBudget::default() + } + ); + + test!( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(1), + ComputeBudgetInstruction::set_compute_unit_price(42) + ], + Ok(PrioritizationFeeDetails::new( + PrioritizationFeeType::ComputeUnitPrice(42), + 1 + )), + ComputeBudget { + compute_unit_limit: 1, + ..ComputeBudget::default() + } + ); + + // HeapFrame + test!( + &[], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: 0, + ..ComputeBudget::default() + } + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(40 * 1024), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + heap_size: 40 * 1024, + ..ComputeBudget::default() + } + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(40 * 1024 + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )), + ComputeBudget::default() + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(31 * 1024), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )), + ComputeBudget::default() + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + 
InstructionError::InvalidInstructionData, + )), + ComputeBudget::default() + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + heap_size: MAX_HEAP_FRAME_BYTES, + ..ComputeBudget::default() + } + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(1), + ], + Err(TransactionError::InstructionError( + 3, + InstructionError::InvalidInstructionData, + )), + ComputeBudget::default() + ); + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 7, + ..ComputeBudget::default() + } + ); + + // Combined + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Ok(PrioritizationFeeDetails::new( + 
PrioritizationFeeType::ComputeUnitPrice(u64::MAX), + MAX_COMPUTE_UNIT_LIMIT as u64, + )), + ComputeBudget { + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, + heap_size: MAX_HEAP_FRAME_BYTES, + ..ComputeBudget::default() + } + ); + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(1), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Ok(PrioritizationFeeDetails::new( + PrioritizationFeeType::ComputeUnitPrice(u64::MAX), + 1 + )), + ComputeBudget { + compute_unit_limit: 1, + heap_size: MAX_HEAP_FRAME_BYTES, + ..ComputeBudget::default() + } + ); + + // Duplicates + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT - 1), + ], + Err(TransactionError::DuplicateInstruction(2)), + ComputeBudget::default() + ); + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ], + Err(TransactionError::DuplicateInstruction(2)), + ComputeBudget::default() + ); + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_price(0), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Err(TransactionError::DuplicateInstruction(2)), + ComputeBudget::default() + ); + + // deprecated + test!( + &[Instruction::new_with_borsh( + compute_budget::id(), + &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { + units: 1_000, + additional_fee: 10 + }, + vec![] + )], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )), + 
ComputeBudget::default() + ); + } + + #[test] + fn test_process_loaded_accounts_data_size_limit_instruction() { + // Assert for empty instructions, change value of support_set_loaded_accounts_data_size_limit_ix + // will not change results, which should all be default + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + test!( + &[], + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: 0, + ..ComputeBudget::default() + }, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit presents, + // if support_set_loaded_accounts_data_size_limit_ix then + // budget is set with data_size + // else + // return InstructionError + let data_size: usize = 1; + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let (expected_result, expected_budget) = + if support_set_loaded_accounts_data_size_limit_ix { + ( + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + loaded_accounts_data_size_limit: data_size, + ..ComputeBudget::default() + }, + ) + } else { + ( + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )), + ComputeBudget::default(), + ) + }; + + test!( + &[ + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + expected_result, + expected_budget, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit presents, with greater than max value + // if support_set_loaded_accounts_data_size_limit_ix then + // budget is set to max data size + // else + // return InstructionError + let data_size: usize = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1; + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let (expected_result, expected_budget) = + if 
support_set_loaded_accounts_data_size_limit_ix { + ( + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + ..ComputeBudget::default() + }, + ) + } else { + ( + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )), + ComputeBudget::default(), + ) + }; + + test!( + &[ + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + expected_result, + expected_budget, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit is not presented + // if support_set_loaded_accounts_data_size_limit_ix then + // budget is set to default data size + // else + // return + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let (expected_result, expected_budget) = ( + Ok(PrioritizationFeeDetails::default()), + ComputeBudget { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + ..ComputeBudget::default() + }, + ); + + test!( + &[Instruction::new_with_bincode( + Pubkey::new_unique(), + &0_u8, + vec![] + ),], + expected_result, + expected_budget, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit presents more than once, + // if support_set_loaded_accounts_data_size_limit_ix then + // return DuplicateInstruction + // else + // return InstructionError + let data_size: usize = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES; + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let (expected_result, expected_budget) = + if support_set_loaded_accounts_data_size_limit_ix { + ( + Err(TransactionError::DuplicateInstruction(2)), + ComputeBudget::default(), + ) + } else { + ( 
+ Err(TransactionError::InstructionError( + 1, + InstructionError::InvalidInstructionData, + )), + ComputeBudget::default(), + ) + }; + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), + ], + expected_result, + expected_budget, + support_set_loaded_accounts_data_size_limit_ix + ); + } + } + + #[test] + fn test_process_mixed_instructions_without_compute_budget() { + let payer_keypair = Keypair::new(); + + let transaction = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + system_instruction::transfer(&payer_keypair.pubkey(), &Pubkey::new_unique(), 2), + ], + Some(&payer_keypair.pubkey()), + &[&payer_keypair], + Hash::default(), + )); + + let mut compute_budget = ComputeBudget::default(); + let result = compute_budget.process_instructions( + transaction.message().program_instructions_iter(), + false, //not support request_units_deprecated + true, //support_set_loaded_accounts_data_size_limit_ix, + ); + + // assert process_instructions will be successful with default, + assert_eq!(Ok(PrioritizationFeeDetails::default()), result); + // assert the default compute_unit_limit is 2 times default: one for bpf ix, one for + // builtin ix. + assert_eq!( + compute_budget, + ComputeBudget { + compute_unit_limit: 2 * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + ..ComputeBudget::default() + } + ); + } +} diff --git a/program-runtime/src/compute_budget_processor.rs b/program-runtime/src/compute_budget_processor.rs deleted file mode 100644 index be5e642fadcb5d..00000000000000 --- a/program-runtime/src/compute_budget_processor.rs +++ /dev/null @@ -1,619 +0,0 @@ -//! Process compute_budget instructions to extract and sanitize limits. 
-use { - crate::{ - compute_budget::DEFAULT_HEAP_COST, - prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, - }, - solana_sdk::{ - borsh0_10::try_from_slice_unchecked, - compute_budget::{self, ComputeBudgetInstruction}, - entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, - feature_set::{ - add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix, - FeatureSet, - }, - fee::FeeBudgetLimits, - instruction::{CompiledInstruction, InstructionError}, - pubkey::Pubkey, - transaction::TransactionError, - }, -}; - -const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; -pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; -pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; - -/// The total accounts data a transaction can load is limited to 64MiB to not break -/// anyone in Mainnet-beta today. It can be set by set_loaded_accounts_data_size_limit instruction -pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: u32 = 64 * 1024 * 1024; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ComputeBudgetLimits { - pub updated_heap_bytes: u32, - pub compute_unit_limit: u32, - pub compute_unit_price: u64, - pub loaded_accounts_bytes: u32, -} - -impl Default for ComputeBudgetLimits { - fn default() -> Self { - ComputeBudgetLimits { - updated_heap_bytes: u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap(), - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, - compute_unit_price: 0, - loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - } - } -} - -impl From for FeeBudgetLimits { - fn from(val: ComputeBudgetLimits) -> Self { - let prioritization_fee_details = PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(val.compute_unit_price), - u64::from(val.compute_unit_limit), - ); - FeeBudgetLimits { - // NOTE - usize::from(u32).unwrap() may fail if target is 16-bit and - // `loaded_accounts_bytes` is greater than u16::MAX. In that case, panic is proper. 
- loaded_accounts_data_size_limit: usize::try_from(val.loaded_accounts_bytes).unwrap(), - heap_cost: DEFAULT_HEAP_COST, - compute_unit_limit: u64::from(val.compute_unit_limit), - prioritization_fee: prioritization_fee_details.get_fee(), - } - } -} - -/// Processing compute_budget could be part of tx sanitizing, failed to process -/// these instructions will drop the transaction eventually without execution, -/// may as well fail it early. -/// If succeeded, the transaction's specific limits/requests (could be default) -/// are retrieved and returned, -pub fn process_compute_budget_instructions<'a>( - instructions: impl Iterator, - feature_set: &FeatureSet, -) -> Result { - let support_request_units_deprecated = - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()); - let support_set_loaded_accounts_data_size_limit_ix = - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()); - - let mut num_non_compute_budget_instructions: u32 = 0; - let mut updated_compute_unit_limit = None; - let mut updated_compute_unit_price = None; - let mut requested_heap_size = None; - let mut updated_loaded_accounts_data_size_limit = None; - - for (i, (program_id, instruction)) in instructions.enumerate() { - if compute_budget::check_id(program_id) { - let invalid_instruction_data_error = TransactionError::InstructionError( - i as u8, - InstructionError::InvalidInstructionData, - ); - let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); - - match try_from_slice_unchecked(&instruction.data) { - Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { - units: compute_unit_limit, - additional_fee, - }) if support_request_units_deprecated => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - if updated_compute_unit_price.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - updated_compute_unit_price = - 
support_deprecated_requested_units(additional_fee, compute_unit_limit); - } - Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { - if requested_heap_size.is_some() { - return Err(duplicate_instruction_error); - } - if sanitize_requested_heap_size(bytes) { - requested_heap_size = Some(bytes); - } else { - return Err(invalid_instruction_data_error); - } - } - Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - } - Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { - if updated_compute_unit_price.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_price = Some(micro_lamports); - } - Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) - if support_set_loaded_accounts_data_size_limit_ix => - { - if updated_loaded_accounts_data_size_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_loaded_accounts_data_size_limit = Some(bytes); - } - _ => return Err(invalid_instruction_data_error), - } - } else { - // only include non-request instructions in default max calc - num_non_compute_budget_instructions = - num_non_compute_budget_instructions.saturating_add(1); - } - } - - // sanitize limits - let updated_heap_bytes = requested_heap_size - .unwrap_or(u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()) // loader's default heap_size - .min(MAX_HEAP_FRAME_BYTES); - - let compute_unit_limit = updated_compute_unit_limit - .unwrap_or_else(|| { - num_non_compute_budget_instructions - .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) - }) - .min(MAX_COMPUTE_UNIT_LIMIT); - - let compute_unit_price = updated_compute_unit_price.unwrap_or(0); - - let loaded_accounts_bytes = updated_loaded_accounts_data_size_limit - .unwrap_or(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) - .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); - - 
Ok(ComputeBudgetLimits { - updated_heap_bytes, - compute_unit_limit, - compute_unit_price, - loaded_accounts_bytes, - }) -} - -fn sanitize_requested_heap_size(bytes: u32) -> bool { - (u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()..=MAX_HEAP_FRAME_BYTES).contains(&bytes) - && bytes % 1024 == 0 -} - -// Supports request_units_derpecated ix, returns cu_price if available. -fn support_deprecated_requested_units(additional_fee: u32, compute_unit_limit: u32) -> Option { - // TODO: remove support of 'Deprecated' after feature remove_deprecated_request_unit_ix::id() is activated - const MICRO_LAMPORTS_PER_LAMPORT: u64 = 1_000_000; - - let micro_lamport_fee = - (additional_fee as u128).saturating_mul(MICRO_LAMPORTS_PER_LAMPORT as u128); - micro_lamport_fee - .checked_div(compute_unit_limit as u128) - .map(|cu_price| u64::try_from(cu_price).unwrap_or(u64::MAX)) -} - -#[cfg(test)] -mod tests { - use { - super::*, - solana_sdk::{ - hash::Hash, - instruction::Instruction, - message::Message, - pubkey::Pubkey, - signature::Keypair, - signer::Signer, - system_instruction::{self}, - transaction::{SanitizedTransaction, Transaction}, - }, - }; - - macro_rules! 
test { - ( $instructions: expr, $expected_result: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { - let payer_keypair = Keypair::new(); - let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( - &[&payer_keypair], - Message::new($instructions, Some(&payer_keypair.pubkey())), - Hash::default(), - )); - let mut feature_set = FeatureSet::default(); - feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0); - if $support_set_loaded_accounts_data_size_limit_ix { - feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); - } - let result = process_compute_budget_instructions( - tx.message().program_instructions_iter(), - &feature_set, - ); - assert_eq!($expected_result, result); - }; - ( $instructions: expr, $expected_result: expr ) => { - test!($instructions, $expected_result, false); - }; - } - - #[test] - fn test_process_instructions() { - // Units - test!( - &[], - Ok(ComputeBudgetLimits { - compute_unit_limit: 0, - ..ComputeBudgetLimits::default() - }) - ); - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(ComputeBudgetLimits { - compute_unit_limit: 1, - ..ComputeBudgetLimits::default() - }) - ); - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(ComputeBudgetLimits { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, - ..ComputeBudgetLimits::default() - }) - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ], - Ok(ComputeBudgetLimits { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, - ..ComputeBudgetLimits::default() - }) - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), 
&0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), - ], - Ok(ComputeBudgetLimits { - compute_unit_limit: 1, - ..ComputeBudgetLimits::default() - }) - ); - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(1), - ComputeBudgetInstruction::set_compute_unit_price(42) - ], - Ok(ComputeBudgetLimits { - compute_unit_limit: 1, - compute_unit_price: 42, - ..ComputeBudgetLimits::default() - }) - ); - - // HeapFrame - test!( - &[], - Ok(ComputeBudgetLimits { - compute_unit_limit: 0, - ..ComputeBudgetLimits::default() - }) - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(40 * 1024), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - updated_heap_bytes: 40 * 1024, - ..ComputeBudgetLimits::default() - }) - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(40 * 1024 + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(31 * 1024), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ], - Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - updated_heap_bytes: MAX_HEAP_FRAME_BYTES, - 
..ComputeBudgetLimits::default() - }) - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(1), - ], - Err(TransactionError::InstructionError( - 3, - InstructionError::InvalidInstructionData, - )) - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT * 7, - ..ComputeBudgetLimits::default() - }) - ); - - // Combined - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Ok(ComputeBudgetLimits { - compute_unit_price: u64::MAX, - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, - updated_heap_bytes: MAX_HEAP_FRAME_BYTES, - ..ComputeBudgetLimits::default() - }) - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Ok(ComputeBudgetLimits { - compute_unit_price: u64::MAX, - 
compute_unit_limit: 1, - updated_heap_bytes: MAX_HEAP_FRAME_BYTES, - ..ComputeBudgetLimits::default() - }) - ); - - // Duplicates - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT - 1), - ], - Err(TransactionError::DuplicateInstruction(2)) - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ], - Err(TransactionError::DuplicateInstruction(2)) - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_price(0), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Err(TransactionError::DuplicateInstruction(2)) - ); - - // deprecated - test!( - &[Instruction::new_with_borsh( - compute_budget::id(), - &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { - units: 1_000, - additional_fee: 10 - }, - vec![] - )], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) - ); - } - - #[test] - fn test_process_loaded_accounts_data_size_limit_instruction() { - // Assert for empty instructions, change value of support_set_loaded_accounts_data_size_limit_ix - // will not change results, which should all be default - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - test!( - &[], - Ok(ComputeBudgetLimits { - compute_unit_limit: 0, - ..ComputeBudgetLimits::default() - }), - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents, - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set with data_size - // else - // return InstructionError - let data_size = 1; - for 
support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let expected_result = if support_set_loaded_accounts_data_size_limit_ix { - Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - loaded_accounts_bytes: data_size, - ..ComputeBudgetLimits::default() - }) - } else { - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents, with greater than max value - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to max data size - // else - // return InstructionError - let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let expected_result = if support_set_loaded_accounts_data_size_limit_ix { - Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudgetLimits::default() - }) - } else { - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit is not presented - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to default data size - // else - // return - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let expected_result = Ok(ComputeBudgetLimits { - compute_unit_limit: 
DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudgetLimits::default() - }); - - test!( - &[Instruction::new_with_bincode( - Pubkey::new_unique(), - &0_u8, - vec![] - ),], - expected_result, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents more than once, - // if support_set_loaded_accounts_data_size_limit_ix then - // return DuplicateInstruction - // else - // return InstructionError - let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let expected_result = if support_set_loaded_accounts_data_size_limit_ix { - Err(TransactionError::DuplicateInstruction(2)) - } else { - Err(TransactionError::InstructionError( - 1, - InstructionError::InvalidInstructionData, - )) - }; - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), - ], - expected_result, - support_set_loaded_accounts_data_size_limit_ix - ); - } - } - - #[test] - fn test_process_mixed_instructions_without_compute_budget() { - let payer_keypair = Keypair::new(); - - let transaction = - SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - system_instruction::transfer(&payer_keypair.pubkey(), &Pubkey::new_unique(), 2), - ], - Some(&payer_keypair.pubkey()), - &[&payer_keypair], - Hash::default(), - )); - - let mut feature_set = FeatureSet::default(); - feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0); - feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); - - let result = process_compute_budget_instructions( - transaction.message().program_instructions_iter(), - 
&feature_set, - ); - - // assert process_instructions will be successful with default, - // and the default compute_unit_limit is 2 times default: one for bpf ix, one for - // builtin ix. - assert_eq!( - result, - Ok(ComputeBudgetLimits { - compute_unit_limit: 2 * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - ..ComputeBudgetLimits::default() - }) - ); - } -} diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 566a98dab9be69..9fbe42d8d40c07 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -756,7 +756,7 @@ pub fn mock_process_instruction TransactionExecutionResult::NotExecuted(e.clone()), (Ok(loaded_transaction), nonce) => { - let compute_budget = - if let Some(compute_budget) = self.runtime_config.compute_budget { - compute_budget - } else { - let mut compute_budget_process_transaction_time = - Measure::start("compute_budget_process_transaction_time"); - let maybe_compute_budget = ComputeBudget::try_from_instructions( - tx.message().program_instructions_iter(), - &self.feature_set, - ); - compute_budget_process_transaction_time.stop(); - saturating_add_assign!( - timings - .execute_accessories - .compute_budget_process_transaction_us, - compute_budget_process_transaction_time.as_us() - ); - if let Err(err) = maybe_compute_budget { - return TransactionExecutionResult::NotExecuted(err); - } - maybe_compute_budget.unwrap() - }; + let compute_budget = if let Some(compute_budget) = + self.runtime_config.compute_budget + { + compute_budget + } else { + let mut compute_budget = + ComputeBudget::new(compute_budget::MAX_COMPUTE_UNIT_LIMIT as u64); + + let mut compute_budget_process_transaction_time = + Measure::start("compute_budget_process_transaction_time"); + let process_transaction_result = compute_budget.process_instructions( + tx.message().program_instructions_iter(), + !self + .feature_set + .is_active(&remove_deprecated_request_unit_ix::id()), + self.feature_set + 
.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), + ); + compute_budget_process_transaction_time.stop(); + saturating_add_assign!( + timings + .execute_accessories + .compute_budget_process_transaction_us, + compute_budget_process_transaction_time.as_us() + ); + if let Err(err) = process_transaction_result { + return TransactionExecutionResult::NotExecuted(err); + } + compute_budget + }; let result = self.execute_loaded_transaction( tx, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index cd1e227591a520..82393ef7161a2b 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -46,8 +46,7 @@ use { }, solana_logger, solana_program_runtime::{ - compute_budget::ComputeBudget, - compute_budget_processor::{self, MAX_COMPUTE_UNIT_LIMIT}, + compute_budget::{self, ComputeBudget, MAX_COMPUTE_UNIT_LIMIT}, declare_process_instruction, invoke_context::mock_process_instruction, loaded_programs::{LoadedProgram, LoadedProgramType, DELAY_VISIBILITY_SLOT_OFFSET}, @@ -10121,9 +10120,7 @@ fn test_compute_budget_program_noop() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: u64::from( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - ), + compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10136,7 +10133,7 @@ fn test_compute_budget_program_noop() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -10166,9 +10163,7 @@ fn test_compute_request_instruction() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: u64::from( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - ), + compute_unit_limit: 
compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10181,7 +10176,7 @@ fn test_compute_request_instruction() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -10218,9 +10213,7 @@ fn test_failed_compute_request_instruction() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: u64::from( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - ), + compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10451,19 +10444,14 @@ fn calculate_test_fee( remove_congestion_multiplier: bool, ) -> u64 { let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); if !support_set_accounts_data_size_limit_ix { - feature_set.deactivate( - &solana_sdk::feature_set::include_loaded_accounts_data_size_in_fee_calculation::id(), - ); + feature_set.deactivate(&include_loaded_accounts_data_size_in_fee_calculation::id()); } let budget_limits = - process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) - .unwrap_or_default() - .into(); - + ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set); fee_structure.calculate_fee( message, lamports_per_signature, @@ -11490,9 +11478,7 @@ fn test_rent_state_list_len() { ); let compute_budget = bank.runtime_config.compute_budget.unwrap_or_else(|| { - ComputeBudget::new(u64::from( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - )) + 
ComputeBudget::new(compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64) }); let transaction_context = TransactionContext::new( loaded_txs[0].0.as_ref().unwrap().accounts.clone(), diff --git a/runtime/src/transaction_priority_details.rs b/runtime/src/transaction_priority_details.rs index 401f3e87893887..0d0a94df4ed393 100644 --- a/runtime/src/transaction_priority_details.rs +++ b/runtime/src/transaction_priority_details.rs @@ -1,7 +1,6 @@ use { - solana_program_runtime::compute_budget_processor::process_compute_budget_instructions, + solana_program_runtime::compute_budget::ComputeBudget, solana_sdk::{ - feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, transaction::{SanitizedTransaction, SanitizedVersionedTransaction}, @@ -24,17 +23,18 @@ pub trait GetTransactionPriorityDetails { instructions: impl Iterator, _round_compute_unit_price_enabled: bool, ) -> Option { - let mut feature_set = FeatureSet::default(); - feature_set.activate( - &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(), - 0, - ); - - let compute_budget_limits = - process_compute_budget_instructions(instructions, &feature_set).ok()?; + let mut compute_budget = ComputeBudget::default(); + let prioritization_fee_details = compute_budget + .process_instructions( + instructions, + true, // supports prioritization by request_units_deprecated instruction + true, // enable support set accounts data size instruction + // TODO: round_compute_unit_price_enabled: bool + ) + .ok()?; Some(TransactionPriorityDetails { - priority: compute_budget_limits.compute_unit_price, - compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), + priority: prioritization_fee_details.get_priority(), + compute_unit_limit: compute_budget.compute_unit_limit, }) } } @@ -98,8 +98,8 @@ mod tests { Some(TransactionPriorityDetails { priority: 0, compute_unit_limit: - solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as 
u64, + solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64 }) ); @@ -111,8 +111,8 @@ mod tests { Some(TransactionPriorityDetails { priority: 0, compute_unit_limit: - solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64, + solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64 }) ); } @@ -174,8 +174,8 @@ mod tests { Some(TransactionPriorityDetails { priority: requested_price, compute_unit_limit: - solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64, + solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64 }) ); @@ -187,8 +187,8 @@ mod tests { Some(TransactionPriorityDetails { priority: requested_price, compute_unit_limit: - solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64, + solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64 }) ); } From 59cb3b57ee78b6c59f4cb5a744fa84b1198c77e4 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 20 Oct 2023 08:47:03 -0700 Subject: [PATCH 393/407] Set a global fork graph in program cache (#33776) * Set a global fork graph in program cache * fix deadlock * review feedback --- core/src/replay_stage.rs | 1 + ledger/src/bank_forks_utils.rs | 9 ++ ledger/src/blockstore_processor.rs | 5 + program-runtime/src/loaded_programs.rs | 186 +++++++++++++++---------- runtime/src/bank.rs | 7 +- runtime/src/bank_forks.rs | 20 +-- 6 files changed, 145 insertions(+), 83 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 5af02b9c382898..2e9aba1dd964e1 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -3859,6 +3859,7 @@ impl ReplayStage { epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, drop_bank_sender: &Sender>>, ) { + bank_forks.read().unwrap().prune_program_cache(new_root); let removed_banks = 
bank_forks.write().unwrap().set_root( new_root, accounts_background_request_sender, diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 0be01e9bde975b..8552a59033326f 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -189,6 +189,15 @@ pub fn load_bank_forks( (bank_forks, None) }; + bank_forks + .read() + .expect("Failed to read lock the bank forks") + .root_bank() + .loaded_programs_cache + .write() + .expect("Failed to write lock the program cache") + .set_fork_graph(bank_forks.clone()); + let mut leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank_forks.read().unwrap().root_bank()); if process_options.full_leader_cache { diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 618fe4a2c4a2c3..10ff57a1202581 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1555,6 +1555,11 @@ fn load_frozen_forks( root = new_root_bank.slot(); leader_schedule_cache.set_root(new_root_bank); + new_root_bank + .loaded_programs_cache + .write() + .unwrap() + .prune(root, new_root_bank.epoch()); let _ = bank_forks.write().unwrap().set_root( root, accounts_background_request_sender, diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 0ded17ee7877de..26a17790d34991 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -4,7 +4,7 @@ use { timings::ExecuteDetailsTimings, }, itertools::Itertools, - log::{debug, log_enabled, trace}, + log::{debug, error, log_enabled, trace}, percentage::PercentageInteger, solana_measure::measure::Measure, solana_rbpf::{ @@ -24,7 +24,7 @@ use { fmt::{Debug, Formatter}, sync::{ atomic::{AtomicU64, Ordering}, - Arc, + Arc, RwLock, }, }, }; @@ -442,8 +442,8 @@ impl Default for ProgramRuntimeEnvironments { } } -#[derive(Debug, Default)] -pub struct LoadedPrograms { +#[derive(Debug)] +pub struct LoadedPrograms { /// A two 
level index: /// /// Pubkey is the address of a program, multiple versions can coexists simultaneously under the same address (in different slots). @@ -455,6 +455,20 @@ pub struct LoadedPrograms { /// Environments of the current epoch pub environments: ProgramRuntimeEnvironments, pub stats: Stats, + fork_graph: Option>>, +} + +impl Default for LoadedPrograms { + fn default() -> Self { + Self { + entries: HashMap::new(), + latest_root_slot: 0, + latest_root_epoch: 0, + environments: ProgramRuntimeEnvironments::default(), + stats: Stats::default(), + fork_graph: None, + } + } } #[derive(Clone, Debug, Default)] @@ -531,7 +545,11 @@ pub enum LoadedProgramMatchCriteria { NoCriteria, } -impl LoadedPrograms { +impl LoadedPrograms { + pub fn set_fork_graph(&mut self, fork_graph: Arc>) { + self.fork_graph = Some(fork_graph); + } + /// Returns the current environments depending on the given epoch pub fn get_environments_for_epoch(&self, _epoch: Epoch) -> &ProgramRuntimeEnvironments { &self.environments @@ -625,12 +643,15 @@ impl LoadedPrograms { } /// Before rerooting the blockstore this removes all superfluous entries - pub fn prune( - &mut self, - fork_graph: &F, - new_root_slot: Slot, - new_root_epoch: Epoch, - ) { + pub fn prune(&mut self, new_root_slot: Slot, new_root_epoch: Epoch) { + let Some(fork_graph) = self.fork_graph.clone() else { + error!("Program cache doesn't have fork graph."); + return; + }; + let Ok(fork_graph) = fork_graph.read() else { + error!("Failed to lock fork graph for reading."); + return; + }; for second_level in self.entries.values_mut() { // Remove entries un/re/deployed on orphan forks let mut first_ancestor_found = false; @@ -911,7 +932,7 @@ impl solana_frozen_abi::abi_example::AbiExample for LoadedProgram { } #[cfg(RUSTC_WITH_SPECIALIZATION)] -impl solana_frozen_abi::abi_example::AbiExample for LoadedPrograms { +impl solana_frozen_abi::abi_example::AbiExample for LoadedPrograms { fn example() -> Self { // LoadedPrograms isn't serializable by 
definition. Self::default() @@ -937,7 +958,7 @@ mod tests { ops::ControlFlow, sync::{ atomic::{AtomicU64, Ordering}, - Arc, + Arc, RwLock, }, }, }; @@ -945,7 +966,7 @@ mod tests { static MOCK_ENVIRONMENT: std::sync::OnceLock = std::sync::OnceLock::::new(); - fn new_mock_cache() -> LoadedPrograms { + fn new_mock_cache() -> LoadedPrograms { let mut cache = LoadedPrograms::default(); cache.environments.program_runtime_v1 = MOCK_ENVIRONMENT .get_or_init(|| Arc::new(BuiltinProgram::new_mock())) @@ -999,8 +1020,8 @@ mod tests { }) } - fn set_tombstone( - cache: &mut LoadedPrograms, + fn set_tombstone( + cache: &mut LoadedPrograms, key: Pubkey, slot: Slot, reason: LoadedProgramType, @@ -1008,8 +1029,8 @@ mod tests { cache.assign_program(key, Arc::new(LoadedProgram::new_tombstone(slot, reason))) } - fn insert_unloaded_program( - cache: &mut LoadedPrograms, + fn insert_unloaded_program( + cache: &mut LoadedPrograms, key: Pubkey, slot: Slot, ) -> Arc { @@ -1031,9 +1052,10 @@ mod tests { cache.replenish(key, unloaded).1 } - fn num_matching_entries

    (cache: &LoadedPrograms, predicate: P) -> usize + fn num_matching_entries(cache: &LoadedPrograms, predicate: P) -> usize where P: Fn(&LoadedProgramType) -> bool, + FG: ForkGraph, { cache .entries @@ -1052,7 +1074,7 @@ mod tests { let mut programs = vec![]; let mut num_total_programs: usize = 0; - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); let program1 = Pubkey::new_unique(); let program1_deployment_slots = [0, 10, 20]; @@ -1218,7 +1240,7 @@ mod tests { #[test] fn test_usage_count_of_unloaded_program() { - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); let program = Pubkey::new_unique(); let num_total_programs = 6; @@ -1270,7 +1292,7 @@ mod tests { #[test] fn test_replace_tombstones() { - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); let program1 = Pubkey::new_unique(); let env = Arc::new(BuiltinProgram::new_mock()); set_tombstone( @@ -1302,7 +1324,7 @@ mod tests { assert_eq!(tombstone.deployment_slot, 100); assert_eq!(tombstone.effective_slot, 100); - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); let program1 = Pubkey::new_unique(); let tombstone = set_tombstone( &mut cache, @@ -1362,48 +1384,55 @@ mod tests { #[test] fn test_prune_empty() { - let mut cache = new_mock_cache(); - let fork_graph = TestForkGraph { + let mut cache = new_mock_cache::(); + let fork_graph = Arc::new(RwLock::new(TestForkGraph { relation: BlockRelation::Unrelated, - }; + })); - cache.prune(&fork_graph, 0, 0); + cache.set_fork_graph(fork_graph); + + cache.prune(0, 0); assert!(cache.entries.is_empty()); - cache.prune(&fork_graph, 10, 0); + cache.prune(10, 0); assert!(cache.entries.is_empty()); - let mut cache = new_mock_cache(); - let fork_graph = TestForkGraph { + let mut cache = new_mock_cache::(); + let fork_graph = Arc::new(RwLock::new(TestForkGraph { relation: BlockRelation::Ancestor, - }; + })); + + cache.set_fork_graph(fork_graph); - cache.prune(&fork_graph, 0, 0); + 
cache.prune(0, 0); assert!(cache.entries.is_empty()); - cache.prune(&fork_graph, 10, 0); + cache.prune(10, 0); assert!(cache.entries.is_empty()); - let mut cache = new_mock_cache(); - let fork_graph = TestForkGraph { + let mut cache = new_mock_cache::(); + let fork_graph = Arc::new(RwLock::new(TestForkGraph { relation: BlockRelation::Descendant, - }; + })); + + cache.set_fork_graph(fork_graph); - cache.prune(&fork_graph, 0, 0); + cache.prune(0, 0); assert!(cache.entries.is_empty()); - cache.prune(&fork_graph, 10, 0); + cache.prune(10, 0); assert!(cache.entries.is_empty()); - let mut cache = new_mock_cache(); - let fork_graph = TestForkGraph { + let mut cache = new_mock_cache::(); + let fork_graph = Arc::new(RwLock::new(TestForkGraph { relation: BlockRelation::Unknown, - }; + })); + cache.set_fork_graph(fork_graph); - cache.prune(&fork_graph, 0, 0); + cache.prune(0, 0); assert!(cache.entries.is_empty()); - cache.prune(&fork_graph, 10, 0); + cache.prune(10, 0); assert!(cache.entries.is_empty()); } @@ -1512,7 +1541,7 @@ mod tests { #[test] fn test_fork_extract_and_prune() { - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); // Fork graph created for the test // 0 @@ -1534,6 +1563,9 @@ mod tests { fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]); fork_graph.insert_fork(&[0, 5, 11, 25, 27]); + let fork_graph = Arc::new(RwLock::new(fork_graph)); + cache.set_fork_graph(fork_graph); + let program1 = Pubkey::new_unique(); assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); assert!(!cache.replenish(program1, new_test_loaded_program(10, 11)).0); @@ -1783,7 +1815,7 @@ mod tests { programs.pop(); } - cache.prune(&fork_graph, 5, 0); + cache.prune(5, 0); // Fork graph after pruning // 0 @@ -1848,7 +1880,7 @@ mod tests { assert!(match_slot(&found, &program3, 25, 27)); assert!(match_slot(&found, &program4, 5, 27)); - cache.prune(&fork_graph, 15, 0); + cache.prune(15, 0); // Fork graph after pruning // 0 @@ -1893,7 +1925,7 @@ mod tests 
{ #[test] fn test_extract_using_deployment_slot() { - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); // Fork graph created for the test // 0 @@ -1915,6 +1947,9 @@ mod tests { fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]); fork_graph.insert_fork(&[0, 5, 11, 25, 27]); + let fork_graph = Arc::new(RwLock::new(fork_graph)); + cache.set_fork_graph(fork_graph); + let program1 = Pubkey::new_unique(); assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); assert!(!cache.replenish(program1, new_test_loaded_program(20, 21)).0); @@ -1978,7 +2013,7 @@ mod tests { #[test] fn test_extract_unloaded() { - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); // Fork graph created for the test // 0 @@ -2000,6 +2035,9 @@ mod tests { fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]); fork_graph.insert_fork(&[0, 5, 11, 25, 27]); + let fork_graph = Arc::new(RwLock::new(fork_graph)); + cache.set_fork_graph(fork_graph); + let program1 = Pubkey::new_unique(); assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); assert!(!cache.replenish(program1, new_test_loaded_program(20, 21)).0); @@ -2096,7 +2134,7 @@ mod tests { #[test] fn test_prune_expired() { - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); // Fork graph created for the test // 0 @@ -2117,6 +2155,8 @@ mod tests { fork_graph.insert_fork(&[0, 10, 20, 22]); fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]); fork_graph.insert_fork(&[0, 5, 11, 25, 27]); + let fork_graph = Arc::new(RwLock::new(fork_graph)); + cache.set_fork_graph(fork_graph); let program1 = Pubkey::new_unique(); assert!(!cache.replenish(program1, new_test_loaded_program(10, 11)).0); @@ -2198,7 +2238,7 @@ mod tests { ); // New root 5 should not evict the expired entry for program1 - cache.prune(&fork_graph, 5, 0); + cache.prune(5, 0); assert_eq!( cache .entries @@ -2209,13 +2249,13 @@ mod tests { ); // New root 15 should evict the expired entry for 
program1 - cache.prune(&fork_graph, 15, 0); + cache.prune(15, 0); assert!(cache.entries.get(&program1).is_none()); } #[test] fn test_fork_prune_find_first_ancestor() { - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); // Fork graph created for the test // 0 @@ -2230,12 +2270,14 @@ mod tests { let mut fork_graph = TestForkGraphSpecific::default(); fork_graph.insert_fork(&[0, 10, 20]); fork_graph.insert_fork(&[0, 5]); + let fork_graph = Arc::new(RwLock::new(fork_graph)); + cache.set_fork_graph(fork_graph); let program1 = Pubkey::new_unique(); assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); assert!(!cache.replenish(program1, new_test_loaded_program(5, 6)).0); - cache.prune(&fork_graph, 10, 0); + cache.prune(10, 0); let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); let ExtractedPrograms { @@ -2261,7 +2303,7 @@ mod tests { #[test] fn test_prune_by_deployment_slot() { - let mut cache = new_mock_cache(); + let mut cache = new_mock_cache::(); // Fork graph created for the test // 0 @@ -2276,6 +2318,8 @@ mod tests { let mut fork_graph = TestForkGraphSpecific::default(); fork_graph.insert_fork(&[0, 10, 20]); fork_graph.insert_fork(&[0, 5]); + let fork_graph = Arc::new(RwLock::new(fork_graph)); + cache.set_fork_graph(fork_graph); let program1 = Pubkey::new_unique(); assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); @@ -2380,34 +2424,34 @@ mod tests { #[test] fn test_usable_entries_for_slot() { - new_mock_cache(); + new_mock_cache::(); let tombstone = Arc::new(LoadedProgram::new_tombstone(0, LoadedProgramType::Closed)); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( &tombstone, 0, &LoadedProgramMatchCriteria::NoCriteria )); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( &tombstone, 1, &LoadedProgramMatchCriteria::Tombstone )); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( 
&tombstone, 1, &LoadedProgramMatchCriteria::NoCriteria )); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( &tombstone, 1, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) )); - assert!(!LoadedPrograms::is_entry_usable( + assert!(!LoadedPrograms::::is_entry_usable( &tombstone, 1, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) @@ -2415,31 +2459,31 @@ mod tests { let program = new_test_loaded_program(0, 1); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( &program, 0, &LoadedProgramMatchCriteria::NoCriteria )); - assert!(!LoadedPrograms::is_entry_usable( + assert!(!LoadedPrograms::::is_entry_usable( &program, 1, &LoadedProgramMatchCriteria::Tombstone )); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( &program, 1, &LoadedProgramMatchCriteria::NoCriteria )); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( &program, 1, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) )); - assert!(!LoadedPrograms::is_entry_usable( + assert!(!LoadedPrograms::::is_entry_usable( &program, 1, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) @@ -2452,37 +2496,37 @@ mod tests { Some(2), )); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( &program, 0, &LoadedProgramMatchCriteria::NoCriteria )); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( &program, 1, &LoadedProgramMatchCriteria::NoCriteria )); - assert!(!LoadedPrograms::is_entry_usable( + assert!(!LoadedPrograms::::is_entry_usable( &program, 1, &LoadedProgramMatchCriteria::Tombstone )); - assert!(!LoadedPrograms::is_entry_usable( + assert!(!LoadedPrograms::::is_entry_usable( &program, 2, &LoadedProgramMatchCriteria::NoCriteria )); - assert!(LoadedPrograms::is_entry_usable( + assert!(LoadedPrograms::::is_entry_usable( &program, 1, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) 
)); - assert!(!LoadedPrograms::is_entry_usable( + assert!(!LoadedPrograms::::is_entry_usable( &program, 1, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 992d1d0dfcae75..9758210182e918 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -39,6 +39,7 @@ pub use solana_sdk::reward_type::RewardType; use { crate::{ bank::metrics::*, + bank_forks::BankForks, builtins::{BuiltinPrototype, BUILTINS}, epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, @@ -820,7 +821,7 @@ pub struct Bank { pub incremental_snapshot_persistence: Option, - pub loaded_programs_cache: Arc>, + pub loaded_programs_cache: Arc>>, pub check_program_modification_slot: bool, @@ -1070,7 +1071,7 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - loaded_programs_cache: Arc::>::default(), + loaded_programs_cache: Arc::>>::default(), check_program_modification_slot: false, epoch_reward_status: EpochRewardStatus::default(), }; @@ -1856,7 +1857,7 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - loaded_programs_cache: Arc::>::default(), + loaded_programs_cache: Arc::>>::default(), check_program_modification_slot: false, epoch_reward_status: EpochRewardStatus::default(), }; diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index c1ef6830d1998c..27abe800620ac9 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -55,6 +55,7 @@ struct SetRootTimings { prune_remove_ms: i64, } +#[derive(Debug)] pub struct BankForks { banks: HashMap>, descendants: HashMap>, @@ -404,6 +405,16 @@ impl BankForks { ) } + pub fn prune_program_cache(&self, root: Slot) { + if let Some(root_bank) = self.banks.get(&root) { + root_bank + 
.loaded_programs_cache + .write() + .unwrap() + .prune(root, root_bank.epoch()); + } + } + pub fn set_root( &mut self, root: Slot, @@ -411,15 +422,6 @@ impl BankForks { highest_super_majority_root: Option, ) -> Vec> { let program_cache_prune_start = Instant::now(); - let root_bank = self - .banks - .get(&root) - .expect("root bank didn't exist in bank_forks"); - root_bank - .loaded_programs_cache - .write() - .unwrap() - .prune(self, root, root_bank.epoch()); let set_root_start = Instant::now(); let (removed_banks, set_root_metrics) = self.do_set_root_return_metrics( root, From e5dfc9cb276fa4542dac30e85d2767fcd33d5899 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Fri, 20 Oct 2023 11:01:52 -0500 Subject: [PATCH 394/407] Add check for CHANGELOG.md change when changelog label applied to PR (#33675) * Add check for CHANGELOG.md change when changelog label applied to PR * Update changelog --- .github/scripts/check-changelog.sh | 10 ++++++++++ .github/workflows/changelog-label.yml | 20 ++++++++++++++++++++ CHANGELOG.md | 1 + 3 files changed, 31 insertions(+) create mode 100755 .github/scripts/check-changelog.sh create mode 100644 .github/workflows/changelog-label.yml diff --git a/.github/scripts/check-changelog.sh b/.github/scripts/check-changelog.sh new file mode 100755 index 00000000000000..a310000d878ab2 --- /dev/null +++ b/.github/scripts/check-changelog.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -uo pipefail + +CHANGELOG_FILE="CHANGELOG.md" +echo "Checking: git diff --exit-code origin/${BASE_REF} -- ${CHANGELOG_FILE}" + +if git diff --exit-code "origin/${BASE_REF}" -- "${CHANGELOG_FILE}"; then + >&2 echo "Error: this pull request requires an entry in $CHANGELOG_FILE, but no entry was found" + exit 1 +fi diff --git a/.github/workflows/changelog-label.yml b/.github/workflows/changelog-label.yml new file mode 100644 index 00000000000000..c63f7821c260dd --- /dev/null +++ b/.github/workflows/changelog-label.yml @@ -0,0 +1,20 @@ +name: Require changelog entry + +on: + 
pull_request: + types: [opened, synchronize, reopened, labeled, unlabeled] + +jobs: + check-changelog: + if: contains(github.event.pull_request.labels.*.name, 'changelog') + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Check if changes to CHANGELOG.md + shell: bash + env: + BASE_REF: ${{ github.event.pull_request.base.ref }} + run: .github/scripts/check-changelog.sh diff --git a/CHANGELOG.md b/CHANGELOG.md index 1bc3ceb4dd42f5..a99a5ffe0045a1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ Release channels have their own copy of this changelog: ## [1.18.0] - Unreleased * Changes + * Added a github check to support `changelog` label * Upgrade Notes ## [1.17.0] From e13756133adab4f2d3e6ebc42f5b4da1b518a327 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 20 Oct 2023 09:55:37 -0700 Subject: [PATCH 395/407] ancient: add many_refs_this_is_newest_alive (#33741) * add many_refs_this_is_newest_alive * fix pathological case, add comments * add log * update comments * add log when we fail to pack * use with_capacity * fix log comment * clippy --- accounts-db/src/accounts_db.rs | 75 ++- accounts-db/src/ancient_append_vecs.rs | 710 +++++++++++-------------- 2 files changed, 373 insertions(+), 412 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 604fed349c114d..bfba76b6c380c6 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -201,8 +201,12 @@ pub(crate) struct AliveAccounts<'a> { /// separate pubkeys into those with a single refcount and those with > 1 refcount #[derive(Debug)] pub(crate) struct ShrinkCollectAliveSeparatedByRefs<'a> { + /// accounts where ref_count = 1 pub(crate) one_ref: AliveAccounts<'a>, - pub(crate) many_refs: AliveAccounts<'a>, + /// account where ref_count > 1, but this slot contains the alive entry with the highest slot + pub(crate) many_refs_this_is_newest_alive: AliveAccounts<'a>, + /// 
account where ref_count > 1, and this slot is NOT the highest alive entry in the index for the pubkey + pub(crate) many_refs_old_alive: AliveAccounts<'a>, } /// Configuration Parameters for running accounts hash and total lamports verification @@ -228,7 +232,12 @@ pub struct VerifyAccountsHashAndLamportsConfig<'a> { pub(crate) trait ShrinkCollectRefs<'a>: Sync + Send { fn with_capacity(capacity: usize, slot: Slot) -> Self; fn collect(&mut self, other: Self); - fn add(&mut self, ref_count: u64, account: &'a StoredAccountMeta<'a>); + fn add( + &mut self, + ref_count: u64, + account: &'a StoredAccountMeta<'a>, + slot_list: &[(Slot, AccountInfo)], + ); fn len(&self) -> usize; fn alive_bytes(&self) -> usize; fn alive_accounts(&self) -> &Vec<&'a StoredAccountMeta<'a>>; @@ -246,7 +255,12 @@ impl<'a> ShrinkCollectRefs<'a> for AliveAccounts<'a> { slot, } } - fn add(&mut self, _ref_count: u64, account: &'a StoredAccountMeta<'a>) { + fn add( + &mut self, + _ref_count: u64, + account: &'a StoredAccountMeta<'a>, + _slot_list: &[(Slot, AccountInfo)], + ) { self.accounts.push(account); self.bytes = self.bytes.saturating_add(account.stored_size()); } @@ -264,29 +278,50 @@ impl<'a> ShrinkCollectRefs<'a> for AliveAccounts<'a> { impl<'a> ShrinkCollectRefs<'a> for ShrinkCollectAliveSeparatedByRefs<'a> { fn collect(&mut self, other: Self) { self.one_ref.collect(other.one_ref); - self.many_refs.collect(other.many_refs); + self.many_refs_this_is_newest_alive + .collect(other.many_refs_this_is_newest_alive); + self.many_refs_old_alive.collect(other.many_refs_old_alive); } fn with_capacity(capacity: usize, slot: Slot) -> Self { Self { one_ref: AliveAccounts::with_capacity(capacity, slot), - many_refs: AliveAccounts::with_capacity(capacity, slot), + many_refs_this_is_newest_alive: AliveAccounts::with_capacity(0, slot), + many_refs_old_alive: AliveAccounts::with_capacity(0, slot), } } - fn add(&mut self, ref_count: u64, account: &'a StoredAccountMeta<'a>) { + fn add( + &mut self, + 
ref_count: u64, + account: &'a StoredAccountMeta<'a>, + slot_list: &[(Slot, AccountInfo)], + ) { let other = if ref_count == 1 { &mut self.one_ref + } else if slot_list.len() == 1 + || !slot_list + .iter() + .any(|(slot_list_slot, _info)| slot_list_slot > &self.many_refs_old_alive.slot) + { + // this entry is alive but is newer than any other slot in the index + &mut self.many_refs_this_is_newest_alive } else { - &mut self.many_refs + // This entry is alive but is older than at least one other slot in the index. + // We would expect clean to get rid of the entry for THIS slot at some point, but clean hasn't done that yet. + &mut self.many_refs_old_alive }; - other.add(ref_count, account); + other.add(ref_count, account, slot_list); } fn len(&self) -> usize { - self.one_ref.len().saturating_add(self.many_refs.len()) + self.one_ref + .len() + .saturating_add(self.many_refs_old_alive.len()) + .saturating_add(self.many_refs_this_is_newest_alive.len()) } fn alive_bytes(&self) -> usize { self.one_ref .alive_bytes() - .saturating_add(self.many_refs.alive_bytes()) + .saturating_add(self.many_refs_old_alive.alive_bytes()) + .saturating_add(self.many_refs_this_is_newest_alive.alive_bytes()) } fn alive_accounts(&self) -> &Vec<&'a StoredAccountMeta<'a>> { unimplemented!("illegal use"); @@ -2015,6 +2050,7 @@ pub(crate) struct ShrinkStatsSub { pub(crate) rewrite_elapsed_us: u64, pub(crate) create_and_insert_store_elapsed_us: u64, pub(crate) unpackable_slots_count: usize, + pub(crate) newest_alive_packed_count: usize, } impl ShrinkStatsSub { @@ -2027,9 +2063,12 @@ impl ShrinkStatsSub { other.create_and_insert_store_elapsed_us ); saturating_add_assign!(self.unpackable_slots_count, other.unpackable_slots_count); + saturating_add_assign!( + self.newest_alive_packed_count, + other.newest_alive_packed_count + ); } } - #[derive(Debug, Default)] pub struct ShrinkStats { last_report: AtomicInterval, @@ -2043,6 +2082,7 @@ pub struct ShrinkStats { remove_old_stores_shrink_us: AtomicU64, 
rewrite_elapsed: AtomicU64, unpackable_slots_count: AtomicU64, + newest_alive_packed_count: AtomicU64, drop_storage_entries_elapsed: AtomicU64, recycle_stores_write_elapsed: AtomicU64, accounts_removed: AtomicUsize, @@ -2227,6 +2267,13 @@ impl ShrinkAncientStats { .swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "newest_alive_packed_count", + self.shrink_stats + .newest_alive_packed_count + .swap(0, Ordering::Relaxed) as i64, + i64 + ), ( "drop_storage_entries_elapsed", self.shrink_stats @@ -3887,7 +3934,7 @@ impl AccountsDb { // Since we are shrinking these entries, we need to disambiguate append_vec_ids during this period and those only exist in the in-memory accounts index. index_entries_being_shrunk.push(Arc::clone(entry.unwrap())); all_are_zero_lamports &= stored_account.lamports() == 0; - alive_accounts.add(ref_count, stored_account); + alive_accounts.add(ref_count, stored_account, slot_list); alive += 1; } } @@ -4187,6 +4234,10 @@ impl AccountsDb { shrink_stats .unpackable_slots_count .fetch_add(stats_sub.unpackable_slots_count as u64, Ordering::Relaxed); + shrink_stats.newest_alive_packed_count.fetch_add( + stats_sub.newest_alive_packed_count as u64, + Ordering::Relaxed, + ); } /// get stores for 'slot' diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 685b6962b93e8a..8336731b30d511 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -21,7 +21,7 @@ use { rand::{thread_rng, Rng}, rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, solana_measure::measure_us, - solana_sdk::{account::ReadableAccount, clock::Slot, pubkey::Pubkey, saturating_add_assign}, + solana_sdk::{account::ReadableAccount, clock::Slot, saturating_add_assign}, std::{ collections::HashMap, num::NonZeroU64, @@ -274,6 +274,37 @@ impl AccountsDb { self.shrink_ancient_stats.report(); } + /// return false if `many_refs_newest` accounts cannot be moved into 
`target_slots_sorted`. + /// The slot # would be violated. + /// accounts in `many_refs_newest` must be moved a slot >= each account's current slot. + /// If that can be done, this fn returns true + fn many_ref_accounts_can_be_moved( + many_refs_newest: &[AliveAccounts<'_>], + target_slots_sorted: &[Slot], + tuning: &PackedAncientStorageTuning, + ) -> bool { + let alive_bytes = many_refs_newest + .iter() + .map(|alive| alive.bytes) + .sum::(); + let required_ideal_packed = (alive_bytes as u64 / tuning.ideal_storage_size + 1) as usize; + if alive_bytes == 0 { + // nothing required, so no problem moving nothing + return true; + } + if target_slots_sorted.len() < required_ideal_packed { + return false; + } + let i_last = target_slots_sorted + .len() + .saturating_sub(required_ideal_packed); + + let highest_slot = target_slots_sorted[i_last]; + many_refs_newest + .iter() + .all(|many| many.slot <= highest_slot) + } + fn combine_ancient_slots_packed_internal( &self, sorted_slots: Vec, @@ -293,15 +324,48 @@ impl AccountsDb { &ancient_slot_infos.all_infos[..], ); - let accounts_to_combine = self.calc_accounts_to_combine(&accounts_per_storage); + let mut accounts_to_combine = self.calc_accounts_to_combine(&accounts_per_storage); metrics.unpackable_slots_count += accounts_to_combine.unpackable_slots_count; - // pack the accounts with 1 ref + let mut many_refs_newest = accounts_to_combine + .accounts_to_combine + .iter_mut() + .filter_map(|alive| { + let newest_alive = + std::mem::take(&mut alive.alive_accounts.many_refs_this_is_newest_alive); + (!newest_alive.accounts.is_empty()).then_some(newest_alive) + }) + .collect::>(); + + // Sort highest slot to lowest slot. This way, we will put the multi ref accounts with the highest slots in the highest + // packed slot. 
+ many_refs_newest.sort_unstable_by(|a, b| b.slot.cmp(&a.slot)); + metrics.newest_alive_packed_count += many_refs_newest.len(); + + if !Self::many_ref_accounts_can_be_moved( + &many_refs_newest, + &accounts_to_combine.target_slots_sorted, + &tuning, + ) { + datapoint_info!("shrink_ancient_stats", ("high_slot", 1, i64)); + log::info!( + "unable to ancient pack: highest available slot: {:?}, lowest required slot: {:?}", + accounts_to_combine.target_slots_sorted.last(), + many_refs_newest.last().map(|accounts| accounts.slot) + ); + self.addref_accounts_failed_to_shrink_ancient(accounts_to_combine); + return; + } + + // pack the accounts with 1 ref or refs > 1 but the slot we're packing is the highest alive slot for the pubkey. + // Note the `chain` below combining the 2 types of refs. let pack = PackedAncientStorage::pack( - accounts_to_combine - .accounts_to_combine - .iter() - .map(|shrink_collect| &shrink_collect.alive_accounts.one_ref), + many_refs_newest.iter().chain( + accounts_to_combine + .accounts_to_combine + .iter() + .map(|shrink_collect| &shrink_collect.alive_accounts.one_ref), + ), tuning.ideal_storage_size, ); @@ -382,6 +446,7 @@ impl AccountsDb { rewrite_elapsed_us, create_and_insert_store_elapsed_us, unpackable_slots_count: 0, + newest_alive_packed_count: 0, }); write_ancient_accounts .shrinks_in_progress @@ -561,20 +626,37 @@ impl AccountsDb { .zip(accounts_per_storage.iter()) .enumerate() { - self.revisit_accounts_with_many_refs(shrink_collect); - let many_refs = &mut shrink_collect.alive_accounts.many_refs; - if !many_refs.accounts.is_empty() { - // there are accounts with ref_count > 1. This means this account must remain IN this slot. - // The same account could exist in a newer or older slot. Moving this account across slots could result - // in this alive version of the account now being in a slot OLDER than the non-alive instances. 
+ let many_refs_old_alive = &mut shrink_collect.alive_accounts.many_refs_old_alive; + if !many_refs_old_alive.accounts.is_empty() { + many_refs_old_alive.accounts.iter().for_each(|account| { + // these accounts could indicate clean bugs or low memory conditions where we are forced to flush non-roots + log::info!( + "ancient append vec: found unpackable account: {}, {}", + many_refs_old_alive.slot, + account.pubkey() + ); + }); + // There are alive accounts with ref_count > 1, where the entry for the account in the index is NOT the highest slot. (`many_refs_old_alive`) + // This means this account must remain IN this slot. There could be alive or dead references to this same account in any older slot. + // Moving it to a lower slot could move it before an alive or dead entry to this same account. + // Moving it to a higher slot could move it ahead of other slots where this account is also alive. We know a higher slot exists that contains this account. + // So, moving this account to a different slot could result in the moved account being before or after other instances of this account newer or older. + // This would fail the invariant that the highest slot # where an account exists defines the most recent account. + // It could be a clean error or a transient condition that will resolve if we encounter this situation. + // The count of these accounts per call will be reported by metrics in `unpackable_slots_count` if shrink_collect.unrefed_pubkeys.is_empty() && shrink_collect.alive_accounts.one_ref.accounts.is_empty() + && shrink_collect + .alive_accounts + .many_refs_this_is_newest_alive + .accounts + .is_empty() { // all accounts in this append vec are alive and have > 1 ref, so nothing to be done for this append vec remove.push(i); continue; } - accounts_keep_slots.insert(info.slot, std::mem::take(many_refs)); + accounts_keep_slots.insert(info.slot, std::mem::take(many_refs_old_alive)); } else { // No alive accounts in this slot have a ref_count > 1. 
So, ALL alive accounts in this slot can be written to any other slot // we find convenient. There is NO other instance of any account to conflict with. @@ -594,66 +676,6 @@ impl AccountsDb { } } - /// return pubkeys from `many_refs` accounts - fn get_many_refs_pubkeys<'a>( - shrink_collect: &ShrinkCollect<'a, ShrinkCollectAliveSeparatedByRefs<'a>>, - ) -> Vec { - shrink_collect - .alive_accounts - .many_refs - .accounts - .iter() - .map(|account| *account.pubkey()) - .collect::>() - } - - /// After calling `shrink_collect()` on many slots, any dead accounts in those slots would be unref'd. - /// Alive accounts which had ref_count > 1 are stored in `shrink_collect.alive_accounts.many_refs`. - /// Since many slots were being visited, it is possible that at a point in time, an account was found to be alive and have ref_count > 1. - /// Concurrently, another slot was visited which also had the account, but the account was dead and unref'd in that `shrink_collect()` call. - /// So, now that all unrefs have occurred, go back through the small number of `many_refs` accounts and for all that now only have 1 ref_count, - /// move the account from `many_refs` to `one_ref`. - fn revisit_accounts_with_many_refs<'a>( - &self, - shrink_collect: &mut ShrinkCollect<'a, ShrinkCollectAliveSeparatedByRefs<'a>>, - ) { - // collect pk values here to avoid borrow checker - let pks = Self::get_many_refs_pubkeys(shrink_collect); - let mut index = 0; - let mut saved = 0; - self.accounts_index.scan( - pks.iter(), - |_pubkey, slots_refs, _entry| { - index += 1; - if let Some((_slot_list, ref_count)) = slots_refs { - if ref_count == 1 { - // This entry has been unref'd during shrink ancient, so it can now move out of `many_refs` and into `one_ref`. - // This could happen if the same pubkey is in 2 append vecs that are BOTH being shrunk right now. - // Note that `shrink_collect()`, which was previously called to create `shrink_collect`, unrefs any dead accounts. 
- let many_refs = &mut shrink_collect.alive_accounts.many_refs; - let account = many_refs.accounts.remove(index - 1); - if many_refs.accounts.is_empty() { - // all accounts in `many_refs` now have only 1 ref, so this slot can now be combined into another. - saved += 1; - } - let bytes = account.stored_size(); - shrink_collect.alive_accounts.one_ref.accounts.push(account); - saturating_add_assign!(shrink_collect.alive_accounts.one_ref.bytes, bytes); - many_refs.bytes -= bytes; - // since we removed an entry from many_refs.accounts, we need to index one less - index -= 1; - } - } - AccountsIndexScanResult::OnlyKeepInMemoryIfDirty - }, - None, - false, - ); - self.shrink_ancient_stats - .second_pass_one_ref - .fetch_add(saved, Ordering::Relaxed); - } - /// create packed storage and write contents of 'packed' to it. /// accumulate results in 'write_ancient_accounts' fn write_one_packed_storage<'a, 'b: 'a>( @@ -910,7 +932,7 @@ pub mod tests { use { super::*, crate::{ - account_info::AccountInfo, + account_info::{AccountInfo, StorageLocation}, account_storage::meta::{AccountMeta, StoredAccountMeta, StoredMeta}, accounts_db::{ get_temp_accounts_paths, @@ -919,7 +941,7 @@ pub mod tests { create_db_with_storages_and_index, create_storages_and_update_index, get_all_accounts, remove_account_for_tests, CAN_RANDOMLY_SHRINK_FALSE, }, - INCLUDE_SLOT_IN_HASH_TESTS, MAX_RECYCLE_STORES, + ShrinkCollectRefs, INCLUDE_SLOT_IN_HASH_TESTS, MAX_RECYCLE_STORES, }, accounts_index::UpsertReclaim, append_vec::{aligned_stored_size, AppendVec, AppendVecStoredAccountMeta}, @@ -1483,38 +1505,36 @@ pub mod tests { let accounts_to_combine = db.calc_accounts_to_combine(&accounts_per_storage); - if !add_dead_account && two_refs { - assert!(accounts_to_combine.accounts_to_combine.is_empty()); - continue; - } else { - assert_eq!( + assert_eq!( accounts_to_combine.accounts_to_combine.len(), num_slots, "method: {method:?}, num_slots: {num_slots}, two_refs: {two_refs}" ); - } - if two_refs { - // all 
accounts should be in many_refs - let mut accounts_keep = accounts_to_combine - .accounts_keep_slots - .keys() - .cloned() - .collect::>(); + + if add_dead_account { assert!(!accounts_to_combine .accounts_to_combine .iter() .any(|a| a.unrefed_pubkeys.is_empty())); - // sort because accounts_keep_slots is a hashmap, with non-deterministic ordering - accounts_keep.sort_unstable(); + } + // all accounts should be in one_ref and all slots are available as target slots + assert_eq!( + accounts_to_combine.target_slots_sorted, if unsorted_slots { - accounts_keep = accounts_keep.into_iter().rev().collect(); + slots_vec.iter().cloned().rev().collect::>() + } else { + slots_vec.clone() } - assert_eq!(accounts_keep, slots_vec); - assert!(accounts_to_combine.target_slots_sorted.is_empty()); - assert_eq!( - accounts_to_combine.accounts_keep_slots.len(), - num_slots - ); + ); + assert!(accounts_to_combine.accounts_keep_slots.is_empty()); + assert!(accounts_to_combine.accounts_to_combine.iter().all( + |shrink_collect| shrink_collect + .alive_accounts + .many_refs_old_alive + .accounts + .is_empty() + )); + if two_refs { assert!(accounts_to_combine.accounts_to_combine.iter().all( |shrink_collect| shrink_collect .alive_accounts @@ -1523,29 +1543,13 @@ pub mod tests { .is_empty() )); assert!(accounts_to_combine.accounts_to_combine.iter().all( - |shrink_collect| shrink_collect + |shrink_collect| !shrink_collect .alive_accounts - .many_refs + .many_refs_this_is_newest_alive .accounts .is_empty() )); } else { - if add_dead_account { - assert!(!accounts_to_combine - .accounts_to_combine - .iter() - .any(|a| a.unrefed_pubkeys.is_empty())); - } - // all accounts should be in one_ref and all slots are available as target slots - assert_eq!( - accounts_to_combine.target_slots_sorted, - if unsorted_slots { - slots_vec.iter().cloned().rev().collect::>() - } else { - slots_vec.clone() - } - ); - assert!(accounts_to_combine.accounts_keep_slots.is_empty()); 
assert!(accounts_to_combine.accounts_to_combine.iter().all( |shrink_collect| !shrink_collect .alive_accounts @@ -1556,7 +1560,7 @@ pub mod tests { assert!(accounts_to_combine.accounts_to_combine.iter().all( |shrink_collect| shrink_collect .alive_accounts - .many_refs + .many_refs_this_is_newest_alive .accounts .is_empty() )); @@ -1578,43 +1582,8 @@ pub mod tests { db.write_packed_storages(&accounts_to_combine, packed_contents) } }; - if two_refs { - assert_eq!( - write_ancient_accounts.shrinks_in_progress.len(), - num_slots - ); - let mut shrinks_in_progress = write_ancient_accounts - .shrinks_in_progress - .iter() - .collect::>(); - // sort because shrinks_in_progress is a HashMap with non-deterministic order - shrinks_in_progress.sort_unstable_by(|a, b| a.0.cmp(b.0)); - if unsorted_slots { - shrinks_in_progress = - shrinks_in_progress.into_iter().rev().collect(); - } - assert_eq!( - shrinks_in_progress - .iter() - .map(|(slot, _)| **slot) - .collect::>(), - slots_vec - ); - assert_eq!( - shrinks_in_progress - .iter() - .map(|(_, shrink_in_progress)| shrink_in_progress - .old_storage() - .append_vec_id()) - .collect::>(), - storages - .iter() - .map(|storage| storage.append_vec_id()) - .collect::>() - ); - } else { - assert!(write_ancient_accounts.shrinks_in_progress.is_empty()); - } + + assert!(write_ancient_accounts.shrinks_in_progress.is_empty()); } } } @@ -1628,7 +1597,7 @@ pub mod tests { // with 2 accounts // 1 with 1 ref // 1 with 2 refs (and the other ref is from a newer slot) - // So, the other alive ref will cause the account with 2 refs to have to remain in the slot where it currently is. 
+ // So, the other alive ref will cause the account with 2 refs to be put into many_refs_old_alive and then accounts_keep_slots for method in TestWriteMultipleRefs::iter() { let num_slots = 1; // creating 1 more sample slot/storage, but effectively act like 1 slot @@ -1733,7 +1702,21 @@ pub mod tests { assert!(accounts_to_combine .accounts_to_combine .iter() - .all(|shrink_collect| shrink_collect.alive_accounts.many_refs.accounts.is_empty())); + .all(|shrink_collect| shrink_collect + .alive_accounts + .many_refs_this_is_newest_alive + .accounts + .is_empty())); + assert_eq!(accounts_to_combine.accounts_to_combine.len(), 1); + + assert!(accounts_to_combine + .accounts_to_combine + .iter() + .all(|shrink_collect| shrink_collect + .alive_accounts + .many_refs_old_alive + .accounts + .is_empty())); // test write_ancient_accounts_to_same_slot_multiple_refs since we built interesting 'AccountsToCombine' let write_ancient_accounts = match method { @@ -1796,7 +1779,8 @@ pub mod tests { // 1 storage // 2 accounts // 1 with 1 ref - // 1 with 2 refs + // 1 with 2 refs, with the idea that the other ref is from an older slot, so this one is the newer index entry + // The result will be that the account, even though it has refcount > 1, can be moved to a newer slot. 
for method in TestWriteMultipleRefs::iter() { let num_slots = 1; let (db, storages, slots, infos) = get_sample_storages(num_slots, None); @@ -1848,21 +1832,24 @@ pub mod tests { let accounts_to_combine = db.calc_accounts_to_combine(&accounts_per_storage); let slots_vec = slots.collect::>(); assert_eq!(accounts_to_combine.accounts_to_combine.len(), num_slots); - // all accounts should be in many_refs + // all accounts should be in many_refs_this_is_newest_alive let mut accounts_keep = accounts_to_combine .accounts_keep_slots .keys() .cloned() .collect::>(); accounts_keep.sort_unstable(); - assert_eq!(accounts_keep, slots_vec); - assert!(accounts_to_combine.target_slots_sorted.is_empty()); - assert_eq!(accounts_to_combine.accounts_keep_slots.len(), num_slots); + assert_eq!(accounts_to_combine.target_slots_sorted, slots_vec); + assert!(accounts_keep.is_empty()); + assert!(!accounts_to_combine.target_slots_sorted.is_empty()); + assert_eq!(accounts_to_combine.accounts_to_combine.len(), num_slots); assert_eq!( accounts_to_combine - .accounts_keep_slots - .get(&slot1) + .accounts_to_combine + .first() .unwrap() + .alive_accounts + .many_refs_this_is_newest_alive .accounts .iter() .map(|meta| meta.pubkey()) @@ -1894,7 +1881,11 @@ pub mod tests { assert!(accounts_to_combine .accounts_to_combine .iter() - .all(|shrink_collect| shrink_collect.alive_accounts.many_refs.accounts.is_empty())); + .all(|shrink_collect| !shrink_collect + .alive_accounts + .many_refs_this_is_newest_alive + .accounts + .is_empty())); // test write_ancient_accounts_to_same_slot_multiple_refs since we built interesting 'AccountsToCombine' let write_ancient_accounts = match method { @@ -1911,33 +1902,11 @@ pub mod tests { db.write_packed_storages(&accounts_to_combine, packed_contents) } }; - assert_eq!(write_ancient_accounts.shrinks_in_progress.len(), num_slots); - let mut shrinks_in_progress = write_ancient_accounts - .shrinks_in_progress - .iter() - .collect::>(); - 
shrinks_in_progress.sort_unstable_by(|a, b| a.0.cmp(b.0)); - assert_eq!( - shrinks_in_progress - .iter() - .map(|(slot, _)| **slot) - .collect::>(), - slots_vec - ); - assert_eq!( - shrinks_in_progress - .iter() - .map(|(_, shrink_in_progress)| shrink_in_progress.old_storage().append_vec_id()) - .collect::>(), - storages - .iter() - .map(|storage| storage.append_vec_id()) - .collect::>() - ); - // assert that we wrote the 2_ref account to the newly shrunk append vec - let shrink_in_progress = shrinks_in_progress.first().unwrap().1; - let accounts_shrunk_same_slot = shrink_in_progress.new_storage().accounts.accounts(0); - assert_eq!(accounts_shrunk_same_slot.len(), 1); + assert!(write_ancient_accounts.shrinks_in_progress.is_empty()); + // assert that we wrote the 2_ref account (and the 1 ref account) to the newly shrunk append vec + let storage = db.storage.get_slot_storage_entry(slot1).unwrap(); + let accounts_shrunk_same_slot = storage.accounts.accounts(0); + assert_eq!(accounts_shrunk_same_slot.len(), 2); assert_eq!( accounts_shrunk_same_slot.first().unwrap().pubkey(), pk_with_2_refs @@ -2980,214 +2949,6 @@ pub mod tests { } } - #[test] - fn test_get_many_refs_pubkeys() { - let rent_epoch = 0; - let lamports = 0; - let executable = false; - let owner = Pubkey::default(); - let data = Vec::new(); - - let pubkey = solana_sdk::pubkey::new_rand(); - let pubkey2 = solana_sdk::pubkey::new_rand(); - - let meta = StoredMeta { - write_version_obsolete: 5, - pubkey, - data_len: 7, - }; - let meta2 = StoredMeta { - write_version_obsolete: 5, - pubkey: pubkey2, - data_len: 7, - }; - let account_meta = AccountMeta { - lamports, - owner, - executable, - rent_epoch, - }; - let offset = 99; - let stored_size = 101; - let hash = AccountHash(Hash::new_unique()); - let stored_account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { - meta: &meta, - account_meta: &account_meta, - data: &data, - offset, - stored_size, - hash: &hash, - }); - let stored_account2 = 
StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { - meta: &meta2, - account_meta: &account_meta, - data: &data, - offset, - stored_size, - hash: &hash, - }); - for (many_refs_accounts, expected) in [ - (Vec::default(), Vec::default()), - (vec![&stored_account], vec![pubkey]), - ( - vec![&stored_account, &stored_account2], - vec![pubkey, pubkey2], - ), - ] { - let shrink_collect = ShrinkCollect:: { - slot: 0, - capacity: 0, - aligned_total_bytes: 0, - unrefed_pubkeys: Vec::default(), - alive_accounts: ShrinkCollectAliveSeparatedByRefs { - one_ref: AliveAccounts { - slot: 0, - accounts: Vec::default(), - bytes: 0, - }, - many_refs: AliveAccounts { - slot: 0, - accounts: many_refs_accounts, - bytes: 0, - }, - }, - alive_total_bytes: 0, - total_starting_accounts: 0, - all_are_zero_lamports: false, - _index_entries_being_shrunk: Vec::default(), - }; - let pks = AccountsDb::get_many_refs_pubkeys(&shrink_collect); - assert_eq!(pks, expected); - } - } - - #[test] - fn test_revisit_accounts_with_many_refs() { - let db = AccountsDb::new_single_for_tests(); - let rent_epoch = 0; - let lamports = 0; - let executable = false; - let owner = Pubkey::default(); - let data = Vec::new(); - - let pubkey = solana_sdk::pubkey::new_rand(); - let pubkey2 = solana_sdk::pubkey::new_rand(); - - let meta = StoredMeta { - write_version_obsolete: 5, - pubkey, - data_len: 7, - }; - let meta2 = StoredMeta { - write_version_obsolete: 5, - pubkey: pubkey2, - data_len: 7, - }; - let account_meta = AccountMeta { - lamports, - owner, - executable, - rent_epoch, - }; - let offset = 99; - let stored_size = 1; // size is 1 byte for each entry to test `bytes` later - let hash = AccountHash(Hash::new_unique()); - let stored_account = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { - meta: &meta, - account_meta: &account_meta, - data: &data, - offset, - stored_size, - hash: &hash, - }); - let stored_account2 = StoredAccountMeta::AppendVec(AppendVecStoredAccountMeta { - meta: &meta2, - 
account_meta: &account_meta, - data: &data, - offset, - stored_size, - hash: &hash, - }); - let empty_account = AccountSharedData::default(); - - // sweep through different contents of `many_refs.accounts` - for many_refs_accounts in [ - Vec::default(), - vec![&stored_account], - vec![&stored_account, &stored_account2], - ] { - // how many of `many_ref_accounts` should be found in the index with ref_count=1 - for mut accounts_with_ref_count_one in 0..many_refs_accounts.len() { - // if `set_to_two_ref_count`, then add to index with ref_count=2, and expect same results as accounts_with_ref_count_one = 0 - for set_to_two_ref_count in [false, true] { - many_refs_accounts - .iter() - .take(accounts_with_ref_count_one) - .for_each(|account| { - let k = account.pubkey(); - for slot in 1..if set_to_two_ref_count { 3 } else { 2 } { - // each upserting here (to a different slot) adds a refcount of 1 since entry is NOT cached - db.accounts_index.upsert( - slot, - slot, - k, - &empty_account, - &crate::accounts_index::AccountSecondaryIndexes::default(), - AccountInfo::default(), - &mut Vec::default(), - UpsertReclaim::IgnoreReclaims, - ); - } - }); - if set_to_two_ref_count { - // expect same results as accounts_with_ref_count_one = 0 since we set refcounts to 2 - accounts_with_ref_count_one = 0; - } - let mut shrink_collect = ShrinkCollect:: { - slot: 0, - capacity: 0, - aligned_total_bytes: 0, - unrefed_pubkeys: Vec::default(), - alive_accounts: ShrinkCollectAliveSeparatedByRefs { - one_ref: AliveAccounts { - slot: 0, - accounts: Vec::default(), - bytes: 0, - }, - many_refs: AliveAccounts { - slot: 0, - accounts: many_refs_accounts.clone(), - bytes: many_refs_accounts.len(), - }, - }, - alive_total_bytes: 0, - total_starting_accounts: 0, - all_are_zero_lamports: false, - _index_entries_being_shrunk: Vec::default(), - }; - db.revisit_accounts_with_many_refs(&mut shrink_collect); - // verify what got moved `many_refs` to `one_ref` - assert_eq!( - 
shrink_collect.alive_accounts.one_ref.accounts.len(), - accounts_with_ref_count_one - ); - assert_eq!( - shrink_collect.alive_accounts.one_ref.bytes, - accounts_with_ref_count_one - ); - assert_eq!( - shrink_collect.alive_accounts.many_refs.accounts, - many_refs_accounts[accounts_with_ref_count_one..].to_vec(), - ); - assert_eq!( - shrink_collect.alive_accounts.many_refs.bytes, - many_refs_accounts.len() - accounts_with_ref_count_one - ); - } - } - } - } - /// combines ALL possible slots in `sorted_slots` fn combine_ancient_slots_packed_for_tests(db: &AccountsDb, sorted_slots: Vec) { // combine normal append vec(s) into packed ancient append vec @@ -3291,6 +3052,154 @@ pub mod tests { } } + #[test] + fn test_shrink_collect_alive_add() { + let num_slots = 1; + let data_size = None; + let (_db, storages, _slots, _infos) = get_sample_storages(num_slots, data_size); + + let account = storages[0].accounts.get_account(0).unwrap().0; + let slot = 1; + let capacity = 0; + for i in 0..4usize { + let mut alive_accounts = + ShrinkCollectAliveSeparatedByRefs::with_capacity(capacity, slot); + let lamports = 1; + + match i { + 0 => { + // empty slot list (ignored anyway) because ref_count = 1 + let slot_list = vec![]; + alive_accounts.add(1, &account, &slot_list); + assert!(!alive_accounts.one_ref.accounts.is_empty()); + assert!(alive_accounts.many_refs_old_alive.accounts.is_empty()); + assert!(alive_accounts + .many_refs_this_is_newest_alive + .accounts + .is_empty()); + } + 1 => { + // non-empty slot list (but ignored) because slot_list = 1 + let slot_list = + vec![(slot, AccountInfo::new(StorageLocation::Cached, lamports))]; + alive_accounts.add(2, &account, &slot_list); + assert!(alive_accounts.one_ref.accounts.is_empty()); + assert!(alive_accounts.many_refs_old_alive.accounts.is_empty()); + assert!(!alive_accounts + .many_refs_this_is_newest_alive + .accounts + .is_empty()); + } + 2 => { + // multiple slot list, ref_count=2, this is NOT newest alive, so many_refs_old_alive 
+ let slot_list = vec![ + (slot, AccountInfo::new(StorageLocation::Cached, lamports)), + ( + slot + 1, + AccountInfo::new(StorageLocation::Cached, lamports), + ), + ]; + alive_accounts.add(2, &account, &slot_list); + assert!(alive_accounts.one_ref.accounts.is_empty()); + assert!(!alive_accounts.many_refs_old_alive.accounts.is_empty()); + assert!(alive_accounts + .many_refs_this_is_newest_alive + .accounts + .is_empty()); + } + 3 => { + // multiple slot list, ref_count=2, this is newest + let slot_list = vec![ + (slot, AccountInfo::new(StorageLocation::Cached, lamports)), + ( + slot - 1, + AccountInfo::new(StorageLocation::Cached, lamports), + ), + ]; + alive_accounts.add(2, &account, &slot_list); + assert!(alive_accounts.one_ref.accounts.is_empty()); + assert!(alive_accounts.many_refs_old_alive.accounts.is_empty()); + assert!(!alive_accounts + .many_refs_this_is_newest_alive + .accounts + .is_empty()); + } + _ => { + panic!("unexpected"); + } + } + } + } + + #[test] + fn test_many_ref_accounts_can_be_moved() { + let tuning = PackedAncientStorageTuning { + // only allow 10k slots old enough to be ancient + max_ancient_slots: 10_000, + // re-combine/shrink 55% of the data savings this pass + percent_of_alive_shrunk_data: 55, + ideal_storage_size: NonZeroU64::new(1000).unwrap(), + can_randomly_shrink: false, + }; + + // nothing to move, so no problem fitting it + let many_refs_newest = vec![]; + let target_slots_sorted = vec![]; + assert!(AccountsDb::many_ref_accounts_can_be_moved( + &many_refs_newest, + &target_slots_sorted, + &tuning + )); + // something to move, no target slots, so can't fit + let slot = 1; + let many_refs_newest = vec![AliveAccounts { + bytes: 1, + slot, + accounts: Vec::default(), + }]; + assert!(!AccountsDb::many_ref_accounts_can_be_moved( + &many_refs_newest, + &target_slots_sorted, + &tuning + )); + + // something to move, 1 target slot, so can fit + let target_slots_sorted = vec![slot]; + assert!(AccountsDb::many_ref_accounts_can_be_moved( + 
&many_refs_newest, + &target_slots_sorted, + &tuning + )); + + // too much to move to 1 target slot, so can't fit + let many_refs_newest = vec![AliveAccounts { + bytes: tuning.ideal_storage_size.get() as usize, + slot, + accounts: Vec::default(), + }]; + assert!(!AccountsDb::many_ref_accounts_can_be_moved( + &many_refs_newest, + &target_slots_sorted, + &tuning + )); + + // more than 1 slot to move, 2 target slots, so can fit + let target_slots_sorted = vec![slot, slot + 1]; + assert!(AccountsDb::many_ref_accounts_can_be_moved( + &many_refs_newest, + &target_slots_sorted, + &tuning + )); + + // lowest target slot is below required slot + let target_slots_sorted = vec![slot - 1, slot]; + assert!(!AccountsDb::many_ref_accounts_can_be_moved( + &many_refs_newest, + &target_slots_sorted, + &tuning + )); + } + #[test] fn test_addref_accounts_failed_to_shrink_ancient() { let db = AccountsDb::new_single_for_tests(); @@ -3330,7 +3239,8 @@ pub mod tests { aligned_total_bytes: 0, alive_accounts: ShrinkCollectAliveSeparatedByRefs { one_ref: AliveAccounts::default(), - many_refs: AliveAccounts::default(), + many_refs_this_is_newest_alive: AliveAccounts::default(), + many_refs_old_alive: AliveAccounts::default(), }, alive_total_bytes: 0, total_starting_accounts: 0, From 96052d230a85d64fe6d9ee5fcf885a486b94bed7 Mon Sep 17 00:00:00 2001 From: Thomas P Date: Fri, 20 Oct 2023 19:17:03 +0200 Subject: [PATCH 396/407] Docker images > 1.17.0 are broken because of wrong baseimage (#33709) fix(dockerfile): make sure to use the bullseye image for building Solana's release image is based on bullseye, make sure we compile Solana with the same environment --- ci/docker-rust/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 8dfc347d54d697..c15b21636f365f 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # 
ci/rust-version.sh to pick up the new image tag -FROM rust:1.73.0 +FROM rust:1.73.0-bullseye ARG NODE_MAJOR=18 From dc3c827299f0139b681dc79fa0a71622bce2feeb Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 20 Oct 2023 17:50:54 +0000 Subject: [PATCH 397/407] prunes repair QUIC connections (#33775) The commit implements lazy eviction for repair QUIC connections. The cache is allowed to grow to 2 x capacity at which point at least half of the entries with lowest stake are evicted, resulting in an amortized O(1) performance. --- core/src/repair/quic_endpoint.rs | 144 +++++++++++++++++++++++++++---- core/src/validator.rs | 1 + 2 files changed, 130 insertions(+), 15 deletions(-) diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index bf3a1802144a42..7d1cd29a32589f 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -13,15 +13,20 @@ use { rustls::{Certificate, PrivateKey}, serde_bytes::ByteBuf, solana_quic_client::nonblocking::quic_client::SkipServerVerification, + solana_runtime::bank_forks::BankForks, solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Keypair}, solana_streamer::{ quic::SkipClientVerification, tls_certificates::new_self_signed_tls_certificate, }, std::{ + cmp::Reverse, collections::{hash_map::Entry, HashMap}, io::{Cursor, Error as IoError}, net::{IpAddr, SocketAddr, UdpSocket}, - sync::Arc, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, time::Duration, }, thiserror::Error, @@ -40,18 +45,20 @@ const CONNECT_SERVER_NAME: &str = "solana-repair"; const CLIENT_CHANNEL_BUFFER: usize = 1 << 14; const ROUTER_CHANNEL_BUFFER: usize = 64; -const CONNECTION_CACHE_CAPACITY: usize = 4096; +const CONNECTION_CACHE_CAPACITY: usize = 3072; const MAX_CONCURRENT_BIDI_STREAMS: VarInt = VarInt::from_u32(512); const CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(1); const CONNECTION_CLOSE_ERROR_CODE_DROPPED: VarInt = VarInt::from_u32(2); const 
CONNECTION_CLOSE_ERROR_CODE_INVALID_IDENTITY: VarInt = VarInt::from_u32(3); const CONNECTION_CLOSE_ERROR_CODE_REPLACED: VarInt = VarInt::from_u32(4); +const CONNECTION_CLOSE_ERROR_CODE_PRUNED: VarInt = VarInt::from_u32(5); const CONNECTION_CLOSE_REASON_SHUTDOWN: &[u8] = b"SHUTDOWN"; const CONNECTION_CLOSE_REASON_DROPPED: &[u8] = b"DROPPED"; const CONNECTION_CLOSE_REASON_INVALID_IDENTITY: &[u8] = b"INVALID_IDENTITY"; const CONNECTION_CLOSE_REASON_REPLACED: &[u8] = b"REPLACED"; +const CONNECTION_CLOSE_REASON_PRUNED: &[u8] = b"PRUNED"; pub(crate) type AsyncTryJoinHandle = TryJoin, JoinHandle<()>>; @@ -108,6 +115,7 @@ pub(crate) fn new_quic_endpoint( socket: UdpSocket, address: IpAddr, remote_request_sender: Sender, + bank_forks: Arc>, ) -> Result<(Endpoint, AsyncSender, AsyncTryJoinHandle), Error> { let (cert, key) = new_self_signed_tls_certificate(keypair, address)?; let server_config = new_server_config(cert.clone(), key.clone())?; @@ -124,12 +132,15 @@ pub(crate) fn new_quic_endpoint( )? 
}; endpoint.set_default_client_config(client_config); + let prune_cache_pending = Arc::::default(); let cache = Arc::>>::default(); let (client_sender, client_receiver) = tokio::sync::mpsc::channel(CLIENT_CHANNEL_BUFFER); let router = Arc::>>>::default(); let server_task = runtime.spawn(run_server( endpoint.clone(), remote_request_sender.clone(), + bank_forks.clone(), + prune_cache_pending.clone(), router.clone(), cache.clone(), )); @@ -137,6 +148,8 @@ pub(crate) fn new_quic_endpoint( endpoint.clone(), client_receiver, remote_request_sender, + bank_forks, + prune_cache_pending, router, cache, )); @@ -189,6 +202,8 @@ fn new_transport_config() -> TransportConfig { async fn run_server( endpoint: Endpoint, remote_request_sender: Sender, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) { @@ -197,6 +212,8 @@ async fn run_server( endpoint.clone(), connecting, remote_request_sender.clone(), + bank_forks.clone(), + prune_cache_pending.clone(), router.clone(), cache.clone(), )); @@ -207,6 +224,8 @@ async fn run_client( endpoint: Endpoint, mut receiver: AsyncReceiver, remote_request_sender: Sender, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) { @@ -230,6 +249,8 @@ async fn run_client( remote_address, remote_request_sender.clone(), receiver, + bank_forks.clone(), + prune_cache_pending.clone(), router.clone(), cache.clone(), )); @@ -263,11 +284,21 @@ async fn handle_connecting_error( endpoint: Endpoint, connecting: Connecting, remote_request_sender: Sender, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) { - if let Err(err) = - handle_connecting(endpoint, connecting, remote_request_sender, router, cache).await + if let Err(err) = handle_connecting( + endpoint, + connecting, + remote_request_sender, + bank_forks, + prune_cache_pending, + router, + cache, + ) + .await { error!("handle_connecting: {err:?}"); } @@ -277,6 +308,8 @@ async fn handle_connecting( endpoint: Endpoint, 
connecting: Connecting, remote_request_sender: Sender, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) -> Result<(), Error> { @@ -295,6 +328,8 @@ async fn handle_connecting( connection, remote_request_sender, receiver, + bank_forks, + prune_cache_pending, router, cache, ) @@ -302,6 +337,7 @@ async fn handle_connecting( Ok(()) } +#[allow(clippy::too_many_arguments)] async fn handle_connection( endpoint: Endpoint, remote_address: SocketAddr, @@ -309,10 +345,20 @@ async fn handle_connection( connection: Connection, remote_request_sender: Sender, receiver: AsyncReceiver, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) { - cache_connection(remote_pubkey, connection.clone(), &cache).await; + cache_connection( + remote_pubkey, + connection.clone(), + bank_forks, + prune_cache_pending, + router.clone(), + cache.clone(), + ) + .await; let send_requests_task = tokio::task::spawn(send_requests_task( endpoint.clone(), connection.clone(), @@ -492,6 +538,8 @@ async fn make_connection_task( remote_address: SocketAddr, remote_request_sender: Sender, receiver: AsyncReceiver, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) { @@ -500,6 +548,8 @@ async fn make_connection_task( remote_address, remote_request_sender, receiver, + bank_forks, + prune_cache_pending, router, cache, ) @@ -514,6 +564,8 @@ async fn make_connection( remote_address: SocketAddr, remote_request_sender: Sender, receiver: AsyncReceiver, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) -> Result<(), Error> { @@ -527,6 +579,8 @@ async fn make_connection( connection, remote_request_sender, receiver, + bank_forks, + prune_cache_pending, router, cache, ) @@ -550,18 +604,17 @@ fn get_remote_pubkey(connection: &Connection) -> Result { async fn cache_connection( remote_pubkey: Pubkey, connection: Connection, - cache: &Mutex>, + bank_forks: Arc>, + prune_cache_pending: Arc, + router: Arc>>>, + cache: 
Arc>>, ) { - let old = { + let (old, should_prune_cache) = { let mut cache = cache.lock().await; - if cache.len() >= CONNECTION_CACHE_CAPACITY { - connection.close( - CONNECTION_CLOSE_ERROR_CODE_DROPPED, - CONNECTION_CLOSE_REASON_DROPPED, - ); - return; - } - cache.insert(remote_pubkey, connection) + ( + cache.insert(remote_pubkey, connection), + cache.len() >= CONNECTION_CACHE_CAPACITY.saturating_mul(2), + ) }; if let Some(old) = old { old.close( @@ -569,6 +622,14 @@ async fn cache_connection( CONNECTION_CLOSE_REASON_REPLACED, ); } + if should_prune_cache && !prune_cache_pending.swap(true, Ordering::Relaxed) { + tokio::task::spawn(prune_connection_cache( + bank_forks, + prune_cache_pending, + router, + cache, + )); + } } async fn drop_connection( @@ -587,6 +648,50 @@ async fn drop_connection( } } +async fn prune_connection_cache( + bank_forks: Arc>, + prune_cache_pending: Arc, + router: Arc>>>, + cache: Arc>>, +) { + debug_assert!(prune_cache_pending.load(Ordering::Relaxed)); + let staked_nodes = { + let root_bank = bank_forks.read().unwrap().root_bank(); + root_bank.staked_nodes() + }; + { + let mut cache = cache.lock().await; + if cache.len() < CONNECTION_CACHE_CAPACITY.saturating_mul(2) { + prune_cache_pending.store(false, Ordering::Relaxed); + return; + } + let mut connections: Vec<_> = cache + .drain() + .filter(|(_, connection)| connection.close_reason().is_none()) + .map(|entry @ (pubkey, _)| { + let stake = staked_nodes.get(&pubkey).copied().unwrap_or_default(); + (stake, entry) + }) + .collect(); + connections + .select_nth_unstable_by_key(CONNECTION_CACHE_CAPACITY, |&(stake, _)| Reverse(stake)); + for (_, (_, connection)) in &connections[CONNECTION_CACHE_CAPACITY..] 
{ + connection.close( + CONNECTION_CLOSE_ERROR_CODE_PRUNED, + CONNECTION_CLOSE_REASON_PRUNED, + ); + } + cache.extend( + connections + .into_iter() + .take(CONNECTION_CACHE_CAPACITY) + .map(|(_, entry)| entry), + ); + prune_cache_pending.store(false, Ordering::Relaxed); + } + router.write().await.retain(|_, sender| !sender.is_closed()); +} + impl From> for Error { fn from(_: crossbeam_channel::SendError) -> Self { Error::ChannelSendError @@ -598,6 +703,8 @@ mod tests { use { super::*, itertools::{izip, multiunzip}, + solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}, + solana_runtime::bank::Bank, solana_sdk::signature::Signer, std::{iter::repeat_with, net::Ipv4Addr, time::Duration}, }; @@ -625,6 +732,12 @@ mod tests { repeat_with(crossbeam_channel::unbounded::) .take(NUM_ENDPOINTS) .unzip(); + let bank_forks = { + let GenesisConfigInfo { genesis_config, .. } = + create_genesis_config(/*mint_lamports:*/ 100_000); + let bank = Bank::new_for_tests(&genesis_config); + Arc::new(RwLock::new(BankForks::new(bank))) + }; let (endpoints, senders, tasks): (Vec<_>, Vec<_>, Vec<_>) = multiunzip( keypairs .iter() @@ -637,6 +750,7 @@ mod tests { socket, IpAddr::V4(Ipv4Addr::LOCALHOST), remote_request_sender, + bank_forks.clone(), ) .unwrap() }), diff --git a/core/src/validator.rs b/core/src/validator.rs index 3075fb5261a180..16429049d0ef53 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1208,6 +1208,7 @@ impl Validator { .expect("Operator must spin up node with valid QUIC serve-repair address") .ip(), repair_quic_endpoint_sender, + bank_forks.clone(), ) .unwrap(); From a5c7c999e2060971d9369bde13e05247c9ddd415 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 20 Oct 2023 21:39:50 +0200 Subject: [PATCH 398/407] Bump solana_rbpf to v0.8.0 (#33679) * Bumps solana_rbpf to v0.8.0 * Adjustments: Replaces declare_syscall!() with declare_builtin_function!(). Removes Config::encrypt_runtime_environment. 
Simplifies error propagation. --- Cargo.lock | 5 +- Cargo.toml | 2 +- ledger/src/blockstore_processor.rs | 8 +- program-runtime/src/invoke_context.rs | 125 ++-- program-runtime/src/loaded_programs.rs | 13 +- program-runtime/src/message_processor.rs | 12 +- program-runtime/src/stable_log.rs | 4 +- program-test/Cargo.toml | 1 + program-test/src/lib.rs | 56 +- .../tests/common.rs | 7 +- .../tests/create_lookup_table_ix.rs | 7 +- .../address-lookup-table/src/processor.rs | 40 +- programs/bpf-loader-tests/tests/common.rs | 3 +- programs/bpf_loader/src/lib.rs | 61 +- programs/bpf_loader/src/syscalls/cpi.rs | 12 +- programs/bpf_loader/src/syscalls/logging.rs | 22 +- programs/bpf_loader/src/syscalls/mem_ops.rs | 59 +- programs/bpf_loader/src/syscalls/mod.rs | 545 +++++--------- programs/bpf_loader/src/syscalls/sysvar.rs | 26 +- programs/compute-budget/src/lib.rs | 12 +- programs/config/src/config_processor.rs | 220 +++--- programs/loader-v4/src/lib.rs | 43 +- programs/sbf/Cargo.lock | 5 +- programs/sbf/Cargo.toml | 2 +- programs/sbf/tests/programs.rs | 6 +- programs/stake/src/stake_instruction.rs | 702 +++++++++--------- programs/system/src/system_processor.rs | 462 ++++++------ programs/vote/benches/process_vote.rs | 2 +- programs/vote/src/vote_processor.rs | 361 +++++---- programs/zk-token-proof/src/lib.rs | 2 +- runtime/benches/bank.rs | 4 +- runtime/src/bank.rs | 6 +- runtime/src/bank/tests.rs | 90 +-- runtime/src/builtins.rs | 30 +- 34 files changed, 1373 insertions(+), 1582 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a425d81d3dd08..6de911a6b63477 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6640,6 +6640,7 @@ dependencies = [ "solana-sdk", "solana-stake-program", "solana-vote-program", + "solana_rbpf", "test-case", "thiserror", "tokio", @@ -7625,9 +7626,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"103318aa365ff7caa8cf534f2246b5eb7e5b34668736d52b1266b143f7a21196" +checksum = "3d457cc2ba742c120492a64b7fa60e22c575e891f6b55039f4d736568fb112a3" dependencies = [ "byteorder", "combine", diff --git a/Cargo.toml b/Cargo.toml index abb304ca6d14e1..c17d5444020b15 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -378,7 +378,7 @@ solana-wen-restart = { path = "wen-restart", version = "=1.18.0" } solana-zk-keygen = { path = "zk-keygen", version = "=1.18.0" } solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.18.0" } solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.18.0" } -solana_rbpf = "=0.7.2" +solana_rbpf = "=0.8.0" spl-associated-token-account = "=2.2.0" spl-instruction-padding = "0.1" spl-memo = "=4.0.0" diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 10ff57a1202581..e17bc52a889ded 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -2994,7 +2994,7 @@ pub mod tests { ] } - declare_process_instruction!(mock_processor_ok, 1, |_invoke_context| { + declare_process_instruction!(MockBuiltinOk, 1, |_invoke_context| { // Always succeeds Ok(()) }); @@ -3002,7 +3002,7 @@ pub mod tests { let mock_program_id = solana_sdk::pubkey::new_rand(); let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, mock_processor_ok); + bank.add_mockup_builtin(mock_program_id, MockBuiltinOk::vm); let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( @@ -3023,7 +3023,7 @@ pub mod tests { let bankhash_ok = bank.hash(); assert!(result.is_ok()); - declare_process_instruction!(mock_processor_err, 1, |invoke_context| { + declare_process_instruction!(MockBuiltinErr, 1, |invoke_context| { let instruction_errors = get_instruction_errors(); let err = invoke_context @@ -3043,7 +3043,7 @@ pub mod tests { (0..get_instruction_errors().len()).for_each(|err| { let mut bank = Bank::new_for_tests(&genesis_config); - 
bank.add_mockup_builtin(mock_program_id, mock_processor_err); + bank.add_mockup_builtin(mock_program_id, MockBuiltinErr::vm); let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 9fbe42d8d40c07..be95fca637ccdb 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -12,9 +12,10 @@ use { solana_measure::measure::Measure, solana_rbpf::{ ebpf::MM_HEAP_START, - elf::SBPFVersion, + error::{EbpfError, ProgramResult}, memory_region::MemoryMapping, - vm::{BuiltinFunction, Config, ContextObject, ProgramResult}, + program::{BuiltinFunction, SBPFVersion}, + vm::{Config, ContextObject, EbpfVm}, }, solana_sdk::{ account::AccountSharedData, @@ -39,44 +40,46 @@ use { }, }; -pub type ProcessInstructionWithContext = BuiltinFunction>; +pub type BuiltinFunctionWithContext = BuiltinFunction>; /// Adapter so we can unify the interfaces of built-in programs and syscalls #[macro_export] macro_rules! 
declare_process_instruction { ($process_instruction:ident, $cu_to_consume:expr, |$invoke_context:ident| $inner:tt) => { - pub fn $process_instruction( - invoke_context: &mut $crate::invoke_context::InvokeContext, - _arg0: u64, - _arg1: u64, - _arg2: u64, - _arg3: u64, - _arg4: u64, - _memory_mapping: &mut $crate::solana_rbpf::memory_region::MemoryMapping, - result: &mut $crate::solana_rbpf::vm::ProgramResult, - ) { - fn process_instruction_inner( - $invoke_context: &mut $crate::invoke_context::InvokeContext, - ) -> std::result::Result<(), solana_sdk::instruction::InstructionError> { - $inner + $crate::solana_rbpf::declare_builtin_function!( + $process_instruction, + fn rust( + invoke_context: &mut $crate::invoke_context::InvokeContext, + _arg0: u64, + _arg1: u64, + _arg2: u64, + _arg3: u64, + _arg4: u64, + _memory_mapping: &mut $crate::solana_rbpf::memory_region::MemoryMapping, + ) -> std::result::Result> { + fn process_instruction_inner( + $invoke_context: &mut $crate::invoke_context::InvokeContext, + ) -> std::result::Result<(), solana_sdk::instruction::InstructionError> { + $inner + } + let consumption_result = if $cu_to_consume > 0 + && invoke_context + .feature_set + .is_active(&solana_sdk::feature_set::native_programs_consume_cu::id()) + { + invoke_context.consume_checked($cu_to_consume) + } else { + Ok(()) + }; + consumption_result + .and_then(|_| { + process_instruction_inner(invoke_context) + .map(|_| 0) + .map_err(|err| Box::new(err) as Box) + }) + .into() } - let consumption_result = if $cu_to_consume > 0 - && invoke_context - .feature_set - .is_active(&solana_sdk::feature_set::native_programs_consume_cu::id()) - { - invoke_context.consume_checked($cu_to_consume) - } else { - Ok(()) - }; - *result = consumption_result - .and_then(|_| { - process_instruction_inner(invoke_context) - .map(|_| 0) - .map_err(|err| Box::new(err) as Box) - }) - .into(); - } + ); }; } @@ -468,11 +471,11 @@ impl<'a> InvokeContext<'a> { .programs_loaded_for_tx_batch 
.find(&builtin_id) .ok_or(InstructionError::UnsupportedProgramId)?; - let process_instruction = match &entry.program { + let function = match &entry.program { LoadedProgramType::Builtin(program) => program .get_function_registry() .lookup_by_key(ENTRYPOINT_KEY) - .map(|(_name, process_instruction)| process_instruction), + .map(|(_name, function)| function), _ => None, } .ok_or(InstructionError::UnsupportedProgramId)?; @@ -484,31 +487,41 @@ impl<'a> InvokeContext<'a> { let logger = self.get_log_collector(); stable_log::program_invoke(&logger, &program_id, self.get_stack_height()); let pre_remaining_units = self.get_remaining(); + // In program-runtime v2 we will create this VM instance only once per transaction. + // `program_runtime_environment_v2.get_config()` will be used instead of `mock_config`. + // For now, only built-ins are invoked from here, so the VM and its Config are irrelevant. let mock_config = Config::default(); - let mut mock_memory_mapping = - MemoryMapping::new(Vec::new(), &mock_config, &SBPFVersion::V2).unwrap(); - let mut result = ProgramResult::Ok(0); - process_instruction( + let empty_memory_mapping = + MemoryMapping::new(Vec::new(), &mock_config, &SBPFVersion::V1).unwrap(); + let mut vm = EbpfVm::new( + self.programs_loaded_for_tx_batch + .environments + .program_runtime_v2 + .clone(), + &SBPFVersion::V1, // Removes lifetime tracking unsafe { std::mem::transmute::<&mut InvokeContext, &mut InvokeContext>(self) }, + empty_memory_mapping, 0, - 0, - 0, - 0, - 0, - &mut mock_memory_mapping, - &mut result, ); - let result = match result { + vm.invoke_function(function); + let result = match vm.program_result { ProgramResult::Ok(_) => { stable_log::program_success(&logger, &program_id); Ok(()) } - ProgramResult::Err(err) => { - stable_log::program_failure(&logger, &program_id, err.as_ref()); - if let Some(err) = err.downcast_ref::() { - Err(err.clone()) + ProgramResult::Err(ref err) => { + if let EbpfError::SyscallError(syscall_error) = err { + if 
let Some(instruction_err) = syscall_error.downcast_ref::() + { + stable_log::program_failure(&logger, &program_id, instruction_err); + Err(instruction_err.clone()) + } else { + stable_log::program_failure(&logger, &program_id, syscall_error); + Err(InstructionError::ProgramFailedToComplete) + } } else { + stable_log::program_failure(&logger, &program_id, err); Err(InstructionError::ProgramFailedToComplete) } } @@ -699,7 +712,7 @@ pub fn mock_process_instruction, instruction_account_metas: Vec, expected_result: Result<(), InstructionError>, - process_instruction: ProcessInstructionWithContext, + builtin_function: BuiltinFunctionWithContext, mut pre_adjustments: F, mut post_adjustments: G, ) -> Vec { @@ -734,7 +747,7 @@ pub fn mock_process_instruction Self { let mut function_registry = FunctionRegistry::default(); function_registry - .register_function_hashed(*b"entrypoint", entrypoint) + .register_function_hashed(*b"entrypoint", builtin_function) .unwrap(); Self { deployment_slot, @@ -949,7 +950,7 @@ mod tests { }, assert_matches::assert_matches, percentage::Percentage, - solana_rbpf::vm::BuiltinProgram, + solana_rbpf::program::BuiltinProgram, solana_sdk::{ clock::{Epoch, Slot}, pubkey::Pubkey, diff --git a/program-runtime/src/message_processor.rs b/program-runtime/src/message_processor.rs index 6b3727e6a8ce74..e3a0dabd8d07bf 100644 --- a/program-runtime/src/message_processor.rs +++ b/program-runtime/src/message_processor.rs @@ -220,7 +220,7 @@ mod tests { ChangeData { data: u8 }, } - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); @@ -271,7 +271,7 @@ mod tests { let mut programs_loaded_for_tx_batch = LoadedProgramsForTxBatch::default(); 
programs_loaded_for_tx_batch.replenish( mock_system_program_id, - Arc::new(LoadedProgram::new_builtin(0, 0, process_instruction)), + Arc::new(LoadedProgram::new_builtin(0, 0, MockBuiltin::vm)), ); let account_keys = (0..transaction_context.get_number_of_accounts()) .map(|index| { @@ -432,7 +432,7 @@ mod tests { DoWork { lamports: u64, data: u8 }, } - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); @@ -500,7 +500,7 @@ mod tests { let mut programs_loaded_for_tx_batch = LoadedProgramsForTxBatch::default(); programs_loaded_for_tx_batch.replenish( mock_program_id, - Arc::new(LoadedProgram::new_builtin(0, 0, process_instruction)), + Arc::new(LoadedProgram::new_builtin(0, 0, MockBuiltin::vm)), ); let account_metas = vec![ AccountMeta::new( @@ -645,7 +645,7 @@ mod tests { #[test] fn test_precompile() { let mock_program_id = Pubkey::new_unique(); - declare_process_instruction!(process_instruction, 1, |_invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |_invoke_context| { Err(InstructionError::Custom(0xbabb1e)) }); @@ -684,7 +684,7 @@ mod tests { let mut programs_loaded_for_tx_batch = LoadedProgramsForTxBatch::default(); programs_loaded_for_tx_batch.replenish( mock_program_id, - Arc::new(LoadedProgram::new_builtin(0, 0, process_instruction)), + Arc::new(LoadedProgram::new_builtin(0, 0, MockBuiltin::vm)), ); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default(); diff --git a/program-runtime/src/stable_log.rs b/program-runtime/src/stable_log.rs index 9ba7542e9c0fbf..748c4d7639214a 100644 --- a/program-runtime/src/stable_log.rs +++ b/program-runtime/src/stable_log.rs 
@@ -101,10 +101,10 @@ pub fn program_success(log_collector: &Option>>, progra /// ```notrust /// "Program

    failed: " /// ``` -pub fn program_failure( +pub fn program_failure( log_collector: &Option>>, program_id: &Pubkey, - err: &dyn std::error::Error, + err: &E, ) { ic_logger_msg!(log_collector, "Program {} failed: {}", program_id, err); } diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index 87a9c88487a30d..c4ab4507b27eae 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -27,6 +27,7 @@ solana-program-runtime = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-vote-program = { workspace = true } +solana_rbpf = { workspace = true } test-case = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 3c26ac25dad59e..95b9e6103d8122 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -13,7 +13,7 @@ use { solana_banks_server::banks_server::start_local_server, solana_bpf_loader_program::serialization::serialize_parameters, solana_program_runtime::{ - compute_budget::ComputeBudget, ic_msg, invoke_context::ProcessInstructionWithContext, + compute_budget::ComputeBudget, ic_msg, invoke_context::BuiltinFunctionWithContext, loaded_programs::LoadedProgram, stable_log, timings::ExecuteTimings, }, solana_runtime::{ @@ -66,6 +66,10 @@ pub use { solana_banks_client::{BanksClient, BanksClientError}, solana_banks_interface::BanksTransactionResultWithMetadata, solana_program_runtime::invoke_context::InvokeContext, + solana_rbpf::{ + error::EbpfError, + vm::{get_runtime_environment_key, EbpfVm}, + }, solana_sdk::transaction_context::IndexOfAccount, }; @@ -94,10 +98,10 @@ fn get_invoke_context<'a, 'b>() -> &'a mut InvokeContext<'b> { unsafe { transmute::(ptr) } } -pub fn builtin_process_instruction( - process_instruction: solana_sdk::entrypoint::ProcessInstruction, +pub fn invoke_builtin_function( + builtin_function: solana_sdk::entrypoint::ProcessInstruction, 
invoke_context: &mut InvokeContext, -) -> Result<(), Box> { +) -> Result> { set_invoke_context(invoke_context); let transaction_context = &invoke_context.transaction_context; @@ -130,9 +134,10 @@ pub fn builtin_process_instruction( unsafe { deserialize(&mut parameter_bytes.as_slice_mut()[0] as *mut u8) }; // Execute the program - process_instruction(program_id, &account_infos, instruction_data).map_err(|err| { - let err: Box = Box::new(InstructionError::from(u64::from(err))); - stable_log::program_failure(&log_collector, program_id, err.as_ref()); + builtin_function(program_id, &account_infos, instruction_data).map_err(|err| { + let err = InstructionError::from(u64::from(err)); + stable_log::program_failure(&log_collector, program_id, &err); + let err: Box = Box::new(err); err })?; stable_log::program_success(&log_collector, program_id); @@ -169,21 +174,24 @@ pub fn builtin_process_instruction( } } - Ok(()) + Ok(0) } /// Converts a `solana-program`-style entrypoint into the runtime's entrypoint style, for /// use with `ProgramTest::add_program` #[macro_export] macro_rules! 
processor { - ($process_instruction:expr) => { - Some( - |invoke_context, _arg0, _arg1, _arg2, _arg3, _arg4, _memory_mapping, result| { - *result = $crate::builtin_process_instruction($process_instruction, invoke_context) - .map(|_| 0) + ($builtin_function:expr) => { + Some(|vm, _arg0, _arg1, _arg2, _arg3, _arg4| { + let vm = unsafe { + &mut *((vm as *mut u64).offset(-($crate::get_runtime_environment_key() as isize)) + as *mut $crate::EbpfVm<$crate::InvokeContext>) + }; + vm.program_result = + $crate::invoke_builtin_function($builtin_function, vm.context_object_pointer) + .map_err(|err| $crate::EbpfError::SyscallError(err)) .into(); - }, - ) + }) }; } @@ -506,10 +514,10 @@ impl ProgramTest { pub fn new( program_name: &str, program_id: Pubkey, - process_instruction: Option, + builtin_function: Option, ) -> Self { let mut me = Self::default(); - me.add_program(program_name, program_id, process_instruction); + me.add_program(program_name, program_id, builtin_function); me } @@ -600,13 +608,13 @@ impl ProgramTest { /// `program_name` will also be used to locate the SBF shared object in the current or fixtures /// directory. /// - /// If `process_instruction` is provided, the natively built-program may be used instead of the + /// If `builtin_function` is provided, the natively built-program may be used instead of the /// SBF shared object depending on the `BPF_OUT_DIR` environment variable. pub fn add_program( &mut self, program_name: &str, program_id: Pubkey, - process_instruction: Option, + builtin_function: Option, ) { let add_bpf = |this: &mut ProgramTest, program_file: PathBuf| { let data = read_file(&program_file); @@ -680,7 +688,7 @@ impl ProgramTest { }; let program_file = find_file(&format!("{program_name}.so")); - match (self.prefer_bpf, program_file, process_instruction) { + match (self.prefer_bpf, program_file, builtin_function) { // If SBF is preferred (i.e., `test-sbf` is invoked) and a BPF shared object exists, // use that as the program data. 
(true, Some(file), _) => add_bpf(self, file), @@ -689,8 +697,8 @@ impl ProgramTest { // processor function as is. // // TODO: figure out why tests hang if a processor panics when running native code. - (false, _, Some(process)) => { - self.add_builtin_program(program_name, program_id, process) + (false, _, Some(builtin_function)) => { + self.add_builtin_program(program_name, program_id, builtin_function) } // Invalid: `test-sbf` invocation with no matching SBF shared object. @@ -713,13 +721,13 @@ impl ProgramTest { &mut self, program_name: &str, program_id: Pubkey, - process_instruction: ProcessInstructionWithContext, + builtin_function: BuiltinFunctionWithContext, ) { info!("\"{}\" builtin program", program_name); self.builtin_programs.push(( program_id, program_name.to_string(), - LoadedProgram::new_builtin(0, program_name.len(), process_instruction), + LoadedProgram::new_builtin(0, program_name.len(), builtin_function), )); } diff --git a/programs/address-lookup-table-tests/tests/common.rs b/programs/address-lookup-table-tests/tests/common.rs index 48b80199312a14..064244858cda70 100644 --- a/programs/address-lookup-table-tests/tests/common.rs +++ b/programs/address-lookup-table-tests/tests/common.rs @@ -1,6 +1,5 @@ #![allow(dead_code)] use { - solana_address_lookup_table_program::processor::process_instruction, solana_program_test::*, solana_sdk::{ account::AccountSharedData, @@ -20,7 +19,11 @@ use { }; pub async fn setup_test_context() -> ProgramTestContext { - let program_test = ProgramTest::new("", id(), Some(process_instruction)); + let program_test = ProgramTest::new( + "", + id(), + Some(solana_address_lookup_table_program::processor::Entrypoint::vm), + ); program_test.start_with_context().await } diff --git a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs index 183de53e31382a..39ff9aea6604d5 100644 --- 
a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs +++ b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs @@ -1,7 +1,6 @@ use { assert_matches::assert_matches, common::{assert_ix_error, overwrite_slot_hashes_with_slots, setup_test_context}, - solana_address_lookup_table_program::processor::process_instruction, solana_program_test::*, solana_sdk::{ address_lookup_table::{ @@ -23,7 +22,11 @@ use { mod common; pub async fn setup_test_context_without_authority_feature() -> ProgramTestContext { - let mut program_test = ProgramTest::new("", id(), Some(process_instruction)); + let mut program_test = ProgramTest::new( + "", + id(), + Some(solana_address_lookup_table_program::processor::Entrypoint::vm), + ); program_test.deactivate_feature( feature_set::relax_authority_signer_check_for_lookup_table_creation::id(), ); diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs index 6f71b293d03a4c..4db568c71a1a20 100644 --- a/programs/address-lookup-table/src/processor.rs +++ b/programs/address-lookup-table/src/processor.rs @@ -21,29 +21,25 @@ use { pub const DEFAULT_COMPUTE_UNITS: u64 = 750; -declare_process_instruction!( - process_instruction, - DEFAULT_COMPUTE_UNITS, - |invoke_context| { - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let instruction_data = instruction_context.get_instruction_data(); - match limited_deserialize(instruction_data)? 
{ - ProgramInstruction::CreateLookupTable { - recent_slot, - bump_seed, - } => Processor::create_lookup_table(invoke_context, recent_slot, bump_seed), - ProgramInstruction::FreezeLookupTable => Processor::freeze_lookup_table(invoke_context), - ProgramInstruction::ExtendLookupTable { new_addresses } => { - Processor::extend_lookup_table(invoke_context, new_addresses) - } - ProgramInstruction::DeactivateLookupTable => { - Processor::deactivate_lookup_table(invoke_context) - } - ProgramInstruction::CloseLookupTable => Processor::close_lookup_table(invoke_context), - } +declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| { + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + let instruction_data = instruction_context.get_instruction_data(); + match limited_deserialize(instruction_data)? { + ProgramInstruction::CreateLookupTable { + recent_slot, + bump_seed, + } => Processor::create_lookup_table(invoke_context, recent_slot, bump_seed), + ProgramInstruction::FreezeLookupTable => Processor::freeze_lookup_table(invoke_context), + ProgramInstruction::ExtendLookupTable { new_addresses } => { + Processor::extend_lookup_table(invoke_context, new_addresses) + } + ProgramInstruction::DeactivateLookupTable => { + Processor::deactivate_lookup_table(invoke_context) + } + ProgramInstruction::CloseLookupTable => Processor::close_lookup_table(invoke_context), } -); +}); fn checked_add(a: usize, b: usize) -> Result { a.checked_add(b).ok_or(InstructionError::ArithmeticOverflow) diff --git a/programs/bpf-loader-tests/tests/common.rs b/programs/bpf-loader-tests/tests/common.rs index eeaf957a7e140c..99cae212c7f481 100644 --- a/programs/bpf-loader-tests/tests/common.rs +++ b/programs/bpf-loader-tests/tests/common.rs @@ -1,7 +1,6 @@ #![allow(dead_code)] use { - solana_bpf_loader_program::process_instruction, solana_program_test::*, solana_sdk::{ 
account::AccountSharedData, @@ -15,7 +14,7 @@ use { }; pub async fn setup_test_context() -> ProgramTestContext { - let program_test = ProgramTest::new("", id(), Some(process_instruction)); + let program_test = ProgramTest::new("", id(), Some(solana_bpf_loader_program::Entrypoint::vm)); program_test.start_with_context().await } diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 8e4ead1bf3fce9..dd944b7c8a1513 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -18,12 +18,14 @@ use { }, solana_rbpf::{ aligned_memory::AlignedMemory, + declare_builtin_function, ebpf::{self, HOST_ALIGN, MM_HEAP_START}, elf::Executable, - error::EbpfError, + error::{EbpfError, ProgramResult}, memory_region::{AccessType, MemoryCowCallback, MemoryMapping, MemoryRegion}, + program::BuiltinProgram, verifier::RequisiteVerifier, - vm::{BuiltinProgram, ContextObject, EbpfVm, ProgramResult}, + vm::{ContextObject, EbpfVm}, }, solana_sdk::{ account::WritableAccount, @@ -265,7 +267,7 @@ pub fn create_vm<'a, 'b>( trace_log: Vec::new(), })?; Ok(EbpfVm::new( - program.get_config(), + program.get_loader().clone(), program.get_sbpf_version(), invoke_context, memory_mapping, @@ -317,7 +319,7 @@ macro_rules! create_vm { macro_rules! mock_create_vm { ($vm:ident, $additional_regions:expr, $accounts_metadata:expr, $invoke_context:expr $(,)?) 
=> { let loader = std::sync::Arc::new(BuiltinProgram::new_mock()); - let function_registry = solana_rbpf::elf::FunctionRegistry::default(); + let function_registry = solana_rbpf::program::FunctionRegistry::default(); let executable = solana_rbpf::elf::Executable::::from_text_bytes( &[0x95, 0, 0, 0, 0, 0, 0, 0], loader, @@ -371,20 +373,22 @@ fn create_memory_mapping<'a, 'b, C: ContextObject>( }) } -pub fn process_instruction( - invoke_context: &mut InvokeContext, - _arg0: u64, - _arg1: u64, - _arg2: u64, - _arg3: u64, - _arg4: u64, - _memory_mapping: &mut MemoryMapping, - result: &mut ProgramResult, -) { - *result = process_instruction_inner(invoke_context).into(); -} +declare_builtin_function!( + Entrypoint, + fn rust( + invoke_context: &mut InvokeContext, + _arg0: u64, + _arg1: u64, + _arg2: u64, + _arg3: u64, + _arg4: u64, + _memory_mapping: &mut MemoryMapping, + ) -> Result> { + process_instruction_inner(invoke_context) + } +); -fn process_instruction_inner( +pub fn process_instruction_inner( invoke_context: &mut InvokeContext, ) -> Result> { let log_collector = invoke_context.get_log_collector(); @@ -1607,13 +1611,12 @@ fn execute<'a, 'b: 'a>( } ProgramResult::Err(mut error) => { if direct_mapping { - if let Some(EbpfError::AccessViolation( - _pc, + if let EbpfError::AccessViolation( AccessType::Store, address, _size, _section_name, - )) = error.downcast_ref() + ) = error { // If direct_mapping is enabled and a program tries to write to a readonly // region we'll get a memory access violation. 
Map it to a more specific @@ -1621,7 +1624,7 @@ fn execute<'a, 'b: 'a>( if let Some((instruction_account_index, _)) = account_region_addrs .iter() .enumerate() - .find(|(_, vm_region)| vm_region.contains(address)) + .find(|(_, vm_region)| vm_region.contains(&address)) { let transaction_context = &invoke_context.transaction_context; let instruction_context = @@ -1632,17 +1635,21 @@ fn execute<'a, 'b: 'a>( instruction_account_index as IndexOfAccount, )?; - error = Box::new(if account.is_executable() { + error = EbpfError::SyscallError(Box::new(if account.is_executable() { InstructionError::ExecutableDataModified } else if account.is_writable() { InstructionError::ExternalAccountDataModified } else { InstructionError::ReadonlyDataModified - }) + })); } } } - Err(error) + Err(if let EbpfError::SyscallError(err) = error { + err + } else { + error.into() + }) } _ => Ok(()), } @@ -1790,7 +1797,7 @@ mod tests { transaction_accounts, instruction_accounts, expected_result, - super::process_instruction, + Entrypoint::vm, |invoke_context| { test_utils::load_all_invoked_programs(invoke_context); }, @@ -2009,7 +2016,7 @@ mod tests { vec![(program_id, program_account.clone())], Vec::new(), Err(InstructionError::ProgramFailedToComplete), - super::process_instruction, + Entrypoint::vm, |invoke_context| { invoke_context.mock_set_remaining(0); test_utils::load_all_invoked_programs(invoke_context); @@ -2555,7 +2562,7 @@ mod tests { transaction_accounts, instruction_accounts, expected_result, - super::process_instruction, + Entrypoint::vm, |_invoke_context| {}, |_invoke_context| {}, ) diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index 79fc52c6ca8ffa..c2264b95c294fc 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -1,6 +1,6 @@ use { super::*, - crate::{declare_syscall, serialization::account_data_region_memory_state}, + crate::serialization::account_data_region_memory_state, 
scopeguard::defer, solana_program_runtime::invoke_context::SerializedAccountMetadata, solana_rbpf::{ @@ -455,10 +455,10 @@ trait SyscallInvokeSigned { ) -> Result, Error>; } -declare_syscall!( +declare_builtin_function!( /// Cross-program invocation called from Rust SyscallInvokeSignedRust, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, instruction_addr: u64, account_infos_addr: u64, @@ -689,10 +689,10 @@ struct SolSignerSeedsC { len: u64, } -declare_syscall!( +declare_builtin_function!( /// Cross-program invocation called from C SyscallInvokeSignedC, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, instruction_addr: u64, account_infos_addr: u64, @@ -1730,7 +1730,7 @@ mod tests { invoke_context::SerializedAccountMetadata, with_mock_invoke_context, }, solana_rbpf::{ - ebpf::MM_INPUT_START, elf::SBPFVersion, memory_region::MemoryRegion, vm::Config, + ebpf::MM_INPUT_START, memory_region::MemoryRegion, program::SBPFVersion, vm::Config, }, solana_sdk::{ account::{Account, AccountSharedData}, diff --git a/programs/bpf_loader/src/syscalls/logging.rs b/programs/bpf_loader/src/syscalls/logging.rs index f6d69153d2bc52..c5faf0a1057fde 100644 --- a/programs/bpf_loader/src/syscalls/logging.rs +++ b/programs/bpf_loader/src/syscalls/logging.rs @@ -1,9 +1,9 @@ -use {super::*, crate::declare_syscall, solana_rbpf::vm::ContextObject}; +use {super::*, solana_rbpf::vm::ContextObject}; -declare_syscall!( +declare_builtin_function!( /// Log a user's info message SyscallLog, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, addr: u64, len: u64, @@ -36,10 +36,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Log 5 64-bit values SyscallLogU64, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, arg1: u64, arg2: u64, @@ -59,10 +59,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Log current compute consumption SyscallLogBpfComputeUnits, - fn inner_call( + fn rust( 
invoke_context: &mut InvokeContext, _arg1: u64, _arg2: u64, @@ -83,10 +83,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Log 5 64-bit values SyscallLogPubkey, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, pubkey_addr: u64, _arg2: u64, @@ -108,10 +108,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Log data handling SyscallLogData, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, addr: u64, len: u64, diff --git a/programs/bpf_loader/src/syscalls/mem_ops.rs b/programs/bpf_loader/src/syscalls/mem_ops.rs index 93d5b69cecd818..9354270ac2f0b7 100644 --- a/programs/bpf_loader/src/syscalls/mem_ops.rs +++ b/programs/bpf_loader/src/syscalls/mem_ops.rs @@ -1,6 +1,5 @@ use { super::*, - crate::declare_syscall, solana_rbpf::{error::EbpfError, memory_region::MemoryRegion}, std::slice, }; @@ -14,10 +13,10 @@ fn mem_op_consume(invoke_context: &mut InvokeContext, n: u64) -> Result<(), Erro consume_compute_meter(invoke_context, cost) } -declare_syscall!( +declare_builtin_function!( /// memcpy SyscallMemcpy, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, dst_addr: u64, src_addr: u64, @@ -37,10 +36,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// memmove SyscallMemmove, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, dst_addr: u64, src_addr: u64, @@ -55,10 +54,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// memcmp SyscallMemcmp, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, s1_addr: u64, s2_addr: u64, @@ -113,10 +112,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// memset SyscallMemset, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, dst_addr: u64, c: u64, @@ -375,7 +374,6 @@ impl<'a> MemoryChunkIterator<'a> { len: u64, ) -> Result, EbpfError> { let vm_addr_end = vm_addr.checked_add(len).ok_or(EbpfError::AccessViolation( - 0, 
access_type, vm_addr, len, @@ -394,26 +392,19 @@ impl<'a> MemoryChunkIterator<'a> { fn region(&mut self, vm_addr: u64) -> Result<&'a MemoryRegion, Error> { match self.memory_mapping.region(self.access_type, vm_addr) { Ok(region) => Ok(region), - Err(error) => match error.downcast_ref() { - Some(EbpfError::AccessViolation(pc, access_type, _vm_addr, _len, name)) => { - Err(Box::new(EbpfError::AccessViolation( - *pc, - *access_type, - self.initial_vm_addr, - self.len, - name, - ))) - } - Some(EbpfError::StackAccessViolation(pc, access_type, _vm_addr, _len, frame)) => { + Err(error) => match error { + EbpfError::AccessViolation(access_type, _vm_addr, _len, name) => Err(Box::new( + EbpfError::AccessViolation(access_type, self.initial_vm_addr, self.len, name), + )), + EbpfError::StackAccessViolation(access_type, _vm_addr, _len, frame) => { Err(Box::new(EbpfError::StackAccessViolation( - *pc, - *access_type, + access_type, self.initial_vm_addr, self.len, - *frame, + frame, ))) } - _ => Err(error), + _ => Err(error.into()), }, } } @@ -489,7 +480,7 @@ mod tests { use { super::*, assert_matches::assert_matches, - solana_rbpf::{ebpf::MM_PROGRAM_START, elf::SBPFVersion}, + solana_rbpf::{ebpf::MM_PROGRAM_START, program::SBPFVersion}, }; fn to_chunk_vec<'a>( @@ -547,7 +538,7 @@ mod tests { .unwrap(); assert_matches!( src_chunk_iter.next().unwrap().unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(0, AccessType::Load, addr, 42, "unknown") if *addr == MM_PROGRAM_START - 1 + EbpfError::AccessViolation(AccessType::Load, addr, 42, "unknown") if *addr == MM_PROGRAM_START - 1 ); // check oob at the upper bound. 
Since the memory mapping isn't empty, @@ -558,7 +549,7 @@ mod tests { assert!(src_chunk_iter.next().unwrap().is_ok()); assert_matches!( src_chunk_iter.next().unwrap().unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(0, AccessType::Load, addr, 43, "program") if *addr == MM_PROGRAM_START + EbpfError::AccessViolation(AccessType::Load, addr, 43, "program") if *addr == MM_PROGRAM_START ); // check oob at the upper bound on the first next_back() @@ -568,7 +559,7 @@ mod tests { .rev(); assert_matches!( src_chunk_iter.next().unwrap().unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(0, AccessType::Load, addr, 43, "program") if *addr == MM_PROGRAM_START + EbpfError::AccessViolation(AccessType::Load, addr, 43, "program") if *addr == MM_PROGRAM_START ); // check oob at the upper bound on the 2nd next_back() @@ -579,7 +570,7 @@ mod tests { assert!(src_chunk_iter.next().unwrap().is_ok()); assert_matches!( src_chunk_iter.next().unwrap().unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(0, AccessType::Load, addr, 43, "unknown") if *addr == MM_PROGRAM_START - 1 + EbpfError::AccessViolation(AccessType::Load, addr, 43, "unknown") if *addr == MM_PROGRAM_START - 1 ); } @@ -707,7 +698,7 @@ mod tests { false, |_src, _dst, _len| Ok::<_, Error>(0), ).unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(0, AccessType::Load, addr, 8, "program") if *addr == MM_PROGRAM_START + 8 + EbpfError::AccessViolation(AccessType::Load, addr, 8, "program") if *addr == MM_PROGRAM_START + 8 ); // src is shorter than dst @@ -722,12 +713,12 @@ mod tests { false, |_src, _dst, _len| Ok::<_, Error>(0), ).unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(0, AccessType::Load, addr, 3, "program") if *addr == MM_PROGRAM_START + 10 + EbpfError::AccessViolation(AccessType::Load, addr, 3, "program") if *addr == MM_PROGRAM_START + 10 ); } #[test] - #[should_panic(expected = "AccessViolation(0, Store, 4294967296, 4")] + 
#[should_panic(expected = "AccessViolation(Store, 4294967296, 4")] fn test_memmove_non_contiguous_readonly() { let config = Config { aligned_memory_mapping: false, @@ -817,7 +808,7 @@ mod tests { } #[test] - #[should_panic(expected = "AccessViolation(0, Store, 4294967296, 9")] + #[should_panic(expected = "AccessViolation(Store, 4294967296, 9")] fn test_memset_non_contiguous_readonly() { let config = Config { aligned_memory_mapping: false, diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 4193b9fcfc97a8..5519ed3aa5db7b 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -16,9 +16,10 @@ use { stable_log, timings::ExecuteTimings, }, solana_rbpf::{ - elf::FunctionRegistry, + declare_builtin_function, memory_region::{AccessType, MemoryMapping}, - vm::{BuiltinFunction, BuiltinProgram, Config, ProgramResult}, + program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, + vm::Config, }, solana_sdk::{ account::ReadableAccount, @@ -281,7 +282,6 @@ pub fn create_program_runtime_environment_v1<'a>( reject_broken_elfs: reject_deployment_of_broken_elfs, noop_instruction_rate: 256, sanitize_user_provided_values: true, - encrypt_runtime_environment: true, external_internal_function_hash_collision: feature_set .is_active(&error_on_syscall_bpf_function_hash_collisions::id()), reject_callx_r10: feature_set.is_active(&reject_callx_r10::id()), @@ -295,42 +295,42 @@ pub fn create_program_runtime_environment_v1<'a>( let mut result = FunctionRegistry::>::default(); // Abort - result.register_function_hashed(*b"abort", SyscallAbort::call)?; + result.register_function_hashed(*b"abort", SyscallAbort::vm)?; // Panic - result.register_function_hashed(*b"sol_panic_", SyscallPanic::call)?; + result.register_function_hashed(*b"sol_panic_", SyscallPanic::vm)?; // Logging - result.register_function_hashed(*b"sol_log_", SyscallLog::call)?; - result.register_function_hashed(*b"sol_log_64_", 
SyscallLogU64::call)?; - result.register_function_hashed(*b"sol_log_compute_units_", SyscallLogBpfComputeUnits::call)?; - result.register_function_hashed(*b"sol_log_pubkey", SyscallLogPubkey::call)?; + result.register_function_hashed(*b"sol_log_", SyscallLog::vm)?; + result.register_function_hashed(*b"sol_log_64_", SyscallLogU64::vm)?; + result.register_function_hashed(*b"sol_log_compute_units_", SyscallLogBpfComputeUnits::vm)?; + result.register_function_hashed(*b"sol_log_pubkey", SyscallLogPubkey::vm)?; // Program defined addresses (PDA) result.register_function_hashed( *b"sol_create_program_address", - SyscallCreateProgramAddress::call, + SyscallCreateProgramAddress::vm, )?; result.register_function_hashed( *b"sol_try_find_program_address", - SyscallTryFindProgramAddress::call, + SyscallTryFindProgramAddress::vm, )?; // Sha256 - result.register_function_hashed(*b"sol_sha256", SyscallHash::call::)?; + result.register_function_hashed(*b"sol_sha256", SyscallHash::vm::)?; // Keccak256 - result.register_function_hashed(*b"sol_keccak256", SyscallHash::call::)?; + result.register_function_hashed(*b"sol_keccak256", SyscallHash::vm::)?; // Secp256k1 Recover - result.register_function_hashed(*b"sol_secp256k1_recover", SyscallSecp256k1Recover::call)?; + result.register_function_hashed(*b"sol_secp256k1_recover", SyscallSecp256k1Recover::vm)?; // Blake3 register_feature_gated_function!( result, blake3_syscall_enabled, *b"sol_blake3", - SyscallHash::call::, + SyscallHash::vm::, )?; // Elliptic Curve Operations @@ -338,78 +338,78 @@ pub fn create_program_runtime_environment_v1<'a>( result, curve25519_syscall_enabled, *b"sol_curve_validate_point", - SyscallCurvePointValidation::call, + SyscallCurvePointValidation::vm, )?; register_feature_gated_function!( result, curve25519_syscall_enabled, *b"sol_curve_group_op", - SyscallCurveGroupOps::call, + SyscallCurveGroupOps::vm, )?; register_feature_gated_function!( result, curve25519_syscall_enabled, *b"sol_curve_multiscalar_mul", - 
SyscallCurveMultiscalarMultiplication::call, + SyscallCurveMultiscalarMultiplication::vm, )?; // Sysvars - result.register_function_hashed(*b"sol_get_clock_sysvar", SyscallGetClockSysvar::call)?; + result.register_function_hashed(*b"sol_get_clock_sysvar", SyscallGetClockSysvar::vm)?; result.register_function_hashed( *b"sol_get_epoch_schedule_sysvar", - SyscallGetEpochScheduleSysvar::call, + SyscallGetEpochScheduleSysvar::vm, )?; register_feature_gated_function!( result, !disable_fees_sysvar, *b"sol_get_fees_sysvar", - SyscallGetFeesSysvar::call, + SyscallGetFeesSysvar::vm, )?; - result.register_function_hashed(*b"sol_get_rent_sysvar", SyscallGetRentSysvar::call)?; + result.register_function_hashed(*b"sol_get_rent_sysvar", SyscallGetRentSysvar::vm)?; register_feature_gated_function!( result, last_restart_slot_syscall_enabled, *b"sol_get_last_restart_slot", - SyscallGetLastRestartSlotSysvar::call, + SyscallGetLastRestartSlotSysvar::vm, )?; register_feature_gated_function!( result, epoch_rewards_syscall_enabled, *b"sol_get_epoch_rewards_sysvar", - SyscallGetEpochRewardsSysvar::call, + SyscallGetEpochRewardsSysvar::vm, )?; // Memory ops - result.register_function_hashed(*b"sol_memcpy_", SyscallMemcpy::call)?; - result.register_function_hashed(*b"sol_memmove_", SyscallMemmove::call)?; - result.register_function_hashed(*b"sol_memcmp_", SyscallMemcmp::call)?; - result.register_function_hashed(*b"sol_memset_", SyscallMemset::call)?; + result.register_function_hashed(*b"sol_memcpy_", SyscallMemcpy::vm)?; + result.register_function_hashed(*b"sol_memmove_", SyscallMemmove::vm)?; + result.register_function_hashed(*b"sol_memcmp_", SyscallMemcmp::vm)?; + result.register_function_hashed(*b"sol_memset_", SyscallMemset::vm)?; // Processed sibling instructions result.register_function_hashed( *b"sol_get_processed_sibling_instruction", - SyscallGetProcessedSiblingInstruction::call, + SyscallGetProcessedSiblingInstruction::vm, )?; // Stack height - 
result.register_function_hashed(*b"sol_get_stack_height", SyscallGetStackHeight::call)?; + result.register_function_hashed(*b"sol_get_stack_height", SyscallGetStackHeight::vm)?; // Return data - result.register_function_hashed(*b"sol_set_return_data", SyscallSetReturnData::call)?; - result.register_function_hashed(*b"sol_get_return_data", SyscallGetReturnData::call)?; + result.register_function_hashed(*b"sol_set_return_data", SyscallSetReturnData::vm)?; + result.register_function_hashed(*b"sol_get_return_data", SyscallGetReturnData::vm)?; // Cross-program invocation - result.register_function_hashed(*b"sol_invoke_signed_c", SyscallInvokeSignedC::call)?; - result.register_function_hashed(*b"sol_invoke_signed_rust", SyscallInvokeSignedRust::call)?; + result.register_function_hashed(*b"sol_invoke_signed_c", SyscallInvokeSignedC::vm)?; + result.register_function_hashed(*b"sol_invoke_signed_rust", SyscallInvokeSignedRust::vm)?; // Memory allocator register_feature_gated_function!( result, !disable_deploy_of_alloc_free_syscall, *b"sol_alloc_free_", - SyscallAllocFree::call, + SyscallAllocFree::vm, )?; // Alt_bn128 @@ -417,7 +417,7 @@ pub fn create_program_runtime_environment_v1<'a>( result, enable_alt_bn128_syscall, *b"sol_alt_bn128_group_op", - SyscallAltBn128::call, + SyscallAltBn128::vm, )?; // Big_mod_exp @@ -425,7 +425,7 @@ pub fn create_program_runtime_environment_v1<'a>( result, enable_big_mod_exp_syscall, *b"sol_big_mod_exp", - SyscallBigModExp::call, + SyscallBigModExp::vm, )?; // Poseidon @@ -433,7 +433,7 @@ pub fn create_program_runtime_environment_v1<'a>( result, enable_poseidon_syscall, *b"sol_poseidon", - SyscallPoseidon::call, + SyscallPoseidon::vm, )?; // Accessing remaining compute units @@ -441,7 +441,7 @@ pub fn create_program_runtime_environment_v1<'a>( result, remaining_compute_units_syscall_enabled, *b"sol_remaining_compute_units", - SyscallRemainingComputeUnits::call + SyscallRemainingComputeUnits::vm )?; // Alt_bn128_compression @@ -449,11 +449,11 
@@ pub fn create_program_runtime_environment_v1<'a>( result, enable_alt_bn128_compression_syscall, *b"sol_alt_bn128_compression", - SyscallAltBn128Compression::call, + SyscallAltBn128Compression::vm, )?; // Log data - result.register_function_hashed(*b"sol_log_data", SyscallLogData::call)?; + result.register_function_hashed(*b"sol_log_data", SyscallLogData::vm)?; Ok(BuiltinProgram::new_loader(config, result)) } @@ -471,7 +471,10 @@ fn translate( vm_addr: u64, len: u64, ) -> Result { - memory_mapping.map(access_type, vm_addr, len, 0).into() + memory_mapping + .map(access_type, vm_addr, len) + .map_err(|err| err.into()) + .into() } fn translate_type_inner<'a, T>( @@ -590,65 +593,13 @@ fn translate_string_and_do( } } -#[macro_export] -macro_rules! declare_syscall { - ($(#[$attr:meta])* $name:ident, $inner_call:item) => { - $(#[$attr])* - pub struct $name {} - impl $name { - $inner_call - pub fn call( - invoke_context: &mut InvokeContext, - arg_a: u64, - arg_b: u64, - arg_c: u64, - arg_d: u64, - arg_e: u64, - memory_mapping: &mut MemoryMapping, - result: &mut ProgramResult, - ) { - let converted_result: ProgramResult = Self::inner_call( - invoke_context, arg_a, arg_b, arg_c, arg_d, arg_e, memory_mapping, - ).into(); - *result = converted_result; - } - } - }; -} - -#[macro_export] -macro_rules! 
declare_syscallhash { - ($(#[$attr:meta])* $name:ident, $inner_call:item) => { - $(#[$attr])* - pub struct $name {} - impl $name { - $inner_call - pub fn call( - invoke_context: &mut InvokeContext, - arg_a: u64, - arg_b: u64, - arg_c: u64, - arg_d: u64, - arg_e: u64, - memory_mapping: &mut MemoryMapping, - result: &mut ProgramResult, - ) { - let converted_result: ProgramResult = Self::inner_call::( - invoke_context, arg_a, arg_b, arg_c, arg_d, arg_e, memory_mapping, - ).into(); - *result = converted_result; - } - } - }; -} - -declare_syscall!( +declare_builtin_function!( /// Abort syscall functions, called when the SBF program calls `abort()` /// LLVM will insert calls to `abort()` if it detects an untenable situation, /// `abort()` is not intended to be called explicitly by the program. /// Causes the SBF program to be halted immediately SyscallAbort, - fn inner_call( + fn rust( _invoke_context: &mut InvokeContext, _arg1: u64, _arg2: u64, @@ -661,11 +612,11 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Panic syscall function, called when the SBF program calls 'sol_panic_()` /// Causes the SBF program to be halted immediately SyscallPanic, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, file: u64, len: u64, @@ -690,7 +641,7 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Dynamic memory allocation syscall called when the SBF program calls /// `sol_alloc_free_()`. The allocator is expected to allocate/free /// from/to a given chunk of memory and enforce size restrictions. The @@ -698,7 +649,7 @@ declare_syscall!( /// information about that memory (start address and size) is passed /// to the VM to use for enforcement. 
SyscallAllocFree, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, size: u64, free_addr: u64, @@ -765,10 +716,10 @@ fn translate_and_check_program_address_inputs<'a>( Ok((seeds, program_id)) } -declare_syscall!( +declare_builtin_function!( /// Create a program address SyscallCreateProgramAddress, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, seeds_addr: u64, seeds_len: u64, @@ -806,10 +757,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Create a program address SyscallTryFindProgramAddress, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, seeds_addr: u64, seeds_len: u64, @@ -873,10 +824,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// secp256k1_recover SyscallSecp256k1Recover, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, hash_addr: u64, recovery_id_val: u64, @@ -944,12 +895,12 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( // Elliptic Curve Point Validation // // Currently, only curve25519 Edwards and Ristretto representations are supported SyscallCurvePointValidation, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, curve_id: u64, point_addr: u64, @@ -1001,12 +952,12 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( // Elliptic Curve Group Operations // // Currently, only curve25519 Edwards and Ristretto representations are supported SyscallCurveGroupOps, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, curve_id: u64, group_op: u64, @@ -1202,12 +1153,12 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( // Elliptic Curve Multiscalar Multiplication // // Currently, only curve25519 Edwards and Ristretto representations are supported SyscallCurveMultiscalarMultiplication, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, curve_id: u64, scalars_addr: u64, @@ -1307,10 +1258,10 @@ declare_syscall!( } ); -declare_syscall!( 
+declare_builtin_function!( /// Set return data SyscallSetReturnData, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, addr: u64, len: u64, @@ -1356,13 +1307,13 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Get return data SyscallGetReturnData, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, return_data_addr: u64, - mut length: u64, + length: u64, program_id_addr: u64, _arg4: u64, _arg5: u64, @@ -1373,7 +1324,7 @@ declare_syscall!( consume_compute_meter(invoke_context, budget.syscall_base_cost)?; let (program_id, return_data) = invoke_context.transaction_context.get_return_data(); - length = length.min(return_data.len() as u64); + let length = length.min(return_data.len() as u64); if length != 0 { let cost = length .saturating_add(size_of::() as u64) @@ -1421,10 +1372,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Get a processed sigling instruction SyscallGetProcessedSiblingInstruction, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, index: u64, meta_addr: u64, @@ -1567,10 +1518,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Get current call stack height SyscallGetStackHeight, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, _arg1: u64, _arg2: u64, @@ -1587,10 +1538,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// alt_bn128 group operations SyscallAltBn128, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, group_op: u64, input_addr: u64, @@ -1674,10 +1625,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Big integer modular exponentiation SyscallBigModExp, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, params: u64, return_value: u64, @@ -1753,10 +1704,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( // Poseidon SyscallPoseidon, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, 
parameters: u64, endianness: u64, @@ -1825,10 +1776,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Read remaining compute units SyscallRemainingComputeUnits, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, _arg1: u64, _arg2: u64, @@ -1845,10 +1796,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// alt_bn128 g1 and g2 compression and decompression SyscallAltBn128Compression, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, op: u64, input_addr: u64, @@ -1948,10 +1899,10 @@ declare_syscall!( } ); -declare_syscallhash!( +declare_builtin_function!( // Generic Hashing Syscall - SyscallHash, - fn inner_call( + SyscallHash, + fn rust( invoke_context: &mut InvokeContext, vals_addr: u64, vals_len: u64, @@ -2030,10 +1981,7 @@ mod tests { core::slice, solana_program_runtime::{invoke_context::InvokeContext, with_mock_invoke_context}, solana_rbpf::{ - elf::SBPFVersion, - error::EbpfError, - memory_region::MemoryRegion, - vm::{BuiltinFunction, Config}, + error::EbpfError, memory_region::MemoryRegion, program::SBPFVersion, vm::Config, }, solana_sdk::{ account::{create_account_shared_data_for_test, AccountSharedData}, @@ -2053,9 +2001,8 @@ mod tests { macro_rules! 
assert_access_violation { ($result:expr, $va:expr, $len:expr) => { match $result.unwrap_err().downcast_ref::().unwrap() { - EbpfError::AccessViolation(_, _, va, len, _) if $va == *va && $len == *len => {} - EbpfError::StackAccessViolation(_, _, va, len, _) if $va == *va && $len == *len => { - } + EbpfError::AccessViolation(_, va, len, _) if $va == *va && $len == *len => {} + EbpfError::StackAccessViolation(_, va, len, _) if $va == *va && $len == *len => {} _ => panic!(), } }; @@ -2291,17 +2238,7 @@ mod tests { prepare_mockup!(invoke_context, program_id, bpf_loader::id()); let config = Config::default(); let mut memory_mapping = MemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap(); - let mut result = ProgramResult::Ok(0); - SyscallAbort::call( - &mut invoke_context, - 0, - 0, - 0, - 0, - 0, - &mut memory_mapping, - &mut result, - ); + let result = SyscallAbort::rust(&mut invoke_context, 0, 0, 0, 0, 0, &mut memory_mapping); result.unwrap(); } @@ -2320,8 +2257,7 @@ mod tests { .unwrap(); invoke_context.mock_set_remaining(string.len() as u64 - 1); - let mut result = ProgramResult::Ok(0); - SyscallPanic::call( + let result = SyscallPanic::rust( &mut invoke_context, 0x100000000, string.len() as u64, @@ -2329,16 +2265,14 @@ mod tests { 84, 0, &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded + Result::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded ); invoke_context.mock_set_remaining(string.len() as u64); - let mut result = ProgramResult::Ok(0); - SyscallPanic::call( + let result = SyscallPanic::rust( &mut invoke_context, 0x100000000, string.len() as u64, @@ -2346,7 +2280,6 @@ mod tests { 84, 0, &mut memory_mapping, - &mut result, ); result.unwrap(); } @@ -2365,8 +2298,7 @@ mod tests { .unwrap(); invoke_context.mock_set_remaining(400 - 1); - let mut result = ProgramResult::Ok(0); - 
SyscallLog::call( + let result = SyscallLog::rust( &mut invoke_context, 0x100000001, // AccessViolation string.len() as u64, @@ -2374,11 +2306,9 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_access_violation!(result, 0x100000001, string.len() as u64); - let mut result = ProgramResult::Ok(0); - SyscallLog::call( + let result = SyscallLog::rust( &mut invoke_context, 0x100000000, string.len() as u64 * 2, // AccessViolation @@ -2386,12 +2316,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_access_violation!(result, 0x100000000, string.len() as u64 * 2); - let mut result = ProgramResult::Ok(0); - SyscallLog::call( + let result = SyscallLog::rust( &mut invoke_context, 0x100000000, string.len() as u64, @@ -2399,11 +2327,9 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); result.unwrap(); - let mut result = ProgramResult::Ok(0); - SyscallLog::call( + let result = SyscallLog::rust( &mut invoke_context, 0x100000000, string.len() as u64, @@ -2411,11 +2337,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded + Result::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded ); assert_eq!( @@ -2436,17 +2361,7 @@ mod tests { invoke_context.mock_set_remaining(cost); let config = Config::default(); let mut memory_mapping = MemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap(); - let mut result = ProgramResult::Ok(0); - SyscallLogU64::call( - &mut invoke_context, - 1, - 2, - 3, - 4, - 5, - &mut memory_mapping, - &mut result, - ); + let result = SyscallLogU64::rust(&mut invoke_context, 1, 2, 3, 4, 5, &mut memory_mapping); result.unwrap(); assert_eq!( @@ -2473,8 +2388,7 @@ mod tests { ) .unwrap(); - let mut result = ProgramResult::Ok(0); - SyscallLogPubkey::call( + let result = SyscallLogPubkey::rust( &mut invoke_context, 0x100000001, // 
AccessViolation 32, @@ -2482,30 +2396,19 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_access_violation!(result, 0x100000001, 32); invoke_context.mock_set_remaining(1); - let mut result = ProgramResult::Ok(0); - SyscallLogPubkey::call( - &mut invoke_context, - 100, - 32, - 0, - 0, - 0, - &mut memory_mapping, - &mut result, - ); + let result = + SyscallLogPubkey::rust(&mut invoke_context, 100, 32, 0, 0, 0, &mut memory_mapping); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded + Result::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded ); invoke_context.mock_set_remaining(cost); - let mut result = ProgramResult::Ok(0); - SyscallLogPubkey::call( + let result = SyscallLogPubkey::rust( &mut invoke_context, 0x100000000, 0, @@ -2513,7 +2416,6 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); result.unwrap(); @@ -2536,8 +2438,7 @@ mod tests { let mut vm = vm.unwrap(); let invoke_context = &mut vm.context_object_pointer; let memory_mapping = &mut vm.memory_mapping; - let mut result = ProgramResult::Ok(0); - SyscallAllocFree::call( + let result = SyscallAllocFree::rust( invoke_context, solana_sdk::entrypoint::HEAP_LENGTH as u64, 0, @@ -2545,11 +2446,9 @@ mod tests { 0, 0, memory_mapping, - &mut result, ); assert_ne!(result.unwrap(), 0); - let mut result = ProgramResult::Ok(0); - SyscallAllocFree::call( + let result = SyscallAllocFree::rust( invoke_context, solana_sdk::entrypoint::HEAP_LENGTH as u64, 0, @@ -2557,20 +2456,10 @@ mod tests { 0, 0, memory_mapping, - &mut result, ); assert_eq!(result.unwrap(), 0); - let mut result = ProgramResult::Ok(0); - SyscallAllocFree::call( - invoke_context, - u64::MAX, - 0, - 0, - 0, - 0, - memory_mapping, - &mut result, - ); + let result = + SyscallAllocFree::rust(invoke_context, u64::MAX, 0, 0, 0, 0, memory_mapping); assert_eq!(result.unwrap(), 0); } @@ -2583,12 +2472,10 @@ 
mod tests { let invoke_context = &mut vm.context_object_pointer; let memory_mapping = &mut vm.memory_mapping; for _ in 0..100 { - let mut result = ProgramResult::Ok(0); - SyscallAllocFree::call(invoke_context, 1, 0, 0, 0, 0, memory_mapping, &mut result); + let result = SyscallAllocFree::rust(invoke_context, 1, 0, 0, 0, 0, memory_mapping); assert_ne!(result.unwrap(), 0); } - let mut result = ProgramResult::Ok(0); - SyscallAllocFree::call( + let result = SyscallAllocFree::rust( invoke_context, solana_sdk::entrypoint::HEAP_LENGTH as u64, 0, @@ -2596,7 +2483,6 @@ mod tests { 0, 0, memory_mapping, - &mut result, ); assert_eq!(result.unwrap(), 0); } @@ -2609,12 +2495,10 @@ mod tests { let invoke_context = &mut vm.context_object_pointer; let memory_mapping = &mut vm.memory_mapping; for _ in 0..12 { - let mut result = ProgramResult::Ok(0); - SyscallAllocFree::call(invoke_context, 1, 0, 0, 0, 0, memory_mapping, &mut result); + let result = SyscallAllocFree::rust(invoke_context, 1, 0, 0, 0, 0, memory_mapping); assert_ne!(result.unwrap(), 0); } - let mut result = ProgramResult::Ok(0); - SyscallAllocFree::call( + let result = SyscallAllocFree::rust( invoke_context, solana_sdk::entrypoint::HEAP_LENGTH as u64, 0, @@ -2622,7 +2506,6 @@ mod tests { 0, 0, memory_mapping, - &mut result, ); assert_eq!(result.unwrap(), 0); } @@ -2635,8 +2518,7 @@ mod tests { let mut vm = vm.unwrap(); let invoke_context = &mut vm.context_object_pointer; let memory_mapping = &mut vm.memory_mapping; - let mut result = ProgramResult::Ok(0); - SyscallAllocFree::call( + let result = SyscallAllocFree::rust( invoke_context, size_of::() as u64, 0, @@ -2644,7 +2526,6 @@ mod tests { 0, 0, memory_mapping, - &mut result, ); let address = result.unwrap(); assert_ne!(address, 0); @@ -2701,8 +2582,7 @@ mod tests { * 4, ); - let mut result = ProgramResult::Ok(0); - SyscallHash::call::( + let result = SyscallHash::rust::( &mut invoke_context, ro_va, ro_len, @@ -2710,14 +2590,12 @@ mod tests { 0, 0, &mut memory_mapping, 
- &mut result, ); result.unwrap(); let hash_local = hashv(&[bytes1.as_ref(), bytes2.as_ref()]).to_bytes(); assert_eq!(hash_result, hash_local); - let mut result = ProgramResult::Ok(0); - SyscallHash::call::( + let result = SyscallHash::rust::( &mut invoke_context, ro_va - 1, // AccessViolation ro_len, @@ -2725,11 +2603,9 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_access_violation!(result, ro_va - 1, 32); - let mut result = ProgramResult::Ok(0); - SyscallHash::call::( + let result = SyscallHash::rust::( &mut invoke_context, ro_va, ro_len + 1, // AccessViolation @@ -2737,11 +2613,9 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_access_violation!(result, ro_va, 48); - let mut result = ProgramResult::Ok(0); - SyscallHash::call::( + let result = SyscallHash::rust::( &mut invoke_context, ro_va, ro_len, @@ -2749,11 +2623,9 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_access_violation!(result, rw_va - 1, HASH_BYTES as u64); - let mut result = ProgramResult::Ok(0); - SyscallHash::call::( + let result = SyscallHash::rust::( &mut invoke_context, ro_va, ro_len, @@ -2761,11 +2633,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded + Result::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded ); } @@ -2805,8 +2676,7 @@ mod tests { * 2, ); - let mut result = ProgramResult::Ok(0); - SyscallCurvePointValidation::call( + let result = SyscallCurvePointValidation::rust( &mut invoke_context, CURVE25519_EDWARDS, valid_bytes_va, @@ -2814,12 +2684,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_eq!(0, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurvePointValidation::call( + let result = SyscallCurvePointValidation::rust( &mut invoke_context, CURVE25519_EDWARDS, invalid_bytes_va, @@ 
-2827,12 +2695,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_eq!(1, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurvePointValidation::call( + let result = SyscallCurvePointValidation::rust( &mut invoke_context, CURVE25519_EDWARDS, valid_bytes_va, @@ -2840,11 +2706,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded + Result::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded ); } @@ -2884,8 +2749,7 @@ mod tests { * 2, ); - let mut result = ProgramResult::Ok(0); - SyscallCurvePointValidation::call( + let result = SyscallCurvePointValidation::rust( &mut invoke_context, CURVE25519_RISTRETTO, valid_bytes_va, @@ -2893,12 +2757,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_eq!(0, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurvePointValidation::call( + let result = SyscallCurvePointValidation::rust( &mut invoke_context, CURVE25519_RISTRETTO, invalid_bytes_va, @@ -2906,12 +2768,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_eq!(1, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurvePointValidation::call( + let result = SyscallCurvePointValidation::rust( &mut invoke_context, CURVE25519_RISTRETTO, valid_bytes_va, @@ -2919,11 +2779,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded + Result::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded ); } @@ -2985,8 +2844,7 @@ mod tests { * 2, ); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_EDWARDS, ADD, @@ -2994,7 
+2852,6 @@ mod tests { right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(0, result.unwrap()); @@ -3004,8 +2861,7 @@ mod tests { ]; assert_eq!(expected_sum, result_point); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_EDWARDS, ADD, @@ -3013,12 +2869,10 @@ mod tests { right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(1, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_EDWARDS, SUB, @@ -3026,7 +2880,6 @@ mod tests { right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(0, result.unwrap()); @@ -3036,8 +2889,7 @@ mod tests { ]; assert_eq!(expected_difference, result_point); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_EDWARDS, SUB, @@ -3045,12 +2897,10 @@ mod tests { right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(1, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_EDWARDS, MUL, @@ -3058,7 +2908,6 @@ mod tests { right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); result.unwrap(); @@ -3068,8 +2917,7 @@ mod tests { ]; assert_eq!(expected_product, result_point); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_EDWARDS, MUL, @@ -3077,12 +2925,10 @@ mod tests { invalid_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(1, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, 
CURVE25519_EDWARDS, MUL, @@ -3090,11 +2936,10 @@ mod tests { invalid_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded + Result::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded ); } @@ -3156,8 +3001,7 @@ mod tests { * 2, ); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_RISTRETTO, ADD, @@ -3165,7 +3009,6 @@ mod tests { right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(0, result.unwrap()); @@ -3175,8 +3018,7 @@ mod tests { ]; assert_eq!(expected_sum, result_point); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_RISTRETTO, ADD, @@ -3184,12 +3026,10 @@ mod tests { right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(1, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_RISTRETTO, SUB, @@ -3197,7 +3037,6 @@ mod tests { right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(0, result.unwrap()); @@ -3207,8 +3046,7 @@ mod tests { ]; assert_eq!(expected_difference, result_point); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_RISTRETTO, SUB, @@ -3216,13 +3054,11 @@ mod tests { right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(1, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_RISTRETTO, MUL, @@ -3230,7 +3066,6 @@ mod tests { 
right_point_va, result_point_va, &mut memory_mapping, - &mut result, ); result.unwrap(); @@ -3240,8 +3075,7 @@ mod tests { ]; assert_eq!(expected_product, result_point); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_RISTRETTO, MUL, @@ -3249,13 +3083,11 @@ mod tests { invalid_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(1, result.unwrap()); - let mut result = ProgramResult::Ok(0); - SyscallCurveGroupOps::call( + let result = SyscallCurveGroupOps::rust( &mut invoke_context, CURVE25519_RISTRETTO, MUL, @@ -3263,11 +3095,10 @@ mod tests { invalid_point_va, result_point_va, &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded + Result::Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ComputationalBudgetExceeded ); } @@ -3344,8 +3175,7 @@ mod tests { .curve25519_ristretto_msm_incremental_cost, ); - let mut result = ProgramResult::Ok(0); - SyscallCurveMultiscalarMultiplication::call( + let result = SyscallCurveMultiscalarMultiplication::rust( &mut invoke_context, CURVE25519_EDWARDS, scalars_va, @@ -3353,7 +3183,6 @@ mod tests { 2, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(0, result.unwrap()); @@ -3363,8 +3192,7 @@ mod tests { ]; assert_eq!(expected_product, result_point); - let mut result = ProgramResult::Ok(0); - SyscallCurveMultiscalarMultiplication::call( + let result = SyscallCurveMultiscalarMultiplication::rust( &mut invoke_context, CURVE25519_RISTRETTO, scalars_va, @@ -3372,7 +3200,6 @@ mod tests { 2, result_point_va, &mut memory_mapping, - &mut result, ); assert_eq!(0, result.unwrap()); @@ -3484,8 +3311,7 @@ mod tests { ) .unwrap(); - let mut result = ProgramResult::Ok(0); - SyscallGetClockSysvar::call( + let result = SyscallGetClockSysvar::rust( &mut invoke_context, 
got_clock_va, 0, @@ -3493,7 +3319,6 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); result.unwrap(); assert_eq!(got_clock, src_clock); @@ -3522,8 +3347,7 @@ mod tests { ) .unwrap(); - let mut result = ProgramResult::Ok(0); - SyscallGetEpochScheduleSysvar::call( + let result = SyscallGetEpochScheduleSysvar::rust( &mut invoke_context, got_epochschedule_va, 0, @@ -3531,7 +3355,6 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); result.unwrap(); assert_eq!(got_epochschedule, src_epochschedule); @@ -3561,8 +3384,7 @@ mod tests { ) .unwrap(); - let mut result = ProgramResult::Ok(0); - SyscallGetFeesSysvar::call( + let result = SyscallGetFeesSysvar::rust( &mut invoke_context, got_fees_va, 0, @@ -3570,7 +3392,6 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); result.unwrap(); assert_eq!(got_fees, src_fees); @@ -3595,8 +3416,7 @@ mod tests { ) .unwrap(); - let mut result = ProgramResult::Ok(0); - SyscallGetRentSysvar::call( + let result = SyscallGetRentSysvar::rust( &mut invoke_context, got_rent_va, 0, @@ -3604,7 +3424,6 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); result.unwrap(); assert_eq!(got_rent, src_rent); @@ -3631,8 +3450,7 @@ mod tests { ) .unwrap(); - let mut result = ProgramResult::Ok(0); - SyscallGetEpochRewardsSysvar::call( + let result = SyscallGetEpochRewardsSysvar::rust( &mut invoke_context, got_rewards_va, 0, @@ -3640,7 +3458,6 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); result.unwrap(); assert_eq!(got_rewards, src_rewards); @@ -3654,12 +3471,22 @@ mod tests { } } + type BuiltinFunctionRustInterface<'a> = fn( + &mut InvokeContext<'a>, + u64, + u64, + u64, + u64, + u64, + &mut MemoryMapping, + ) -> Result>; + fn call_program_address_common<'a, 'b: 'a>( invoke_context: &'a mut InvokeContext<'b>, seeds: &[&[u8]], program_id: &Pubkey, overlap_outputs: bool, - syscall: BuiltinFunction>, + syscall: BuiltinFunctionRustInterface<'b>, ) -> Result<(Pubkey, u8), Error> { const SEEDS_VA: u64 = 0x100000000; 
const PROGRAM_ID_VA: u64 = 0x200000000; @@ -3692,8 +3519,7 @@ mod tests { )); let mut memory_mapping = MemoryMapping::new(regions, &config, &SBPFVersion::V2).unwrap(); - let mut result = ProgramResult::Ok(0); - syscall( + let result = syscall( invoke_context, SEEDS_VA, seeds.len() as u64, @@ -3705,9 +3531,8 @@ mod tests { BUMP_SEED_VA }, &mut memory_mapping, - &mut result, ); - Result::::from(result).map(|_| (address, bump_seed)) + result.map(|_| (address, bump_seed)) } fn create_program_address( @@ -3720,7 +3545,7 @@ mod tests { seeds, address, false, - SyscallCreateProgramAddress::call, + SyscallCreateProgramAddress::rust, )?; Ok(address) } @@ -3735,7 +3560,7 @@ mod tests { seeds, address, false, - SyscallTryFindProgramAddress::call, + SyscallTryFindProgramAddress::rust, ) } @@ -3762,8 +3587,7 @@ mod tests { prepare_mockup!(invoke_context, program_id, bpf_loader::id()); - let mut result = ProgramResult::Ok(0); - SyscallSetReturnData::call( + let result = SyscallSetReturnData::rust( &mut invoke_context, SRC_VA, data.len() as u64, @@ -3771,12 +3595,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_eq!(result.unwrap(), 0); - let mut result = ProgramResult::Ok(0); - SyscallGetReturnData::call( + let result = SyscallGetReturnData::rust( &mut invoke_context, DST_VA, data_buffer.len() as u64, @@ -3784,14 +3606,12 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_eq!(result.unwrap() as usize, data.len()); assert_eq!(data.get(0..data_buffer.len()).unwrap(), data_buffer); assert_eq!(id_buffer, program_id.to_bytes()); - let mut result = ProgramResult::Ok(0); - SyscallGetReturnData::call( + let result = SyscallGetReturnData::rust( &mut invoke_context, PROGRAM_ID_VA, data_buffer.len() as u64, @@ -3799,11 +3619,10 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &SyscallError::CopyOverlapping + Result::Err(error) if 
error.downcast_ref::().unwrap() == &SyscallError::CopyOverlapping ); } @@ -3897,8 +3716,7 @@ mod tests { .unwrap(); invoke_context.mock_set_remaining(syscall_base_cost); - let mut result = ProgramResult::Ok(0); - SyscallGetProcessedSiblingInstruction::call( + let result = SyscallGetProcessedSiblingInstruction::rust( &mut invoke_context, 0, VM_BASE_ADDRESS.saturating_add(META_OFFSET as u64), @@ -3906,7 +3724,6 @@ mod tests { VM_BASE_ADDRESS.saturating_add(DATA_OFFSET as u64), VM_BASE_ADDRESS.saturating_add(ACCOUNTS_OFFSET as u64), &mut memory_mapping, - &mut result, ); assert_eq!(result.unwrap(), 1); { @@ -3929,8 +3746,7 @@ mod tests { } invoke_context.mock_set_remaining(syscall_base_cost); - let mut result = ProgramResult::Ok(0); - SyscallGetProcessedSiblingInstruction::call( + let result = SyscallGetProcessedSiblingInstruction::rust( &mut invoke_context, 1, VM_BASE_ADDRESS.saturating_add(META_OFFSET as u64), @@ -3938,13 +3754,11 @@ mod tests { VM_BASE_ADDRESS.saturating_add(DATA_OFFSET as u64), VM_BASE_ADDRESS.saturating_add(ACCOUNTS_OFFSET as u64), &mut memory_mapping, - &mut result, ); assert_eq!(result.unwrap(), 0); invoke_context.mock_set_remaining(syscall_base_cost); - let mut result = ProgramResult::Ok(0); - SyscallGetProcessedSiblingInstruction::call( + let result = SyscallGetProcessedSiblingInstruction::rust( &mut invoke_context, 0, VM_BASE_ADDRESS.saturating_add(META_OFFSET as u64), @@ -3952,11 +3766,10 @@ mod tests { VM_BASE_ADDRESS.saturating_add(META_OFFSET as u64), VM_BASE_ADDRESS.saturating_add(META_OFFSET as u64), &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &SyscallError::CopyOverlapping + Result::Err(error) if error.downcast_ref::().unwrap() == &SyscallError::CopyOverlapping ); } @@ -4138,7 +3951,7 @@ mod tests { seeds, &address, true, - SyscallTryFindProgramAddress::call, + SyscallTryFindProgramAddress::rust, ), Result::Err(error) if 
error.downcast_ref::().unwrap() == &SyscallError::CopyOverlapping ); @@ -4186,8 +3999,7 @@ mod tests { + (MAX_LEN * MAX_LEN) / budget.big_modular_exponentiation_cost, ); - let mut result = ProgramResult::Ok(0); - SyscallBigModExp::call( + let result = SyscallBigModExp::rust( &mut invoke_context, VADDR_PARAMS, VADDR_OUT, @@ -4195,7 +4007,6 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_eq!(result.unwrap(), 0); @@ -4229,8 +4040,7 @@ mod tests { + (INV_LEN * INV_LEN) / budget.big_modular_exponentiation_cost, ); - let mut result = ProgramResult::Ok(0); - SyscallBigModExp::call( + let result = SyscallBigModExp::rust( &mut invoke_context, VADDR_PARAMS, VADDR_OUT, @@ -4238,12 +4048,11 @@ mod tests { 0, 0, &mut memory_mapping, - &mut result, ); assert_matches!( result, - ProgramResult::Err(error) if error.downcast_ref::().unwrap() == &SyscallError::InvalidLength + Result::Err(error) if error.downcast_ref::().unwrap() == &SyscallError::InvalidLength ); } } diff --git a/programs/bpf_loader/src/syscalls/sysvar.rs b/programs/bpf_loader/src/syscalls/sysvar.rs index d86402b34078e5..e8777569cef1da 100644 --- a/programs/bpf_loader/src/syscalls/sysvar.rs +++ b/programs/bpf_loader/src/syscalls/sysvar.rs @@ -1,4 +1,4 @@ -use {super::*, crate::declare_syscall}; +use super::*; fn get_sysvar( sysvar: Result, InstructionError>, @@ -22,10 +22,10 @@ fn get_sysvar( Ok(SUCCESS) } -declare_syscall!( +declare_builtin_function!( /// Get a Clock sysvar SyscallGetClockSysvar, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, @@ -44,10 +44,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Get a EpochSchedule sysvar SyscallGetEpochScheduleSysvar, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, @@ -66,10 +66,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Get a EpochRewards sysvar SyscallGetEpochRewardsSysvar, - fn inner_call( + fn 
rust( invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, @@ -88,10 +88,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Get a Fees sysvar SyscallGetFeesSysvar, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, @@ -113,10 +113,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Get a Rent sysvar SyscallGetRentSysvar, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, @@ -135,10 +135,10 @@ declare_syscall!( } ); -declare_syscall!( +declare_builtin_function!( /// Get a Last Restart Slot sysvar SyscallGetLastRestartSlotSysvar, - fn inner_call( + fn rust( invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, diff --git a/programs/compute-budget/src/lib.rs b/programs/compute-budget/src/lib.rs index e296ca3a2f8324..01bbd7a8b4f21c 100644 --- a/programs/compute-budget/src/lib.rs +++ b/programs/compute-budget/src/lib.rs @@ -2,11 +2,7 @@ use solana_program_runtime::declare_process_instruction; pub const DEFAULT_COMPUTE_UNITS: u64 = 150; -declare_process_instruction!( - process_instruction, - DEFAULT_COMPUTE_UNITS, - |_invoke_context| { - // Do nothing, compute budget instructions handled by the runtime - Ok(()) - } -); +declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |_invoke_context| { + // Do nothing, compute budget instructions handled by the runtime + Ok(()) +}); diff --git a/programs/config/src/config_processor.rs b/programs/config/src/config_processor.rs index 628e77cb93af43..d053405698452a 100644 --- a/programs/config/src/config_processor.rs +++ b/programs/config/src/config_processor.rs @@ -13,131 +13,127 @@ use { pub const DEFAULT_COMPUTE_UNITS: u64 = 450; -declare_process_instruction!( - process_instruction, - DEFAULT_COMPUTE_UNITS, - |invoke_context| { - let transaction_context = &invoke_context.transaction_context; - let instruction_context = 
transaction_context.get_current_instruction_context()?; - let data = instruction_context.get_instruction_data(); - - let key_list: ConfigKeys = limited_deserialize(data)?; - let config_account_key = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?; - let config_account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let is_config_account_signer = config_account.is_signer(); - let current_data: ConfigKeys = { - if config_account.get_owner() != &crate::id() { - return Err(InstructionError::InvalidAccountOwner); - } +declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| { + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + let data = instruction_context.get_instruction_data(); + + let key_list: ConfigKeys = limited_deserialize(data)?; + let config_account_key = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(0)?, + )?; + let config_account = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let is_config_account_signer = config_account.is_signer(); + let current_data: ConfigKeys = { + if config_account.get_owner() != &crate::id() { + return Err(InstructionError::InvalidAccountOwner); + } - deserialize(config_account.get_data()).map_err(|err| { - ic_msg!( - invoke_context, - "Unable to deserialize config account: {}", - err - ); - InstructionError::InvalidAccountData - })? 
- }; - drop(config_account); - - let current_signer_keys: Vec = current_data - .keys - .iter() - .filter(|(_, is_signer)| *is_signer) - .map(|(pubkey, _)| *pubkey) - .collect(); - if current_signer_keys.is_empty() { - // Config account keypair must be a signer on account initialization, - // or when no signers specified in Config data - if !is_config_account_signer { - return Err(InstructionError::MissingRequiredSignature); - } + deserialize(config_account.get_data()).map_err(|err| { + ic_msg!( + invoke_context, + "Unable to deserialize config account: {}", + err + ); + InstructionError::InvalidAccountData + })? + }; + drop(config_account); + + let current_signer_keys: Vec = current_data + .keys + .iter() + .filter(|(_, is_signer)| *is_signer) + .map(|(pubkey, _)| *pubkey) + .collect(); + if current_signer_keys.is_empty() { + // Config account keypair must be a signer on account initialization, + // or when no signers specified in Config data + if !is_config_account_signer { + return Err(InstructionError::MissingRequiredSignature); } + } - let mut counter = 0; - for (signer, _) in key_list.keys.iter().filter(|(_, is_signer)| *is_signer) { - counter += 1; - if signer != config_account_key { - let signer_account = instruction_context - .try_borrow_instruction_account(transaction_context, counter as IndexOfAccount) - .map_err(|_| { - ic_msg!( - invoke_context, - "account {:?} is not in account list", - signer, - ); - InstructionError::MissingRequiredSignature - })?; - if !signer_account.is_signer() { - ic_msg!( - invoke_context, - "account {:?} signer_key().is_none()", - signer - ); - return Err(InstructionError::MissingRequiredSignature); - } - if signer_account.get_key() != signer { + let mut counter = 0; + for (signer, _) in key_list.keys.iter().filter(|(_, is_signer)| *is_signer) { + counter += 1; + if signer != config_account_key { + let signer_account = instruction_context + .try_borrow_instruction_account(transaction_context, counter as IndexOfAccount) + 
.map_err(|_| { ic_msg!( invoke_context, - "account[{:?}].signer_key() does not match Config data)", - counter + 1 + "account {:?} is not in account list", + signer, ); - return Err(InstructionError::MissingRequiredSignature); - } - // If Config account is already initialized, update signatures must match Config data - if !current_data.keys.is_empty() - && !current_signer_keys.iter().any(|pubkey| pubkey == signer) - { - ic_msg!( - invoke_context, - "account {:?} is not in stored signer list", - signer - ); - return Err(InstructionError::MissingRequiredSignature); - } - } else if !is_config_account_signer { - ic_msg!(invoke_context, "account[0].signer_key().is_none()"); + InstructionError::MissingRequiredSignature + })?; + if !signer_account.is_signer() { + ic_msg!( + invoke_context, + "account {:?} signer_key().is_none()", + signer + ); return Err(InstructionError::MissingRequiredSignature); } - } - - if invoke_context - .feature_set - .is_active(&feature_set::dedupe_config_program_signers::id()) - { - let total_new_keys = key_list.keys.len(); - let unique_new_keys = key_list.keys.into_iter().collect::>(); - if unique_new_keys.len() != total_new_keys { - ic_msg!(invoke_context, "new config contains duplicate keys"); - return Err(InstructionError::InvalidArgument); + if signer_account.get_key() != signer { + ic_msg!( + invoke_context, + "account[{:?}].signer_key() does not match Config data)", + counter + 1 + ); + return Err(InstructionError::MissingRequiredSignature); } - } - - // Check for Config data signers not present in incoming account update - if current_signer_keys.len() > counter { - ic_msg!( - invoke_context, - "too few signers: {:?}; expected: {:?}", - counter, - current_signer_keys.len() - ); + // If Config account is already initialized, update signatures must match Config data + if !current_data.keys.is_empty() + && !current_signer_keys.iter().any(|pubkey| pubkey == signer) + { + ic_msg!( + invoke_context, + "account {:?} is not in stored signer list", 
+ signer + ); + return Err(InstructionError::MissingRequiredSignature); + } + } else if !is_config_account_signer { + ic_msg!(invoke_context, "account[0].signer_key().is_none()"); return Err(InstructionError::MissingRequiredSignature); } + } - let mut config_account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - if config_account.get_data().len() < data.len() { - ic_msg!(invoke_context, "instruction data too large"); - return Err(InstructionError::InvalidInstructionData); + if invoke_context + .feature_set + .is_active(&feature_set::dedupe_config_program_signers::id()) + { + let total_new_keys = key_list.keys.len(); + let unique_new_keys = key_list.keys.into_iter().collect::>(); + if unique_new_keys.len() != total_new_keys { + ic_msg!(invoke_context, "new config contains duplicate keys"); + return Err(InstructionError::InvalidArgument); } - config_account.get_data_mut()?[..data.len()].copy_from_slice(data); - Ok(()) } -); + + // Check for Config data signers not present in incoming account update + if current_signer_keys.len() > counter { + ic_msg!( + invoke_context, + "too few signers: {:?}; expected: {:?}", + counter, + current_signer_keys.len() + ); + return Err(InstructionError::MissingRequiredSignature); + } + + let mut config_account = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + if config_account.get_data().len() < data.len() { + ic_msg!(invoke_context, "instruction data too large"); + return Err(InstructionError::InvalidInstructionData); + } + config_account.get_data_mut()?[..data.len()].copy_from_slice(data); + Ok(()) +}); #[cfg(test)] mod tests { @@ -169,7 +165,7 @@ mod tests { transaction_accounts, instruction_accounts, expected_result, - super::process_instruction, + Entrypoint::vm, |_invoke_context| {}, |_invoke_context| {}, ) diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 6f15096ecc6389..5372975e18e0c8 100644 --- a/programs/loader-v4/src/lib.rs 
+++ b/programs/loader-v4/src/lib.rs @@ -12,10 +12,12 @@ use { }, solana_rbpf::{ aligned_memory::AlignedMemory, - ebpf, - elf::{Executable, FunctionRegistry}, + declare_builtin_function, ebpf, + elf::Executable, + error::ProgramResult, memory_region::{MemoryMapping, MemoryRegion}, - vm::{BuiltinProgram, Config, ContextObject, EbpfVm, ProgramResult}, + program::{BuiltinProgram, FunctionRegistry}, + vm::{Config, ContextObject, EbpfVm}, }, solana_sdk::{ entrypoint::SUCCESS, @@ -81,7 +83,6 @@ pub fn create_program_runtime_environment_v2<'a>( reject_broken_elfs: true, noop_instruction_rate: 256, sanitize_user_provided_values: true, - encrypt_runtime_environment: true, external_internal_function_hash_collision: true, reject_callx_r10: true, enable_sbpf_v1: false, @@ -131,7 +132,7 @@ pub fn create_vm<'a, 'b>( Box::new(InstructionError::ProgramEnvironmentSetupFailure) })?; Ok(EbpfVm::new( - config, + program.get_loader().clone(), sbpf_version, invoke_context, memory_mapping, @@ -182,9 +183,9 @@ fn execute<'a, 'b: 'a>( match result { ProgramResult::Ok(status) if status != SUCCESS => { let error: InstructionError = status.into(); - Err(Box::new(error) as Box) + Err(error.into()) } - ProgramResult::Err(error) => Err(error), + ProgramResult::Err(error) => Err(error.into()), _ => Ok(()), } } @@ -527,18 +528,20 @@ pub fn process_instruction_transfer_authority( Ok(()) } -pub fn process_instruction( - invoke_context: &mut InvokeContext, - _arg0: u64, - _arg1: u64, - _arg2: u64, - _arg3: u64, - _arg4: u64, - _memory_mapping: &mut MemoryMapping, - result: &mut ProgramResult, -) { - *result = process_instruction_inner(invoke_context).into(); -} +declare_builtin_function!( + Entrypoint, + fn rust( + invoke_context: &mut InvokeContext, + _arg0: u64, + _arg1: u64, + _arg2: u64, + _arg3: u64, + _arg4: u64, + _memory_mapping: &mut MemoryMapping, + ) -> Result> { + process_instruction_inner(invoke_context) + } +); pub fn process_instruction_inner( invoke_context: &mut InvokeContext, @@ 
-700,7 +703,7 @@ mod tests { transaction_accounts, instruction_accounts, expected_result, - super::process_instruction, + Entrypoint::vm, |invoke_context| { invoke_context .programs_modified_by_tx diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index e4117582ab6dda..edb3ba752962a3 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5341,6 +5341,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-vote-program", + "solana_rbpf", "test-case", "thiserror", "tokio", @@ -6514,9 +6515,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "103318aa365ff7caa8cf534f2246b5eb7e5b34668736d52b1266b143f7a21196" +checksum = "3d457cc2ba742c120492a64b7fa60e22c575e891f6b55039f4d736568fb112a3" dependencies = [ "byteorder 1.5.0", "combine", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 507bf01385e44c..6f069a3f5bfd8b 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -48,7 +48,7 @@ solana-sdk = { path = "../../sdk", version = "=1.18.0" } solana-transaction-status = { path = "../../transaction-status", version = "=1.18.0" } solana-validator = { path = "../../validator", version = "=1.18.0" } solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.18.0" } -solana_rbpf = "=0.7.2" +solana_rbpf = "=0.8.0" static_assertions = "1.1.0" thiserror = "1.0" diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index f6b680b1054442..8a7cfb693afdd9 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -1449,7 +1449,7 @@ fn assert_instruction_count() { transaction_accounts, instruction_accounts, Ok(()), - solana_bpf_loader_program::process_instruction, + solana_bpf_loader_program::Entrypoint::vm, |invoke_context| { *prev_compute_meter.borrow_mut() = invoke_context.get_remaining(); 
solana_bpf_loader_program::test_utils::load_all_invoked_programs(invoke_context); @@ -4397,7 +4397,7 @@ fn test_cpi_change_account_data_memory_allocation() { let feature_set = FeatureSet::all_enabled(); bank.feature_set = Arc::new(feature_set); - declare_process_instruction!(process_instruction, 42, |invoke_context| { + declare_process_instruction!(MockBuiltin, 42, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); @@ -4428,7 +4428,7 @@ fn test_cpi_change_account_data_memory_allocation() { bank.add_builtin( builtin_program_id, "test_cpi_change_account_data_memory_allocation_builtin".to_string(), - LoadedProgram::new_builtin(0, 42, process_instruction), + LoadedProgram::new_builtin(0, 42, MockBuiltin::vm), ); let bank = Arc::new(bank); diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 20b6c9e0ebe3c4..cd0b59e82534ae 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -54,411 +54,391 @@ fn get_optional_pubkey<'a>( pub const DEFAULT_COMPUTE_UNITS: u64 = 750; -declare_process_instruction!( - process_instruction, - DEFAULT_COMPUTE_UNITS, - |invoke_context| { - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let data = instruction_context.get_instruction_data(); - - trace!("process_instruction: {:?}", data); - - let get_stake_account = || { - let me = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - if *me.get_owner() != id() { - return Err(InstructionError::InvalidAccountOwner); - } - Ok(me) - }; +declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| { + let transaction_context = &invoke_context.transaction_context; + let 
instruction_context = transaction_context.get_current_instruction_context()?; + let data = instruction_context.get_instruction_data(); - let signers = instruction_context.get_signers(transaction_context)?; - match limited_deserialize(data) { - Ok(StakeInstruction::Initialize(authorized, lockup)) => { - let mut me = get_stake_account()?; - let rent = - get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - initialize(&mut me, &authorized, &lockup, &rent) - } - Ok(StakeInstruction::Authorize(authorized_pubkey, stake_authorize)) => { - let mut me = get_stake_account()?; - let require_custodian_for_locked_stake_authorize = invoke_context - .feature_set - .is_active(&feature_set::require_custodian_for_locked_stake_authorize::id()); - - if require_custodian_for_locked_stake_authorize { - let clock = get_sysvar_with_account_check::clock( - invoke_context, - instruction_context, - 1, - )?; - instruction_context.check_number_of_instruction_accounts(3)?; - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 3, false)?; - - authorize( - &mut me, - &signers, - &authorized_pubkey, - stake_authorize, - require_custodian_for_locked_stake_authorize, - &clock, - custodian_pubkey, - ) - } else { - authorize( - &mut me, - &signers, - &authorized_pubkey, - stake_authorize, - require_custodian_for_locked_stake_authorize, - &Clock::default(), - None, - ) - } - } - Ok(StakeInstruction::AuthorizeWithSeed(args)) => { - let mut me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(2)?; - let require_custodian_for_locked_stake_authorize = invoke_context - .feature_set - .is_active(&feature_set::require_custodian_for_locked_stake_authorize::id()); - if require_custodian_for_locked_stake_authorize { - let clock = get_sysvar_with_account_check::clock( - invoke_context, - instruction_context, - 2, - )?; - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 3, false)?; - 
- authorize_with_seed( - transaction_context, - instruction_context, - &mut me, - 1, - &args.authority_seed, - &args.authority_owner, - &args.new_authorized_pubkey, - args.stake_authorize, - require_custodian_for_locked_stake_authorize, - &clock, - custodian_pubkey, - ) - } else { - authorize_with_seed( - transaction_context, - instruction_context, - &mut me, - 1, - &args.authority_seed, - &args.authority_owner, - &args.new_authorized_pubkey, - args.stake_authorize, - require_custodian_for_locked_stake_authorize, - &Clock::default(), - None, - ) - } + trace!("process_instruction: {:?}", data); + + let get_stake_account = || { + let me = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + if *me.get_owner() != id() { + return Err(InstructionError::InvalidAccountOwner); + } + Ok(me) + }; + + let signers = instruction_context.get_signers(transaction_context)?; + match limited_deserialize(data) { + Ok(StakeInstruction::Initialize(authorized, lockup)) => { + let mut me = get_stake_account()?; + let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; + initialize(&mut me, &authorized, &lockup, &rent) + } + Ok(StakeInstruction::Authorize(authorized_pubkey, stake_authorize)) => { + let mut me = get_stake_account()?; + let require_custodian_for_locked_stake_authorize = invoke_context + .feature_set + .is_active(&feature_set::require_custodian_for_locked_stake_authorize::id()); + + if require_custodian_for_locked_stake_authorize { + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + instruction_context.check_number_of_instruction_accounts(3)?; + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 3, false)?; + + authorize( + &mut me, + &signers, + &authorized_pubkey, + stake_authorize, + require_custodian_for_locked_stake_authorize, + &clock, + custodian_pubkey, + ) + } else { + authorize( + &mut me, + &signers, + &authorized_pubkey, + 
stake_authorize, + require_custodian_for_locked_stake_authorize, + &Clock::default(), + None, + ) } - Ok(StakeInstruction::DelegateStake) => { - let me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(2)?; + } + Ok(StakeInstruction::AuthorizeWithSeed(args)) => { + let mut me = get_stake_account()?; + instruction_context.check_number_of_instruction_accounts(2)?; + let require_custodian_for_locked_stake_authorize = invoke_context + .feature_set + .is_active(&feature_set::require_custodian_for_locked_stake_authorize::id()); + if require_custodian_for_locked_stake_authorize { let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - let stake_history = get_sysvar_with_account_check::stake_history( - invoke_context, - instruction_context, - 3, - )?; - instruction_context.check_number_of_instruction_accounts(5)?; - drop(me); - if !invoke_context - .feature_set - .is_active(&feature_set::reduce_stake_warmup_cooldown::id()) - { - // Post feature activation, remove both the feature gate code and the config completely in the interface - let config_account = instruction_context - .try_borrow_instruction_account(transaction_context, 4)?; - #[allow(deprecated)] - if !config::check_id(config_account.get_key()) { - return Err(InstructionError::InvalidArgument); - } - config::from(&config_account).ok_or(InstructionError::InvalidArgument)?; - } - delegate( - invoke_context, + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 3, false)?; + + authorize_with_seed( transaction_context, instruction_context, - 0, + &mut me, 1, + &args.authority_seed, + &args.authority_owner, + &args.new_authorized_pubkey, + args.stake_authorize, + require_custodian_for_locked_stake_authorize, &clock, - &stake_history, - &signers, - &invoke_context.feature_set, + custodian_pubkey, ) - } - Ok(StakeInstruction::Split(lamports)) => { - let me = get_stake_account()?; - 
instruction_context.check_number_of_instruction_accounts(2)?; - drop(me); - split( - invoke_context, + } else { + authorize_with_seed( transaction_context, instruction_context, - 0, - lamports, + &mut me, 1, - &signers, + &args.authority_seed, + &args.authority_owner, + &args.new_authorized_pubkey, + args.stake_authorize, + require_custodian_for_locked_stake_authorize, + &Clock::default(), + None, ) } - Ok(StakeInstruction::Merge) => { - let me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(2)?; + } + Ok(StakeInstruction::DelegateStake) => { + let me = get_stake_account()?; + instruction_context.check_number_of_instruction_accounts(2)?; + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; + let stake_history = get_sysvar_with_account_check::stake_history( + invoke_context, + instruction_context, + 3, + )?; + instruction_context.check_number_of_instruction_accounts(5)?; + drop(me); + if !invoke_context + .feature_set + .is_active(&feature_set::reduce_stake_warmup_cooldown::id()) + { + // Post feature activation, remove both the feature gate code and the config completely in the interface + let config_account = + instruction_context.try_borrow_instruction_account(transaction_context, 4)?; + #[allow(deprecated)] + if !config::check_id(config_account.get_key()) { + return Err(InstructionError::InvalidArgument); + } + config::from(&config_account).ok_or(InstructionError::InvalidArgument)?; + } + delegate( + invoke_context, + transaction_context, + instruction_context, + 0, + 1, + &clock, + &stake_history, + &signers, + &invoke_context.feature_set, + ) + } + Ok(StakeInstruction::Split(lamports)) => { + let me = get_stake_account()?; + instruction_context.check_number_of_instruction_accounts(2)?; + drop(me); + split( + invoke_context, + transaction_context, + instruction_context, + 0, + lamports, + 1, + &signers, + ) + } + Ok(StakeInstruction::Merge) => { + let me = get_stake_account()?; + 
instruction_context.check_number_of_instruction_accounts(2)?; + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; + let stake_history = get_sysvar_with_account_check::stake_history( + invoke_context, + instruction_context, + 3, + )?; + drop(me); + merge( + invoke_context, + transaction_context, + instruction_context, + 0, + 1, + &clock, + &stake_history, + &signers, + ) + } + Ok(StakeInstruction::Withdraw(lamports)) => { + let me = get_stake_account()?; + instruction_context.check_number_of_instruction_accounts(2)?; + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; + let stake_history = get_sysvar_with_account_check::stake_history( + invoke_context, + instruction_context, + 3, + )?; + instruction_context.check_number_of_instruction_accounts(5)?; + drop(me); + withdraw( + transaction_context, + instruction_context, + 0, + lamports, + 1, + &clock, + &stake_history, + 4, + if instruction_context.get_number_of_instruction_accounts() >= 6 { + Some(5) + } else { + None + }, + new_warmup_cooldown_rate_epoch(invoke_context), + ) + } + Ok(StakeInstruction::Deactivate) => { + let mut me = get_stake_account()?; + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + deactivate(invoke_context, &mut me, &clock, &signers) + } + Ok(StakeInstruction::SetLockup(lockup)) => { + let mut me = get_stake_account()?; + let clock = invoke_context.get_sysvar_cache().get_clock()?; + set_lockup(&mut me, &lockup, &signers, &clock) + } + Ok(StakeInstruction::InitializeChecked) => { + let mut me = get_stake_account()?; + if invoke_context + .feature_set + .is_active(&feature_set::vote_stake_checked_instructions::id()) + { + instruction_context.check_number_of_instruction_accounts(4)?; + let staker_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(2)?, + )?; + let withdrawer_pubkey = 
transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? { + return Err(InstructionError::MissingRequiredSignature); + } + + let authorized = Authorized { + staker: *staker_pubkey, + withdrawer: *withdrawer_pubkey, + }; + + let rent = + get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; + initialize(&mut me, &authorized, &Lockup::default(), &rent) + } else { + Err(InstructionError::InvalidInstructionData) + } + } + Ok(StakeInstruction::AuthorizeChecked(stake_authorize)) => { + let mut me = get_stake_account()?; + if invoke_context + .feature_set + .is_active(&feature_set::vote_stake_checked_instructions::id()) + { let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - let stake_history = get_sysvar_with_account_check::stake_history( - invoke_context, - instruction_context, - 3, + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + instruction_context.check_number_of_instruction_accounts(4)?; + let authorized_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, )?; - drop(me); - merge( - invoke_context, - transaction_context, - instruction_context, - 0, - 1, - &clock, - &stake_history, + if !instruction_context.is_instruction_account_signer(3)? 
{ + return Err(InstructionError::MissingRequiredSignature); + } + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 4, false)?; + + authorize( + &mut me, &signers, + authorized_pubkey, + stake_authorize, + true, + &clock, + custodian_pubkey, ) + } else { + Err(InstructionError::InvalidInstructionData) } - Ok(StakeInstruction::Withdraw(lamports)) => { - let me = get_stake_account()?; + } + Ok(StakeInstruction::AuthorizeCheckedWithSeed(args)) => { + let mut me = get_stake_account()?; + if invoke_context + .feature_set + .is_active(&feature_set::vote_stake_checked_instructions::id()) + { instruction_context.check_number_of_instruction_accounts(2)?; let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - let stake_history = get_sysvar_with_account_check::stake_history( - invoke_context, - instruction_context, - 3, + instruction_context.check_number_of_instruction_accounts(4)?; + let authorized_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, )?; - instruction_context.check_number_of_instruction_accounts(5)?; - drop(me); - withdraw( + if !instruction_context.is_instruction_account_signer(3)? 
{ + return Err(InstructionError::MissingRequiredSignature); + } + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 4, false)?; + + authorize_with_seed( transaction_context, instruction_context, - 0, - lamports, + &mut me, 1, + &args.authority_seed, + &args.authority_owner, + authorized_pubkey, + args.stake_authorize, + true, &clock, - &stake_history, - 4, - if instruction_context.get_number_of_instruction_accounts() >= 6 { - Some(5) - } else { - None - }, - new_warmup_cooldown_rate_epoch(invoke_context), + custodian_pubkey, ) + } else { + Err(InstructionError::InvalidInstructionData) } - Ok(StakeInstruction::Deactivate) => { - let mut me = get_stake_account()?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - deactivate(invoke_context, &mut me, &clock, &signers) - } - Ok(StakeInstruction::SetLockup(lockup)) => { - let mut me = get_stake_account()?; + } + Ok(StakeInstruction::SetLockupChecked(lockup_checked)) => { + let mut me = get_stake_account()?; + if invoke_context + .feature_set + .is_active(&feature_set::vote_stake_checked_instructions::id()) + { + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 2, true)?; + + let lockup = LockupArgs { + unix_timestamp: lockup_checked.unix_timestamp, + epoch: lockup_checked.epoch, + custodian: custodian_pubkey.cloned(), + }; let clock = invoke_context.get_sysvar_cache().get_clock()?; set_lockup(&mut me, &lockup, &signers, &clock) + } else { + Err(InstructionError::InvalidInstructionData) } - Ok(StakeInstruction::InitializeChecked) => { - let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - instruction_context.check_number_of_instruction_accounts(4)?; - let staker_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(2)?, - )?; - let 
withdrawer_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? { - return Err(InstructionError::MissingRequiredSignature); - } - - let authorized = Authorized { - staker: *staker_pubkey, - withdrawer: *withdrawer_pubkey, - }; - - let rent = get_sysvar_with_account_check::rent( - invoke_context, - instruction_context, - 1, - )?; - initialize(&mut me, &authorized, &Lockup::default(), &rent) - } else { - Err(InstructionError::InvalidInstructionData) - } - } - Ok(StakeInstruction::AuthorizeChecked(stake_authorize)) => { - let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - let clock = get_sysvar_with_account_check::clock( - invoke_context, - instruction_context, - 1, - )?; - instruction_context.check_number_of_instruction_accounts(4)?; - let authorized_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? 
{ - return Err(InstructionError::MissingRequiredSignature); - } - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 4, false)?; - - authorize( - &mut me, - &signers, - authorized_pubkey, - stake_authorize, - true, - &clock, - custodian_pubkey, - ) - } else { - Err(InstructionError::InvalidInstructionData) - } - } - Ok(StakeInstruction::AuthorizeCheckedWithSeed(args)) => { - let mut me = get_stake_account()?; - if invoke_context + } + Ok(StakeInstruction::GetMinimumDelegation) => { + let feature_set = invoke_context.feature_set.as_ref(); + let minimum_delegation = crate::get_minimum_delegation(feature_set); + let minimum_delegation = Vec::from(minimum_delegation.to_le_bytes()); + invoke_context + .transaction_context + .set_return_data(id(), minimum_delegation) + } + Ok(StakeInstruction::DeactivateDelinquent) => { + let mut me = get_stake_account()?; + instruction_context.check_number_of_instruction_accounts(3)?; + + let clock = invoke_context.get_sysvar_cache().get_clock()?; + deactivate_delinquent( + invoke_context, + transaction_context, + instruction_context, + &mut me, + 1, + 2, + clock.epoch, + ) + } + Ok(StakeInstruction::Redelegate) => { + let mut me = get_stake_account()?; + if invoke_context + .feature_set + .is_active(&feature_set::stake_redelegate_instruction::id()) + { + instruction_context.check_number_of_instruction_accounts(3)?; + if !invoke_context .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) + .is_active(&feature_set::reduce_stake_warmup_cooldown::id()) { - instruction_context.check_number_of_instruction_accounts(2)?; - let clock = get_sysvar_with_account_check::clock( - invoke_context, - instruction_context, - 2, - )?; - instruction_context.check_number_of_instruction_accounts(4)?; - let authorized_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if 
!instruction_context.is_instruction_account_signer(3)? { - return Err(InstructionError::MissingRequiredSignature); + // Post feature activation, remove both the feature gate code and the config completely in the interface + let config_account = instruction_context + .try_borrow_instruction_account(transaction_context, 3)?; + #[allow(deprecated)] + if !config::check_id(config_account.get_key()) { + return Err(InstructionError::InvalidArgument); } - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 4, false)?; - - authorize_with_seed( - transaction_context, - instruction_context, - &mut me, - 1, - &args.authority_seed, - &args.authority_owner, - authorized_pubkey, - args.stake_authorize, - true, - &clock, - custodian_pubkey, - ) - } else { - Err(InstructionError::InvalidInstructionData) - } - } - Ok(StakeInstruction::SetLockupChecked(lockup_checked)) => { - let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 2, true)?; - - let lockup = LockupArgs { - unix_timestamp: lockup_checked.unix_timestamp, - epoch: lockup_checked.epoch, - custodian: custodian_pubkey.cloned(), - }; - let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup(&mut me, &lockup, &signers, &clock) - } else { - Err(InstructionError::InvalidInstructionData) + config::from(&config_account).ok_or(InstructionError::InvalidArgument)?; } - } - Ok(StakeInstruction::GetMinimumDelegation) => { - let feature_set = invoke_context.feature_set.as_ref(); - let minimum_delegation = crate::get_minimum_delegation(feature_set); - let minimum_delegation = Vec::from(minimum_delegation.to_le_bytes()); - invoke_context - .transaction_context - .set_return_data(id(), minimum_delegation) - } - Ok(StakeInstruction::DeactivateDelinquent) => { - let mut me = get_stake_account()?; - 
instruction_context.check_number_of_instruction_accounts(3)?; - - let clock = invoke_context.get_sysvar_cache().get_clock()?; - deactivate_delinquent( + redelegate( invoke_context, transaction_context, instruction_context, &mut me, 1, 2, - clock.epoch, + &signers, ) + } else { + Err(InstructionError::InvalidInstructionData) } - Ok(StakeInstruction::Redelegate) => { - let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::stake_redelegate_instruction::id()) - { - instruction_context.check_number_of_instruction_accounts(3)?; - if !invoke_context - .feature_set - .is_active(&feature_set::reduce_stake_warmup_cooldown::id()) - { - // Post feature activation, remove both the feature gate code and the config completely in the interface - let config_account = instruction_context - .try_borrow_instruction_account(transaction_context, 3)?; - #[allow(deprecated)] - if !config::check_id(config_account.get_key()) { - return Err(InstructionError::InvalidArgument); - } - config::from(&config_account).ok_or(InstructionError::InvalidArgument)?; - } - redelegate( - invoke_context, - transaction_context, - instruction_context, - &mut me, - 1, - 2, - &signers, - ) - } else { - Err(InstructionError::InvalidInstructionData) - } - } - Err(err) => Err(err), } + Err(err) => Err(err), } -); +}); #[cfg(test)] mod tests { @@ -572,7 +552,7 @@ mod tests { transaction_accounts, instruction_accounts, expected_result, - super::process_instruction, + Entrypoint::vm, |invoke_context| { invoke_context.feature_set = Arc::clone(&feature_set); }, @@ -7046,7 +7026,7 @@ mod tests { transaction_accounts, instruction_accounts, Ok(()), - super::process_instruction, + Entrypoint::vm, |invoke_context| { invoke_context.feature_set = Arc::clone(&feature_set); }, diff --git a/programs/system/src/system_processor.rs b/programs/system/src/system_processor.rs index dc6c1e3dbe9c92..b224997dc625a7 100644 --- a/programs/system/src/system_processor.rs +++ 
b/programs/system/src/system_processor.rs @@ -314,252 +314,246 @@ fn transfer_with_seed( pub const DEFAULT_COMPUTE_UNITS: u64 = 150; -declare_process_instruction!( - process_instruction, - DEFAULT_COMPUTE_UNITS, - |invoke_context| { - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let instruction_data = instruction_context.get_instruction_data(); - let instruction = limited_deserialize(instruction_data)?; - - trace!("process_instruction: {:?}", instruction); - - let signers = instruction_context.get_signers(transaction_context)?; - match instruction { - SystemInstruction::CreateAccount { +declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| { + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + let instruction_data = instruction_context.get_instruction_data(); + let instruction = limited_deserialize(instruction_data)?; + + trace!("process_instruction: {:?}", instruction); + + let signers = instruction_context.get_signers(transaction_context)?; + match instruction { + SystemInstruction::CreateAccount { + lamports, + space, + owner, + } => { + instruction_context.check_number_of_instruction_accounts(2)?; + let to_address = Address::create( + transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(1)?, + )?, + None, + invoke_context, + )?; + create_account( + 0, + 1, + &to_address, lamports, space, - owner, - } => { - instruction_context.check_number_of_instruction_accounts(2)?; - let to_address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?, - None, - invoke_context, - )?; - create_account( - 0, - 1, - &to_address, - lamports, - space, - &owner, - &signers, - invoke_context, - 
transaction_context, - instruction_context, - ) - } - SystemInstruction::CreateAccountWithSeed { - base, - seed, + &owner, + &signers, + invoke_context, + transaction_context, + instruction_context, + ) + } + SystemInstruction::CreateAccountWithSeed { + base, + seed, + lamports, + space, + owner, + } => { + instruction_context.check_number_of_instruction_accounts(2)?; + let to_address = Address::create( + transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(1)?, + )?, + Some((&base, &seed, &owner)), + invoke_context, + )?; + create_account( + 0, + 1, + &to_address, lamports, space, - owner, - } => { - instruction_context.check_number_of_instruction_accounts(2)?; - let to_address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?, - Some((&base, &seed, &owner)), - invoke_context, - )?; - create_account( - 0, - 1, - &to_address, - lamports, - space, - &owner, - &signers, - invoke_context, - transaction_context, - instruction_context, - ) - } - SystemInstruction::Assign { owner } => { - instruction_context.check_number_of_instruction_accounts(1)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?, - None, - invoke_context, - )?; - assign(&mut account, &address, &owner, &signers, invoke_context) - } - SystemInstruction::Transfer { lamports } => { - instruction_context.check_number_of_instruction_accounts(2)?; - transfer( - 0, - 1, - lamports, - invoke_context, - transaction_context, - instruction_context, - ) - } - SystemInstruction::TransferWithSeed { + &owner, + &signers, + invoke_context, + transaction_context, + instruction_context, + ) + } + SystemInstruction::Assign { owner } => { + 
instruction_context.check_number_of_instruction_accounts(1)?; + let mut account = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let address = Address::create( + transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(0)?, + )?, + None, + invoke_context, + )?; + assign(&mut account, &address, &owner, &signers, invoke_context) + } + SystemInstruction::Transfer { lamports } => { + instruction_context.check_number_of_instruction_accounts(2)?; + transfer( + 0, + 1, lamports, - from_seed, - from_owner, - } => { - instruction_context.check_number_of_instruction_accounts(3)?; - transfer_with_seed( - 0, - 1, - &from_seed, - &from_owner, - 2, - lamports, - invoke_context, - transaction_context, - instruction_context, - ) - } - SystemInstruction::AdvanceNonceAccount => { - instruction_context.check_number_of_instruction_accounts(1)?; - let mut me = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - #[allow(deprecated)] - let recent_blockhashes = get_sysvar_with_account_check::recent_blockhashes( - invoke_context, - instruction_context, - 1, - )?; - if recent_blockhashes.is_empty() { - ic_msg!( - invoke_context, - "Advance nonce account: recent blockhash list is empty", - ); - return Err(SystemError::NonceNoRecentBlockhashes.into()); - } - advance_nonce_account(&mut me, &signers, invoke_context) - } - SystemInstruction::WithdrawNonceAccount(lamports) => { - instruction_context.check_number_of_instruction_accounts(2)?; - #[allow(deprecated)] - let _recent_blockhashes = get_sysvar_with_account_check::recent_blockhashes( - invoke_context, - instruction_context, - 2, - )?; - let rent = - get_sysvar_with_account_check::rent(invoke_context, instruction_context, 3)?; - withdraw_nonce_account( - 0, - lamports, - 1, - &rent, - &signers, + invoke_context, + transaction_context, + instruction_context, + ) + } + SystemInstruction::TransferWithSeed { + lamports, + 
from_seed, + from_owner, + } => { + instruction_context.check_number_of_instruction_accounts(3)?; + transfer_with_seed( + 0, + 1, + &from_seed, + &from_owner, + 2, + lamports, + invoke_context, + transaction_context, + instruction_context, + ) + } + SystemInstruction::AdvanceNonceAccount => { + instruction_context.check_number_of_instruction_accounts(1)?; + let mut me = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + #[allow(deprecated)] + let recent_blockhashes = get_sysvar_with_account_check::recent_blockhashes( + invoke_context, + instruction_context, + 1, + )?; + if recent_blockhashes.is_empty() { + ic_msg!( invoke_context, - transaction_context, - instruction_context, - ) + "Advance nonce account: recent blockhash list is empty", + ); + return Err(SystemError::NonceNoRecentBlockhashes.into()); } - SystemInstruction::InitializeNonceAccount(authorized) => { - instruction_context.check_number_of_instruction_accounts(1)?; - let mut me = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - #[allow(deprecated)] - let recent_blockhashes = get_sysvar_with_account_check::recent_blockhashes( + advance_nonce_account(&mut me, &signers, invoke_context) + } + SystemInstruction::WithdrawNonceAccount(lamports) => { + instruction_context.check_number_of_instruction_accounts(2)?; + #[allow(deprecated)] + let _recent_blockhashes = get_sysvar_with_account_check::recent_blockhashes( + invoke_context, + instruction_context, + 2, + )?; + let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 3)?; + withdraw_nonce_account( + 0, + lamports, + 1, + &rent, + &signers, + invoke_context, + transaction_context, + instruction_context, + ) + } + SystemInstruction::InitializeNonceAccount(authorized) => { + instruction_context.check_number_of_instruction_accounts(1)?; + let mut me = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + #[allow(deprecated)] + let 
recent_blockhashes = get_sysvar_with_account_check::recent_blockhashes( + invoke_context, + instruction_context, + 1, + )?; + if recent_blockhashes.is_empty() { + ic_msg!( invoke_context, - instruction_context, - 1, - )?; - if recent_blockhashes.is_empty() { - ic_msg!( - invoke_context, - "Initialize nonce account: recent blockhash list is empty", - ); - return Err(SystemError::NonceNoRecentBlockhashes.into()); - } - let rent = - get_sysvar_with_account_check::rent(invoke_context, instruction_context, 2)?; - initialize_nonce_account(&mut me, &authorized, &rent, invoke_context) + "Initialize nonce account: recent blockhash list is empty", + ); + return Err(SystemError::NonceNoRecentBlockhashes.into()); } - SystemInstruction::AuthorizeNonceAccount(nonce_authority) => { - instruction_context.check_number_of_instruction_accounts(1)?; - let mut me = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - authorize_nonce_account(&mut me, &nonce_authority, &signers, invoke_context) + let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 2)?; + initialize_nonce_account(&mut me, &authorized, &rent, invoke_context) + } + SystemInstruction::AuthorizeNonceAccount(nonce_authority) => { + instruction_context.check_number_of_instruction_accounts(1)?; + let mut me = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + authorize_nonce_account(&mut me, &nonce_authority, &signers, invoke_context) + } + SystemInstruction::UpgradeNonceAccount => { + instruction_context.check_number_of_instruction_accounts(1)?; + let mut nonce_account = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + if !system_program::check_id(nonce_account.get_owner()) { + return Err(InstructionError::InvalidAccountOwner); } - SystemInstruction::UpgradeNonceAccount => { - instruction_context.check_number_of_instruction_accounts(1)?; - let mut nonce_account = - 
instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - if !system_program::check_id(nonce_account.get_owner()) { - return Err(InstructionError::InvalidAccountOwner); - } - if !nonce_account.is_writable() { - return Err(InstructionError::InvalidArgument); - } - let nonce_versions: nonce::state::Versions = nonce_account.get_state()?; - match nonce_versions.upgrade() { - None => Err(InstructionError::InvalidArgument), - Some(nonce_versions) => nonce_account.set_state(&nonce_versions), - } + if !nonce_account.is_writable() { + return Err(InstructionError::InvalidArgument); } - SystemInstruction::Allocate { space } => { - instruction_context.check_number_of_instruction_accounts(1)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?, - None, - invoke_context, - )?; - allocate(&mut account, &address, space, &signers, invoke_context) + let nonce_versions: nonce::state::Versions = nonce_account.get_state()?; + match nonce_versions.upgrade() { + None => Err(InstructionError::InvalidArgument), + Some(nonce_versions) => nonce_account.set_state(&nonce_versions), } - SystemInstruction::AllocateWithSeed { - base, - seed, + } + SystemInstruction::Allocate { space } => { + instruction_context.check_number_of_instruction_accounts(1)?; + let mut account = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let address = Address::create( + transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(0)?, + )?, + None, + invoke_context, + )?; + allocate(&mut account, &address, space, &signers, invoke_context) + } + SystemInstruction::AllocateWithSeed { + base, + seed, + space, + owner, + } => { + instruction_context.check_number_of_instruction_accounts(1)?; + let mut 
account = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let address = Address::create( + transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(0)?, + )?, + Some((&base, &seed, &owner)), + invoke_context, + )?; + allocate_and_assign( + &mut account, + &address, space, - owner, - } => { - instruction_context.check_number_of_instruction_accounts(1)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?, - Some((&base, &seed, &owner)), - invoke_context, - )?; - allocate_and_assign( - &mut account, - &address, - space, - &owner, - &signers, - invoke_context, - ) - } - SystemInstruction::AssignWithSeed { base, seed, owner } => { - instruction_context.check_number_of_instruction_accounts(1)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?, - Some((&base, &seed, &owner)), - invoke_context, - )?; - assign(&mut account, &address, &owner, &signers, invoke_context) - } + &owner, + &signers, + invoke_context, + ) + } + SystemInstruction::AssignWithSeed { base, seed, owner } => { + instruction_context.check_number_of_instruction_accounts(1)?; + let mut account = + instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let address = Address::create( + transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(0)?, + )?, + Some((&base, &seed, &owner)), + invoke_context, + )?; + assign(&mut account, &address, &owner, &signers, invoke_context) } } -); +}); #[cfg(test)] mod tests { @@ 
-609,7 +603,7 @@ mod tests { transaction_accounts, instruction_accounts, expected_result, - super::process_instruction, + Entrypoint::vm, |_invoke_context| {}, |_invoke_context| {}, ) @@ -1599,7 +1593,7 @@ mod tests { }, ], Ok(()), - super::process_instruction, + Entrypoint::vm, |invoke_context: &mut InvokeContext| { invoke_context.blockhash = hash(&serialize(&0).unwrap()); }, @@ -1946,7 +1940,7 @@ mod tests { }, ], Err(SystemError::NonceNoRecentBlockhashes.into()), - super::process_instruction, + Entrypoint::vm, |invoke_context: &mut InvokeContext| { invoke_context.blockhash = hash(&serialize(&0).unwrap()); }, diff --git a/programs/vote/benches/process_vote.rs b/programs/vote/benches/process_vote.rs index 6c9cb979c90484..9008971f086237 100644 --- a/programs/vote/benches/process_vote.rs +++ b/programs/vote/benches/process_vote.rs @@ -108,7 +108,7 @@ fn bench_process_vote_instruction( transaction_accounts.clone(), instruction_account_metas.clone(), Ok(()), - solana_vote_program::vote_processor::process_instruction, + solana_vote_program::vote_processor::Entrypoint::vm, |_invoke_context| {}, |_invoke_context| {}, ); diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index 423193f5d333c9..d09309ddc81fb5 100644 --- a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -54,209 +54,198 @@ fn process_authorize_with_seed_instruction( // units; can consume based on instructions in the future like `bpf_loader` does. 
pub const DEFAULT_COMPUTE_UNITS: u64 = 2_100; -declare_process_instruction!( - process_instruction, - DEFAULT_COMPUTE_UNITS, - |invoke_context| { - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let data = instruction_context.get_instruction_data(); - - trace!("process_instruction: {:?}", data); - - let mut me = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - if *me.get_owner() != id() { - return Err(InstructionError::InvalidAccountOwner); - } +declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| { + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + let data = instruction_context.get_instruction_data(); - let signers = instruction_context.get_signers(transaction_context)?; - match limited_deserialize(data)? { - VoteInstruction::InitializeAccount(vote_init) => { - let rent = - get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - if !rent.is_exempt(me.get_lamports(), me.get_data().len()) { - return Err(InstructionError::InsufficientFunds); - } - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - vote_state::initialize_account( - &mut me, - &vote_init, - &signers, - &clock, - &invoke_context.feature_set, - ) + trace!("process_instruction: {:?}", data); + + let mut me = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + if *me.get_owner() != id() { + return Err(InstructionError::InvalidAccountOwner); + } + + let signers = instruction_context.get_signers(transaction_context)?; + match limited_deserialize(data)? 
{ + VoteInstruction::InitializeAccount(vote_init) => { + let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; + if !rent.is_exempt(me.get_lamports(), me.get_data().len()) { + return Err(InstructionError::InsufficientFunds); } - VoteInstruction::Authorize(voter_pubkey, vote_authorize) => { - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - vote_state::authorize( - &mut me, - &voter_pubkey, - vote_authorize, - &signers, - &clock, - &invoke_context.feature_set, - ) + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; + vote_state::initialize_account( + &mut me, + &vote_init, + &signers, + &clock, + &invoke_context.feature_set, + ) + } + VoteInstruction::Authorize(voter_pubkey, vote_authorize) => { + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + vote_state::authorize( + &mut me, + &voter_pubkey, + vote_authorize, + &signers, + &clock, + &invoke_context.feature_set, + ) + } + VoteInstruction::AuthorizeWithSeed(args) => { + instruction_context.check_number_of_instruction_accounts(3)?; + process_authorize_with_seed_instruction( + invoke_context, + instruction_context, + transaction_context, + &mut me, + &args.new_authority, + args.authorization_type, + &args.current_authority_derived_key_owner, + args.current_authority_derived_key_seed.as_str(), + ) + } + VoteInstruction::AuthorizeCheckedWithSeed(args) => { + instruction_context.check_number_of_instruction_accounts(4)?; + let new_authority = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? 
{ + return Err(InstructionError::MissingRequiredSignature); } - VoteInstruction::AuthorizeWithSeed(args) => { - instruction_context.check_number_of_instruction_accounts(3)?; - process_authorize_with_seed_instruction( - invoke_context, - instruction_context, - transaction_context, - &mut me, - &args.new_authority, - args.authorization_type, - &args.current_authority_derived_key_owner, - args.current_authority_derived_key_seed.as_str(), - ) + process_authorize_with_seed_instruction( + invoke_context, + instruction_context, + transaction_context, + &mut me, + new_authority, + args.authorization_type, + &args.current_authority_derived_key_owner, + args.current_authority_derived_key_seed.as_str(), + ) + } + VoteInstruction::UpdateValidatorIdentity => { + instruction_context.check_number_of_instruction_accounts(2)?; + let node_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(1)?, + )?; + vote_state::update_validator_identity( + &mut me, + node_pubkey, + &signers, + &invoke_context.feature_set, + ) + } + VoteInstruction::UpdateCommission(commission) => { + if invoke_context.feature_set.is_active( + &feature_set::commission_updates_only_allowed_in_first_half_of_epoch::id(), + ) { + let sysvar_cache = invoke_context.get_sysvar_cache(); + let epoch_schedule = sysvar_cache.get_epoch_schedule()?; + let clock = sysvar_cache.get_clock()?; + if !vote_state::is_commission_update_allowed(clock.slot, &epoch_schedule) { + return Err(VoteError::CommissionUpdateTooLate.into()); + } } - VoteInstruction::AuthorizeCheckedWithSeed(args) => { + vote_state::update_commission( + &mut me, + commission, + &signers, + &invoke_context.feature_set, + ) + } + VoteInstruction::Vote(vote) | VoteInstruction::VoteSwitch(vote, _) => { + let slot_hashes = + get_sysvar_with_account_check::slot_hashes(invoke_context, instruction_context, 1)?; + let clock = + get_sysvar_with_account_check::clock(invoke_context, 
instruction_context, 2)?; + vote_state::process_vote_with_account( + &mut me, + &slot_hashes, + &clock, + &vote, + &signers, + &invoke_context.feature_set, + ) + } + VoteInstruction::UpdateVoteState(vote_state_update) + | VoteInstruction::UpdateVoteStateSwitch(vote_state_update, _) => { + let sysvar_cache = invoke_context.get_sysvar_cache(); + let slot_hashes = sysvar_cache.get_slot_hashes()?; + let clock = sysvar_cache.get_clock()?; + vote_state::process_vote_state_update( + &mut me, + slot_hashes.slot_hashes(), + &clock, + vote_state_update, + &signers, + &invoke_context.feature_set, + ) + } + VoteInstruction::CompactUpdateVoteState(vote_state_update) + | VoteInstruction::CompactUpdateVoteStateSwitch(vote_state_update, _) => { + let sysvar_cache = invoke_context.get_sysvar_cache(); + let slot_hashes = sysvar_cache.get_slot_hashes()?; + let clock = sysvar_cache.get_clock()?; + vote_state::process_vote_state_update( + &mut me, + slot_hashes.slot_hashes(), + &clock, + vote_state_update, + &signers, + &invoke_context.feature_set, + ) + } + + VoteInstruction::Withdraw(lamports) => { + instruction_context.check_number_of_instruction_accounts(2)?; + let rent_sysvar = invoke_context.get_sysvar_cache().get_rent()?; + let clock_sysvar = invoke_context.get_sysvar_cache().get_clock()?; + + drop(me); + vote_state::withdraw( + transaction_context, + instruction_context, + 0, + lamports, + 1, + &signers, + &rent_sysvar, + &clock_sysvar, + &invoke_context.feature_set, + ) + } + VoteInstruction::AuthorizeChecked(vote_authorize) => { + if invoke_context + .feature_set + .is_active(&feature_set::vote_stake_checked_instructions::id()) + { instruction_context.check_number_of_instruction_accounts(4)?; - let new_authority = transaction_context.get_key_of_account_at_index( + let voter_pubkey = transaction_context.get_key_of_account_at_index( instruction_context.get_index_of_instruction_account_in_transaction(3)?, )?; if !instruction_context.is_instruction_account_signer(3)? 
{ return Err(InstructionError::MissingRequiredSignature); } - process_authorize_with_seed_instruction( - invoke_context, - instruction_context, - transaction_context, - &mut me, - new_authority, - args.authorization_type, - &args.current_authority_derived_key_owner, - args.current_authority_derived_key_seed.as_str(), - ) - } - VoteInstruction::UpdateValidatorIdentity => { - instruction_context.check_number_of_instruction_accounts(2)?; - let node_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?; - vote_state::update_validator_identity( - &mut me, - node_pubkey, - &signers, - &invoke_context.feature_set, - ) - } - VoteInstruction::UpdateCommission(commission) => { - if invoke_context.feature_set.is_active( - &feature_set::commission_updates_only_allowed_in_first_half_of_epoch::id(), - ) { - let sysvar_cache = invoke_context.get_sysvar_cache(); - let epoch_schedule = sysvar_cache.get_epoch_schedule()?; - let clock = sysvar_cache.get_clock()?; - if !vote_state::is_commission_update_allowed(clock.slot, &epoch_schedule) { - return Err(VoteError::CommissionUpdateTooLate.into()); - } - } - vote_state::update_commission( - &mut me, - commission, - &signers, - &invoke_context.feature_set, - ) - } - VoteInstruction::Vote(vote) | VoteInstruction::VoteSwitch(vote, _) => { - let slot_hashes = get_sysvar_with_account_check::slot_hashes( - invoke_context, - instruction_context, - 1, - )?; let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - vote_state::process_vote_with_account( - &mut me, - &slot_hashes, - &clock, - &vote, - &signers, - &invoke_context.feature_set, - ) - } - VoteInstruction::UpdateVoteState(vote_state_update) - | VoteInstruction::UpdateVoteStateSwitch(vote_state_update, _) => { - let sysvar_cache = invoke_context.get_sysvar_cache(); - let slot_hashes = sysvar_cache.get_slot_hashes()?; - let clock = sysvar_cache.get_clock()?; - 
vote_state::process_vote_state_update( + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + vote_state::authorize( &mut me, - slot_hashes.slot_hashes(), - &clock, - vote_state_update, + voter_pubkey, + vote_authorize, &signers, - &invoke_context.feature_set, - ) - } - VoteInstruction::CompactUpdateVoteState(vote_state_update) - | VoteInstruction::CompactUpdateVoteStateSwitch(vote_state_update, _) => { - let sysvar_cache = invoke_context.get_sysvar_cache(); - let slot_hashes = sysvar_cache.get_slot_hashes()?; - let clock = sysvar_cache.get_clock()?; - vote_state::process_vote_state_update( - &mut me, - slot_hashes.slot_hashes(), &clock, - vote_state_update, - &signers, - &invoke_context.feature_set, - ) - } - - VoteInstruction::Withdraw(lamports) => { - instruction_context.check_number_of_instruction_accounts(2)?; - let rent_sysvar = invoke_context.get_sysvar_cache().get_rent()?; - let clock_sysvar = invoke_context.get_sysvar_cache().get_clock()?; - - drop(me); - vote_state::withdraw( - transaction_context, - instruction_context, - 0, - lamports, - 1, - &signers, - &rent_sysvar, - &clock_sysvar, &invoke_context.feature_set, ) - } - VoteInstruction::AuthorizeChecked(vote_authorize) => { - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - instruction_context.check_number_of_instruction_accounts(4)?; - let voter_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? 
{ - return Err(InstructionError::MissingRequiredSignature); - } - let clock = get_sysvar_with_account_check::clock( - invoke_context, - instruction_context, - 1, - )?; - vote_state::authorize( - &mut me, - voter_pubkey, - vote_authorize, - &signers, - &clock, - &invoke_context.feature_set, - ) - } else { - Err(InstructionError::InvalidInstructionData) - } + } else { + Err(InstructionError::InvalidInstructionData) } } } -); +}); #[cfg(test)] mod tests { @@ -320,7 +309,7 @@ mod tests { transaction_accounts, instruction_accounts, expected_result, - super::process_instruction, + Entrypoint::vm, |_invoke_context| {}, |_invoke_context| {}, ) @@ -339,7 +328,7 @@ mod tests { transaction_accounts, instruction_accounts, expected_result, - super::process_instruction, + Entrypoint::vm, |invoke_context| { invoke_context.feature_set = std::sync::Arc::new(FeatureSet::default()); }, diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index 6ed1fb1f33e17f..3e43c564e70cef 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -130,7 +130,7 @@ fn process_close_proof_context(invoke_context: &mut InvokeContext) -> Result<(), Ok(()) } -declare_process_instruction!(process_instruction, 0, |invoke_context| { +declare_process_instruction!(Entrypoint, 0, |invoke_context| { // Consume compute units if feature `native_programs_consume_cu` is activated let native_programs_consume_cu = invoke_context .feature_set diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs index b853789ddbc21b..21f4976d695e73 100644 --- a/runtime/benches/bank.rs +++ b/runtime/benches/bank.rs @@ -125,13 +125,13 @@ fn do_bench_transactions( // freeze bank so that slot hashes is populated bank.freeze(); - declare_process_instruction!(process_instruction, 1, |_invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |_invoke_context| { // Do nothing Ok(()) }); let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), 1); 
- bank.add_mockup_builtin(Pubkey::from(BUILTIN_PROGRAM_ID), process_instruction); + bank.add_mockup_builtin(Pubkey::from(BUILTIN_PROGRAM_ID), MockBuiltin::vm); bank.add_builtin_account("solana_noop_program", &Pubkey::from(NOOP_PROGRAM_ID), false); let bank = Arc::new(bank); let bank_client = BankClient::new_shared(bank.clone()); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 9758210182e918..8402c2f05d641e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -107,7 +107,7 @@ use { solana_program_runtime::{ accounts_data_meter::MAX_ACCOUNTS_DATA_LEN, compute_budget::{self, ComputeBudget}, - invoke_context::ProcessInstructionWithContext, + invoke_context::BuiltinFunctionWithContext, loaded_programs::{ LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, WorkingSlot, DELAY_VISIBILITY_SLOT_OFFSET, @@ -7885,12 +7885,12 @@ impl Bank { pub fn add_mockup_builtin( &mut self, program_id: Pubkey, - entrypoint: ProcessInstructionWithContext, + builtin_function: BuiltinFunctionWithContext, ) { self.add_builtin( program_id, "mockup".to_string(), - LoadedProgram::new_builtin(self.slot, 0, entrypoint), + LoadedProgram::new_builtin(self.slot, 0, builtin_function), ); } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 82393ef7161a2b..a20c6e37406116 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -653,7 +653,7 @@ fn assert_capitalization_diff( } } -declare_process_instruction!(process_instruction, 1, |_invoke_context| { +declare_process_instruction!(MockBuiltin, 1, |_invoke_context| { // Default for all tests which don't bring their own processor Ok(()) }); @@ -1246,7 +1246,7 @@ fn test_rent_complex() { Deduction, } - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = 
transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); @@ -1281,7 +1281,7 @@ fn test_rent_complex() { root_bank.restore_old_behavior_for_fragile_tests(); let root_bank = Arc::new(root_bank); let mut bank = create_child_bank_for_rent_test(root_bank, &genesis_config); - bank.add_mockup_builtin(mock_program_id, process_instruction); + bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm); assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -4684,7 +4684,7 @@ fn test_add_builtin() { fn mock_vote_program_id() -> Pubkey { Pubkey::from([42u8; 32]) } - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let program_id = instruction_context.get_last_program_key(transaction_context)?; @@ -4695,7 +4695,7 @@ fn test_add_builtin() { }); assert!(bank.get_account(&mock_vote_program_id()).is_none()); - bank.add_mockup_builtin(mock_vote_program_id(), process_instruction); + bank.add_mockup_builtin(mock_vote_program_id(), MockBuiltin::vm); assert!(bank.get_account(&mock_vote_program_id()).is_some()); let mock_account = Keypair::new(); @@ -4740,7 +4740,7 @@ fn test_add_duplicate_static_program() { } = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 0); let bank = Bank::new_for_tests(&genesis_config); - declare_process_instruction!(process_instruction, 1, |_invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |_invoke_context| { Err(InstructionError::Custom(42)) }); @@ -4771,7 +4771,7 @@ fn test_add_duplicate_static_program() { let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot); let vote_loader_account = bank.get_account(&solana_vote_program::id()).unwrap(); - bank.add_mockup_builtin(solana_vote_program::id(), 
process_instruction); + bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); let new_vote_loader_account = bank.get_account(&solana_vote_program::id()).unwrap(); // Vote loader account should not be updated since it was included in the genesis config. assert_eq!(vote_loader_account.data(), new_vote_loader_account.data()); @@ -4789,7 +4789,7 @@ fn test_add_instruction_processor_for_existing_unrelated_accounts() { for pass in 0..5 { let mut bank = create_simple_test_bank(500); - declare_process_instruction!(process_instruction, 1, |_invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |_invoke_context| { Err(InstructionError::Custom(42)) }); @@ -4825,12 +4825,12 @@ fn test_add_instruction_processor_for_existing_unrelated_accounts() { bank.add_builtin( vote_id, "mock_program1".to_string(), - LoadedProgram::new_builtin(0, 0, process_instruction), + LoadedProgram::new_builtin(0, 0, MockBuiltin::vm), ); bank.add_builtin( stake_id, "mock_program2".to_string(), - LoadedProgram::new_builtin(0, 0, process_instruction), + LoadedProgram::new_builtin(0, 0, MockBuiltin::vm), ); { let stakes = bank.stakes_cache.stakes(); @@ -4854,8 +4854,8 @@ fn test_add_instruction_processor_for_existing_unrelated_accounts() { // Re-adding builtin programs should be no-op bank.update_accounts_hash_for_tests(); let old_hash = bank.get_accounts_hash().unwrap(); - bank.add_mockup_builtin(vote_id, process_instruction); - bank.add_mockup_builtin(stake_id, process_instruction); + bank.add_mockup_builtin(vote_id, MockBuiltin::vm); + bank.add_mockup_builtin(stake_id, MockBuiltin::vm); add_root_and_flush_write_cache(&bank); bank.update_accounts_hash_for_tests(); let new_hash = bank.get_accounts_hash().unwrap(); @@ -6086,7 +6086,7 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { let (genesis_config, mint_keypair) = create_genesis_config(500); let mut bank = Bank::new_for_tests(&genesis_config); - declare_process_instruction!(process_instruction, 1, 
|invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); @@ -6107,7 +6107,7 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { }); let mock_program_id = Pubkey::from([2u8; 32]); - bank.add_mockup_builtin(mock_program_id, process_instruction); + bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm); let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -6143,7 +6143,7 @@ fn test_transaction_with_program_ids_passed_to_programs() { let mut bank = Bank::new_for_tests(&genesis_config); let mock_program_id = Pubkey::from([2u8; 32]); - bank.add_mockup_builtin(mock_program_id, process_instruction); + bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm); let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -6198,7 +6198,7 @@ fn test_account_ids_after_program_ids() { let slot = bank.slot().saturating_add(1); let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot); - bank.add_mockup_builtin(solana_vote_program::id(), process_instruction); + bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); let result = bank.process_transaction(&tx); assert_eq!(result, Ok(())); let account = bank.get_account(&solana_vote_program::id()).unwrap(); @@ -6248,7 +6248,7 @@ fn test_duplicate_account_key() { AccountMeta::new(to_pubkey, false), ]; - bank.add_mockup_builtin(solana_vote_program::id(), process_instruction); + bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( @@ -6277,7 +6277,7 @@ fn 
test_process_transaction_with_too_many_account_locks() { AccountMeta::new(to_pubkey, false), ]; - bank.add_mockup_builtin(solana_vote_program::id(), process_instruction); + bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( @@ -6310,7 +6310,7 @@ fn test_program_id_as_payer() { AccountMeta::new(to_pubkey, false), ]; - bank.add_mockup_builtin(solana_vote_program::id(), process_instruction); + bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( @@ -6356,7 +6356,7 @@ fn test_ref_account_key_after_program_id() { let slot = bank.slot().saturating_add(1); let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot); - bank.add_mockup_builtin(solana_vote_program::id(), process_instruction); + bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( @@ -6390,7 +6390,7 @@ fn test_fuzz_instructions() { bank.add_builtin( key, name.clone(), - LoadedProgram::new_builtin(0, 0, process_instruction), + LoadedProgram::new_builtin(0, 0, MockBuiltin::vm), ); (key, name.as_bytes().to_vec()) }) @@ -6584,7 +6584,7 @@ fn test_bank_hash_consistency() { #[ignore] #[test] fn test_same_program_id_uses_unique_executable_accounts() { - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; instruction_context @@ -6597,7 +6597,7 @@ fn 
test_same_program_id_uses_unique_executable_accounts() { // Add a new program let program1_pubkey = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program1_pubkey, process_instruction); + bank.add_mockup_builtin(program1_pubkey, MockBuiltin::vm); // Add a new program owned by the first let program2_pubkey = solana_sdk::pubkey::new_rand(); @@ -6814,13 +6814,13 @@ fn test_add_builtin_no_overwrite() { Arc::get_mut(&mut bank) .unwrap() - .add_mockup_builtin(program_id, process_instruction); + .add_mockup_builtin(program_id, MockBuiltin::vm); assert_eq!(bank.get_account_modified_slot(&program_id).unwrap().1, slot); let mut bank = Arc::new(new_from_parent(bank)); Arc::get_mut(&mut bank) .unwrap() - .add_mockup_builtin(program_id, process_instruction); + .add_mockup_builtin(program_id, MockBuiltin::vm); assert_eq!(bank.get_account_modified_slot(&program_id).unwrap().1, slot); } @@ -6838,13 +6838,13 @@ fn test_add_builtin_loader_no_overwrite() { Arc::get_mut(&mut bank) .unwrap() - .add_mockup_builtin(loader_id, process_instruction); + .add_mockup_builtin(loader_id, MockBuiltin::vm); assert_eq!(bank.get_account_modified_slot(&loader_id).unwrap().1, slot); let mut bank = Arc::new(new_from_parent(bank)); Arc::get_mut(&mut bank) .unwrap() - .add_mockup_builtin(loader_id, process_instruction); + .add_mockup_builtin(loader_id, MockBuiltin::vm); assert_eq!(bank.get_account_modified_slot(&loader_id).unwrap().1, slot); } @@ -7403,7 +7403,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { ], Vec::new(), Ok(()), - solana_bpf_loader_program::process_instruction, + solana_bpf_loader_program::Entrypoint::vm, |invoke_context| { invoke_context .programs_modified_by_tx @@ -9708,7 +9708,7 @@ fn test_tx_return_data() { ); let mut bank = Bank::new_for_tests(&genesis_config); - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let mock_program_id = Pubkey::from([2u8; 32]); let 
transaction_context = &mut invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; @@ -9726,7 +9726,7 @@ fn test_tx_return_data() { let mock_program_id = Pubkey::from([2u8; 32]); let blockhash = bank.last_blockhash(); - bank.add_mockup_builtin(mock_program_id, process_instruction); + bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm); for index in [ None, @@ -9906,7 +9906,7 @@ fn test_transfer_sysvar() { ); let mut bank = Bank::new_for_tests(&genesis_config); - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; instruction_context @@ -9916,7 +9916,7 @@ fn test_transfer_sysvar() { }); let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, process_instruction); + bank.add_mockup_builtin(program_id, MockBuiltin::vm); let blockhash = bank.last_blockhash(); #[allow(deprecated)] @@ -10115,7 +10115,7 @@ fn test_compute_budget_program_noop() { ); let mut bank = Bank::new_for_tests(&genesis_config); - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let compute_budget = invoke_context.get_compute_budget(); assert_eq!( *compute_budget, @@ -10128,7 +10128,7 @@ fn test_compute_budget_program_noop() { Ok(()) }); let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, process_instruction); + bank.add_mockup_builtin(program_id, MockBuiltin::vm); let message = Message::new( &[ @@ -10158,7 +10158,7 @@ fn test_compute_request_instruction() { ); let mut bank = Bank::new_for_tests(&genesis_config); - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let 
compute_budget = invoke_context.get_compute_budget(); assert_eq!( *compute_budget, @@ -10171,7 +10171,7 @@ fn test_compute_request_instruction() { Ok(()) }); let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, process_instruction); + bank.add_mockup_builtin(program_id, MockBuiltin::vm); let message = Message::new( &[ @@ -10208,7 +10208,7 @@ fn test_failed_compute_request_instruction() { bank.transfer(10, &mint_keypair, &payer1_keypair.pubkey()) .unwrap(); - declare_process_instruction!(process_instruction, 1, |invoke_context| { + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let compute_budget = invoke_context.get_compute_budget(); assert_eq!( *compute_budget, @@ -10221,7 +10221,7 @@ fn test_failed_compute_request_instruction() { Ok(()) }); let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, process_instruction); + bank.add_mockup_builtin(program_id, MockBuiltin::vm); // This message will not be executed because the compute budget request is invalid let message0 = Message::new( @@ -10825,7 +10825,7 @@ enum MockTransferInstruction { Transfer(u64), } -declare_process_instruction!(mock_transfer_process_instruction, 1, |invoke_context| { +declare_process_instruction!(MockTransferBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); @@ -10908,7 +10908,7 @@ fn test_invalid_rent_state_changes_existing_accounts() { ); let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, mock_transfer_process_instruction); + bank.add_mockup_builtin(mock_program_id, MockTransferBuiltin::vm); let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10991,7 +10991,7 @@ fn test_invalid_rent_state_changes_new_accounts() { let 
rent_exempt_minimum = genesis_config.rent.minimum_balance(account_data_size); let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, mock_transfer_process_instruction); + bank.add_mockup_builtin(mock_program_id, MockTransferBuiltin::vm); let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -11050,7 +11050,7 @@ fn test_drained_created_account() { let created_keypair = Keypair::new(); let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, mock_transfer_process_instruction); + bank.add_mockup_builtin(mock_program_id, MockTransferBuiltin::vm); let recent_blockhash = bank.last_blockhash(); // Create and drain a small data size account @@ -11578,7 +11578,7 @@ enum MockReallocInstruction { Realloc(usize, u64, Pubkey), } -declare_process_instruction!(mock_realloc_process_instruction, 1, |invoke_context| { +declare_process_instruction!(MockReallocBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); @@ -11658,7 +11658,7 @@ fn test_resize_and_rent() { let mut bank = Bank::new_for_tests(&genesis_config); let mock_program_id = Pubkey::new_unique(); - bank.add_mockup_builtin(mock_program_id, mock_realloc_process_instruction); + bank.add_mockup_builtin(mock_program_id, MockReallocBuiltin::vm); let recent_blockhash = bank.last_blockhash(); let account_data_size_small = 1024; @@ -11929,7 +11929,7 @@ fn test_accounts_data_size_and_resize_transactions() { } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); let mut bank = Bank::new_for_tests(&genesis_config); let mock_program_id = Pubkey::new_unique(); - bank.add_mockup_builtin(mock_program_id, mock_realloc_process_instruction); + bank.add_mockup_builtin(mock_program_id, MockReallocBuiltin::vm); 
let recent_blockhash = bank.last_blockhash(); diff --git a/runtime/src/builtins.rs b/runtime/src/builtins.rs index 06a1709335b1db..2c7c36fa0ec415 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/builtins.rs @@ -1,5 +1,5 @@ use { - solana_program_runtime::invoke_context::ProcessInstructionWithContext, + solana_program_runtime::invoke_context::BuiltinFunctionWithContext, solana_sdk::{ bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, feature_set, pubkey::Pubkey, }, @@ -10,7 +10,7 @@ pub struct BuiltinPrototype { pub feature_id: Option, pub program_id: Pubkey, pub name: &'static str, - pub entrypoint: ProcessInstructionWithContext, + pub entrypoint: BuiltinFunctionWithContext, } impl std::fmt::Debug for BuiltinPrototype { @@ -27,7 +27,7 @@ impl std::fmt::Debug for BuiltinPrototype { impl solana_frozen_abi::abi_example::AbiExample for BuiltinPrototype { fn example() -> Self { // BuiltinPrototype isn't serializable by definition. - solana_program_runtime::declare_process_instruction!(entrypoint, 0, |_invoke_context| { + solana_program_runtime::declare_process_instruction!(MockBuiltin, 0, |_invoke_context| { // Do nothing Ok(()) }); @@ -35,7 +35,7 @@ impl solana_frozen_abi::abi_example::AbiExample for BuiltinPrototype { feature_id: None, program_id: Pubkey::default(), name: "", - entrypoint, + entrypoint: MockBuiltin::vm, } } } @@ -45,66 +45,66 @@ pub static BUILTINS: &[BuiltinPrototype] = &[ feature_id: None, program_id: solana_system_program::id(), name: "system_program", - entrypoint: solana_system_program::system_processor::process_instruction, + entrypoint: solana_system_program::system_processor::Entrypoint::vm, }, BuiltinPrototype { feature_id: None, program_id: solana_vote_program::id(), name: "vote_program", - entrypoint: solana_vote_program::vote_processor::process_instruction, + entrypoint: solana_vote_program::vote_processor::Entrypoint::vm, }, BuiltinPrototype { feature_id: None, program_id: solana_stake_program::id(), name: "stake_program", - 
entrypoint: solana_stake_program::stake_instruction::process_instruction, + entrypoint: solana_stake_program::stake_instruction::Entrypoint::vm, }, BuiltinPrototype { feature_id: None, program_id: solana_config_program::id(), name: "config_program", - entrypoint: solana_config_program::config_processor::process_instruction, + entrypoint: solana_config_program::config_processor::Entrypoint::vm, }, BuiltinPrototype { feature_id: None, program_id: bpf_loader_deprecated::id(), name: "solana_bpf_loader_deprecated_program", - entrypoint: solana_bpf_loader_program::process_instruction, + entrypoint: solana_bpf_loader_program::Entrypoint::vm, }, BuiltinPrototype { feature_id: None, program_id: bpf_loader::id(), name: "solana_bpf_loader_program", - entrypoint: solana_bpf_loader_program::process_instruction, + entrypoint: solana_bpf_loader_program::Entrypoint::vm, }, BuiltinPrototype { feature_id: None, program_id: bpf_loader_upgradeable::id(), name: "solana_bpf_loader_upgradeable_program", - entrypoint: solana_bpf_loader_program::process_instruction, + entrypoint: solana_bpf_loader_program::Entrypoint::vm, }, BuiltinPrototype { feature_id: None, program_id: solana_sdk::compute_budget::id(), name: "compute_budget_program", - entrypoint: solana_compute_budget_program::process_instruction, + entrypoint: solana_compute_budget_program::Entrypoint::vm, }, BuiltinPrototype { feature_id: None, program_id: solana_sdk::address_lookup_table::program::id(), name: "address_lookup_table_program", - entrypoint: solana_address_lookup_table_program::processor::process_instruction, + entrypoint: solana_address_lookup_table_program::processor::Entrypoint::vm, }, BuiltinPrototype { feature_id: Some(feature_set::zk_token_sdk_enabled::id()), program_id: solana_zk_token_sdk::zk_token_proof_program::id(), name: "zk_token_proof_program", - entrypoint: solana_zk_token_proof_program::process_instruction, + entrypoint: solana_zk_token_proof_program::Entrypoint::vm, }, BuiltinPrototype { feature_id: 
Some(feature_set::enable_program_runtime_v2_and_loader_v4::id()), program_id: solana_sdk::loader_v4::id(), name: "loader_v4", - entrypoint: solana_loader_v4_program::process_instruction, + entrypoint: solana_loader_v4_program::Entrypoint::vm, }, ]; From dd2b1bb5a00ad9385aa16845148a7d32f68ca3c7 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Fri, 20 Oct 2023 13:02:37 -0700 Subject: [PATCH 399/407] [zk-token-sdk] Limit max seed length for key derivations (#33700) * limit max seed length for elgamal keypairs * limit max seed length for authenticated encryption keys * Apply suggestions from code review Co-authored-by: Jon Cinque * rename `SeedLengthTooLarge` to `SeedLengthTooLong` --------- Co-authored-by: Jon Cinque --- zk-token-sdk/src/encryption/auth_encryption.rs | 18 ++++++++++++++++++ zk-token-sdk/src/encryption/elgamal.rs | 9 +++++++++ 2 files changed, 27 insertions(+) diff --git a/zk-token-sdk/src/encryption/auth_encryption.rs b/zk-token-sdk/src/encryption/auth_encryption.rs index 4445a40dfc1689..046f529ca4e634 100644 --- a/zk-token-sdk/src/encryption/auth_encryption.rs +++ b/zk-token-sdk/src/encryption/auth_encryption.rs @@ -50,6 +50,8 @@ pub enum AuthenticatedEncryptionError { DerivationMethodNotSupported, #[error("seed length too short for derivation")] SeedLengthTooShort, + #[error("seed length too long for derivation")] + SeedLengthTooLong, } struct AuthenticatedEncryption; @@ -172,10 +174,14 @@ impl EncodableKey for AeKey { impl SeedDerivable for AeKey { fn from_seed(seed: &[u8]) -> Result> { const MINIMUM_SEED_LEN: usize = AE_KEY_LEN; + const MAXIMUM_SEED_LEN: usize = 65535; if seed.len() < MINIMUM_SEED_LEN { return Err(AuthenticatedEncryptionError::SeedLengthTooShort.into()); } + if seed.len() > MAXIMUM_SEED_LEN { + return Err(AuthenticatedEncryptionError::SeedLengthTooLong.into()); + } let mut hasher = Sha3_512::new(); hasher.update(seed); @@ -278,4 +284,16 @@ mod tests { let null_signer = NullSigner::new(&Pubkey::default()); 
assert!(AeKey::new_from_signer(&null_signer, Pubkey::default().as_ref()).is_err()); } + + #[test] + fn test_aes_key_from_seed() { + let good_seed = vec![0; 32]; + assert!(AeKey::from_seed(&good_seed).is_ok()); + + let too_short_seed = vec![0; 15]; + assert!(AeKey::from_seed(&too_short_seed).is_err()); + + let too_long_seed = vec![0; 65536]; + assert!(AeKey::from_seed(&too_long_seed).is_err()); + } } diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs index c57a10b740024e..bee5cb39c307ec 100644 --- a/zk-token-sdk/src/encryption/elgamal.rs +++ b/zk-token-sdk/src/encryption/elgamal.rs @@ -76,6 +76,8 @@ pub enum ElGamalError { DerivationMethodNotSupported, #[error("seed length too short for derivation")] SeedLengthTooShort, + #[error("seed length too long for derivation")] + SeedLengthTooLong, } /// Algorithm handle for the twisted ElGamal encryption scheme @@ -449,10 +451,14 @@ impl ElGamalSecretKey { /// Derive an ElGamal secret key from an entropy seed. pub fn from_seed(seed: &[u8]) -> Result { const MINIMUM_SEED_LEN: usize = ELGAMAL_SECRET_KEY_LEN; + const MAXIMUM_SEED_LEN: usize = 65535; if seed.len() < MINIMUM_SEED_LEN { return Err(ElGamalError::SeedLengthTooShort); } + if seed.len() > MAXIMUM_SEED_LEN { + return Err(ElGamalError::SeedLengthTooLong); + } Ok(ElGamalSecretKey(Scalar::hash_from_bytes::(seed))) } @@ -1026,6 +1032,9 @@ mod tests { let too_short_seed = vec![0; 31]; assert!(ElGamalKeypair::from_seed(&too_short_seed).is_err()); + + let too_long_seed = vec![0; 65536]; + assert!(ElGamalKeypair::from_seed(&too_long_seed).is_err()); } #[test] From e0b59a6f5351d889f9d1a2538d4607a7d340d6a9 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 20 Oct 2023 21:52:37 +0000 Subject: [PATCH 400/407] prunes turbine QUIC connections (#33663) The commit implements lazy eviction for turbine QUIC connections. 
The cache is allowed to grow to 2 x capacity at which point at least half of the entries with lowest stake are evicted, resulting in an amortized O(1) performance. --- core/src/validator.rs | 1 + turbine/src/quic_endpoint.rs | 158 ++++++++++++++++++++++++++++++++--- 2 files changed, 147 insertions(+), 12 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 16429049d0ef53..41d7cf8c945eab 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1184,6 +1184,7 @@ impl Validator { .expect("Operator must spin up node with valid QUIC TVU address") .ip(), turbine_quic_endpoint_sender, + bank_forks.clone(), ) .unwrap(); diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index 9be1dd11294c62..0f362fd1a34c69 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -10,15 +10,20 @@ use { rcgen::RcgenError, rustls::{Certificate, PrivateKey}, solana_quic_client::nonblocking::quic_client::SkipServerVerification, + solana_runtime::bank_forks::BankForks, solana_sdk::{pubkey::Pubkey, signature::Keypair}, solana_streamer::{ quic::SkipClientVerification, tls_certificates::new_self_signed_tls_certificate, }, std::{ + cmp::Reverse, collections::{hash_map::Entry, HashMap}, io::Error as IoError, net::{IpAddr, SocketAddr, UdpSocket}, - sync::Arc, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, }, thiserror::Error, tokio::{ @@ -32,6 +37,7 @@ use { const CLIENT_CHANNEL_BUFFER: usize = 1 << 14; const ROUTER_CHANNEL_BUFFER: usize = 64; +const CONNECTION_CACHE_CAPACITY: usize = 3072; const INITIAL_MAXIMUM_TRANSMISSION_UNIT: u16 = 1280; const ALPN_TURBINE_PROTOCOL_ID: &[u8] = b"solana-turbine"; const CONNECT_SERVER_NAME: &str = "solana-turbine"; @@ -40,11 +46,13 @@ const CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(1); const CONNECTION_CLOSE_ERROR_CODE_DROPPED: VarInt = VarInt::from_u32(2); const CONNECTION_CLOSE_ERROR_CODE_INVALID_IDENTITY: VarInt = VarInt::from_u32(3); const 
CONNECTION_CLOSE_ERROR_CODE_REPLACED: VarInt = VarInt::from_u32(4); +const CONNECTION_CLOSE_ERROR_CODE_PRUNED: VarInt = VarInt::from_u32(5); const CONNECTION_CLOSE_REASON_SHUTDOWN: &[u8] = b"SHUTDOWN"; const CONNECTION_CLOSE_REASON_DROPPED: &[u8] = b"DROPPED"; const CONNECTION_CLOSE_REASON_INVALID_IDENTITY: &[u8] = b"INVALID_IDENTITY"; const CONNECTION_CLOSE_REASON_REPLACED: &[u8] = b"REPLACED"; +const CONNECTION_CLOSE_REASON_PRUNED: &[u8] = b"PRUNED"; pub type AsyncTryJoinHandle = TryJoin, JoinHandle<()>>; @@ -75,6 +83,7 @@ pub fn new_quic_endpoint( socket: UdpSocket, address: IpAddr, sender: Sender<(Pubkey, SocketAddr, Bytes)>, + bank_forks: Arc>, ) -> Result< ( Endpoint, @@ -98,12 +107,15 @@ pub fn new_quic_endpoint( )? }; endpoint.set_default_client_config(client_config); + let prune_cache_pending = Arc::::default(); let cache = Arc::>>::default(); let router = Arc::>>>::default(); let (client_sender, client_receiver) = tokio::sync::mpsc::channel(CLIENT_CHANNEL_BUFFER); let server_task = runtime.spawn(run_server( endpoint.clone(), sender.clone(), + bank_forks.clone(), + prune_cache_pending.clone(), router.clone(), cache.clone(), )); @@ -111,6 +123,8 @@ pub fn new_quic_endpoint( endpoint.clone(), client_receiver, sender, + bank_forks, + prune_cache_pending, router, cache, )); @@ -163,6 +177,8 @@ fn new_transport_config() -> TransportConfig { async fn run_server( endpoint: Endpoint, sender: Sender<(Pubkey, SocketAddr, Bytes)>, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) { @@ -171,6 +187,8 @@ async fn run_server( endpoint.clone(), connecting, sender.clone(), + bank_forks.clone(), + prune_cache_pending.clone(), router.clone(), cache.clone(), )); @@ -181,6 +199,8 @@ async fn run_client( endpoint: Endpoint, mut receiver: AsyncReceiver<(SocketAddr, Bytes)>, sender: Sender<(Pubkey, SocketAddr, Bytes)>, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) { @@ -203,6 +223,8 @@ async fn run_client( 
remote_address, sender.clone(), receiver, + bank_forks.clone(), + prune_cache_pending.clone(), router.clone(), cache.clone(), )); @@ -234,10 +256,22 @@ async fn handle_connecting_error( endpoint: Endpoint, connecting: Connecting, sender: Sender<(Pubkey, SocketAddr, Bytes)>, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) { - if let Err(err) = handle_connecting(endpoint, connecting, sender, router, cache).await { + if let Err(err) = handle_connecting( + endpoint, + connecting, + sender, + bank_forks, + prune_cache_pending, + router, + cache, + ) + .await + { error!("handle_connecting: {err:?}"); } } @@ -246,6 +280,8 @@ async fn handle_connecting( endpoint: Endpoint, connecting: Connecting, sender: Sender<(Pubkey, SocketAddr, Bytes)>, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) -> Result<(), Error> { @@ -264,6 +300,8 @@ async fn handle_connecting( connection, sender, receiver, + bank_forks, + prune_cache_pending, router, cache, ) @@ -271,6 +309,7 @@ async fn handle_connecting( Ok(()) } +#[allow(clippy::too_many_arguments)] async fn handle_connection( endpoint: Endpoint, remote_address: SocketAddr, @@ -278,10 +317,20 @@ async fn handle_connection( connection: Connection, sender: Sender<(Pubkey, SocketAddr, Bytes)>, receiver: AsyncReceiver, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) { - cache_connection(remote_pubkey, connection.clone(), &cache).await; + cache_connection( + remote_pubkey, + connection.clone(), + bank_forks, + prune_cache_pending, + router.clone(), + cache.clone(), + ) + .await; let send_datagram_task = tokio::task::spawn(send_datagram_task(connection.clone(), receiver)); let read_datagram_task = tokio::task::spawn(read_datagram_task( endpoint, @@ -351,11 +400,22 @@ async fn make_connection_task( remote_address: SocketAddr, sender: Sender<(Pubkey, SocketAddr, Bytes)>, receiver: AsyncReceiver, + bank_forks: Arc>, + prune_cache_pending: Arc, router: 
Arc>>>, cache: Arc>>, ) { - if let Err(err) = - make_connection(endpoint, remote_address, sender, receiver, router, cache).await + if let Err(err) = make_connection( + endpoint, + remote_address, + sender, + receiver, + bank_forks, + prune_cache_pending, + router, + cache, + ) + .await { error!("make_connection: {remote_address}, {err:?}"); } @@ -366,6 +426,8 @@ async fn make_connection( remote_address: SocketAddr, sender: Sender<(Pubkey, SocketAddr, Bytes)>, receiver: AsyncReceiver, + bank_forks: Arc>, + prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, ) -> Result<(), Error> { @@ -379,6 +441,8 @@ async fn make_connection( connection, sender, receiver, + bank_forks, + prune_cache_pending, router, cache, ) @@ -402,15 +466,32 @@ fn get_remote_pubkey(connection: &Connection) -> Result { async fn cache_connection( remote_pubkey: Pubkey, connection: Connection, - cache: &Mutex>, + bank_forks: Arc>, + prune_cache_pending: Arc, + router: Arc>>>, + cache: Arc>>, ) { - let Some(old) = cache.lock().await.insert(remote_pubkey, connection) else { - return; + let (old, should_prune_cache) = { + let mut cache = cache.lock().await; + ( + cache.insert(remote_pubkey, connection), + cache.len() >= CONNECTION_CACHE_CAPACITY.saturating_mul(2), + ) }; - old.close( - CONNECTION_CLOSE_ERROR_CODE_REPLACED, - CONNECTION_CLOSE_REASON_REPLACED, - ); + if let Some(old) = old { + old.close( + CONNECTION_CLOSE_ERROR_CODE_REPLACED, + CONNECTION_CLOSE_REASON_REPLACED, + ); + } + if should_prune_cache && !prune_cache_pending.swap(true, Ordering::Relaxed) { + tokio::task::spawn(prune_connection_cache( + bank_forks, + prune_cache_pending, + router, + cache, + )); + } } async fn drop_connection( @@ -429,6 +510,50 @@ async fn drop_connection( } } +async fn prune_connection_cache( + bank_forks: Arc>, + prune_cache_pending: Arc, + router: Arc>>>, + cache: Arc>>, +) { + debug_assert!(prune_cache_pending.load(Ordering::Relaxed)); + let staked_nodes = { + let root_bank = 
bank_forks.read().unwrap().root_bank(); + root_bank.staked_nodes() + }; + { + let mut cache = cache.lock().await; + if cache.len() < CONNECTION_CACHE_CAPACITY.saturating_mul(2) { + prune_cache_pending.store(false, Ordering::Relaxed); + return; + } + let mut connections: Vec<_> = cache + .drain() + .filter(|(_, connection)| connection.close_reason().is_none()) + .map(|entry @ (pubkey, _)| { + let stake = staked_nodes.get(&pubkey).copied().unwrap_or_default(); + (stake, entry) + }) + .collect(); + connections + .select_nth_unstable_by_key(CONNECTION_CACHE_CAPACITY, |&(stake, _)| Reverse(stake)); + for (_, (_, connection)) in &connections[CONNECTION_CACHE_CAPACITY..] { + connection.close( + CONNECTION_CLOSE_ERROR_CODE_PRUNED, + CONNECTION_CLOSE_REASON_PRUNED, + ); + } + cache.extend( + connections + .into_iter() + .take(CONNECTION_CACHE_CAPACITY) + .map(|(_, entry)| entry), + ); + prune_cache_pending.store(false, Ordering::Relaxed); + } + router.write().await.retain(|_, sender| !sender.is_closed()); +} + impl From> for Error { fn from(_: crossbeam_channel::SendError) -> Self { Error::ChannelSendError @@ -440,6 +565,8 @@ mod tests { use { super::*, itertools::{izip, multiunzip}, + solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}, + solana_runtime::bank::Bank, solana_sdk::signature::Signer, std::{iter::repeat_with, net::Ipv4Addr, time::Duration}, }; @@ -467,6 +594,12 @@ mod tests { repeat_with(crossbeam_channel::unbounded::<(Pubkey, SocketAddr, Bytes)>) .take(NUM_ENDPOINTS) .unzip(); + let bank_forks = { + let GenesisConfigInfo { genesis_config, .. 
} = + create_genesis_config(/*mint_lamports:*/ 100_000); + let bank = Bank::new_for_tests(&genesis_config); + Arc::new(RwLock::new(BankForks::new(bank))) + }; let (endpoints, senders, tasks): (Vec<_>, Vec<_>, Vec<_>) = multiunzip(keypairs.iter().zip(sockets).zip(senders).map( |((keypair, socket), sender)| { @@ -476,6 +609,7 @@ mod tests { socket, IpAddr::V4(Ipv4Addr::LOCALHOST), sender, + bank_forks.clone(), ) .unwrap() }, From 6fd0dcb3eb7da9b0f4becd7fdc8a9624034c41fa Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 20 Oct 2023 15:41:24 -0700 Subject: [PATCH 401/407] Add support for fetching program in cargo registry (#33759) * Rename publisher.rs to crate_handler.rs * support for fetching program in cargo registry --- cargo-registry/src/crate_handler.rs | 356 ++++++++++++++++++++++++++++ cargo-registry/src/main.rs | 40 +++- cargo-registry/src/publisher.rs | 173 -------------- cargo-registry/src/sparse_index.rs | 29 ++- cli/src/program_v4.rs | 2 +- 5 files changed, 410 insertions(+), 190 deletions(-) create mode 100644 cargo-registry/src/crate_handler.rs delete mode 100644 cargo-registry/src/publisher.rs diff --git a/cargo-registry/src/crate_handler.rs b/cargo-registry/src/crate_handler.rs new file mode 100644 index 00000000000000..c55ab4ff07395f --- /dev/null +++ b/cargo-registry/src/crate_handler.rs @@ -0,0 +1,356 @@ +use { + crate::{ + client::{Client, RPCCommandConfig}, + sparse_index::{IndexEntry, RegistryIndex}, + }, + flate2::{ + read::{GzDecoder, GzEncoder}, + Compression, + }, + hyper::body::Bytes, + log::*, + serde::{Deserialize, Serialize}, + serde_json::from_slice, + sha2::{Digest, Sha256}, + solana_cli::program_v4::{process_deploy_program, process_dump, read_and_verify_elf}, + solana_sdk::{ + pubkey::Pubkey, + signature::{Keypair, Signer}, + signer::EncodableKey, + }, + std::{ + collections::BTreeMap, + fs, + io::{Cursor, Read}, + mem::size_of, + ops::Deref, + path::{Path, PathBuf}, + str::FromStr, + sync::Arc, + }, + tar::{Archive, Builder}, + 
tempfile::{tempdir, TempDir}, +}; + +pub(crate) type Error = Box; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub(crate) enum DependencyType { + Dev, + Build, + Normal, +} + +#[allow(dead_code)] +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct Dependency { + pub name: String, + pub version_req: String, + pub features: Vec, + pub optional: bool, + pub default_features: bool, + pub target: Option, + pub kind: DependencyType, + pub registry: Option, + pub explicit_name_in_toml: Option, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[allow(unused)] +pub(crate) struct PackageMetaData { + pub name: String, + pub vers: String, + pub deps: Vec, + pub features: BTreeMap>, + pub authors: Vec, + pub description: Option, + pub documentation: Option, + pub homepage: Option, + pub readme: Option, + pub readme_file: Option, + pub keywords: Vec, + pub categories: Vec, + pub license: Option, + pub license_file: Option, + pub repository: Option, + pub badges: BTreeMap>, + pub links: Option, + pub rust_version: Option, +} + +impl PackageMetaData { + fn new(bytes: &Bytes) -> serde_json::Result<(PackageMetaData, usize)> { + let (json_length, sizeof_length) = Self::read_u32_length(bytes)?; + let end_of_meta_data = sizeof_length.saturating_add(json_length as usize); + let json_body = bytes.slice(sizeof_length..end_of_meta_data); + from_slice::(json_body.deref()).map(|data| (data, end_of_meta_data)) + } + + fn read_u32_length(bytes: &Bytes) -> serde_json::Result<(u32, usize)> { + let sizeof_length = size_of::(); + let length_le = bytes.slice(0..sizeof_length); + let length = + u32::from_le_bytes(length_le.deref().try_into().expect("Failed to read length")); + Ok((length, sizeof_length)) + } +} + +pub(crate) struct Program { + path: String, + id: Pubkey, + _tempdir: Arc, +} + +impl Program { + fn deploy(&self, client: Arc, signer: &dyn Signer) -> Result<(), Error> { + if self.id != signer.pubkey() { + return 
Err("Signer doesn't match program ID".into()); + } + + let program_data = read_and_verify_elf(self.path.as_ref()) + .map_err(|e| format!("failed to read the program: {}", e))?; + + let command_config = RPCCommandConfig::new(client.as_ref()); + + process_deploy_program( + client.rpc_client.clone(), + &command_config.0, + &program_data, + program_data.len() as u32, + &signer.pubkey(), + Some(signer), + ) + .map_err(|e| { + error!("Failed to deploy the program: {}", e); + format!("Failed to deploy the program: {}", e) + })?; + + Ok(()) + } + + fn dump(&self, client: Arc) -> Result<(), Error> { + info!("Fetching program {:?}", self.id); + let command_config = RPCCommandConfig::new(client.as_ref()); + + process_dump( + client.rpc_client.clone(), + command_config.0.commitment, + Some(self.id), + &self.path, + ) + .map_err(|e| { + error!("Failed to fetch the program: {}", e); + format!("Failed to fetch the program: {}", e) + })?; + + Ok(()) + } + + pub(crate) fn crate_name_to_program_id(crate_name: &str) -> Option { + crate_name + .split_once('-') + .and_then(|(_prefix, id_str)| Pubkey::from_str(id_str).ok()) + } +} + +impl From<&UnpackedCrate> for Program { + fn from(value: &UnpackedCrate) -> Self { + Self { + path: value.program_path.clone(), + id: value.program_id, + _tempdir: value.tempdir.clone(), + } + } +} + +pub(crate) struct CratePackage(pub(crate) Bytes); + +impl From for Result { + fn from(value: UnpackedCrate) -> Self { + let mut archive = Builder::new(Vec::new()); + archive.append_dir_all(".", value.tempdir.path())?; + let data = archive.into_inner()?; + let reader = Cursor::new(data); + let mut encoder = GzEncoder::new(reader, Compression::fast()); + let mut zipped_data = Vec::new(); + encoder.read_to_end(&mut zipped_data)?; + + let meta_str = serde_json::to_string(&value.meta)?; + + let sizeof_length = size_of::(); + let mut packed = Vec::with_capacity( + sizeof_length + .saturating_add(meta_str.len()) + .saturating_add(sizeof_length) + 
.saturating_add(zipped_data.len()), + ); + + packed[..sizeof_length].copy_from_slice(&u32::to_le_bytes(meta_str.len() as u32)); + let offset = sizeof_length; + let end = offset.saturating_add(meta_str.len()); + packed[offset..end].copy_from_slice(meta_str.as_bytes()); + let offset = end; + let end = offset.saturating_add(sizeof_length); + packed[offset..end].copy_from_slice(&u32::to_le_bytes(zipped_data.len() as u32)); + let offset = end; + packed[offset..].copy_from_slice(&zipped_data); + + Ok(CratePackage(Bytes::from(packed))) + } +} + +pub(crate) struct UnpackedCrate { + meta: PackageMetaData, + cksum: String, + tempdir: Arc, + program_path: String, + program_id: Pubkey, + keypair: Option, +} + +impl From for Result { + fn from(value: CratePackage) -> Self { + let bytes = value.0; + let (meta, offset) = PackageMetaData::new(&bytes)?; + + let (_crate_file_length, length_size) = + PackageMetaData::read_u32_length(&bytes.slice(offset..))?; + let crate_bytes = bytes.slice(offset.saturating_add(length_size)..); + let cksum = format!("{:x}", Sha256::digest(&crate_bytes)); + + let decoder = GzDecoder::new(crate_bytes.as_ref()); + let mut archive = Archive::new(decoder); + + let tempdir = tempdir()?; + archive.unpack(tempdir.path())?; + + let lib_name = UnpackedCrate::program_library_name(&tempdir, &meta)?; + + let program_path = + UnpackedCrate::make_path(&tempdir, &meta, format!("out/{}.so", lib_name)) + .into_os_string() + .into_string() + .map_err(|_| "Failed to get program file path")?; + + let keypair = Keypair::read_from_file(UnpackedCrate::make_path( + &tempdir, + &meta, + format!("out/{}-keypair.json", lib_name), + )) + .map_err(|e| format!("Failed to get keypair from the file: {}", e))?; + + Ok(UnpackedCrate { + meta, + cksum, + tempdir: Arc::new(tempdir), + program_path, + program_id: keypair.pubkey(), + keypair: Some(keypair), + }) + } +} + +impl UnpackedCrate { + pub(crate) fn publish( + &self, + client: Arc, + index: Arc, + ) -> Result<(), Error> { + let 
Some(signer) = &self.keypair else { + return Err("No signer provided for the program deployment".into()); + }; + + Program::from(self).deploy(client, signer)?; + + let mut entry: IndexEntry = self.meta.clone().into(); + entry.cksum = self.cksum.clone(); + index.insert_entry(entry)?; + + info!("Successfully deployed the program"); + Ok(()) + } + + pub(crate) fn fetch_index(id: Pubkey, client: Arc) -> Result { + let (_program, unpacked_crate) = Self::fetch_program(id, client)?; + + let mut entry: IndexEntry = unpacked_crate.meta.clone().into(); + entry.cksum = unpacked_crate.cksum.clone(); + + Ok(entry) + } + + #[allow(dead_code)] + pub(crate) fn fetch(id: Pubkey, client: Arc) -> Result { + let (_program, unpacked_crate) = Self::fetch_program(id, client)?; + UnpackedCrate::into(unpacked_crate) + } + + fn fetch_program(id: Pubkey, client: Arc) -> Result<(Program, UnpackedCrate), Error> { + let crate_obj = Self::new_empty(id)?; + let program = Program::from(&crate_obj); + program.dump(client)?; + + // Decompile the program + // Generate a Cargo.toml + + Ok((program, crate_obj)) + } + + fn new_empty(id: Pubkey) -> Result { + let meta = PackageMetaData { + name: id.to_string(), + vers: "0.1".to_string(), + deps: vec![], + features: BTreeMap::new(), + authors: vec![], + description: None, + documentation: None, + homepage: None, + readme: None, + readme_file: None, + keywords: vec![], + categories: vec![], + license: None, + license_file: None, + repository: None, + badges: BTreeMap::new(), + links: None, + rust_version: None, + }; + + let tempdir = tempdir()?; + + let program_path = Self::make_path(&tempdir, &meta, format!("out/{}.so", id)) + .into_os_string() + .into_string() + .map_err(|_| "Failed to get program file path")?; + + Ok(Self { + meta, + cksum: "".to_string(), + tempdir: Arc::new(tempdir), + program_path, + program_id: id, + keypair: None, + }) + } + + fn make_path>(tempdir: &TempDir, meta: &PackageMetaData, append: P) -> PathBuf { + let mut path = 
tempdir.path().to_path_buf(); + path.push(format!("{}-{}/", meta.name, meta.vers)); + path.push(append); + path + } + + fn program_library_name(tempdir: &TempDir, meta: &PackageMetaData) -> Result { + let toml_content = fs::read_to_string(Self::make_path(tempdir, meta, "Cargo.toml.orig"))?; + let toml = toml_content.parse::()?; + let library_name = toml + .get("lib") + .and_then(|v| v.get("name")) + .and_then(|v| v.as_str()) + .ok_or("Failed to get module name")?; + Ok(library_name.to_string()) + } +} diff --git a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs index 419e8cf434202d..073b8e42cb609c 100644 --- a/cargo-registry/src/main.rs +++ b/cargo-registry/src/main.rs @@ -2,7 +2,7 @@ use { crate::{ client::Client, - publisher::{Error, Publisher}, + crate_handler::{CratePackage, Error, Program, UnpackedCrate}, sparse_index::RegistryIndex, }, hyper::{ @@ -18,7 +18,7 @@ use { }; mod client; -mod publisher; +mod crate_handler; mod response_builder; mod sparse_index; @@ -38,10 +38,14 @@ impl CargoRegistryService { match bytes { Ok(data) => { - let Ok(result) = tokio::task::spawn_blocking(move || { - Publisher::publish_crate(data, client, index) - }) - .await + let Ok(crate_object) = CratePackage(data).into() else { + return response_builder::error_response( + hyper::StatusCode::INTERNAL_SERVER_ERROR, + "Failed to parse the crate information", + ); + }; + let Ok(result) = + tokio::task::spawn_blocking(move || crate_object.publish(client, index)).await else { return response_builder::error_response( hyper::StatusCode::INTERNAL_SERVER_ERROR, @@ -74,6 +78,27 @@ impl CargoRegistryService { }) } + fn handle_download_crate_request( + path: &str, + _request: &hyper::Request, + client: Arc, + ) -> hyper::Response { + let Some((path, crate_name, _version)) = Self::get_crate_name_and_version(path) else { + return response_builder::error_in_parsing(); + }; + + if path.len() != PATH_PREFIX.len() { + return response_builder::error_incorrect_length(); + } + + let _package = 
Program::crate_name_to_program_id(crate_name) + .and_then(|id| UnpackedCrate::fetch(id, client).ok()); + + // Return the package to the caller in the response + + response_builder::error_not_implemented() + } + fn handle_yank_request( path: &str, _request: &hyper::Request, @@ -183,7 +208,7 @@ impl CargoRegistryService { } if path.starts_with(index.index_root.as_str()) { - return Ok(index.handler(request)); + return Ok(index.handler(request, client.clone())); } if !path.starts_with(PATH_PREFIX) { @@ -216,6 +241,7 @@ impl CargoRegistryService { Method::GET => match endpoint { "crates" => Self::handle_get_crates_request(path, &request), "owners" => Self::handle_get_owners_request(path, &request), + "download" => Self::handle_download_crate_request(path, &request, client.clone()), _ => response_builder::error_not_allowed(), }, Method::DELETE => match endpoint { diff --git a/cargo-registry/src/publisher.rs b/cargo-registry/src/publisher.rs deleted file mode 100644 index ea4c74a7251b67..00000000000000 --- a/cargo-registry/src/publisher.rs +++ /dev/null @@ -1,173 +0,0 @@ -use { - crate::{ - client::{Client, RPCCommandConfig}, - sparse_index::{IndexEntry, RegistryIndex}, - }, - flate2::read::GzDecoder, - hyper::body::Bytes, - log::*, - serde::{Deserialize, Serialize}, - serde_json::from_slice, - sha2::{Digest, Sha256}, - solana_cli::program_v4::{process_deploy_program, read_and_verify_elf}, - solana_sdk::{ - signature::{Keypair, Signer}, - signer::EncodableKey, - }, - std::{ - collections::BTreeMap, - fs, - mem::size_of, - ops::Deref, - path::{Path, PathBuf}, - sync::Arc, - }, - tar::Archive, - tempfile::{tempdir, TempDir}, -}; - -pub(crate) type Error = Box; - -#[derive(Debug, Deserialize, Serialize)] -#[serde(rename_all = "lowercase")] -pub(crate) enum DependencyType { - Dev, - Build, - Normal, -} - -#[allow(dead_code)] -#[derive(Debug, Deserialize)] -pub(crate) struct Dependency { - pub name: String, - pub version_req: String, - pub features: Vec, - pub optional: bool, 
- pub default_features: bool, - pub target: Option, - pub kind: DependencyType, - pub registry: Option, - pub explicit_name_in_toml: Option, -} - -#[derive(Debug, Deserialize)] -#[allow(unused)] -pub(crate) struct PackageMetaData { - pub name: String, - pub vers: String, - pub deps: Vec, - pub features: BTreeMap>, - pub authors: Vec, - pub description: Option, - pub documentation: Option, - pub homepage: Option, - pub readme: Option, - pub readme_file: Option, - pub keywords: Vec, - pub categories: Vec, - pub license: Option, - pub license_file: Option, - pub repository: Option, - pub badges: BTreeMap>, - pub links: Option, - pub rust_version: Option, -} - -impl PackageMetaData { - fn new(bytes: &Bytes) -> serde_json::Result<(PackageMetaData, usize)> { - let (json_length, sizeof_length) = Self::read_u32_length(bytes)?; - let end_of_meta_data = sizeof_length.saturating_add(json_length as usize); - let json_body = bytes.slice(sizeof_length..end_of_meta_data); - from_slice::(json_body.deref()).map(|data| (data, end_of_meta_data)) - } - - fn read_u32_length(bytes: &Bytes) -> serde_json::Result<(u32, usize)> { - let sizeof_length = size_of::(); - let length_le = bytes.slice(0..sizeof_length); - let length = - u32::from_le_bytes(length_le.deref().try_into().expect("Failed to read length")); - Ok((length, sizeof_length)) - } -} - -pub(crate) struct Publisher {} - -impl Publisher { - fn make_path>(tempdir: &TempDir, meta: &PackageMetaData, append: P) -> PathBuf { - let mut path = tempdir.path().to_path_buf(); - path.push(format!("{}-{}/", meta.name, meta.vers)); - path.push(append); - path - } - - fn program_library_name(tempdir: &TempDir, meta: &PackageMetaData) -> Result { - let toml_content = fs::read_to_string(Self::make_path(tempdir, meta, "Cargo.toml.orig"))?; - let toml = toml_content.parse::()?; - let library_name = toml - .get("lib") - .and_then(|v| v.get("name")) - .and_then(|v| v.as_str()) - .ok_or("Failed to get module name")?; - Ok(library_name.to_string()) - 
} - - pub(crate) fn publish_crate( - bytes: Bytes, - client: Arc, - index: Arc, - ) -> Result<(), Error> { - let (meta_data, offset) = PackageMetaData::new(&bytes)?; - - let (_crate_file_length, length_size) = - PackageMetaData::read_u32_length(&bytes.slice(offset..))?; - let crate_bytes = bytes.slice(offset.saturating_add(length_size)..); - let crate_cksum = format!("{:x}", Sha256::digest(&crate_bytes)); - - let decoder = GzDecoder::new(crate_bytes.as_ref()); - let mut archive = Archive::new(decoder); - - let tempdir = tempdir()?; - archive.unpack(tempdir.path())?; - - let command_config = RPCCommandConfig::new(client.as_ref()); - - let lib_name = Self::program_library_name(&tempdir, &meta_data)?; - - let program_path = Self::make_path(&tempdir, &meta_data, format!("out/{}.so", lib_name)) - .into_os_string() - .into_string() - .map_err(|_| "Failed to get program file path")?; - - let program_data = read_and_verify_elf(program_path.as_ref()) - .map_err(|e| format!("failed to read the program: {}", e))?; - - let program_keypair = Keypair::read_from_file(Self::make_path( - &tempdir, - &meta_data, - format!("out/{}-keypair.json", lib_name), - )) - .map_err(|e| format!("Failed to get keypair from the file: {}", e))?; - - info!("Deploying program at {:?}", program_keypair.pubkey()); - - process_deploy_program( - client.rpc_client.clone(), - &command_config.0, - &program_data, - program_data.len() as u32, - &program_keypair.pubkey(), - Some(&program_keypair), - ) - .map_err(|e| { - error!("Failed to deploy the program: {}", e); - format!("Failed to deploy the program: {}", e) - })?; - - let mut entry: IndexEntry = meta_data.into(); - entry.cksum = crate_cksum; - index.insert_entry(entry)?; - - info!("Successfully deployed the program"); - Ok(()) - } -} diff --git a/cargo-registry/src/sparse_index.rs b/cargo-registry/src/sparse_index.rs index e29a581c1c7819..68ff4bfac1c6a0 100644 --- a/cargo-registry/src/sparse_index.rs +++ b/cargo-registry/src/sparse_index.rs @@ -1,11 
+1,15 @@ use { crate::{ - publisher::{Dependency, Error, PackageMetaData}, + client::Client, + crate_handler::{Dependency, Error, PackageMetaData, Program, UnpackedCrate}, response_builder, }, log::info, serde::{Deserialize, Serialize}, - std::{collections::BTreeMap, sync::RwLock}, + std::{ + collections::BTreeMap, + sync::{Arc, RwLock}, + }, }; #[derive(Debug, Default, Deserialize, Serialize)] @@ -94,6 +98,7 @@ impl RegistryIndex { pub(crate) fn handler( &self, request: hyper::Request, + client: Arc, ) -> hyper::Response { let path = request.uri().path(); let expected_root = self.index_root.as_str(); @@ -115,7 +120,7 @@ impl RegistryIndex { return response_builder::success_response_str(&self.config); } - self.handle_crate_lookup_request(path) + self.handle_crate_lookup_request(path, client) } pub(crate) fn insert_entry(&self, entry: IndexEntry) -> Result<(), Error> { @@ -150,7 +155,11 @@ impl RegistryIndex { .then_some(crate_name) } - fn handle_crate_lookup_request(&self, path: &str) -> hyper::Response { + fn handle_crate_lookup_request( + &self, + path: &str, + client: Arc, + ) -> hyper::Response { let Some(crate_name) = Self::get_crate_name_from_path(path) else { return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, @@ -167,15 +176,17 @@ impl RegistryIndex { ); }; - let Some(entry) = read_index.get(crate_name) else { + let response = if let Some(entry) = read_index.get(crate_name) { + Some(serde_json::to_string(entry)) + } else { // The index currently doesn't contain the program entry. // Fetch the program information from the network using RPC client. - // In the meanwhile, return empty success response, so that the registry - // client continues to poll us for the index information. 
- return response_builder::success_response(); + Program::crate_name_to_program_id(crate_name) + .and_then(|id| UnpackedCrate::fetch_index(id, client).ok()) + .map(|entry| serde_json::to_string(&entry)) }; - let Ok(response) = serde_json::to_string(entry) else { + let Some(Ok(response)) = response else { return response_builder::error_response( hyper::StatusCode::INTERNAL_SERVER_ERROR, "Internal error. index entry is corrupted", diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index 41a8fa9de32b61..324f3040b83d4c 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -752,7 +752,7 @@ fn process_show( } } -fn process_dump( +pub fn process_dump( rpc_client: Arc, commitment: CommitmentConfig, account_pubkey: Option, From 01f1bf27994d9813fadfcd134befd3a449aaa0bd Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Fri, 20 Oct 2023 18:20:51 -0700 Subject: [PATCH 402/407] zeroize: Allow versions newer than 1.3 for `aes-gcm-siv` (#33618) `aes-gcm-siv` v0.10.3 has a constraints on maximum `zeroize` version, set to be 1.3 or below. At the same time, `cargo` does not want to construct a dependency graph with duplicate instances of a crate, when the first non-zero version of those instances are the same. That is, it refuses to build a workspace with both 1.3 and 1.4 versions of `zeroize`. `zeroize` is actually backward compatible, and `aes-gcm-siv` restriction is overly pessimistic. This package lifted this restriction in a newer versions, but we still depend on older versions and can not immediately update. In order to be able to use a version of `zeroize` newer than 1.3 we need to remove a similar restriction from `curve25519-dalek` as well. 
--- Cargo.lock | 3 +-- Cargo.toml | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6de911a6b63477..a45948d2bebe51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -51,8 +51,7 @@ dependencies = [ [[package]] name = "aes-gcm-siv" version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589c637f0e68c877bbd59a4599bbe849cac8e5f3e4b5a3ebae8f528cd218dcdc" +source = "git+https://github.com/RustCrypto/AEADs?rev=6105d7a5591aefa646a95d12b5e8d3f55a9214ef#6105d7a5591aefa646a95d12b5e8d3f55a9214ef" dependencies = [ "aead", "aes", diff --git a/Cargo.toml b/Cargo.toml index c17d5444020b15..9a01dc5a896ce9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -418,6 +418,7 @@ wasm-bindgen = "0.2" winapi = "0.3.8" winreg = "0.50" x509-parser = "0.14.0" +# See "zeroize versioning issues" below if you are updating this version. zeroize = { version = "1.3", default-features = false } zstd = "0.11.2" @@ -450,3 +451,43 @@ crossbeam-epoch = { git = "https://github.com/solana-labs/crossbeam", rev = "fd2 # overrides in sync. solana-program = { path = "sdk/program" } solana-zk-token-sdk = { path = "zk-token-sdk" } +# +# === zeroize versioning issues === +# +# A number of packages used explicit upper bound on the `zeroize` package, such +# as `>=1, <1.4`. The problem is that cargo still does not duplicate `zeroize` +# if a newer version is available and requested by another package and just +# fails the whole dependency resolution process. +# +# This is described in +# +# https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#multiple-requirements +# +# So we have to patch `zeroize` dependency specifications in the projects that +# introduce these constraints. They have already removed these constraints in +# newer versions, but we have not updated yet. As we update, we need to remove +# these patch requests. 
+# +# When our dependencies are upgraded, we can remove this patches. Before that +# we might need to maintain these patches in sync with our full dependency +# tree. + +# Our dependency tree has `aes-gcm-siv` v0.10.3 and the `zeroize` restriction +# was removed in the next commit just after the release. So it seems safe to +# patch to this commit. +# +# `aes-gcm-siv` v0.10.3 release: +# +# https://github.com/RustCrypto/AEADs/releases/tag/aes-gcm-siv-v0.10.3 +# +# Corresponds to commit +# +# https://github.com/RustCrypto/AEADs/commit/6f16f4577a1fc839a2346cf8c5531c85a44bf5c0 +# +# Comparison with `6105d7a5591aefa646a95d12b5e8d3f55a9214ef` pinned here: +# +# https://github.com/RustCrypto/AEADs/compare/aes-gcm-siv-v0.10.3..6105d7a5591aefa646a95d12b5e8d3f55a9214ef +# +[patch.crates-io.aes-gcm-siv] +git = "https://github.com/RustCrypto/AEADs" +rev = "6105d7a5591aefa646a95d12b5e8d3f55a9214ef" From 5a963529a83eee6d433568e4f78b5aea379b62ec Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Sat, 21 Oct 2023 15:56:43 +0900 Subject: [PATCH 403/407] Add BankWithScheduler for upcoming scheduler code (#33704) * Add BankWithScheduler for upcoming scheduler code * Remove too confusing insert_without_scheduler() * Add doc comment as a bonus * Simplify BankForks::banks() * Add derive(Debug) on BankWithScheduler --- core/benches/banking_stage.rs | 4 +- core/src/replay_stage.rs | 29 +++++-- core/tests/epoch_accounts_hash.rs | 52 ++++++----- ledger/src/blockstore_processor.rs | 110 +++++++++++++++--------- poh/src/poh_recorder.rs | 13 ++- program-test/src/lib.rs | 16 ++-- rpc/src/rpc.rs | 13 ++- runtime/src/bank.rs | 5 ++ runtime/src/bank_forks.rs | 48 +++++++---- runtime/src/installed_scheduler_pool.rs | 84 ++++++++++++++++++ runtime/src/lib.rs | 1 + 11 files changed, 270 insertions(+), 105 deletions(-) create mode 100644 runtime/src/installed_scheduler_pool.rs diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 2526c2a6369c5a..64300c274c38aa 100644 --- 
a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -37,7 +37,8 @@ use { }, solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry}, solana_runtime::{ - bank::Bank, bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache, + bank::Bank, bank_forks::BankForks, installed_scheduler_pool::BankWithScheduler, + prioritization_fee_cache::PrioritizationFeeCache, }, solana_sdk::{ genesis_config::GenesisConfig, @@ -398,6 +399,7 @@ fn simulate_process_entries( num_accounts: usize, ) { let bank = Arc::new(Bank::new_for_benches(genesis_config)); + let bank = BankWithScheduler::new_without_scheduler(bank); for i in 0..(num_accounts / 2) { bank.transfer(initial_lamports, mint_keypair, &keypairs[i * 2].pubkey()) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 2e9aba1dd964e1..40483babbefef3 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -61,6 +61,7 @@ use { bank::{bank_hash_details, Bank, NewBankOptions}, bank_forks::{BankForks, MAX_ROOT_DISTANCE_FOR_VOTE_ONLY}, commitment::BlockCommitmentCache, + installed_scheduler_pool::BankWithScheduler, prioritization_fee_cache::PrioritizationFeeCache, }, solana_sdk::{ @@ -1988,7 +1989,7 @@ impl ReplayStage { #[allow(clippy::too_many_arguments)] fn replay_blockstore_into_bank( - bank: &Arc, + bank: &BankWithScheduler, blockstore: &Blockstore, replay_stats: &RwLock, replay_progress: &RwLock, @@ -2599,7 +2600,11 @@ impl ReplayStage { return replay_result; } - let bank = bank_forks.read().unwrap().get(bank_slot).unwrap(); + let bank = bank_forks + .read() + .unwrap() + .get_with_scheduler(bank_slot) + .unwrap(); let parent_slot = bank.parent_slot(); let (num_blocks_on_fork, num_dropped_blocks_on_fork) = { let stats = progress_lock @@ -2687,7 +2692,11 @@ impl ReplayStage { debug!("bank_slot {:?} is marked dead", bank_slot); replay_result.is_slot_dead = true; } else { - let bank = bank_forks.read().unwrap().get(bank_slot).unwrap(); + let bank = bank_forks + 
.read() + .unwrap() + .get_with_scheduler(bank_slot) + .unwrap(); let parent_slot = bank.parent_slot(); let prev_leader_slot = progress.get_bank_prev_leader_slot(&bank); let (num_blocks_on_fork, num_dropped_blocks_on_fork) = { @@ -2768,7 +2777,11 @@ impl ReplayStage { } let bank_slot = replay_result.bank_slot; - let bank = &bank_forks.read().unwrap().get(bank_slot).unwrap(); + let bank = &bank_forks + .read() + .unwrap() + .get_with_scheduler(bank_slot) + .unwrap(); if let Some(replay_result) = &replay_result.replay_result { match replay_result { Ok(replay_tx_count) => tx_count += replay_tx_count, @@ -2826,7 +2839,9 @@ impl ReplayStage { ); // report cost tracker stats cost_update_sender - .send(CostUpdate::FrozenBank { bank: bank.clone() }) + .send(CostUpdate::FrozenBank { + bank: bank.clone_without_scheduler(), + }) .unwrap_or_else(|err| { warn!("cost_update_sender failed sending bank stats: {:?}", err) }); @@ -2887,7 +2902,7 @@ impl ReplayStage { if let Some(sender) = bank_notification_sender { sender .sender - .send(BankNotification::Frozen(bank.clone())) + .send(BankNotification::Frozen(bank.clone_without_scheduler())) .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err)); } blockstore_processor::cache_block_meta(bank, cache_block_meta_sender); @@ -4747,7 +4762,7 @@ pub(crate) mod tests { assert_eq!(bank0.tick_height(), bank0.max_tick_height()); let bank1 = Bank::new_from_parent(bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); - let bank1 = bank_forks.read().unwrap().get(1).unwrap(); + let bank1 = bank_forks.read().unwrap().get_with_scheduler(1).unwrap(); let bank1_progress = progress .entry(bank1.slot()) .or_insert_with(|| ForkProgress::new(bank1.last_blockhash(), None, None, 0, 0)); diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 8fa6919e99db1f..3b759a06428c18 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -599,18 +599,22 @@ fn 
test_epoch_accounts_hash_and_warping() { ); // flush the write cache so warping can calculate the accounts hash from storages bank.force_flush_accounts_cache(); - let bank = bank_forks.write().unwrap().insert(Bank::warp_from_parent( - bank, - &Pubkey::default(), - eah_stop_slot_in_next_epoch, - CalcAccountsHashDataSource::Storages, - )); + let bank = bank_forks + .write() + .unwrap() + .insert(Bank::warp_from_parent( + bank, + &Pubkey::default(), + eah_stop_slot_in_next_epoch, + CalcAccountsHashDataSource::Storages, + )) + .clone_without_scheduler(); let slot = bank.slot().checked_add(1).unwrap(); - let bank = - bank_forks - .write() - .unwrap() - .insert(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + let bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent(bank, &Pubkey::default(), slot)) + .clone_without_scheduler(); bank_forks.write().unwrap().set_root( bank.slot(), &test_environment @@ -634,18 +638,22 @@ fn test_epoch_accounts_hash_and_warping() { epoch_schedule.get_first_slot_in_epoch(bank.epoch() + 1) + eah_start_offset; // flush the write cache so warping can calculate the accounts hash from storages bank.force_flush_accounts_cache(); - let bank = bank_forks.write().unwrap().insert(Bank::warp_from_parent( - bank, - &Pubkey::default(), - eah_start_slot_in_next_epoch, - CalcAccountsHashDataSource::Storages, - )); + let bank = bank_forks + .write() + .unwrap() + .insert(Bank::warp_from_parent( + bank, + &Pubkey::default(), + eah_start_slot_in_next_epoch, + CalcAccountsHashDataSource::Storages, + )) + .clone_without_scheduler(); let slot = bank.slot().checked_add(1).unwrap(); - let bank = - bank_forks - .write() - .unwrap() - .insert(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + let bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent(bank, &Pubkey::default(), slot)) + .clone_without_scheduler(); bank_forks.write().unwrap().set_root( bank.slot(), &test_environment diff --git 
a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index e17bc52a889ded..d89ee2758f7488 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -39,6 +39,7 @@ use { bank_forks::BankForks, bank_utils, commitment::VOTE_THRESHOLD_SIZE, + installed_scheduler_pool::BankWithScheduler, prioritization_fee_cache::PrioritizationFeeCache, runtime_config::RuntimeConfig, transaction_batch::TransactionBatch, @@ -418,13 +419,13 @@ fn execute_batches( /// This method is for use testing against a single Bank, and assumes `Bank::transaction_count()` /// represents the number of transactions executed in this Bank pub fn process_entries_for_tests( - bank: &Arc, + bank: &BankWithScheduler, entries: Vec, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, ) -> Result<()> { let verify_transaction = { - let bank = bank.clone(); + let bank = bank.clone_with_scheduler(); move |versioned_tx: VersionedTransaction| -> Result { bank.verify_transaction(versioned_tx, TransactionVerificationMode::FullVerification) } @@ -463,7 +464,7 @@ pub fn process_entries_for_tests( } fn process_entries( - bank: &Arc, + bank: &BankWithScheduler, entries: &mut [ReplayEntry], transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, @@ -715,11 +716,16 @@ pub(crate) fn process_blockstore_for_bank_0( accounts_update_notifier, exit, ); + let bank0_slot = bank0.slot(); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0))); info!("Processing ledger for slot 0..."); process_bank_0( - &bank_forks.read().unwrap().root_bank(), + &bank_forks + .read() + .unwrap() + .get_with_scheduler(bank0_slot) + .unwrap(), blockstore, opts, &VerifyRecyclers::default(), @@ -889,7 +895,7 @@ fn verify_ticks( fn confirm_full_slot( blockstore: &Blockstore, - bank: &Arc, + bank: &BankWithScheduler, opts: &ProcessOptions, recyclers: &VerifyRecyclers, progress: &mut 
ConfirmationProgress, @@ -1050,7 +1056,7 @@ impl ConfirmationProgress { #[allow(clippy::too_many_arguments)] pub fn confirm_slot( blockstore: &Blockstore, - bank: &Arc, + bank: &BankWithScheduler, timing: &mut ConfirmationTiming, progress: &mut ConfirmationProgress, skip_verification: bool, @@ -1095,7 +1101,7 @@ pub fn confirm_slot( #[allow(clippy::too_many_arguments)] fn confirm_slot_entries( - bank: &Arc, + bank: &BankWithScheduler, slot_entries_load_result: (Vec, u64, bool), timing: &mut ConfirmationTiming, progress: &mut ConfirmationProgress, @@ -1192,7 +1198,7 @@ fn confirm_slot_entries( }; let verify_transaction = { - let bank = bank.clone(); + let bank = bank.clone_with_scheduler(); move |versioned_tx: VersionedTransaction, verification_mode: TransactionVerificationMode| -> Result { @@ -1291,7 +1297,7 @@ fn confirm_slot_entries( // Special handling required for processing the entries in slot 0 fn process_bank_0( - bank0: &Arc, + bank0: &BankWithScheduler, blockstore: &Blockstore, opts: &ProcessOptions, recyclers: &VerifyRecyclers, @@ -1490,7 +1496,7 @@ fn load_frozen_forks( // Block must be frozen by this point; otherwise, // process_single_slot() would have errored above. assert!(bank.is_frozen()); - all_banks.insert(bank.slot(), bank.clone()); + all_banks.insert(bank.slot(), bank.clone_with_scheduler()); m.stop(); process_single_slot_us += m.as_us(); @@ -1520,7 +1526,7 @@ fn load_frozen_forks( // Ensure cluster-confirmed root and parents are set as root in blockstore let mut rooted_slots = vec![]; - let mut new_root_bank = cluster_root_bank.clone(); + let mut new_root_bank = cluster_root_bank.clone_without_scheduler(); loop { if new_root_bank.slot() == root { break; } // Found the last root in the chain, yay! 
assert!(new_root_bank.slot() > root); @@ -1675,7 +1681,7 @@ fn supermajority_root_from_vote_accounts( #[allow(clippy::too_many_arguments)] fn process_single_slot( blockstore: &Blockstore, - bank: &Arc, + bank: &BankWithScheduler, opts: &ProcessOptions, recyclers: &VerifyRecyclers, progress: &mut ConfirmationProgress, @@ -1907,6 +1913,18 @@ pub mod tests { } } + fn process_entries_for_tests_without_scheduler( + bank: &Arc, + entries: Vec, + ) -> Result<()> { + process_entries_for_tests( + &BankWithScheduler::new_without_scheduler(bank.clone()), + entries, + None, + None, + ) + } + #[test] fn test_process_blockstore_with_missing_hashes() { do_test_process_blockstore_with_missing_hashes(AccessType::Primary); @@ -2602,7 +2620,7 @@ pub mod tests { ); // Now ensure the TX is accepted despite pointing to the ID of an empty entry. - process_entries_for_tests(&bank, slot_entries, None, None).unwrap(); + process_entries_for_tests_without_scheduler(&bank, slot_entries).unwrap(); assert_eq!(bank.process_transaction(&tx), Ok(())); } @@ -2737,7 +2755,7 @@ pub mod tests { assert_eq!(bank.tick_height(), 0); let tick = next_entry(&genesis_config.hash(), 1, vec![]); assert_eq!( - process_entries_for_tests(&bank, vec![tick], None, None), + process_entries_for_tests_without_scheduler(&bank, vec![tick]), Ok(()) ); assert_eq!(bank.tick_height(), 1); @@ -2772,7 +2790,7 @@ pub mod tests { ); let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]); assert_eq!( - process_entries_for_tests(&bank, vec![entry_1, entry_2], None, None), + process_entries_for_tests_without_scheduler(&bank, vec![entry_1, entry_2]), Ok(()) ); assert_eq!(bank.get_balance(&keypair1.pubkey()), 2); @@ -2828,11 +2846,9 @@ pub mod tests { ); assert_eq!( - process_entries_for_tests( + process_entries_for_tests_without_scheduler( &bank, vec![entry_1_to_mint, entry_2_to_3_mint_to_1], - None, - None, ), Ok(()) ); @@ -2899,11 +2915,9 @@ pub mod tests { ], ); - assert!(process_entries_for_tests( + 
assert!(process_entries_for_tests_without_scheduler( &bank, vec![entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()], - None, - None, ) .is_err()); @@ -3017,7 +3031,7 @@ pub mod tests { let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]); let bank = Arc::new(bank); - let result = process_entries_for_tests(&bank, vec![entry], None, None); + let result = process_entries_for_tests_without_scheduler(&bank, vec![entry]); bank.freeze(); let blockhash_ok = bank.last_blockhash(); let bankhash_ok = bank.hash(); @@ -3058,7 +3072,7 @@ pub mod tests { let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]); let bank = Arc::new(bank); - let _result = process_entries_for_tests(&bank, vec![entry], None, None); + let _result = process_entries_for_tests_without_scheduler(&bank, vec![entry]); bank.freeze(); assert_eq!(blockhash_ok, bank.last_blockhash()); @@ -3150,15 +3164,13 @@ pub mod tests { // keypair2=3 // keypair3=3 - assert!(process_entries_for_tests( + assert!(process_entries_for_tests_without_scheduler( &bank, vec![ entry_1_to_mint, entry_2_to_3_and_1_to_mint, entry_conflict_itself, ], - None, - None, ) .is_err()); @@ -3206,7 +3218,7 @@ pub mod tests { system_transaction::transfer(&keypair2, &keypair4.pubkey(), 1, bank.last_blockhash()); let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]); assert_eq!( - process_entries_for_tests(&bank, vec![entry_1, entry_2], None, None), + process_entries_for_tests_without_scheduler(&bank, vec![entry_1, entry_2]), Ok(()) ); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); @@ -3267,7 +3279,7 @@ pub mod tests { }) .collect(); assert_eq!( - process_entries_for_tests(&bank, entries, None, None), + process_entries_for_tests_without_scheduler(&bank, entries), Ok(()) ); } @@ -3330,7 +3342,7 @@ pub mod tests { // Transfer lamports to each other let entry = next_entry(&bank.last_blockhash(), 1, tx_vector); assert_eq!( - process_entries_for_tests(&bank, vec![entry], None, None), + 
process_entries_for_tests_without_scheduler(&bank, vec![entry]), Ok(()) ); bank.squash(); @@ -3390,7 +3402,10 @@ pub mod tests { system_transaction::transfer(&keypair1, &keypair4.pubkey(), 1, bank.last_blockhash()); let entry_2 = next_entry(&tick.hash, 1, vec![tx]); assert_eq!( - process_entries_for_tests(&bank, vec![entry_1, tick, entry_2.clone()], None, None,), + process_entries_for_tests_without_scheduler( + &bank, + vec![entry_1, tick, entry_2.clone()], + ), Ok(()) ); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); @@ -3401,7 +3416,7 @@ pub mod tests { system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, bank.last_blockhash()); let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]); assert_eq!( - process_entries_for_tests(&bank, vec![entry_3], None, None), + process_entries_for_tests_without_scheduler(&bank, vec![entry_3]), Err(TransactionError::AccountNotFound) ); } @@ -3481,7 +3496,7 @@ pub mod tests { ); assert_eq!( - process_entries_for_tests(&bank, vec![entry_1_to_mint], None, None), + process_entries_for_tests_without_scheduler(&bank, vec![entry_1_to_mint]), Err(TransactionError::AccountInUse) ); @@ -3560,7 +3575,7 @@ pub mod tests { // Set up bank1 let mut bank_forks = BankForks::new(Bank::new_for_tests(&genesis_config)); - let bank0 = bank_forks.get(0).unwrap(); + let bank0 = bank_forks.get_with_scheduler(0).unwrap(); let opts = ProcessOptions { run_verification: true, accounts_db_test_hash_calculation: true, @@ -3569,7 +3584,11 @@ pub mod tests { let recyclers = VerifyRecyclers::default(); process_bank_0(&bank0, &blockstore, &opts, &recyclers, None, None); let bank0_last_blockhash = bank0.last_blockhash(); - let bank1 = bank_forks.insert(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); + let bank1 = bank_forks.insert(Bank::new_from_parent( + bank0.clone_without_scheduler(), + &Pubkey::default(), + 1, + )); confirm_full_slot( &blockstore, &bank1, @@ -3684,7 +3703,7 @@ pub mod tests { }) .collect(); info!("paying iteration {}", i); - 
process_entries_for_tests(&bank, entries, None, None).expect("paying failed"); + process_entries_for_tests_without_scheduler(&bank, entries).expect("paying failed"); let entries: Vec<_> = (0..NUM_TRANSFERS) .step_by(NUM_TRANSFERS_PER_ENTRY) @@ -3707,16 +3726,14 @@ pub mod tests { .collect(); info!("refunding iteration {}", i); - process_entries_for_tests(&bank, entries, None, None).expect("refunding failed"); + process_entries_for_tests_without_scheduler(&bank, entries).expect("refunding failed"); // advance to next block - process_entries_for_tests( + process_entries_for_tests_without_scheduler( &bank, (0..bank.ticks_per_slot()) .map(|_| next_entry_mut(&mut hash, 1, vec![])) .collect::>(), - None, - None, ) .expect("process ticks failed"); @@ -3756,7 +3773,7 @@ pub mod tests { let entry = next_entry(&new_blockhash, 1, vec![tx]); entries.push(entry); - process_entries_for_tests(&bank0, entries, None, None).unwrap(); + process_entries_for_tests_without_scheduler(&bank0, entries).unwrap(); assert_eq!(bank0.get_balance(&keypair.pubkey()), 1) } @@ -3922,7 +3939,12 @@ pub mod tests { .collect(); let entry = next_entry(&bank_1_blockhash, 1, vote_txs); let (replay_vote_sender, replay_vote_receiver) = crossbeam_channel::unbounded(); - let _ = process_entries_for_tests(&bank1, vec![entry], None, Some(&replay_vote_sender)); + let _ = process_entries_for_tests( + &BankWithScheduler::new_without_scheduler(bank1), + vec![entry], + None, + Some(&replay_vote_sender), + ); let successes: BTreeSet = replay_vote_receiver .try_iter() .map(|(vote_pubkey, ..)| vote_pubkey) @@ -4210,7 +4232,7 @@ pub mod tests { prev_entry_hash: Hash, ) -> result::Result<(), BlockstoreProcessorError> { confirm_slot_entries( - bank, + &BankWithScheduler::new_without_scheduler(bank.clone()), (slot_entries, 0, slot_full), &mut ConfirmationTiming::default(), &mut ConfirmationProgress::new(prev_entry_hash), @@ -4232,7 +4254,9 @@ pub mod tests { .. 
} = create_genesis_config(100 * LAMPORTS_PER_SOL); let genesis_hash = genesis_config.hash(); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = BankWithScheduler::new_without_scheduler(Arc::new(Bank::new_for_tests( + &genesis_config, + ))); let mut timing = ConfirmationTiming::default(); let mut progress = ConfirmationProgress::new(genesis_hash); let amount = genesis_config.rent.minimum_balance(0); diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index bb14042cb584e9..817c7548bd445b 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -25,7 +25,7 @@ use { }, solana_measure::{measure, measure_us}, solana_metrics::poh_timing_point::{send_poh_timing_point, PohTimingSender, SlotPohTimingInfo}, - solana_runtime::bank::Bank, + solana_runtime::{bank::Bank, installed_scheduler_pool::BankWithScheduler}, solana_sdk::{ clock::{Slot, NUM_CONSECUTIVE_LEADER_SLOTS}, hash::Hash, @@ -264,9 +264,8 @@ impl PohRecorderBank { } } -#[derive(Clone)] pub struct WorkingBank { - pub bank: Arc, + pub bank: BankWithScheduler, pub start: Arc, pub min_tick_height: u64, pub max_tick_height: u64, @@ -596,7 +595,7 @@ impl PohRecorder { self.leader_last_tick_height = leader_last_tick_height; } - pub fn set_bank(&mut self, bank: Arc, track_transaction_indexes: bool) { + pub fn set_bank(&mut self, bank: BankWithScheduler, track_transaction_indexes: bool) { assert!(self.working_bank.is_none()); self.leader_bank_notifier.set_in_progress(&bank); let working_bank = WorkingBank { @@ -644,12 +643,12 @@ impl PohRecorder { #[cfg(feature = "dev-context-only-utils")] pub fn set_bank_for_test(&mut self, bank: Arc) { - self.set_bank(bank, false) + self.set_bank(BankWithScheduler::new_without_scheduler(bank), false) } #[cfg(test)] pub fn set_bank_with_transaction_index_for_test(&mut self, bank: Arc) { - self.set_bank(bank, true) + self.set_bank(BankWithScheduler::new_without_scheduler(bank), true) } // Flush cache will delay flushing the cache for a bank 
until it past the WorkingBank::min_tick_height @@ -1092,7 +1091,7 @@ pub fn create_test_recorder( ); let ticks_per_slot = bank.ticks_per_slot(); - poh_recorder.set_bank(bank, false); + poh_recorder.set_bank(BankWithScheduler::new_without_scheduler(bank), false); let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let poh_service = PohService::new( poh_recorder.clone(), diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 95b9e6103d8122..5da0534860d219 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -1129,13 +1129,15 @@ impl ProgramTestContext { bank.freeze(); bank } else { - bank_forks.insert(Bank::warp_from_parent( - bank, - &Pubkey::default(), - pre_warp_slot, - // some warping tests cannot use the append vecs because of the sequence of adding roots and flushing - solana_accounts_db::accounts_db::CalcAccountsHashDataSource::IndexForTests, - )) + bank_forks + .insert(Bank::warp_from_parent( + bank, + &Pubkey::default(), + pre_warp_slot, + // some warping tests cannot use the append vecs because of the sequence of adding roots and flushing + solana_accounts_db::accounts_db::CalcAccountsHashDataSource::IndexForTests, + )) + .clone_without_scheduler() }; let (snapshot_request_sender, snapshot_request_receiver) = crossbeam_channel::unbounded(); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 709c186889995e..1a8cc045fa02f7 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -50,6 +50,7 @@ use { bank::{Bank, TransactionSimulationResult}, bank_forks::BankForks, commitment::{BlockCommitmentArray, BlockCommitmentCache, CommitmentSlots}, + installed_scheduler_pool::BankWithScheduler, non_circulating_supply::calculate_non_circulating_supply, prioritization_fee_cache::PrioritizationFeeCache, snapshot_config::SnapshotConfig, @@ -4602,7 +4603,7 @@ pub fn populate_blockstore_for_tests( // that they are matched properly by get_rooted_block assert_eq!( solana_ledger::blockstore_processor::process_entries_for_tests( - &bank, + 
&BankWithScheduler::new_without_scheduler(bank), entries, Some( &solana_ledger::blockstore_processor::TransactionStatusSender { @@ -4962,7 +4963,12 @@ pub mod tests { for (i, root) in roots.iter().enumerate() { let new_bank = Bank::new_from_parent(parent_bank.clone(), parent_bank.collector_id(), *root); - parent_bank = self.bank_forks.write().unwrap().insert(new_bank); + parent_bank = self + .bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); let parent = if i > 0 { roots[i - 1] } else { 0 }; fill_blockstore_slot_with_ticks( &self.blockstore, @@ -5004,7 +5010,8 @@ pub mod tests { .bank_forks .write() .unwrap() - .insert(Bank::new_from_parent(parent_bank, &Pubkey::default(), slot)); + .insert(Bank::new_from_parent(parent_bank, &Pubkey::default(), slot)) + .clone_without_scheduler(); let new_block_commitment = BlockCommitmentCache::new( HashMap::new(), diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 8402c2f05d641e..bd0786a9b2a3d5 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -8513,11 +8513,16 @@ impl Drop for Bank { pub mod test_utils { use { super::Bank, + crate::installed_scheduler_pool::BankWithScheduler, solana_sdk::{hash::hashv, pubkey::Pubkey}, solana_vote_program::vote_state::{self, BlockTimestamp, VoteStateVersions}, std::sync::Arc, }; pub fn goto_end_of_slot(bank: Arc) { + goto_end_of_slot_with_scheduler(&BankWithScheduler::new_without_scheduler(bank)) + } + + pub fn goto_end_of_slot_with_scheduler(bank: &BankWithScheduler) { let mut tick_hash = bank.last_blockhash(); loop { tick_hash = hashv(&[tick_hash.as_ref(), &[42]]); diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 27abe800620ac9..71315bc4b875c3 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -4,6 +4,7 @@ use { crate::{ accounts_background_service::{AbsRequestSender, SnapshotRequest, SnapshotRequestKind}, bank::{epoch_accounts_hash_utils, Bank, SquashTiming}, + 
installed_scheduler_pool::BankWithScheduler, snapshot_config::SnapshotConfig, }, log::*, @@ -57,7 +58,7 @@ struct SetRootTimings { #[derive(Debug)] pub struct BankForks { - banks: HashMap>, + banks: HashMap, descendants: HashMap>, root: Arc, @@ -82,8 +83,8 @@ impl BankForks { Self::new_from_banks(&[Arc::new(bank)], root) } - pub fn banks(&self) -> HashMap> { - self.banks.clone() + pub fn banks(&self) -> &HashMap { + &self.banks } pub fn get_vote_only_mode_signal(&self) -> Arc { @@ -119,7 +120,7 @@ impl BankForks { self.banks .iter() .filter(|(_, b)| b.is_frozen()) - .map(|(k, b)| (*k, b.clone())) + .map(|(&k, b)| (k, b.clone_without_scheduler())) .collect() } @@ -131,8 +132,13 @@ impl BankForks { .collect() } + pub fn get_with_scheduler(&self, bank_slot: Slot) -> Option { + self.banks.get(&bank_slot).map(|b| b.clone_with_scheduler()) + } + pub fn get(&self, bank_slot: Slot) -> Option> { - self.banks.get(&bank_slot).cloned() + self.get_with_scheduler(bank_slot) + .map(|b| b.clone_without_scheduler()) } pub fn get_with_checked_hash( @@ -159,10 +165,19 @@ impl BankForks { // Iterate through the heads of all the different forks for bank in initial_forks { - banks.insert(bank.slot(), bank.clone()); + banks.insert( + bank.slot(), + BankWithScheduler::new_without_scheduler(bank.clone()), + ); let parents = bank.parents(); for parent in parents { - if banks.insert(parent.slot(), parent.clone()).is_some() { + if banks + .insert( + parent.slot(), + BankWithScheduler::new_without_scheduler(parent.clone()), + ) + .is_some() + { // All ancestors have already been inserted by another fork break; } @@ -187,12 +202,12 @@ impl BankForks { } } - pub fn insert(&mut self, mut bank: Bank) -> Arc { + pub fn insert(&mut self, mut bank: Bank) -> BankWithScheduler { bank.check_program_modification_slot = self.root.load(Ordering::Relaxed) < self.highest_slot_at_startup; - let bank = Arc::new(bank); - let prev = self.banks.insert(bank.slot(), bank.clone()); + let bank = 
BankWithScheduler::new_without_scheduler(Arc::new(bank)); + let prev = self.banks.insert(bank.slot(), bank.clone_with_scheduler()); assert!(prev.is_none()); let slot = bank.slot(); self.descendants.entry(slot).or_default(); @@ -202,7 +217,7 @@ impl BankForks { bank } - pub fn insert_from_ledger(&mut self, bank: Bank) -> Arc { + pub fn insert_from_ledger(&mut self, bank: Bank) -> BankWithScheduler { self.highest_slot_at_startup = std::cmp::max(self.highest_slot_at_startup, bank.slot()); self.insert(bank) } @@ -224,7 +239,7 @@ impl BankForks { if entry.get().is_empty() { entry.remove_entry(); } - Some(bank) + Some(bank.clone_without_scheduler()) } pub fn highest_slot(&self) -> Slot { @@ -235,6 +250,10 @@ impl BankForks { self[self.highest_slot()].clone() } + pub fn working_bank_with_scheduler(&self) -> &BankWithScheduler { + &self.banks[&self.highest_slot()] + } + fn do_set_root_return_metrics( &mut self, root: Slot, @@ -247,9 +266,8 @@ impl BankForks { // ensure atomic ordering correctness. self.root.store(root, Ordering::Release); - let root_bank = self - .banks - .get(&root) + let root_bank = &self + .get(root) .expect("root bank didn't exist in bank_forks"); let new_epoch = root_bank.epoch(); if old_epoch != new_epoch { diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs new file mode 100644 index 00000000000000..9fd3a5546097cc --- /dev/null +++ b/runtime/src/installed_scheduler_pool.rs @@ -0,0 +1,84 @@ +//! Currently, there's only one auxiliary type called BankWithScheduler.. This file will be +//! populated by later PRs to align with the filename. + +#[cfg(feature = "dev-context-only-utils")] +use qualifier_attr::qualifiers; +use { + crate::bank::Bank, + std::{ + fmt::Debug, + ops::Deref, + sync::{Arc, RwLock}, + }, +}; + +// currently dummy type; will be replaced with the introduction of real type by upcoming pr... 
+pub type DefaultInstalledSchedulerBox = (); + +/// Very thin wrapper around Arc +/// +/// It brings type-safety against accidental mixing of bank and scheduler with different slots, +/// which is a pretty dangerous condition. Also, it guarantees to call wait_for_termination() via +/// ::drop() inside BankForks::set_root()'s pruning, perfectly matching to Arc's lifetime by +/// piggybacking on the pruning. +/// +/// Semantically, a scheduler is tightly coupled with a particular bank. But scheduler wasn't put +/// into Bank fields to avoid circular-references (a scheduler needs to refer to its accompanied +/// Arc). BankWithScheduler behaves almost like Arc. It only adds a few of transaction +/// scheduling and scheduler management functions. For this reason, `bank` variable names should be +/// used for `BankWithScheduler` across codebase. +/// +/// BankWithScheduler even implements Deref for convenience. And Clone is omitted to implement to +/// avoid ambiguity as to which to clone: BankWithScheduler or Arc. Use +/// clone_without_scheduler() for Arc. 
Otherwise, use clone_with_scheduler() (this should be +/// unusual outside scheduler code-path) +#[derive(Debug)] +pub struct BankWithScheduler { + inner: Arc, +} + +#[derive(Debug)] +pub struct BankWithSchedulerInner { + bank: Arc, + #[allow(dead_code)] + scheduler: InstalledSchedulerRwLock, +} +pub type InstalledSchedulerRwLock = RwLock>; + +impl BankWithScheduler { + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + pub(crate) fn new(bank: Arc, scheduler: Option) -> Self { + Self { + inner: Arc::new(BankWithSchedulerInner { + bank, + scheduler: RwLock::new(scheduler), + }), + } + } + + pub fn new_without_scheduler(bank: Arc) -> Self { + Self::new(bank, None) + } + + pub fn clone_with_scheduler(&self) -> BankWithScheduler { + BankWithScheduler { + inner: self.inner.clone(), + } + } + + pub fn clone_without_scheduler(&self) -> Arc { + self.inner.bank.clone() + } + + pub const fn no_scheduler_available() -> InstalledSchedulerRwLock { + RwLock::new(None) + } +} + +impl Deref for BankWithScheduler { + type Target = Arc; + + fn deref(&self) -> &Self::Target { + &self.inner.bank + } +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 503d24410e8cdc..1bbd479848e987 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -16,6 +16,7 @@ pub mod epoch_stakes; pub mod genesis_utils; pub mod inline_feature_gate_program; pub mod inline_spl_associated_token_account; +pub mod installed_scheduler_pool; pub mod loader_utils; pub mod non_circulating_supply; pub mod prioritization_fee; From 56ccffdaa5394f179dce6c0383918e571aca8bff Mon Sep 17 00:00:00 2001 From: steviez Date: Sat, 21 Oct 2023 11:38:31 +0200 Subject: [PATCH 404/407] Replace get_tmp_ledger_path!() with self cleaning version (#33702) This macro is used a lot for tests to create a ledger path in order to open a Blockstore. Files will be left on disk unless the test remembers to call Blockstore::destroy() on the directory. 
So, instead of requiring this, use the get_tmp_ledger_path_auto_delete!() macro that creates a TempDir (which automatically deletes itself when it goes out of scope). --- banking-bench/src/main.rs | 396 ++-- client-test/tests/client.rs | 6 +- core/benches/banking_stage.rs | 252 ++- core/src/consensus.rs | 151 +- core/src/repair/ancestor_hashes_service.rs | 9 +- core/src/repair/repair_service.rs | 477 +++-- core/src/repair/serve_repair.rs | 549 +++--- core/src/replay_stage.rs | 12 +- ledger/benches/blockstore.rs | 34 +- ledger/benches/protobuf.rs | 18 +- poh/src/poh_recorder.rs | 1696 ++++++++--------- poh/src/poh_service.rs | 311 ++- rpc/src/cluster_tpu_info.rs | 218 ++- rpc/src/rpc_service.rs | 25 +- rpc/src/rpc_subscriptions.rs | 13 +- rpc/src/transaction_status_service.rs | 8 +- turbine/src/broadcast_stage.rs | 110 +- .../broadcast_stage/standard_broadcast_run.rs | 7 +- 18 files changed, 2081 insertions(+), 2211 deletions(-) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 8b8ee2b2723c72..c96ab074a642ad 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -14,7 +14,7 @@ use { solana_ledger::{ blockstore::Blockstore, genesis_utils::{create_genesis_config, GenesisConfigInfo}, - get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, }, solana_measure::measure::Measure, @@ -410,216 +410,212 @@ fn main() { } } - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Arc::new( - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), - ); - let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); - let (banking_tracer, tracer_thread) = - BankingTracer::new(matches.is_present("trace_banking").then_some(( - &blockstore.banking_trace_path(), - exit.clone(), - 
BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, - ))) - .unwrap(); - let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); - let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); - let (gossip_vote_sender, gossip_vote_receiver) = - banking_tracer.create_channel_gossip_vote(); - let cluster_info = { - let keypair = Arc::new(Keypair::new()); - let node = Node::new_localhost_with_pubkey(&keypair.pubkey()); - ClusterInfo::new(node.info, keypair, SocketAddrSpace::Unspecified) - }; - let cluster_info = Arc::new(cluster_info); - let tpu_disable_quic = matches.is_present("tpu_disable_quic"); - let connection_cache = match tpu_disable_quic { - false => ConnectionCache::new_quic( - "connection_cache_banking_bench_quic", - DEFAULT_TPU_CONNECTION_POOL_SIZE, - ), - true => ConnectionCache::with_udp( - "connection_cache_banking_bench_udp", - DEFAULT_TPU_CONNECTION_POOL_SIZE, - ), - }; - let banking_stage = BankingStage::new_thread_local_multi_iterator( - &cluster_info, - &poh_recorder, - non_vote_receiver, - tpu_vote_receiver, - gossip_vote_receiver, - num_banking_threads, - None, - replay_vote_sender, - None, - Arc::new(connection_cache), - bank_forks.clone(), - &Arc::new(PrioritizationFeeCache::new(0u64)), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new( + Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"), + ); + let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); + let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); + let (banking_tracer, tracer_thread) = + BankingTracer::new(matches.is_present("trace_banking").then_some(( + &blockstore.banking_trace_path(), + exit.clone(), + BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, + ))) + .unwrap(); + let (non_vote_sender, non_vote_receiver) = 
banking_tracer.create_channel_non_vote(); + let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); + let (gossip_vote_sender, gossip_vote_receiver) = banking_tracer.create_channel_gossip_vote(); + let cluster_info = { + let keypair = Arc::new(Keypair::new()); + let node = Node::new_localhost_with_pubkey(&keypair.pubkey()); + ClusterInfo::new(node.info, keypair, SocketAddrSpace::Unspecified) + }; + let cluster_info = Arc::new(cluster_info); + let tpu_disable_quic = matches.is_present("tpu_disable_quic"); + let connection_cache = match tpu_disable_quic { + false => ConnectionCache::new_quic( + "connection_cache_banking_bench_quic", + DEFAULT_TPU_CONNECTION_POOL_SIZE, + ), + true => ConnectionCache::with_udp( + "connection_cache_banking_bench_udp", + DEFAULT_TPU_CONNECTION_POOL_SIZE, + ), + }; + let banking_stage = BankingStage::new_thread_local_multi_iterator( + &cluster_info, + &poh_recorder, + non_vote_receiver, + tpu_vote_receiver, + gossip_vote_receiver, + num_banking_threads, + None, + replay_vote_sender, + None, + Arc::new(connection_cache), + bank_forks.clone(), + &Arc::new(PrioritizationFeeCache::new(0u64)), + ); - // This is so that the signal_receiver does not go out of scope after the closure. - // If it is dropped before poh_service, then poh_service will error when - // calling send() on the channel. 
- let signal_receiver = Arc::new(signal_receiver); - let mut total_us = 0; - let mut tx_total_us = 0; - let base_tx_count = bank.transaction_count(); - let mut txs_processed = 0; - let collector = solana_sdk::pubkey::new_rand(); - let mut total_sent = 0; - for current_iteration_index in 0..iterations { - trace!("RUNNING ITERATION {}", current_iteration_index); - let now = Instant::now(); - let mut sent = 0; - - let packets_for_this_iteration = &all_packets[current_iteration_index % num_chunks]; - for (packet_batch_index, packet_batch) in - packets_for_this_iteration.packet_batches.iter().enumerate() - { - sent += packet_batch.len(); - trace!( - "Sending PacketBatch index {}, {}", - packet_batch_index, - timestamp(), - ); - non_vote_sender - .send(BankingPacketBatch::new((vec![packet_batch.clone()], None))) - .unwrap(); - } + // This is so that the signal_receiver does not go out of scope after the closure. + // If it is dropped before poh_service, then poh_service will error when + // calling send() on the channel. 
+ let signal_receiver = Arc::new(signal_receiver); + let mut total_us = 0; + let mut tx_total_us = 0; + let base_tx_count = bank.transaction_count(); + let mut txs_processed = 0; + let collector = solana_sdk::pubkey::new_rand(); + let mut total_sent = 0; + for current_iteration_index in 0..iterations { + trace!("RUNNING ITERATION {}", current_iteration_index); + let now = Instant::now(); + let mut sent = 0; + + let packets_for_this_iteration = &all_packets[current_iteration_index % num_chunks]; + for (packet_batch_index, packet_batch) in + packets_for_this_iteration.packet_batches.iter().enumerate() + { + sent += packet_batch.len(); + trace!( + "Sending PacketBatch index {}, {}", + packet_batch_index, + timestamp(), + ); + non_vote_sender + .send(BankingPacketBatch::new((vec![packet_batch.clone()], None))) + .unwrap(); + } - for tx in &packets_for_this_iteration.transactions { - loop { - if bank.get_signature_status(&tx.signatures[0]).is_some() { - break; - } - if poh_recorder.read().unwrap().bank().is_none() { - break; - } - sleep(Duration::from_millis(5)); + for tx in &packets_for_this_iteration.transactions { + loop { + if bank.get_signature_status(&tx.signatures[0]).is_some() { + break; + } + if poh_recorder.read().unwrap().bank().is_none() { + break; } + sleep(Duration::from_millis(5)); } + } - // check if txs had been processed by bank. Returns when all transactions are - // processed, with `FALSE` indicate there is still bank. or returns TRUE indicate a - // bank has expired before receiving all txs. 
- if check_txs( - &signal_receiver, - packets_for_this_iteration.transactions.len(), - &poh_recorder, - ) { - eprintln!( - "[iteration {}, tx sent {}, slot {} expired, bank tx count {}]", - current_iteration_index, - sent, - bank.slot(), - bank.transaction_count(), - ); - tx_total_us += duration_as_us(&now.elapsed()); - - let mut poh_time = Measure::start("poh_time"); - poh_recorder - .write() - .unwrap() - .reset(bank.clone(), Some((bank.slot(), bank.slot() + 1))); - poh_time.stop(); - - let mut new_bank_time = Measure::start("new_bank"); - let new_slot = bank.slot() + 1; - let new_bank = Bank::new_from_parent(bank, &collector, new_slot); - new_bank_time.stop(); - - let mut insert_time = Measure::start("insert_time"); - bank_forks.write().unwrap().insert(new_bank); - bank = bank_forks.read().unwrap().working_bank(); - insert_time.stop(); - - // set cost tracker limits to MAX so it will not filter out TXs - bank.write_cost_tracker().unwrap().set_limits( - std::u64::MAX, - std::u64::MAX, - std::u64::MAX, - ); + // check if txs had been processed by bank. Returns when all transactions are + // processed, with `FALSE` indicate there is still bank. or returns TRUE indicate a + // bank has expired before receiving all txs. 
+ if check_txs( + &signal_receiver, + packets_for_this_iteration.transactions.len(), + &poh_recorder, + ) { + eprintln!( + "[iteration {}, tx sent {}, slot {} expired, bank tx count {}]", + current_iteration_index, + sent, + bank.slot(), + bank.transaction_count(), + ); + tx_total_us += duration_as_us(&now.elapsed()); + + let mut poh_time = Measure::start("poh_time"); + poh_recorder + .write() + .unwrap() + .reset(bank.clone(), Some((bank.slot(), bank.slot() + 1))); + poh_time.stop(); + + let mut new_bank_time = Measure::start("new_bank"); + let new_slot = bank.slot() + 1; + let new_bank = Bank::new_from_parent(bank, &collector, new_slot); + new_bank_time.stop(); + + let mut insert_time = Measure::start("insert_time"); + bank_forks.write().unwrap().insert(new_bank); + bank = bank_forks.read().unwrap().working_bank(); + insert_time.stop(); + + // set cost tracker limits to MAX so it will not filter out TXs + bank.write_cost_tracker().unwrap().set_limits( + std::u64::MAX, + std::u64::MAX, + std::u64::MAX, + ); - assert!(poh_recorder.read().unwrap().bank().is_none()); - poh_recorder - .write() - .unwrap() - .set_bank_for_test(bank.clone()); - assert!(poh_recorder.read().unwrap().bank().is_some()); - debug!( - "new_bank_time: {}us insert_time: {}us poh_time: {}us", - new_bank_time.as_us(), - insert_time.as_us(), - poh_time.as_us(), - ); - } else { - eprintln!( - "[iteration {}, tx sent {}, slot {} active, bank tx count {}]", - current_iteration_index, - sent, - bank.slot(), - bank.transaction_count(), - ); - tx_total_us += duration_as_us(&now.elapsed()); - } + assert!(poh_recorder.read().unwrap().bank().is_none()); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + assert!(poh_recorder.read().unwrap().bank().is_some()); + debug!( + "new_bank_time: {}us insert_time: {}us poh_time: {}us", + new_bank_time.as_us(), + insert_time.as_us(), + poh_time.as_us(), + ); + } else { + eprintln!( + "[iteration {}, tx sent {}, slot {} active, bank tx count 
{}]", + current_iteration_index, + sent, + bank.slot(), + bank.transaction_count(), + ); + tx_total_us += duration_as_us(&now.elapsed()); + } - // This signature clear may not actually clear the signatures - // in this chunk, but since we rotate between CHUNKS then - // we should clear them by the time we come around again to re-use that chunk. - bank.clear_signatures(); - total_us += duration_as_us(&now.elapsed()); - total_sent += sent; - - if current_iteration_index % num_chunks == 0 { - let last_blockhash = bank.last_blockhash(); - for packets_for_single_iteration in all_packets.iter_mut() { - packets_for_single_iteration.refresh_blockhash(last_blockhash); - } + // This signature clear may not actually clear the signatures + // in this chunk, but since we rotate between CHUNKS then + // we should clear them by the time we come around again to re-use that chunk. + bank.clear_signatures(); + total_us += duration_as_us(&now.elapsed()); + total_sent += sent; + + if current_iteration_index % num_chunks == 0 { + let last_blockhash = bank.last_blockhash(); + for packets_for_single_iteration in all_packets.iter_mut() { + packets_for_single_iteration.refresh_blockhash(last_blockhash); } } - txs_processed += bank_forks - .read() - .unwrap() - .working_bank() - .transaction_count(); - debug!("processed: {} base: {}", txs_processed, base_tx_count); - - eprintln!("[total_sent: {}, base_tx_count: {}, txs_processed: {}, txs_landed: {}, total_us: {}, tx_total_us: {}]", + } + txs_processed += bank_forks + .read() + .unwrap() + .working_bank() + .transaction_count(); + debug!("processed: {} base: {}", txs_processed, base_tx_count); + + eprintln!("[total_sent: {}, base_tx_count: {}, txs_processed: {}, txs_landed: {}, total_us: {}, tx_total_us: {}]", total_sent, base_tx_count, txs_processed, (txs_processed - base_tx_count), total_us, tx_total_us); - eprintln!( - "{{'name': 'banking_bench_total', 'median': '{:.2}'}}", - (1000.0 * 1000.0 * total_sent as f64) / (total_us as f64), - ); 
- eprintln!( - "{{'name': 'banking_bench_tx_total', 'median': '{:.2}'}}", - (1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64), - ); - eprintln!( - "{{'name': 'banking_bench_success_tx_total', 'median': '{:.2}'}}", - (1000.0 * 1000.0 * (txs_processed - base_tx_count) as f64) / (total_us as f64), - ); + eprintln!( + "{{'name': 'banking_bench_total', 'median': '{:.2}'}}", + (1000.0 * 1000.0 * total_sent as f64) / (total_us as f64), + ); + eprintln!( + "{{'name': 'banking_bench_tx_total', 'median': '{:.2}'}}", + (1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64), + ); + eprintln!( + "{{'name': 'banking_bench_success_tx_total', 'median': '{:.2}'}}", + (1000.0 * 1000.0 * (txs_processed - base_tx_count) as f64) / (total_us as f64), + ); - drop(non_vote_sender); - drop(tpu_vote_sender); - drop(gossip_vote_sender); - exit.store(true, Ordering::Relaxed); - banking_stage.join().unwrap(); - debug!("waited for banking_stage"); - poh_service.join().unwrap(); - sleep(Duration::from_secs(1)); - debug!("waited for poh_service"); - if let Some(tracer_thread) = tracer_thread { - tracer_thread.join().unwrap().unwrap(); - } + drop(non_vote_sender); + drop(tpu_vote_sender); + drop(gossip_vote_sender); + exit.store(true, Ordering::Relaxed); + banking_stage.join().unwrap(); + debug!("waited for banking_stage"); + poh_service.join().unwrap(); + sleep(Duration::from_secs(1)); + debug!("waited for poh_service"); + if let Some(tracer_thread) = tracer_thread { + tracer_thread.join().unwrap().unwrap(); } - let _unused = Blockstore::destroy(&ledger_path); } diff --git a/client-test/tests/client.rs b/client-test/tests/client.rs index 65acd1adaae39d..01ecc263f64c0b 100644 --- a/client-test/tests/client.rs +++ b/client-test/tests/client.rs @@ -2,7 +2,7 @@ use { futures_util::StreamExt, rand::Rng, serde_json::{json, Value}, - solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}, + solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path_auto_delete}, 
solana_pubsub_client::{nonblocking, pubsub_client::PubsubClient}, solana_rpc::{ optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, @@ -233,8 +233,8 @@ fn test_block_subscription() { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); // setup Blockstore - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Blockstore::open(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let blockstore = Arc::new(blockstore); // populate ledger with test txs diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 64300c274c38aa..2a3d9c297cc17d 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -29,7 +29,7 @@ use { blockstore::Blockstore, blockstore_processor::process_entries_for_tests, genesis_utils::{create_genesis_config, GenesisConfigInfo}, - get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, }, solana_perf::{ packet::{to_packet_batches, Packet}, @@ -83,49 +83,46 @@ fn check_txs(receiver: &Arc>, ref_tx_count: usize) { fn bench_consume_buffered(bencher: &mut Bencher) { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(100_000); let bank = Arc::new(Bank::new_for_benches(&genesis_config)); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Arc::new( - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), - ); - let (exit, poh_recorder, poh_service, _signal_receiver) = - create_test_recorder(bank, blockstore, None, None); - - let recorder = poh_recorder.read().unwrap().new_recorder(); - let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); - - let tx = test_tx(); - let transactions = vec![tx; 4194304]; - let batches = transactions - .iter() - .filter_map(|transaction| { - let packet = Packet::from_data(None, transaction).ok().unwrap(); - DeserializedPacket::new(packet).ok() - }) - .collect::>(); - let batches_len = batches.len(); - let mut transaction_buffer = UnprocessedTransactionStorage::new_transaction_storage( - UnprocessedPacketBatches::from_iter(batches, 2 * batches_len), - ThreadType::Transactions, + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new( + Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"), + ); + let (exit, poh_recorder, poh_service, _signal_receiver) = + create_test_recorder(bank, blockstore, None, None); + + let recorder = poh_recorder.read().unwrap().new_recorder(); + let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); + + let tx = test_tx(); + let transactions = vec![tx; 4194304]; + let batches = transactions + .iter() + .filter_map(|transaction| { + let packet = Packet::from_data(None, transaction).ok().unwrap(); + DeserializedPacket::new(packet).ok() + }) + .collect::>(); + let batches_len = batches.len(); + let mut transaction_buffer = UnprocessedTransactionStorage::new_transaction_storage( + UnprocessedPacketBatches::from_iter(batches, 2 * batches_len), + ThreadType::Transactions, + ); + let (s, _r) = unbounded(); + let committer = Committer::new(None, s, 
Arc::new(PrioritizationFeeCache::new(0u64))); + let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + // This tests the performance of buffering packets. + // If the packet buffers are copied, performance will be poor. + bencher.iter(move || { + consumer.consume_buffered_packets( + &bank_start, + &mut transaction_buffer, + &BankingStageStats::default(), + &mut LeaderSlotMetricsTracker::new(0), ); - let (s, _r) = unbounded(); - let committer = Committer::new(None, s, Arc::new(PrioritizationFeeCache::new(0u64))); - let consumer = Consumer::new(committer, recorder, QosService::new(1), None); - // This tests the performance of buffering packets. - // If the packet buffers are copied, performance will be poor. - bencher.iter(move || { - consumer.consume_buffered_packets( - &bank_start, - &mut transaction_buffer, - &BankingStageStats::default(), - &mut LeaderSlotMetricsTracker::new(0), - ); - }); + }); - exit.store(true, Ordering::Relaxed); - poh_service.join().unwrap(); - } - let _unused = Blockstore::destroy(&ledger_path); + exit.store(true, Ordering::Relaxed); + poh_service.join().unwrap(); } fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec { @@ -279,95 +276,92 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { packet_batches }); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Arc::new( - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), - ); - let (exit, poh_recorder, poh_service, signal_receiver) = - create_test_recorder(bank.clone(), blockstore, None, None); - let cluster_info = { - let keypair = Arc::new(Keypair::new()); - let node = Node::new_localhost_with_pubkey(&keypair.pubkey()); - ClusterInfo::new(node.info, keypair, SocketAddrSpace::Unspecified) - }; - let cluster_info = Arc::new(cluster_info); - let (s, _r) = unbounded(); - let _banking_stage = BankingStage::new( - BlockProductionMethod::ThreadLocalMultiIterator, - &cluster_info, 
- &poh_recorder, - non_vote_receiver, - tpu_vote_receiver, - gossip_vote_receiver, - None, - s, - None, - Arc::new(ConnectionCache::new("connection_cache_test")), - bank_forks, - &Arc::new(PrioritizationFeeCache::new(0u64)), - ); - - let chunk_len = verified.len() / CHUNKS; - let mut start = 0; - - // This is so that the signal_receiver does not go out of scope after the closure. - // If it is dropped before poh_service, then poh_service will error when - // calling send() on the channel. - let signal_receiver = Arc::new(signal_receiver); - let signal_receiver2 = signal_receiver; - bencher.iter(move || { - let now = Instant::now(); - let mut sent = 0; - if let Some(vote_packets) = &vote_packets { - tpu_vote_sender - .send(BankingPacketBatch::new(( - vote_packets[start..start + chunk_len].to_vec(), - None, - ))) - .unwrap(); - gossip_vote_sender - .send(BankingPacketBatch::new(( - vote_packets[start..start + chunk_len].to_vec(), - None, - ))) - .unwrap(); - } - for v in verified[start..start + chunk_len].chunks(chunk_len / num_threads) { - debug!( - "sending... 
{}..{} {} v.len: {}", - start, - start + chunk_len, - timestamp(), - v.len(), - ); - for xv in v { - sent += xv.len(); - } - non_vote_sender - .send(BankingPacketBatch::new((v.to_vec(), None))) - .unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new( + Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"), + ); + let (exit, poh_recorder, poh_service, signal_receiver) = + create_test_recorder(bank.clone(), blockstore, None, None); + let cluster_info = { + let keypair = Arc::new(Keypair::new()); + let node = Node::new_localhost_with_pubkey(&keypair.pubkey()); + ClusterInfo::new(node.info, keypair, SocketAddrSpace::Unspecified) + }; + let cluster_info = Arc::new(cluster_info); + let (s, _r) = unbounded(); + let _banking_stage = BankingStage::new( + BlockProductionMethod::ThreadLocalMultiIterator, + &cluster_info, + &poh_recorder, + non_vote_receiver, + tpu_vote_receiver, + gossip_vote_receiver, + None, + s, + None, + Arc::new(ConnectionCache::new("connection_cache_test")), + bank_forks, + &Arc::new(PrioritizationFeeCache::new(0u64)), + ); + + let chunk_len = verified.len() / CHUNKS; + let mut start = 0; + + // This is so that the signal_receiver does not go out of scope after the closure. + // If it is dropped before poh_service, then poh_service will error when + // calling send() on the channel. + let signal_receiver = Arc::new(signal_receiver); + let signal_receiver2 = signal_receiver; + bencher.iter(move || { + let now = Instant::now(); + let mut sent = 0; + if let Some(vote_packets) = &vote_packets { + tpu_vote_sender + .send(BankingPacketBatch::new(( + vote_packets[start..start + chunk_len].to_vec(), + None, + ))) + .unwrap(); + gossip_vote_sender + .send(BankingPacketBatch::new(( + vote_packets[start..start + chunk_len].to_vec(), + None, + ))) + .unwrap(); + } + for v in verified[start..start + chunk_len].chunks(chunk_len / num_threads) { + debug!( + "sending... 
{}..{} {} v.len: {}", + start, + start + chunk_len, + timestamp(), + v.len(), + ); + for xv in v { + sent += xv.len(); } + non_vote_sender + .send(BankingPacketBatch::new((v.to_vec(), None))) + .unwrap(); + } - check_txs(&signal_receiver2, txes / CHUNKS); - - // This signature clear may not actually clear the signatures - // in this chunk, but since we rotate between CHUNKS then - // we should clear them by the time we come around again to re-use that chunk. - bank.clear_signatures(); - trace!( - "time: {} checked: {} sent: {}", - duration_as_us(&now.elapsed()), - txes / CHUNKS, - sent, - ); - start += chunk_len; - start %= verified.len(); - }); - exit.store(true, Ordering::Relaxed); - poh_service.join().unwrap(); - } - let _unused = Blockstore::destroy(&ledger_path); + check_txs(&signal_receiver2, txes / CHUNKS); + + // This signature clear may not actually clear the signatures + // in this chunk, but since we rotate between CHUNKS then + // we should clear them by the time we come around again to re-use that chunk. 
+ bank.clear_signatures(); + trace!( + "time: {} checked: {} sent: {}", + duration_as_us(&now.elapsed()), + txes / CHUNKS, + sent, + ); + start += chunk_len; + start %= verified.len(); + }); + exit.store(true, Ordering::Relaxed); + poh_service.join().unwrap(); } #[bench] diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 675dfc691e675d..08b72ebf18b327 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1518,7 +1518,7 @@ pub mod test { vote_simulator::VoteSimulator, }, itertools::Itertools, - solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path}, + solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path_auto_delete}, solana_runtime::bank::Bank, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, @@ -2928,36 +2928,33 @@ pub mod test { #[test] fn test_reconcile_blockstore_roots_with_tower_normal() { solana_logger::setup(); - let blockstore_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&blockstore_path).unwrap(); - - let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true); - blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true); - blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true); - blockstore.insert_shreds(shreds, None, false).unwrap(); - assert!(!blockstore.is_root(0)); - assert!(!blockstore.is_root(1)); - assert!(!blockstore.is_root(3)); - assert!(!blockstore.is_root(4)); - - let mut tower = Tower::default(); - tower.vote_state.root_slot = Some(4); - reconcile_blockstore_roots_with_external_source( - ExternalRootSource::Tower(tower.root()), - &blockstore, - &mut blockstore.last_root(), - ) - .unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let (shreds, _) = make_slot_entries(1, 0, 42, 
/*merkle_variant:*/ true); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true); + blockstore.insert_shreds(shreds, None, false).unwrap(); + assert!(!blockstore.is_root(0)); + assert!(!blockstore.is_root(1)); + assert!(!blockstore.is_root(3)); + assert!(!blockstore.is_root(4)); - assert!(!blockstore.is_root(0)); - assert!(blockstore.is_root(1)); - assert!(!blockstore.is_root(3)); - assert!(blockstore.is_root(4)); - } - Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + let mut tower = Tower::default(); + tower.vote_state.root_slot = Some(4); + reconcile_blockstore_roots_with_external_source( + ExternalRootSource::Tower(tower.root()), + &blockstore, + &mut blockstore.last_root(), + ) + .unwrap(); + + assert!(!blockstore.is_root(0)); + assert!(blockstore.is_root(1)); + assert!(!blockstore.is_root(3)); + assert!(blockstore.is_root(4)); } #[test] @@ -2966,61 +2963,55 @@ pub mod test { external root (Tower(4))!?")] fn test_reconcile_blockstore_roots_with_tower_panic_no_common_root() { solana_logger::setup(); - let blockstore_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&blockstore_path).unwrap(); - - let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true); - blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true); - blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true); - blockstore.insert_shreds(shreds, None, false).unwrap(); - blockstore.set_roots(std::iter::once(&3)).unwrap(); - assert!(!blockstore.is_root(0)); - assert!(!blockstore.is_root(1)); - assert!(blockstore.is_root(3)); - assert!(!blockstore.is_root(4)); - - let mut tower 
= Tower::default(); - tower.vote_state.root_slot = Some(4); - reconcile_blockstore_roots_with_external_source( - ExternalRootSource::Tower(tower.root()), - &blockstore, - &mut blockstore.last_root(), - ) - .unwrap(); - } - Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true); + blockstore.insert_shreds(shreds, None, false).unwrap(); + blockstore.set_roots(std::iter::once(&3)).unwrap(); + assert!(!blockstore.is_root(0)); + assert!(!blockstore.is_root(1)); + assert!(blockstore.is_root(3)); + assert!(!blockstore.is_root(4)); + + let mut tower = Tower::default(); + tower.vote_state.root_slot = Some(4); + reconcile_blockstore_roots_with_external_source( + ExternalRootSource::Tower(tower.root()), + &blockstore, + &mut blockstore.last_root(), + ) + .unwrap(); } #[test] fn test_reconcile_blockstore_roots_with_tower_nop_no_parent() { solana_logger::setup(); - let blockstore_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&blockstore_path).unwrap(); - - let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true); - blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true); - blockstore.insert_shreds(shreds, None, false).unwrap(); - assert!(!blockstore.is_root(0)); - assert!(!blockstore.is_root(1)); - assert!(!blockstore.is_root(3)); - - let mut tower = Tower::default(); - tower.vote_state.root_slot = Some(4); - assert_eq!(blockstore.last_root(), 0); - 
reconcile_blockstore_roots_with_external_source( - ExternalRootSource::Tower(tower.root()), - &blockstore, - &mut blockstore.last_root(), - ) - .unwrap(); - assert_eq!(blockstore.last_root(), 0); - } - Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true); + blockstore.insert_shreds(shreds, None, false).unwrap(); + assert!(!blockstore.is_root(0)); + assert!(!blockstore.is_root(1)); + assert!(!blockstore.is_root(3)); + + let mut tower = Tower::default(); + tower.vote_state.root_slot = Some(4); + assert_eq!(blockstore.last_root(), 0); + reconcile_blockstore_roots_with_external_source( + ExternalRootSource::Tower(tower.root()), + &blockstore, + &mut blockstore.last_root(), + ) + .unwrap(); + assert_eq!(blockstore.last_root(), 0); } #[test] diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 3214c89e14ea15..f6be342939944c 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -918,7 +918,10 @@ mod test { cluster_info::{ClusterInfo, Node}, contact_info::{ContactInfo, Protocol}, }, - solana_ledger::{blockstore::make_many_slot_entries, get_tmp_ledger_path, shred::Nonce}, + solana_ledger::{ + blockstore::make_many_slot_entries, get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, shred::Nonce, + }, solana_runtime::{accounts_background_service::AbsRequestSender, bank_forks::BankForks}, solana_sdk::{ hash::Hash, @@ -1938,8 +1941,8 @@ mod test { .. 
} = ManageAncestorHashesState::new(bank_forks); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Blockstore::open(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); // Create invalid packet with fewer bytes than the size of the nonce let mut packet = Packet::default(); diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index c7cfab03f8f8ed..7f18ced291cf15 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -864,7 +864,7 @@ mod test { make_chaining_slot_entries, make_many_slot_entries, make_slot_entries, Blockstore, }, genesis_utils::{create_genesis_config, GenesisConfigInfo}, - get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, shred::max_ticks_per_n_shreds, }, solana_runtime::bank::Bank, @@ -884,289 +884,270 @@ mod test { #[test] pub fn test_repair_orphan() { - let blockstore_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&blockstore_path).unwrap(); - - // Create some orphan slots - let (mut shreds, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true); - let (shreds2, _) = make_slot_entries(5, 2, 1, /*merkle_variant:*/ true); - shreds.extend(shreds2); - blockstore.insert_shreds(shreds, None, false).unwrap(); - let mut repair_weight = RepairWeight::new(0); - assert_eq!( - repair_weight.get_best_weighted_repairs( - &blockstore, - &HashMap::new(), - &EpochSchedule::default(), - MAX_ORPHANS, - MAX_REPAIR_LENGTH, - MAX_UNKNOWN_LAST_INDEX_REPAIRS, - MAX_CLOSEST_COMPLETION_REPAIRS, - &mut RepairTiming::default(), - &mut BestRepairsStats::default(), - ), - vec![ - ShredRepairType::Orphan(2), - ShredRepairType::HighestShred(0, 0) - ] - ); - } - - Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + // 
Create some orphan slots + let (mut shreds, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true); + let (shreds2, _) = make_slot_entries(5, 2, 1, /*merkle_variant:*/ true); + shreds.extend(shreds2); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let mut repair_weight = RepairWeight::new(0); + assert_eq!( + repair_weight.get_best_weighted_repairs( + &blockstore, + &HashMap::new(), + &EpochSchedule::default(), + MAX_ORPHANS, + MAX_REPAIR_LENGTH, + MAX_UNKNOWN_LAST_INDEX_REPAIRS, + MAX_CLOSEST_COMPLETION_REPAIRS, + &mut RepairTiming::default(), + &mut BestRepairsStats::default(), + ), + vec![ + ShredRepairType::Orphan(2), + ShredRepairType::HighestShred(0, 0) + ] + ); } #[test] pub fn test_repair_empty_slot() { - let blockstore_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&blockstore_path).unwrap(); - - let (shreds, _) = make_slot_entries(2, 0, 1, /*merkle_variant:*/ true); - - // Write this shred to slot 2, should chain to slot 0, which we haven't received - // any shreds for - blockstore.insert_shreds(shreds, None, false).unwrap(); - let mut repair_weight = RepairWeight::new(0); - - // Check that repair tries to patch the empty slot - assert_eq!( - repair_weight.get_best_weighted_repairs( - &blockstore, - &HashMap::new(), - &EpochSchedule::default(), - MAX_ORPHANS, - MAX_REPAIR_LENGTH, - MAX_UNKNOWN_LAST_INDEX_REPAIRS, - MAX_CLOSEST_COMPLETION_REPAIRS, - &mut RepairTiming::default(), - &mut BestRepairsStats::default(), - ), - vec![ShredRepairType::HighestShred(0, 0)] - ); - } - Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let (shreds, _) = make_slot_entries(2, 0, 1, /*merkle_variant:*/ true); + + // Write this shred to slot 2, should chain to slot 0, which we haven't received + // any shreds for + blockstore.insert_shreds(shreds, None, false).unwrap(); + 
let mut repair_weight = RepairWeight::new(0); + + // Check that repair tries to patch the empty slot + assert_eq!( + repair_weight.get_best_weighted_repairs( + &blockstore, + &HashMap::new(), + &EpochSchedule::default(), + MAX_ORPHANS, + MAX_REPAIR_LENGTH, + MAX_UNKNOWN_LAST_INDEX_REPAIRS, + MAX_CLOSEST_COMPLETION_REPAIRS, + &mut RepairTiming::default(), + &mut BestRepairsStats::default(), + ), + vec![ShredRepairType::HighestShred(0, 0)] + ); } #[test] pub fn test_generate_repairs() { - let blockstore_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&blockstore_path).unwrap(); - - let nth = 3; - let num_slots = 2; - - // Create some shreds - let (mut shreds, _) = make_many_slot_entries(0, num_slots, 150); - let num_shreds = shreds.len() as u64; - let num_shreds_per_slot = num_shreds / num_slots; - - // write every nth shred - let mut shreds_to_write = vec![]; - let mut missing_indexes_per_slot = vec![]; - for i in (0..num_shreds).rev() { - let index = i % num_shreds_per_slot; - // get_best_repair_shreds only returns missing shreds in - // between shreds received; So this should either insert the - // last shred in each slot, or exclude missing shreds after the - // last inserted shred from expected repairs. 
- if index % nth == 0 || index + 1 == num_shreds_per_slot { - shreds_to_write.insert(0, shreds.remove(i as usize)); - } else if i < num_shreds_per_slot { - missing_indexes_per_slot.insert(0, index); - } + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let nth = 3; + let num_slots = 2; + + // Create some shreds + let (mut shreds, _) = make_many_slot_entries(0, num_slots, 150); + let num_shreds = shreds.len() as u64; + let num_shreds_per_slot = num_shreds / num_slots; + + // write every nth shred + let mut shreds_to_write = vec![]; + let mut missing_indexes_per_slot = vec![]; + for i in (0..num_shreds).rev() { + let index = i % num_shreds_per_slot; + // get_best_repair_shreds only returns missing shreds in + // between shreds received; So this should either insert the + // last shred in each slot, or exclude missing shreds after the + // last inserted shred from expected repairs. + if index % nth == 0 || index + 1 == num_shreds_per_slot { + shreds_to_write.insert(0, shreds.remove(i as usize)); + } else if i < num_shreds_per_slot { + missing_indexes_per_slot.insert(0, index); } - blockstore - .insert_shreds(shreds_to_write, None, false) - .unwrap(); - let expected: Vec = (0..num_slots) - .flat_map(|slot| { - missing_indexes_per_slot - .iter() - .map(move |shred_index| ShredRepairType::Shred(slot, *shred_index)) - }) - .collect(); - - let mut repair_weight = RepairWeight::new(0); - sleep_shred_deferment_period(); - assert_eq!( - repair_weight.get_best_weighted_repairs( - &blockstore, - &HashMap::new(), - &EpochSchedule::default(), - MAX_ORPHANS, - MAX_REPAIR_LENGTH, - MAX_UNKNOWN_LAST_INDEX_REPAIRS, - MAX_CLOSEST_COMPLETION_REPAIRS, - &mut RepairTiming::default(), - &mut BestRepairsStats::default(), - ), - expected - ); - - assert_eq!( - repair_weight.get_best_weighted_repairs( - &blockstore, - &HashMap::new(), - &EpochSchedule::default(), - MAX_ORPHANS, - expected.len() - 2, - 
MAX_UNKNOWN_LAST_INDEX_REPAIRS, - MAX_CLOSEST_COMPLETION_REPAIRS, - &mut RepairTiming::default(), - &mut BestRepairsStats::default(), - )[..], - expected[0..expected.len() - 2] - ); } - Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + blockstore + .insert_shreds(shreds_to_write, None, false) + .unwrap(); + let expected: Vec = (0..num_slots) + .flat_map(|slot| { + missing_indexes_per_slot + .iter() + .map(move |shred_index| ShredRepairType::Shred(slot, *shred_index)) + }) + .collect(); + + let mut repair_weight = RepairWeight::new(0); + sleep_shred_deferment_period(); + assert_eq!( + repair_weight.get_best_weighted_repairs( + &blockstore, + &HashMap::new(), + &EpochSchedule::default(), + MAX_ORPHANS, + MAX_REPAIR_LENGTH, + MAX_UNKNOWN_LAST_INDEX_REPAIRS, + MAX_CLOSEST_COMPLETION_REPAIRS, + &mut RepairTiming::default(), + &mut BestRepairsStats::default(), + ), + expected + ); + + assert_eq!( + repair_weight.get_best_weighted_repairs( + &blockstore, + &HashMap::new(), + &EpochSchedule::default(), + MAX_ORPHANS, + expected.len() - 2, + MAX_UNKNOWN_LAST_INDEX_REPAIRS, + MAX_CLOSEST_COMPLETION_REPAIRS, + &mut RepairTiming::default(), + &mut BestRepairsStats::default(), + )[..], + expected[0..expected.len() - 2] + ); } #[test] pub fn test_generate_highest_repair() { - let blockstore_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&blockstore_path).unwrap(); - - let num_entries_per_slot = 100; - - // Create some shreds - let (mut shreds, _) = make_slot_entries( - 0, // slot - 0, // parent_slot - num_entries_per_slot as u64, - true, // merkle_variant - ); - let num_shreds_per_slot = shreds.len() as u64; + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - // Remove last shred (which is also last in slot) so that slot is not complete - shreds.pop(); + let num_entries_per_slot = 100; - blockstore.insert_shreds(shreds, None, false).unwrap(); 
- - // We didn't get the last shred for this slot, so ask for the highest shred for that slot - let expected: Vec = - vec![ShredRepairType::HighestShred(0, num_shreds_per_slot - 1)]; - - sleep_shred_deferment_period(); - let mut repair_weight = RepairWeight::new(0); - assert_eq!( - repair_weight.get_best_weighted_repairs( - &blockstore, - &HashMap::new(), - &EpochSchedule::default(), - MAX_ORPHANS, - MAX_REPAIR_LENGTH, - MAX_UNKNOWN_LAST_INDEX_REPAIRS, - MAX_CLOSEST_COMPLETION_REPAIRS, - &mut RepairTiming::default(), - &mut BestRepairsStats::default(), - ), - expected - ); - } - Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + // Create some shreds + let (mut shreds, _) = make_slot_entries( + 0, // slot + 0, // parent_slot + num_entries_per_slot as u64, + true, // merkle_variant + ); + let num_shreds_per_slot = shreds.len() as u64; + + // Remove last shred (which is also last in slot) so that slot is not complete + shreds.pop(); + + blockstore.insert_shreds(shreds, None, false).unwrap(); + + // We didn't get the last shred for this slot, so ask for the highest shred for that slot + let expected: Vec = + vec![ShredRepairType::HighestShred(0, num_shreds_per_slot - 1)]; + + sleep_shred_deferment_period(); + let mut repair_weight = RepairWeight::new(0); + assert_eq!( + repair_weight.get_best_weighted_repairs( + &blockstore, + &HashMap::new(), + &EpochSchedule::default(), + MAX_ORPHANS, + MAX_REPAIR_LENGTH, + MAX_UNKNOWN_LAST_INDEX_REPAIRS, + MAX_CLOSEST_COMPLETION_REPAIRS, + &mut RepairTiming::default(), + &mut BestRepairsStats::default(), + ), + expected + ); } #[test] pub fn test_repair_range() { - let blockstore_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&blockstore_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let slots: Vec = vec![1, 3, 5, 7, 8]; - let num_entries_per_slot = max_ticks_per_n_shreds(1, 
None) + 1; + let slots: Vec = vec![1, 3, 5, 7, 8]; + let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1; - let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot); - for (mut slot_shreds, _) in shreds.into_iter() { - slot_shreds.remove(0); - blockstore.insert_shreds(slot_shreds, None, false).unwrap(); - } + let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot); + for (mut slot_shreds, _) in shreds.into_iter() { + slot_shreds.remove(0); + blockstore.insert_shreds(slot_shreds, None, false).unwrap(); + } - // Iterate through all possible combinations of start..end (inclusive on both - // sides of the range) - for start in 0..slots.len() { - for end in start..slots.len() { - let repair_slot_range = RepairSlotRange { - start: slots[start], - end: slots[end], - }; - let expected: Vec = (repair_slot_range.start - ..=repair_slot_range.end) - .map(|slot_index| { - if slots.contains(&slot_index) { - ShredRepairType::Shred(slot_index, 0) - } else { - ShredRepairType::HighestShred(slot_index, 0) - } - }) - .collect(); - - sleep_shred_deferment_period(); - assert_eq!( - RepairService::generate_repairs_in_range( - &blockstore, - std::usize::MAX, - &repair_slot_range, - ), - expected - ); - } + // Iterate through all possible combinations of start..end (inclusive on both + // sides of the range) + for start in 0..slots.len() { + for end in start..slots.len() { + let repair_slot_range = RepairSlotRange { + start: slots[start], + end: slots[end], + }; + let expected: Vec = (repair_slot_range.start + ..=repair_slot_range.end) + .map(|slot_index| { + if slots.contains(&slot_index) { + ShredRepairType::Shred(slot_index, 0) + } else { + ShredRepairType::HighestShred(slot_index, 0) + } + }) + .collect(); + + sleep_shred_deferment_period(); + assert_eq!( + RepairService::generate_repairs_in_range( + &blockstore, + std::usize::MAX, + &repair_slot_range, + ), + expected + ); } } - Blockstore::destroy(&blockstore_path).expect("Expected successful 
database destruction"); } #[test] pub fn test_repair_range_highest() { - let blockstore_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&blockstore_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let num_entries_per_slot = 10; + let num_entries_per_slot = 10; - let num_slots = 1; - let start = 5; + let num_slots = 1; + let start = 5; - // Create some shreds in slots 0..num_slots - for i in start..start + num_slots { - let parent = if i > 0 { i - 1 } else { 0 }; - let (shreds, _) = make_slot_entries( - i, // slot - parent, - num_entries_per_slot as u64, - true, // merkle_variant - ); - - blockstore.insert_shreds(shreds, None, false).unwrap(); - } - - let end = 4; - let expected: Vec = vec![ - ShredRepairType::HighestShred(end - 2, 0), - ShredRepairType::HighestShred(end - 1, 0), - ShredRepairType::HighestShred(end, 0), - ]; - - let repair_slot_range = RepairSlotRange { start: 2, end }; - - assert_eq!( - RepairService::generate_repairs_in_range( - &blockstore, - std::usize::MAX, - &repair_slot_range, - ), - expected + // Create some shreds in slots 0..num_slots + for i in start..start + num_slots { + let parent = if i > 0 { i - 1 } else { 0 }; + let (shreds, _) = make_slot_entries( + i, // slot + parent, + num_entries_per_slot as u64, + true, // merkle_variant ); + + blockstore.insert_shreds(shreds, None, false).unwrap(); } - Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + + let end = 4; + let expected: Vec = vec![ + ShredRepairType::HighestShred(end - 2, 0), + ShredRepairType::HighestShred(end - 1, 0), + ShredRepairType::HighestShred(end, 0), + ]; + + let repair_slot_range = RepairSlotRange { start: 2, end }; + + assert_eq!( + RepairService::generate_repairs_in_range( + &blockstore, + std::usize::MAX, + &repair_slot_range, + ), + expected + ); } #[test] pub fn test_generate_duplicate_repairs_for_slot() { - let 
blockstore_path = get_tmp_ledger_path!(); - let blockstore = Blockstore::open(&blockstore_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let dead_slot = 9; // SlotMeta doesn't exist, should make no repairs @@ -1203,8 +1184,8 @@ mod test { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); - let blockstore_path = get_tmp_ledger_path!(); - let blockstore = Blockstore::open(&blockstore_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let cluster_slots = ClusterSlots::default(); let cluster_info = Arc::new(new_test_cluster_info()); let identity_keypair = cluster_info.keypair().clone(); diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 8ab42c28829f1d..27fb63323d130f 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -1423,7 +1423,7 @@ mod tests { blockstore::make_many_slot_entries, blockstore_processor::fill_blockstore_slot_with_ticks, genesis_utils::{create_genesis_config, GenesisConfigInfo}, - get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, shred::{max_ticks_per_n_shreds, Shred, ShredFlags}, }, solana_perf::packet::{deserialize_from_with_limit, Packet}, @@ -1853,64 +1853,60 @@ mod tests { fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Nonce) { let recycler = PacketBatchRecycler::default(); solana_logger::setup(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let rv = ServeRepair::run_highest_window_request( - &recycler, - &socketaddr_any!(), - &blockstore, - 0, - 0, - nonce, - ); - assert!(rv.is_none()); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = 
Arc::new(Blockstore::open(ledger_path.path()).unwrap()); + let rv = ServeRepair::run_highest_window_request( + &recycler, + &socketaddr_any!(), + &blockstore, + 0, + 0, + nonce, + ); + assert!(rv.is_none()); - let _ = fill_blockstore_slot_with_ticks( - &blockstore, - max_ticks_per_n_shreds(1, None) + 1, - slot, - slot - num_slots + 1, - Hash::default(), - ); + let _ = fill_blockstore_slot_with_ticks( + &blockstore, + max_ticks_per_n_shreds(1, None) + 1, + slot, + slot - num_slots + 1, + Hash::default(), + ); - let index = 1; - let rv = ServeRepair::run_highest_window_request( - &recycler, - &socketaddr_any!(), - &blockstore, - slot, - index, - nonce, - ) - .expect("packets"); - let request = ShredRepairType::HighestShred(slot, index); - verify_responses(&request, rv.iter()); - - let rv: Vec = rv - .into_iter() - .filter_map(|p| { - assert_eq!(repair_response::nonce(p).unwrap(), nonce); - Shred::new_from_serialized_shred(p.data(..).unwrap().to_vec()).ok() - }) - .collect(); - assert!(!rv.is_empty()); - let index = blockstore.meta(slot).unwrap().unwrap().received - 1; - assert_eq!(rv[0].index(), index as u32); - assert_eq!(rv[0].slot(), slot); - - let rv = ServeRepair::run_highest_window_request( - &recycler, - &socketaddr_any!(), - &blockstore, - slot, - index + 1, - nonce, - ); - assert!(rv.is_none()); - } + let index = 1; + let rv = ServeRepair::run_highest_window_request( + &recycler, + &socketaddr_any!(), + &blockstore, + slot, + index, + nonce, + ) + .expect("packets"); + let request = ShredRepairType::HighestShred(slot, index); + verify_responses(&request, rv.iter()); - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); + let rv: Vec = rv + .into_iter() + .filter_map(|p| { + assert_eq!(repair_response::nonce(p).unwrap(), nonce); + Shred::new_from_serialized_shred(p.data(..).unwrap().to_vec()).ok() + }) + .collect(); + assert!(!rv.is_empty()); + let index = blockstore.meta(slot).unwrap().unwrap().received - 1; + 
assert_eq!(rv[0].index(), index as u32); + assert_eq!(rv[0].slot(), slot); + + let rv = ServeRepair::run_highest_window_request( + &recycler, + &socketaddr_any!(), + &blockstore, + slot, + index + 1, + nonce, + ); + assert!(rv.is_none()); } #[test] @@ -1922,48 +1918,44 @@ mod tests { fn run_window_request(slot: Slot, nonce: Nonce) { let recycler = PacketBatchRecycler::default(); solana_logger::setup(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let rv = ServeRepair::run_window_request( - &recycler, - &socketaddr_any!(), - &blockstore, - slot, - 0, - nonce, - ); - assert!(rv.is_none()); - let shred = Shred::new_from_data(slot, 1, 1, &[], ShredFlags::empty(), 0, 2, 0); - - blockstore - .insert_shreds(vec![shred], None, false) - .expect("Expect successful ledger write"); - - let index = 1; - let rv = ServeRepair::run_window_request( - &recycler, - &socketaddr_any!(), - &blockstore, - slot, - index, - nonce, - ) - .expect("packets"); - let request = ShredRepairType::Shred(slot, index); - verify_responses(&request, rv.iter()); - let rv: Vec = rv - .into_iter() - .filter_map(|p| { - assert_eq!(repair_response::nonce(p).unwrap(), nonce); - Shred::new_from_serialized_shred(p.data(..).unwrap().to_vec()).ok() - }) - .collect(); - assert_eq!(rv[0].index(), 1); - assert_eq!(rv[0].slot(), slot); - } - - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); + let rv = ServeRepair::run_window_request( + &recycler, + &socketaddr_any!(), + &blockstore, + slot, + 0, + nonce, + ); + assert!(rv.is_none()); + let shred = Shred::new_from_data(slot, 1, 1, &[], ShredFlags::empty(), 0, 2, 0); + + blockstore + .insert_shreds(vec![shred], None, false) + .expect("Expect successful ledger write"); + + let index = 1; + let rv = 
ServeRepair::run_window_request( + &recycler, + &socketaddr_any!(), + &blockstore, + slot, + index, + nonce, + ) + .expect("packets"); + let request = ShredRepairType::Shred(slot, index); + verify_responses(&request, rv.iter()); + let rv: Vec = rv + .into_iter() + .filter_map(|p| { + assert_eq!(repair_response::nonce(p).unwrap(), nonce); + Shred::new_from_serialized_shred(p.data(..).unwrap().to_vec()).ok() + }) + .collect(); + assert_eq!(rv[0].index(), 1); + assert_eq!(rv[0].slot(), slot); } fn new_test_cluster_info() -> ClusterInfo { @@ -2095,122 +2087,114 @@ mod tests { fn run_orphan(slot: Slot, num_slots: u64, nonce: Nonce) { solana_logger::setup(); let recycler = PacketBatchRecycler::default(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let rv = - ServeRepair::run_orphan(&recycler, &socketaddr_any!(), &blockstore, slot, 0, nonce); - assert!(rv.is_none()); - - // Create slots [slot, slot + num_slots) with 5 shreds apiece - let (shreds, _) = make_many_slot_entries(slot, num_slots, 5); - - blockstore - .insert_shreds(shreds, None, false) - .expect("Expect successful ledger write"); - - // We don't have slot `slot + num_slots`, so we don't know how to service this request - let rv = ServeRepair::run_orphan( - &recycler, - &socketaddr_any!(), - &blockstore, - slot + num_slots, - 5, - nonce, - ); - assert!(rv.is_none()); - - // For a orphan request for `slot + num_slots - 1`, we should return the highest shreds - // from slots in the range [slot, slot + num_slots - 1] - let rv: Vec<_> = ServeRepair::run_orphan( - &recycler, - &socketaddr_any!(), - &blockstore, - slot + num_slots - 1, - 5, - nonce, - ) - .expect("run_orphan packets") - .iter() - .cloned() - .collect(); - - // Verify responses - let request = ShredRepairType::Orphan(slot); - verify_responses(&request, rv.iter()); - - let expected: Vec<_> = (slot..slot + num_slots) - .rev() - .filter_map(|slot| { - let index = 
blockstore.meta(slot).unwrap().unwrap().received - 1; - repair_response::repair_response_packet( - &blockstore, - slot, - index, - &socketaddr_any!(), - nonce, - ) - }) - .collect(); - assert_eq!(rv, expected); - } + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); + let rv = + ServeRepair::run_orphan(&recycler, &socketaddr_any!(), &blockstore, slot, 0, nonce); + assert!(rv.is_none()); + + // Create slots [slot, slot + num_slots) with 5 shreds apiece + let (shreds, _) = make_many_slot_entries(slot, num_slots, 5); + + blockstore + .insert_shreds(shreds, None, false) + .expect("Expect successful ledger write"); + + // We don't have slot `slot + num_slots`, so we don't know how to service this request + let rv = ServeRepair::run_orphan( + &recycler, + &socketaddr_any!(), + &blockstore, + slot + num_slots, + 5, + nonce, + ); + assert!(rv.is_none()); + + // For a orphan request for `slot + num_slots - 1`, we should return the highest shreds + // from slots in the range [slot, slot + num_slots - 1] + let rv: Vec<_> = ServeRepair::run_orphan( + &recycler, + &socketaddr_any!(), + &blockstore, + slot + num_slots - 1, + 5, + nonce, + ) + .expect("run_orphan packets") + .iter() + .cloned() + .collect(); - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); + // Verify responses + let request = ShredRepairType::Orphan(slot); + verify_responses(&request, rv.iter()); + + let expected: Vec<_> = (slot..slot + num_slots) + .rev() + .filter_map(|slot| { + let index = blockstore.meta(slot).unwrap().unwrap().received - 1; + repair_response::repair_response_packet( + &blockstore, + slot, + index, + &socketaddr_any!(), + nonce, + ) + }) + .collect(); + assert_eq!(rv, expected); } #[test] fn run_orphan_corrupted_shred_size() { solana_logger::setup(); let recycler = PacketBatchRecycler::default(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = 
Arc::new(Blockstore::open(&ledger_path).unwrap()); - // Create slots [1, 2] with 1 shred apiece - let (mut shreds, _) = make_many_slot_entries(1, 2, 1); - - assert_eq!(shreds[0].slot(), 1); - assert_eq!(shreds[0].index(), 0); - // TODO: The test previously relied on corrupting shred payload - // size which we no longer want to expose. Current test no longer - // covers packet size check in repair_response_packet_from_bytes. - shreds.remove(0); - blockstore - .insert_shreds(shreds, None, false) - .expect("Expect successful ledger write"); - let nonce = 42; - // Make sure repair response is corrupted - assert!(repair_response::repair_response_packet( - &blockstore, - 1, - 0, - &socketaddr_any!(), - nonce, - ) - .is_none()); - - // Orphan request for slot 2 should only return slot 1 since - // calling `repair_response_packet` on slot 1's shred will - // be corrupted - let rv: Vec<_> = - ServeRepair::run_orphan(&recycler, &socketaddr_any!(), &blockstore, 2, 5, nonce) - .expect("run_orphan packets") - .iter() - .cloned() - .collect(); - - // Verify responses - let expected = vec![repair_response::repair_response_packet( - &blockstore, - 2, - 0, - &socketaddr_any!(), - nonce, - ) - .unwrap()]; - assert_eq!(rv, expected); - } + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); + // Create slots [1, 2] with 1 shred apiece + let (mut shreds, _) = make_many_slot_entries(1, 2, 1); + + assert_eq!(shreds[0].slot(), 1); + assert_eq!(shreds[0].index(), 0); + // TODO: The test previously relied on corrupting shred payload + // size which we no longer want to expose. Current test no longer + // covers packet size check in repair_response_packet_from_bytes. 
+ shreds.remove(0); + blockstore + .insert_shreds(shreds, None, false) + .expect("Expect successful ledger write"); + let nonce = 42; + // Make sure repair response is corrupted + assert!(repair_response::repair_response_packet( + &blockstore, + 1, + 0, + &socketaddr_any!(), + nonce, + ) + .is_none()); + + // Orphan request for slot 2 should only return slot 1 since + // calling `repair_response_packet` on slot 1's shred will + // be corrupted + let rv: Vec<_> = + ServeRepair::run_orphan(&recycler, &socketaddr_any!(), &blockstore, 2, 5, nonce) + .expect("run_orphan packets") + .iter() + .cloned() + .collect(); - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); + // Verify responses + let expected = vec![repair_response::repair_response_packet( + &blockstore, + 2, + 0, + &socketaddr_any!(), + nonce, + ) + .unwrap()]; + assert_eq!(rv, expected); } #[test] @@ -2223,95 +2207,92 @@ mod tests { solana_logger::setup(); let recycler = PacketBatchRecycler::default(); - let ledger_path = get_tmp_ledger_path!(); - { - let slot = 0; - let num_slots = MAX_ANCESTOR_RESPONSES as u64; - let nonce = 10; + let ledger_path = get_tmp_ledger_path_auto_delete!(); - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); + let slot = 0; + let num_slots = MAX_ANCESTOR_RESPONSES as u64; + let nonce = 10; - // Create slots [slot, slot + num_slots) with 5 shreds apiece - let (shreds, _) = make_many_slot_entries(slot, num_slots, 5); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); - blockstore - .insert_shreds(shreds, None, false) - .expect("Expect successful ledger write"); + // Create slots [slot, slot + num_slots) with 5 shreds apiece + let (shreds, _) = make_many_slot_entries(slot, num_slots, 5); - // We don't have slot `slot + num_slots`, so we return empty - let rv = ServeRepair::run_ancestor_hashes( - &recycler, - &socketaddr_any!(), - &blockstore, - slot + num_slots, - nonce, - ) - .expect("run_ancestor_hashes 
packets"); - assert_eq!(rv.len(), 1); - let packet = &rv[0]; - let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet); - match ancestor_hashes_response { - AncestorHashesResponse::Hashes(hashes) => { - assert!(hashes.is_empty()); - } - _ => { - panic!("unexpected response: {:?}", &ancestor_hashes_response); - } - } + blockstore + .insert_shreds(shreds, None, false) + .expect("Expect successful ledger write"); - // `slot + num_slots - 1` is not marked duplicate confirmed so nothing should return - // empty - let rv = ServeRepair::run_ancestor_hashes( - &recycler, - &socketaddr_any!(), - &blockstore, - slot + num_slots - 1, - nonce, - ) - .expect("run_ancestor_hashes packets"); - assert_eq!(rv.len(), 1); - let packet = &rv[0]; - let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet); - match ancestor_hashes_response { - AncestorHashesResponse::Hashes(hashes) => { - assert!(hashes.is_empty()); - } - _ => { - panic!("unexpected response: {:?}", &ancestor_hashes_response); - } + // We don't have slot `slot + num_slots`, so we return empty + let rv = ServeRepair::run_ancestor_hashes( + &recycler, + &socketaddr_any!(), + &blockstore, + slot + num_slots, + nonce, + ) + .expect("run_ancestor_hashes packets"); + assert_eq!(rv.len(), 1); + let packet = &rv[0]; + let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet); + match ancestor_hashes_response { + AncestorHashesResponse::Hashes(hashes) => { + assert!(hashes.is_empty()); } + _ => { + panic!("unexpected response: {:?}", &ancestor_hashes_response); + } + } - // Set duplicate confirmed - let mut expected_ancestors = Vec::with_capacity(num_slots as usize); - expected_ancestors.resize(num_slots as usize, (0, Hash::default())); - for (i, duplicate_confirmed_slot) in (slot..slot + num_slots).enumerate() { - let frozen_hash = Hash::new_unique(); - expected_ancestors[num_slots as usize - i - 1] = - (duplicate_confirmed_slot, frozen_hash); - 
blockstore.insert_bank_hash(duplicate_confirmed_slot, frozen_hash, true); + // `slot + num_slots - 1` is not marked duplicate confirmed so nothing should return + // empty + let rv = ServeRepair::run_ancestor_hashes( + &recycler, + &socketaddr_any!(), + &blockstore, + slot + num_slots - 1, + nonce, + ) + .expect("run_ancestor_hashes packets"); + assert_eq!(rv.len(), 1); + let packet = &rv[0]; + let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet); + match ancestor_hashes_response { + AncestorHashesResponse::Hashes(hashes) => { + assert!(hashes.is_empty()); } - let rv = ServeRepair::run_ancestor_hashes( - &recycler, - &socketaddr_any!(), - &blockstore, - slot + num_slots - 1, - nonce, - ) - .expect("run_ancestor_hashes packets"); - assert_eq!(rv.len(), 1); - let packet = &rv[0]; - let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet); - match ancestor_hashes_response { - AncestorHashesResponse::Hashes(hashes) => { - assert_eq!(hashes, expected_ancestors); - } - _ => { - panic!("unexpected response: {:?}", &ancestor_hashes_response); - } + _ => { + panic!("unexpected response: {:?}", &ancestor_hashes_response); } } - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); + // Set duplicate confirmed + let mut expected_ancestors = Vec::with_capacity(num_slots as usize); + expected_ancestors.resize(num_slots as usize, (0, Hash::default())); + for (i, duplicate_confirmed_slot) in (slot..slot + num_slots).enumerate() { + let frozen_hash = Hash::new_unique(); + expected_ancestors[num_slots as usize - i - 1] = + (duplicate_confirmed_slot, frozen_hash); + blockstore.insert_bank_hash(duplicate_confirmed_slot, frozen_hash, true); + } + let rv = ServeRepair::run_ancestor_hashes( + &recycler, + &socketaddr_any!(), + &blockstore, + slot + num_slots - 1, + nonce, + ) + .expect("run_ancestor_hashes packets"); + assert_eq!(rv.len(), 1); + let packet = &rv[0]; + let ancestor_hashes_response = 
deserialize_ancestor_hashes_response(packet); + match ancestor_hashes_response { + AncestorHashesResponse::Hashes(hashes) => { + assert_eq!(hashes, expected_ancestors); + } + _ => { + panic!("unexpected response: {:?}", &ancestor_hashes_response); + } + } } #[test] diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 40483babbefef3..1e083e930b8d10 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -4078,7 +4078,7 @@ pub(crate) mod tests { blockstore::{entries_to_test_shreds, make_slot_entries, BlockstoreError}, create_new_tmp_ledger, genesis_utils::{create_genesis_config, create_genesis_config_with_leader}, - get_tmp_ledger_path, + get_tmp_ledger_path, get_tmp_ledger_path_auto_delete, shred::{Shred, ShredFlags, LEGACY_SHRED_DATA_CAPACITY}, }, solana_rpc::{ @@ -6405,9 +6405,10 @@ pub(crate) mod tests { let mut vote_simulator = VoteSimulator::new(1); vote_simulator.fill_bank_forks(forks, &HashMap::>::new(), true); let (bank_forks, mut progress) = (vote_simulator.bank_forks, vote_simulator.progress); - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Arc::new( - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), + Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"), ); let mut tower = Tower::new_for_tests(8, 2.0 / 3.0); @@ -6552,9 +6553,10 @@ pub(crate) mod tests { vote_simulator.fill_bank_forks(forks, &validator_votes, true); let (bank_forks, mut progress) = (vote_simulator.bank_forks, vote_simulator.progress); - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Arc::new( - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), + Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"), ); let mut tower = Tower::new_for_tests(8, 0.67); diff --git 
a/ledger/benches/blockstore.rs b/ledger/benches/blockstore.rs index 18f92bb60e201b..27296d412d7ab3 100644 --- a/ledger/benches/blockstore.rs +++ b/ledger/benches/blockstore.rs @@ -8,7 +8,7 @@ use { solana_entry::entry::{create_ticks, Entry}, solana_ledger::{ blockstore::{entries_to_test_shreds, Blockstore}, - get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, }, solana_sdk::{clock::Slot, hash::Hash}, std::path::Path, @@ -23,8 +23,6 @@ fn bench_write_shreds(bench: &mut Bencher, entries: Vec, ledger_path: &Pa let shreds = entries_to_test_shreds(&entries, 0, 0, true, 0, /*merkle_variant:*/ true); blockstore.insert_shreds(shreds, None, false).unwrap(); }); - - Blockstore::destroy(ledger_path).expect("Expected successful database destruction"); } // Insert some shreds into the ledger in preparation for read benchmarks @@ -59,28 +57,28 @@ fn setup_read_bench( #[bench] #[ignore] fn bench_write_small(bench: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); let num_entries = 32 * 1024; let entries = create_ticks(num_entries, 0, Hash::default()); - bench_write_shreds(bench, entries, &ledger_path); + bench_write_shreds(bench, entries, ledger_path.path()); } // Write big shreds to the ledger #[bench] #[ignore] fn bench_write_big(bench: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); let num_entries = 32 * 1024; let entries = create_ticks(num_entries, 0, Hash::default()); - bench_write_shreds(bench, entries, &ledger_path); + bench_write_shreds(bench, entries, ledger_path.path()); } #[bench] #[ignore] fn bench_read_sequential(bench: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); + Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"); 
// Insert some big and small shreds into the ledger let num_small_shreds = 32 * 1024; @@ -98,16 +96,14 @@ fn bench_read_sequential(bench: &mut Bencher) { let _ = blockstore.get_data_shred(slot, i % total_shreds); } }); - - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[bench] #[ignore] fn bench_read_random(bench: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); + Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"); // Insert some big and small shreds into the ledger let num_small_shreds = 32 * 1024; @@ -129,36 +125,32 @@ fn bench_read_random(bench: &mut Bencher) { let _ = blockstore.get_data_shred(slot, *i as u64); } }); - - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[bench] #[ignore] fn bench_insert_data_shred_small(bench: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); + Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"); let num_entries = 32 * 1024; let entries = create_ticks(num_entries, 0, Hash::default()); bench.iter(move || { let shreds = entries_to_test_shreds(&entries, 0, 0, true, 0, /*merkle_variant:*/ true); blockstore.insert_shreds(shreds, None, false).unwrap(); }); - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[bench] #[ignore] fn bench_insert_data_shred_big(bench: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); + 
Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"); let num_entries = 32 * 1024; let entries = create_ticks(num_entries, 0, Hash::default()); bench.iter(move || { let shreds = entries_to_test_shreds(&entries, 0, 0, true, 0, /*merkle_variant:*/ true); blockstore.insert_shreds(shreds, None, false).unwrap(); }); - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } diff --git a/ledger/benches/protobuf.rs b/ledger/benches/protobuf.rs index 8e462a84beb683..6ee7d934bfde72 100644 --- a/ledger/benches/protobuf.rs +++ b/ledger/benches/protobuf.rs @@ -7,7 +7,7 @@ use { solana_ledger::{ blockstore::Blockstore, blockstore_db::{columns as cf, LedgerColumn}, - get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, }, solana_runtime::bank::RewardType, solana_sdk::{clock::Slot, pubkey}, @@ -86,22 +86,22 @@ fn bench_read_rewards( #[bench] fn bench_serialize_write_bincode(bencher: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); - bench_write_rewards(bencher, &ledger_path, write_bincode_rewards); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + bench_write_rewards(bencher, ledger_path.path(), write_bincode_rewards); } #[bench] fn bench_serialize_write_protobuf(bencher: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); - bench_write_rewards(bencher, &ledger_path, write_protobuf_rewards); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + bench_write_rewards(bencher, ledger_path.path(), write_protobuf_rewards); } #[bench] fn bench_read_bincode(bencher: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); bench_read_rewards( bencher, - &ledger_path, + ledger_path.path(), write_bincode_rewards, read_bincode_rewards, ); @@ -109,10 +109,10 @@ fn bench_read_bincode(bencher: &mut Bencher) { #[bench] fn bench_read_protobuf(bencher: &mut Bencher) { - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = 
get_tmp_ledger_path_auto_delete!(); bench_read_rewards( bencher, - &ledger_path, + ledger_path.path(), write_protobuf_rewards, read_protobuf_rewards, ); diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 817c7548bd445b..a598e001fc8684 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -1112,7 +1112,9 @@ mod tests { super::*, bincode::serialize, crossbeam_channel::bounded, - solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta, get_tmp_ledger_path}, + solana_ledger::{ + blockstore::Blockstore, blockstore_meta::SlotMeta, get_tmp_ledger_path_auto_delete, + }, solana_perf::test_tx::test_tx, solana_sdk::{clock::DEFAULT_TICKS_PER_SLOT, hash::hash}, }; @@ -1120,1059 +1122,995 @@ mod tests { #[test] fn test_poh_recorder_no_zero_tick() { let prev_hash = Hash::default(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank, - Some((4, 4)), - DEFAULT_TICKS_PER_SLOT, - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::default()), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - poh_recorder.tick(); - assert_eq!(poh_recorder.tick_cache.len(), 1); - assert_eq!(poh_recorder.tick_cache[0].1, 1); - assert_eq!(poh_recorder.tick_height, 1); - } - Blockstore::destroy(&ledger_path).unwrap(); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank, + Some((4, 4)), + DEFAULT_TICKS_PER_SLOT, + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::default()), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + poh_recorder.tick(); + assert_eq!(poh_recorder.tick_cache.len(), 1); + assert_eq!(poh_recorder.tick_cache[0].1, 1); + assert_eq!(poh_recorder.tick_height, 1); } #[test] fn test_poh_recorder_tick_height_is_last_tick() { let prev_hash = Hash::default(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank, - Some((4, 4)), - DEFAULT_TICKS_PER_SLOT, - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::default()), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - poh_recorder.tick(); - poh_recorder.tick(); - assert_eq!(poh_recorder.tick_cache.len(), 2); - assert_eq!(poh_recorder.tick_cache[1].1, 2); - assert_eq!(poh_recorder.tick_height, 2); - } - Blockstore::destroy(&ledger_path).unwrap(); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank, + Some((4, 4)), + DEFAULT_TICKS_PER_SLOT, + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::default()), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + poh_recorder.tick(); + poh_recorder.tick(); + assert_eq!(poh_recorder.tick_cache.len(), 2); + assert_eq!(poh_recorder.tick_cache[1].1, 2); + assert_eq!(poh_recorder.tick_height, 2); } #[test] fn test_poh_recorder_reset_clears_cache() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - Hash::default(), - bank0.clone(), - Some((4, 4)), - DEFAULT_TICKS_PER_SLOT, - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::default()), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - poh_recorder.tick(); - assert_eq!(poh_recorder.tick_cache.len(), 1); - poh_recorder.reset(bank0, Some((4, 4))); - assert_eq!(poh_recorder.tick_cache.len(), 0); - } - Blockstore::destroy(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + Hash::default(), + bank0.clone(), + Some((4, 4)), + DEFAULT_TICKS_PER_SLOT, + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::default()), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + poh_recorder.tick(); + assert_eq!(poh_recorder.tick_cache.len(), 1); + poh_recorder.reset(bank0, Some((4, 4))); + assert_eq!(poh_recorder.tick_cache.len(), 0); } #[test] fn test_poh_recorder_clear() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank.clone(), - Some((4, 4)), - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank.last_blockhash(); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - poh_recorder.set_bank_for_test(bank); - assert!(poh_recorder.working_bank.is_some()); - poh_recorder.clear_bank(); - assert!(poh_recorder.working_bank.is_none()); - } - Blockstore::destroy(&ledger_path).unwrap(); + poh_recorder.set_bank_for_test(bank); + assert!(poh_recorder.working_bank.is_some()); + poh_recorder.clear_bank(); + assert!(poh_recorder.working_bank.is_none()); } #[test] fn test_poh_recorder_tick_sent_after_min() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank0.last_blockhash(); - let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank0.clone(), - Some((4, 4)), - bank0.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - - bank0.fill_bank_with_ticks_for_tests(); - let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); - - // Set a working bank - poh_recorder.set_bank_for_test(bank1.clone()); - - // Tick until poh_recorder.tick_height == working bank's min_tick_height - let num_new_ticks = bank1.tick_height() - poh_recorder.tick_height(); - println!("{} {}", bank1.tick_height(), poh_recorder.tick_height()); - assert!(num_new_ticks > 0); - for _ in 0..num_new_ticks { - poh_recorder.tick(); - } + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank0.last_blockhash(); + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank0.clone(), + Some((4, 4)), + bank0.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - // Check that poh_recorder.tick_height == working bank's min_tick_height - let min_tick_height = poh_recorder.working_bank.as_ref().unwrap().min_tick_height; - assert_eq!(min_tick_height, bank1.tick_height()); - assert_eq!(poh_recorder.tick_height(), min_tick_height); + bank0.fill_bank_with_ticks_for_tests(); + let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); - //poh_recorder.tick height == working bank's min_tick_height, - // so no ticks should have been flushed yet - assert_eq!(poh_recorder.tick_cache.last().unwrap().1, num_new_ticks); - assert!(entry_receiver.try_recv().is_err()); + // Set a working bank + poh_recorder.set_bank_for_test(bank1.clone()); - // all ticks are sent after height > min - let tick_height_before = poh_recorder.tick_height(); + // Tick until poh_recorder.tick_height == working bank's min_tick_height + let num_new_ticks = bank1.tick_height() - poh_recorder.tick_height(); + println!("{} {}", bank1.tick_height(), poh_recorder.tick_height()); + assert!(num_new_ticks > 0); + for _ in 0..num_new_ticks { poh_recorder.tick(); - assert_eq!(poh_recorder.tick_height, tick_height_before + 1); - assert_eq!(poh_recorder.tick_cache.len(), 0); - let mut num_entries = 0; - while let Ok((wbank, (_entry, _tick_height))) = entry_receiver.try_recv() { - assert_eq!(wbank.slot(), bank1.slot()); - num_entries += 1; - } + } - // All the cached ticks, plus the new tick above should have been flushed - assert_eq!(num_entries, num_new_ticks + 1); + // Check that poh_recorder.tick_height == 
working bank's min_tick_height + let min_tick_height = poh_recorder.working_bank.as_ref().unwrap().min_tick_height; + assert_eq!(min_tick_height, bank1.tick_height()); + assert_eq!(poh_recorder.tick_height(), min_tick_height); + + //poh_recorder.tick height == working bank's min_tick_height, + // so no ticks should have been flushed yet + assert_eq!(poh_recorder.tick_cache.last().unwrap().1, num_new_ticks); + assert!(entry_receiver.try_recv().is_err()); + + // all ticks are sent after height > min + let tick_height_before = poh_recorder.tick_height(); + poh_recorder.tick(); + assert_eq!(poh_recorder.tick_height, tick_height_before + 1); + assert_eq!(poh_recorder.tick_cache.len(), 0); + let mut num_entries = 0; + while let Ok((wbank, (_entry, _tick_height))) = entry_receiver.try_recv() { + assert_eq!(wbank.slot(), bank1.slot()); + num_entries += 1; } - Blockstore::destroy(&ledger_path).unwrap(); + + // All the cached ticks, plus the new tick above should have been flushed + assert_eq!(num_entries, num_new_ticks + 1); } #[test] fn test_poh_recorder_tick_sent_upto_and_including_max() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank.clone(), - Some((4, 4)), - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - - // Tick further than the bank's max height - for _ in 0..bank.max_tick_height() + 1 { - poh_recorder.tick(); - } - assert_eq!( - poh_recorder.tick_cache.last().unwrap().1, - bank.max_tick_height() + 1 - ); - assert_eq!(poh_recorder.tick_height, bank.max_tick_height() + 1); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank.last_blockhash(); + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - poh_recorder.set_bank_for_test(bank.clone()); + // Tick further than the bank's max height + for _ in 0..bank.max_tick_height() + 1 { poh_recorder.tick(); + } + assert_eq!( + poh_recorder.tick_cache.last().unwrap().1, + bank.max_tick_height() + 1 + ); + assert_eq!(poh_recorder.tick_height, bank.max_tick_height() + 1); - assert_eq!(poh_recorder.tick_height, bank.max_tick_height() + 2); - assert!(poh_recorder.working_bank.is_none()); - let mut num_entries = 0; - while entry_receiver.try_recv().is_ok() { - num_entries += 1; - } + poh_recorder.set_bank_for_test(bank.clone()); + 
poh_recorder.tick(); - // Should only flush up to bank's max tick height, despite the tick cache - // having many more entries - assert_eq!(num_entries, bank.max_tick_height()); + assert_eq!(poh_recorder.tick_height, bank.max_tick_height() + 2); + assert!(poh_recorder.working_bank.is_none()); + let mut num_entries = 0; + while entry_receiver.try_recv().is_ok() { + num_entries += 1; } - Blockstore::destroy(&ledger_path).unwrap(); + + // Should only flush up to bank's max tick height, despite the tick cache + // having many more entries + assert_eq!(num_entries, bank.max_tick_height()); } #[test] fn test_poh_recorder_record_to_early() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank0.last_blockhash(); - let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank0.clone(), - Some((4, 4)), - bank0.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank0.last_blockhash(); + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank0.clone(), + Some((4, 4)), + bank0.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - bank0.fill_bank_with_ticks_for_tests(); - let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); - poh_recorder.set_bank_for_test(bank1.clone()); - // Let poh_recorder tick up to bank1.tick_height() - 1 - for _ in 0..bank1.tick_height() - 1 { - poh_recorder.tick() - } - let tx = test_tx(); - let h1 = hash(b"hello world!"); - - // We haven't yet reached the minimum tick height for the working bank, - // so record should fail - assert_matches!( - poh_recorder.record(bank1.slot(), h1, vec![tx.into()]), - Err(PohRecorderError::MinHeightNotReached) - ); - assert!(entry_receiver.try_recv().is_err()); + bank0.fill_bank_with_ticks_for_tests(); + let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); + poh_recorder.set_bank_for_test(bank1.clone()); + // Let poh_recorder tick up to bank1.tick_height() - 1 + for _ in 0..bank1.tick_height() - 1 { + poh_recorder.tick() } - Blockstore::destroy(&ledger_path).unwrap(); + let tx = test_tx(); + let h1 = hash(b"hello world!"); + + // We haven't yet reached the minimum tick height for the working bank, + // so record should fail + assert_matches!( + poh_recorder.record(bank1.slot(), h1, vec![tx.into()]), + Err(PohRecorderError::MinHeightNotReached) + ); + assert!(entry_receiver.try_recv().is_err()); } #[test] fn test_poh_recorder_record_bad_slot() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, 
.. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank.clone(), - Some((4, 4)), - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank.last_blockhash(); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - poh_recorder.set_bank_for_test(bank.clone()); - let tx = test_tx(); - let h1 = hash(b"hello world!"); + poh_recorder.set_bank_for_test(bank.clone()); + let tx = test_tx(); + let h1 = hash(b"hello world!"); - // Fulfills min height criteria for a successful record - assert_eq!( - poh_recorder.tick_height(), - poh_recorder.working_bank.as_ref().unwrap().min_tick_height - ); + // Fulfills min height criteria for a successful record + assert_eq!( + poh_recorder.tick_height(), + poh_recorder.working_bank.as_ref().unwrap().min_tick_height + ); - // However we hand over a bad slot so record fails - let bad_slot = bank.slot() + 1; - assert_matches!( - poh_recorder.record(bad_slot, h1, vec![tx.into()]), - Err(PohRecorderError::MaxHeightReached) - ); - } - Blockstore::destroy(&ledger_path).unwrap(); + // However we hand over a bad slot so record fails + let bad_slot = 
bank.slot() + 1; + assert_matches!( + poh_recorder.record(bad_slot, h1, vec![tx.into()]), + Err(PohRecorderError::MaxHeightReached) + ); } #[test] fn test_poh_recorder_record_at_min_passes() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank0.last_blockhash(); - let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank0.clone(), - Some((4, 4)), - bank0.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank0.last_blockhash(); + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank0.clone(), + Some((4, 4)), + bank0.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - bank0.fill_bank_with_ticks_for_tests(); - let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); - poh_recorder.set_bank_for_test(bank1.clone()); + bank0.fill_bank_with_ticks_for_tests(); + let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); + poh_recorder.set_bank_for_test(bank1.clone()); - // Record up to exactly min tick height - let min_tick_height = poh_recorder.working_bank.as_ref().unwrap().min_tick_height; - while poh_recorder.tick_height() < min_tick_height { - poh_recorder.tick(); - } + // Record up to exactly min tick height + let min_tick_height = poh_recorder.working_bank.as_ref().unwrap().min_tick_height; + while poh_recorder.tick_height() < min_tick_height { + poh_recorder.tick(); + } - assert_eq!(poh_recorder.tick_cache.len() as u64, min_tick_height); - - // Check record succeeds on boundary condition where - // poh_recorder.tick height == poh_recorder.working_bank.min_tick_height - assert_eq!(poh_recorder.tick_height, min_tick_height); - let tx = test_tx(); - let h1 = hash(b"hello world!"); - assert!(poh_recorder - .record(bank1.slot(), h1, vec![tx.into()]) - .is_ok()); - assert_eq!(poh_recorder.tick_cache.len(), 0); - - //tick in the cache + entry - for _ in 0..min_tick_height { - let (_bank, (e, _tick_height)) = entry_receiver.recv().unwrap(); - assert!(e.is_tick()); - } + assert_eq!(poh_recorder.tick_cache.len() as u64, min_tick_height); + // Check record succeeds on boundary condition where + // poh_recorder.tick height == 
poh_recorder.working_bank.min_tick_height + assert_eq!(poh_recorder.tick_height, min_tick_height); + let tx = test_tx(); + let h1 = hash(b"hello world!"); + assert!(poh_recorder + .record(bank1.slot(), h1, vec![tx.into()]) + .is_ok()); + assert_eq!(poh_recorder.tick_cache.len(), 0); + + //tick in the cache + entry + for _ in 0..min_tick_height { let (_bank, (e, _tick_height)) = entry_receiver.recv().unwrap(); - assert!(!e.is_tick()); + assert!(e.is_tick()); } - Blockstore::destroy(&ledger_path).unwrap(); + + let (_bank, (e, _tick_height)) = entry_receiver.recv().unwrap(); + assert!(!e.is_tick()); } #[test] fn test_poh_recorder_record_at_max_fails() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank.clone(), - Some((4, 4)), - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank.last_blockhash(); + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - poh_recorder.set_bank_for_test(bank.clone()); - let num_ticks_to_max = bank.max_tick_height() - poh_recorder.tick_height; - for _ in 0..num_ticks_to_max { - poh_recorder.tick(); - } - let tx = test_tx(); - let h1 = hash(b"hello world!"); - assert!(poh_recorder - .record(bank.slot(), h1, vec![tx.into()]) - .is_err()); - for _ in 0..num_ticks_to_max { - let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); - assert!(entry.is_tick()); - } + poh_recorder.set_bank_for_test(bank.clone()); + let num_ticks_to_max = bank.max_tick_height() - poh_recorder.tick_height; + for _ in 0..num_ticks_to_max { + poh_recorder.tick(); + } + let tx = test_tx(); + let h1 = hash(b"hello world!"); + assert!(poh_recorder + .record(bank.slot(), h1, vec![tx.into()]) + .is_err()); + for _ in 0..num_ticks_to_max { + let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); + assert!(entry.is_tick()); } - Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_recorder_record_transaction_index() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank.clone(), - Some((4, 4)), - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank.last_blockhash(); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - poh_recorder.set_bank_with_transaction_index_for_test(bank.clone()); - poh_recorder.tick(); - assert_eq!( - poh_recorder - .working_bank - .as_ref() - .unwrap() - .transaction_index - .unwrap(), - 0 - ); + poh_recorder.set_bank_with_transaction_index_for_test(bank.clone()); + poh_recorder.tick(); + assert_eq!( + poh_recorder + .working_bank + .as_ref() + .unwrap() + .transaction_index + .unwrap(), + 0 + ); - let tx0 = test_tx(); - let tx1 = test_tx(); - let h1 = hash(b"hello world!"); - let record_result = poh_recorder - .record(bank.slot(), h1, vec![tx0.into(), tx1.into()]) + let tx0 = test_tx(); + let tx1 = test_tx(); + let h1 = hash(b"hello world!"); + let record_result = poh_recorder + .record(bank.slot(), h1, vec![tx0.into(), tx1.into()]) + .unwrap() + .unwrap(); + assert_eq!(record_result, 0); + assert_eq!( + poh_recorder + .working_bank + .as_ref() 
.unwrap() - .unwrap(); - assert_eq!(record_result, 0); - assert_eq!( - poh_recorder - .working_bank - .as_ref() - .unwrap() - .transaction_index - .unwrap(), - 2 - ); + .transaction_index + .unwrap(), + 2 + ); - let tx = test_tx(); - let h2 = hash(b"foobar"); - let record_result = poh_recorder - .record(bank.slot(), h2, vec![tx.into()]) + let tx = test_tx(); + let h2 = hash(b"foobar"); + let record_result = poh_recorder + .record(bank.slot(), h2, vec![tx.into()]) + .unwrap() + .unwrap(); + assert_eq!(record_result, 2); + assert_eq!( + poh_recorder + .working_bank + .as_ref() .unwrap() - .unwrap(); - assert_eq!(record_result, 2); - assert_eq!( - poh_recorder - .working_bank - .as_ref() - .unwrap() - .transaction_index - .unwrap(), - 3 - ); - } - Blockstore::destroy(&ledger_path).unwrap(); + .transaction_index + .unwrap(), + 3 + ); } #[test] fn test_poh_cache_on_disconnect() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank0.last_blockhash(); - let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank0.clone(), - Some((4, 4)), - bank0.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - - bank0.fill_bank_with_ticks_for_tests(); - let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); - poh_recorder.set_bank_for_test(bank1); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank0.last_blockhash(); + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank0.clone(), + Some((4, 4)), + bank0.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - // Check we can make two ticks without hitting min_tick_height - let remaining_ticks_to_min = - poh_recorder.working_bank.as_ref().unwrap().min_tick_height - - poh_recorder.tick_height(); - for _ in 0..remaining_ticks_to_min { - poh_recorder.tick(); - } - assert_eq!(poh_recorder.tick_height, remaining_ticks_to_min); - assert_eq!( - poh_recorder.tick_cache.len(), - remaining_ticks_to_min as usize - ); - assert!(poh_recorder.working_bank.is_some()); + bank0.fill_bank_with_ticks_for_tests(); + let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); + poh_recorder.set_bank_for_test(bank1); - // Drop entry receiver, and try to tick again. Because - // the reciever is closed, the ticks will not be drained from the cache, - // and the working bank will be cleared - drop(entry_receiver); + // Check we can make two ticks without hitting min_tick_height + let remaining_ticks_to_min = poh_recorder.working_bank.as_ref().unwrap().min_tick_height + - poh_recorder.tick_height(); + for _ in 0..remaining_ticks_to_min { poh_recorder.tick(); - - // Check everything is cleared - assert!(poh_recorder.working_bank.is_none()); - // Extra +1 for the tick that happened after the drop of the entry receiver. 
- assert_eq!( - poh_recorder.tick_cache.len(), - remaining_ticks_to_min as usize + 1 - ); } - Blockstore::destroy(&ledger_path).unwrap(); + assert_eq!(poh_recorder.tick_height, remaining_ticks_to_min); + assert_eq!( + poh_recorder.tick_cache.len(), + remaining_ticks_to_min as usize + ); + assert!(poh_recorder.working_bank.is_some()); + + // Drop entry receiver, and try to tick again. Because + // the reciever is closed, the ticks will not be drained from the cache, + // and the working bank will be cleared + drop(entry_receiver); + poh_recorder.tick(); + + // Check everything is cleared + assert!(poh_recorder.working_bank.is_none()); + // Extra +1 for the tick that happened after the drop of the entry receiver. + assert_eq!( + poh_recorder.tick_cache.len(), + remaining_ticks_to_min as usize + 1 + ); } #[test] fn test_reset_current() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - Hash::default(), - bank.clone(), - Some((4, 4)), - DEFAULT_TICKS_PER_SLOT, - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::default()), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - poh_recorder.tick(); - poh_recorder.tick(); - assert_eq!(poh_recorder.tick_cache.len(), 2); - poh_recorder.reset(bank, Some((4, 4))); - assert_eq!(poh_recorder.tick_cache.len(), 0); - } - Blockstore::destroy(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + Hash::default(), + bank.clone(), + Some((4, 4)), + DEFAULT_TICKS_PER_SLOT, + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::default()), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + poh_recorder.tick(); + poh_recorder.tick(); + assert_eq!(poh_recorder.tick_cache.len(), 2); + poh_recorder.reset(bank, Some((4, 4))); + assert_eq!(poh_recorder.tick_cache.len(), 0); } #[test] fn test_reset_with_cached() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - Hash::default(), - bank.clone(), - Some((4, 4)), - DEFAULT_TICKS_PER_SLOT, - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::default()), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - poh_recorder.tick(); - poh_recorder.tick(); - assert_eq!(poh_recorder.tick_cache.len(), 2); - poh_recorder.reset(bank, Some((4, 4))); - assert_eq!(poh_recorder.tick_cache.len(), 0); - } - Blockstore::destroy(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + Hash::default(), + bank.clone(), + Some((4, 4)), + DEFAULT_TICKS_PER_SLOT, + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::default()), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + poh_recorder.tick(); + poh_recorder.tick(); + assert_eq!(poh_recorder.tick_cache.len(), 2); + poh_recorder.reset(bank, Some((4, 4))); + assert_eq!(poh_recorder.tick_cache.len(), 0); } #[test] fn test_reset_to_new_value() { solana_logger::setup(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - Hash::default(), - bank.clone(), - Some((4, 4)), - DEFAULT_TICKS_PER_SLOT, - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::default()), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - poh_recorder.tick(); - poh_recorder.tick(); - poh_recorder.tick(); - poh_recorder.tick(); - assert_eq!(poh_recorder.tick_cache.len(), 4); - assert_eq!(poh_recorder.tick_height, 4); - poh_recorder.reset(bank, Some((4, 4))); // parent slot 0 implies tick_height of 3 - assert_eq!(poh_recorder.tick_cache.len(), 0); - poh_recorder.tick(); - assert_eq!(poh_recorder.tick_height, DEFAULT_TICKS_PER_SLOT + 1); - } - Blockstore::destroy(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + Hash::default(), + bank.clone(), + Some((4, 4)), + DEFAULT_TICKS_PER_SLOT, + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::default()), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + poh_recorder.tick(); + poh_recorder.tick(); + poh_recorder.tick(); + poh_recorder.tick(); + assert_eq!(poh_recorder.tick_cache.len(), 4); + assert_eq!(poh_recorder.tick_height, 4); + poh_recorder.reset(bank, Some((4, 4))); // parent slot 0 implies tick_height of 3 + assert_eq!(poh_recorder.tick_cache.len(), 0); + poh_recorder.tick(); + assert_eq!(poh_recorder.tick_height, DEFAULT_TICKS_PER_SLOT + 1); } #[test] fn test_reset_clear_bank() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + Hash::default(), + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + + poh_recorder.set_bank_for_test(bank.clone()); + assert_eq!(bank.slot(), 0); + poh_recorder.reset(bank, Some((4, 4))); + assert!(poh_recorder.working_bank.is_none()); + } + + #[test] + pub fn test_clear_signal() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (sender, receiver) = bounded(1); + let (mut poh_recorder, _entry_receiver, _record_receiver) = + PohRecorder::new_with_clear_signal( 0, Hash::default(), bank.clone(), - Some((4, 4)), + None, bank.ticks_per_slot(), &Pubkey::default(), Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + Some(sender), + &Arc::new(LeaderScheduleCache::default()), &PohConfig::default(), + None, Arc::new(AtomicBool::default()), ); - - poh_recorder.set_bank_for_test(bank.clone()); - assert_eq!(bank.slot(), 0); - poh_recorder.reset(bank, Some((4, 4))); - assert!(poh_recorder.working_bank.is_none()); - } - Blockstore::destroy(&ledger_path).unwrap(); - } - - #[test] - pub fn test_clear_signal() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let (sender, receiver) = bounded(1); - let (mut poh_recorder, _entry_receiver, _record_receiver) = - PohRecorder::new_with_clear_signal( - 0, - Hash::default(), - bank.clone(), - None, - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - Some(sender), - &Arc::new(LeaderScheduleCache::default()), - &PohConfig::default(), - None, - Arc::new(AtomicBool::default()), - ); - poh_recorder.set_bank_for_test(bank); - poh_recorder.clear_bank(); - assert!(receiver.try_recv().is_ok()); - } - Blockstore::destroy(&ledger_path).unwrap(); + poh_recorder.set_bank_for_test(bank); + poh_recorder.clear_bank(); + assert!(receiver.try_recv().is_ok()); } #[test] fn test_poh_recorder_record_sets_start_slot() { solana_logger::setup(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let ticks_per_slot = 5; - let GenesisConfigInfo { - mut genesis_config, .. - } = create_genesis_config(2); - genesis_config.ticks_per_slot = ticks_per_slot; - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - - let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank.clone(), - Some((4, 4)), - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let ticks_per_slot = 5; + let GenesisConfigInfo { + mut genesis_config, .. 
+ } = create_genesis_config(2); + genesis_config.ticks_per_slot = ticks_per_slot; + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - poh_recorder.set_bank_for_test(bank.clone()); + let prev_hash = bank.last_blockhash(); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - // Simulate ticking much further than working_bank.max_tick_height - let max_tick_height = poh_recorder.working_bank.as_ref().unwrap().max_tick_height; - for _ in 0..3 * max_tick_height { - poh_recorder.tick(); - } + poh_recorder.set_bank_for_test(bank.clone()); - let tx = test_tx(); - let h1 = hash(b"hello world!"); - assert!(poh_recorder - .record(bank.slot(), h1, vec![tx.into()]) - .is_err()); - assert!(poh_recorder.working_bank.is_none()); - - // Even thought we ticked much further than working_bank.max_tick_height, - // the `start_slot` is still the slot of the last workign bank set by - // the earlier call to `poh_recorder.set_bank()` - assert_eq!(poh_recorder.start_slot(), bank.slot()); + // Simulate ticking much further than working_bank.max_tick_height + let max_tick_height = poh_recorder.working_bank.as_ref().unwrap().max_tick_height; + for _ in 0..3 * max_tick_height { + poh_recorder.tick(); } - Blockstore::destroy(&ledger_path).unwrap(); + + let tx = test_tx(); + let h1 = hash(b"hello world!"); + assert!(poh_recorder + .record(bank.slot(), h1, vec![tx.into()]) + .is_err()); + assert!(poh_recorder.working_bank.is_none()); + + // Even thought we ticked much further than working_bank.max_tick_height, + // the `start_slot` is still the slot of the last workign bank set by + // the earlier call to `poh_recorder.set_bank()` + assert_eq!(poh_recorder.start_slot(), bank.slot()); } #[test] fn 
test_reached_leader_tick() { solana_logger::setup(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank.last_blockhash(); - let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank.clone(), - None, - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &leader_schedule_cache, - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank.last_blockhash(); + let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + None, + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &leader_schedule_cache, + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - let bootstrap_validator_id = leader_schedule_cache.slot_leader_at(0, None).unwrap(); + let bootstrap_validator_id = leader_schedule_cache.slot_leader_at(0, None).unwrap(); - assert!(poh_recorder.reached_leader_tick(0)); + assert!(poh_recorder.reached_leader_tick(0)); - let grace_ticks = bank.ticks_per_slot() * MAX_GRACE_SLOTS; - let new_tick_height = NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot(); - for _ in 0..new_tick_height { - poh_recorder.tick(); - } + let grace_ticks = bank.ticks_per_slot() * MAX_GRACE_SLOTS; + 
let new_tick_height = NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot(); + for _ in 0..new_tick_height { + poh_recorder.tick(); + } - poh_recorder.grace_ticks = grace_ticks; + poh_recorder.grace_ticks = grace_ticks; - // False, because the Poh was reset on slot 0, which - // is a block produced by the previous leader, so a grace - // period must be given - assert!(!poh_recorder.reached_leader_tick(new_tick_height + grace_ticks)); + // False, because the Poh was reset on slot 0, which + // is a block produced by the previous leader, so a grace + // period must be given + assert!(!poh_recorder.reached_leader_tick(new_tick_height + grace_ticks)); - // Tick `NUM_CONSECUTIVE_LEADER_SLOTS` more times - let new_tick_height = 2 * NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot(); - for _ in 0..new_tick_height { - poh_recorder.tick(); - } - // True, because - // 1) the Poh was reset on slot 0 - // 2) Our slot starts at 2 * NUM_CONSECUTIVE_LEADER_SLOTS, which means - // none of the previous leader's `NUM_CONSECUTIVE_LEADER_SLOTS` were slots - // this Poh built on (previous leader was on different fork). Thus, skip the - // grace period. - assert!(poh_recorder.reached_leader_tick(new_tick_height + grace_ticks)); - - // From the bootstrap validator's perspective, it should have reached - // the tick because the previous slot was also it's own slot (all slots - // belong to the bootstrap leader b/c it's the only staked node!), and - // validators don't give grace periods if previous slot was also their own. 
- poh_recorder.id = bootstrap_validator_id; - assert!(poh_recorder.reached_leader_tick(new_tick_height + grace_ticks)); + // Tick `NUM_CONSECUTIVE_LEADER_SLOTS` more times + let new_tick_height = 2 * NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot(); + for _ in 0..new_tick_height { + poh_recorder.tick(); } + // True, because + // 1) the Poh was reset on slot 0 + // 2) Our slot starts at 2 * NUM_CONSECUTIVE_LEADER_SLOTS, which means + // none of the previous leader's `NUM_CONSECUTIVE_LEADER_SLOTS` were slots + // this Poh built on (previous leader was on different fork). Thus, skip the + // grace period. + assert!(poh_recorder.reached_leader_tick(new_tick_height + grace_ticks)); + + // From the bootstrap validator's perspective, it should have reached + // the tick because the previous slot was also it's own slot (all slots + // belong to the bootstrap leader b/c it's the only staked node!), and + // validators don't give grace periods if previous slot was also their own. + poh_recorder.id = bootstrap_validator_id; + assert!(poh_recorder.reached_leader_tick(new_tick_height + grace_ticks)); } #[test] fn test_reached_leader_slot() { solana_logger::setup(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank0.last_blockhash(); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank0.clone(), - None, - bank0.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - - // Test that with no next leader slot, we don't reach the leader slot - assert_eq!( - poh_recorder.reached_leader_slot(), - PohLeaderStatus::NotReached - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank0.last_blockhash(); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank0.clone(), + None, + bank0.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank0)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - // Test that with no next leader slot in reset(), we don't reach the leader slot - assert_eq!(bank0.slot(), 0); - poh_recorder.reset(bank0.clone(), None); - assert_eq!( - poh_recorder.reached_leader_slot(), - PohLeaderStatus::NotReached - ); + // Test that with no next leader slot, we don't reach the leader slot + assert_eq!( + poh_recorder.reached_leader_slot(), + PohLeaderStatus::NotReached + ); - // Provide a leader slot one slot down - poh_recorder.reset(bank0.clone(), Some((2, 2))); + // Test that with no next leader slot in reset(), we don't reach the leader slot + assert_eq!(bank0.slot(), 0); + poh_recorder.reset(bank0.clone(), None); + assert_eq!( + poh_recorder.reached_leader_slot(), + 
PohLeaderStatus::NotReached + ); - let init_ticks = poh_recorder.tick_height(); + // Provide a leader slot one slot down + poh_recorder.reset(bank0.clone(), Some((2, 2))); - // Send one slot worth of ticks - for _ in 0..bank0.ticks_per_slot() { - poh_recorder.tick(); - } + let init_ticks = poh_recorder.tick_height(); - // Tick should be recorded - assert_eq!( - poh_recorder.tick_height(), - init_ticks + bank0.ticks_per_slot() - ); + // Send one slot worth of ticks + for _ in 0..bank0.ticks_per_slot() { + poh_recorder.tick(); + } - let parent_meta = SlotMeta { - received: 1, - ..SlotMeta::default() - }; - poh_recorder - .blockstore - .put_meta_bytes(0, &serialize(&parent_meta).unwrap()) - .unwrap(); - - // Test that we don't reach the leader slot because of grace ticks - assert_eq!( - poh_recorder.reached_leader_slot(), - PohLeaderStatus::NotReached - ); + // Tick should be recorded + assert_eq!( + poh_recorder.tick_height(), + init_ticks + bank0.ticks_per_slot() + ); - // reset poh now. we should immediately be leader - let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); - assert_eq!(bank1.slot(), 1); - poh_recorder.reset(bank1.clone(), Some((2, 2))); - assert_eq!( - poh_recorder.reached_leader_slot(), - PohLeaderStatus::Reached { - poh_slot: 2, - parent_slot: 1, - } - ); + let parent_meta = SlotMeta { + received: 1, + ..SlotMeta::default() + }; + poh_recorder + .blockstore + .put_meta_bytes(0, &serialize(&parent_meta).unwrap()) + .unwrap(); - // Now test that with grace ticks we can reach leader slot - // Set the leader slot one slot down - poh_recorder.reset(bank1.clone(), Some((3, 3))); + // Test that we don't reach the leader slot because of grace ticks + assert_eq!( + poh_recorder.reached_leader_slot(), + PohLeaderStatus::NotReached + ); - // Send one slot worth of ticks ("skips" slot 2) - for _ in 0..bank1.ticks_per_slot() { - poh_recorder.tick(); + // reset poh now. 
we should immediately be leader + let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); + assert_eq!(bank1.slot(), 1); + poh_recorder.reset(bank1.clone(), Some((2, 2))); + assert_eq!( + poh_recorder.reached_leader_slot(), + PohLeaderStatus::Reached { + poh_slot: 2, + parent_slot: 1, } + ); - // We are not the leader yet, as expected - assert_eq!( - poh_recorder.reached_leader_slot(), - PohLeaderStatus::NotReached - ); + // Now test that with grace ticks we can reach leader slot + // Set the leader slot one slot down + poh_recorder.reset(bank1.clone(), Some((3, 3))); - // Send the grace ticks - for _ in 0..bank1.ticks_per_slot() / GRACE_TICKS_FACTOR { - poh_recorder.tick(); - } + // Send one slot worth of ticks ("skips" slot 2) + for _ in 0..bank1.ticks_per_slot() { + poh_recorder.tick(); + } - // We should be the leader now - // without sending more ticks, we should be leader now - assert_eq!( - poh_recorder.reached_leader_slot(), - PohLeaderStatus::Reached { - poh_slot: 3, - parent_slot: 1, - } - ); + // We are not the leader yet, as expected + assert_eq!( + poh_recorder.reached_leader_slot(), + PohLeaderStatus::NotReached + ); - // Let's test that correct grace ticks are reported - // Set the leader slot one slot down - let bank2 = Arc::new(Bank::new_from_parent(bank1.clone(), &Pubkey::default(), 2)); - poh_recorder.reset(bank2.clone(), Some((4, 4))); + // Send the grace ticks + for _ in 0..bank1.ticks_per_slot() / GRACE_TICKS_FACTOR { + poh_recorder.tick(); + } - // send ticks for a slot - for _ in 0..bank1.ticks_per_slot() { - poh_recorder.tick(); + // We should be the leader now + // without sending more ticks, we should be leader now + assert_eq!( + poh_recorder.reached_leader_slot(), + PohLeaderStatus::Reached { + poh_slot: 3, + parent_slot: 1, } + ); - // We are not the leader yet, as expected - assert_eq!( - poh_recorder.reached_leader_slot(), - PohLeaderStatus::NotReached - ); - let bank3 = Arc::new(Bank::new_from_parent(bank2, 
&Pubkey::default(), 3)); - assert_eq!(bank3.slot(), 3); - poh_recorder.reset(bank3.clone(), Some((4, 4))); - - // without sending more ticks, we should be leader now - assert_eq!( - poh_recorder.reached_leader_slot(), - PohLeaderStatus::Reached { - poh_slot: 4, - parent_slot: 3, - } - ); + // Let's test that correct grace ticks are reported + // Set the leader slot one slot down + let bank2 = Arc::new(Bank::new_from_parent(bank1.clone(), &Pubkey::default(), 2)); + poh_recorder.reset(bank2.clone(), Some((4, 4))); - // Let's test that if a node overshoots the ticks for its target - // leader slot, reached_leader_slot() will return true, because it's overdue - // Set the leader slot one slot down - let bank4 = Arc::new(Bank::new_from_parent(bank3, &Pubkey::default(), 4)); - poh_recorder.reset(bank4.clone(), Some((5, 5))); + // send ticks for a slot + for _ in 0..bank1.ticks_per_slot() { + poh_recorder.tick(); + } + + // We are not the leader yet, as expected + assert_eq!( + poh_recorder.reached_leader_slot(), + PohLeaderStatus::NotReached + ); + let bank3 = Arc::new(Bank::new_from_parent(bank2, &Pubkey::default(), 3)); + assert_eq!(bank3.slot(), 3); + poh_recorder.reset(bank3.clone(), Some((4, 4))); - // Overshoot ticks for the slot - let overshoot_factor = 4; - for _ in 0..overshoot_factor * bank4.ticks_per_slot() { - poh_recorder.tick(); + // without sending more ticks, we should be leader now + assert_eq!( + poh_recorder.reached_leader_slot(), + PohLeaderStatus::Reached { + poh_slot: 4, + parent_slot: 3, } + ); - // We are overdue to lead - assert_eq!( - poh_recorder.reached_leader_slot(), - PohLeaderStatus::Reached { - poh_slot: 9, - parent_slot: 4, - } - ); + // Let's test that if a node overshoots the ticks for its target + // leader slot, reached_leader_slot() will return true, because it's overdue + // Set the leader slot one slot down + let bank4 = Arc::new(Bank::new_from_parent(bank3, &Pubkey::default(), 4)); + poh_recorder.reset(bank4.clone(), Some((5, 
5))); + + // Overshoot ticks for the slot + let overshoot_factor = 4; + for _ in 0..overshoot_factor * bank4.ticks_per_slot() { + poh_recorder.tick(); } - Blockstore::destroy(&ledger_path).unwrap(); + + // We are overdue to lead + assert_eq!( + poh_recorder.reached_leader_slot(), + PohLeaderStatus::Reached { + poh_slot: 9, + parent_slot: 4, + } + ); } #[test] fn test_would_be_leader_soon() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - prev_hash, - bank.clone(), - None, - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank.last_blockhash(); + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + None, + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - // Test that with no leader slot, we don't reach the leader tick - assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); + // Test that with no leader slot, we don't reach the leader tick + assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); - assert_eq!(bank.slot(), 0); - poh_recorder.reset(bank.clone(), None); + assert_eq!(bank.slot(), 0); + poh_recorder.reset(bank.clone(), None); - assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); + assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); - // We reset with leader slot after 3 slots - let bank_slot = bank.slot() + 3; - poh_recorder.reset(bank.clone(), Some((bank_slot, bank_slot))); + // We reset with leader slot after 3 slots + let bank_slot = bank.slot() + 3; + poh_recorder.reset(bank.clone(), Some((bank_slot, bank_slot))); - // Test that the node won't be leader in next 2 slots - assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); + // Test that the node won't be leader in next 2 slots + assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); - // Test that the node will be leader in next 3 slots - assert!(poh_recorder.would_be_leader(3 * bank.ticks_per_slot())); + // Test that the node will be leader in next 3 slots + assert!(poh_recorder.would_be_leader(3 * bank.ticks_per_slot())); - assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); + assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); - // Move the bank up a slot (so that max_tick_height > slot 0's 
tick_height) - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), 1)); - // If we set the working bank, the node should be leader within next 2 slots - poh_recorder.set_bank_for_test(bank.clone()); - assert!(poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); - } + // Move the bank up a slot (so that max_tick_height > slot 0's tick_height) + let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), 1)); + // If we set the working bank, the node should be leader within next 2 slots + poh_recorder.set_bank_for_test(bank.clone()); + assert!(poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); } #[test] fn test_flush_virtual_ticks() { - let ledger_path = get_tmp_ledger_path!(); - { - // test that virtual ticks are flushed into a newly set bank asap - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let genesis_hash = bank.last_blockhash(); - - let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - bank.last_blockhash(), - bank.clone(), - Some((2, 2)), - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - //create a new bank - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), 2)); - // add virtual ticks into poh for slots 0, 1, and 2 - for _ in 0..(bank.ticks_per_slot() * 3) { - poh_recorder.tick(); - } - poh_recorder.set_bank_for_test(bank.clone()); - assert!(!bank.is_hash_valid_for_age(&genesis_hash, 0)); - assert!(bank.is_hash_valid_for_age(&genesis_hash, 1)); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + // test that virtual ticks are flushed into a newly set bank asap + let blockstore = Blockstore::open(ledger_path.path()) + 
.expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let genesis_hash = bank.last_blockhash(); + + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + bank.last_blockhash(), + bank.clone(), + Some((2, 2)), + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + //create a new bank + let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), 2)); + // add virtual ticks into poh for slots 0, 1, and 2 + for _ in 0..(bank.ticks_per_slot() * 3) { + poh_recorder.tick(); } + poh_recorder.set_bank_for_test(bank.clone()); + assert!(!bank.is_hash_valid_for_age(&genesis_hash, 0)); + assert!(bank.is_hash_valid_for_age(&genesis_hash, 1)); } #[test] diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index 65806b54532744..a01c688a527aec 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -385,7 +385,7 @@ mod tests { solana_ledger::{ blockstore::Blockstore, genesis_utils::{create_genesis_config, GenesisConfigInfo}, - get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, }, solana_measure::measure::Measure, @@ -404,171 +404,168 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); let prev_hash = bank.last_blockhash(); - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path) - .expect("Expected to be able to open database ledger"); - - let default_target_tick_duration = - timing::duration_as_us(&PohConfig::default().target_tick_duration); - let target_tick_duration = Duration::from_micros(default_target_tick_duration); - let poh_config = PohConfig { - hashes_per_tick: Some(clock::DEFAULT_HASHES_PER_TICK), - target_tick_duration, - target_tick_count: None, - }; - let exit = Arc::new(AtomicBool::new(false)); - - let ticks_per_slot = bank.ticks_per_slot(); - let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let blockstore = Arc::new(blockstore); - let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( - bank.tick_height(), - prev_hash, - bank.clone(), - Some((4, 4)), - ticks_per_slot, - &Pubkey::default(), - blockstore, - &leader_schedule_cache, - &poh_config, - exit.clone(), - ); - let poh_recorder = Arc::new(RwLock::new(poh_recorder)); - let ticks_per_slot = bank.ticks_per_slot(); - let bank_slot = bank.slot(); - - // specify RUN_TIME to run in a benchmark-like mode - // to calibrate batch size - let run_time = std::env::var("RUN_TIME") - .map(|x| x.parse().unwrap()) - .unwrap_or(0); - let is_test_run = run_time == 0; - - let entry_producer = { - let poh_recorder = poh_recorder.clone(); - let exit = exit.clone(); - - Builder::new() - .name("solPohEntryProd".to_string()) - .spawn(move || { - let now = Instant::now(); - let mut total_us = 0; - let mut total_times = 0; - let h1 = hash(b"hello world!"); - let tx = VersionedTransaction::from(test_tx()); - loop { - // send some data - let mut time = Measure::start("record"); - let _ = poh_recorder.write().unwrap().record( - bank_slot, - h1, - vec![tx.clone()], - ); - time.stop(); - total_us += 
time.as_us(); - total_times += 1; - if is_test_run && thread_rng().gen_ratio(1, 4) { - sleep(Duration::from_millis(200)); - } - - if exit.load(Ordering::Relaxed) { - info!( - "spent:{}ms record: {}ms entries recorded: {}", - now.elapsed().as_millis(), - total_us / 1000, - total_times, - ); - break; - } + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + + let default_target_tick_duration = + timing::duration_as_us(&PohConfig::default().target_tick_duration); + let target_tick_duration = Duration::from_micros(default_target_tick_duration); + let poh_config = PohConfig { + hashes_per_tick: Some(clock::DEFAULT_HASHES_PER_TICK), + target_tick_duration, + target_tick_count: None, + }; + let exit = Arc::new(AtomicBool::new(false)); + + let ticks_per_slot = bank.ticks_per_slot(); + let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); + let blockstore = Arc::new(blockstore); + let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( + bank.tick_height(), + prev_hash, + bank.clone(), + Some((4, 4)), + ticks_per_slot, + &Pubkey::default(), + blockstore, + &leader_schedule_cache, + &poh_config, + exit.clone(), + ); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); + let ticks_per_slot = bank.ticks_per_slot(); + let bank_slot = bank.slot(); + + // specify RUN_TIME to run in a benchmark-like mode + // to calibrate batch size + let run_time = std::env::var("RUN_TIME") + .map(|x| x.parse().unwrap()) + .unwrap_or(0); + let is_test_run = run_time == 0; + + let entry_producer = { + let poh_recorder = poh_recorder.clone(); + let exit = exit.clone(); + + Builder::new() + .name("solPohEntryProd".to_string()) + .spawn(move || { + let now = Instant::now(); + let mut total_us = 0; + let mut total_times = 0; + let h1 = hash(b"hello world!"); + let tx = VersionedTransaction::from(test_tx()); + loop { + // send some data + 
let mut time = Measure::start("record"); + let _ = + poh_recorder + .write() + .unwrap() + .record(bank_slot, h1, vec![tx.clone()]); + time.stop(); + total_us += time.as_us(); + total_times += 1; + if is_test_run && thread_rng().gen_ratio(1, 4) { + sleep(Duration::from_millis(200)); } - }) - .unwrap() - }; - - let hashes_per_batch = std::env::var("HASHES_PER_BATCH") - .map(|x| x.parse().unwrap()) - .unwrap_or(DEFAULT_HASHES_PER_BATCH); - let poh_service = PohService::new( - poh_recorder.clone(), - &poh_config, - exit.clone(), - 0, - DEFAULT_PINNED_CPU_CORE, - hashes_per_batch, - record_receiver, - ); - poh_recorder.write().unwrap().set_bank_for_test(bank); - - // get some events - let mut hashes = 0; - let mut need_tick = true; - let mut need_entry = true; - let mut need_partial = true; - let mut num_ticks = 0; - - let time = Instant::now(); - while run_time != 0 || need_tick || need_entry || need_partial { - let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); - - if entry.is_tick() { - num_ticks += 1; - assert!( - entry.num_hashes <= poh_config.hashes_per_tick.unwrap(), - "{} <= {}", - entry.num_hashes, - poh_config.hashes_per_tick.unwrap() - ); - if entry.num_hashes == poh_config.hashes_per_tick.unwrap() { - need_tick = false; - } else { - need_partial = false; + if exit.load(Ordering::Relaxed) { + info!( + "spent:{}ms record: {}ms entries recorded: {}", + now.elapsed().as_millis(), + total_us / 1000, + total_times, + ); + break; + } } + }) + .unwrap() + }; - hashes += entry.num_hashes; - - assert_eq!(hashes, poh_config.hashes_per_tick.unwrap()); - - hashes = 0; + let hashes_per_batch = std::env::var("HASHES_PER_BATCH") + .map(|x| x.parse().unwrap()) + .unwrap_or(DEFAULT_HASHES_PER_BATCH); + let poh_service = PohService::new( + poh_recorder.clone(), + &poh_config, + exit.clone(), + 0, + DEFAULT_PINNED_CPU_CORE, + hashes_per_batch, + record_receiver, + ); + poh_recorder.write().unwrap().set_bank_for_test(bank); + + // get some events + let mut 
hashes = 0; + let mut need_tick = true; + let mut need_entry = true; + let mut need_partial = true; + let mut num_ticks = 0; + + let time = Instant::now(); + while run_time != 0 || need_tick || need_entry || need_partial { + let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); + + if entry.is_tick() { + num_ticks += 1; + assert!( + entry.num_hashes <= poh_config.hashes_per_tick.unwrap(), + "{} <= {}", + entry.num_hashes, + poh_config.hashes_per_tick.unwrap() + ); + + if entry.num_hashes == poh_config.hashes_per_tick.unwrap() { + need_tick = false; } else { - assert!(entry.num_hashes >= 1); - need_entry = false; - hashes += entry.num_hashes; + need_partial = false; } - if run_time != 0 { - if time.elapsed().as_millis() > run_time { - break; - } - } else { - assert!( - time.elapsed().as_secs() < 60, - "Test should not run for this long! {}s tick {} entry {} partial {}", - time.elapsed().as_secs(), - need_tick, - need_entry, - need_partial, - ); - } + hashes += entry.num_hashes; + + assert_eq!(hashes, poh_config.hashes_per_tick.unwrap()); + + hashes = 0; + } else { + assert!(entry.num_hashes >= 1); + need_entry = false; + hashes += entry.num_hashes; } - info!( - "target_tick_duration: {} ticks_per_slot: {}", - poh_config.target_tick_duration.as_nanos(), - ticks_per_slot - ); - let elapsed = time.elapsed(); - info!( - "{} ticks in {}ms {}us/tick", - num_ticks, - elapsed.as_millis(), - elapsed.as_micros() / num_ticks - ); - exit.store(true, Ordering::Relaxed); - poh_service.join().unwrap(); - entry_producer.join().unwrap(); + if run_time != 0 { + if time.elapsed().as_millis() > run_time { + break; + } + } else { + assert!( + time.elapsed().as_secs() < 60, + "Test should not run for this long! 
{}s tick {} entry {} partial {}", + time.elapsed().as_secs(), + need_tick, + need_entry, + need_partial, + ); + } } - Blockstore::destroy(&ledger_path).unwrap(); + info!( + "target_tick_duration: {} ticks_per_slot: {}", + poh_config.target_tick_duration.as_nanos(), + ticks_per_slot + ); + let elapsed = time.elapsed(); + info!( + "{} ticks in {}ms {}us/tick", + num_ticks, + elapsed.as_millis(), + elapsed.as_micros() / num_ticks + ); + + exit.store(true, Ordering::Relaxed); + poh_service.join().unwrap(); + entry_producer.join().unwrap(); } } diff --git a/rpc/src/cluster_tpu_info.rs b/rpc/src/cluster_tpu_info.rs index 5a692944fa6faf..ad35773216bdcb 100644 --- a/rpc/src/cluster_tpu_info.rs +++ b/rpc/src/cluster_tpu_info.rs @@ -108,7 +108,8 @@ mod test { super::*, solana_gossip::contact_info::ContactInfo, solana_ledger::{ - blockstore::Blockstore, get_tmp_ledger_path, leader_schedule_cache::LeaderScheduleCache, + blockstore::Blockstore, get_tmp_ledger_path_auto_delete, + leader_schedule_cache::LeaderScheduleCache, }, solana_runtime::{ bank::Bank, @@ -128,123 +129,120 @@ mod test { #[test] fn test_get_leader_tpus() { - let ledger_path = get_tmp_ledger_path!(); - { - let blockstore = Blockstore::open(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let validator_vote_keypairs0 = ValidatorVoteKeypairs::new_rand(); - let validator_vote_keypairs1 = ValidatorVoteKeypairs::new_rand(); - let validator_vote_keypairs2 = ValidatorVoteKeypairs::new_rand(); - let validator_keypairs = vec![ - &validator_vote_keypairs0, - &validator_vote_keypairs1, - &validator_vote_keypairs2, - ]; - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config_with_vote_accounts( - 1_000_000_000, - &validator_keypairs, - vec![10_000; 3], - ); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let validator_vote_keypairs0 = ValidatorVoteKeypairs::new_rand(); + let validator_vote_keypairs1 = ValidatorVoteKeypairs::new_rand(); + let validator_vote_keypairs2 = ValidatorVoteKeypairs::new_rand(); + let validator_keypairs = vec![ + &validator_vote_keypairs0, + &validator_vote_keypairs1, + &validator_vote_keypairs2, + ]; + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts( + 1_000_000_000, + &validator_keypairs, + vec![10_000; 3], + ); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let (poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( - 0, - bank.last_blockhash(), - bank.clone(), - Some((2, 2)), - bank.ticks_per_slot(), - &Pubkey::default(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); + let (poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + bank.last_blockhash(), + bank.clone(), + Some((2, 2)), + bank.ticks_per_slot(), + &Pubkey::default(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); - let node_keypair = Arc::new(Keypair::new()); - let cluster_info = Arc::new(ClusterInfo::new( - ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), - node_keypair, - SocketAddrSpace::Unspecified, - )); + let node_keypair = Arc::new(Keypair::new()); + let cluster_info = Arc::new(ClusterInfo::new( + ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()), + node_keypair, + SocketAddrSpace::Unspecified, + )); - let validator0_socket = ( - SocketAddr::from((Ipv4Addr::LOCALHOST, 1111)), - SocketAddr::from((Ipv4Addr::LOCALHOST, 1111 + QUIC_PORT_OFFSET)), - ); - let validator1_socket = ( - 
SocketAddr::from((Ipv4Addr::LOCALHOST, 2222)), - SocketAddr::from((Ipv4Addr::LOCALHOST, 2222 + QUIC_PORT_OFFSET)), - ); - let validator2_socket = ( - SocketAddr::from((Ipv4Addr::LOCALHOST, 3333)), - SocketAddr::from((Ipv4Addr::LOCALHOST, 3333 + QUIC_PORT_OFFSET)), - ); - let recent_peers: HashMap<_, _> = vec![ - ( - validator_vote_keypairs0.node_keypair.pubkey(), - validator0_socket, - ), - ( - validator_vote_keypairs1.node_keypair.pubkey(), - validator1_socket, - ), - ( - validator_vote_keypairs2.node_keypair.pubkey(), - validator2_socket, - ), - ] - .iter() - .cloned() - .collect(); - let leader_info = ClusterTpuInfo { - cluster_info, - poh_recorder: Arc::new(RwLock::new(poh_recorder)), - recent_peers: recent_peers.clone(), - }; + let validator0_socket = ( + SocketAddr::from((Ipv4Addr::LOCALHOST, 1111)), + SocketAddr::from((Ipv4Addr::LOCALHOST, 1111 + QUIC_PORT_OFFSET)), + ); + let validator1_socket = ( + SocketAddr::from((Ipv4Addr::LOCALHOST, 2222)), + SocketAddr::from((Ipv4Addr::LOCALHOST, 2222 + QUIC_PORT_OFFSET)), + ); + let validator2_socket = ( + SocketAddr::from((Ipv4Addr::LOCALHOST, 3333)), + SocketAddr::from((Ipv4Addr::LOCALHOST, 3333 + QUIC_PORT_OFFSET)), + ); + let recent_peers: HashMap<_, _> = vec![ + ( + validator_vote_keypairs0.node_keypair.pubkey(), + validator0_socket, + ), + ( + validator_vote_keypairs1.node_keypair.pubkey(), + validator1_socket, + ), + ( + validator_vote_keypairs2.node_keypair.pubkey(), + validator2_socket, + ), + ] + .iter() + .cloned() + .collect(); + let leader_info = ClusterTpuInfo { + cluster_info, + poh_recorder: Arc::new(RwLock::new(poh_recorder)), + recent_peers: recent_peers.clone(), + }; - let slot = bank.slot(); - let first_leader = - solana_ledger::leader_schedule_utils::slot_leader_at(slot, &bank).unwrap(); - assert_eq!( - leader_info.get_leader_tpus(1, Protocol::UDP), - vec![&recent_peers.get(&first_leader).unwrap().0] - ); + let slot = bank.slot(); + let first_leader = + 
solana_ledger::leader_schedule_utils::slot_leader_at(slot, &bank).unwrap(); + assert_eq!( + leader_info.get_leader_tpus(1, Protocol::UDP), + vec![&recent_peers.get(&first_leader).unwrap().0] + ); - let second_leader = solana_ledger::leader_schedule_utils::slot_leader_at( - slot + NUM_CONSECUTIVE_LEADER_SLOTS, - &bank, - ) - .unwrap(); - let mut expected_leader_sockets = vec![ - &recent_peers.get(&first_leader).unwrap().0, - &recent_peers.get(&second_leader).unwrap().0, - ]; - expected_leader_sockets.dedup(); - assert_eq!( - leader_info.get_leader_tpus(2, Protocol::UDP), - expected_leader_sockets - ); + let second_leader = solana_ledger::leader_schedule_utils::slot_leader_at( + slot + NUM_CONSECUTIVE_LEADER_SLOTS, + &bank, + ) + .unwrap(); + let mut expected_leader_sockets = vec![ + &recent_peers.get(&first_leader).unwrap().0, + &recent_peers.get(&second_leader).unwrap().0, + ]; + expected_leader_sockets.dedup(); + assert_eq!( + leader_info.get_leader_tpus(2, Protocol::UDP), + expected_leader_sockets + ); - let third_leader = solana_ledger::leader_schedule_utils::slot_leader_at( - slot + (2 * NUM_CONSECUTIVE_LEADER_SLOTS), - &bank, - ) - .unwrap(); - let mut expected_leader_sockets = vec![ - &recent_peers.get(&first_leader).unwrap().0, - &recent_peers.get(&second_leader).unwrap().0, - &recent_peers.get(&third_leader).unwrap().0, - ]; - expected_leader_sockets.dedup(); - assert_eq!( - leader_info.get_leader_tpus(3, Protocol::UDP), - expected_leader_sockets - ); + let third_leader = solana_ledger::leader_schedule_utils::slot_leader_at( + slot + (2 * NUM_CONSECUTIVE_LEADER_SLOTS), + &bank, + ) + .unwrap(); + let mut expected_leader_sockets = vec![ + &recent_peers.get(&first_leader).unwrap().0, + &recent_peers.get(&second_leader).unwrap().0, + &recent_peers.get(&third_leader).unwrap().0, + ]; + expected_leader_sockets.dedup(); + assert_eq!( + leader_info.get_leader_tpus(3, Protocol::UDP), + expected_leader_sockets + ); - for x in 4..8 { - 
assert!(leader_info.get_leader_tpus(x, Protocol::UDP).len() <= recent_peers.len()); - } + for x in 4..8 { + assert!(leader_info.get_leader_tpus(x, Protocol::UDP).len() <= recent_peers.len()); } - Blockstore::destroy(&ledger_path).unwrap(); } } diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 92822b342eb69d..9c8616b874308a 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -586,7 +586,7 @@ mod tests { crate::rpc::{create_validator_exit, tests::new_test_cluster_info}, solana_ledger::{ genesis_utils::{create_genesis_config, GenesisConfigInfo}, - get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, }, solana_rpc_client_api::config::RpcContextConfig, solana_runtime::bank::Bank, @@ -618,8 +618,8 @@ mod tests { solana_net_utils::find_available_port_in_range(ip_addr, (10000, 65535)).unwrap(), ); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); @@ -719,8 +719,8 @@ mod tests { #[test] fn test_is_file_get_path() { - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let bank_forks = create_bank_forks(); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); @@ -728,13 +728,13 @@ mod tests { let bank_forks = create_bank_forks(); let rrm = RpcRequestMiddleware::new( - ledger_path.clone(), + ledger_path.path().to_path_buf(), None, bank_forks.clone(), 
health.clone(), ); let rrm_with_snapshot_config = RpcRequestMiddleware::new( - ledger_path.clone(), + ledger_path.path().to_path_buf(), Some(SnapshotConfig::default()), bank_forks, health, @@ -829,15 +829,14 @@ mod tests { fn test_process_file_get() { let runtime = Runtime::new().unwrap(); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let genesis_path = ledger_path.join(DEFAULT_GENESIS_ARCHIVE); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); + let genesis_path = ledger_path.path().join(DEFAULT_GENESIS_ARCHIVE); let bank_forks = create_bank_forks(); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); - let rrm = RpcRequestMiddleware::new( - ledger_path.clone(), + ledger_path.path().to_path_buf(), None, bank_forks, RpcHealth::stub(optimistically_confirmed_bank, blockstore), @@ -872,7 +871,7 @@ mod tests { { std::fs::remove_file(&genesis_path).unwrap(); { - let mut file = std::fs::File::create(ledger_path.join("wrong")).unwrap(); + let mut file = std::fs::File::create(ledger_path.path().join("wrong")).unwrap(); file.write_all(b"wrong file").unwrap(); } symlink::symlink_file("wrong", &genesis_path).unwrap(); diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 6fca7d45035837..4a6ef24171ffce 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -1259,6 +1259,7 @@ pub(crate) mod tests { rpc_pubsub_service, }, serial_test::serial, + solana_ledger::get_tmp_ledger_path_auto_delete, solana_rpc_client_api::config::{ RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, @@ -1473,8 +1474,8 @@ pub(crate) mod tests { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let optimistically_confirmed_bank = 
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Blockstore::open(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let blockstore = Arc::new(blockstore); let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let max_complete_rewards_slot = Arc::new(AtomicU64::default()); @@ -1593,8 +1594,8 @@ pub(crate) mod tests { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Blockstore::open(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let blockstore = Arc::new(blockstore); let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let max_complete_rewards_slot = Arc::new(AtomicU64::default()); @@ -1711,8 +1712,8 @@ pub(crate) mod tests { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Blockstore::open(&ledger_path).unwrap(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let blockstore = Arc::new(blockstore); let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let max_complete_rewards_slot = Arc::new(AtomicU64::default()); diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index ccc4364891dd75..193efb69fa481f 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -230,7 +230,7 @@ pub(crate) mod tests { 
nonce_info::{NonceFull, NoncePartial}, rent_debits::RentDebits, }, - solana_ledger::{genesis_utils::create_genesis_config, get_tmp_ledger_path}, + solana_ledger::{genesis_utils::create_genesis_config, get_tmp_ledger_path_auto_delete}, solana_runtime::bank::{Bank, TransactionBalancesSet}, solana_sdk::{ account_utils::StateMut, @@ -339,9 +339,9 @@ pub(crate) mod tests { let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); let (transaction_status_sender, transaction_status_receiver) = unbounded(); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); let blockstore = Arc::new(blockstore); let transaction = build_test_transaction_legacy(); diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index 07be0d0bfd6daa..e9568da69ec27b 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -505,7 +505,7 @@ pub mod test { solana_ledger::{ blockstore::Blockstore, genesis_utils::{create_genesis_config, GenesisConfigInfo}, - get_tmp_ledger_path, + get_tmp_ledger_path_auto_delete, shred::{max_ticks_per_n_shreds, ProcessShredsStats, ReedSolomonCache, Shredder}, }, solana_runtime::bank::Bank, @@ -590,8 +590,8 @@ pub mod test { #[test] fn test_duplicate_retransmit_signal() { // Setup - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let (transmit_sender, transmit_receiver) = unbounded(); let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded(); @@ -694,66 +694,62 @@ pub mod test { #[test] fn test_broadcast_ledger() { 
solana_logger::setup(); - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + // Create the leader scheduler + let leader_keypair = Arc::new(Keypair::new()); + + let (entry_sender, entry_receiver) = unbounded(); + let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded(); + let broadcast_service = setup_dummy_broadcast_service( + leader_keypair, + ledger_path.path(), + entry_receiver, + retransmit_slots_receiver, + ); + let start_tick_height; + let max_tick_height; + let ticks_per_slot; + let slot; { - // Create the leader scheduler - let leader_keypair = Arc::new(Keypair::new()); - - let (entry_sender, entry_receiver) = unbounded(); - let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded(); - let broadcast_service = setup_dummy_broadcast_service( - leader_keypair, - &ledger_path, - entry_receiver, - retransmit_slots_receiver, - ); - let start_tick_height; - let max_tick_height; - let ticks_per_slot; - let slot; - { - let bank = broadcast_service.bank; - start_tick_height = bank.tick_height(); - max_tick_height = bank.max_tick_height(); - ticks_per_slot = bank.ticks_per_slot(); - slot = bank.slot(); - let ticks = create_ticks(max_tick_height - start_tick_height, 0, Hash::default()); - for (i, tick) in ticks.into_iter().enumerate() { - entry_sender - .send((bank.clone(), (tick, i as u64 + 1))) - .expect("Expect successful send to broadcast service"); - } + let bank = broadcast_service.bank; + start_tick_height = bank.tick_height(); + max_tick_height = bank.max_tick_height(); + ticks_per_slot = bank.ticks_per_slot(); + slot = bank.slot(); + let ticks = create_ticks(max_tick_height - start_tick_height, 0, Hash::default()); + for (i, tick) in ticks.into_iter().enumerate() { + entry_sender + .send((bank.clone(), (tick, i as u64 + 1))) + .expect("Expect successful send to broadcast service"); } + } - trace!( - "[broadcast_ledger] max_tick_height: {}, start_tick_height: {}, ticks_per_slot: {}", - 
max_tick_height, - start_tick_height, - ticks_per_slot, - ); + trace!( + "[broadcast_ledger] max_tick_height: {}, start_tick_height: {}, ticks_per_slot: {}", + max_tick_height, + start_tick_height, + ticks_per_slot, + ); - let mut entries = vec![]; - for _ in 0..10 { - entries = broadcast_service - .blockstore - .get_slot_entries(slot, 0) - .expect("Expect entries to be present"); - if entries.len() >= max_tick_height as usize { - break; - } - sleep(Duration::from_millis(1000)); + let mut entries = vec![]; + for _ in 0..10 { + entries = broadcast_service + .blockstore + .get_slot_entries(slot, 0) + .expect("Expect entries to be present"); + if entries.len() >= max_tick_height as usize { + break; } - assert_eq!(entries.len(), max_tick_height as usize); - - drop(entry_sender); - drop(retransmit_slots_sender); - broadcast_service - .broadcast_service - .join() - .expect("Expect successful join of broadcast service"); + sleep(Duration::from_millis(1000)); } - - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); + assert_eq!(entries.len(), max_tick_height as usize); + + drop(entry_sender); + drop(retransmit_slots_sender); + broadcast_service + .broadcast_service + .join() + .expect("Expect successful join of broadcast service"); } } diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 031e72012340e7..6dcaccd9f28fa6 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -503,7 +503,7 @@ mod test { solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{ blockstore::Blockstore, genesis_utils::create_genesis_config, get_tmp_ledger_path, - shred::max_ticks_per_n_shreds, + get_tmp_ledger_path_auto_delete, shred::max_ticks_per_n_shreds, }, solana_runtime::bank::Bank, solana_sdk::{ @@ -815,9 +815,10 @@ mod test { bs.current_slot_and_parent = Some((1, 0)); let entries = 
create_ticks(10_000, 1, solana_sdk::hash::Hash::default()); - let ledger_path = get_tmp_ledger_path!(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Arc::new( - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), + Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"), ); let mut stats = ProcessShredsStats::default(); From af9c754690a98472d3976c22ec098c9802d4284c Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Sat, 21 Oct 2023 13:33:10 -0500 Subject: [PATCH 405/407] Crates have identical build.rs to frozen-abi can just be symlink (#33787) crates have identical build.rs to frozen-abi can just be symlink --- accounts-db/build.rs | 28 +--------------------------- cost-model/build.rs | 28 +--------------------------- ledger/build.rs | 28 +--------------------------- vote/build.rs | 28 +--------------------------- 4 files changed, 4 insertions(+), 108 deletions(-) mode change 100644 => 120000 accounts-db/build.rs mode change 100644 => 120000 cost-model/build.rs mode change 100644 => 120000 ledger/build.rs mode change 100644 => 120000 vote/build.rs diff --git a/accounts-db/build.rs b/accounts-db/build.rs deleted file mode 100644 index c9550c1c5c4f22..00000000000000 --- a/accounts-db/build.rs +++ /dev/null @@ -1,27 +0,0 @@ -extern crate rustc_version; -use rustc_version::{version_meta, Channel}; - -fn main() { - // Copied and adapted from - // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example - // Licensed under Apache-2.0 + MIT - match version_meta().unwrap().channel { - Channel::Stable => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Beta => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Nightly => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - Channel::Dev => { - 
println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - // See https://github.com/solana-labs/solana/issues/11055 - // We may be running the custom `rust-bpf-builder` toolchain, - // which currently needs `#![feature(proc_macro_hygiene)]` to - // be applied. - println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); - } - } -} diff --git a/accounts-db/build.rs b/accounts-db/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/accounts-db/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file diff --git a/cost-model/build.rs b/cost-model/build.rs deleted file mode 100644 index c9550c1c5c4f22..00000000000000 --- a/cost-model/build.rs +++ /dev/null @@ -1,27 +0,0 @@ -extern crate rustc_version; -use rustc_version::{version_meta, Channel}; - -fn main() { - // Copied and adapted from - // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example - // Licensed under Apache-2.0 + MIT - match version_meta().unwrap().channel { - Channel::Stable => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Beta => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Nightly => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - Channel::Dev => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - // See https://github.com/solana-labs/solana/issues/11055 - // We may be running the custom `rust-bpf-builder` toolchain, - // which currently needs `#![feature(proc_macro_hygiene)]` to - // be applied. 
- println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); - } - } -} diff --git a/cost-model/build.rs b/cost-model/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/cost-model/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file diff --git a/ledger/build.rs b/ledger/build.rs deleted file mode 100644 index c9550c1c5c4f22..00000000000000 --- a/ledger/build.rs +++ /dev/null @@ -1,27 +0,0 @@ -extern crate rustc_version; -use rustc_version::{version_meta, Channel}; - -fn main() { - // Copied and adapted from - // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example - // Licensed under Apache-2.0 + MIT - match version_meta().unwrap().channel { - Channel::Stable => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Beta => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Nightly => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - Channel::Dev => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - // See https://github.com/solana-labs/solana/issues/11055 - // We may be running the custom `rust-bpf-builder` toolchain, - // which currently needs `#![feature(proc_macro_hygiene)]` to - // be applied. 
- println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); - } - } -} diff --git a/ledger/build.rs b/ledger/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/ledger/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file diff --git a/vote/build.rs b/vote/build.rs deleted file mode 100644 index c9550c1c5c4f22..00000000000000 --- a/vote/build.rs +++ /dev/null @@ -1,27 +0,0 @@ -extern crate rustc_version; -use rustc_version::{version_meta, Channel}; - -fn main() { - // Copied and adapted from - // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example - // Licensed under Apache-2.0 + MIT - match version_meta().unwrap().channel { - Channel::Stable => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Beta => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Nightly => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - Channel::Dev => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - // See https://github.com/solana-labs/solana/issues/11055 - // We may be running the custom `rust-bpf-builder` toolchain, - // which currently needs `#![feature(proc_macro_hygiene)]` to - // be applied. 
- println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); - } - } -} diff --git a/vote/build.rs b/vote/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/vote/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file From 8260ffc1efae02dd9a1250d21786e6b005524de9 Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Sat, 21 Oct 2023 16:22:16 -0400 Subject: [PATCH 406/407] Bump platform-tools to v1.39 (#33804) --- programs/sbf/rust/invoke/src/processor.rs | 10 ++++++---- programs/sbf/rust/ro_modify/src/lib.rs | 3 +-- programs/sbf/tests/programs.rs | 8 ++++---- sdk/bpf/scripts/install.sh | 2 +- sdk/cargo-build-sbf/src/main.rs | 4 ++-- sdk/program/Cargo.toml | 2 +- sdk/sbf/scripts/install.sh | 2 +- 7 files changed, 16 insertions(+), 15 deletions(-) diff --git a/programs/sbf/rust/invoke/src/processor.rs b/programs/sbf/rust/invoke/src/processor.rs index 7c689cfcf860ae..36f4f2481f3808 100644 --- a/programs/sbf/rust/invoke/src/processor.rs +++ b/programs/sbf/rust/invoke/src/processor.rs @@ -1131,10 +1131,11 @@ fn process_instruction( #[rustversion::attr(since(1.72), allow(invalid_reference_casting))] fn overwrite_account_key(account: &AccountInfo, key: *const Pubkey) { unsafe { - *mem::transmute::<_, *mut *const Pubkey>(&account.key) = key; + let ptr = mem::transmute::<_, *mut *const Pubkey>(&account.key); + std::ptr::write_volatile(ptr, key); } } - overwrite_account_key(account, key as *const Pubkey); + overwrite_account_key(&accounts[ARGUMENT_INDEX], key as *const Pubkey); let callee_program_id = accounts[CALLEE_PROGRAM_INDEX].key; invoke( @@ -1181,7 +1182,8 @@ fn process_instruction( #[rustversion::attr(since(1.72), allow(invalid_reference_casting))] fn overwrite_account_owner(account: &AccountInfo, owner: *const Pubkey) { unsafe { - *mem::transmute::<_, *mut *const Pubkey>(&account.owner) = owner; + let ptr = mem::transmute::<_, *mut *const Pubkey>(&account.owner); + std::ptr::write_volatile(ptr, owner); } } 
overwrite_account_owner(account, owner as *const Pubkey); @@ -1309,7 +1311,7 @@ struct RcBox { #[rustversion::attr(since(1.72), allow(invalid_reference_casting))] unsafe fn overwrite_account_data(account: &AccountInfo, data: Rc>) { - std::ptr::write( + std::ptr::write_volatile( &account.data as *const _ as usize as *mut Rc>, data, ); diff --git a/programs/sbf/rust/ro_modify/src/lib.rs b/programs/sbf/rust/ro_modify/src/lib.rs index d067e810898c0f..daa529f5370f19 100644 --- a/programs/sbf/rust/ro_modify/src/lib.rs +++ b/programs/sbf/rust/ro_modify/src/lib.rs @@ -150,8 +150,7 @@ fn process_instruction( // Not sure how to get a const data length in an Rc> } 3 => { - let mut new_accounts = - &mut [READONLY_ACCOUNTS[0].clone(), READONLY_ACCOUNTS[1].clone()]; + let new_accounts = &mut [READONLY_ACCOUNTS[0].clone(), READONLY_ACCOUNTS[1].clone()]; new_accounts[1].owner_addr = &PUBKEY as *const _ as u64; let system_instruction = system_instruction::assign(accounts[1].key, program_id); let metas = &[SolAccountMeta { diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 8a7cfb693afdd9..2cc8a76875bb32 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -1403,17 +1403,17 @@ fn assert_instruction_count() { { programs.extend_from_slice(&[ ("solana_sbf_rust_128bit", 1218), - ("solana_sbf_rust_alloc", 5067), + ("solana_sbf_rust_alloc", 5077), ("solana_sbf_rust_custom_heap", 398), ("solana_sbf_rust_dep_crate", 2), - ("solana_sbf_rust_iter", 1013), + ("solana_sbf_rust_iter", 1514), ("solana_sbf_rust_many_args", 1289), ("solana_sbf_rust_mem", 2067), ("solana_sbf_rust_membuiltins", 1539), ("solana_sbf_rust_noop", 275), ("solana_sbf_rust_param_passing", 146), ("solana_sbf_rust_rand", 378), - ("solana_sbf_rust_sanity", 51931), + ("solana_sbf_rust_sanity", 51953), ("solana_sbf_rust_secp256k1_recover", 91185), ("solana_sbf_rust_sha", 24059), ]); @@ -1465,7 +1465,7 @@ fn assert_instruction_count() { diff, 100.0_f64 * consumption as 
f64 / *expected_consumption as f64 - 100.0_f64, ); - assert_eq!(consumption, *expected_consumption); + assert!(consumption <= *expected_consumption); }, ); } diff --git a/sdk/bpf/scripts/install.sh b/sdk/bpf/scripts/install.sh index 1ca638dc13e1e8..55d2cbc19f4dc6 100755 --- a/sdk/bpf/scripts/install.sh +++ b/sdk/bpf/scripts/install.sh @@ -109,7 +109,7 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then fi # Install Rust-BPF -version=v1.37 +version=v1.39 if [[ ! -e bpf-tools-$version.md || ! -e bpf-tools ]]; then ( set -e diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 6ff755d8d1bb21..e123680fc33d04 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -82,7 +82,7 @@ where .iter() .map(|arg| arg.as_ref().to_str().unwrap_or("?")) .join(" "); - info!("spawn: {}", msg); + info!("spawn: {:?} {}", program, msg); let child = Command::new(program) .args(args) @@ -911,7 +911,7 @@ fn main() { // The following line is scanned by CI configuration script to // separate cargo caches according to the version of platform-tools. 
- let platform_tools_version = String::from("v1.37"); + let platform_tools_version = String::from("v1.39"); let rust_base_version = get_base_rust_version(platform_tools_version.as_str()); let version = format!( "{}\nplatform-tools {}\n{}", diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 3f73eaf1d76c27..f608ed61943826 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -9,7 +9,7 @@ repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } -rust-version = "1.68.0" # solana platform-tools rust version +rust-version = "1.72.0" # solana platform-tools rust version [dependencies] bincode = { workspace = true } diff --git a/sdk/sbf/scripts/install.sh b/sdk/sbf/scripts/install.sh index 5b591c682ad85d..08f5f79e1b3417 100755 --- a/sdk/sbf/scripts/install.sh +++ b/sdk/sbf/scripts/install.sh @@ -109,7 +109,7 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then fi # Install platform tools -version=v1.37 +version=v1.39 if [[ ! -e platform-tools-$version.md || ! 
-e platform-tools ]]; then ( set -e From abf3b3e527c8b24b122ab2cccb34d9aff05f8c15 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Sun, 22 Oct 2023 05:51:14 -0700 Subject: [PATCH 407/407] Custom debug impl for LoadedPrograms cache (#33808) --- program-runtime/src/loaded_programs.rs | 12 +++++++++++- runtime/src/bank_forks.rs | 1 - 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 8b2b165136bf42..eb4abd1821530f 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -443,7 +443,6 @@ impl Default for ProgramRuntimeEnvironments { } } -#[derive(Debug)] pub struct LoadedPrograms { /// A two level index: /// @@ -459,6 +458,17 @@ pub struct LoadedPrograms { fork_graph: Option>>, } +impl Debug for LoadedPrograms { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("LoadedPrograms") + .field("root slot", &self.latest_root_slot) + .field("root epoch", &self.latest_root_epoch) + .field("stats", &self.stats) + .field("cache", &self.entries) + .finish() + } +} + impl Default for LoadedPrograms { fn default() -> Self { Self { diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 71315bc4b875c3..528c9d6aee16df 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -56,7 +56,6 @@ struct SetRootTimings { prune_remove_ms: i64, } -#[derive(Debug)] pub struct BankForks { banks: HashMap, descendants: HashMap>,

    Z|RihmjWIFSlL$J;QX_ z#$l2T`O^*NO8h+%U)zt$Ha|%RyCj`<58d4T>SxfXOgL7TeM|d~^5}Bem#t2x zej*9I_($ZkCY|wh<wMGh&%Kt)s0zbzl@7d>a=lItbh8$$}{ z*s3Kc=Zx_59^N~F2D-_Ukm({adeXef{9S>{xAUx402G{i}^^mkzzvd&{s{5gJ;0hlM zuISy9;3536{60RMO#ivu`ne(YC!s&Bc)A>VKBkkez4-fH){m~Q4ug&me_Y?~fbaT? z?`klf?MuI+$KEO7&P*hF%*)66%=BMv`9<~FBLEKPdV>57@pn>vX60giHikkh_qh7$ zV4sF>)UeB8+#mZj-sObpBFA;&MGou4i@bS!VmwL#wD-5~+YzVOPfV||U)9y$mY&p~ zqQ4X6M1P~a=x^=6vEJ(Qd}!wr+7T#E{odY>f8bm`YR6&rACBX>60xYPuVfnsBwVo% zc1pP2^vPoV0NpR3+_>FWYY+JNU2D(G-7Mha^X5IfHN0TlxAD*Wz2DFB?}#wI8%5jQ zeq5q(7W7NAKQiFsG3eu3zRdJnIlv3?bK3j`c(U`r%_zHqQ6HbjG~V@%&%68{*kLjX z&hNP;^~@&5n;pdajM5Julk5TUWZ35+oIf*v&yAJzkm)E#pZDeH_o&$uf0v8p3CHzh zkMW=O4davTBk-LrG7|0}@aOp0wo=1x7as+mn7^;n;*BruJ2XtYQ0Oy!sjosLAzH^< z;7eDFzrXvJZ`FI-&qw5gUzCi#j=5=@mdm!NL3WtP z!jXB#I(S8L?boWSH^!|U-5M3 zi@(d~`eO7_LWO+W`umK{FVev$r0()9dM={;_7~@w&)2J*UuF9)q|a}X>EuJbc(!Q0 zcuI`NN6+ec3FoiZ=ldT$HXlva$FAq)lj}QzN1goPBKZYB&%Xwl;8?%lXYmISM>%5u zPs&SjJ(LV|d2SC8fb;cgt_KOe%NNFni1K$T7VJlAw+j01(sHyPNuQ7RZI(T#BKK^1iFW7D~w@pm}0sWFlplub{#UMqJfgKEpA16}v1jnB4lqywD$`7i3R z`pQoj9E@Xh>0qbkyGP&uJWOW6`8ZBJ#|(!h7V9sm3sT)ow|>WQp8dG}QO!_m5BYxi zAm|T~h3!Fq?tS)^ND4`@jT7wOY(!PkM)|- z!TP_iN6`Y<=%tGH0{OJ~1$q?p zh#qx$C9!a>Pcum0%&^2lKeav(bmMnt>!(C79frIy+-`JID~1S@Ui!7~3%b4T?+(ut zk%{@Xsy}LWMvv>QZjJZ()aZp8o_nwPHZMpAqY@GK8`MunJowY!cdXZ6NOHaS@Aso` zqaT?Z`FY`3?`~(Sz@D)5xF2rQ0@Dbl*j{A|>v!~pTY#ihGDE7!2t#83O zIg9UGTS=##%@F<77Qfo^5g+y+>Ko}thSy=lbKK6^_YB-V@bMtq(npZ-xai++^7&di z_|hwq{-k|h-Rx{1FWNh#_=0y|GQfk#`o8G`yI`-DTVNO1ymK+Tz~OoSi|v>Jh3E5A zr_b$ie;>}zx6*zQ{7k?4{DS-=zoLWseuU@q^TCmQ|J~r%4ll@&xA3rExEXVaB`nf0guZHQD_bDHJePTRhM7yD#Isyxkj!&tqMp^!vFi=fl>CiP4D- z;kJ?c3VVhIZZ8$~>=`Xu8g zeC|MHlNJSR7<@x%WN>PC4ITy@fF?$#Z!8S$i7`ZdTSrFs4(_%rKT?VuV18t1WN2y# zbnhz-Sn{@!$*GCG#i^mu5i-S-Z{53RYG`t3aIN8Dxi{~bC=~{xO1^=ik%7|v;ppC} zaCBF=6RaDU+z?LfE`@taBjM0w=(G$?P7aL>-d-BtJ2U}C4GoSIruI&hV(N>aq*R>R zJ~28tQ5fD9^Bc_FwQHz21Qy;%;MMrBm3EAd-Z6|;j6q4m8p6y_osb}SVdCbA!M($5 zEftdd$h{+@(<74;#nqG2w5x|l2VOq`N~TH^BZWO-N%NsZ2~>r9N5G!q?!wMJrLZ_U 
zP%4Bs3{8&hDMaDW@YtRbAs5K$aH0g_M^K_LqA*ajbpIF_P#V}1t=>I4Tv|P`cLYMV zdU|x?-qkzDcIBo%!1kXKcNYq|5u%f zf9gpE`1h~c0o4Hj_uVX}HSxxapsRxJeM`RfCit^|@a%v2Nvk9{M%4#* zZ0^!CrPFAYo-Mg~h^X=ET&5%l!o zy;IBqf3eU*sRJe;h4U~rk)mPyF1l?plgvg@g?xC@;SQugM zoe;;}VZyASS^>si0om6%bGeX*j0cG96o!UJ*F*FsOKBzI&a9;^E4V0*0r%I;{qPS;t5&$Y3nH~Z*&`a9aC{ue1!_?E0 zL|%Aw+Li1?uMPJ?O2S=KMxocSm&)zi!QR{Z^ng!7o$Q^g)5;JG&~`&KrZOgBlCpO2n)9D6QZc46Jkpf~{=#E7%*tevgF~E*0-B4M1~GER5|V8GWdx-`F3w zJ<+O_eMtG5t}D8_y1?^EuzPG2$hmO=VTe^3N)An`$`~t6Ky-#hxCcs;v}3|iM9PqE zl?si!3uM7aj%^syINTGxKD?f)17nfV21x)gl?Lh#Z&`y=JNjAl);%{BVC#fqdv{`- z4DT&P@%U&td<kdd>G^6@(h$c8cO* z%?I#=jmjZ9DI*KY)JPkdi1vg=i^fET@56xVrsNw-(`ZZ|L?uT}amPTKoNSA;JFgPP zV@1-<{Ff$&RZCr<@@wc0KwF^EsDXB2NT=G^H8HAkB=X`JoV7NjLoAVRUuneRcTi^u zWYqzta(7LXO8YSK*~Fy=NKB$*ISDIsXx9*Aw<6EHmJk@I|NVgZf|n_Q$ti`^ounVra!c! z7R^YLh4K|vXLP#Q)u_hM>1EQyu?70fz)@6n+*Jj^aaTlL%dqW-?vGio+C6Sd(X9rk zs{2-kTv6%*kRnz?Lm~=I6i=|!sIk0nP-7_`lXjLM3sB!9Q@N7NWr6NQX>#}Io`HNk zchPYpIdTd)tIdAQt?gR7R?OeHXRS`^h(6a%EKn?gnh6-kTStl$ky!7h_i{S8RoxcH1=OG1D7V2}`=4I6IO(Y4ox;2kpP>=-3?PFj|QFbA2gE_@^UwKd4(w*lgm zFu>H3WTQtVYuX|f)yXU0Dn|bq7PhL-yxub@xtZhWb~PYvcuaz++rl1s(v04mC(UT4 z@k#S-#WX20Q8;O4kjClHsb&{=gIGN|)dKl3Ob^BWIw>lZr5y_4$$RhEP~RXYl2tu+ z%h-_2E~|Q*3r)Q=mBy3SHc;ACz`|5tLC3m28G)*5zCm*sV?^^;A)%Z&S*{8bPKpwk zgdu%$s&8khz(tq7VZ>CSw9Rr{uNlbJIw;+a20B^J(?E-pnflt)m*8$i4(}e5RR~!_ zojl_ywi-r1uCanZD06GNaD)q;7)~VVitzGqXEX&iQU#NHTm-B)T1?qol!ks4mu1xQ z`4Vk6F|js;PU4Lvus5E)ZV2xvO(l{NKdW+`V1!t#h-;F?2ZqxxNts!!l*ez;$q}ms zttVxqE@Uo}ZK%R2G}$7Vl0$TFVFI?*;D(Txq7^W|3}HwP7e;99`kKxa;UwmGGRo2z zN?P`qF3RM1vc$D%tTSPT8--It!=;+}BMpcOK8^yR>ka&A6--|$Ji7lM zVyu40XuT909_uA}tJO{0U6?iusw%osCQS8Gw2^dhsVYL8t0ENHswilb`t5sr=|0GO zRbHd0D$Yr%imdR`cVrexu18LyFnGI2H=oSHs zO$h)!?FCj_Q=TYc*943ITwwG*6W%ve;Hu{7!(6bqe8!*_EHWET@j~!-bFsO$uJ6jC z^Ql*2%MV!5v<(CMPCP!S&w%xP@M`eq<^3 z)Rr{QWG$1*O$PCiGhM`7e3{bplq?Y!PP{%@mG$Hclc&@Q#l)IGytIME7B1CRcGxN_ zHohN@mELNDrINXNRR&f~-8INFU3g5W`%_qPWPjsSQkJEBSy4APPrCZ(?B>#wEIXds zdZQPM*>JLj#(FD^@entq7bUWh}#S=3(xs(_qSha@e=q$c? 
zxG+cDJhp1MxoUOrMlSl@bwX;{g@>~D5>cs;;@vSv$ue|E&Ypz|le)IKPZrFO`O*h~2BX79% zjoWX#{f-^^JA3cC`%Q(NMa=03cMsij@9@az*f=H|d+(dRKibC=TBqfLR6}D^x;fKg zrCQH=)vM3`fwm>{=bU@q`PumkF1+Y9OMmdS%U;(WYKD%Bm#^r&WaX+$FS~sHirnj0 zcg46$I!{rQMtFvUb5?7j#f}!-UYf+J#Dk^;T3GvVrXR7D4=Q;*u;~) zCyT(w4i+~NxC8q;*z3dj4(=IJsZ3()j;CsH7ziUCCRp1gublTNP3WnfzP(dJdnOH* z9a$NwOy-ypT=}kvpS+ApM)8F{n4}CsZ_j$GGPW*54!OvWH zQTdN|UHzAT+LV9!caD5~#UH%06roZNjH{aIrj!(W}<1JsAx$7^# z)bcY2UcTY){^~E@PRQ3o97_{5hjzGJ!QD(b`obH{9VISfGz72C7$=;B5!_b*E(5pi1CR} zc0d`dhU#6rYtIyYZvBw$a4o5p#+K$KtxL{ca_&VJUbO6@6v+Mrry5Cs$>F&>Vf4RG1TXx%}+pfFqj@yQAo4xIcyPv=NlXw4J;pM_b zJFmKD`#r<={KP#!Kk>d#nx0N)zF$wp-N>g zUB)Y7KpmINF|UQBwPaGMhNcGKt9>G!ZpZ*fG7KkfA$qNiX>^SLOWd2lQ}M-*-*@HO zmuy*bi%|A`kAzT3NolcU$(E%M+FVPPvL$;dN_M3ZvPYx{6;Y{F6tYz+CH&7^clzF$ zZ@$0(?|Gir>p8tN^ZtB3=QA^Bo4IGsIfDiHK+8-19z-VM@u1u;W-)<4z>uJ#O#%l2g$_y#5F5w9z=#7m zwpa&2)gurr=3(2OJ;Bba*zT!8Eb=0xy;Z)$nKfl5ULISC3nRCKW*(LnZLK+Xeu5QkO; zv7oAfD`o^midvLG+JTw`sv{^vAb&xNuvkn$StpS|ivX1xP_&^;2Sp6D3gCJ`$p%-w zSYJWmX9Cv=3MSMvKqVNI22gcLpaL<1h7;5dTHyh$2uL1KOhIX)7HH5)fd+yAsuZ3H zv@)Q&ELLt3*kizgf(Qy0wRnTU&7kswqJRO%0fzx&BNwYY)I39BiZ&imT%U@V-HTiO z_{D8h)8f{BhPtJM%dN+N(8cXY-G=w3Qn${J9zk2OT^7y&Y{7^~b!dAzNzfB)^@lDf zTn5{O758eMfvt+fHxJeg7|MWpP}BMin5@22yHt*Bz_gmXc-}4AfYCi`KWP1SgMQ5I zK#}Xbh8C0Wo(&3aGED5*!q7QIF$8}9KMC-C@2~eUFm@FTcLBGte-DuagQE}n?I*9( zB`f^hg-x6Y@ee!{&MrH^Ws2P@~| zB}7Nep%eNa0`|CE+TO^gb%!rK?~9b@F;jcP3FEG4q38l}$rFD=~Img8p4R)ZbPRdCRGG7A? zk@W>*mZjGG^>FmDeEl6jEKleR{xh0`{{cDhuq(9&Dp_0BTiaQhZnQHr-DtYQaxo*` z7zyg)I|V=7oo%Pi=XuzYai4$Qr>K9no61}JYwCXA2w3<&+fQZIXCCV(&+e?_`aMWx zo!`L;C8AxiGQY>D9F}3FD7A0fQ{&&$RKEQnwe^Wsjhhy8hE&$p zYkBgfyEWWnZZnmu9z7M5GS@$)%UPCDTi z)oE8SQ!rml<<9f@@#lOD@7$X&qp}AtNBztc>GYfV8Y-77Dmq)OE`9WK{vMT!gO7E! zywgkPS!ky+gSPbAdz||Qqq5N9w*9<1u}ivhdoZ^0D)IQ*@HXE0OqHiycqV>Dl?G@ifu8(U1}eV5X%9L{L@ z&W9_ba!1|B4$J!f3PoHEmEWyj^@acI@2JhVdsMy}xbNxt4s!beTsxKDSgt(wbq`-! 
z2(FvTS_-16{kOB;oyYZ4Ipx;-eU^`(=a=IKsjOp|m-GDi`PnYq7?n813;N}gaYbhs@V}`n6TT~#b5gX63?57&!5YQ0BSSfy=e6;yRL*3*kUe{z^P?4> zXHh?`?eLr|RqTTort+!IyrZ_83@K;u;#8J>RgBjkTv>k=FGJ<{1w&i;3odaD_{9$& zuCI)_kjbyy{Q|E;)fWi8FH3y6{NgvfA(fv8ys7f4Px-__*i7ZW-hBUc&u}^H=Ij4o zw+w0WVw(An7s-%;D+X~Pf}r@Q&LybP|{RZP*zk{0!u=uD61-~ zDXS}MC~K-Hs3@u^sVJ+csHm!_si>=HsA#Gxs4A)|sVb|gsH&=}sj91LsA{Sys41!` zsVS?esHv){si~`JsA;Mzs4J=~sVl3isH>{0sjI7NsB3B{XeeqZX((%`XsBwaX{c*x zXlQDJQ`7{9*95LLK{QQB^#3wQtpRUVz_54lGW0O$BBIV|gS(yKJs|fy{!63mC<=2vXE;B`_|+|1fa97=zZw2OfR?^rCIwBvp!Tpslba4NPHO}czqG-O4M+ugRH1fn z4K?P!4h`NAK>fyGbQ0)(henftqJROvhe1yf4*XtaWa45LW?9ZE$i~Ue#zEl3b8|0a z;=}S21hC8Tf{a2~VVo!*883~OWs$=w;1zL7*gRYTu8>ef`h}Y%&f(_q3rr<}L1$tw zE9|m96O$nPik)NA=GotJ@_PGh?B0dP#+^+pxKUNz-16Yz>%pM~41tS#jiQRWmbR{` znN4^cP?l9yw><3V92~+B*x0FBEo~!XQ!`s9=kT-VGaqzxvT>~eBGX-a_u1MxImezY z08yGBycrytV&gJ0b#kVJ->j*t@9CZT9CqqVPHtU&^Zky_7cWiHY9F<9befuP-L>1+ zE;=s$MtMd3-In{kTzvd{_kI5{zd-Rk^!g3EsF$~}h~0^k*RBUw*YNSL5H;RtzSVlq zKHHPQx0}0r`lmjBJM0_p=bs`WC!cq{qW*qo@0$$$)HH>7(I?#<3+7w*>}4cza7xNg zOnP~%>*^U8C7iW%@o#<5^|2axq(Qa5Gwg{sduW&`SY2tUwnHo)vW8urOfRd5PwXLX5jWR|_AT1z`go ze9?=*#lXR;MG#qSNA@JRucovTLrd|3454%Q-Hd!p0!%!tJgn{v%nX7IyBVd4#>_GV zRst5U$Ra}!WMIKlih)XAaSNW3L(;)>;B^=^NYcd61ug-SJeMqfCC5q*N(>=1bvX-f zR1#61sLhCD7hs~)t@2}~JQHLkQWl7mH>_VW@#;(=`* z2O9?Tl7fo^eL{>l&{uUot!r6RAX6DqN)rtU>}x574B`ZFf)q)G#e74E zIujo=NkH336Yof3Qs5)5#0wFz`Wl2yM0o-;=y%m8OA=U_)bLtFK}G^AqlKxuGMh4^ z9EmwZV!OE%X)T|igfI_3lQ}qmA=`3BW(E_IB$Gdj-ukr++C*lCZJ;ZT6Hla^aadtO zVy5KUt}}(Q&e5HDO{lHWD%+S+bdiFd8$9;5VA8wy^UD{8=dXPH%Q(3zv_79J(?4PN*j1TEadC31(wvDdMuw(!I``2%3ys z`cN52ogsxkTg!C$V~DaWo{K;ViHRn75ZUldjGT$~8=3rcDBqcVNj|*Blnfr$T};a< zks%xLrwllF!!1N9Z>1?cvUouPE<|6HON)pNAE5kLvzfq5z=d%cZq}tV>VRH8E25AJ zE`(i%;KaIddW;G)^^eyqD0K`r87C1`+)ks_@!581)Wizk9! 
zJrO($k%5_+fdpDrO51AYaE2vYV_K_h2hEcO4f4P2ptm*-V8VyL=U<KJPh@;U@&{0X(v9joSx9r&SqFssK+(q>@0Mk%sQE|Sv+U= z!~14O>epQketWIT^t&wER85eLrFX~aH|cvu`h9~qWn}uMlZbcArL+3Zx(#nSeO`z$ z=%?|a9JN;S^6+=B4~D)oNv%kW-C}t3-TT$&`AA|yLUV_M154dwJmY?l*@wPODa>!Z zl(O&CBzCh*m%O%B7B&)KeDRw?lm>Gdd7_SQYr9qC#!q#xsSID#P=(-sr^ zprBDebldegc2|?ChPlZp?d~tH53Cld<;HpXrOhWFxf;G}ll>2c@=e&Nv+GW3cXwQs zwbf9qy>fB=sA`9eS+qgt9sh{X;>`@(Z@$es+L0U`a*iqK@u$fRzM_Jr%ME5fWzVqu zZ2s_xhcnmy{3Y&R2OfSm4+t2L5bW;K2;G=kiJ2=~WtE&(N{$Y7yb)vTOTJjDYGq^R zRc7T-^GaSVA-`E9tWf1g^vX}iqa$wed;?~j47q88^ z9_FodTGLlW9JSvK_h;2A$E}GJzvErxp}^p9w<~_OaLPo26N*G#hiR=s+q#JGl-^}#&zFW;FfzpTAp=q<%4!sM|7CNb^&fFcrSF)sTS22F(7-Bs!H=lCXuD$iJ zLxJhIM?vr=EW%jK0 zHhj|c{^(DU5dHC7#|N#~8Q1zZ!OW;ErW`sowMUZ!aFK+7~ZgVm<7-a3P*U z|JLKW{Lva5meqH24!@Yah&Qf|-q(A2lZ;VKqXAZI+ zE+063U?HCW>fwCNx-F?*4)4=s^%s0?I^wxoWmoDl&&CA{M+Lk7qNKBA)~Fo#aQyHk zG51S2&1{PKy-#-+M1{>Giw!q!hc}8aQs~j+w7Cvu+`&$dj zbnTn;0=!op=S;uF-*NBh$}JjY99C`8=XV5WWn#aZxZ4|NTdqAhJCL+B9CtU})8KcT zq2CKR&E-cOS8q^kAX(1NTXCKJ6?MtFX5yUg)I-UtlSNjlLAM<%RQ-=jq`bCPkw0T~ zchhB^)nSQ3rPX}i*xB*I8yDgJ-TsGng9>BZ5Oo=&{Okyy=G&bgBs?Uj|B)j3wj%V2^)+>-cExzksD#qWv73(>C(}={=*{2c zEiCK*kS7}Pz#Ze#(NX&*qx_1VwP42XN~lc{bUfcujLyrO)6jQp}T zaZO41wm102#b|d6{~XQhvRK=rLFw^P_!NWf4;aEd14F*`^#IoeE~K%E+4dvd_EW&4WI(Te&MtMjK2D<)_b-?)n`k zV&5h4Lj3cKB<;^||Ft!#;u%S|`N(*P_-%gvw#!}*ome-;9NFP|+4J(N{u6CXb0<1& zYRwhR#&m|p zq@2W6ch=A3s6Dp5w#=9#DZNhZZE}E6WcaD?>1q7S?&x`i?>=VrPHITPui@mYT^~E6 zhvZ+33M#|>^DB=`+O$5I(sQia!@zR2rmVoy!~ZDbILEB#=lfeaKD|wnYT@2(I{dVD z@cV@+d*#@D&#Q}iWe+)xWvbkh(WI2#>MTsz5)@nO(!Bm#z{afptT3zJS-n$Z_ClsV z3uOkQ4G-VWycfYYt?6*SqDS4PxTj-Pv%i$EBK$t!OsI$2IVHyvHBN_qTzr}zk*bKt zU0ATW_NjKu?Ff=({_nXkTv+h8=Lcl79r7Q#JUKQe-sJe^aF=~ej&8wgN|@)0AGM(kEPn z`AU2LkR$b^ojqrmGIiqmTtirX>!X_t6z)f>_8+tn(!G)Mf+@^^7c$i$C2= zc#ACWDTV#;pcZ6D?r-CKMq!umVilR-^KSNqxcPEX#!~7yTo&5S!Lx)S}4B^$&SpAx?*S0L&c70WR-*lYKc}-hegJ&ukyt$Q)9(eFMOQxB z+HrQO0tHbB?(po_uEU33BU>@Qz(DyEq@IL@lH>h0%A zeW|-qZxxbv{5U=x_hI*h^y52$-(uFiO3LW4Uam0XR~`ECnUX>rkJer6bx|F)?e6ch 
zWAtinq(^!orW1(EzY7fudi~wO`D>_D&mx4s?K<9<@y$KyzF9;le3bZe1R9kWvj!N&+m_TRy#k>e$0}&^^||4 z5q54_gqoe*h>RaY?Z@pSDeHfyv09=gkDN}zC=1B@n@`}Xv6s=x;J?RsP0EZj4^xEl_UJk>>t&x)Z&iN}-hIZUAy;}rM$UTtM3%eXB!7jzhRMsM ze&JnZ<~fb|pI!}G7z=V&EgJ;0v$uRrJixnRs5dKRm|vvzXBD|L!2XfaWQ4fwSvhWz zem|ZLX83($)lTtciRmG1AMERUxR;B)cRM(q`hDRoXZ(fK=Ay}Ov3DJ(SNzh8eRwNu zt~|xV_r}fRH*#m<6>75vYn+Z7cfET3(x&VCt>eXm?BDCyWG$Qj9bj(+dub5BTa8)YGd7{ib zwQ-$fL9Xu6qzayYO%|H5o#$smoEo0J zJtQB|Xge6?(-eC2@`h3^3j6E9lv(P%ZhQEg&G%Ja&q4Bb!0~N*3Y7p{#9w`Hp1_Vf9$L| zCw($-x7y_LmLsFHXZDm^>AW=mv)K~A50=DLAI9&Uib~#n@y?6!F{3!O zBD<0Emi}`;aAi?EWuF9aZ7;b(@}K;^jelFXJX_*YW9{~5m$x$>zhao)V`n;LtnB%5 z)75j~vL7A>pL!{~lT|;*$2*SpetP>I4-Sh*o7-+mh~_btq;}=y886GhkWb17`nHO< z+lIDEvBn(0bjNA$O!y@m{QdGTB@THCCi?J050#*wE6UpLEIiT5(wE*^6A{u|A(1_w zUmt1O%K81B+H~m$rAqNj4|=C%Pdt3iwn5P*OYg&YOEuf!AYomLv+(=ywP!n09XIFr zAMY@^{r08X`6|J;>*}sYJP^X2U-e?UVWaE*^p29f!?Twg*B2$Y^n7W$qi2wQ_}#Co z%b7f??3+^syIme!ihR`1Vc$j^sVuK0Ke$^RIfB`-Ok>?fF&FW1SM8gHBVyP!Ebk*& zk2I(yt#rQ5e7HR8X9w;5yW6_GvFwASna0LM-^!Tej|BFc3fv+r<#+CAn|uwZO=vXn zww38NWCk48{6dgqGh$6<>@3b#@(^(B^rB9 znq1hWK3p6$W^%oBTVKmm=AJNF!&L%ll0q;0o#SQ>8YvFW!0-DPtc9G~F2)Hpzj!jC zCVQq>Pkxo|N#n!s0?tMjhR>Ara#Xvwi}bPY9j!_l-Co@uF;doeiF@Nn+SL)wtCFL^ z`Qi?@Huk28>9IRS#oZXZ^rS>9YdKr+SX}fv-k%c_SHg(2C5zkBAzUa>awM%F#{%xl*=-1nYg ze&Xa$c-`v1-EhyIP@^w$u?%&Ir+8<%U3#Xb-aVVp6}VS^%y;~PhyAIPXIqS|Hm~r{ z+0!L;FQQUHXYHt~vFX zy)Kb9e!|`2c*Ti~%9!_ry%|SO--$E0S&|rM|206k<+J|sahIDB&M#&mSH}fj zyG;Myuu0}tM@VmnPh?W(C`Iu%kL?dTgUhkFEd@bgtFzZ>XQ+Pr@#LYxhOA}QgI-K* zwDBRoEu#dyG9R8XIXQ5);<4YwlPXr7PM2*1USwJySZ}^C(E@Ojg(BTyK#Cwvb6O%C+o_CW%`8jlm75+D5epFE~4?b%; zzT!U9B5pZx?1i62mPjhW_TeYK-`}5((#E5l`c{)GTs<9ib$@dC39}VrACwMuy&qAO zX0#DWjGIV2*L7)wgPhWWmT6gzno?mvWm9DLuRHT~OncvMxmTBz{GqHMI>@z7Zu9fv zQmr4yMH0A}zi+~-XL4}#?PF5y7cWbW`Fingc(~;<_OeX|%jDixd6zPLx#}#{MjQXq zZ6s${5vliczEHf4W#Q9x(_T&9t8Vc>>Uzd&JLW&U6y6fGhuzTkcIwqKwe@yB(TWPm zGf~X0ioZMdhjRtA7p*&9r6$eRI9FqEbf_n({@E5GH^PwwCEeR@2d@)r)gw4>+-)?? 
z(90DI-zc=PxH)f!$YJGvN+*XjZM;oEj=BD_{Y_7`-)xOd_2Ld2l~5!V3-57a?!Drp z3;xTY{_msPP79A{TiVM&%0Dj!se4%{hK3Z`_wrZi?YS>OMmo~qx=;Ty?uW;q1a0)Fe35~ zm+fNK<8v+e!`b8Q9YT#yVa^KY(ciyqcR0+gSC=Q;x$1otW-Si#v}Al+2h8wzDH`h! zvD{JXWsJ|JvBhh17s8_lUmnmnCuLQLk^XV-#lblk%$CgCKGOLm&6$gO@)Ys?W;O2R z>dlKRb+AQfplRou@b8PPbblY$L9G;)Wf@e~57vRlXHl8+(kAjtlWM`n+9@h`N358R zV!LgTeuTQZGR2T*nc^plcR6}IHIP2b>N)@1>LJ0nt;;__Oe*TMsZ;Ols(d_2T~&GL zVfLO9F!)@ur3hLES^pxK7_8*|iEOU%S%`8oC?%u%DG1h&yQ zc>Ecq?^&8rLeAm!Q-?ksaJ+KLVq-+G^o!h$+YXxN>g%xPi9dI-D^EyBcV>LoX|buZ z%d>O+kN0KM3zrPXL%8EZLUUF|A7%eMQ-43%bye?|l~>r@rI&A){c&Bh8uuy2d$cXV z@W`dynd$Q+_omf8EJO2UJ(e50)V|>0@oovuNiqfwz1U}kl+4MgRrkN0yi@T+Ym32| zo`G=bl20=?U*|bTMh`p*IK`?A-cp>G{}gp;+N#~LC%1FKR5Y-0cD2X-Lmu@yrxTij zg(dmE9-TDOG&i0+vs3Fu=b^sw)Z*K1@2AfAr;fFDZHO};spY>?Cp!iHz293?@8MoHrh^imrJTm6LOAnaJ(+b+u`ZMMWMwMUS>*SUbIYEIWQuCncq2 z>~xloeZ|Sx%Q`zcheTHVhR5@@ZCbOz?&@l7?&rU*N6hoSs=N4gSq1x~Y0>@PvOM*& zr`0zE9#nEu^-B49O!K+ionpd{6vp7DM$_3JFCycQe(Yr!IrhNTPq}~Y#c)}A@l?c> zL*u&DQdw>_Tw<#ldW-W}J6zdDl}PubpB1ewW$SyAfNe_1(PRDwk01Os5cHzOMIvas z-p=eB4=Zytb_<-X#QN-#=qzHgo%`@8!rS(?1a9+q;C_~tJ+`HkGU8rE>yWKGo@vGf zeD?3pT6zD5?lmwAYjf+N9?x^j9*k(Lc(h!r25+a;VP+a=Ju7zKSwG99h;VGMp+j~P zf98=3dQwhopB&)vib-Kxm1?`LYGa()&ILI8eG77We{#$){vp|JxZ1tmHN9djr{ zGGk}Q#;xC4es4H$tNUjBy}C=e%NqHE)9KbL_ePxYn+U}`83?M4Jk)S)G6L0q6o7NEt^D673UiAm_#1felUr*$ztP9;%$lHBs_muIb+LY19 zcOShQ>K1&!=u<3k@!&-zvGj%#j#H(t2rV9+@9;(o9`y?8mbt>b5f#Q^FT?p3j3fI{27A5ulWki~Y2!aLr!+W}ZR_3(TNPm`tW0gqGK^2_ zzm@EBT+4^c+S)ev{YUG4dEe*0CtqyA-s8G-@mI#H{<)7XQX<>3v8vCETPm@S>joo(d`j}zuRyqnJ*EmG^4tzHA%KR`W}e5zdicw>-;m%HPa&1 zDv}qszl&i#U~QK>m@Y6lg1uq%QLr>VSnfxsO@pnClYt0-_R4-=(O=UW{C$p8oisgs zjgTCjZz&vPX3F61_WC9J3!`5}iLQ?PnJ*&=_-BcU7QHji$_;dE!k-iv!Q*Lf78q3} zik)UrstEm}=K3XX1y8SQK=Hx21t+oVhPcVc63ci*oXR>?SQTKmB; z_+#$V{N_8joLnY{3kS)3cG(G}W$Lc@M;tLnnsQU6(u@V=EV+E)5f{d z*9;y7YqJj4R&W-cfB6L-zk9!UC@Cp4(K}|kTB{=8;MY%n(JD=`fVSSi+u?@`+%C*| zS@IB$tX*L1eYd)ES$I>Gnd|aX%f8J&j@dieDBwKpX47bQ!20G%oJM+2kbHgG{-e)l 
z^#bH4@evDV*0U3Jzxq0Vaahe3hP%|;$W;|iXmp1Cv|L-2e3Ld_IBfryV}5&*UcHIZ z)%4r%Up4r1B7^JD&m{g&F&iJ>;U2N6EX3}7q<(aQyRv;%S6$wNISMGDf5lp5z`CrSt_&UPMzE{exmWy>`2M_I7Z*_x@Ga!Ji@m=9Ccd$ zT7*By>`RnldHd8dmd-^(0f9gKzSxMc6x_3o5`JHuUC(l5y|Z`c_r@GI5YAD zcV5rQWRP0;!TFr?>n0seLl*df=TYU&H3ZuTraBTRre>m!Q-Km6WmUQ-TAdaZjzC^@g}ab zdKzy#=<+k^#5coDcdqXb`4-q%H8CJ0(yzpKF;A!}TZ!M-ZS8Xhr*rk^Po2Fi^>Dtw z6Yn2?>_p&o+(*^6o4?m}HyLjCeX*~oWPd5Pelj!9>6iPmD3TW6@}t6y*F2iL<|>T@ z3Qv>v(#BsK=}PXpxTEBBx}#ohBv~h8Q$*nRcRPyJf^|CM3#8f_UghzX3AyhIzr@n? z_34+nsh(}Z*_RE$I!zyU2i~i=n6moTjhrDfr%OEheNt{a8`n&TzGk~*qZ_$9i1qiX zgHCZ7)dy6TzV z>dF_-nYmLi$Ui5(CeqOD1lH@PmEo9uZ({7N;)3L+{*VXi?pBhTu|IqU_<4>^TfVD4 z<&a#tp)$ibCT^@f(9bjZTy<6Mjsxqhuiuo!q&{vvs^RX|HB!G#?&U?rhTp_v=?w7r za=hqF?fWrN!#z(5Ctk_x6g5d4DG?r)?D-sF<=&&s^31RFX`k4;L#KE8`8zlh@1HU? z4|mxq8~OdXm3uauL}Qu zV_d^nxP7Ss-{CD+60kNudvQ~e@Obv=^<_U|@neb|Dax$|xHSuhwrBevBDkDfzm8n% zmK}5{;%WFuivxz^@fKH^dra2%T}dCV1RiVHZnQsCU6obtXXxoS?K?Ak^4gByFvz}N zbN*~Oi~bS&+^G4NHs_*$7B#Bouy>uC>2#mpt~&GS$Ih=Cws7C8{2oSpO&dRN=5yw# z^3lUh&+-fc~8A|;QDxGUCZO{Q%U*K#YQ{Z zUY&j`*P+f}mUkynjlWlVU9x9-)!pVuVXk3y!mIZ3*k7$#Jn1jHZ!n*|yoy;rG3|m# z>+WD~Guth^L`;^`)TjOjORWcwV}smS^DdBJ{GfB zVL7+79CY0Ci~Z>&%gR3ULhO!#9~*T7_l>;w&*O$cNly|=%;yy55KcAfK@Gk^WFueoQ_*G%RQWP3g=)_djEUds8s{_EPO*33+E z{BcRA7y6UFPL_USoZP(W_p9d@%0-!VXOiQ7I`MK|uQlVK&42i;UMJWs=$X09D)zD6 z8_Uu0X3v$;`&+|htY+Sw_wY+BJ3mk?D~0MBDb`ynGs8PxL4%WL@lOyDHe&1zOYHtn(zTw-k z+C#ix%UO87Mcj@bjpfl@g5;*ID@BJ>uH9I3+gH3eJbU=-y_lUGIxklET~3hg9eQv` zV3t26+BqsQ%K7N=T=|o+uYYFFemYrLF*=_nlu-RjacAJEU%4xKoUg76Ugfve>b0uJ zxqVW~r|cvn3Da1ciFL)p056N#Fh;=UW5qsduI#IFcXvFA}W$z zNzvw$NQ4P5FZ*)*;o%eC97Q|2?@nED6x}Z`JlEO(MJEqy_GK(a$3;`yu?G8)KZ3O0 zD|K1lsR2B(t|zo6Gtca?X{~^HUY}uv?54}or?zDpT^PH#@-h3fk29}~w7+f-EqZ$n zf7{@}P(t-Xe&*7T4ty^vH#nTr7WH@6h3BtyYXvg9a5uBQi^Lk8*zcauEPmzyMS529 z#mM@U{-5Dx-^XPs_TPWr*yi-4+EOF;t*@JA){JkU?ah;pRx0lqV~5|0IUO@$d1+@` zPyqnjbvarTG5M>E!PU;iv;V7q?y`Xg0q9EINAx)Ytr+!owQ7%5mon=fPd 
zsP@t?BgY-epC49ntWoMs)-Tx@#DBb?;aOdG!S3J)8iX&@mt$ocT#X3rx=iCa}+v)h25_F(x+j2Eqt1M9mrKTF<+g^Q@+)GWj z-COg1&3~2N_&nC)ZhWMQ|j^sfeM$^F%mk7`fUcD9dvOuG=6S}EM4L6M=&2Wr|``@xZU z<3J!!8o|P5bi%Lr;K1*8Yk@Y_H@NLbwAZPAmSE`G%y;Uz_->)ezkgcg8{C4Bl6)QJSxxd}I)qAByW=*yQrgQ%` z=f~OGFfUea#|*r+RCv4J>e{>Pon6Ckcakw)yQZ)UyD&ar1prwv5dsJPVNF;Rcsa4o zVEi@?hb4$%R|xH8(PCo43V#04$!787G3(f}(?&jmE-`{XJBD4!wCU@Sgblb zG-aR2f)!`N9w2}v0>D&>t9T4v1h)eG(+3M9fF&YWn6O~9I@TYz z3VQ^s1Hpu4z`q2E0L~Vya{!J^VqnH$6+{&Y3V?}NDJE7N8C)b5uK|=GB3_Gx!=>S| zY+&I8a6mk+MIVE0T8Y8OV(rNo1~;%G0hXDJv%rCg4dC3DgEa?Iae~}zSP9Z{7CF2E zxHcS461xGUJPyYSu1g-P47`H%Gr)DP1|kk!Ca`z_7RwK%D99(s3XA&$&I>H!fD^)5ksygXlomkij0YJ?1Pf!}zJXi^ zr2(4&jz++enWY%0xx|3O%YnoOAJYZa+xftW053`!eH2G`+>j@ ziM9`KA=Kwv6qfQXZo6n($Q};V$%Zr-NL`$Svy>i#|8KoMO;3NGpTW6BKDEJTbs zZ6Ep`oCIy(n~#XuK--75YyKkNw}d{qsN$UoUh_Fr1tk=;cs6SS}~@kG|8pz>t2#HbwGthLu{(%a_%8GI~yc_p2nUT{9NB2V=&4w!za2l{w#fQ1az3WK4S zj{+IArUew}<>P?i^ZFn{#DJwPsNo>^A1=rAa_IVKpE$+P%M+HchhAL#%^ob)^5;D1 z{V)T$AczA6dU*_xi!X{1@@q@T9|1Y*U+lYp9L_g-|4)GYZ`b>22|p7+4%Y#CKRH({?f}sXgbJ-UJjKzDM$zc zz5Es6O;jmTJ_TfKf008k6yb78?`JuH*U=$%x}8f_QM7Pw%f0 z$p3bI_kkR~ztG!%pgTUj{Ob~a_&~bC=Sy#|3*<_)>!6o&f_Cq3=eYvN|8|}ZK>j!X z&P&*L13A>jL4p3bJm8ijyC_D;1%Ui-`6&bB`hT(4TEgCX344Dahuc;9^GO17y}$Us zyoCQsI{8CXIp_flvgc1|J%()0=#M)|=Z9WC1LS`@Pe$V3<)uKr{V(TV4CHXzO7H(7 zkVE$bDA3DKFJT|HguE5V;r5l@PdkwREgv5Nx#r?w5yyQG@67p+64!2MA{=o~}zaIDf67ug$$mfBaAN)aq-alAW?$2?j!1Ez} zuvkUJh|mPkeHgxHC4=WMF*?Ia6u$(#j!y3h{Oq9PyMW;~5fZbC5iy>0`XIpj>G%a; zXFAps|K~Xb5`SVN{<&vC>X@tI}x)D2Cy9myo!z)RueH>=-3jl8y!F8_~(8B ziDv*?)9EcZiI}x?d=#)N9oGPV@VyEWPXdPP6vSc@M9g|Z)$;d=96i3?w=lW+4?9hI1Ev2_V1sZ=4 zmeRKxfSW1^X}~|6pCqbe@xQcfi+ZvQvR$D+PpDqPW%&q5H@F-a0r_&8FM9bNAcJk9 zfjt+^21-x3j%3iKef>v)_4Dc2r1>?nZ%K~~RG|;w7*^>+x?I1lI=<^x=UEUA! 
z0ZuQfU%cw*Y?OG=cIR-lv!E z2XZ)%=;a519R7_-FNc07!R3Koo(<$=@COBYIrMuEuG{qTogl$rKLRTMv}Z}c9zLJF zK)w=k2*PIY2iH@2xjCIYNA;g})*r~>^re>{SwbGLggkHwdC(H_qf5x|0y%vC^yl*n z$l>x#FNc0F{_Xr(mXNb9A!l1c&c1})8OZ;3K8JuDzUTC5|8u{B$^+a^(#s3z(t%!H zxP-iD33>4n@{%Rw*Orh&&vkHm(w`5Mo^TzdmqY0T=NG+v!xHiVApe{Hw@b+1Eg_c# z_5W|@vlhtzc0TrW@{2nEJSW!BT`#@7b_qGu&jz1Ay?rq750?iuUF7eXtt29b4C29I z8_>gTKO}?dJAJqa^x(4p1T;@=|9g8nj9a`f7NO!71O6ulSUncRhucAT3mvRAV=?|x z`T?503iyNXfl$1sfa$Mi2r&I~%xBc`|F`j{L45l3K&RI?pa;qSCqU;9x5@O^H^sep zeNoDbTOZZ`zTVRRAwBxw__XbS`l3I6J!E>U0lezZ*@1o+!0_)k=y@B?{Z`cT^?$`P zrHkhd{K5AE=r9z(a9MUePn+QWWa z(#L}9rY|REK+gavXyLz(9}H~Z<6D4ut7&nd=Pfnb{%hlZephS)GO54FcL5okhxGQ_ zfgJwL%CM1$fu0YcPz)*rTql;QuNMD9`6Z_j{`iB_A8u1!6Be%uuGb0B0|5x|vk3fb z|7&3Z$^xh;Py?K90&$B1mPPTO`baawP~sQ&m}&Z9+7?a&8vb8zptOL`i9QnJ5^~7) zZ}!kLB{xXf{Eo#IrH@unp=Ux!ZZWVZ=cFY*RDb{GM}GU?m|xjIZ}WyMx{?cGMm4bqhyNA3F^ASR(CnAQ(sR*r9!X(b>t)*A09O9(;L}?n58c7?2fMchJ$p&ea)OAd)I{^MXD`2jYNlTRYhu z21lWaA=iuFE~BoH^d~;};2DU&_$84)Lg*vV^fKu5eS-d6iYNG#tsN$TmXC1#&qr}3 zihXDW7`AVvWj@RUD7L0mC|K`<;u#ck(kdKmFF>mhFi+7sbYQNeHP|o@$bbw50p=`P zBM9?=98#a4h~!~qB*Ps-aC}avz=Hs@28vx#oQC376lxzbCMdQ>ah40xp3@!4`Y3iqaT0vf@FoOKx) zzX8PxIY|8silwe1^_7K4?n3bZio32M?J2jAJcHt{N~GSk5y=!3C!kpB9@0Lr8Og&a z4s1c{IqxG`3dI^I4s1o*C!jbB#RDjIeSnPLfZ{$B52JYIAu4`5lKW6hdW6*HcOaP) zde8=ew79`Yp;!aO2~QRcVS9}}B-_73asrAQP&|cV@_S@_sX-*0d_*$mFp?Fv2p6mxz@>Z5)jxe~=`KaqOQStRSDSYQsRPnbt?1B$a~ zzrx}C6To4q5nwh!u@o3i3I$l7MMQEJic7&q!07FzpcmsH!11k7tN?Y?(&_V|4sSZ{ zLvbb4flH@Xp#89g{oA8hpBrgkxeUp}C~n|I>NWV0?22N2Xh0F&`BVxbc^JhFi=VCd zlO7r?koK-9CXtbPft5&Z6+^OyIFema%qfA?Q&3!rVrwa+eJhG*P@J_EX-|?yvIdF= zWRQ9jStJLdm@JRfN1?b9#rEK{R`mJPg<^~nQlGDkWP!!chWyDd6BJLWBJJ(fkeq;G z4Gp9|AH{trrf4DUr%+7RM(SG^KWp;md^L2DdRG)@tw-whHzIi$#THwT`h0UFQ??^n z!4k=7yO508jb!`1NN(MSx5(NPUz) zk_C<+*#yObD9%SQB>)*;APC7_D8?K`>J?CIk7A2(q`fPO)6$UoQWQ6!SSkl;FK`vf z3Mke`aaS(VzN-Mm(2EAT{IkD?Wb0c<_Caw2ilL#jYsk zY(U!E-$Qa`Gm^VdJb>b~M@ait6q|G+^-(CEd4kkuy+E@4OC-C#LNW!#2`El`gS1x| zM6wBrtx;S$jI?h>aUY5Wz&Bj!%Zmbv^-*lijI`HaM{*#F)3}iOQWQ6!SYa8`-UP+g 
zC{_?e+WVkbY6ViSk75fH+e;wrHDr*Sk76Hrq&^D8X(-N8MA}QKBAKLtWHO2sP@I6` zd=yurSV{}&UrGnbfqF>JM{ysDN%}~83X0QEoR8uzLuCA66wjd8-WX{wV1i^56#Jk! z3&q1IHrb5ymxkg}6gQx_bqg~70E(wjtZ$CAx3)yG4~i)$9ze0hUSxcd4U#doNY=MU zvNeibQQWm3X&>l_WC156o1i!l#rY^EIV0nfQLKRCtOH2`lsA%RP;Bpm)VHFTd_c6r1=W^{ptj@I&gY{gJG31j$_})(=4H z?NMwIh}7q!SU(7; zx1u<|2&tbzF=sJSPew5X#eFCyl_29cptujk`L|H^DDEpm>IJHiY>nbH6!)Q+vj!R8 z0>x1%Zbk77iZyDH{#;RSr=hqM#ZxFIH6#6-px7S8StzbV@eGPNTaf-OQ0$80d=xjJ7;_)#PXNW% zDE2{dDT-TBOln2?C!^RN#epcUL~$32IUgYXOQF~m#S|1bptujk0uPb?6;SMh;wTii zqIdws}(mu6gQxF3dI(Wk^X&9oPgp|6nCL`3dNjHkp2}=Y=L4Q6epm#6vgBoq<o?sNHU6%(I{CEVL^lLobx>Q zoX`7TPja*SI-l?J%*>v>KhE9r{jU3Yt&iXe?qKf=x;}yDa0h!W*28nSgU5fb>mxXW zOLz&d;U0FssK=YYA)LSkJck>&gPkww@jW<%6S#oqa07R+^A9*a9Ks1)z;n2PJJ|U$ zjt_@$0#}``U&52GYToy1=U>$$IE72Nfye)*`}uGPXK?gQU7x}^?EaV5d$14B{#)yN z(`jR0AM-Cd=)AEm;ng+BU#wohHC!A={VUXyK<(b3Zs6oq$l(_5;q2AA-VfC|y!dg= z*QR&(!sGQ5nomqGsm)!}lWN`kjMh7^Q%~UJM$IdD2{&+UdWr4vxA4gHYS}!3Q+WJl zJ)bFD!3#Jzsq1HNRo8HFO7kV$!TBv(AG}?irSP75bY5M;v%57P-J?zmbqDt!)I7ak zy?RhReMsGXL>)e?Uckjd^9n9Msd)o$VDD2}AHf+sgBNfEckmV-KdHy};RsIQ89axx zPwW2qpQuZC39sNDc0Qx~d2j&7a0a`d)%~i!P)DCv7w`gJ!5y4_N%vd9@xNxQCtZYP|;s za13W~@;%)zhyDN6yn*MY)6)KZKGnBSM$kL>IFP{9&$K;zUE6fzgqJWPOj11 z_ta~+`T^wEs?!&!8#umB^Y#bTqaRZHa17^g2hU%m`vosnS3j)Y!qrPO-@xv1&5M_+ zYq*6SU+X=%fWwz*eFwW{R)Br}E={k$^(4^s3%Gfu=E;w#7dNWcZ&Z74Qpd3KX3fjD zsAoT~u3;z9ynvVR7S2!W`USj!)0?!ug4;8iCz(2jy?1LK!E?BWle4j8%u)2Rx zoqt5VeOR3jgD7_&_WZ(MRp)T@Ynm_N@Yglp!triK!qMYZ@aQ)*U%*>9{!OjlJfcoM zuFl~aUc=F&x_$=7pU`{*myc;)!`@u;9v(lgdGPz{`MG2Ac-HV5Zs88@VP{YGAHyCz zfqi%iM{o*f@C+{D1>C?byn)?2^m;uwfFn4C3%G)p@CxqWEj+qY&vycca0DlC4wvu( zZr~Q)!0vfH-!VLaLpX*rcm~hm8eYRa?BqCK*oUWZ0_SiEFW?4l;SKEGrRVFx0UW_8 zT)-8)gd2Db_ptLGJ>N0x!&5kcbGU>Ta09pS26lI|m5=`Y<-q|Q!6{t86}*Ip+0I9g z-@-k-g-7@3`UxDuF`U6Ocn;U_8t!4|y?TCQ*oUWZ0_SiEFW?4l;SKD*59bT}a0o|m z0%vdmm+%~3!VSEJJ9q;-zo^$cf<4%WLpXvHID-qggy--QZs0ZC!5i2qaQ(0c`)~+H zZ~|v=0hjO`UcwE$hC6rzJMYK!!yfFzAsoReT)-8)gja9}Z(-+Ny`C}b!2uk?F`U2| 
zJcH+O4X@!Ic0PdXfqi%iCvXmza0Sod1>C?ZxPyCm3y<#8>z%+MJcScDg$uZXm+%Vi z;4M7*AkH5S;TX=~89aw;cn$ZkbHAS781~^QoWMC;!V9>8TX+Mzzoh5u!2uk>DO|u6 zyo6V92XEogOwV@$hj0vM@C=^AHN1jbxQDl}`+%PR81~@+p29Jl!8u&Q6}*6Jcm=m` z4{u@jL%4o;0tave$8Z9tZ~@QYIlO?Ea1F2E7T&R_9A3f= z+`>J)h201BddIL2PvIC&;T)d96}*I3a0hSU(L;JY6F7vYa00In+MV~`D{Z*DHS;_# z-Q6ENGxIvFCsz*N-5$KR+K{i{9*(ZrFTb#V3eP>wYj|?4<{_NFK=TrIUa0vPF5nsL znt7x4`nM-^zloXeY4Zp!;3eF_t(o^}_Zz)ZkJr3boxf4toK(AJ9;MwcyIJ%4?dr)d zsAIT*mvC+79bUTIW_W(Ix9NU?nP+J8#LOeKwsV)Q8{`|<9o}$Z_hIWxGf&VuHuD6n zqx-bJfEREJ&&<3(yPx}r?l(2_{%ju4HJ`ytxP#rtb$tM*a0O3)Ti1`CP>1mL_cUMp zsX8+A?CkZ`E6tt1RFD2j9hrG>c76U&nlIoMcE6(aiJ9kS_w&rWHS5JUwBGxsI)W$P z(tK{_tJ(e5@Y2j%v-!l#SF?^`-^^RH`NGUsvu@#f|98>h^V@&QMf)dc>k~Mo@S)%v-bhbfoKZIKEQz^|RE@_o-VmZ_Vx>JzMhv zj?KI^Tfcs;);m|JTQhIX)< zzeU$~X8xJof0Sw-!WkUAL+dLu@67JEg17MGow|Ml$G2)egKM~lmu4QCJ)g0eZ)QD( zb9ez~W*(Yd-D96O6#4f` zdxbiCmO8y!y?m*98mQNBc!TEUYt#+AfxQs*@aQKs&wonoPt^%LgKM~by{>o7d?Wk( z@gmKuH>pP_)nm9wH1AKV{hQSZT*3`JJ)`UETh!xss;6)cFW~sRu8;3hZ{Md53v~uJ z_iFAxppIexAnjU`wha1gfIQxd?Gr0Lr&0DxM{j&D_rlud( z+WoGspPT+zn@|2nb01EfgZ>p;pTnc8G+$khqt@#X&O+T#NZwA-!3hvx0j!9n%kLkC(y~+`wDd`-1M@eNkPQ{!)9q z;;Wixra#o?6}*OB)4yr!LpX<*a1VQ?f79+4!zCPiUC-Y){hM}uZTd57hNp>-cR8ip20P|fI(_aB!sZPIN9go#D?3w;8 zd;FQ{SF)a*)b-sdwQu^9Z2j2u8(B}_(P>@p!LI2qvgHlM<+>EE$=ZTfYr-Me+a3EZ3h9b502{vGS7>A$gFJfQn+O+Sv!Bh!Cl z-T$)IC#L_#=H16MUzq+7n>&wd?!)UNuOANo65PII|F_5f>s5m@^L}^3euLdNXzrQ! 
zi?ewTFHULw7Os9y^VGb*o87O2gGB3{)9M`FnDofCyM%KYsbpOu0zmLuP-#PsMjA6g&U#r7ghtD3o zJ~P~NZ&R1>{C3SpnL39%IC!_#r)Sj-oSf6VgVR0Dvpdw$o$4OWk9M;cv?J z`G@s`z4xo#Bme&}cezc4z-~7aXK_-MW8!`qp>6%{g%{ z-Bs_MzG+u;`>HqaHeAR|u!(%jjcn4p`3%A{|;n6+(u>F|!GvV&eu*CfMi{o5k z>dozA@3NH#x8}!eE`0Lx-MwM`!K3Rqwr|khuAXzDbpH;Ie|-1Z;oR-|)cl-}yiwzuYGjQwK_=V{lE&A!8PXE?T9Z{P3Q-u9->-k68i y(s|zQ-thdg>+Sn-+gtl{A@-|ak8jWa`rTT4?H4xP-Pu0%;EJb5Ke@Zlm#))u|95mm9*AM>#uG8wrW~@V4vyh*R($JU*Gjz>zp%Z z?j$o3)PFz!1DSpIK6|gdUVH7epXbi2Uv^Dfp%D2qDf+Jnv7PUoq)9#Y;^6nD*smz+ ziIzl@M{JRT*Pvr8Zmyubj>kuJ~PIPSz^Wo7GK8j z-3+2;2Y)m#vXJbJ-ZC?aUf+x~JRwhWvAuuA=zLdY^DVOX=UCXs5b*xwx1*?O@<9CZ z_eN347!iUb$P?~oI7b+RdXXlG(>W!QFp8b|$)|9(o}BLWAg^%~yA=p`r?O|kU&T*L3dvZjY!sD~WLW59{m3s>NxcLVwlH6FK#N774|r4yGc;cqAOZ<}1C;sOC6AyG zg}eBEihvQeoCp_ry`&}>+;B1L2VIpn30;=%`T)F867Mg>d!rZc%aWdbzd+vS^}+QL z`T#s!AG{rfGX;n|fInkr6tzmf`(tT(b0PCLXIoffi1Od~wD3o<6+AQPf34|)fILNi z?{Zt4mjCLP@t%BhI-8mI59@pKuhsk2kEVFW?T1zWn!|)YMVi`ULcik$zIDqh;Qh8g z6MXBIPtZIp`OuD|Ogn7$@;&mOcQCD|xrFJ?m$>~P>&?JBCh#CHJShLn-BDEAf1K@O zFWXhN{-c)vgs=knry{?#r?`C|%hiu+7`Go7RUhcg+99v^#0P}VcOfRBwQuJy^yY&< z5&m3b?_bF9whn1*^5oM?Q{SWMVc785PdsjEduMcsz=~JwBz{YGY50iH(jz5GQ=g;d zns;ds-63+2?1|oAx0vb8v(ooqU8{8TTdmU3URtFKD_f?QGwFK;8DZZE*x{-?@=C)j zKEU^NSK&<*%`<%G2pt#8gEWNRK3;bjQljV%DFixRXZdG%`{~oy`X2OnI_%yIdGHqD z>nuOg3-TZx;qxv14&g6Uu{L}}`5pE@rlGeJ*sm+({jbyPj?ni*La)o?5kUed$6dR% zJoNAU2JAWGVTkQ23?3r~_OFH=A&k5oE497#@Pm>qT>-qN18pB7e9NImK-ZuZa z`T}wmwO3jYxSUn;wI&f!drALi`c!IzR7I|5GQaB&^z^bfX8HlT6H!=O*8HV5NL9+!IyGD* z^qeohvgMyI<-=NbAW1#79?hT4*VVEf=1a>hA60I?!e6cFX}&e1@|~yoHs$iMVP^3- z7x`+tn6Ds8BTrbjQ^Q5V?r!&{iD8jxi?*o zrXTL#O%c4HUvY>44uUaI*aRj>2DZ+TfRS z@Q*0`p3??DkN=+t{GX*fKKB&M<6}>;JcgfQc|4@>*Pk|dmjzN*Uw>QSZ#r%8^ZK-1 z;E&R$b!>m7DQB_X^;0!mWOfSuD{>TdG%D?MKwm#4?e*6LKh)PPRlrWdf3M<)JYlT{ zV(-`?1+z=&Pmn*$$6EwWF?xm;2m{Ay_uMZUWxu{_?@9M)`}Mc_zLou2t@V6!q}^OZ zJiWi|+q_H@s%@{*Fd4Vp&U_)zJ_obkaXbCV%DGo*hOmY5sq7*gAFp8kq*HRZJ@;{c zy!3$LGvgw{Pw45Q9~^IFzaKwD|21wuqWKpTUZeh(kMCyO#qvH*ns+zjp-wn`#?MCz 
z)Q`$C`5{j+x`oezFSB=i4pJe?o^b6(Ud&t*NC;gQH&bx)W@qZ5Wq4_q#4};7% z&;9rs(`T696dL3^`suF6FfVqsW^gNt$#VEJ>Ga}FWanjK^reRga z5x}{~EkRej9#T`Tgw)VCa6-*zzM`F2BnM+7-ddu5Xav zy@v0E^_uo4E3cy_uk%%2Pnf)pn!LWE_!grdXnb@#c%13h4vNuVvb@PJzK3#jIk-HF z(N~$z^cD699L2b2S7v?&dh|BI3!$KSqWJ>S9p*EvPU1iS_7d-7vlDR#918+YDWxr+Fq9?1R4;5VLmW$>vziysF4@-#aw zG<~j4rF|G{9<6yL^MkM5@)L09eNEupZ*bN!MEd6+Mti1yx*hiMTWN|;bwsy_ARr&) ze^BxxM7$XYXRnwO#4(<`LXv!bqxnEu&-TrdPIgAsGuZxG<$E{_-`cs)GkAZ4`inwO zs9V2b{cmjZ#&I9(b$zK>|4?nC=g@biwSU0}a;?hzI3DjOeZJGN6@-#bXZ%V(YGsmlR; zeYgewu8&RgAAS7oa$hHvlm+x17Q+5-len*|A&#fF>qqQg5Eqmxv!sALMe#`RIJ{Mo z47+iygxe(SZ~w8D!~7{s#gqjA?O|yhhd3;>~biP;O zX}*#C={G$`e*+BTg@jj(UH~u&o~`IciOMXFS5^sLN{58oq+I`2&H=hVe22gXJ*yq;G0yXd^;v#e|yTG$r3L{^m9k!)w;fG1)rmUV{(Ih z6<-MzUkSJ2y~*bhsT`aR>qqi`-hd>^b9tZHiyGsucUe3BNAfByZ*sBn%?8UsE;3zt zB3tfi=<3Cy=%BlPI^NKu{h}+r z&g^>pi0}nZd=B9z7{ZiO=115>U~J_v!bF(Md~xU$VpuoJ`#1)1lj}_ms|bIb{m3t5JNXA$;C?Fmk48o=XeYmy ziHG^L*TLlD^OqQ3fy1Su$%$-^g0-jk1}0Rc-$V|;6?N185Mfv#3G#%M8mMoIe93%* zmSP+^V!A0ZAK>$fHg8&{{}mRNkuB<5)XA`udeV1(m&W@$EF55Ze;@P3wX(jC3xpG2 zPrQ8pV!TXWC*w;W|GNH`CUGA^yke#H%i-1B9~oA0Ut|~{T=*+1hiO{@5%U0L3Bi~6 zt73oUH{y3#SdHe%FM0RSa>CtV{2E|bvOTKBNQ5O1+6nj?&#`=Wl{GzVXZ=|}WsBrP zy;sVZKGe4;{J4EJ>#1+E_!^7vu=sk5@3ivihXtHCqkX+vF#5Gb(V8~H`EE& z{hd%}z3x{Qb(Nj;fLrc&#c!n^f=zwT=HqAhWd}RO>9Gg={Q9R-JrDcIr?7|o3wyUI zKDD7Y6R-PO(D@XPnxR)@geREXr907<;c|9=F@9(X;Xc6p^DyX>2YM#| z>wL)75?0E`G+!7ZUX^7xAzKt0EMK``jmCXmu-T*0Xdcm%{ee%MfSi4NGUIH%_w|eo zmTy7y_z7?<43Q7O0~Fo5*z$KUtSnotKhpYNqj8s$?+a>P#r*RqhNwDeiRxRtqFd=H z30DB$_bY5L_`V(k`TX5m1#jbz?*|56c;Ju11z08g#Yntb_{xc8NS8RmES$d*IJZud zM0rA;_4_^|`2AW=;7ZZR*O!udq#W95*u9)`Fuo`28820^QRV(C^hf1F67KuTe10A9 z0KYV?p!F4_Pa3?H3_Hx8&1M)nOuuF`3^s2b&o=ws!FuD_3`3pmseg7^69OueC!WoW zp}=zSY}(UMaQGB(aC2$b1TZ|so^z43X*emQ88_&Ew_FEsw z&Evn|BWb&Iy+-?eEZ2S?%(P$EXV=SC`TMi^uT(O1UE_%y-21ZmmW`cnLpGo3De8?b z9BF5^XY-jIX_fDF*?h}cuj_Hze-U=`LB7Ybb+`O1$@-eGT-pB5?>;K;OC$0Lw^At{a!1a}3+V63K1bT(`6wk7J^UJKi%i7oZ ztUucitd{(+2g4Vm{jOGhi`$Q~-FW}h{)ENX+xue{-(>OQ8rJGNG_1-#diH-zcf0QU 
z?$iE{aMS*e^%f(xbI=2hdpmLa9tPQegvJ)FuN6IiCw)cD=mDS5-je+fjWt>>xLvUM zi^MM2d`DszjQ+T*$I2;NWnP5%=IhVSza2IHP8B=ZXZWlVx}Z;~{J36}-3K~X@jYKF z3Jv3X7wZdy#`mSNi@P`zreCuhw|K@uTnl)e664b}($#=!o?e@CEj+EU%z{FKVIpCEF*L z8cXPJF>Jv!@!S`hV(wC{hf^=0ii+TBF#_~D!#fAfbkc6f# zn%kH@0}*-R>AQZ6(C0Op_aMf7I>_Std=ooo>mfzeRw+MV_DveM`klt7qVD#U_gz~Q ze%!uI!*IjQV}KuCW#fQXa~$C3LLjd-@;+|gt@*RZc&bL`}bL|`4Mr~w;3Po*8K6(zhZo-lkpW_V|)+sjZ4;U7R*$*@e4?A3};au zw?Fa5HCiryXg$lTR`*&-6?zv)=<&p7- z#+&caXoT}0kXO8s`FwpE?dZZCl0JPC;SG?Ull3=W{YtC;rmPjo_(kE!xQpS&9>U#D z{Z)}Jgfz8(4r>In^#Yu7=> z2TiU^4=_GNJ&9NBV|<))E=F$xc=dPD|AfXi#k<$W5yj|piifAi4_Lbp>a|@Y`DlN# z`wjNtTuMX1N0^6JJ+Cnid$>h@f)6uAF4^|*l3clmgzJ)p3xg`q+Pg7_|`2s z<94$bu6ONbFX{?L*XfmCgBF)c=YTJJ7#Dwl`ZYv-1s_8%`~bHX?guQ=0%2geh9_$u z8(Pp`)m3$Yy}d`vLyl6_2zl)gKEe+@p0nG#ekkLe{X2I;McUyZZKU?vSyE$-5aiziwdkLp+psex!Wc;~L;l;D1p~w^J2P`fFXpJAV_?Z?zWbtz?zQ*D+ z6o9Xjf=?Zi;}IwmLebYA34!PL|5)_752@lGNWPkct@8a{E?<3&eBaFF`&-E;ey!q# zdM}VT>g|(!@So&=jeq)XSvQ};iB+WILB2ng^mqy|V}4Knil&oM+5Cq!eO{8^;eS-& z%S5U?d3m`#$?C(eLmy0ksxko#4C&efFKd9{|j6=V+W%R2m=Y+MMI`L~IzYhw(i_t$BeD6=Koer9vu>Q0*#C9C^ zn7&&(j=RloDvMy`iC3~e?CqJXac{>-I~|kGdlZkXpA7pq5kL88&fd=c5cLK8^m&zZ zygy0hgZWLx+s5_9NQ5j8#V zKVtEN7C&n7LmID{zn#u=Gd&&Wvs^mPC){+Lzfa-#IIbA6U&+s(IN#mx2EFjdin6N( z^visLjG`E+HfMz2kqd zUd4V=#yN(k^^+&*_g!AcwEv+z(te-qO!k=@yG^|~8QGdgr@zc(0)Q0|q^4&wejDH?xz7iDbe{1r5g7T_=!{VH03H3j> z`2SHKig#9{Mzkj;43ok#~KG9Fh z@*R8*jeS~w7-V|^-#;M>3F*4nGua>Qr5vGGkOT64PUO>XO$YTtPkI+By3$WtyXjyY z{UgfxIK%B@l5X~}7;Pb+8fK5Wjxj!H_G;;Ijn{_Q9>N~tRg_y3P*b)af<8~AADWr7 z1K0d+rXP|%TH!CU5X8#k^X!X+xNgKB(B$Mic-+PDQ)x2Wi;w$3H}c2r^z*WELI0VR z^JZy;Ro{XVf->3j+s){~u2IkjDpN`Y@~zTEAL+xvJc{rcuU;)`=5I<9S4T=b*^ z^bIf${aqtJL09J=jr8Z%gYNC4^UarhGViT^XxK~s&7CbqOP;vBN7MZru%By-?=?R6 z5gzLMpSMW<>B|Xc9P|9|mwHFHD_9-iLpbrh-Gn#hygxaGb|h?QG2gO;Q_cj9PYOyUovk}r(T48l#lP*jqe?6KEvcW;rR^H|2HbTl)l&1U{A}+pR_-{ zQZtNad@xDvalSk!Y<&Eg9ruZ9MOnV19cBB8t6S)CDLPB>fxY~W9|n;W*!A-ygq)zS z+5Wa&($oI-1zJ9x-}8QDiPA0SBekE&k4H|`-xi|_6>hdY*Ci!O-wxP`0nIjIJPdo1 
zZ?7d=4+I@;2jQ7oIiVBuIGxaQNfN(@1G)r066FCudn82u<2ij)J>QI6e7{rjj+Q?G zp&t5I?4O_PJWcOHo1eM#Y^^`OhH&Fo*|^U7Em;?)of&ifL733M){_kV?li2^Sb&Ea??o#Ra<}(hWU(|C&~I zz}CaYF;8?Q)>%gNBedJz5BNMI>RYjWYuayy6k_c{o48H#QfNM zbNR=zUvH6ozXW%oq#LLzjQvF{Zf1{=f&cOY#zt&+1SGV7vl?f!lkK2`73%J zxPFXuJbm{wTp;t1&yb&f4;S zdHU|Be;VGoK;!XJ(!WmNEs>N*E%fzt_*0T3`{!*veUBCsJ3xAUeJc!cJ`DZ+n`E83 z7=2Iczol@V-h5UnRCPYFR9LO&c!2-(PmRQXsend#jPD%2(~++S_s9S*YJVGJBJ78Dn>o>mvBCq(<{tiJD%Xrl@c#Tzl7|fPaPU2eyOz+_(A8(o4B3&<$9Ft zw{ZPgtHq`LP=EZsA=LNj{|24EqjUoAJ9BtFM~Y_gLVdn(w$JXDM7?d2?)v2WT9W## z!2!zS%uQzRp~XZcWO+GKbfTGvz0O*IvbI zjly+1+}i=H!OxpC-Pc`%rH4gR^?h{`v>=M+&c0H^?EF)=q{O!V6SmCZd+B@dKraqR zJMwvO@aLHW;y1}ZJU)M(ov-?rKWx>%yioIpI|P6{evdZ#DIrU7m*CDy$2Iys86V8H z{DxQ9T-Nv1wo7>Kp}>9tcpcs*=PBeZfb|WO<*(2y{ScmFB!e+|!T|Az&tXRr2GAP_ z{pXyiVX*UB@i{`cJYk?i!}uJ+@qVn|=0ALWdhTrM^H_Y%jNvh@j}Wqxj+3H>V@z7 z@$>hfZ@QG|J%{fR2jwZf*DJhyI^yW1yj(G2KkWAaKwkl;cOl{SFH|rh7g%M^uVm{1 zJhUr^=lTM@5Is2&n_d+4w72kaP{HVZ`QR_?%6RTI!ugFIIlt@2dsDlST^HFQaI^E6 zD#roV3;pkupO9lw^gq;(lK<`NDVO9v?oAe_Ul6x%vG{SzzfHqtru{8Jz1Cl3*H^s0 z|N64%@76Uex6k+>Y|i2bydK;pd};xwYR%IIRNE{=c#?w=MCbY2!AN?Z@qNG5TfAm-Hjjz9E;<_MYRr{`Y8hYx|L*nUv-3M8-$d;E?+g9z2g1%n4`Js&FX74T z{O#ad4cbTjL%AlDQ=Y6{nAXBBOvIkwK7#J=(0&iFo?=A5A-|6@>xUeYcHwpy{V*Qj z*CQe9)@ud7tex+W^sL|D`L?#u178=LNPREQ)dv=~s_zXg>*IRLMC$7iII!b&)YB(_ z5N>Cf<-@PGsIR269wX0Hz;pL?ao020J9MeOe&PEen@}uy{2X}lJ(mla?&B}_vraY- zx1t4pRa@7OA1bR}#2Y&`48z?FgU!PUeh__WT2U(wci23fq~qzc^=y1!138j;IMbJ4 z=Xm<(3nS&Zbw1%hpPnx9{`oc!$7w++&w}6!?Y zk7s`dCHbJ9Yvd>N6?PtQQN9@chtgSUyFllqN^Q?qi8aLs!sF-Z0Pppm9tm%WymxrX ze(`B*P%LWRu0e9H3-&-jNPTPcMQry(6{FW^9Pl5`;fo&5;iu=|p*Ef6Cc;%{f#*~u9Wvqhx6U}mCj#qzbneW zKL`J^O!>TAO4I0P<@xRKOVj>b?XlZg;Bm!(w5REFsNdt*&;9t9M)zM;ooz#ZOgQnz znS_^{Z%FrdKPZ&Zzk~kycrx^tk(_)FB0Fw}+<@;t$yCbey2m686W?m#pH=_;J`?9x zZ2NYTdrfS;G`ZKr=WXoVv+viG`AnrRfwNf#gz`Y%I=-K!QPC4S4^U$q^gUx>V)FRC zmh!N6a7uY#c30ZxAnTtZ{n$j?#}m2sQOvXt&k^H zOoX34dDEU(3r`;*Z{N?C$l2~|Oyq3yG0FV_Hs14lKodD{r<{w?7pVXHb~9}3(lAe7 
z+{J#y?}7At@}LKQ-GO~8TeKYRXF!#HA9Hd~%^Y@o-v45Me*T`CpBxxzZzkH_?tDtx z+d=DZ?k9ihJ2i~k+3zOz=p^?Okxzb~PN-Y|?B6A{`*ebxOY(CV$$bEqs+uL=ee`js ze_v1b>r+14nXlD-I&YCz$$dHv&hJ$^q$5N6_NQyuEF&g!>TG^;f6m|Q{W)Pb@t#a%;DOi7HnDQ-Oas;U{p)eDCMs{X3l1Nk34&#?xn#pDSqJV0Qu6 zzvma~e81sL!hyd1=r_a;7+&z>@Ql7E=dlw5NSAyFQSRTRoWr}-%7+H)_3wUy3gib| zWbylclX}GYfLki!@tOpZh_$rxbV; zqem!j^Iw5qhormy!M_EbquOgb>wARFyybbpzxm?Ga-R~qz^9|ar~Z2nYQ3_rmh}S} zd7NM12a=Hg4FV@ve;m)Tc^JRP+|NV#_c-i35W(n)nO)#odjfIt#B<6TCigwe>9n|1 zC{H}6+u{Te&sm~z$Xotby!L>OsXdyG{o>2y*U831e;{K(j&;NE%U~+yy-^X23EuVrBxv$oEUf$XH=G7+0 zTZR5U!)KN8rBvX$2Kcc?K!*BO*3-^=5JG*s#h0<(`n?u^sl{mr&VHaq4;CK5oSAKZ^UW~rTec6pSE5DL^ zdWLo~Zs$hhtErEE?^=Ad>7(7Z(YwILcO}-6U0*@f?hng7L0aGc2EDqurCzaJLa*>Z z|IQWpggV=2f1yXy-F~_W{YE#4Y|J7 z;`#A5zhmP4RXn|0^A)3GDvxY?nyA10Sk7Pmz!2${W~t{Z^Y@gs9Z27Mf1368^7+5@ z(H8kJ=#&TkVgA0d|C`OfjQO(g^ZEZgli&Miz|G%V_79nSt?9u_TEl~8RnC=j zuHlsX-0l9J^uALB=SBlkP|JvH8+K_6dpr@WtT`)5b( zQ-7Mm$?8KHauj_Y@4mBW0q1}K2=(2R(Ff=~^kVdVXWTCxPO|U)SeMxG;a#e?)ia)H z;~ncy+7GE7r~8fVyzVLQm$Cibr?^L`7(J+bK|i%der5aBlkIayd)s1sc-a?5mOIt{ z_G@za$J!sdO!D<^oanxG%0JmZR9EuGabJs_pSYX(A>S{`)fzP$zoh;Q?I7NNLGmsT zZ%afk{ZYF9O9;};nFQUJ?tjj-f`bsBE$fnea94^F#LpuS$U&9 z_TUn!oSkHu(9{ z#iAxA&*VY9zxrm;FY}jINg8&+_`RMpCTq52{4nZXPXUg0nT%JiwEWVP zVG1kTVQu6)7qevWED!ACaU7no2lZc@v3E|Vd>@>A+|Pc@?HKOchdzww{M$3Tv-))w z`8mOJ1+E9CS~Zg9kK$kVF) zi?w`F-@Ck>Kae}y4Zo-6-zD??;Hdu@BKLlFC&{?P+mrhhp<(v5Lkh@~%oo`FpRZH6 z-a)=cbMk#!M!xp&;R|8kU!8Xzkk2ET&s_gb6*QBPWL%kFFV;fM@8)2Iw@9` ze|K0|H9t4rdQAHV-)9&u%B^q6_pR`Ri*oB5i!@ofF6TGl;R&^SOkJ zcI838dfsPEcoIi_@bt3t?Mrj-#k{EAfv>(l)%X43oE)Cydyi{18u@+E+4D&Mr2GGP zk4%$hC|!p#KP9;@+~qSscqfz3e}BG}e4eWX<9lfbe7-=wCwdGo&~FIoLbr7C@`SaN zPv3lv>bwo!yWUkUF*+qc`hrRGwVm{xZ|S1H@_+Ju#man37gobl_PF1JxJq6l2k^jC zjOJJ)UjLAOOq~E z{>C=n6nC+I1pH~g34fI1^-#C*yRS=_y($%C&<*%PD;~`2Pn2)nIlfKL@D23)!*d3I zt#%}SFdfgK2P{VaN7EBOb{zBmz!P-7K@=yMZ-3JF^)6Dq_wSa*eowaT@AmPw+^<7A z{GROkQHzt>V)RD+;rD2RF4D>Q#Dq?l$NVz$oBUe&>rU?jou`xDXPqW`Kd$(PecO}| 
zVT}8TPqvP6Tnuz=aJ80C*D+p9xOV^Y<#T>T4gYYp#?$eKI92jw{dT|&4WoB(YEu4n(lw1P|9UO&>t+2ljVAt8 zG1_SFHz}OBy;H-mXFbE@9&XOFB=8xK<5fa{Jc++-?X>q=`Ukz2>FblM^IWU- zxt=Bdqu59JU%#6Gk0)K{A%2w(nY=>!_FvVoS=QjESmz=A80VtjN!NKqDDqV9Cj3*c z^IW6t+t=;m6=kIh@&w#;oriMIuJbgvik!`U0}ni{)_ITx_6(uR%j3|aj|&{1|4G(; ztUq#pxm19+A#fPT{mGO&;-X$Bd>>3Qj7F`}j}h*Z=0BR(?Ilz41eAm%+yQ@x9dNus%2b@^hZA&yBy)6+QoA|2Mfao?O5Ej#P6+6UCeQW&9{^a7i&h}-`$+g za_~pHj{673rf%f9b;JG}9=esj_ z!d{Dr#Qu=aKEF|n{60md$7AkKYASy>tm{xZ693lMpYJMb`pLdK_3xchzpa=0I;-zr z@%7kX-_?kHy*AkoLVK9ruV?p;+IOd%u8Ho`O76c|LVF6j=*RC?dJ{T*A5gE+xt0wh z>nB04ppWe`Cvy7eJkR`h>koYWzi$13^WXg@U;j71&e#8o(c>zgcq{Fh+`G^AvUit; zt?2m8x2ANI3O2u6Xa3Rr-l=es^$C|_vM+9%y;n5G{H|3o+DyF0v7Y?*>VIz0{Hg!? zBI;p-^`+lEA^-f|lSIEp?FZmJqk(EZq5S|GLb@NIRM@9@VE@P(`E}a&0c>UeaH@R( zzJA(jya0X8+M6YaC*uX{moobSKD-M4exBd&P&!-b&!r>u=RwUEOn;JnEZ)8oJ>Ee* zN%YwC#`>XRbg#mxjj`??FK5PNy?r_Rf!Nx2Z>RfR+YBGJ1Gzsy>&?@V^jGiH?CKwj ziO_y-iA>%2zP!=jjia6Sd29G5`El&iKa}<(!TDhGJjuSdy`(?C-_7?4#lGLgzh7wk zfO?nNIA}5PE=Jr};`p>0?<|q5mGZx{{U!U_#JNZM1HYl+=<&`!@w;%F^mR0Y?2giX zZ9f>buMPG!9q(+>f|Yj}e1}seywUsGZn5!BL_Jx-ejn|hcAIuQ)cHO;-szC`dFt`b z*H2s`Rm1Khggu`l}}k$GfI@Odovo05638rzeP zyTh=x6N3Z({Evt*o_aFFMb*_}v!w@fY{&#AXM24W85O<5j=UyBO`(@lx1J zJCU^y-C{4pe)bpMuCnLI?-qw^gN-vw1!yPY7b!mA&sT*%w@#yceEkvWk4bu>Z|9O< z@ig+$`6c5xg_GsaGX;Ly-+x!%yS_A~AHq|N?qE65)BL#YlfRqVBiC!EtJOH~fmZpi zWIe6MZ6D0$U&ee{`1x_(TQm8+J+vC1w5S>eSqbfx1@0YQQ-P|U}zjyIitSs9Tfb?{s`lIf0obr z&1HE1;W6GvJHFV1_xm&N5${FZ+=W6JPM4yJXw3H z_c~l<>82;Z^S5AUZ9GvbJV!HxeLGb?S-bhoKM;5WR^H@<@?Q`-<$G|N-|_VO^u0aG zehYmc+j;NiUc!O@eh~WCu(-g*yu8;7d$j9(Gp>>a^<6wPlCE9IznSIn{@)JBx}JUS zFYA9Fmwe#+uk17BkRq@Ln980b`h7T45AMr&A`=JP2S1S7Z`nsddVJh!-)r{s5V8-# z-V?9vxk99PzC>=znJ?LYVDc+XrC|uyb377mu=QuU`?6oJO}`k5_4SH?@OwtE&H{bG zI){*bqB@lV109_^k?;R%h4Io&Kf({WpMzo7HVyrJjKf77Pgb9f$-PA0zqow?{LlV2 z#%a-ykw4zJL*Z2m7pY$fev3$De*Zgoo#<0#JFumm<2~}>ar@`@K+U*>>3vr-gxvli zBhTdhOZfhyrsvmPz&Pf^P?LYJwzBv?R8Egb8tR>ss)odl-?>WQe_MW)CYR*z#PIs( z(}0)PU!D9pt>62ZtiAqo86u9v``Dc~ZTqC=uTJ@d=|zV!81ng^w6iet9<%~ 
z3MwzzpQ7=w_n3y>4$)6QB^MN)uQz>-v|r_@a+XXM$m8D~s2137-y#hh<$PWd^!)<*u2wtD>bBI^08G*+-!9(RF`f)tV-wv%l3TLlWd1m$WsOV|DbT#3y`%d)A z_oD!>Ibwg^eo23!`KvMxz~gwXk|&{0 zkUkyhH3gHL$4B}(pMkvIrY{nH)R_-*c?|SzT}k@P4m!Ok|J(l{_I>yuPb1u_qM)2)40={w4Y4f z>2)GM)PIMCH_Z}4mR>c7MrDSx?-NXwTBA5pw(b^0~X)3T&IY9a9djO=R) z!=^WTS?;!3)cY_@Ji=b$cS~U?dV!RE#Qal~muPg`ER539bl*oaW0xkY-DJKw%qRW8 zi{zK%bBE!xpfFi?ra@1SWZRMX6MYvFURDlSzJRaK6Ml?EZ=O!YUpnC1W>Jre5!dVI zjlvt)Ll0h{Vbk!*>r1tbdev7yru>Ip5>u`BdqMbOh?^ewepH*ItJe@*cRpDz-79fo%(ooJWH;pe;Df4E0r;Botja@*eC z+MZk`ot@iX*?*yM!_XFngPR!E*K1fRtWmfl?R~WEqcB?X6rB}(Y$^;yo%jpV#Q*tT@)vwXZlNNo%rre0m_L#Fv;4=GL@P3HhX?z16~B8l zs`*klGosfCd_1lfz90T%cmgu;`Y`ZhvyRqD`zlNN2t1@=`u=hFqbQf9>+3?-qgHMd zzot!6e#!a@!5>odHE9DdSNT%X5k4_STO zDf$Dtzr%a|wVlZPvWBHXr^3(D_ZOc`@$mHKTFs{X>-&_9A)|#O%4PLRG~_DO_aPa7 z-4bVxSBgS2OU|8Z>JuPa8ksGR+MJf9UNcJC~$UfScD zL?3`zXc#=}FT^hC`~3aUaXZK1VW6ylz?YS`r*ww_`mIR+6{MR#mCPqeWvH)`^9y}k z>v&eyA{Y69FrLowVr7k`Qw^hdDaXxC(_i>wS4e(ezxVl-c!p^0=>IFCiL9R^kn`_=vF#2`HxC|*NbK^^EcnZ5OCfh zaFThh!K2DoKff?z7V{-~;`_e^4?KQvb{Kp@%gwlo=`)P(tQ~qx@CZYcpU(rL{+E6d z?cs=)5B0}bKZmc;6q(nEwuxSfO-7X%_u`l9cV#6J@o{(MdW%bbJTtCji1iUXo-f$^ z)~&NdG32?GYLMKUl>e?ya?bEQnmxU*NCX1N$@g?@{lw4b#ABW_{DcA;^PFLg^?*Mw zm%jh0JZHEiw@+l{@31|Ixb*%fgvmPo{IcPF>T`yV&li7tAMHhT>d*R|Ar&0+N^1n; ztR9Z{oZ%Ip9$D^G=L}z%%RkmR!%HMz@4Y|cbA~so*w{bbfN@Y++gZ9#f(9z=<2k)B z%yWBLzLiA}+@87}@p(4r>3ik~T-$$Ho%CGxn|qg7yYAMoV)G`Ib-$|!`8-@=FE$fT zw~uJIXraZ3_-(NDYt*;lO?IE4Kz2WhU-@P`N4u=d9@*~EpF^MX`$}OK^ObjtIS_>KFs$;+j&8s$CdBK>3g@EKJOm(Q_e2; zG3EVEWk+I%!T;4GqZaew+0Ly`Y`TY+6yf@`v7&1NCLp=%hUHRJZ1*SiC zu63DIDo^bD8wS|n_tt3edrd#2KR14mFIAZcdZv^swb9-?A3(=tzl#26oag$C9@kIT z8}v))sw(BbC%$&BveLl`-O2;2~_DhgIs1FpU`z2nX z1uO3|_ztJ6$b>(}ozwpjM3T zs?k?Yprs*QL-~Uc{QWm6=kSvK5_Q(wG(W-T>2MANkAFHbX|MczP2_$_rQ-_CS9#z{ z4Sn36l?x!jejxOFec;F7>8Bcr-1B-e&GdwNmDQ7fy9sb0W$|OpPWk(^9_~Nz@qYGv z>F0$H{RFGy4Y-Q=^QFb;BdR|R*ZtXI^Z`wGKMw6Fe@^sJraigc@^M?M{10dH*)FX546fp@wD9M{lRShWy;p%Trcn$wf>*UkN5d?{N0)Nt?L}{5c@u9Reoi;*pKPBEzlY8U7__}PX{8O^|VKn8*%6}*1-^O&{^R-y?WbZ-6 
zqdJ9tL3V!c0?F$43KyeGwP5z1W9XCP1$fKBmpb8N{EhnD4~E>z@(TH`YSHdX(X&no z|M}yDe_2cTGZg+<^!dETc)ss&j^N?r;!+zsW*?8czJks(C7+*P2>Ur+5Bm99mWk>YWoXdGy3;w#@PW)^?cYoT><8&(D zPTzT)xrsh})*GdypnaS#A>=3OEBh;+uQRh@i2inIG972%XXg43eeafXejcwh`Ipt7 z^7H3_xAb<*Pt2tJw@U)@cQC|vyYOW9wXBx~r|xAdpD#xL1M$iEw{6se zy<0TQ_CJuL`yW1k;`5`B@AGtAzFNy+JO#U1jDAJo##^cXvVTI~Uq0tXP51r`{cnDp z{@;7j_7nyUuQAU1qy8620~~!0{L5NT*w1={eP1_jH~-7pU))Z91)DdG+n*pG?$_e7 z@9Su5el+-AwwKA58mQ<0En~F4IMwfvMRoEHrB8#Nv$_i?VZ_%aPceWS(w zKF&Uiuhg)w{%Q@A`#9|!PS{UAFOvDkZmAjbkh=x1{JoqLIlsRIau-?Pae0Nz`TbwI zz7^kxHNT$+2)2Je*m;F`))Ia1``zPNz*h9n&jHLbzq_W%@?D~`{_y_T^*!CEPeFvC zP0E)s&++f&dY8`^#kqN%aXFX(hG^4E17V|Z~Ve>U* zr7OWd=1}c=WD(!`VTztU_Jd334L5rjUGGEgLGjYLbTh@O8!tMoOGUM4*8i` zkI`}h7kp3FTf81=f1;+c&OMFMErQbg8YJh8 z#)9@@Hu}f7N7wn59Vfw4cKem~zwkQ5j-KXwV}qTucE32W zTjs~e_a1(NdVSoJ-YfeWr4M+&H0S5ausiW{vvgsmU^HFqr_jIW=j~pY>EDxiv+ck} z_MNoIUuu>A$~1p|-ma3(zl{0ZJ_CL}|FlegywA_u&Ck4Vg>OFpiJ_EF$#;P2!~=40 zf8pr4+4-R}IX`56Eau0{QpEQe`F&8qz5|ln_ha&myN;>cg86xIm-#UqhNO0&VR*C= z58tN(d;GV3sE_SuBJ((NUqibN$o^eJpU-Jt!}|O?R;4q}(1a#B2YE1E(k)?0=G%oe zQa}6Mcp331>es`?&Mo#h_jCEYJp8XUR$hKedHj|99iG3aaX&Wzzi6kVpgxG&_tTBF zZXT>3DcSvl*dGAML3|%P*mnuM(T-3v`1nI{ah?8n#o-?zU)=Q=!}cc_-pGBK$vgzR z4=w+z($M1RN0{&4qZ)=C0$85xcN5X?Ax(liE3=F*gGdp+f;f4Bb~@hgpB~DMA8&eZ zs>g}_@N?34l~GO9>b?ck3qD+bp6IvTw^g0w>+$U0Wj~PGpU{{|d)l>&al79sUb>U< zp@{ES>|lJ4@$Z!P{{AxYYmk4?lS7gccO6oGX2&->WZqZK-!Q$Q#UkVf9r4nGnhyD{ zmL$-%;*pW_bCacf_I%E3r5y6#Z24c3%fC>t%A4M_^Bhc;PSBH`7daw$B>uhhy?Fc_3FvHI2m3_+r|b5! 
znC~79qT2pW;<@kmkC74@wk|x#b*_BghMsPEWO}t{GUGRzUhSq{jrYD`Z-;0X!q(*b zlp(=ui^30etGC_isT*GHhDV)v$L++kR_FS2ueFbw-9PVeQ#ou?x**d{u1+V+Y zVHcYhFuvhxhN9wXFMoX{kfA-=^@xUXItpFzrURfBfBk8BYKq#X}|m?-=+#&S_XX-^+IH`x(4G@L7tTD4jtj zc)G6-!|nkkzAP*1q{N-=7%Ku``ANHFZ z?`M6!zUO+^d@Ixa9JqX+$J&MUU+tY1ce~ZC@hl!?@YU=r@VP1$x@CNW$InF}{aWam z?LR2$Z1Twc*w|O%eqpUad)M2wll2eMj>tY(#)oKU;uYH%-%UFdPp2M)VY7P+pRIVr zmvVp{U&Hmk*yQKyAldJOK)!xYLoxb>+U4xH02aZ|iKXM6^R%9{-#0xPG``usq1q7b zbJ%0;(d_f7?g96C_jJ5x^s(C#zJYt92T6ayeLf-SFAf^tyuUF0PWlVOccT4;^$Ri& zrtnK`A6I_4|0w;RrlVcJ4nPk+_G;l@(w}@p^UqrY?4|v0rJs=6BRY`Y&b+;QdqMj} zd#m)(5O{lz@1;Lfm3a?5e(!wPOZyjS=Hxqw-F#oyK!1SOVvVc)!o;Lw6+aeH|%lXMj2CJ?oPr z?M%1AW4++J=X+)@-)f;l)-|=>?EJSBQMe!20=u$W z{R&AB`zQyu1Hs01@Ixib2>W_HV;v0IcpI$3Ka&R24 zjh(IRqBv>txPJIJrBwKJO&<^3K56Q;ee{E>Q!iD$_Io;?djJ3D+|FezjR)=mT2^{* z=bh@m$b65=skdt-R4xx0;fv<6mh8+8%~KF;ZVI$d!AuvUGOka`D;l5Au ziJ!rCxRCV*8z1@i72H3{pD*dXx2*PamC2);_>*YGt@>9nVDfl-MEiQB^xbK{%z1@V z_jkM>$;WkazE<&YJbT-hYy4#6CXCZi|7T^+VXS`5zqgmyx3s^ZA9k|-<~ys?{>JA$ ze7>X9_FdJxVsunP-v{CQdP|eZFJI5ce0jIp}-|6~p2cQU@YZ)f{lWMe+&^NSK@cRqk zhuyQ7<|}K#Y`)KGK2R(5w^5%$ZON#72PI!JFJ$9X*+)(JZ(gJMU5kpCllC1kKen~=EqvE_eusk?-tB&%fknK zOJ(Yywtth_sp>Re7vHJzxcw3h<=h+NUC(9Q)|2C<9A}3i_D}H&j>E$qv;vQEi*!)~i@xv`+eUl9$|dk?c$ZBqfWhs-EDs23oig(55EC+8CHn> z7>2(2b~6@V-5-IyyDw+&u;UBxh5Qlr?&1vm)ZW?slJ%PLqsQ>2zmtBaVdTDxVBbMa z?hWo+BhUqZm$%!ivjq{PpK0y6{jl;+>>KM_aftC1W~Y`?U;W&woX_KXJC~a7n=*S* zUuk$#UZK9+;$~m!q$j!0xJG>ZeyL=iMZ4LNnxZT7NxR91TBjx?=YVTviyyN0PS-(; z_t^WR7GGlVBNp$r_+uJ}K11Vu-5PN`zOL-+$5}l;06R3ud|21e{$(@cKE8$g&l1#Y zb@DImXJ|iB587{D-_rSjKT*E-+IP^y;Fl=rbm_GUIzVevfaQ!?@Y;@y;J- z>2v$^TaUx<{-M^B_*Z0S)SOLvOOvJ3lczNKT(wVGIETQe?aT*$TqGh(JHM<85_j6J|*{Zg^<;^A`LF0M?lb+_sy5wtwAjzl^6 zIz@x^$6Y&!hw&#~x{dK6(iyMV!g97xBVI&!{=JC!S_LoPMPZ2X3hZ_)eKVe7^aaH? 
zi{}>gD-Ltqk?~)Ef4uZDO@};}OL^$&U;h3`Kc*AyB2#Xr)Q9{lE&pS={1|V^eAOtp zUy*#WKV8#h{+D!*K?n2~{afGm)$$kPf~tn$ZQTs(#|+L+!l^%DapP~D z@5$dRf3)3a=mWpNhwsahhI~gz;g<^MX}ufjpV#=UX7Bsz{El>gdqer&S3j!n`|bYG z{`MyIC(#Gm@wlDenG$|ze(x{h_K#YA>Zg3iR^xH|UJYw?*V93bXX!w@@Nom=Qx=Rt z$3pQNM~{DC9Y)G|8|2z1>%8J`xSx@Q@9#fv{>Z(zqWx`CJgS8bw!dVbm9@X5|FrgJ z<3yoX=?-R>giec7zKI{4^rt4rS{Z0de`j(W)~K%MLhrJAs&XB)_redz)#p>em#+&` zvh->G?aZG&FDYPH5&T&XyVJxR`~I?ww|!c#xrKuv-tZw#|Qro{UG}^vFMf)2H%^y-@IC5vUCGk%>qPt6AAD@2{W(=X`*<$@SpDp` zB;QZp&#t7Leun+*|H!!Ze?vd}Lh)~&ihlNY#J^1YSvv=76Ix*d?OxWtX+QV?^%C~& zt%6k5=70OFe=Zd&nju>*-(M;m>klzbbi4ZUg=5=G?+5*RW?A^&KmFz3ihr2c?_Xv+ zGC!~wU90gY+kg5#%%5lf_bH)MzUxc+WGsmX{?!-Ee_AVYEk?)ro^EdPU31^}ddl(Y zX2OM>*T~Z1=<%xA>z`u0db`zbEHZ+Bv@WeWWt)spI?PoJ%|9neBgC_HUaBKij`; zUeL;pa(}q@Z$()_#PbW&PmQ(z>{T{i#N?PfS$=qb{GFrdr>io^lFSNJR2d?jfUuO!aV$`VspqrE3S{gEnqh%K45k#CbuPuVcAW z*wbrDnAiq6?gZkh9df3-Bil2W+I5gI49Phs+b*Al# z`fWsH~iOq?}5Hm0Aw#X;*yy z4E>Ei@b93}pi9_r0{bvPQFdS7LGY)B+>-tPL$oI}Ih0%Ydy;N=`@UV*x8^O(k9?2I zKDJ^chYaK?70yxrtW>yA=l2~?;P)4~^}h2YzvCC~FQd5F*T+;I={fJi7MF_jJ=;Ti z&YSJQ<&1qUN|)np{(T;Q)Y~ug7nNG>8+vw*fAgh2Klk*M7;?k6F}Rjl}Qf9RDs5xX!nU z=-+ZH_m>C8r+@2yc;bZn>3)9;>H}ryPcdWZ_rv+Qn9`)}k}B}KHPAVJv=dBgLg7|{ zO%lKErF`FaKEqquCR4xL*zcCwUZeGt+8)yMVswLMEVZpy{V%mWs3+lY9$8dV{;{0^ zFKMDtW%93R{_HtA5xd@hh4xyxnh0RM^gkuNX@o-0(Qk!!0SfE^?b?QSX;k90r^V=Q zO(;b&NiL7u7mRyEd?Vl~zi9TNzMYy~yo*`-qZ;)l{47gK<&8B>$Nk(W7hb8+1ds4a z8m@T7O19ruibBid_MrAkn@3);Ti=7eZwWrNR}%rySC;%PuRDZac;ooT_pa7*r6~&} zRnj+UkjT;R4ez5k^gdwVS(2ZyR8_u@;$1HAiE`37d&M)x)+d^+br{Nz42`659l;wvBC*F93@x6o- zZ+%STUT?h7@@=(xgf8eu#qI+un*7l(pqJ?HP&7w;hYXndKD(ms>Xdd(@-;Kw<6bs! 
zs8S@KvigW7le9PCuFA{wqAGR_ac^Ji1a2AmbNHm^MOiQlAM(q>muT`GxROFo;ja=r zwf$a=Tr#is9){2})%$M7z27bBEl0!$Z+bgOUmiclzjqnsTGU&!Sq>@v9ZW|#JpCOr zG`zI~rzpmTo7^X6c!_$*2`RAyA}nB1IpeU(+v^`i-@Ax!ykZyi45y#rp2 z*eUm?J9;!;v-`YzJCsfk8!(jKK%ehsJyFR$xj~^nxM$60iywt*bDUYykHR0}E zY4CSy=yuHI>UPfM?sl*keNOW?$)+gz-oS;*hGbuh-v?7FaGaXm?^722gC98F`@1$R z1l4@tM|??qVD~5Brwh0f{(aTZTh@%e&o?{%K)ooZ_YT|mty-9-{co*aR=U!2@|_w_ z_LB8?~ z^mnXpKjq{5P9Tr}+#vjOfK@pilk`~RgeSWn332C_oOe^WO*VJ&Pgd&uO`_k6 zkXzu}z7{{1buzw96TXe+7wufUmFGa>hn91msLuNO2CSa#3~wo%srniAS-$-&*92gB zN`=K1zFg&xc6HkqMvnh3l^oy?+DWi_N`>Fm{MCXep*-;xGOif?sm7sCfYNL97Nf6f zy4M?TwR0;uyh`$2D(GpcaJ~AwVIkpe=rB9j-mUZk&u`?$2@9p%XgvQ>?-ER0wY*)#>|5Gh~K;NhR;YQk@I@9AhlxwK((|obtA5tgWcnACqd(Q~EJ6YA7Er@I)zijhuTKP0=NWPLHQv-34yGh@%zNJ`1> zgUs5q_sEc4?sKBt`m9}vIUVa<&r&hzqx+q|(E6lbC7e-q@xw~bld+3EIsJY|M!#{t ziP9(b1U*49`YpvLUQd02o@e>x{z;<$cFrQqFSA@%nRJ{_eq{X(*PDw^tKNL{H0ez@ z$d~T~uO$Ba=Fr{@&(ttaho5`So>PXP(9S1Kk$Ln%w)6C!&LiZH`4^$ic7Hj|X>#u) z&Ku|N>Ad@W8GB^s_x)Ud9xv3tT&6r~W@plMkiTU88|kNneWVlJL$|!aKDx)m0F4^5{`x9nZdrHo6+`L@Nhu$6yvvh2c2{zF2 zda;{*^(|U&H91GxsrjL|z`L&=^nJhG$A|vp(A$NH_Q-h*eV>Id$Jyol6zwGPAK(yH zOUU^Q%^z>1ea@EKC3t1;2Z6%)JtA^ni|PsB?as6}zxM;}1&^G^80ClSe$xiu<9wyJ z6TRV*F#=^OuaULGo0C@`NQxw zr5o_>TrcGf-x)Q1t@A)gs?6jIpFde8_#&m+_9=aV@^6yz-fzc+9tBX0KCSgG645|z z=%+Uc-j`Y2zso$YPqXVh5`Li`t#|RrdgGVR>Fu$}$c=5*WguZWY6#7hm z-M{ktQ%^Q-*#41L_Is{Y;PyLy$n4(+VT3%v?lT2kQD^0W1b51PnL>Hy-1@ne|D9zd zx4h@s^m70WJLI_$_Y#xY8LKv|n-8hZrBE9gCL^F+OB|#fpbD?*2n; zeqy|2s^SqZ*Ff~<4pgIlf;2seUrs$5hdufv&X10=M}MS%LxXlObg;kh_UhvTZ>PcL z`;zwAHIsC7C>Uv1quVR^l^y4!{X#$gRXSn$Uaz*pQsH%~Sg>Df1XQU1oB3lw!%+Vp ziw{`5P3ex?-)8Y?7T>AyzWTpv*l+vJW4|}p+8c%es{a!1hW=Yz?8-4vBKI~>9xd(_ zQ+fuGi|t44hWXv@Z)ex1F1e-^-&ZKTVK3W*kCzvSeVw@oa$TqI)h}mN(RmUUqjt*M z^d{_~pBjcKzuMmaFn-Yg&)@qS*Qih9zweiFs5K1kpy`S0?Q-f%7&3o(IoprxZEq)y zdM~q$Ip0!@zN>iqK7#B$wP>J^$iGeUuTuUF>2I*UO2;H^zm@H0XxJ=ECVA!&4Vi!W z7Qg?rMw6q_-+%gsegBE=(Cu`3ukh0e|L!vh-{F)AZ}j({UaWQ?p1y?oWA+dIKLnbb zQ!>9ed!O*9r9Gg2JfrUu{_+VKwrjgbi1zdQlHdL7^gdy;x12`N`zE!Vz(xI)WvW(b 
zJz`Bn{nPKYT`8H6uj9Yf&Q!MlNJGDGA}bd_!g?Lx7o*>!T=r3);m1Jo?x%Y{1^OKS z;SbCEy^={Dw0mtw*6+9;ApZsO9`wEt`Rkfp=L5AJZe|?smk2*ndqO&;y=aEeFua7C zPDu%Mw-*O!zxEVpKW?Ny5(fWU^-<=j`F@D@MAi)`SKmMKDk_@qV~jVN+@;Oq$==ta z^bIgQOV@d==-SSFplj|3x<0Lh$^A*hxBU^~=l6|weU$M*YmZC!8vbmz@rn;HzQ^pt zDbM}CS?ht{B*O)nhh#ZF_a7QP3Qz8jR)n9dohU|}ZyS4DL;VgX8Q1I*-9LTf`{}1; zTqED+*GrV|X@A{q@qLtY2ecjjF7-I}abZUp$rI|Y zfB5=r`{Fpp`z_3$?-Y2UVf}U&#{;2Z{dU&?@o8AU-Ssw$TRXJ--1-{UZ+C6c_kItk z`?1-6Tm4q47Zj)cHrsiteNLnLZSg~VKTNBB`*K+m9qliD4s4VA!Fn{rIANyz%G#yX z0vF@Q)tT`J+VLyCDf7$w){t(yKkH=s-+(X5Ei4KApw&gSz3a6-R;N5}dTIV+a&Fby zeR6Kq+I@0vmHOo8R>OX_=lERn`z{1E!p|4dPl#X2dW%twcE|hw2H|Asc6{G+W-Gp3 zrGR`K8|=FXa{izEu4F&M; ztcab~_fOv5zgF>hvT)qLEJj~fc&+sE75!1<+q<+qz#ibt1%edY0{#U*S zq4I#e7=B5s_IHBx4L78AYOME$6*XXy8Vm|nKx z|JyNMPVAS@n?E`Gb*k}lu=T)V^mau&+n(l1irmAc?IWM=@-IeQ$2ooNs#Ez@jCj5{ z43qw_ei4-kG9zr!e3V>-%}4gP`;jh^Q!W%jwU1n)WXeQQ3y zmW4lF`rn?T=T!9HlgmFA{o8*^`im{;$0`WdZ?p8L?G?Qw{M1X7-xzm8-;2=;G(FqS z(L{=o7)yENK6%yK(dDky_xW-@4vM`WHaioBX@`T!GwC-mc#_BGA(4)D=IfTl=*vIS z_xW*Y!^WlV7oB2U>itc8&JwLJUf9j>QX2wd0{RiAqn;2)nA7;5G)gzj*P?Mu@k%cveS-bRrwC8n_T$X>n zqWUo(cADbL{NokdshWc3L6L7vZ&{pfx5$F%<~71+-D>VKy3{`T)D{qo3lq@9#+ zop8~Plueabm^dKE?6|{fI;&q;s&-X*c=nTax;gjQOc)OngeE;lZ_zr*@>AF}k z8a8@qzft}N;t09Cz289kQKZvt6?@;K<&gfkqzAKyx86g)@YZ`)Yd!IlwHB^qxQ>2e zypH~>`-@Jm&p#F;-glqn3p6G1>qUR_-{X4p!F1g1_SEep#_9R|gW3Fpik_~UqFtfh zaer?pH=ergz7}{~N%+7c|J|+wSv;08Ul#9t{`)fd0WbgEuHnr4R(#0k-=57+a~=72 zro8`OUmaO>XCdRO1T$~_89dczCiZl%#bR* z{xC$ly+H0=K6XUvG5FYDf_%+cfGX!J3|_U31F`0nOh^B#@B}}=_j)Q9AePoM!~CVJ z9X|@XCd+&y@X6r+g(1P)*7ZQo7U5f{A5}T{J^|lH;C3?h@1||nYLoi_;~P7*T=QC{ zHXEt_BU!M^WzK`az7*ZBCL|{mXXkKA@?`B z9Qqz0J_}_137qr$v|S$J?r_)By37ux%sUlb*_2ldU9-{3p!b@hzr zMI91%Ju6M_)|{^2Zhv9l@y78iOqI-}fM%JRN1v`)D3 zIix4lS#NB5Ta0!q-eZ67oA+oY^g7($vX*OgKE&yLu9&pbN3Y*wPI_H_>G!>jZr|VN zbesK3=r+4sjGiSD6#VwCCf|owYM7Us`_q2j8Fm!~Aw~vgY;KqpFQE4 zd@rAUhw{ts7 zea?B__kG^yeZJn0^S;WrI!Z`a`2BLYPDkaWzDDS-V>H4OdFjQMlDvnX!uR2z=iWTV zQFpD8?3;a@9!rNVppUMQZ!_f|c3JXkCCAFAF! 
z`D0x`8_{v0!odn#pEFAH(RJ@dCX2`IAg8Wfgx?Iu%XK}e#Z7*Pv?uD>J)<1ow{gby zopkiQz4Bexm4$rQ^BQa)Q2YMoK=^%i4}MF#`bFcLmRpqlBExcv7b!o?-UwgImCN7# zuNHo}{rV?2_x9H*?`cM~diOfl-0H9Mcj0~mI)4|||F~Mt2lfUgD(O+sLYLn*o9XsD z#Xiz{7OjW0ORmGtHK-N5jq*IG5_=q6C;SO?f8_j3Qe?XFQ~4I29~$1A}!Oxh#mmV~}covcsDEc;t7!1kXQr*=0xRL?P2I&IxdOHSySnD~jV zH6N__UCm5gdqa?I$2!AQPlRv2+ zou4P|mZdlumz|Sn_C7P?DSn>0`+xD>_%=?aEyMW==qN7Kvx)BE_msHZ@;1J~C__FrlO5vZz?~n)J`^E=7zuC^wv+)euug3ZzHjZTDA(5SUGv}Wx zsU2un`a48_{7$h8L3m!d@Hx0#`QGPw<=|2h}{BBf)p-qmio?T7Y)ea8j^taQaLG#d}LOM!I z$$LfxBJ>kP8{{PP51Mc0G0~&xtKrqiZ>-{*2flBQ^#`wV_!E7nE^;1yr!Mt0IzVs! zt>W*HbNoE_ajT^~taELmpGohJEQcc>lIMOw21x8Y55LgBFYLSzzo33bzl-q;wfbK5 ztt9!tZlb+xd;oMEeH+Sqh0C*cK)tub>YZ`wZT#teO!6& z1?p#QAIux5<3U$`mfBOLpQ^-{edpv)CPL^kzjG+|S(p~%`;!kxb_ex@{Mz}jerv6i zzf9+4M)(kpD~wWD{GWaAWc$R~J`Le{li|35qzm_d z(D~PB+`yCal#fYI=%smFDX-MT_j2^#OfD~{^k)+)4_!h(_<;-cd?82jAk9`r9vY>U>YwYFAary}zLm;@wR`9%PZW7DISHC~OT8`>I)i$#gQmBsVOQ`At4Dag zLSFDQCrSmA-b{XA=g8vxXha*E?`yEnWn?G)u#pv80KqQm7 zUGh(Qr}GQ5)An774+chTQ6tu`@z>sv>N)wSIK?ZHGJ=_ zmuDTIUQXeBOdc!I%bOg%G`V*5@lBNDK=knxryU2PkLlb%^ilU;Qn^cdUuHXRdS-I{ zKS>{d_PApwl`s3#$FDm0{p;fqKM6&ABmE>i#X$6Ov(ugf(aTK>2cnlp{AAou-s84Q z1$ud%gWtbi9`Tc+mxta@Vtvs-^zlNc9S5S1=MP99kNC;BK32Q!Qh`41qA?cqAJ9MR zUmuV7NzunQu0M$p!hz`Jb545>L@&Q`>k&U0jq6K4*W&KrAo`Q*-FB%!FW>3l_pg^n z{G{mRq4$#*LK%oYUgxyqK=g6Wfb{W*pN#v-ac;X*ppSo|v9=?A^1sfwD`sK_qL)8% z+H)X!`GdC~@so$rPkzd6mkRW9jf3C6pF9%3il2Pr;#Z8&4@58Ta@unsdU@v&y$rw0 zIu!FE7rE_HfnKH@{3Cif`16!Q2dI~SSQo`V1JTR>yyb|$Je27L@zS~(#s?3dE$Ec+M@^NCwDsd{p;nC zxK;G>(EG`e1JuW7opv0EK0b5P5kGk-$9LAc?NWiCe20U7L>~t)ZasE@`uJ9-9S5S1 zS02&FLprWA#%-4h^zpYJjQnZ;`gkO67616g#jVE;P%n2n?Ku#={0?mx7k*!S4#ZC$8F!73?`(71r2>8Y zn1kQHJ|2l*p?`1Wca+BuP%l?H?Ku#={Hp=!<&kmMxL&@^ZI=r4@^lA3sNcokhueOQ zU%(<>T zi!15M9FMQ<5w}Y@e)~3|V|=YXmwW>~AHy#R9YL9W9+1G!*P#8wIDPkteB=G_m~L`P zL^|*9fqSNazXId#?n{C%b~3gf37i>S;eI5v7>@TNvHeE~#+9Z0uyJ)S=bM+ACj0nI 
zA0rvK`xmW@?H7@9+QZIq9|vWnD?8*|*%zT*+R66CZqa>Pu#W-uvB@L+K=}n&Y<`HX$|~5v*A3x+dc~>7jgMc(SDfHj=vc>Y?gA=!xU!UHpUCcW>f<4vzd3{)fuXTb=fLBjo2T zPX3k1&&e+SLCDXtKMs!k;Ap{coK^Mpe}erP;4@09+S?a!BMlXxlI z-;m0o|Gi+A#z{BC;-sMxC&l9-(1&!ltbUE-mUuj4@{{x=4l_QMi&O4$=(hUp&#ugN z@ee{SzDMWxSzO$=Tu6T`vq2hoAaXHpkmaJu;ny227Z&H5d}OQsw6BTeq;L87&wk_H zDGvRW$Vat{ukm-hp9Fo)I{qb_kaGo%Zrk@GTXmE-nFQVX-?sm3B;OLh540Z=*RO-= z?;UfP<@&W74xOI_KM&O2{lAB!cw{i-8uo7>a{Vob{z~L}vx|QacJG6G53k+ZI>_=- zbolk(Q9d4a=&wXR7Pv8M%o5qKQ4FZuS7oTT>Sm` z#V^sh?ooWyw_k+)`|scvU#!UP9lCzNNrNocRZcs<(Q<9^(xK=F{9;Yy&nuDVue{=o<+=91i9CPa8CN?T{g>|S;Kq4fPCLKR^87~hU)DMFS0dMUx%da+$J>7E+V8%` zJr1284+dF2Zg==K81gY=fPVZchyF_BV~UG^5c2WfR}ZgzTsX+`QS0z)Fy!N|0m?^> zLw_am@$y5CA3X^9xcM;qmshVJT>tWYhhKvsABZ~!(hqpfp}!LOXmjxoLO#y?-Ql$# zpB`lS*x>MMFyy0Vfby}@p}!LOxY@-&2>JL{Me=dz`ddo|Sw60J`1MB1$D!Eyq^o<1-nIXg8kX_$G%x@v^hpk8$+8 zo}-uaR&Y8$`KS*2s=L@~*34AXU7xKL|eJ0=Cc7BZJiQINZySwdtAGfpQL+6$=y@jIW z)0*BU^|$j2(^7w0=cvz>Pw5>BD<9K48O{}z&*@zZr-mQJj6ge6SWV}kuHomI_2-Ar z=h82%`y~|CtCubzJ%!NFl^rg8M*2A3@81!{jWqtp`Q}GUJJEVnhJ(@4UVc&N4Mt0T zv<_0=OZ)jn;e(xDT__44T1PiA9kwnM=N!(YCoX@%Z5l9Y{Zkf;aiz^jALEkS{Sm(KQ{ax^&;Zah?dz$#~7X>eskcI&Nuc!Bcp6hMxVdr}TcJ&yrtLGo4w73eZ zI{7{^F`D0J6MOfe-2Y1LcM|bw7`5L5(wkw_ejACtq_nUH?!+vzm;qsz$NJldNa&;;?u}DKp41NI(+vzKy-3fb-Dpu7T~WJAeo1cw{|MH4f5s28S8`u4hVR{*1rO`7 zYf!)S0z(cjqo1hXe^3^F@nc+{Al=60`^650yA(Emwnbs{r=q8YF7XF0KW~ZIy=SR> zlzXX``$H<%#eWX?x*p7>@4JK-tgYetWNTy{3(nt!Cgh63uY7tpzxPYRPx`(=;CAEl zPJug&|2r7YtNa9$L>__*#6AXk&TgbP^OJo2R^!dTTA8j}rDRhIl~;*a~F?=GGF@#Q^-nJ;%xd2xH}7ezl? 
zN8c-aDT*D+kDjlv*dvQGZQZ*0P4h?H(-__Bo~(E47u3Z11*`h#7c7%>Xh-;6*tdV4 z>h=q!NWWmk@|Yc4!}jit>lZBM@}hphQicng4!mD*A-A{Cd(i!Y6$`oi?66v97~~Q5 zH0d1^!s2%tLYP=e@5eCwU#~wPaX@f^+VNRx$8S*mqsnl7CP{tb>!pIH<}*LKALi=) zm+U9(eG>L>kopNF(U;8nCT0-iB_99ZaxvL02oYV`;W9r|4zI9C^pe5>tP^0q=SN@9 z>7stZRSNIm_rYj*C)9tt!j~wleo^gA)K6H*?_E238$EIDeY~IWJn3xRzXYobS`@al_{_}KB9)9PSs)_Y~R-M2IF1_6Uc}&v5e}Z)Q z&lkvlx_UD>{h#GbSX{1>-skvvut4IEAieiBJP4kWxXg`v`|AI^jqU0H`#+lwy#MoN zD%a(IU;Uqp2rpQ%hwGIcHdy^1;eXWsc|Y^r;y#zpW&1&9XGpGK$st##GT+<8o*#yO z&?xYG6O+yHEBAxS*h!;rM67+dAGA!;yZw?R;iYPZv%dJ7L=OGUB7dnG>ldBI2_e7q ze-FEf7iKq;p5VED>pXhz>R-HH^t+G1K3Ko#USvnG^@|=**!o3}C~WO@ZH+DD%E zar6&(up^^Mlyb!`_J8Z}T|%9ez;1f${zJt$Ys}B_8oRwg}v;{i|3U zX5W7YS}*7U%FBmw%>*tt{v0qhRTvLhf8XX4f)%5p`+(<{!_kPF*s^oYN;pF#4=^-U$DK`?%vr7SbYNkO28<1Ts*Qoyh6 z7g4=5eRg;?6HpHC`+319viA9#q?}6PYuJ-ugeUE3?M3BV{AYB0mgw+{f|sK2hj4+8 z>*-!;T%Ghj&E@z-p)ZQ3FJ-xeKH#$N=+GWh4ls@#gLZf9j@1Knx^Zj`*&V-A`9$rt zk>2~8Wd6x-mw73RZ&6R^gY~yFt7P1vK)|XWhw`^s_+WZr{o=muvBd?# zFYtpzgzT?82MqbM{%O)1&lL#L(0g?j%Zwc~*2 zQx2j(@i{8jZI^Aa`4pq?(}dUCUJGAbeJN+Jd)a3(pWYZUU*+s`s^&bFhbRsv!$24K zPyhF@b9ip@o8-|**bi-oyYZI#!&DyRt1yE>vJ>X-(YnwZA;`W@P9@OFQeMyY{u0aG z{_G*sS0)ad9qZR~{**cJ(f)V*BgJ3D54L|DGH&9x&t`e*+y2S^G%W?>2K)E3nEiVb zKc7d1jH6%u+dohPyD$83+cibcBRlxlqJNDIBJVj)#`{7w&`;M6LY|=CkfUz=p$qk0 z#PY8FxN!WxOY~^;1B~Yvw0=Gfyl@A9r`PgkF75LLD-LX{O0i5kFQ@nxc+5}!>_@R4=R2@^0C38zY_Vl z+r>W!`Ivv0zZad_pUVUXqHB!^#vAsIyv`Qi01w-2&> zyv^a)V93W^1C)>R9r`Pgk7Hf@3#o9r;`;}bn|DE+gn6oOLq7C8o$Qbr-p@N&yK^7% zv3&@qpD6RNc76%wf3`a7WsuKHe--(;UVQJ`&H3dvzs&bu`(i(c*(zFh@DZ7p&0oM^ zbWV$|qprq<@*(-wC}XnP1D)HF5@-Wz+fIUCTFb9MOp@>-~zqC-d8+ zo+I^n3z$OJ#KbRnp4sYw@-Wl9q=8?1nVHl0hu+R(4kntR(TE7|TUZr$PgpJg0 zQcgD(3(*IuShDiWSkwXB|B*#7O-4%oLHDU+TQ z40>?2YPo>1GRD@E*t(RY_Z;&9^_28-j+wo;dRe-pC+81B?smlF?w=gFv-5~tIgR80 zO*#CTOg5hnvpbgmKf3rc1kcSsj{gYs_na60qVv8Sy>a{xyZ9n@`<++aiS-Di zM&K*tVkP3o_MNYT@=(85>|mJAiQ@b8F{a^*@sL?}_|V{+MU8egWER1r=fIS(m8jbe|ykV4V-}EuYK`0_Y0s zVfw6k-mP-&D<3j-Z(;n*YI$LNF}_2ek$=*wWjy=NH(2p&Y1b7$)BBJ0{$0Mi{^buO 
zf2c<*>QUruc-_oJWM}YvY3#YL&)*q){zZNM4tn0YLgd%Y?+5fA_6ry4e;VD9|1H$U z?)n_)gYgr1s2}ity^hk+?;iH)BVxyrw5k~S1`7AKM_v5BoIRTUSCJnK^?NmP6TOPt zqm#a9yKia{^`?!6X?7S~WZybM!i(i2~+J<&=?9oSl1$(rO z`4`!vtqj|`^h)f}i}yu*_B%9Q=@h@`Z`#iJ<=Vv097=n18L=?3M^avS=mcxjmBlK@N|l5+ZvfhSc9I`DX5yxTpI`CgY&&Q4=K%8n-b2peMVV5eEuf z&K?2(K+S#l+*KVj{KF=o4B5TtQ`J+^!#%!eg%5I&cQFI=bv=mm!mhX=MTF074$21 zpj|Xh2j9=0P2ZpBIhfh1I<~WR{$`LCdGa6DICD*{w6o(cI6t+U-H-EYKCcwNkpI78 zKCV~(;=hCR25wiA2fu5Vz_I*;dR@<``6J)AP`qUEbMTbd`E253CLp3;^vy3xzNxBl z{NB!+w0;xhuuAFOa4zIIE%|7FD`*ru=yxdH&0@E#oLr}zTN>K03y46v^7{RL9*gAZ zf**wEN_y{R*vj{{9J7x>v(TOEkbIL~Uh(%Zf9yOXt5;BO_}}OExzbL)BTj(dOnNs- z`qBn|o|&>q^Ao;hj{Bs*n}yFKA7;=q`5U)N;A~X?7^H ziVPuLNe`=b5s!&HS-caUHwx|3Oi2eAm!0eBcS=3&oc8Vs^1k~zxsRMF_sm31FE(9z@cGej@4@ z*3-`QMNmwa-x1@R#mQF|_DQjZKPJ_NIh2O(n zAOe}!eUM!J!%FIrs*kpgoR(zsW&UcUw$sQB`h2zCFO_?+U=7occHj0P;;;H$zf;;Z zbM2{&=(mX+_#J}p>;8^ouRWdL+jsKgj+cBars4&hwvQd z^YyuY7Z~<$v_6@0G#}Mhqbp0_`Qqw6PSQ^wFZ7%K+js-~)zVnIym)WaE|#D9`OK9v z-V*Xb>z{c-<1gi>)ju@^Uu_T_tAq~Yhr3??JH+qq2jsown`s))5Bzr3k0+#qHf&p@?tDlb|Gh5$%!rOPv3%FLcxCd9 zkSHQW`1-lBB4!=8!f5%|sx_bU$VCNj#cy4UG2l=s`)coyxVY8FLQ`>~@j_slk z+Wz>?7pfNeu?dus!ng-(#a^4g3jMgXtH!0)zd%0pKlS5&Cgp|W##51XZCVs@@#^ztYKzX66*@VPT7RTDT@!$tc^b)nxna9o~i0p5d@Xy4U8Kz^x!eSS&!p4GUa8-vw!`P#qF%n*B@x%)JJ9*!UDc$V2s&@-OxrHu1h zJ14!*GyRbJczyL&O2xP6DLSjzrb*+`dhrCMbZ;Sn4OL5 zDf9><0h!v{IlbArOx?E_&aB?S_wHK0IrBVk=lXXqmFKh?0 z0VyZUr&jXoJo z=Lz5PSwj59W&KunycjBO`c&z7ai;EB&M&ii8{bEs$T!bz7ZizpAo90;OF$p4K=G5F zD9)nlqqzOLrI~p=n2yEc@mrS9;~wF3sp=0f9^(9o&VIy**QO8#^lnUSKIKEyiK3aolcKFOG@ng*djPC;KKH zNWQCn>E=5mmamMH5kFb3OkTt92{%dn(dYcC&A-IvSCihY%#Z!)9hsIg^Q&*?beYvT zNngu1NAG0*CDa#cA6&iWS2aF?-8q`xFKGf6l{Z!L&D{SEhHYNO;^yp-t60x4kBW5R zJZgc_Ssvz+e)O$R(fkWiKz`mz_%^tbIX0_HBr#(Wx`?E)9?3aI^ z?Qo_X%znvq!Cp4;UJ1^U4-?ntkjR^GxZVl=Q@Z z$?wAqmy1L8t%&ST7+0v=l*9jCIsBQ@e)e6u*-2}kFkcBz%g$53;^upfiT2R}9BSlJ(g^tzL>&YTBnqx}gc+{O1)HMAD%5su>bsUb<; z7oad6*kt=@9ZD6ye-p#N18jO8j??JA;?|!J>O|3OpM%zOV7*9RDKV;;uVXM=SAup& 
zzO7TGT;M@IlHMP=z2o06r-qyw?)z~a;bqP_g)@XaW1pVXkTk=Pvn;=tKo{^_f{kw2 zn}2H{zf;5LUbaT*EQ9yT0!O~oz6~QhlyB{edLVz~yX%AWoZ<&uHlBcTxqQN-q>T49 zDnB(`1RnHVPtR>15u?}L4}g;P(wjm-+l%&-03TBcsYfayEnxfWMEg(tKGaLl(R!lt zodDsxjEBVeZsStH0`1Q(5Pt_c!9p7cObrnQxkKyOy_{asHB#ed@Bx>lvwrQ^OON5_ z-6_6#BQKXb=G&2f%Q}I5G%8&!dcP$p^o)H-{Jx#1(LG-9KnM7SeJ@Zn{2Z>N_iN^Z zjkn)2HObH2c!9zUo-eQJ#(cyc!7oX9@%$~`8>#)T;0ex-_hSK{gLkVl2LGj2s-ejoyDGa zL+I%0MrL%4B$>noJW%R0MEur!igx#c3&DG^avuE~NBFK77whOm_LPYm0BlzvI{BvmEt5Vohb5A4O|#PbhRUbwnNjA@+^6-XEK z1dq&L0`4T(_64zZBCg!-Qn`JI?$J2eD9*>|F@FL(wh`qJJ@k|IC&OQX`A+gj+3X9) zbB^QJW$9W(Nzos{IZ2j(M5p)>>o@i_K9=i{aWTJB_!h**$4rjH@qgu4$iEAOzR-S+ zQF=+Zp#Q-J)dK*4r%(g%oo`-hs3`CqBqZPj_zlz~OLb&Nu&o>|YuT3?D*b|%R!MK; zFE$>Ksy;>ZV8}7Nb5M%b1&N%B9ro{+bhk_u!({#fa`BzNC%dys==E0%ovz&heQ~>k zc9ghM`MpZ>KaO4iUt#3`P2}H4zE*Cz{Gqo?)XpVY%g9dbr<`XGqMRjh`>E}e^hCd` zy}v-TOs9XTs?+FiB)2vX487&&!d|aM*ywe`2+%c;?&Ij58d@XmL@f=}6xk=BLO;;{ z82{w*;hJM-j{jI8B^Qb)Ji~5~Q z|FTs=@FpS-__tn459yjm&1!he2VHk&>5Na^R|CsO_QFIOw$R&TbM)TS7y11-i80(hzV5Rbq-Fi1Uuu*5>tB@o+cyZ@_`JX!>jZ9oj$w<9D$dSdZ( z1_MS4^PT=cPHA40%gK^oz-4g;?cXkZ>JmCqw!TBxG5GDmSAUc6+gG`>{#dRke7=LS zt)+agsYC8)A5GV;={8CIj1IGBcM$Qlg#TW>r>bvY`3xp0e{|m2&I2+3`(7eO#W<&J(c@l*&=I>L}5B<)`^K#N|EO z|5BwN#*!}BLC|me{gXbZ9B7=c_RH3n0UuXy{2KO`w73f;7Bhv_rB5`^b9c5%J3iLa=?-N|_7H)-BOVImau49`Q+dg(cd zQMKf-X(sQm7DK;Y)>Tra;< z$`2N7V>mxa#<}`BN65xAg0}@zh3eG5X0wvU03(c_qT+XU$KYl0lk+vQ2bN8{oV4AAH@Nj4)lHEkCFVFocQe` z2ie5=q7P~x{0`y6*mKBO(dEw&y5M)AdBF(+$IEx^!)sT#_Tk=8kA?n!B0htwi0^eirVoB6y?ffI} znEv@)A_qyYiRq(tD#C{@2Kju%O7-KTF?sJ#KRyin%~}q*S=NvB0$bef>feVPJ&CuI zt=Dq(VY;Iasl+)fpNEgH!pCs^uddId_=DRsPQNQ>O^%!)?uR}_biXn7F_ec@+8!jX zkk~-fh~&2UkjSOw<9Dfk^jY3L!Y5RoF8f{>`!Itp#0&6r7%#$Q{jSuIUHqUMMw>1~ z4~Q%0(NkoIb=r7vjn)hK;3`az`=aVM@%Zhc>N*~eT2y@&$MK7*f5&zyRed9;uzp>t z`cm$>r6?}0qAZf$tDI5F`wicLC(j!>-JNoQ`vqjZkrm+;NNA)V&X&v_j z!!-Ua>6N}9Eq>U(8vxzNH_-DjY+f)mWDD~rHDrDj-W#69^%)vxzy&+b@9~GOaZP;l zY@N->O9f7PG+9H}qUv{YT)e1ytBf00e~EYY2fY|cOL{Ld9_lM_E$6(b`aS&J+oI}j 
z8Aq!AeBxBXM7nXD?pt9xfltzO-7*ec{TZ665qymBB)zXQe#$$W@i4E2ai_vV{Fv<( zYB+J&8PqfD>902sp4k=lmy|L<$F87L(b35b$*$j z=Wh_f31-FOk4Y#U^}XrFq{cTx4{`3s9w^@aqNqJs~Zk)!YVfZn6O;?mnq^~43b zPyH^{x2SX{o=ku*xsuQm)Qca>>AE)>M;8B9+QRe(^?MXnd8$|c7mf#Q=l8kIy1%%_ zw=J7K&glz9nYXq$*|lTyiNE2vSBc5xcxoFxhaGQH`oBYcA~*h;f_7OY_!j@Wc7F%( zi<%!buFDq}@Ba{<->LO&(|T!KeFvo@)_K=xzy1y~47GrD0>|H^cC|P~_!OQO-=Od) zzWc>#3Xf5^N#R_*~qkFq`p z{0+$o=udig;5F-YE!{JTMF8nMa1juzXUl5N#=GV?e(mKRmbm*4^0P2~*PJfTp|6tf zC$!v6Jg<|Ya>o-2VU*h(})`Nuh<_XT0&NcW7 zTP8aXsr&u%JUrim+6!09jdGtj{L2Zj&ew6va`Tgr6X?tTeFpuzHfbOIo}|Bdf?nh6 z^`b90FBwPx;| zy-&OsawFv+-o%ykCh{Hag8XcpH!fHHCZRLweU%fYsxL~g|E!*wztn8FC7zM|qr{S|88Nu3{d5M2yxJd^&;WHlCfTeoLa3;8b-}qK@wg_6td# z<)r*6-D&hZ>4|@}@qXm@2elvc)60w(IR?Eb$L2GUUf^p(`j}oFS55M21ksx*l+NNE zcU%SaiRR;OQ$C%^Bd@9IOac}j<&NcYqjdrVk{_`9Y z^&awu=N5M)JrT52_3NyWsp@@vJ`mDR=Jeu!kxt;(=qc&zLwqB9FDr*q)#vlcCrAfL zxA8xu6Zit9b1I)am8w3M^=yCZQ5$0GQK#Rj;~DU~tEGJC88U`Eie7Uu;XHN9dk-VJ zCYQ>aXj>G1EF3@tbw(UEmI}yFv5Kf;YOB%j-3d zGE3-8dOzg%mAA`G!9S00e9lhKcFA$Me+lh^rjFX>1zx9$I1cfP#c^(XA|7?e%RUBq zrFNe{_(8M8$%qdgSOdQ~U)m>(`xYu}ao=Kv$MD@RE>*ae@AKnytvQDgjKHR5%@Q2L*zVrJ&zwbZ4_h^1EhWNEaSCXSAsaGAt5^o|u z{7QPF5P%VHCAqeOk9B-Do)+={s{`ggT$H~G(KGyw`k$51!pOge>wS9l*TnCDUPR)y zt{3nmg6Gj|FyI=U*QW0qncn=YGwCVm-wlv5Dqr_=3eS-rD)iX*+qTYi9*H&bGdp5- z&-MrL_1wWg&o#C41B21iq@1GIuV8eO!eYOI(X$nnerqs#j>23V#l`a(w)vWnPCcIs z?SZRxwD|EQqtW|;eL<_?x$v{Ecss6dN$)o-H%n%TeZqTi+Qw7j_%^PZs*?4SwhrRJ z)Bih{3p;=+nSFvj(IXyzIrj{-v*-!h8FbnDH^8twt{gyrd&iq!=5l}!iO+RPTw&ie zhH|L;GZ-E5I5xLg{Dk%E?RR4$$1tV<{Qfra})ZDe11)7=@;&ErmC zJS1|R>y+^^zbNBnQ9X1Vttj-y>tXcA{hWR0a47uzLl2=n|NHxSZXX?oi}TU9aW5Os z3irX2I6LXRliM%cZ@`HgDLmR&fDxk$aWdpTl@xtWC5JK$xuXBW?-b&GBG7o7`u)g; z*ALgyJ#+b9E*Ek4kEk4fg38%Mu?v2!*gGFaldj;L2EK!iTEKNv7!GpM4+yw7PS3BE za)KFZCzNiJ^I(SLW9dOZu6&)`&7SqO?k_cL79$4DE7@-QH;&qa2mZA+d=JuV*V=f~)N30XP2j~IP&yag6A^nx^c7DwLlEuUy*vXntLc`<&-AFNps^8%3 zJA(6-XUF6WN}2RljG?F&wj^0LDlI78bH(2d_vF6do!HQ6IQ?=_m&oXY8DKme=k0#=R*WAJ0$oY(e7^gwwR|^w 
zaq~0!@5H`KT^pE=ES+PDB5ZwJpyzD+whpj8Bw5zRCgC7RJAgAAYu%|q|40$SPKGf*w?*-}kTu-FC zUekRomTn8uEt7O^{{KLD`L#0s;FrXH(tfj?UtI2j`o;V_Xk5s5(D%)sV%*uH)e?Y@!n1%J{0eSJq^bcE{wkJEOc zL_O94*2(j59b0J~^J8osa);fxhwo&s__^)l1A52Ox`27~76jsZm+6czj<0uprCt5@ zs9i7P_0`s{6FAO_?BrxCm+N1U@R#;$zLD(^@L-S3PSCnSrZ1;>*~BgB z@M|wsBj0-_y_@+#WY6|+xwf7*XcWJj>(KG3PPKC?N8x&I{r)Nb-H5eEs^%fiH|ddc zpv(5jHGMbx7U8c_{MkyMlxyGl*m^sY2eixf(_yDouG89Xj$PPAdP?!))f%9ukk79aJA4pJZ`dEhZI{xU&#R+6rY<(5z zpFsGa|6;NiG#)7QwQuFS*)6|A>I1y%hz1+~3He3)^x*PKVqZW9Wlj5BOMGDVr{y$8 z^CniMc>Fn~-&6%M<~YEcaInRd6e0L8)$z4T`yttF22r>BEH~y6K%MVAMB2` zYpUi7ZBO(n(4Uhy%;+?{us$_26kq)yOB4^am2&aR@=Mi-|Gk5<0evPP zHa?1aQr67>l_Wn_KJE9w`F78zmvq@W-R?;Or-l!e=eG%rIJ1F%VjTd=3H@Cd&*CZR zVJ@l<`}I109DSd5hrsxrav}Z1I6f{L$4?D!mU!g$h5WqR!CyyDg8HUE;{~l_lY4qT z-@9{+&h>m@CcPgqho5I_Ci4qdKA|U&voqg@I8OXmWS^+>N|#^U&GH$fmvTPzeWk+7 z72d1xN`dvmAb&k&Oc(OQmGmCwACLo5i+#aK^4@>Emf!o2^^VhwzTMbYs%t*CQ?}-G z;Y%?rm&>Q`;e?L*Tbj@{dzJt6$0O>mP?!fE`nPJx=wmUiDSuoFFaw+Jlamyg_b3 z$LhB@{wTeT%PpwhwWgnA*v@zGixTh9JfqN8+{1UED>IxsuqoQ2xU4D6Oa3yI_vWQ4?iASI{7Gldp~2yHD|Zb zeC3<@1^P}}087tc-iV#_u;3}o2nB0 zJ18+)+S0-PAV0LvO;dW>NtWM|%5Qrlzsq)$y{9zD$NZdc@<8KOuy>JN(EhE;SwYxB zb0XH?u{d@<^PyGg4CBYho>8u-kHjZdAB#_1J35o`Mxw%o5{ay);o$ccZwRLsw+94*)*>6&-Ty>XY^>v!_qzZhrGkZ>Rk*!slOmW)gWm&->r&{&RMa(iRbqp{n{t+{D-&xWNV=7JKb>+le?i4Tz@L<=h_?iW4|jAmR zT^YrlXgA0Ot*elFjcDMz>)&siL2^cNjLR=dKAFPB43CpxWr|;r3+1yy@^Q=GO63PD z_Hy~u-;#U#x{#iKszttS45t#>E~PCDn_Ok;;4L9fUHm-iKV`K4BsR?aXaD^t=|Ipr zJSm6xP5*nXbzDr?Z;IPfo43rBEIw3!8ODdpnSXv5AF96$<3sV6{sG2^W*0)et-wC~ zD|IPszXFWgz@FQ802cp;<9HhH)4aIIt28Gw1NI^5i5-T2K>yEgpU-^D>pH%?_QUP_ zEO#6na(W=+;0t4M-bL?-{82XseCRU2Y5S(yej=$FnRgDFcfH1V+7E!-*Uq4Hk8$#F zyfzC4&}H)+shVBfBcgp3C4Eux32nK?F=zcP?7XydWH+rpCh|c0gL8Tt7YfoI^UE); z6Fp1UFl_s*f{cY!6va23F3C|a!5Md8yVG|w%(uMm zFGSxJ3LhIKF7!Jd5V%?6+0J_fULbK{<>TW|GoQQF@1Q?|2f<{m|FT+w$lg|cB9W&T zzAyg6{8`~KSP{|@{%QkwPq94VJuW(jf%$26DoC3=tPy(CCO;xyt{vS)jCSq6$)i7$ z+WRs2Fg^Fgc&0zMP6mi_>;GNKP0!(SbL;<6;e`sfE4)?fG8GEmGe7(fyLHZVc@2|&b8}xX- 
z!myrz=V*R3xJ6fM`fAOOj5J+K(pM?I_|+giUEvG(eUP4}@L38sFkDzq&YZ5+^jQqI ztiOWcCFwI2AJII@AFJ>U3Xf6vYK2EJTv*@2aBKP)hFjL(!SIswQHmcZ{-4F3t-nWM z_3!K7rSKkpUs(Sz!>#F87;ahr5W`E-ztr>}Rs0_){1JtBD*ULzI~Xpk-^_4p`UQqt z)^B2XN&0!ke@5}2Rrv1|-m37^3QK%ZSpN-%ThmW#eqU#JN%~WY|2@V3xWeC2c!R<( zD7=p0!up>v+?sxr;gGCcvw;u&13(H+h;PloS?rsCo&WA3et}%EOZ6w#}#f9 zdeR?dxTPp|c}e;c3>S)Gw_DRi#TUPlPnQ&4r|F+jc#XneQCRGDklv`U*!3X&w8CQd zgY;(^ZYhc#Uy|--xKKQc@2%;7P<*l5LHcV7i(L=WFDNW_KS+O@;r!a~>GPFJ*GmeE zyyVk=Qh2$(ui_Z5r6~4)N%}|pzEC`d@2%+{EB+qV-yr=Hh4(7_io$#qg5!TNHo3;?Gq0@mAM338-@x8V2U3|}VvzwPvgg4RPSSA<%%DP%H6~I5|z7G z;IQ6*W;j*#$Je(}*8W`lb;}h$71eudtlqD1`iSqp)bcED*`ct-EjtAc%l`qxsj8p9 zzK6>7=i;wh{^A(lm&VF}fzwCjKdXXUR`*vj7^a9IAw8BSGw^YuAgKK{Dp&yLl9POSV#Iek?ABU+x7 zzf@r>f4RV6`S&rLs`}FFE4h69b<1y(@-MkUZ(iTR<>Ief?zULH8)Egon$t(+U!~<)`3(wN`O^dr%b(70s_LV!@8a_D*DZfa zto+(o`DbzZsQfdvJS%^c!dCtmfy460GMuXV!0V&Pbi+O(CG0-#@LOFmzxC(aqVYOg zSJ*A#KdqlR3*^&!>snSI8u#Lkuz&3sg*OO%!zhOR8)aQK?Sszn7dSQbw;Vs#K+kO5 zd{(;2uS*AYCi+r0YzBdX=ws>AwYyDt{dd1;1Iw!-Fx(XWOrOn$RE2 zX0oV#R||b(P$;}exLiz9-yvzdN(f07!ShjPbHQ4d?+k9cW0<&1h=yo%bRDCMF3 zZ^pPlN#QEb`@Y6kw=4enG5lS$?~}W(&GKy^y!=yQuM5R{Szj@KKv`0~hyc3WI0ZfR zq<0%Xr+GKV_cx22Wlkc(_q)xy8os;rvUYs_->DtDq#d#~w*E!hDO&%s zl-mh>LjK^>2PV)sH=VnQkf|K~6Zv^F{q4!mQN&MLC(Z3cLBD~7M1e_8Af9Nfr>$na7FU7f?-$w$I%nokD=FdB=t+mNA3U6yt3FU zwX0!2N9|hDlkX{VrQ4*#Qd&6$3RTnB3# zggz^`B<VSyuOD0liK&BC;jlSAE@>7OH$udm91Y9f8*M#-;IYnN&I8=aQU)> z?6zN0emx=Oga7Azhn^RAD4!d-Vd>l{hK;{wm)-ohJ#UkIZ2u(W_sQ+#ABs`^*bkM# zyKX1S9j@)TTkJ>iScUf}e3HU@6|Pg*6FD#D#6G2WKrE@fTNJi+bp?fGp42bOc&nX{ zUDzmcS5SGS^>JKp|D1?^ZXXFFdA|@a$Zr=u+I**96nbbM5b;Mvp_kV8DJ=D|`EMHc z(0Xjw_cE^=&7%vw(LA=)hrUx)e5o(hL*d0jcgF^4M_SJfI!zw^j&;(W&5~}cOsCTO zELW7)Q_A~#slPivypS-0ddb&~H|E37nSIi@745%{3>$q%$och>gVkId+eb2+><3R@ ziFOk{1nKu{yYAq61?dkeEPM>oA68iS9Hc+aaO<5iKMwnZdZ&iUxPSLXDG%p-yr1G4 zT9?ms`3OGf>Q=b~-9FKs_3q>6=4btlVlUxug`bjduY9^V8Sm?NJK`>_C-Qw4=R1cH z$R0`gCE@3|I!PDQOMl=(Dx`rP1!?Uk6z530q@|zW@{{$iUCSk>(SSIvQ(#)xBjt2R zeL=^gWMd*bYxea0oPgE?iyW1Sqfidg9}Pbo+Ue_J>*^LuK5Cckdm^N3YINhAJmcZ{ 
z`({dfUPVtbiG}>!wVUSSh~Q6wk;%= zo7Dbwsog7y{jm5Qc(6k$WI&fach|8K>73tr{;}jV=-V8L3rZ`Qul7B(eaGVFUskTxQ=xQ){Z&~;NpwtPIivGn zINvZ1(sg`(QR2}|-K+bM-tg?XtvjK94)fi{Ge_0|o9q|aPqbus9uL8|=IQfnmBrhV z-ttW)#xb5>>c2hL@OMYz@3Zus0qfsJ9e>GjHr#&aQ2TX*7`m+g)r-zif?#n0&g;RG zq$l&R@CSBb{~Nzu;?h9OBAv(m&wOs8^=DI6)*sUNh#DXHv7VlWe4l5&0zR4O3)I!* ziYvLjQwbT*w()4--FGq7L-A;xt;j=8=aH95|G1nz-lFz+O3WVL^l9j;w!gS*lD93K zuf^xI&oaY7PHOMh$vu=O>wl=<1D%V@&^c1`hn)l;G7Irn{2cD{^m#TR`vygRrtf;S zX}y|a^=jgJwZ-buRargScT2j`F1f2cL%qI6L)?D5#G!tvM&y1KsF!yA3f>nRI9-Myyqxn*CGO;W(?A$f|rZ;>1WZCpraQgnn!~8z%j@ut>qj=iB1F<+7bYJutdcK*9@rDieXlDw7 zH9z{UpZNg1^MF@WJYqNeWH|3rS{&k?4LqlR==x9Ee{iH!Wj)f#J{@$95BrNK?!24fDDJ#V;4to#aZrmp zlisOJ*g*5~b^NsQ@1CXK+IM_*GV|5-qvMFuq&J$M=X8I~eEo8U{q`sLPTvm--0_IO z&5H%@lzn!B1q%f}V0u5xcA#&1Kh5uL{5Je9r5e3;YIECnYL3d`T%F(4P~7^hu-^@Y zZ|6iuahLRO!}-nkbGE#GbRqCh6qwG9VtW?Ob3P=`M~Zpy#=eJ)Azi^!ZDJ>Hl=skQ zlsncJhd}*Gv5N~f$h}eGOw)JB@#ks$A{eoT-`hE>`VpQ zzm@gNZ=WJ?;}(HCY6WiY68-CE|EZ++G&h9pUk>_5&=c!t+We+#AG&C~gvNEaKCazD zJ>33B+kGS-MQvxw_cDybId<-2;HT~*fpi#o03ZH3mTo)x5hW?l&Ho9)%dc(XdKQXe zUtkZ(gp$6B+`4*D!BPx=3*RN8AG|asUvGXIP`Ivm6zv49!e5F z1odAQxLx9hpz(78cc^?dOFz@joeTH(xj^JQI{#`G*VpAMRmtJ2j2{(>YVS&-?}+n3 zShUX)DKTArelMu}wDlbJ-6ZVPLP}@ffzo*29^`W&*)z0Qu)@Y^#Xgktx8R?<{$UK{ zG8`uo`{VlC@222yMXw@%OAqOy?~SFs+V}9?l_zVb`9DTGS^Cpr{mhAykM95Mj?;eN zLRZde84vRJ&9}JYv}5_X+a4Ku63O{gexEDJemVK@eBJXzP8!vYbet)0v)Yl)2?8$= zJJR1cQIdBv>e(MgSHS4#tDP+lp#B|~7xjNduA%SL*$a6>2GEHqK7__ejSLvT@$m1-*|iEZ~OMADhFl!=@=>> ze7_m<*rj#c-r1_7)jn#wr&aD%?t*%i`*x9opi$)B?---<(#H0(fBQ(p)8*QMHuje) zSM}m2Tt5hUUBC9ldwZt8B<44tAbgrPVL5aC=HJHBZDYSF<+}O*Z^AQwny;V6^(qu^ z=6mJ-a|YLQ7`caq^o#b5V|uV&9Fk_=t^3;laQU%?=tBGKBNss93!KjUtBcph^h>_L z{{!Je9&qK;uW`Hh>R;UX9GV8Aew*+a^{yp+E)UO@&RkK_r}#7#;&(f8rExLBABFY` z`)xh_i0%7?xV4S)yHMlT;3@GtF8yfNUVi0nDF^f%!+u5ND@&)MAp&;O@1gqJ_*0`j@OWn|FG1@_Ae;6@9s`&rvtHf zZ83Xy2AK?#Pt&K+?p3fa8onb$`oHJ-V6`(($@o#E&u>O`y#4L3ww2@_`FxAx%H3)Y zWF49-m#D96zmN`kwubUY`Z}^Nwk~PW&@9heEgJfqhlWWF{UkSFiU!lAeIG&CO7>_T 
zm**F+;d`$A5-ESkquidkcJbf&#x*RTx%Nx>eZKJn3>VsOWw^C*lgMxVC*+=flJ9=; z3Vt7?KP1mpzbC8Tg#ToHRyc|yMGnfvku-hPBaU3tL*GUI{0@;X@MRrsxRWbx5qdU? zTt@L^p6luI8FruI3YJ6gcY^ZwcLDgdQ}urr-`#XIlrHIQVm`ait&T1@(WpzBJ#Y?Yv#vUHluJJE(AjzTc|w z6ozU43r@di=;wLnHZ^pWoRc*4D+yQ{luM?b%9Rg`d|eOVw|ku4i1g=zkg=KRzGr+QjMS zQ7uu?RAQ{qQJl%ogLI4XL(Xdr(glUD)Azo@H!8f6?}g$FzPF|q%kwutuXrEluz%KD zuO>Qr74dl~%a6OCAuVqU_b)7`S8GZBU4F8DJL9JhGarTTsl?Tsf71ILGu+~Zf!Gt+ zQJ3B+fMfNx?+aYJlH~eF`i3|n>HV1L+Mk@7z5gQ5r%U>5xSrSal&eW`+IE(6S5Lot zs_2E3C+9%AcH%UqgT<`Jd^s$N*^2<3M7omR-!o!r=$1q^!KtAy(9VdGKOz7+zZGIj z>GrDK>s#+$;&s&4PPVQ}{mfz1yJcnd?rEvVUx42I9j(FaZQuA9I?t8k{`BtTe)KNg zL+^gXa+35ehd7X&d`r#~9(qg_dZMt?*zq*!t*TgF7Sn!pz3VZH%kdARyqsB9UKanw z$jj2Q^70Jx`9S35UsmkjPQQ2_{GI9lUrRd{cc|Zz_>k?zKPgo3uO~Pc^?@>|pyD`t{<+6t*}-_M0-lPVt@eKXhM@O8rZ@ zIF7PCEPyZ2YiiBvx&*No~CcRYkuUJA3~4J551Z|+!y)d->V#p zUvbAdU!nGj=9_-ac!-}e(v4uyIE^SDoMIN-r|M z>AVYWcfb94;eVsXg&i_pPUl_7`_3=R`voFLew)U(74NV7a;)68Sh*Vn4$EB^D|ZdU z{q?_--gksArSVL!Up$%bx%L{qr-uB8(9wPr!%_cfq{7yJI)mZ9`%lO7`@!r#?WS14 z?6k>eBwx>PJzYD3evpl0mW%hUB0JKnA5|dXp$qbNdkTK!RMF=O;*NhW8+Y6%?Qmo)KlT4aPb-jz5oP;- z7aw+cs4FWExBSJ(!=keCAm7is_Mks`_@=-AxZ)e0TgMGU467i4lcN1y88yOUwbVDc|#u3k^=P7R*UWfgz z{Bh`I$$Q*w+=kBgP`tkVy^Sk8T-M(G2-$guGj9GWw@cjKaW%+(H}DTPJ}BS+d{OAG zK(FsCTkma>KD6^PVHfTfpKIXvVf>eyqOisrxmtxK-f+hu`Wn~SShn6TiGBJDu=78s zfkyN9k)1!E+w(y58fTrl{EUz5T-`&jO>X47L`+YhWo&$Ke|m0uv7oHozn9zhaO%Zv zW#vQ8i~dW912&eGk6$yN4@5rR)Q^0O=pi5f#QD?tg8kZklZRD2FC%&#$uUi4(selH zp|-3%-24|~FK#a@51(c}ABa4BWBLB=#rCOg9PmNTpZb|HE?JWPWKb{tH=3Vi{sxWG zzsq%ePWWx-_G-T}sPKFVIyZ*L1t3Q3dNwKUGDY7P@@>F4P^b9e)b!7%>U& z!+Ds#$9KmPzH2wvQGcpjKdL>(FZ8?ocA1y5{uSbtT_;Gqf$`#K-dN@-`&tL{VWyV+ zK9-Mu@QAs+CYe(t6D)d&+9=|vl?WW@&b)ZVdSKzU1|8~EQ8jY^tM44Ce>xk(L zOFjHLJrC+cnLqdIBo1`<=VN?5pEj@iNayVQJe#+ylKl(wx}SK`lkZMk{>V6NmFUG^ zKs>UOSlBxr8PEJZ5c@JcrZ4e$U!NY7SS*PZBoRY79|KX=!ke3$xd!H6k>zpRPNHTzg@ z|E}3&Z=rXUu5aHOTYvJs8>96nfv)$B^CQWtv&DYXG-{dsyXFYJW!B??KG^>|cTxX$ zv-EGX!;jJRcnfu0v5DKyFU=RYewx7Ta|CW|5V&Kuz|B(_?pK~S3tdU?N@>3$-*|n} 
z6x2ia8F1&#K`-Q))Rg<*Uw>~;KlpNN{mXNTf4k!ULkxd0;rpdA%pW)3FB4w=De;qq zVl~$bejV~;-@Cf@6rS70wf(SO=>5Yk$#}0lPsub~k9k+fx1HbR)^`KwD{wX>$3iFC zYdD1#(YXAnj9~l8m0RCFpYp-__8l?#n$t!3>v?&$Z-ZZ)FY;pRc4-U;mm6=G++6%4 zjGHWH{@}ZO@Q3mfIk5SaP;R!xMv9pu~2pKq-6m!fbj%d?n@WYafk{GU-$Logn-X)`3KE zy2P!neomtIp?<1;wEai?I+X6NOBFcjNgU(m!|}Ps4N31mSs!d4x9B_|SuY)g=K)zpX4y8m;CMf4ZmIdH00%gh#KM7YT;KDNA~`1V+z>`@U7A~a(B$nKQ=##BcZP(9%bT0vA2GiI1%N8 z?ukDld%v+w{Qc08>hGJ>-`9$Ml%@$>KSuRM*L60I61ZcEyl<`%xKr$0f9p5=jY412 zlYq$eM>VLAo~!2iW6%vfLc{?2SKZSyj(LvaA{W0y@hx5qV)(NWk7+;4&G#O{%ip<8 z+p~#EBmJtAc17HXi~JP-=$rqZq~8z!kNzR}FaA3_)Z%pGe^-qEZ883Di1B~j0r5Y{ zr_lH0-#X^IjC;9!G`=@by}|eEVtl-W(z*CIE8i_%x+;c`_NDRt*T6^c-SWMH`96*7 zQ7FF5{7WTcp#?Mi64^}O=$Ngj%v z4d;V?PmmUSiub2XM}KD(BUt_Ld=x+bs~*pT6}2n}-4AJgm-SY^D(^Jl`BKdUFgKo$nBnSAvC;Wn*59DtYdk;Cp<>&a;qx`o~esmrz%QMnnlS2LG zGGeYM_7Qd?K~It#U6s%0%ve6NIG>4}Pq0G9H{9={mJ;3Z@1iVTE*GEDuz<79S^a;z zv`aUPBwe6?F*)+y=ZfFT?|q9eG~RIej{K4Sr{5*J7M@E!A@&_^ljNi4le_EF(hcAn zxCB03%y;B>BZdC3v+(m7?FYDWeg@;;&vN<3@k#BX!>3CfK4lZu|Jlp(>i4oEz3r9A z!(FOZyZN4~l5u4^PnluJCGfz9F(-j9($BGd0x1sU^2UYz87cpkb13Me3w*TaGScKf`{{s{Z>u$uj9ve9Ex!2^wzpGFqo0 z-(k?Xz}z1B`km5V?Z4%F(6~e3j$aDgyj|eVmjzz1P2e_(iz?qghWr@!b4$KB=Z}U&zlh{iZ)j%HJgEF-|*;^xNb*%2)EEef5OC_Bniq zyncxLB^oD|YI@WM?-$WO$mi%Xe7Cr)Pd`;^lJYbz3;k8tKUu|bsoNiiU-`ng7qv|0Yen zx>a?~sdG-9I-}Ew`eqSl$6G$pQJ}E|o zouuO(&hL!eXyuRL_r9aXHw!q<^L#|ZgkyRmYC4D(d=`-Mm66f#=mc4eyyM5scinah2;)X9`yiUj?wZvRIf{g z3v9f$m*X!x-z6R#*0_AHP5lq(*d>etoVM?P4%#Kqf%aF77Lbm9vtP%v9_f6UwJZ64 zUwIzey=6(uFynBtJ6y{3L=_L}={Lj0;{16oYyg&_~<+b($pe(aPsSLrWnJv#L(M1kACPx;rG zj`!F;L%#zW=lFeGA@dZdM$P>C9+6*Vfa99dxH6KZ+*OJ%@$;<)UTEjq72&&Ua={5s??8>n%U~wIsbCfc>7F*PwJ% zhPW>v-fH^NXLjZg`zz!gdcsfjYc1=S>X+3&)i0|*1~c*`E$nanJR`{OPH`1GZ5$Q% z_o#is``hGwDS~$>eA-5P=>9v>H)^@pYWmpv^hK8M=lukU-OCvF|C7?4eb4#N7WeNt z|Hb00U$vj>$GuHP|8Gu4kE^pI%eW?Ty=?u>GO^gWs=`jlr4SnWN)^&cd_U|C`+j#{*l$ zK4jwo;scy-pTqt|x&Rpu%rZJmzGjEye8IHd?-f_L)8;$EdOb@@%JtI?%hrR~Zo~X{NdDv~=o{pCB>MI~)wf;BpG@DbB;DQ~t)Hr=H_&g$yZzhH 
z8`s<8a^uMpjSsX-_4Mw5ocopO-P2UxDg)nDx-xlxgYr!Gb9{;MRNl0AiOIVdU08!R zp8x3eJTX^K0U}Sl&FXodM)jOdmCGHB!!N`G{2da4e-}yr z=I4F79SqkiJzqugt&lQh`Mbr!lC3L+?FK{5VpP_8%K1tf9<2Tg4pD#54>~J4Z;;m) zTi4n~J%^pfgZwkaKDa$UIqj!qkSveavlz|PcxQpjmo_Z*{$(+>X=^xIYVjQrK|7{mG)-+5rMXzN03^=vVsQ?T6Phvm^9V zEupYE`^jqUK^AvEseGWLk2>Tq`a(?+)LoyDv^NfP?lC^$`wS%~s^rn8g z`490y&TH;7d<%2p9{fA@8IkZ>mr_xA7h(9=3DS()GR#noY-1%jM^&@$gg|4+}rSeP!=tG5Cv=&$5*ao0e(_ zy*^3wTJCQoe-2~a?hzYL9`3r`@|+$$y)zq6LVnub&F_t8mX%^Te~R?fSvLZH(EUM- z0|%F?ed-+V>qh%2PrK(O9^m*z#uxft_RBK8y@&CkM;KqUlHnoFFLi&a7|m8XL%m%o zbjf+yN_RNU1HDQ6LVb_^2p$XQpVTY%q+h|PT#u1_VL9x>LH<`+{$q0aH%U8jIfnJ$iTYKgFv_=(f8g82V#6|j_+(9wH+E=P&wgS(3ifktP7mKZ zDDbzhSGoB4Z3nwI(e>^(>c9KA_;{Otn4Zk%8qU$w& zsC|Wg{zS-EsZz#QlV7@@+|Kt*zyGpxThs5u`sW$F?Du0kuSfP_==)0lEDcMA)5upb zFPr6;+zV&rNWc3HaxWR{W#bpQm(1eyW8_{m*3-}Hk^Zwy;Ne`m`s+8eNA<>gXlJ4S z4Y_3FJ=(c=D6?W!HvMxyDYPq)59m2uKWB5UfBCZv z@s*DP2X~R;$=cZ*jmLHmwEM;N&U?hZ+9RQ3I7i1X13J=ncNp{azzh02#m|!c32ZlZ z4xHRGV)30Ciaae&d!O|uyY+pgf2oF@1wR*;@4erPZT>s4eHOA0h49R;m2r;6m2IMr zGxbL@yT0Em=lL;x-E=J{`)C<%U#MZIFQ}*A|1t6o!hcBA5t@(szx`|QGg<%Acprb5 zKCYuZaemA{iSsZ%uK>Se=N$S(fDCv)ePyN>@%9eVwUzvHIC8ItzOVQ>**8yD!jh{6 zfIM+O)BPO%u1nc|x@dmV^`TVHP2Q=V->G0Czefr70{+B9@`oOvwfrm6W%Ko|&9Bx1 zt#@(&JW|$ZG=t=bKlTgt1LHo@HQC;;qWOqUD?EAt_8=Z&eI4KGHo|GW@KyRl_KPas z*1J|}e8kcxmzi(mB8I;1+BtrK_5;bHDOwJ84fSxl=J-cU1mOSgWh&h7V;bopJkWy& zaz9(bB|Tz8{D{`_(v%I2-h@fIe{xhqt*rZSKv4vaXv)t z4#Zno-%imhJR=us5VbbHSbwzMd4-0POMIU!WByQXD2a2B%Muo&PbeQiFCN)fpmF~` zFPSC`kSDS4@%%fy@cRLAZ-e`8*nU7a^aA|r6`y=m`!Rh&`WoR&)$E1u*Al;pbToZc zLm9U)-tu|I2dV!_JHP9ThuWCF=plxO7?%~Je$59uQT1dJ>s^et5Wbxonfkv2=Mk>& ziyxfJc>7M~A3B5aMLQVU_l?Qv#y@O`2OhKQ;5YnA?`Mh;&+*dlQ*!NpAZY(1%?c(8 z?Zuv4{@2&bZ+ZgwkLB{;9p+y!8h$?injk;mMZx{5kmp;2^m_E>^REo^8-LP!6>Ofv z{likDOpeRr-=(Y&KFa@*@55Ski6*OE!u!`QW_*bRL-&mf#(~e#1hhM8S~{K zf&1x8+Ro*cFJj%@XzG_G)>Ru zrFUuE+X>*jMr>B-cY%L$KDy>}{(wF~d%*i=w=c!$Dv()I9wsm$*!;K7 zWttuj^%39T77fGtt`)rQ*Mr{EL^ScW@FzIpuD`~7!Zw!o^c6xM^ft49 zl(&CZ;pJDD-I9G*iXZSk*AqpP?dG zES#l$k#!iwh`kyWd9bVEjT8^L(aG(jO 
zU&q<(SJiHsK6t-S8Yc}|o{WFw8|V=bT7H@+n(xzb|Fbj>(6VxV&u?YmF0F4@OOJ-} z;9E6J+E+3@^hU-PEoXe|>lk0QRKu=0&rrC@JZQJbVIl2SvXpdoIsMxxKc_2S4_{{p z>+xq1rTb;Rj+C?zZ#=kzbhMMM%Fug^KFTkdN5qMZBa)?TXI%?(bPaT{oY7Yr_X~|5 zjK9ASx{A?HHQ%Ul%NZg-d3-!oZ`@KAc^}ERWmj(8a&=%Yq5sf7|L*Z%=gI>w^s6fK z1*xCh6tVv4IA)^7saNuysKqHi`A%2kmHsUnX5$vV&&Dk*=XO!{9czB~%j16H&Bn3K z%D1t{EoDuQhs{pe_#m`Ds9!o>xmB~P-zqH9cCqNo7_XF-j%>WbzR1UM^QTjf{hnO( zfBA9B71sp)RXV=2dB3gXTfBV^`82@!!nmL5&L1DI_kXnziaY`TQX)Tsx4N#Vkhf zZ*=?-eN*}C{VnqUmE`wv2i~jwq}dbrmS30zeJDm>*774Tn({!-dH-^8Am?;kv~leG z&kFOOndQs-m6wG1<}zQXZ~6SqVgAc3{~Qagex>o`^Wp51UfEM`e$v*(bN6guA0_-n zV3Xhtt&40LGwp`OQzig$(p$FcMpC>vr$}dah z{CuSk@`s0#&P$GY-xB9BtEt~ZI~Na{ow5EQv@^S`KDU2e>NC8H^vZl7NJ#J788$fS zyg%Dv2*>&I<#XT%br4R0f{pFowxnslzL)bDG)&TScgV-=+#S;E=kBC-#{79dPa?ZN zjtw%TXISdx?LW5mjdpdbOgMH&OA%} zqlvV8{@&X`2=7l~i}LL`w2wk|+W)2g!q%0<*RSfJ;~wxk^#gnPe&me|QO{xAGi&o; zC2Y`^N4|44`YE4c^r*%`5A?AZ{gQYFHM`zh1UyGF9@?Ah-7zfH~l`muU(H=ulfD_f%HCSJ9k08SD;+` zHJj?s4y(_xX!q&4=HBkC5kcE4TNzMLN~(e_UGrNRpoL2Tdc$@-w;jR(k|q~#ID zZ9P9}-^=(A^)gwsN8^WX?|-Fu{NBBXBnJI0HDEL>V-*=NLf;^-d&PvN`nN`tqwUfU zApI5zFOXN>UVWU7{I`jGI_*1aPxtnQ^!^Yokjs-awJH7H?)_XJ3$~PW#c~Jh3TOPqVG0YGFV%}s?l`Xy@c#L^*l)1;&%l%E4aXyK{}z2O^J^N$_I*#{ z`<42KEq#-QA^f!hAA0ptqx)jrEMr#)fn9uu9#|J*(aeT4)B?-F|!8=*$ z0zBxghm`^D*URodA0mI!`_JFQczXZ&U5wlP=Z89nQSR@f-RKd-^28)l`$7C1i~gjY z{7qO)`G)-NH^sg404~b!gB%N9d{dj|PnOM5`l~n5J|@fBEKY|gSvJq&b1Z*{#^amH z7GG#_v){`WYdmRR%8=$Xp!aL6|C$ahA2(}|)!+P*RDKf2h-Ek0f2 zNlBwJ9(Q^_tn`ZCq4?rzk7kICKX8jx{_++f>=qvQ6L{pDPnH`nJ+S?xiM2QEPa|Kp zk}th9WO2`WEnmHH6T>xcW_Z)>8ul)0()_(G{O+`O(R9)|X$r&ki44>EEYcz4C)V5c zH)s2?$iLpEpIbWl*W2_ni<5u7>3%Kp&-ZJ^{l>qhy_TQ+N}Bdqe5r=Au4eH$7T;y@ zHVu25K4I~B7Jt~{9Twkd@r4@7y)+hIZ1G`>mo46B@yApSGT&_R<(9r#!_a=hPYV4e z*sriZg#Cscgo?xd+|UL)&i*;G;~YqZaNX{&d#}LjS21e)ar^ibc^}`&_xbdsiS(jB za=X6ttlD{f(4E-1sJ>1J`eezZ2H!5B-~CIZ_vO+-XVOG@$#_rs1H4jAt?$koIPRG9 zEX-8#@%?u`4)pua!+K!65bVFB90rL8{6rT6dR`!iMvo^}E20pd^YfH3`1!;4Wc)<` z{O3>qUX9`2{$I-dk*n_!uNjn 
z1HAu_oBsCS{9dGxF38j673vM@kNSQ3%zAPoA40j^kdsSZZmWxt*wT1z5%jf*@q^mBgWH65BR!?s93)c7CR2a5U3d4awH&y>F-9<<|ldym4CeVz7W}!9$()N_gA6{zK^09?NB_Gfqz#!o!$TRW3%78 zG>nJ%-Bot~lYL)f_dgxVZ*e+8K0E(^CVVd1d`EU3F2@^jdLHg#qvKPgYZ1pW*?G8> zx9|I_47@_~$$ii)H+U7xrRSt_{FvA|sqxl}nQr@4o=AP(JBB`0w4NK4-`TihjtVqB zjP)t3E2G^X?*6{TIXxTq652HZqk4_d$6E)RCk9^V>2>m*o9sI$pZr|RbbglO!ED`n zCHdaJ+4A>jSm__uFk7GEd!Hvs=UaSVfzP*2Wjly%U*3_gPxXOs~Ueiho4kNSybE4w#ck3ze?Nzdl8Mz_>z&*unY~;kM2#6W^_~G#tG?#q;j4K85lw zhj4xB1R)jn5D)O2Eg`0Bv08$1c#0A27wjY+$Yqk08+)C~@(WxP@$>8twodhjK;DP5 zPWA0Dzx2&`&|l=&o%V(K%zt-3s$Tvth50Wd+)?)DN!ZuBGG~9*$QD7@fAAgcz`vV} zZ!Iff_0Hj#Bl%G7J2GGG>%s2tWb=?at-Z|C(ETT0zi~e+Ti5!_+t>SRwL^2XewiJj zU6k|Itp05p9?rhr)q!68D z-ph2{5bo;*+=K1weZ5SvrFNl5(PVZ@#vy9Aw4I3mcaVL(y#mMW*3sYBdzBK_dM7NX z?C-L9i6?7cFYE&BYp;FRCFjCZubUOUtV{5Gj=0(MzUc>+E(SuLq{+?)ZnpWPChoTg z`+L}va9q9D?8`sMr(^&7`+)CN`q5vZ-q}9jJ6W&)^?kr^R=oB0T{H*w!uOx}II&*- zKgpDr+xs;Cg({}H{aPu6zkvBd`1yYDR{?zB&+qF!HiT#T6T-{qe>8xXuIrvlI!Y5c z7w_v}={aGtzW{Pe*9YtzWA{r&of8%l9@ZC@%g>UO7Z)Wfe+Ts%e#<0z-(__B`asfR z^GEGIkH-1O_-5+A?|;SmfvC00Qi8hYY~edZj3*9vfAIQTf4){`D^mHc5n|=RxOcOJ zK2PcMsp)>Rt}?RK-6vcYdH`?LKVuzht&WqD>o^~XalC*QK3P2v0tfX1oOj|o7~A($ zDx9fu>MT?hPCQ7s@!T@Vk#&!ax*rt$@aL)WBOV}sljHfe7`;z_g!TmZLVJRGpgr%B zoS>siri>~!jw#7LXFMSu$ayrL2Q{DXGY6dZIg-!LpYeIEL!En{Oyas?4ZWfe;e5~( z#MAXf>et9!ieBmep425f7nJAzWalcH-<6)LxPx{pJy&s<@$_6pj>C$PPIXCro}%eK zuNC(nV{wDi|3~UUlfmi#jl~HkS6i9daZ;LpP~{PFSOwaUOse_83x_ET`aIonUs zrs;0~A?G_~>Ayx^rq9>Nm_X#9`LlVgXDFVy|1skWzXO)}aPr;sKWTcFr9Wox_gnmI z3-{@J-yZ-v{$0jHHFSUvXoT`0{hyvA=@^8`6PEi5%F&EPt#4+7Dn_r+d_Lat?_a`p zFj;bpZU-E&h`uSEGM}SiJZO5*PX5J1|IT_WB7ftpKW2Ox`5*VO9UtyFPwBaOH!Ioc z_a3PC7o>hitsUmuU6QoeQ;eBVQ)dQ%$5g(%-o z9P0Bt(P!>b6a9U{=dm7{95cG92WU4!j?lyYJRb1&C=RzgGM?1>h4h!DfWwLBiUP=k zeodOgf#d|ni_x*lhZ$Qmxu!i_kM>~gq;-q8hhsHksQ;wve1((o`IZBC9}hU6Z_@O7 zd_Gn(%DMbZKgfAGQ^cOYU)qxE&o8+5z=0WWCjBywP(Jv%IpT-2J>6mH%?#82-TFs4 zZ$a0$&`w?y)O)hQb^pF~Gx^~5V2MQWrnB%pqko>`{8)W=>0u=r9M7? 
zgVG+$`n zH^aUUupZ!h0s7stN%NsSh&a{mUoJ!BLdy2sYU*&ubg)867GaO`neZPmSSD;u;zoc|;)YnnS zzfYWu@!V$Br;?mE2+wO|h2`7E5a+_q%hg-qRhW)?FP5K>M~l=uqmS~92T7;z!y4&k zIpqJvPONK`^+k8%xq4#(_%dDIbT@un<|4)KY<#qEqJ-efw9nP@Wrn;59P)*7m3#8o zKGJ>(WUN7pG(Ot_|!2iL1&`J9p+9S+g!~>Lv`@h-uCBE;tkNi#d9n((> z?dMu?q(i@jb`H`2{Kr(znLo5o<6-$8DevQ-?#5Sezuh+KMR()95iDt`@h1iN(V%}y zPQSqKjE}BgIr+UxGRgB0$Zd#_#Hg0r20DtJg_Zp8By_`PSRSb*^mRmhF>F z_ib^VD^B(2`NTW0L(BU({T9Jj7WiGZUp5}xr0Er#Z|-X9)N+-%(m*udQjPmQ#?Y=n z|9rfXjx*ABUSAH7*DHSszU)%C-Ho^DNiEL*QsZX|sDEtx)_T+Rs4YsT>_1?=(t00e zyXn{JX#f5)w3`9eJG6iItpeUTtZ&nL!_#K*O&WSV8$_RK+uzvrEqwy@9U#0^4_>Du z-csX;oCHKY+jLMn@dWEXWc3$!sFvU6ayzbhL`dwo6yIa3AD>V+OAA$N^n3Gejo!g^)l5^=9djD(o8~hvp8^Gsz z37Ng`kW87?b7ur?8mAYJL8s@&VkD99;NS&L^}7Jl>AwJK24R`~6WU|LdF{ zfInV7vp?js(}nb2^_w{F065>w!8x4%-|Gi>|E=(TnAYnzxpt13o{TQq3;7OM>yy=! z^&4#mYTNyh*w=ZFLXKZe>H3!(U1(QM*J!-8bjrC%)~;+`(%MbN=ds)M=YI}5{ywE+ zZjRqA0Ua5<+V%lDH>E&*44p2=Er8U6LF2{l^;Ct9MVX-rItDcQ!mWX1S%g za_$yuZ!$4`&EnnSI`@^d7BoqmL6;HEZ>y-MmcozuL?WrMKzj8b8#$On45? 
zSAPq6@%shTXM0Z1!+8yF7w+dA%I`mt{{K(f|9my&Yh#YDb87hdzhi$P$JyGCe@f}X zzH?#GQ5jEuNXI7+9(A}Y`XJA3M;-1d+W`08qYn2U1^{>SQHOiM2LShP6t3%S=y&bU z^^hM_&i`K=?eHj4*( zxmyMh@i6xng!vvrzH765pOJj151y{3iE58zK9u-|_#WrOO~m@Z2FB4ZmgT40u}VM7 zbrv`=m#o{Q<>LX?L+;_*hm!KWCf_&H|CDtBi?bg|nylXMLT=O()}?lyeXr0j=WuB` z;D12t$@QMFo;cSxtmn46_1vv+LVI|ydWzqn`8x~Kb)SaU8|!&^((^s~t)7S64^z@| z;XVlTtFW_&yB}s#PVc5(p2-Dp{1FkR-&?%XURJBHn!;zh@2OO{TI0|MtV8+wQ!)Cz z_8aLso9~OiwygO($6cuWh*wZv-j1sFVQBS6%@$RA*$+J=b`no%Jma0?+d1xW`?+YL z@>%Z1RlI(#iR?SFIQ_s--n(*s9QX+R*dlVT+$ahwPo=s@Ig&I{e=5}$i&L(Z>LiP= zRs5Ce&$XQ27gnkMlf_qBI_Kl$`zDLuZs|)cex1b^Gv2#M z)=bi{tL2kg&)#J`mvnU#_qSATnyKm4{=a8>6X$7H^?yp^tL^^0xQf*cw6{ThA6Fqw z#2>IYyl}*KXk6}n<9j>bPS)EjKCEHVvf1LypV&QDofElU19_tczVA!sR~0_qgY)sA z%})=}E+l84sOhnN&ybvr*+t~*A-=Prt)hQdabHQ-ERAOC-KKxik7<7BD>#|ytJQme z^^kiptR5$rePez6{=B5+>Gpn$5oyl@dybmR=o}=lNHRo$sz41Z~n#7PHB4Gw$*}2a4OdlHXuistE|%+E#d7UcmTOt9;p(Q>Y8dykJ#;m#VZ8bp z%^$D1nQ(evPdGPjB%DRJYrd}b+cZpCUeB=UW(`+W-^6ljZe_XNH?v&(9W2-K77bT7 z{jG-b{f@TBuBJN~ufARL$7}8;oZi19oErxSXVLwdud98VhDpocF>Jb9!&TMoEVpKm z<$CX9x%LNHuH^v@S2uk~!?>UJqO0k{j8{LX`QtSoC!F5DC!8Dqfp8Z6ljiGc|D1+N z%il9>`nZOxs-I`MHJ@R*-Y>9R``1{m<%=4wZu+W*@_m!l|DPGJens=gYraJ|z5hly zH~x@t7X4iFb+!Lg!=&Zk7&d)N!&TKsS#HgbSg!YHEZ6=kmTUQihO3+YQ^VNqS?p^1 zHRIL)(ERb5-w{r)zK*Il^1_wgMU%8$cC}Bi_W5VckTm^HL$RZ3a4Ku~!Fjb8UrUPK zw;#uHE!-Egx@oe;<@+(K|M84hpQicaHB$+v_Y@7QH=as3i&_Y$eYS>4%P9<-rfRsV zdOFLknaOg!XRuuRT$XEjriQDVo};1r5nWAZGG2YQ=8xB$LpZ(X6V8nn63(JUgwuYh zhDi(e=Oj(%Xk6Mo%dNSH<$5n?x%MkquH^+9O1sxk=0~jlFJ-*?V$Cn@pKy9#NjNuN zOE`U2#KfX-zIbY-+cgkyEnB|5D5B|OS zPj@gr+P`PHPA;29)e7_ZK4Oe}%km2IY2X0XQBluQ!yd)w`wBpZB&Bk#o=X3-)Awzd zADZ>!w7Vz=xV=;5A9=j}K%RJ?$@@PPpW`V;za{^xT1@BFp%Iu@@5}kk`(K**&FQ@p z^H|QmQ}cZZ*w?;z8+8UbLVk6;y52cz+?W>1bFc8n3&z6d^O)c}TzLOp z6@KCcWp zf2xMJB)dbTsO%?%){7l6`+$AejHC8QCt<*Yr@Dsm(zw$#j`w(zw%_#J$Uf@D0NYu5 zpY|a2Cu!fo^dYw2WRdx|=adz0GMnQy|1KiA6jEh3~f^xkG0U)cUdvFoHWy|;NSV>C>8Ct;*(ic{&Rm zFIKB<7Jr!KQCObpnz@uc$I&itUSu6(UM zhH@*2+>+bpDV$J0phs?B+`fh7{^wOfPoO`l7Zu_U^Zge2`i(9jTDDcy)t`NmulH{9 
z*}oI+ok}_Q`F;sLDv<~MEBGJc-wC(@ewa?QgP#c8V#Icqx2FfQYmepZS}C+^rbpG- z=vzy=D#qWu9dr9p)Lk#a?-tr6m!qdUpV5wwLHz>0$+W+=>t~A|!~^?PUZru|(F!k~9-hJiz+& z&SpFBy_@Ynxowtas8s1Mj^+#7v)})f@q_Q7$MXb2$Oml~Yls*1dy4Xd?JK*-hWplC zPvrX$wnw+SX76LSyMCXC+uhl0x2Yd${+8SSdi~>&kOca-f?g=6Jdj6}lgF2?$o#9! ze(WKAsr}e(akn4rw=(;&Q`1AfOCOC# z+j-FL9goUkZHJbtRu^hGy5GNC-)H?k=K=G2QC;%_mOo^_zm|MT`+e&_(|(`rK2H1n z9?hTng?>J%^*4U*GvvsAiS;O|wL$;m^n1VW^n1Tgy6g4(FO(W({r)iNcKvJVv$)CI z<`q-@Yuc>op}h0`#b%TDzn)sB-|ts;>bUfJ*vUYC==Vc=eSz4^YJ9yGI8CMxzz6@H zHqV#dyJ4k}{uB+X)f+S{$vtht!oc5qjlR$1{#uKpfe5_}gugu+cYi;R4{>C5d+F`5 zYV&5D1F6wQJOO+9H$qWnclHy0YIpWo-1N}TS+;)daQ&8~{SoD-j~`)&Hk~SV(bhlWL8gN* zV40lH%y}#;SI?h;e&_Yp{pwJEAMFx66=l2X`zHD6{c4!+hmtSbH~tvq-`}S7&(1%b zXYu`(-eK{Gd`j;nn`rUHmOjPeOD#U#;>#^w)-a?KmL{wp+G$Admr$<(tJgR=hX(7A zR!`*XkRvGo_BU4x(F@7_M1LG!G7QA zfnQdZdW7`C9|4_zXa=2+Dc|b(Wean3HUxCK9O0J%emwAq!ik@tU*__xSbv=FXS=3C z>tMgJt2^j7?0s187fO0q?>$!U?`pmGj#=-#Kl)gp7h$~tU%qcTD)o+S9j|vP^{rz4 z%INyDzY6;|5rOnCd>>cYUXrOafXUtLA1YN?mC^GRT~jyc`_cX^-^2dCRdR%U)P6kh zXFsg`+->|UL}khE_p-pBRr_t{nWz556ZL0v^Gp~QVa6Nnyi5KUM?p{``k}3BGgDRoRm2t< z!@nbk|6YZc;eUt49sgSxPw@j=E&f5o1N?o4|9Ogk*ziAK`1^ABKdJCC{2#Hn<9~qh z6hE-l;{Ocd0shT~|Cx$^i{Zc5@NdrH|5t^V;s3hD9sd^@Pw@j=E&d-N9^hYZ_@A!$ zHyQr782GcH-l% z*(X@s@gK{0iXYf&@z0buA^zot{|{xfzmIEVk`hR^X|X>rH@0>)GPz#6Oz*nFg~1Npv@ zCDN|}QD+19sic0rjjIvIlg_W(e9FjnzDK_ZJL3MluTx;7C5~1AEc1bfWfPUk<7?{TSIN7~q*+6g)v3cwcyN=Fd8v@Yhf^ zKQAYg&q~lWKt$*_e4gZAg?`Y{(UH+1>zpd5usrG|>$8+!KlRx6DW&JK+BrPnub>b* z9vcM*_4aWa@D2z4rOyYY^0IT@{Cq~_Um1M2;PXQvooN4IJt0T%>tAI`Ue+f`&++8L z4C_ZvwtZQx)5wF^?pO7DO{=DVD+E7gu0C&Jx2zwG2z=1heSgM3$njZ{5BjoxX05)w z?xIX?s3&F?ef?-uJpz3}J+M9|MRZ(+GYP#uuE$Pyc3&gyou7y6^AjWI2oQOE9S{7u zTi`E|Du6+q6M!XaXYoyvL7rsYKFa?VwolX(boqMz5~1$`$+trOz>Sx5$UpLwbe80} zXvR&9kIZF=dr71k0(ZZ{^>vkpjFm+_ z^8SJ9sy`8qtPd$13r99*a3%xJo#fvV2`-R$%CGgzSB$=;aNu7ny&Zh-_&j}yz(@J& z@;_>R&S_1H=O{hU zmlsKULa6jjw|o;(Ymt}9dGg=zef3I{i^+X-JDN^7S1>>9n960b$wgi(KP?;)dhtNt zW=QCIc*8iC57*mA0(dH)mzsP;Zxx<}BeN8o!h?Js5+?4FirsL!FjyY&wT4fIKi*Eg 
zb$LLalpn@N$M5M&1Rvm3uQGWF9F>=Z%73E+a8zDKf4cs@5!A@H9s&n=ZZUYiF6R4i zA(vwXhMy}mavRHaHj;49@ACu5KP!~y9nA0jdHCJQ+bXa-W(UlU`uQB*4`KcXk^tO4 z3QEtP*!hKt?Mq4o7M^0X<^V#u*M(tIhlc4oPs>8an+2de*yq6&8HP#APEB|FvHG4p zgmcdhzL)%Xl9pY3Z}Z#UuO!xQdHYn%ko!P1p4_uoLqBJ8 zY&+K`rNquz{Yu9av_C3F(ir6Nb+u9hq$crh4Wh|r-wxZZy_DskFGpn8q+Yv(e>bGx zG5l2PH4?y6yLN@yHOa4b%|gHT3U*TMnwT?%chVv3+I`MH%5#)mGyKJf>EQnbQpGXt z+KEf{;wepBpkdhFmXD!7Pe*&}U#|S{?^|3STBlKdB{=f(gz)wP-bQ_$ z?q7gDzg9jljqjsA`#vr|KXUR0%Wrz(`yNp47t2L|2dQ`N*Lc6_ehlXgMhxXo*3kF0T{Hc6 z)USSht@BACzfj*t?_|3L{ye{mQMS(y7~+8rh3ESQOB3n$IGrwsu)jD*bhW7dVmAMq zha>y=6>fYy>Gb}`_hp3eo+a?y?{#`Zeb@(mxRY=~`?ghlPWYRsN7eTw*u9kLzR3Rl zN=I@6>xKLfW!DLW=ks4qpVvF24`aR2_2E9PV)PTrW46&}^$qENOz4MP{&b4K*-kqg zrmOr+Z;@}mn=P)&*$ncoUQ&&ci|!(CzcAkwOpz2C57vJ_wggfS3PH%k&v8n{*Bh4yU?iP ztnNaS?horObSRyGC)Jd5d%r~gP;#R{2yb&+2ycnOJ4r_z-G%eDeRUUBsGk9Ntx~=o zT|1r=!r^}H?m}75Z|g2RpXas|KCAY(RQQ6P*HjPA`ne&TON^f9>A8O0g;(f#ETzKd z^n8|j<=blNGvCU$Yk$yPSQcT3R4RNXdhQtYXsfBmbFKV&dd_rr;dQ#Nv{cxo{%Sq^ zQ)=YXYUStYN=|oSot}eKDtuboN4@gpn);t+nru(FRr2ItCoLzbk-Q~7uV491iD5v{yjDDKW+JkqUVhPe@~5m&~ldwgZjN-J^1@-=y}lK59oQ7_3}qG z`5&{Oz5Kgs`2Rl5UoYS08oAyxX1?_``PhTigSVpw@8&V# z_0{Bi%b59g)yRzl^?G>wYWTBZOn94X^1W`%d>u9P-7KKdx8Tt4CW6 zeJjU=x4Z^k*O>X%*VN;cW9O@>$IHgd*Hc5^Rb%FhYVf{f?0hwNxj(NSUmmH!d&!va z_SN9+7&BjA4PID*`gW#V1CJA__2?_tz?(ZJyp9^VVHUhT-sLrTPaiv94c=47%(u9P ze;n7;tH&cX@TQLm?~xk&NQb^2yr`zWPa6~7lp6Xt{;mgaS4}<{9LQ5I-=2Vf;d)9q zU*h}zQ67(f7vSqG;d;~_jDwn)&-Yir-=BYZ=FgAR8Gr1w@kg&LwZ#KFIWDvPAc=qX zKD3A9q(wWJKDe9l_F>C!!m;XKhTjcf zzN51JQ;Mg1LL>dz36s^Y?4Ixp9T$`)px5Sn-M;NI>0}Sa*%;3iqu1ygAI}#f+STp} zPgTFZd%{Wdt0tVIeigSu7T%Vj`!16zjjRTcC9pFKK(#*VMya7qXUa;lfFPoi^7jiRlDa+mRBx8)m*(KJ{7lKk^s{p5qz}?(`qy*tVd4Dy z4bbj#|w=vv42-gO=ALejoV+ketxcE`Ru z2V1o*(;jv=oW}WxhD$jg(Qp&zBO1Q1^AUh2)Aa)HMJc=$AEL0rDuPT~CgL$z>Vv5N8) zmjoh*1)T3_=;nM!!&h{^1AKyKQ;gvubxy9zD98x0{y#V~#IuH+@#03g0i7p7`F2g`d;{wLjGP{{X?l;9e>vwp8t&7150s~T zzMta%<%etWQ$JQ}`BK9+o&P{N!rhyen~-a#=n2wuy&E_VN4ZJ*{%dKui8+0ltLe)O zU!V3kCHE*5<=&d(5A}oh29z4^(eXdZvA_9L 
zT5fi(T~P1rTyVtbbaISu{C8ZegT#Zu0T{<6wa-{!*X}N25)XD)P zw_YTn)pH8)Mo7AHKK8&1-(Vc0z-{$nCThpIz<^D$Jc~Fk@bG||CcWz(OGql`^ zR&Jxtub>?Li8rSBuFL85^R(PkwH)BSHs?=KzKv=6)wy))M=?!r%fV;6ETrkXb9QZ| z;?K_Y{&0>T7is$M|BMQNzGvt7OuhbXnqJ7I)8FK|I4J*yoF1~j`ej=Fc{zQfUH(~` zJ}#$MY^Og-)4!E#AJoeqr0LUg{G&d6FHOHR$DgHI|9?r-f0V;d`|`~+{o$Pc(mwHA zBGmu0IsTBpUrx(U$=MU~=L>223v+g(L+Sf$ntn{K{^Z~PNz>2B$@hFM|LHXSyqtWf zzYnMB*W~DXo0jLf-l*Rva`dtO52odp=ICSl{y>`kl^lNB#rx9q2Xp$-qxiR`>95Vn z=Tc4YPt!Yc^0`>k-W8%+4?@IFSU&s)J${eT$P~QE<~`icAAxfv zkI#cZkNnYk9>*B=t<=<0N2O8@lL_+pcSbW7FrL;^hD!k1S|Q+cz2CaFPO;N?9L}47 zRGz@gGYU){OOyBDd`A6$nVXXBkgLetzPM7bu#Q5v0uMm98nV#~&%3O9wRoQ;pEoM#i>==x{lPv)qtztsR!;KVum4VFMPI zdSD(8^_VXPA>PJuNW9(V-E4l&_v!igx-@~6^7p>qJ=~`Ny>a=Y+^vEqjLY?UHSv3L z@rQHqX6Z1zJlf&fO_#%kh<@`Qzw1kyr)!1q{)X?L@&F9`B2bu3U`w`>)x(F=@uHT0B3VG2DT73yK%NEyVM>=YH&ac>k$8Ke-{q z^P*XgRq=lLcjmn=hi4hyzx#}4p$oq1^j+hEL9qCw!l`cRSH%@cJD-@sF6@Um--6g}z%XL_ZFGgx~D~ zQ)~4U`Ip}d>3*WM8jV2CO!?{il24cZ4e^t2K2|gMK2_%<_=0pl#}Ds8cs)5?A%u9l z@qPE5g&iud5$Rv>U_T@1o3TQp>H12kfgO0!ZhA>(86R&nJ|1Iyw0`Usp{pz)x=tit zi_yoGpCMg1pTz3{J({odEi}E}Mm_3VPI(S5Ww>>*hHgj99nk&>M&^12o=@w1}0DP5<_Z{>TlzP4E;wBzZc?7`(ix7-&w>ixU*{hU$n7wh{e zqu$@5@Av%)7(>2bK2`3i(EQTQH9TFa(I)w1AGW=xy^HN0-S{~2U(S`Z^qm^QEh=ld z{fZ}PS*hw+ru=v`U}Z zh3L~N&tzSn;!Ca@Cf@DVp0=?)L7#UCUC;~E2zpTxesro<(hlU)+oZolKFM|<>6s&? 
zq+ZL*8m9LNO(CIjjwta@vHJLZj#Es3H?v+{>$P~4OrpMdfA9P7K__yi_lgdXkEL-7 zB1w{Rn-!0jgP+zW?@yO{A(Zo-HGSkUPz&I;*E78t*q%lbm>zNWkX5D$<} z_Z$2>huH4T>}ocCZy>w|X{E|P``)3!_!b*~PrjCh*7sM3?FRNFX<4rLeVx|%pDbFc z@1ZAf>Ea>A;{nUxOZ;-qv)UIqpHuS}qaSGeAxQ?O0S^!4k0)trvwSq5aUaV&|2*C8 zj(m5j`CUJ%vki{Ptt9KFc;s9J%@?1zP~*v>g%{n@N1sr_`HQq z=M9y2X*tAEg<|wFwR?H~fjxuHOZjkihEtK1~^%|BYeoG+9dvsLtgyR$Ck9yS9v$CWE&Rc~)*r$&AczZx`q?%d9Sp09%*%kSS?oGw5C zF}_w5NFMnPT;clp)Nw!g-FrM{$s}Jt`Q3XwW-SpXzk82oebWB(__C#w--(}FvC-)D z`wIQMjy0NH+pE^|9Ls0+Fz%nu`kH@s^6|@%P4t8F#@mHI>m|NKLI`^PIp=G7XurT$ zjB~&A50wGQq7V3jZAk9-;yf_S$NBigzdw+BpHzP!7XU`NiJ~W`3%)kw-%dVEwsz}w z9OFUc&(neO&y=zm-oXOK0dh~T)pLj9i|;f4$Ho&$Q(4n79)iS@b-PKIz`~PUw@2g2 zb-Vc9{Pxns?`V43Pg#Ed-fe{tiLCNHAK_a$x4X$b)Qh67D%aL$x9|b=`G(kt_`W@) zfBSBRe!k6Cw!>>m1RU~Xm&nDx%K{v2H(OZ`f4@Tbh?W-jvA!cJUHLYvS*sN#W*1X> z?R!a#SCBPXMtJdkUJv#o@!(_Rb3g5t+5T)9sP^zTp3L+>HJY~dUopC!x4uTtSfoOhf=yd&(UBiA!O zmnT_Adgd>X&On~DT@RRE+BiOLE+bj`NpJ6qDJKr!ME?J8iS1P@w!Z%b-c|OJ`v13ZdAEs~Dl&Ih#p1huQi%w>j6PhKuIfnwT%PJQRg3KVGS}0Ah06EVW*!(29|7Gju0wY`(4p`qn;rFeXv{a% zZ8vk3yeMgASd89XzaB>a{L3!Wj3GbA(u+-xQ!idAdU3kgHMOU%w^pBGR3&}=q!)h0 zSoqfp{`9-)Ce!aB>Y=ym&cYA0-PWs5sE6UXhc^h^WF6Z_sbQ_sSf$Nx?P+v_4jhAz43d@&ECX=tWWg}5v)8_I27`v?ZWh{b?%71&-6+di+ln9 z^6ddvSEcnZI~$uFjt5!q$udpVd!UFGj$aRSL;ssg_v9mo&e|(|aVd_(Skra?8X&X>H9Cs``$^s52g3IRf?xHk@~Vi zz@nxjcWZX_LxHiVyKCr|mL-ny$0O2DbXvK1pse^y!ME!D7V(N!ACc??QmrJwoXSC!?>OPcj{2G{4c z`bdZD_tW_|jK6`ui{cfYiE!kRc@pAlAwH*1<}>uY`>RRIVvYMecUKGd8+A1;w0xF7 zH(y#AWWLzu%hUPWH$vK?k5^H@y8Iq98>jg?l-L=SGyLmaxqkd}!tY|l@!Nbf7R3}xP2`4$gY{q8flrRz64m9A9Y!#4kK z^Ef^)k9mSQ0x~|EaN|oD`uF?ERTh_gZSoU-p3Wak;5ZcLgyMlYM!M^_w>Q*hqv+|# za^w;`#;@cK^Irx?XF3m7HGXe^brF3Ky};x5D8&6y%I_}*{PumpVft5tbkK1Z^gs23 zPgl69-5vZc^<*pinPk~cjVJc~K~Z<*O8LQk%;){;R+dZJYxDd49bL z$*CB9$Mlx@z5QkFA+P^zADRBMU3N7sRyal7?Irl_yL8__QL%dac_?1r*yjJzdbG11 zUH*N>z%GUB^F`@6#m-GN?Y#A~rExs(I-fs2j(iEn5z`eu&udTTPlw2dc!Bkg zejd(+#@Cj4)TfJ#AMI_7Urc(EMRPQsOt2pSpf))MdITSwL&eEM; 
zTR#wb%Tgr7^C`u{`!Bqo^?eb3zK-u_DowQcj0MK;0^6tim!0Dpm7r1s^*Z%eHdK(2;~cg2&IvE9&lC;nxDb_j5n|Jb1v_u5id#zfYjlK>YRazE|@TQBij_ z3%=V}kK|_Z33MPwQU1f@`@7r^Nc>)~30Z#R8+Cr4=_TXQx2|D1$QzH3TcB^J zO5E>PieF{*k!qtvvW)r$w8-cCqLM|AsU8+}_9&}|?~@AsV(5K%UWuQB@=rX!O4@wW#3GhHKUUOPze-QucAntm6_;RuN=Zb=V zLUMWF{`>C-6bZ&1OfGGb2lFWqSv~ncQlWg1|7gl*{gW)8_COwx&*ee<|4aD@8~vF$ zmV8!-nxVZNu6!^qL;j;FpBFyK@|h6G1M(>aahH#u$2i%>F>c>lFJ$W+ZQo`oZ^yK6 zm$97BgTcN*&wc&D*Z;Rmqmb$!xJAMXB)wGNIM3IW{QT8q5$#vXcf0@m@!Bo7o8ZSY zo@D-iOIQUws3*do1o6ku|3lcZI{b&;*6UyXQta5Hx^BLgev{o3aWwmx{y;8}(~clM z!tOlX2kz~|`!#PL?(ew&P}!i(MD9U)y8dwg+1JNIzfC@KJ>Y(?`;ory-RI}gUxxcO zlQRh)?x^-7=FhlaI%5sf<4s77qSMAr(CeZ|FFzfZqQ=d8)Shg?y#V-z3uaTLgXGSl>_|5cBPaEH${jAdy)(`p*xmG~WwN@@y zPoLNE`RcBBFdyoR^6oEYd=-H5;MiJ_H2==}^Hjd?7Wl6?2!DN?6z2c()v{h`>vxXd z{iu^IM z9`B#^KQhjmG@X8%a*2LqKHsH5dy?-C6jzM$TD>>WaX(lmv98Bj10{6^Hz{b|(a9#N(2eylyDubRVa^K27w;{EX9moo$-W=R@N=%KJ#~9@hU|w0B~!?EOBzzk_y8>cRNC z%`V@wTjS_g+62(at4>!!z5V!nsNd5Q>i2Z1FX~s85~$bTiXLIz0dQ8xAKu?$VQ2Ik zr91AY{9;bG2_KA}<0PXzGVe|Lpq%oA^8mn?EVB9yvA)S7t4E91m-QSam#qeGBg1$o z(}!88hv**|hW=)vgs9Iee=l|f5qUZ%Uaw)jd~2mcMt%XK_eE$%`RA=?@BV9gWEznDcUC*yI^|UQQS<59lo;l&I5vxzH`&4*XX`1tGfY=wXDdL0RTn2?Zfrvkq9w@(KBQx*P`NB<b;7!B$BkywLh@+yY$JNByG!1OHU1>?DVkN#2qPyNIE ze&F~n=JR!^vGxOxvU8Oor=?|{ZI`zDY@hHxg*yu0@sH_y|Bf&| zp81lN9a_G6{C-Oxwz%ypZs}uuD%Y*uZwUQ`HmL&a(n~wVj&8{6gZFYjgqDZ?d%hRt z@-5eVvwh!i?hDT^hYedQ|U{l z@3lDjnM~hfapU*&-4@?t`FCkNxqY+pE%hs!Hj$5Z&r#SvsK03b6Zq9CHo0Q^Eiu0i z*GBBNmiK))vG@>p((jhmvHyt&c9V`4A9sxIk9LV3hyAGf4TFS-b^c9AxBd$BJSgq1 zV*Br)=b)z;m9;=zv2xNFq}TzU4~Pd@|9EJN(w)aA`-hZHz~4Ge^rJt61N?Y@udW9# z)$+c6e7dyTQzdI`=kr6}f?D-VuKJPn;~vY;?>haQw8{YQZHaw9>yYttEA>0>BR$Dt z`khIK5Fk&|Hc`W_@-YngTIx@Sp)$aA{VuyFBp&2GO!`7a#<%{XhO&Q^ zal_kHeulzN+9=qhgX_Y{;#$08@w*=XdXJEQFRkI9Fjk(pf34CP^7%!Q@12?|a-*A_ zuJ8Cb{AjGVoiBKf#(LX%3Wxpv;jFhkNAp9gpy%|-|KC|}J45MmJ@D_B9?yE40^&c% z9BkbHq({~31YX<}dy!k+oMrR%ld`&yFkAn;75 zn&|J($v&cRo)k$%TQ82QCuyE!D#O?|xF=I-N8+kD81f`j8OButPSX2ig-Cfo*C*uz 
z3?CPS`xJM|``!t#juPI+{OEr)ZsTKbx7m4xw9h`CORl3mOs=C}fOQF2z+@5aCGJ7O zum*tIyZmB5;t>dpzyW>N{D1Ua|Xl zFuubZr_=p4)Uzz_TdR}gU)U|P>jSJ`y6>l-^+@;q*gj+5_X9X!mES8O_k$DO@NTWg zrFHJv$Zt3XvN){SvBQx<2Cb+72InF`iQ6 zReG+Zki1ns$oY_X-2Vg~yoWw-_-}ll zTt;%;^!GGp?a}c%U8gl(py!J@A7$U5)^~{j$B!f72RP~X4d(Hb8qZXIj0Io5epV*nfk3$t3E9?;B6&%X_uB^6k+pq38AMdnkJ3yY3o3 zIUOz^tOI}!UnfZCY5hBA+n2jU@&Sa z+xKbtlHIR{ZMbUB))IfV4~zB%@vIV2d)fTnyV5xBe_A1U6;73K(2hV6>N!jF z-tSqeZXo`2U-w4VGu_v{AuA6JLiuNCdG70WyYAn^=hyL07df3O3iV|o}|U< z*}x9!V04`zbd9EiescBJvcgO05cHs+Q_c^;^R>oJd3mN#KL&X>De%Uz)5 z&Az2GF;!J8y3skH*# zJXhQO_EB(bobC1n<=-IXJ9Rcl{5knuOZQ57LqD%R-ODxo@zC8PB{RBdw~mBvP}XVV z(9v|SSGwSz)~9=|ravCKUnV6>HZJN^t%i-GU4)*=|LMNM#P%DedfBgN^?Q2gkFOhr z@_3PC>5OP!M%63cq5wwK>m=IGc5)luSXR1bl%Z=?{?nz_dt)% z(emHd@}uZ})13NrPcwgYLs`>(J*aElk<&k0Ae8hJX|dlEZ6B{RFU(tJ=No)c=_@sI zpT=l9r6Tf-;%`c)0G8*8(1{UGDf*_;(HT9gVKMp_4fE}_9-m<`M%Rz~h>OvO6pnx2 z=>BX1=E&pYvtslC;)BrSN&8FpkDxIY|KC>40_ zRkerb?aTSzKoiYicNaL6KT^J6e;RrwhkJ_vD@z>sPm|F18CQF#FGbrA zQ8)h#-g6AVOXLqB?A}ZXlXV`?VjXTrtu+2Y<&tiR0&walY`{aI`00FB)|Lxx;y=3>97o$%p z-{L;fxC<{2LQd$xMGx~ zk?9~wU!>{C&-mpUFBQ(!lPXGuc|6x$^1}iYql?r0j9+B=pUe9f3g_tf1@(m;mh;oo z{EW}H{Aby{erNL>aLErnmG4;7{EVM#`RA%9Bi}RgJ^;y&c2d+^1z2Ck=ceUP)O7lL zs2?n7I-eoGVR8K)6{JJoih9~V;X&_weE{jutDT$mwPJo*mXh>B8b3pRy;Eb-Pgwuz^U*#p z2s`?e=fj@^cF9*^0la^zz9&2%2fMxX{tx*sy(FWvUiyiePCDzQ_gt3I>HFzMwvi5u z-{BuQU9j(Mk_5b;yh7;hw|qk8ZpnvvggIw}vwTLn7$W~-QElgkhu$Bh{mgOX1Mr~c zC^u935zn9cE6s#^sP~=@+p%tW!?#iDYtNkfeSh!!W`O53NrPPRx^kX=eUj<2U>( zAt6d;6JAkox5@ZkuihxpdOWPM_;o^;)KKI4lJ2|EP72`c5=J_&p6zG?917>WBj-)Oq({xpM`Qi?X1TxbXsjPs6%Mp5)2oAhuen0=i{fa0 zXpyw5|7X6}M6WG!biFkDyF#i5ijU-b&A*iIX`bx$<8#IFgq_8>65*vm+}DlWzCI*O z2A{|No^}?*b42n%ON2c_=yu}&h40PYDExRb*X<_;d2>n`L%=7bS6(Icnnt>T_uBZz zZj|jUujx*vkxsmS@oR4^<9+4&>&uDRZInk1K?ibzj&}(i;d$Tr^bZI4lODo%IukqB zJ0UqrKO{FI@bO?ia$=4zAC-v|pXUc0`9JE#;8z~_40v20O5>>q*k=fPS!H)5>up;O zAfMaGq-C?ld+*te|9);jgB?@jzWey6*h9&8ncx}1yAe-my$=`R(oOyTDB<5kcPqK!Hm$JUS62l4@Zsvjqf 
zp&xdB`6&Ge=CSkgzf$$%SxTp`Z=ih71#Fv0NXLWynoZYhwhKRfpGnB~UHYEuFD`%o zUd89n6@D;o_Iss~ACK=Z2;mM3+|Un(d?C*pgg+>kuh+ao=>3%eU*&vom7D9k?}te2 z9Q)YM#|qj3=9>=I4z84TkmZzR{=yK|8qySPm*!VBGjdXi^aJt}j{lpPE1eSkY$3%68&mO#I_PbRs2 zH~C=u!_w~w4+H*tL-=++R%zS zHu0lqze+Hi^7y)ZDCh48+*FSCeOz*zW>4)f(vP-1`GVj*Sby{5pdQIlXb(RX+cv76 zX?vhub$-?3)5nBf*(a}hkj~55KB|zPJ0xFeBK6wsg^%OId=E&zV)S+D)nUNh3jC=& zzo6xc(PwM*a%zVPE}1=#;caaiIvvT>ITkm)o?5mz>y>zab~g=}uv|e#s8b^9O3;#-g zFX}AYm=2)dN7v|h9bc))byGjDH1!)`4CU!caux28_J^n^FnGTg+QEGP2-3rTas5&17jGBQfsTB?*eUwfY3KSCb(ezV6Cyxw z41+2U&T)hsoX@54i}gL=zDb7dAXh!@He#!>AgHV)Sf&Mf95>1v#PJ&-{g&diJC&_Qd`DV)Qo#ZwA+m8_GI=jB<;|Q^v^fWcb~EN6EhYSr!cI*_SW;3ijm{ zTEBYfKbCUt$AE8?FXa^4$+5<7uMx;T-wt`1?qOlHr~`XzUlHT3$pALx4M=QhRt#539-Lc5!rx&BQDg1i&`Fg2}w1 zN!C#xygtc2%T+&oKWjX+l<}Jg$L}Zc@5221j`W<1wanMqc!~1suJ$jb zbdOxlxbt!TJI~e^BU|`>#!`mv56v*VBTM*x{sz;Je#YVc?)<;(y$zg|)palYoPjf- z`4|Kr#cAlA0fs^BiQp&F5J5!<`4EZ6AtpqQCzAn<$~ea1;b|K1ThsKSF-mQ3%M1vZ z<|ejHs=ZC#;Wlmc_O^{l^Px?ekS1wto7{SvCN*E>UH`S#f1f?ioIy>tcN z`)lpB*IIk+wLc%&y+#hP6E=_IAIqV=-xGlzb3SH!Enl|BH96H=#ahKOE}&>>EWMy`B#8HM4Zym*aUL&PxNl4_H2ae=%;yCV?B;alqs? ze0R_5<2^6R7w^4MzS85w_!P(QyUclutxK?7K~AN=gCzV86X{i~Kb+g$|Af*%J7E1P zd)Vlk`Z`rO&jz4Eqp^#Dh0u4U5nlTt;$8lF%_+3war>mT-R`{8}pFiYwEbYSj$X12h=-jD(eZ=Z% zbl$4r{OaB2Ke)^M?RTmVIaPjj9t>er#QTW3zuO<5k5j&lDmuEavIqLIZeffMhYI^a zm!2v781ej4**4^NzJrdh6;+tBzwPyW0_Qo&2mTI!*7_b&&`-}5AK&qQHmYZ~tXIq^ zj7uMXb+f;P_54oPCtT0>bzpf9#OUJh*nuu$hG~7USb*oNRWC?_IGzj5oR3ZW?~uU> z`v%s(U&#L18!%7wCO795LJ)ECp1tu&g1`iL%@MqSFWdxs#pz5t5&7PIqwFvGdLZb# zxpMtGQMR(>x~;Sa5#Rrg%DWKXf8D^1viAN5hlf)P4HC^ciE3kF? 
zyjUM@a=TFAK);fnG2Kp-_@nK&^nh-zN1Ofr&;JAU^S!dzj{p9@V7~Pynojz0yn-Ab zjN}V+{zkPaPdrD!b<&|T>paVF<^ObA|`1&=C7voIa;7YdV^?I(o*`C)W zqnb{>u1+>+n0#I3__IA1CV<%F>q?ywwQA387>5gXSM9iqq%`n7NBtPzf7JMMJ)ZcU zEiW0w^glC^ZY1>+-zCY%HB5X@PyRx~#J8HDA2jhjEqOx1t?=DBj{c|VeEIq4&+|xJ z7Yg&}u#WVM@=3m59rz_(TIBXg4VRPGNC@~L7V=rr^%>sZZ}RQLq$24}zRlE|4^2AE zNZz99q{AzfFHJdoxt`Z<(&2ceTPr$zPQDL8z7fyb^tmdJ9H(K|K;LjbjP_npUlsKk z#}V2gMR+`?^L%tCUSa(7b-K7cJD?vS+~S}gpOP^2_d*Fn@BH`IW4zlI_=0@FF0Laa z<-DE57v)Zs|K-Hqe|kPVNe}K1a{j^dEOmc3q2cec)JIIeHT*lrL$_BHk9+|}M;X_A z|Bc`2sJk7~IH_CH>m#NY8Yj)yFzk1QKq#oHItmznBj|Ut}}!SLC;CuKL*to7Cs`tI{d6%}$-??eKm6Y@QHPT)ylng{HV2D>kq9_`r21 z(Cs@SA9#NT4&?z4{0E(Rjv%||ImKt=PWv7S-xtUZ7@k~5L%+IH$~XCY*!fBRyF3K` zK56NDA2a_9K}cMX3%`FE zxL!khljn3%--pl+!u5B*dA>Zm*ZDfn;CEOzNCt1U@X&?kgFirl>ww_1uvy-zF#ZO- z)?5A{|Cc)c(teZ&KKDTWosVgo^SM4T->*r&a`I*6Z@$R#@x70{)8)bCoclIh|KX5@OM5^y**;{ z6qM(?7^r8lgh@xfV>4&xN@No~#QtVJ-z8+du^!nBI(mJC2l{}{)Qe~j!l`_(0FHWx z`C@o)kKnOf{-ERI?^w;5ZRh&3{ia{LZdb5Ludom83UobXmnL-FW$Aoxej~q^x7+1z zr>D1|XFZ}P-=Uz~2fBFqxE-sac0f+ie)J#soVT`-Jk&w1x>MUuNxQ82nwP z@@VovI|uz=#}!{E3;o~wC(45^DF4Z|P5B7=QrLoi)V$}-?=3ogZ&0)XUGr|!7rgi1 zCyjz2J}UpwKCT-f3>Wmu<@SE(7?2Ui`m%L1o<*4Jft3GrDxe9zefGX5-f4rYE|vz1 z<99STZ>>T>h~s)`*H#TCdCB;k*DXwZXitzI;75!TPf0x^?TdcnPLAMv2Z?F$hALs#}??-Xnw|cIENuWr_#ExaFOXKd1hKxcnfDA4&rD@VM;_n(9~N2RmxE1;d{9uu8Et`!A3yB_Mg&+EU% z{OYa7=P}1?0-e2H;*0uwkA==_95FaxW!)Uv^CNwE^s< zpzn`fcMNj8CQ~|0D!=F0crby!Cl%y(0)2NmeN8_#uKuy=m>|F2AGseI=we`!I7F>ha!AE#+O&!<259VOzE`8g(z zkHimp@}npZ)unQ2B5KzJI*vMh<}cKIcpqUL9p|SS&;4Y|M>e;j@mx2^*V=rc zv9=)Rw!9d|_o|=c#kdbW9q##$D!7T`;*iCoojY65mvrfwKGp7X^FG4*X1TAu0^{ZJ z8WHr&prv=28Vv7WR2Tk^_R}%9U&_gUwtg$}g{>2QSDWw4274>oSE)$8>XK`;f^5EY zG;xF7W(N-y-c`;XvUiZ`-I^`oy7Lx)5Ad4K4{QBdx8INRbIJ`GPiCX*w4L>N zik{$S<)F{e-||J44trgOn(&^;qqaPgq*MMx3+8M6`w83qy9ig?_YzR9CON|VV%ZF> zFP!fZaYuUopnq@SZu_3WnCpG6i&3wkJ-`F}+xBgojryVgni(K^!uj~%4sBOD&E7HOc?!-;<9TaC@Gd8I z&oy7<<&Lv{FY`#14_P-;pZLV(-zw$#U9%Jm7~(>Grk2CHe_ZY|DaUg|oTnY_oW<=z 
zpK@Y)iSHBVS3jxkB>(d(%pTx;mHQ$<3#8*YuwDr)Mp*a<$M?eXGYsMU=8DFLa}qOW zdw8+d$M?H)UmwdxmU_Hq)B6mlkNA+E0}@%0_+j_?9kk~AELSN2?kne4tWdbe(jTA3 zI^JII=MO)t<#@l2_lEOjhG$ccSbL5I{$IuLT|d>PL;5kU7~HfiHT>^UFZ8pR?cmNTffizdu05MbdZ;u437>UFL`gUtha}3-41fKakCf*;%IlKZGw=vUXeGX z2mLUmsXzYppV1Frrunj))obn(>@+&Zbo{pD&u%ukOxuj#+0Kd-dQjmtT;FA*yEL3H z8nAjscYFAH^#@1ys6Vu5SpB-+vupS{`7NVn7hJRGS3LhA%fH}!ZD8i-!tw<+K4znj zXnMZD?1*f1wuKknp?)^HQ2n7rx2sETW4XQMmJ-+Pn#`8mI6_3qv3 zVb@=NzA`_@RC6go_}-x8=d0& z7+SCEU-E(chn)y`Y~SSX-1t649neG{pKShssBM&gA@2}bN&P$HXy37csPH{pte2x6 z%x5DvBb-Nvu`4d?H@V<<*{8pu$o(7+<(BiirafundbJ@2r8vr2#___IZ8RQyYRokL(Qc(3I8EAz9t68p zeuneiRX^8(egVhz7Yv(#RQ2~}XzyZO9&`=&7RrDoqT))YA zUQXs1aM~{fWKr%2m+Sr1UsWuyiX-13FW?L9&sv+EPo%t;lSftV>U(XysloFs`&{p> zb$jO_v)l43o-jGxV|Gbn@qDc>U-2sQ*V%e*KEw2KqhfML`Q*7)zPCbo&3G<(rgS`U z%-@t-+wgDZH#>>?o%gtE@(y%dp80t$-^rtS4*l{+SMz>TkOS{O;oS7hissGTzRmA7 z{PL@s?Lr2^rygMcqWy_;wT~3eX%C#$)JNPm=e)R8{tH{>zgf}K_lTY-z<+C;-}g=O zMF(9k?NFa`lDocY>Ko&)eE%}!gJh8})L-(wOiMR4Byw#EWBC^LgXK503Bc>N= zCpN!F<$BZk-=x3MIlro+Z~}g=-^$5OjVHa)KT(4AL+BSr(PxmE4g`8{XCX?ffR`%zm)y0 zQm&{ExIRL;r9NSQE&F#;@Ejf<%%Ih4ZzktehR^7*BIY`z~I&O!7_qBznyUSGnwT9kVm=;?N6&A$`D zedn~z6b;WmQLo@0PUL1(=L2igJMd$?o)EmaF2ngB-#5rco>TgTcb-P9|MDGRz^Mql zVfSloQ?*~WdPmjzyX#r5hwnAM<;$$UW&4b8!A?EL3^KkKm@hCsuzuFtSYQoqxE;)K zF7xk-e&>`Q6EQoJR?MPm1cO{#P^rllXmrwSBn~SfaCdK zyyu7hi+F&fUh9{0jM$;?mvIa|J1yA>U*toW_Zs@;oifOETAHPL8R-afUWV`UpeN!y zK1LLzBg}QWv@}C^8W4~AcrPI>b;xha5B&MQLt1*Jt_UJOcJRx-K8P^d!Fe3QVtPy1 zam4p#%aDfl+H6R^1xlW2O>gT*1VXSlp`pa+7m*;W^T^?W$A-*bhGaSkx9Lfoi zKc4;g6?wPmU!3pw!o0ra$eaD+%g)Zb4G`i_UR(Yjh#&pcMZc7Hn|?+7^u~XEKjNSM z(?9u9aeSnG!gHcB+C4Zo+@ zgm3lcS8ua%>?X^{`#->MsYF#DkOqn4_f`4s7~~r1;hc-Ft8o1zyjQUVToSu%M7=Q2 z;CvbFxIy13n>0ANTwcFsaAI9fF?-IK3{1)dAgv0#B_Zr5w z7`%MD*-xPY~ zS`X_(zlI|`t_#i?SgrBPZ!$xG?=OV<9j{)gSMdR|;`k29n75;&-{OaEGoSn6Re$G% z_X@Di2n=gezi^}?KKVmB6ECjY%o#XS0WQDE7{>BkSK+%dfxp5>z)z>zN?a_5%Ab`A zF5!5UmW)nEo7ZnR+jJC4LVjOyhWSeGb~`^t`~y8!c|GgR?`eB|f~A_VB^Ks6{lOD< 
zXvC1yneCh7>#)491U-hHS*^P~U1e~FB@*rBes{+QES~Rd@Vq1MVGo{d=|h*fKWlt` z)(?9_+ZXs}??8h;a6GS4D_@}D?BPdL-eP)V-<7OO?-XdA(A8P%gi`u zyj*1qnsP-w7OyWYjLL(K-|mU>cN=}Ex3JE*wUGZG;{0{XAH!c!$ba~G^3N#b|B%Qw z@Pgbl=l>Yx!C#{v#u3OX@*Vz`*t2^zIlLcmH2U2c)d%_=jKY9-X+%F)OL8Al62yh` z1t-`zALH?9!GrlZ4sjeEv}J;1Y+e&jv=99q+^e2-iYBCOf{?h(_hZsFn^0#bSpBT8 zthcEp-^r#(Y0viwQ9!Ah*jb>@OYyzO-v_--&8u9362J?XfPNp5bhaCER4Jr?D5evP z0dd9o54T#6L5^UjdKVts zfl^|3xqZj|L+EXdFRZUn&Y)+J1^M7lzMs5M;gCN+%YO2oe(F=70xfT`}MOKV5&l>LupS zpB4D?xhPCKg?10`^?;AH)0^ua@pm)7AqzdUe=^Aj7sCEMPr`#;3AsQX@!ngVhgBQt zAA8jsXUE-Bu!CW5zz#kX*=?}n78dN_*|FU@uL)QC)e=K6n`L%fw%F3SPSeEO^SS-^ zQsBL!fOjdvJDX8a!~#BZ9Up&R1a!dnzL81kc%j>mbFCcgGTk>>qF`j-#l~NLha7eT zAhO-C*T9KJx8-NOfP?sq_0KmvK=*O=UuN(rKbSuvC+CCt1y(M1`lEhufOr-0eZKl@ zQ(M;uyISaRy1#GJ_1`hSulr%459=DOtH9f8{N@v;91! z)3ttr?W2y#92Tz2=LdiT$927MkIR~w)O=lr`{~GssWxB;TPXu z;W{YoOVp?QF4&cstRHK+C1!8siz^nMVe!koJvF%}4afD_(4O@gt?ORM5!xYqfdil6 zSf29IajC_lJ#ZX9fTzl_z18D4FiJlXP>lW%EX$b5d5g?D+Z9nE)nd_K%7y^fn5zj+n?kdtC2UU?ue&@B6X^x)=FExupE&SMSk!%lg%Jp`-J?SDL+8 z;EuVR^PCIMZw$>^r1kQA&Co2j|NZ>Tkkg&#|MFQC&;Nw+*T=2=1JFi-e*;_;e@H!f zXVBg$YRo!KBL+uoj2N0FgSR-&7sIHW`z#4KC2f#39^k%KUsm*06lU2l^a%;k$=$Tqn(XHJIe*m|vUPruisW z^o#eWGCX!Bj_yR9JiQ3kuMKk@!$ z)?EP_GVeX4^v`_%DC}?Zyoc*k%K65bzpwe@dGKKwfY9E>IEUr^A=~46&A-D{mQ&PP z-y)ME-oNc|y);-iferZ9jX}hBux|7xzUUtn0f>G92}J$xlKxoJRc49z^s>I1X<@CO zG<=5B}?iE76uR;3p2Muo@f3hv>048bNZvzVI0cB%6-YR%-+^&JaNcVi@ zPPD*)Q|ZGDvA&Jm{Ttf89%6R^I4#p(8KjgIk{5X1$%x&!TvseUUOWE^X-^UDk*5?s?*N^MG!QME*%JE$=tYiPq-w4y)h3{q^un5?rhos-f z=Z0`@jq8%&$5Oel3^>mXyYVGO*`M?6w%>yE!_j<-=XcRx z;gKJX?}o;B?EqeNO9x(~GBh>cb2h$)^>yD*VLw6suYW_zZ?f{VJA!?;#N<5KcbwnR zzI&eYXM9&7+_zq2_5{DHfcpPkM2P!iu**PA+C76l{_s1G{q450+u#05)r4ug8BW;8 z1&L7J&P&YH2KBewxwo|aWx8UVwjZzi0_Eg~O80&V%k)~_yGrd`g#dYjglTU>Pk;hE zpOLoDRQoAyZ})Q>c5fQ>&ewb*z~a(&JHHXmLr6N7!l+MTJL~s^{@GsB2erf$M5}!N zLk@6&UczpDfBQ0l7uLB2@3#T|8S+OQ^&96UkRPo-O0P&h+U30ejPns2%tI7LumcQ9P z()qWQ&fE{jxVJ*G(msjtNB<%HdHxsUU`67K{NQ;do{QkQT;e|z;V)GNKGu$Vxx$;k z&!w)P&NBh%DRtS*u* 
z2=IR~=D+iu?;E7;b4!q;wEc<_^&RRbe|x4+J7zgM0reb3zR~G zlm1Qp@FvB(D5u4KHo3l`TuxIxax8THV%#2=8_d@r>676>`k{T`5Al)v1&Tk@e;WG! zS&Yw7Yp~0fB39s>VdHsE>FL(`(&vAz#`DFX>s1Oy&aZM@e^Mig=Mg9B zjej`iaeZD)=TOgAw0w~cd8E%`y-a<@_AlQFD#Bj7Q-2iU@O*B3uJ+OMo9AjV|A3>u zv8*>KNIjcX8nX(C$BfwOYdz_FYFyiHrH{A zcu-zrIKLqJjr(f6KR~@eJs0%a3mk{i_WP0w+S4t6#Qn7PWUSXVq22!eMNzL6+dJvF z;P2*hJ#XUp@HdKY=I^_O{_N`;G5x;#+UEE`dUf5T#YO+ydQ&#r29BQgW0$K7(N3?Q z?d-@5Zhn{f$`g&ro{#Tw4DHRhkX!u(T~s?Yrc+F6t9)xXnvCmY9F&C@?7^qn|w`dmvm zt=3~dtnXL4-Ybmv@;$ttpaDgETEW?SR5))rW;hQ5&ICMuvt{{K^PPKRJc9l8p`(_M z$A`O7K9n2$k5=t(Rqrh=>-~D`@`d?YT>o}~!*QSAi)rFBuO(mkT`S@h!`pgPc$X?X z-Vch;Lt_0N`*f4t3)&j(W8z%it|*Lk-tTuc_hqvA-XB+bfAsTw`AX|Q*?jMhE4@F? z)ovr_^vtKc4Ru?@(2|NKgnnh+F}$lmyEosy&f2$qjr&{O-|YS-_cy3NwB7mv?^ovj z9eM6s=GzZyzH7EWp?Sz0{-TZD*+HU6pLq=--w1q8 zg*^Mc&+NdR3a>aWw<`B)jL)Wy<-Oee6(4IS=l#91 ziVIQ64qAIjPu^2xdwRvrMn2+;_MHMew*VWV=YtM+)WX;&Y{k!PWU0~hVQUxfDURJ{ z<;cfu#O18>Im&b62 z!7uZDZMS0&h~0?%!{VPWaK>l{U)993{56q3sRynm@9e|1%GVXuNBWZf9Cv`voy&y} zZa?!p631zt@1ecO^8n5FW)3+WTz>h^$;fV}%afkI$HPylANN1>r)>XXi?3^Q^xP*# zVBmM7;FsUNLKE_v*0^uuGrwQS^8l3dcpmdL>A=)Cq(8&Zf9pOk#CDfo;opCfc)0uu z|BjURf0PG*Sw3HD?dNw2Nk5*$$+{OSoNT?n&&cQQdxL*o8 z4{&Qb>ydlr{!mm+-eG{t>gt7ik2ohY?wt1-W{C0JW9_(St$l8H&zWuKcgF58IM`=J zd$WhV|LwQ@dM;(4-Web#R#;$i(~V*RF{^F_OquI0q+ z$8f*7)8s4*_ld{LGtWI`lsiACmhCgShCHC1JWs&)!h*i=b4rh9 z@}%Qw14=WWwY!591nlL4&T>?<9H4GE50|B?KeEDel9YMr=!;HYFz+}1HJp? z00MmJen&Fb1HAvk}sWxQwovZzgL4=Zl&nE{CLA}!Z~M-CsF<@qWW?lPeDgG z+Ou;`wu1xNUW3bX%7eF7G+(~`3%yGG+aXm#$AKVGQqVed5pa=B^`UmJ%YqNJ@vjaxo?1mMZKlAZ}e4-u0 z_wM@JOIp61{I#}+=gkiYT7Ww&{`mrl>si#x;MYu{JHK~9d9C?7@~!fp7SWyK7VRFu z``~MYPQI~|-QaX{`eZk1bCT>Km%}R_G5LMS`@)komq3!S74tS3hO(YY&_6 z_ZoQqkmE>f-{Jfo$J=c04lS4Mvwo62Wc`P7ThmiR(jSCBkeqDvPR-Bn^za-P-&f@M zXq4Zo`3--*uhT1h1)q6-Galc+`BmWcq{62ikhM4$cv$jr+$Nq3*Ii%q%XdT3v-;a! 
zrY-1~d-c);IX8By{1si;G!^uGS2QVaEA!G`XCtkiSS@Xy?z`7U{X0I3MaOC-%J@ zzyV%q+iCiqQk)O<@H_RXzyUs~jI+?(aXz%OtY4`&IKU%qt0pHF@<9%0@5bdp-})@G zW78=MB_#dp9Q7ps0{s!^w{tf0Cd7k&d=EL!4?5HSh~q&&o`a0zAqwThE(*o*P(@{J z)%f-UbAGQa&TsEinVl5JLr!=PDvk$T`3_AS4|(Ld$T%MK<^A3`K9%^Iybpnzd2TrL zi^BU5=m+nSeo;Ix%X@9G=jf#S9|`M=ODvZ5%dq6DNYsEZ6aFQhj`zp3Kb2l?{i$?{ z^{3Ke$tC(h%7OoB>22zllSLZeUwVc0C&>?fr=_a)C%i{%{5Sgv+@Eg6{Z0ld81=hbxz1qJ}Un~zOXJPA?X=%Rovr>=tGvtru z0r@SwLhDIObFIIX<|%)WKb8mNw=iBm0{*?$?@9}`-ywf256Exfvpj!~-t)uzZjcX@ zhdl7zP2`6wC$CnY;l?@E|H}G$jPb?#wdp9dA@7wr~{BV3fmi7wl zE3_ba`YsScqMWu1%3WNx-^Oj0?~`;;D!emO2REUY)RXz6-8Xx`1R)<;!_qPD zlxmJ-XBhp0{;GSqGmYTD%1T+ymS*(Z0cn?VFAEnV5&~kcV{`!4D4QSD}4YJgn~_{*bhf^T%s%cRZog z#1T%bcJ7gOsvM(zDs__o9xI1-0}}5sFP~{}5f2ymdbx%99#D3whj*LbvDy6U8f)P4 z-KIxb59dp;D`ubTM35qA*P9kLu+yUJ_7Ew zLPPlAE9bKZTn>iCKVRyhd~jV2`8SB($o3jM&S#j9_ob2Vh6q02!{a>!>>|K7T-fiOUB(JRlW_3-mwN;(1Rc=rQ3xNCvvig)i+w82Ftp?I8ZG(yxf{ z&vu#~$a#Nqr`4bPJws_tP;ddSbAEfOUNtT%Y9R>Oq(-+0`zo$U|dkXZwr$B%2zx*Cb zlOK=*k*~n7MII(R6UZjN&EC|0r~JD>^CSGz@H5|T>jNk+B$sisz#rdV;kXyu7cZ1Q zuZa2cQ}mB7{oT>~$M;1|K@Olln)EV0W&fM{2j~dS2Rb@^UkZ8u`g1MhJymhg{tbRs z2fH+4i;s8wu0lBx<3ikkWD=AUzGs1bRQZ=wZ#2509|BHI)?5Aq{jFO9;`qLV&N~a^ z!`;12K7{iaUT(`NUVfMP!ZL+l-Rk3ytaTMP}^KXW@D$cm#~t#Ys7$4sx>{fPSI zWJvQN9WGB7s?YaSsi*U0yRF<6w(iCI^Sxr%&Xp3>FR`AuJCegz=?_J6m>)kLFTjE0 z_fU9Vjr*JV_Fay@@dx%bn!tU*J_#U<^-cJF?sFf4`>T#`WMKn|{iB#!pzg z&jJ+DKm2aQS!SSQ{(eM$)(M{eRnKn>8M?JwUp#6VsEk?kXWfH$l;erJyB zTKvuz_EkU{j>DMGNR-HLRsM?aGKPBMd_0cJ@Wu>XuiV5{Nu35 zb#)eI%@gZKeaO%KSgxP(U6vr??8jt6H+MdI>hdz6mZESo>)%Z#qsEX-z+ z{*e#D9b4LM=;n(L7*A0%;8YyW-wCN|x;iQK+$n&?WvD?q2Kje2$-N*R}qg z9_o{ip6c*~KX8wWf4=yjXXJK#)b$m=Q%?O8;C5qGRoXXVrO5?xn zJ`)b@Uf?h2A0b~qC}rTXdFFRqXnyYV-vF=M+A72Z+ z1}z`LpleN6za_rz@Pys~70;Ixegx;QMaOWzXhJzKRUF`dO78suF99j%t2BBX{dP@4 zzc;)9`jH-xe~7@4&-)7fW(ChDA>VG<2nh4S5yJ~|hHkY<$VX`sK5#1Nq(N z=K7{F0PwLiNjc^|H0i|tih6!W=o9$bZFoSg;&oW?59{B^|M=W!oknyNTz0bggpYim zlYBf+6~}*G;{`!++;5;faNSDl`6Z;oLTJiMoPNKi3u2R`uYYwDPreHXxPLWC{03=r 
zpu;=`o3I?;X^r1~-6(lN{-e<04<@0*$BvN>-#P8*bogUUpPUXSN9`rP^e26<$=9_Z zM-?Fj`Y|L6^!x0W#;+q3)4wBA0pvr!&NnsXQHm%23{{}pAU37M^~?9w(SrUd744_# zlpc*oe*gsT@5Jj@GJOy8X~^s6qw|fBSAfR3$3xM$T9)(6aQ#!-MYd$$aLRc)U#DFO zIJXqwbV|N*(u-n}e@+6eolfai$$TOBy}bYzaDd;Yarl|Rolf}$o2PZ7R>>!LeY8;C z_VaAs^Fr`=rT|AkD;|f(;ZZ@g!QMvw+Y9BOnRsq3rr$dX`5;P=hZS>?-vkTz%D-dc z>z@?hk*|Bk;cM#~FN6;Nxd0a?4fwGmf(yKVL-1yQ84#o^l5g03eD@3Z^p{T7ee3?x ze3hsE(%ITRjNfo+=|1=QUgA9(=J;Mtc4@d@?upKkU;U+S-QVjkouT{MY3Z%L&t1}R zIk_v8Pvw`~Klk#lQ2V^Uv_yBH)6(S$P6A**y-V}mtocx{g?DPWU(O#uzxT^|1z2ur z=~A_vP!8d8l50Mcv+#BeBY%(lN=t9ld45`YlfI{ga)>V{H)=kVv+$6H`%9-=KPs)2 zJcb|oLpixl(~;l8*J>E~(GSzo#ro0~;9I}k9P(RuAmkU;N&XA;oB{G9Tu!bE`7K=6 za9TQF`&(LCp?V1Uv0_lx*SSRh$!`n4)$=b`Nlr_z)e|(x56daM>nOi1T=V?Dta6i< zUXyf74DyRuN&a=34)_+nM8j$6JUXc(m|Z0AqF4GXu4AI@#a?NEh=@!l06+FVHZsvM^c_<~#CxnugK)EPc9$!6gfWKeU$sA1k1= zj}b;qv;z?aUu+g?@|b-+$e}joddzeC@jaZHV96 z-;j9xGW*CGypd>aAM%hM zT!+Uvjdc*K{}uoS#Sizb^AK;z|El^vUal{>;z) zm{#+yk3{r6TE4%Z+r;DNcH7VGHb`n(`nlcqbGr@t?f;*48}#qfBIneju-7Ci>65Vw zc1(ZU@it#=n{D&eHlMGy*?blE_YOXcnU}e=?GyR~@t=6A84tK-Pd=vU{qo*0>arZSz*S zhoSSWjbF){zjo5v-f|?%T;Z@D&;V*K>phz z_@IZ`)Axt`u$R)dOH|U3e_EmaHXqp)@`F!l+Z%Phiu~UwGc5PUYk_g`2j$$>ynoH%vmny))u(IoaX)E=?-(D;0Xk@T&QKqa{7Id;aw{KNUV< z9s{`L0zGVgW%GZ)U7+y$bbgBbOAGL=eK!A3+b*~HY1>=WK12Rm0e&j*%E@NOXM@d8 z+uo}49^`+j0N>`9S9$&`RDRR8jmkIVzqUYcgM-1V-UtC%y)-&K-)>ZKEKR2X;AxoNq%ztV!dI1$>$q=k_mdRzckm@17zI` zj`I=BFF{e>^T7Tj>dm)5gJE#ozBKmR^3H|NA|CVT4d)px5zadoYdq$#(EE93CFFZ9 z;$z-9JH#&?SAK@Z$NS(Ab69u(*Qg%!188~h@yF5nHRzg) z2(y1-olc4-!_t(B_@oQ+y|OhqvCyB(1ZE8--dbp)L8DMe&7vwd!^oZ zpBC+7d6c`eupWS?ueiUB^7|#sb+xgB0#qFCIV&DJEga+VS1~;V3GfAQ-UK{sxY6(H zNmMxZ2Rg>|0v&-r*87Y2y-&I_@k6=K#`Q@#NC)dd`GbY_-4c}tUQ^LtADYKDNnvr6 zZ}O+H{V&0eB<4d*$GH7Z$M{Giz-QF+YP5eJpa_l<|CcczkSXS0)cz+5_+KOVLoR!z z$WZ6E46g{Te9rkuOrK+=>n6~}>F}r!5#urHQD5+rpW2R#3worOUYERRub`ppW<&3M z_q!T?ZXf;ky2{YI1ys^#^bhenN{GLHYyS<1uiv=kdahrEbH(9)k?V)3y())wloWZ9 zx?9NuT_DQ8L-<^S6k`BE>hjQ{qMt>-Aaa?EO`ya6DrHI(w 
z(@hV`J!j1~)6Pg`CqP=I+^mZF+^3^FyhHzZKzg$M4|8<#4`--_Jq*Z4upg4-9f4|3ohuKf?a+hyjLv->q=$ z90c;iQQz~vee)fk9ZE;O^D{wT)vY|*@q4oIz;)r``d<8Q!Tpt{JWuG?f{!F8jn4bD z-lNI;GhZA(ZWiTz%8M%RdyEeLPFTKux8W!FLJml8@MZeAaqMu&2bQwRCiX7g+**BR9V{LFs+ z9WTdy;Wz8Jj`rMMsK?eFek0`9dA96VW4@;JE5H}EE3iHg@`Ijf=@Oe~3;bVb2_Kpf z`QBvnY=JMRD*T5F`E8zihv#4C`@-vOzAf_ zeP8%;n|Dk8r3HM9o;m~y{x#nh-eB`?$^VnsehBox-r@Jzyj$c6@(g-D(NZ3_c>cHe zzVO>@-YxmBE8uVQ#fIm9tIfMhZ?}230UPw2& zUlz(gR!BEF^!c}xUpS5~XNGcrR=~^V<)?@A%L?g+*J&YrRUzH#g|%q%a!CO%lgD`> zeN`dd^hQ@m?-9fkQ1^9+PdXeEbSV*__qZO9^YX$gL9#~lV zj+W_WXQBS%3h+%XpgpYoj|$~6zvuaIj5EI}3#ptRaNH?|8BhOFfdLLomJj3UQ!!qG z=c^?@%753#wYlzdoRjN@;T-COd3ZFwJ|OtE+Hc&g@lvt4qwP0-W&zTTew;VbJ_UZi zqv=M!R_Q-l*laiLW0cz^rZDNj_>A}cPlk3IZ_mQcN4XQlUch$;;rRY}YMHR&LKLPwj9=1jJ$9C=~WME}|e6NW2vbipkWdc|n-v{WrM}x`o zI(nzn=j%4S2NuIe|8JHzzVQAc@6pHkI|TpmF6skTZ;>wH{K$N*KHTHuyrO!6$tC9v zeG)`Wtz-qVx|QSp`hfGXy4uR|-L@W?hyY{Q_wf1N=mDX(koCy0_~%P}T54B-d@qsT zZ_d~Hxo*C5vB>5-ygx?#TG`vm-+c;J=ld#`H&_^a#z?|@7JN6X)+Pvv%WqOI;d>Y4 zFTaBTdA62nz8!PUucZ?)2CEXVbe9)V8hkP+S=JX-`Q#n@+m#vfn;?k0hquB-4&uVQLeT+Zq%O6lW zohC|99Q!TSc@d9#W`3c$zL%;M3jHmMT;hVfx}MGXE|3&Pd7l5xR%-Teo{Z~hfPefr z_$Y;P7=0*z5~O466%lM zvw0%YpIpb#dGwd8?*m?Z|HH!U|Go}8f>;?Z1bOst=;fy(xS-EtV#<);@X^jV{14JD zhWY+0*Y~rz)~>u;t7+=>c@}PU-X;@KV7rfY-sYDhyM^`w^=DQ$`M|qiaM@LYxH!IR zJ0Oj~Y@hIPpBb2DOOZ?T{TlNTkCLFzvqGP-TP?m;vhMa5#et;aq<-JD791G?MpRAtPEw+vluP0-iXMd)B725^#&k#C{ zSb5O*kmR3W7YtcCxsC60s(mH+ zoE`NeloK;R_d%YG`UlowXDpM!*d5^gAnGqg{YZL1{wh*t*nPm?A>ryw*N;x`#-blL z=`SR&`ujmV=Sz5f0tb3|wfKND)cF%FhxLAFgVo|6Li~1zCt*~ks+94Zfn!s^i z2<@6B^lIia{_|b=A-A_U{^tI@7x`X>)FSlv^j7VLMybveqKWGhytEzS?~w4&c3W5G z`tz_LjrR13uldB+e2e}=+Z*SD2A`PE=^}K(JPCA@s+4Zeia*rZq|a0da~zn6SL^y# z3qFSd$N2{SD!$??zT)pdy7LEi94MV_76QYstCw``G=KS^k0%~ay+OMVe0c=-TSIy0 ziwFFP)H zc5n82?-<{1;P;1>NWYj^>U(6!nfp6ft?Xmyjd)*oO{5pNuZ!{dwn(qU`?w3o<)65Z zi}5G#Z0_SiGPplHVL!Lq+6R4lcT^wrFDx|d=kAE&(eD2yrW*Ml&jY2}peLE$=r%cn z-hwNR?-*a;vQ))8Ny|2{)=!iJ_2PIBeL%`r5T1uvS%Z?)iujMB(}=tF9ATC?zyluW 
z{}uV&Bj>izf5$GAKjH?w9Pg_??)ip2KAkdO^ARu50gmegJ#zo=h?Jon+hNOE$>2LJ zU&ki%VGoq#XY~yHMR^0%E59+Xte*XS)kEdP?1O3#^U2^Pj`;S-y*{na<*Q?!CMSbu znqQk@-@B(>!*^xoTuyjqXAjPLxgt=L`-O0XpH2ZbBIh8IxWR7IqtJJN!1*C`AHv*! zB7Leo_MJ`M*XWt@!z1X~a#`>*X$13v&H<&+`j*rJaJ@`FX zv-ml;**s+VZHSezP}%{0VO)aCA2hu@w#&jDi_Pb{4A+@JKX8oq-q|nGsfNJNPLtDg ziiOdSP)|?$3hf7|4&gCuMvCJ*NQ2I=>reIg%>1zj%s*EOf)bq1^-BA`FK{~Uv-E1& z+Eq^er}j(Iv%{j3p_P`8@y+j6_;o1l|r`Wn#{TAbMm*dTGtFhGL2PD2Ca2|JG2~4&PJ_?#VO}J8k*`>Pt!xC&my({Q*ZCgUdwUID^(^c6I!;Jb4HIT@?Ds>{sRvB1 z2#Ucg?_v1gyuNOeWy78;X?kqn#5P^~aMee$EQ3x15;0S)8x2oqX?z?P7l|Cu097T(?si8cbNO z>jDp7Xg<%+a9;@H5%wj^va$q6I)g5tC+o@A`a04o%_jRc)=!B~^L*XKnjw40@(=p> zT5FrC`(cC!IRX5%yh8JHz5w~e%&NM6p~iDP1M-FVo;I7O%od^8Ar6_~0}rILUxn{Q z?eY1$$QNA4$>!$+=4b832hN+pC%}>Oiqbp89+bZb17FNpUAl9dW_KI(El)8dIok^i;wceFj64^wWs)>}QleMNa+ zzQzd6c^A_`uNeh9>BBL-gwJquZngbr>Ok7eszegg)VZ49@aovgI2*!`!KfO6G zX8hPHEv9s^e*Q}uG3fSH+CEiZnxK9j1JT5xo1z`H_T%)6A%w^FrScuEs-&|VmH@B55jSQ^-?c!eUA24t!#qdC&&T66vnlCf|my=Ive*@nZ2}{EKL&9(jqn%w69uOZAgLSk`U$Z`i ze3Z|uf>5*En8U$3AKlgKrzEF){Nob2SpI`d=gWWAH}*OShzs{$7FrnFELigCnSN$+ zx8RU>K4to1r=m0}+0Q|juZT+F_}C|KfhXh*a{S(I zg9o`>AwR44N`g41bNmi=fr&|S<-M9MnN!%m0KXM~0W%K&otB^Hp=10HKM($YF8F7A zeZ2O5&hLu@Z(v-V<#yh#Meec{4>uCqFwVuZ!I3mnU{T*qA&Kj?al>npHtF#h$4 z4}t2LYU5pR+508+m%v5UbG-kR6Pt$=;RpP!x}cwm_?DAj(HvuES;nqg&1ZXgKZ5gL z%q!vWo{!mSH)=T8V@~%2f+Xn5c~=oX@)LN1RMivBpwI48aQfY~*iK>nT=&KL-8g&Y z9aioVfd_|l*kSk$|4L7T;0P#nGo%vgosJb2$8{F&*A(dlcoq2%IY_?ty>iDo@x_6D@=y1{ z|6S-zI*uWqxax_gs-NAjp5AjBM^DGMo2 zWT5L*rz`1Jq^ryIm1f2&-yAigYrld`h6Rq&HG(sat{%_!6zN!`GvHMOH2AJ`l`z~i z0s1dOPtf-S33J?Le@IJahYjAR*>#+c`z^ z>~^b%>o%Ovab2W~`gEt`Bjr$({XE+@UhlG>cCEDhgqOCPpz!_lpuaHKii3Q?rDEa1 zVZ1^;)MKk9P(eKNl`c~I4SEu+xwd|n{1v?Xo$&B3yRXZ_p8bx{f$!#XorHR6tfIxG zcB`Pv>4I?^cyN9g>`5euONG$TQ>hHxi^b=9__dpdkR0?g>^ATL_A%gLze0NW0sQ@g zKRo;jzS}kC>)qhrXC!~1lN3U^s!TlKI6l-$c3xv_w?+&8J`b(f{twVp`4M0BzGx!> z9o7>&ygwK5t6paH@P2u7J;=s`BL1{PiC3z-Bf`I4!>ec?QoeeGG2`%gx94{{0e{eq z`Ky=NdKcv~8+}66DA%JYm*wO_1;lruP(R?L(@Y@@-ec`cW!)O~HTQ?OKEn0mnw+aV 
zP11n}?CmsJu5br;3xeXnC;VbP0gfnrrMvh__vzC7u^DS~|hnd6s4uIkEFg+;2fU6kqXmJQ}ooJ;(i<+3`Cp z+$)}rW4kQO?=uEwH@kM{KPZ33yga|_O+C*2%WR*m=X9)exQ-|J$@v-S zH0FAvoV-`z^L^qjoA&EE1~B3GYx$jJ?&lL8zpH`o6~hsJI>iWxc7eamd8QGXc6^Ax z8QL869aemX{JpV!Jw|`TTl;dikGW0=eT}9-4v@1t?hta%dG8c`t+8IStK9%PGGBhq z9!=mmt3F8tAJUT9Nr8^i7^P#FhkZfivq>kDH^Lnd`i^V=8Cp%)N8`S8{W6R1MI&K1 z!WY+AW&Sj2x3*vYR3r-Hi?)A-r8oD<3hke2fO<+*YroSUa{henN6$=Uof?koo0uoR z>g!FpN4wEJ$jckLB%kX!(61u#t;)|Ylm~_=e^H_Qib=|!RVWYHK>61c%I}z@{JKK< zS9szvzLm>T-RI{AY?oe)t@3uY4V1MDGY( zwBt%|Ux$y!v>%X;_AnlLwL=`r!oSk%Qi(G=5UGYI_=C(<*KY^| zk1Y#kBa56%5@jte@7$&Q%a?8X5yFk*4;g=vSzL9h{TPsJz0d3t>kksGPVu|h3UL2+SRyNu+UR_j;dA$9^-mtO? zlK)&MCEfDQ4L?Czqw_Jvi+JaYHWk|Q1s=Eu7^iO*I>+fBD5TGj z_He)84j%%@H_|tT_n735;oT+hvWHF2Wcv+Yro*lP{~ve1zpX?2@v+oXY}eo6eC!CQ zB>GC|QEb=Kg>%qTpsNJz4SXc`>lV)=^I4f#OH*859DHx)Q@tf3%~QLZGIx( zF*A8`{#Q>jhH;;=r)^^!>@t*xOWRZ>Nxyi^`f1(bAH_H&EWD6 zxqr!bqOpEmk)&bwG0vbK*uC&KZpL2=!^ilH7@oW4dJE*RBJsoSZ@m5Qk4P|lkGL#f zQ$x6?ZTpm7@!5`GpY21c&_Rd*y77Dr_<;HvK7X1f@`*6Z=YGCBJ81oq`-Ih{$R+t` z?+Kh!C0v~+01)4;p3v$2`Xk$G`WyIQ9C+OQe3{2fF_h%FK;R)_d8BFgtxz~=n++(j zTtI$#KDK_AozIWcwY~GScv3&x&+khRE!eoq+QE4U=poe}sjm7Lr6=u#vaALH0`%9s zZ@6-hF!UrO0s8r|!g~I^S2XP)lsmsrPSk~#d$LgORfTeIE0kk>&lJj~g>u^p<<9Z? 
zlGk|Wk}&%3D+}d5QYd$>mz!NEH?2_an}u>!FSoc*4hPIpFX-IV1L&QY_nO|M{VwBy z_PesLZWrYz8#Vd4_6%Pi5q_u~UIl3qIjpNEa`hSuk6IY=MLmX?wB6@54NGUg;rYk- z9V;zo*^;{3Sz%pO^b1_W=R4?cXjz!oiJU;Lc`m0Z-#)ML`op}g{pH%9!@N#J4-(6B zKD39jQL`&RPwR)sh5q#GCdYdcQ#qL;q4FeIf<^wf6zhO?d2!TXTAPZsRcduwKl+RNPFOdUh;hTT5qo)tL>d> z6{A05d_jLK&ig+gCMD%A9?vkY1UW3Vp5L~T11+c6iT+<@KVCFBfN>-)$U*6l(PO>E zvwv_No-gxp_kgEM7vCfcLh=23wO4%pN_zn9-Xldomvdy{GQT_d4?y5N7xaWAA^loW zJYgJ&>X{CE<0x=XQ@95wz=fOv?p2e(#l5AY!L2CVbtd0lKk$Xf0B#@P?lpTR8})ok zBvD+xXq)O4u9IaW7S0ZuKItj_;Sse{fTZT%qxrK@8wcfkqaHST<%@QB*xH#dx>Lij zV<4a98bSN&fRx9$QM8BZ8>}4f zZH?Vw;Q^t4MSc#ukNsjm-gt+>=l*e@#36>~%*M7!K->iTP2#ltHa};kxxnAAE8Wtm zf2%&*-RJdDt|$*RUq3D%p8;~!MfM7c0M)RYSk2f3K@Gs}hA*FL_znEFD;qo2#g3zUmx6XXIBlnb!~ zY#iFVTm3m7*suAsk~U{>JmWw4rt8tz${VZ-+jVmnO7tC_$??5VjE=W(zQyhiO*j;CQA}&ilFkQ5?6j{f6J9>mX~a-VPWy;wD?az&HpzXh&?k-T9!U#CAK*(!;&( z&6ZFck2h}qn)V;SgDWRr)o@SSTS~nWW_viFi|v)4Y8_;s=~te|uAXIha~(v!hpzmq z_F8;7`N~g#*MRG9o)UFiRlk(x_lSj=(O74bAP{_thRZ=sAki7pU$^o)Qd}} znmx#K0OV_{`I(lpVlVpqjQA4|&eIs~nevnI`>{b!n*Cpeq7+w7{#oTC)`R2b=RC)D zhc$%fcE+}N*!w%@0o;F%?fsW@-u^+y!`C~;?((qP&$M^LyuN(c=wfUS_W*dWg7f<3 zI8&Y=4?6FLG830_zFwFQwW5QT^ZCb%zJB5Lhjy0!VthM=-QYOS4diQmzV7qUY}DqT z#rbGY>6<@UDCM$I>wozo8`lYkd{3tupjeJ{JiP!^l6oUM3-jtp?flC4`5)%faLdc5 zDId~OulkI~II8X1YxywF=)8TOhlNkTgY%&$z=h*|W4`Cb_4Wgj zhVgws{EGY^cE7$_euxY6xSK3ooG*rP=O)x<;}HBlZ%@bN2npR#fw&3tNYA%%z~_OE zFXw@zPdfE!x9_GX-TDL;YGl8tB|p*rkM-f7e9wgw=|YE2@msB9ZIA^wtZ!)hh2E-v z-KU;@&n;M8w)Fu`7b3%d&#k_%J)+5K->9B+x&!WFshH~=*@){a+g=ar`oQm00nvPK zXx9Wi_6gy8Jbryl20Yq_sk}oCH`b$G(vh0a{dV4;nkIC?I9b2Q((5(z=Uis@gsQ6q zVR6glq8eOSFS=KQO21u&_2-$g#7Fwcx{!`TTz9^P=df-t1BP}p&tbW`6TJg2LVw#i zt?VJ|m({mSj>o0~9#CBYk9}sJa~$lr)bQthm=2d?j>}`~EuG_D+IF(a1MN%3qhCQz z2IpG-wC%Yg*g+X{_?!=fau-;>!THuc%=hqX$ls>~KZNy3_0rF%T$4W8sI{NxDhZeQ z4>%r>o5fNd?d}i(DkpXx6Z^l=U_;cSWvAmxi4+Go@=yB(%JW`3;Is=I(tr5_6-kwR zk{S7igvM~>FX3=s0qam`5ARc#lRwmUjoqWg!aW!C!!-r|{ouGJ|F}+r_Ca5_>bKvG z^E>^Imj63({%*tPX!*Yt=O3Ji|F#1DUyJkCpCA60qWt+4yMBxk`Lf;WXK1vzbo$*w 
zDWUf~>p$s~|778cFiKpS+^6BO+ccQ)JTdigbpyC8{Ig`ecZryqm#g0Lpq7XJTp{s1 zC&GP6*wv8tw9WP0JOeZ=r2yv=w@W!MKs@;0Djwrft9Z1mRXkeQD!vlMA8`C2ALGvN zxE>r9c?6zUo-KdG@t!C0k2}B9Yw1^>YyPmnMLVwa_H}sr*bU8ew1@FH?=|lHj^{`G zxbr(6KRnIGkG%6vSJ6*KLdVrz#no7SG~o4kUzk)BJq!Vxv(GZdZz04h_F9} zl8w9V97w*_&xI`dgw`9MPdFq41nYx-+%4sx7j6-mjPt!i);HsPzvcOURr2Kz+B$yY z!A&-fZI(oFUE9p>6{fd5zcSm><9aYp3;X>}hik(!vU?e%lZ?;W?`Tib9_eX&%J$)`+sJvLC0bE_ z_a5at_iti5Q`eRE89uOIAg5gK<9&(7t(H#yBcJ?h&O=-e^E)lo_0~cABo8>p@q_&? zJ81Y!x-RxQtFHq>B98O;;(8XwHNfY1xN$c^(Re6mASUc{-LA>1H!AXTSkmEdyxsPr z01vL5*g9EH+bz0}O#BCgu3*v6ye{VbFw9@~TE7hPd4>rB?WA(@A+@tuKi`F(43BtT z3q8}};~eZ;$Q9R%>lgX_^#b3&yubu!n&@xPt*p1U#SWQb?|cUQ!g==x%>Ek%)kGcy zZO9Y!8eCX!@cnt>PkdthwVV}u(bu22-p_TXp3>#db3VZO7yAp^4SMiAEBgIn2|w<> zoY&I+h4yK@`|Nku&Ju!(>$2`B>))2Y3zJN7kZUdHe)Sa8)|C6&^w($@bcOz+zUMl5 zIeDk?;eck-`%D~P5Hl$s+#ZVgpydqAI6g=h5BBFzeO_n6IgK9U0~Ccg=smQY{K}Wv zI2!bw?{l+XkRDAstg?P0K+uDDJ~il5f3KoXlov9lQ~yY3rt*H65Y>;SGy0AHvQz?~v+saIn9V4Wnk&)z2Vk#;K`!@A4?%8{Qp>D*xH zBi0T%PvBvzKkwYEVf26Ww|u+lXUbi5f$>$|^VW4L$}99MQu1Y6tvrwy$9Iwjgx{c3 zw%^9Tc%2dYZ_Zh36>xY5Ps&3?xQ>&Se_;K59-s=GeTFB$hmq|ak9XDIM=2-gSh=&T z9Pto7DxT?7+Ly9kdAK-3BDu z8Tl2K-aO}qTfZ20dew&#EcIe6x{P=xgVj^JPAcNjs$~Imcym4*PEhOs=zoCI_^0vVEJ3 zU%rl&?e%bwr*E6w7v!ncxvI(J=&0-JB2}PQ^IVn5OFBgk&x;H4Ap8X7VtG*gbx^bE zcW6x>(0Z94o0nYD-%M{Q50_gy?K6I#isu#&2%VMh;#VZ=u>18ptXzkWXJeD`UGk6P zyM*ETygeP8G#KtZPvE=f+c?eF(Fb|M_lgdl= z1{JgLy`|k)F(dB(7JWkdB`tG*4y#hl{lB7K;r;b;@_EhINyN@IimrrC{M%&`oeRK{??xKYm$vaoS`YMU6aX%x#ifaA6!LpTvhhe|?r z#7{i75FkEIQ_B!SppzMX65=xV$Fc#FKKEXj_PI5^omB30TGP(VbD2I*T6#Na+;(o7 zW}3!Hla{8_=)Uh->;2AmJ{{R6$+WlA*^jM#_TFo+z4qE`uf6u~H>~s}+l>yLn`3)% zd>we|$J^33*=y0+_s7Z|KWq^LclkglJabPw9M{|L9Pqj_hV)NEhUl}Lbk^lf-#hB2 zp#~>V%FW@zy|GcyB)L()(06UL&YqXM%?0{y z%jbXE$gpyD72w{Rhuh|Gq-$-^RlKkBV>x)X$71*HUMlQsZYtzk?B3mpLcUrd-(vUf z-YDd|qL6Q~dv~V``6`8ci`~0hR^TJwdQ*FmFPn1jI+CB?2Y%}OEz{5ST*=#Vc(rdx zd8ro4UzU?ct!w1vqX_?>FKp6_^U0=9(mrqh#&r+6ep&MK)=`@K#;-z%V8jsaDJb@roe=~hoq~Gg4>3fs-P;Sn3==&FHZ|!$Xpj>K{EIsJR*AL0d 
zb$&;7YF3|qFGb@4^(yU%Ed35oZ|po|yv@H0fs{ej3v94=v}$o4Me&VQJ6w)ty*;%K zt@+yoju>8TLFIzKG-zhT}1%yp65h*I0f2OT3IH-UD|q7V;4EjD$VXxX#bPXuPiVvgXb5)1zL`#&u4=^z;Y^ zNjQCPci<|wV>w=Wysw+&*Awl#dq+H-a`7tY%G-y0*Ngh)7a=RsGsJfo@s)#~ZvtVf zclc}IQ{*`AzVcqY9}K+z7y`un|8u3ePNa9)I9^r(uk|Iu-;8jr(*;yv>Yqae{qsbQ zuJlOA!O_reh-)K~Xn|8YMecu?N(QeURl&>nn z)8VIAFcs-{xj%7!q=&C}-qy#wU>b#10XmKH={Ht+Ir@%{-sNmuaGS|rp99$%!td$Y&uTvaiBQTovqRb*{^Ao%~3<5|zfgu^*A5mtS(ciZkG6xu=j2!HOA9>4wu_ci~RMz^RUs`1ca!@EB9bn%VN%5=$<=F!r- z!l(1b#d5Rt;gFB@*9QJ6_ltbdIk&swyi@wTx0BviXzcVb;O%{f=TQG&f0c*TzUk56L&%lRpEvGaYx%LB?tHp2@I3GGB>BU+8cXk99qDmT zmi?xRsKz>%8>KgP?l*X~2hM?6c_)e&^As#?6hUyvSgzCVU~&<(@i3ZHrm|B`*ZjHP^j%<^%(Wjys4)ExF( z1}(nv(2V)|&WiM|#>iIz@R9Q>;GtVAsDIZPNQoi%mEfkhd zczxe_(eeSub|+uxtG@ks=NlrHy&tQ1!m)?eR{J_M%VAjWM~MLXW!!ztQ!QTmz1`1vJo^D%`n=1{ zFz6)l`P-6*M` z&(NhWzGC?G4l(!DXy;6Y{?s~BaU77o$?S|6PrGk3v{+Yie$*b`V-e{Q=lks|K4;~z z-B}^=NA*l!bUl(D^Y+#FuXou7dVL`(Jsfnqn#tbl)i`$`+A*uopplV`gx*%Ub2A=K zc98FSCvFYe)!O0x#&*bi+#3tKxDxwInulQ>&FVD}`^t=`)3}`;*>CCTu^IElf62r8 z&`X*Rbw@ij)%C1-w@>zjTXZ;tx&gYkmYryvbZ||(X9&`Sr$AaE_S6Kcu;&t9o@B172G4SBspaA2?^1IA$tV^ukjpqi<$2z$CNA5Jg(raMQA8ReV-o+%l zuaND>KNxVcw;5cw4}(|_+*Jr~ZV7jLHv#@pi?1j=yVApfr#T<0dj6oJ66umV>dW?fCT;mCk*Lr2+p+k;8Ke-@zDW-By-(R~I=4Eoj8aN@TyMV4%c#F}yZGbWk(X0j=`xtn8~l_WRe$e} zdCR~d;jb8e^&gEl=}RB;`UD>J*MVhmJ|aY3=OZ+KlYY~?XthnDC%0QMN#$1e2R1ok zeK(u>>|!LP&+o8uYwvZ%wg&uu1A^RrDDvIr@v2sS69Pca+8lLn>V*bEdj!wky`G&z(a#s5^`tIu7 zK?6(F-nA7@m-<=dVNV~>d1@e`eAW(neWfQjUUDC7;2;zpocN&ih(`1em7C5wKQq4c zy5iCIV$vhQmtI4cy??FxXrG39Zht|~-HABqIi?>jr0>qBgYGB46iS$MqjI=bVC(g_ z74rQ^A>U%_@2?crXWm`Nx7hmoYlVCRg?x*xzn?1PyQYwDvGw;i3;EU;@-4RhzP!Mf zwS|0(y?eK|kdIqJSv#Q}-dAXcTC~GpA>XP(zMszB4`BOoCTDU{*w?uzm#^`GFM7Y* z`KJ5voqCP0X~c*sd>ps<8(yWh(I2YR_p{1N_8Qz>eXhv0j>>WG6f)%3a|x*X z4uUJLqh;x_zNUFL`Zbr^q`CTG&wgYdx`snxUM~~ zd(euXTjS6?M+KDGJ!!>j+&u}rTakVYKG4i-UPFC#7~#rT;6Zo;eiEMk8u+{5cfl{N z=l;>#bMrT((=lHazU(W?C6@rtWj*(Gz^Y$N01x@I*m|t$bqsK<&oq42Z{MZ(1Gm_` zFI79lc?a#kXuV1M3hM_AO;(R-@VAPn^)XM9T|zn;&+_SOJkffvzHhRAKN8_oj?P!C 
z-|q2}+i6s~iYVR7FU~JzKWUw@+szp3_sBMb|Ksifm zfYQn_JgYB9$iu#lRkiS7^=_v0K6rsx)iXXV*^eKljf9EMAF658R|w4}?|rP* z6-)_7y^f{SOusjFR!=kOT6AjSZG2$4g&XUZx&K^ZzUZ1p`BlK}qx``&u3Ks8x@Vy8 z=cGrx{<2R@t^r?)?=PjZE-#H$UVb_o_Ry*`mz&PIyzL9SMCBLv-}bo~lxV%L@r2jw zY;>m2yWD8r#Br1DqV`E&borwl!10c9xfSW@VW&&^)0bTSb-z)37}(ekXuuy0`G|Vx zyr14ZI1o+7^@`)_J=~9u{k#v+EvKO=>1#v7wzPg1^GMyhRgm?KPB9T0^Bd<;3)r# z&4*qs*zZRQ`My!G`&WZjmiKxg-@h#6`*a~+Z{+)8A>Su*`SgBB_TAVkoiX}eS$a$K zhnu|}23CL&IN9fwK8LUMdA!=S5%Qk#`Moah>8;V9cDfv-b??u5uc#9C_`r5wr_?!I z+U+14>p9V0hrC>kFOpO3-_w30yuM>r*%k1z(>@j9$K6j~a=ZhBE`I}8xgJyfbbe$Y zzWZ8Ae0^nL;%$atZY=QK>pt6Sv>+ch0aS7{F!7HU&9|eFZ!nin<5%}?FUQ7R{M0&U z&ow;Vg>cLpwEoL}N&V0Ty2zLB-nq>mvhvjLiJ#?APf<^tt-Z*PTdJVnvQk+C1>L&weOB<(KZBs^jN8_duQ=)erGTPu1yZ#QQn-R4sc} z?%W+7evH6yjcV2WCOuw`_5m~fd%X*0CFYUZ57Kv3igY$MM7=|QHwL|&a>?~6;f>GL zK`Wp2YTh`g^_RV&AYay36u4%k>V)dQff}lOfM61n!gWS$}(Hj(^PGi!|!z zmkRRwuMjVJrJUbh$ai_A|RX*ioD>L;P{x?S$74;gy5A_Fngz#W&Wy>g}7$ zM~P{ydnL;^n&->9mptD&$OXCz+S}VVFCW!&kdNZ|-FEU&NBwoqPU|A-kJL95xvBWx zQ)8XWYo3n%ZRnt!wCJ@}Z#aKufeel|0{0Cmk9U8`eFW|MGb}G#w|U;c?fd}O`x`r7 zcYZ=?wBX+v{e#K`PTvpLdQfAX_~-QJ`{4=T)j9|1`2&RM!<)zVJX+d`^-5am>vBPR zl>I~Jts8CXdy4p0IsMsuY1JzW_>$KPEN?I`Zv{EX*W)F_?|jI|zk>LD{eBJMX8l5b zS9v}1--Y40k;%(dAA$>l@d&N}}{xHoh4)O&A?|BGCF zzWy%f#rns4>jk=rXZg1F<5*s=ool`FaD!HV?>{VOLtek?K1ZAJc!T9TZ0*NL49?ejrG1RSAXpXNRP`-#k-(}r*WOv7xyAP9OI|#TiN@K zb-};52g7)-H;%7!deS(jf&0~lCyntJ-*NKrh2B4Peu(xG9p?{pI-ffXzT+Rxw|va< z>Al%>Ht=5Tc7e`e$xdwC?ex{{MbhTFfBL-Fv);AS%F+4u+Iz0Iu+Hb}-0j>=7M)Z+ z;XdVnl-IVgwiush&9iZndR}}W-wy&r=Vx@@bnXF%*S=>R-_vCMN-+`qJtr?@D$2hr{5#zjzj}H}9r@tzG~zp2hEMtG@ov1% z;ik`f`)a)Uti+q(6rx2y5IGuO}fh+h=61K zJ9F`@562O`AIUHw(~yhw6HagXZtuUUx9C^B(xcH|lkF^~ng8j?AM;r4!)16-82KiA z_2h@on4k1tU7(-z3SWBE`Jwg~eNFtn&z-&_<|$8Gah9Ka5d2)G->x)(a_0}4FFaEZ z&6ymfkw1OL>zTL1=AdWrJpzMkT=$aup|8`YT@j@Gcp_Z-iK_dbTx zPsRLn`~vTH+{BS-oG2`Ubl3PXZu^m$=57CuMd4ZJK^yUTXERe^7f>D z{2|n%ZdO~X9%a78+JpJlAiXSo8SOlEh0FKU1HrFRFHij@le>c4u$*fP?aua6yQ^K) ze)U*?RsYd@=+al|MbZ98ynN16Pk>&@4e=6xJ^7;XC7pG?rP(;<{32b~73e}#Mpsjg 
zKJWa!#LJruJ)r)s{g!&C_t{~9F@yA8-Fk_Ki9zt{a4}D$A1BAWKa_($t;?ii zPUqCKp3ZqM@iu~wI$t9`q`RYD+(?ESoAml?UuPJNWAlQrqw1Zp{_=$DQ?}=gxq6h( zV{5k2n-F8tL48SIThjZSo}wPrx!HPr=TPTTC7+T<-LsNhX6p?HNgc;y zKj*J;p&s@~s`BrR@^kBn@6Odz^k)4@^<(AW>dF6Y?IwPzeWsoZJ$SFfrBhzcIqSiD zyjq{5!eS6KXfLN8MI`X?J{^dcxIW zX1A1*S??#(7x8Y6`h(6R);rwb97eZhhD;BzqePg#`iJyny%UEG;p(sp2st_Ch9ARp zY3y@l?`}tU?4b82>63DY+t+ExVfyewIeR5H4_1GXoJl`UJ>YUsN__u#F?^z*{q&}S z{IVXxTaNFFH}QJ^&^dSM9nm9wEd2Eju3CaGyS-t~W{=hR=qQ}*c72C08_#4PtTC`8 z-Q@7BN6{W*3#cCI_o7qum2Gsw8qzgRpT<+QbNWco)9dBwoeasV@QMF=XA0jza=E+S z^Zi4ydoq3<@p?Dy0q2MK_8s^0uq(xXe81TEoykeT?ooSFPIjC@-i5E8e8%tBIn5#OE6eaiEVT@47i- zyFlN6;CJJJoOq4}KE0>Tbs(0jd{)W2Jm;l$ZS3Jkv=B%WFM{zdY%=(ea8-t|vF$Akvu&<&JwS&lxfY*F*jt>X+q5 z_}}w;qmMeA>Z^0qIv;&coLAC*iOOxPb9=mT_bHPBo;%3zV~{U&Q>(sd>1Ei%qzw1H zP0s3_t}mJI;rzJd%o~M#Y28lmovYc9e*>66wNP8d&_3?q_Y;fv_dAVdg)nB!rsdX8>OVzkL_;V!c31Us|4mRug z1;}(Zj>bCuxwiA7bY*<<@6xhiIXeqSJ;W-|>m)=045Nwbs6k>#E+) zS2|#0)dmkg>*2RNt{?JzBW}iK<2?050yK=&-7VQpXT+>sls?TgfQ(!&hXiP|F3UQ zZ;o33Z0@`Jdca#94@!1R*834u1@!oS!^{5o*2cMYCLrpMvRCvzLmB&Bbks}a^IIS9 zc$*jPGi#oWC#Gj}diQL6yZOuDTa0HK-!vZP@s6Hxd<%WqW_;VscFT=#5AyqPvxCM@ zy;qdUCzx=iaZBxxmkX!s4a1j>LwEZ)-H3Ayd3f?Copn7o8ZsDpIYa0 zK%L8(Mm$x-Lbz{Pee-ZUr??Q#zgxxfIU^c|4;7Hy1phSrsi^01gjogMyPR4V-|;~_ z<+5bfZL?m)V|2rxF+5RD(98OqV0;b_qJW1~wxhFseTe52;Dv|vFn%mjKI?&hE%N~0 zuVy=+hFz7}8>?JC7CYBOJR5+A`F%S z#hD)Q?*Rlqgkei`TWbd85jnf`1%PuJ)>P2cb z&RbnCW&5#DziH*`{-f5<>&ae+KN#u3m*P0rT;H8Ac&<~hMAAo!NRRm_{xtMjdFjmt zr|+?)TSzU&$n*VvTE2d?@YWzdS(WkOlBg%}Sv|*@8|#Zpcq%^!x1ws{v4cJyjIZ+j z|H*@%e=5$ij>WlioeRkJPd?)HE}!T7G}5CCvwo%IBE!FcpLZ(P~W>tZ*cyQ7x{j1dkF~OgeTo*!9;pLn>SzQ@|PYw?d|-LFPdMD@ANh6wIt|q zehIJMi~H`==XB_PiO#>Poa}wC-l$i+XS=xk`FcZM{wOzGK9(GETrHnZuCx7K4Y?2h zLUI`P)uht>T-pTW+4`6IVVvvB-r0)pPU`&M0(ppg-pzhRD+TRle@Taz$1s0M-^>j< zfbOZL@Aq@V<<5{t%XT5=4c@L}H*K&C(r3d+Cy36sl{;LHI8We-C*{+-{EV;V;xl|P zzat-sFTFX=HS1nTsOo&V5a$)r~Lul57W6P-51w8 z`_#|J08f2Ey6W*Bn(nQq@A38=c-H4K+#?>CsQcM>LJ9MgR!($5~Idp(uA 
z4BYlb7~go#`&D__k6S`{*|78bki&5wg(Xq{(dT@bzVc*A?N6Gqs6K$gXj_-tzKZWv}d<0Us^dnLJm<6#u?c-T`)cdXES^J>mFYBl8v1M}1 zNI1#6_?gM0CuiS%)qQvA9lb-%^?ssa^PwBh*E^if?omgOzTkRS-wT+ViTLN=uzp@j zUN(Pnwa0TEb~ApS3}5+p96mB#ZvMW%Qv>o&r@il&(7E1>ghWF*iPQx1CV<1TSkxMQSwEfd?}wF)r~Saj2Tm9q$8&--`QZ7|8@(PWiGx$U32*eoI$t_owRGJZS35W9 z@pG)Y=O;X(zufWjpyR^q-ns4xicjhDPG|ap+c)eVeJHV>{BK5ox;y0gDI;T1d9Eif z8a&IVOJDMSu5&9r4@P~@QXh>&<+!)VF!>=psruDc9%?DClsxP7M*ma&7wdoZCOsxz#i1$LqSGt_)ezoqo>ioC%mDE3_cQ_tb!H4Pa{hZl_*||&cMe78aUZAkR zX`MpH@^}0 zS{q@vH$D({dF+Q$&lA3s`~p`E%*z{ClB%=fdh0<8rydms+O~zM{OSpVpI~ zcDZ4vfJ=`>eM3Hbs1O(*@_sft#n*Ds$*}38F+-E_p?2v{wv<~+9fxayJ(1hSYz=sP(bze#Ucd=QVb|X7rXje19t4W@wUhd(aW* zM0IbfDJM4?Aotnw`=krsIq>%5IT|)qW>-mXfd3m1FT1+2&gE@gxd!q`4zv3)iE_esA&HL~gZOsShkWpzN`3Dl zeGrjwdKZrQ4G-{r5pq;tii~jS5vRZQ@Z}cPz1_J-EtuT3-`lmEe8bxTyQ{{hu#Zd0 zpIiK0`~5swt-}T&dzXa2YUla9iuJ;9f&X7Nc!rHXo&O~ZbL6WniTh_rCxxZtk1bvE zD78PwpKX>Ndg(=%lj8d)`EyUFP;chD&hmNt4dXZI&^XU=hv5=l(}kB*9_C-zQIHSP zBl*~KzOU%$U6%6sv0elI(#g)&ek9kUSie&8*M?W=S})MNcN#z}UwAd|OINb>QGSeD z&3T{~m_Aaqbj>@`BQB@x52U*u_ewPHD)L47^5Z*IM?LwRw~wpUY~7~k*`Vhv<%<4n zegr(GUt=5)ziXGU){tZSi+ZX(V_j|P64xJEA0EHN+fV0%b#6U7{}}6K=_5{OeaXjK z@gHPwpglkip}fakpREBIOwsvNtxrmi<@a0u5*5+AMdgkS)^2(~P~%zpjN?spZaL-| z(o?c)#fN%_+l%BI>B#so>-JjDWz!@#xhoq-oo`ydNDp~^sVBApu6EEBcShHxUT@8J zxc~SgUk;%KJoU*SeA36BwUE9E#3BvFa6IZ?_80Sm2=^~rW%erdZ606uK|;f>z0m!I z_VPu^9PkwLQNIl$i}a=5L*seg>h<2|iIM&JVZ_k~n)$ope5%INsa4*O$73I$$u}p6 z@9lu(Y`vii0k&8v@pl3 z*=n1d=Zq(2=HG4c#O8QWJn~1m48ZB0hsHVWdkiBRPbgdqpZMo)F!1dAL6uuPJh#XF z?yzIHkG}y?Q~e%KezD#4T_xE)8kdUgm)}=-6ZzB+7VnR)_)5vgt-Xr)(ubXnv*9Us z?6>?HXVQx!--X^^)3WzV-H#zXY>y27l7PF^^DVajNqHfECyTEMOcInkf&(o*zO7D}Uaeq_wO2?21PU}ZHf3JE{uIQ#Nb$(A# z!N4_K-DGquSkH2{;a#QRkNW9B%bwj6%J8r9b)~5b!d{K}Q9ADX9qI0PA8?PqAnljG)@?V{He~l#QICN?l9@;l!MFhQ~Q3DBen?3Wz8fP z_2lhVQPGpWD(SX$ z(pYycFMR`H>^tS^O_r%Vt+VCR*&yO0<+~I4wa?s$^@}~YuTQ$UzCg`g?%0CA^ncO5 zn?QSo_D{)1dxzt>zmlEeC)_V2Z!`GKCO_qlYm_mzM{Uk6v`1I z1xm@Imag{FJY3^?_bry3Oh$buryP<_hJPINo&X>AUiFg(sB=BF9rszd=Og3_<>o>2 
z8}aU65r1Rnt46QvugZH39P1iySiI(~vKuwt)f2B@a~$_IVvQN2$geDx(Yq=H_+u|Cqn)XOsrKkt0h_tR>_c-Gs~FSB&fJAp!24*LNug1nxr zBCPf}(2oEd`AH2){!tHc9JP7fF4oTDY12s#sjuj?PA7hA|0tVxMLTBt3-ppMmcx~d zY(9v1t3TrJN4)Tf-;%fV=$j^olsm%LZM6>YA3*?4_EDzCuJ(BDKjKBjOiz$MRm4n( z-{1MO=snAAydd6E@{*O)y&VBKt-or0Pv3D#51(*2HUgZ!+t{<5qX_YoEuH*gPb?)L z&cgRvxMAU}A70N~82>JJpLAM34}VYR$Lw56HqJlnaM~~6`p4$Ny#EeVi29#;GP4_E zUAm0>&2+WR-T^dD_dMe5uKtsU6w=mm^$k&&CD5-ku(I zU-r1_tM5on1|3EIi2gmDf6|^$At&V1t^%J1a(vP{g!+^4N-vV1`E&j{FOh~Fk@iBM zS-*>_=F3iCz1JXSaXT;U^W+njkLN>ZH;lP@P4=80_5#qepO()bFu3yhTijm2I&zD> z064c7C`E+7#_fgkHyRz~^EbG?5c3z-YX}Ik_R7US4pH&*DzJkN89KmG529TI?k0#0 zB}aT?|6+Kkmr$+oNNO8TiEIZ?jw+t^IwhKa>13ab^m%w1ZHcNRLmNGO3`9`=i*yYG zkJ$;LFXVa{g_)gzd@O1hzS#+puKD^j;z_T*|H^!3C%`v50lwJ@@V5f)7<{u6KIL>t zFN&YCBdJ$iU!VXuhN&;8N7$2eZtwAb5e2e;Ud}KWrj<5b=d#3LJ zWaDehYnu7jTRWFa7a5*xd}U=(uG(kO@wGrteM!IP8)JSSFV?_cs4u*~EIK}dUu>U} zeND6}fB((LN3<6m`NkI1cWxGpk5O;Q0pZyWY^Rp-5%Cadi?{Kyz<=qNB7f@k%00?= zb&}{i@seNJ1!o^0e-6DTKR&+V<5uizOHUtnHJ^P)jLpygv>mvfdi#cL_yu|f&9gMF zo7GA`Xc4+Uk)4MOead+y3*={v*{=vpfBgfN&F-Ddz%fK$_rJ7G^K!_M)*rcDHvg?} zeT(q$zSaF0fBE|%AZLDL(!(Q8F4oa}{zyJiuhdF!GXQ-*x%>SNpGLoadBVdQ?-rDI z(9@5hK33kP9v->LeVvQT_BVB2;dz($fm_XR&E7%P`Zm}7?<|aKR{~`={-ZoP%@2ES zz1+it<|X=Gk-lq8y+bE^tox%Lp8TNudPj@rRoFf~50ou|;{+X-Y1vpRuQucFvFhGL zg@YJe4d?afBp2o6mrTCgK8M@`pW(UMz>@Ag?oZa;*Lhj$U!&tuCqU;nbY8qR>gP># z4p{G~WcXv9p%LrTjgSM$H~C09#80iOG~yk7?o-X=_^)>#Xm_z+>s)Yl4rkWsr@ed% zf}ryhvePtA%dZE%WNkaOkwZT#5X{fFqstj;~t( zAGp=mor>k?{K&{-UT)nD1MD~ZKrnJJ;^Un|(r@G84X)P*HhsbMORvLa^k3-Vg>-v6 zHu>Q5d!5f~Y$SG!3$Z@ai2GmCkEP@pOQ)RP@Mk_IGD7#VbgqE!Sg}dV9c$f=ig8Ky zS-s;?x4U3x(P^KRa)N)!z{Z0XKd{jmqj9f0+GTRul9LHA+1e$1j-Sl0a`tpRXy+M9 z$z7JASWX_E00SFo_0xaQe2sIOZ%jH_c0M>S59O})w(ec-+M0YFIeET|7L3N}d^x1A zziW@zzd$d?#S@UHy_fm|X(<^6wAY9JV)PGe{Bb6O?lJSvo$g9<3OUMMeaV;M&v~`Y zj-Z5Vd?!D-ZeDww)Hmlt#d_`O*lGOE(`oi~wDZ8m8!i99#ve7m*sjD!_(j_ncu2vX zu7PM*U#Fwo>_H0~>o5Er;-D z=ll0sIqIi6uUgc*we5A&OQqyK^Vtt*{1^FLha&p3Hu24V@C%k-^Q@^yT(M2XIj%;W 
zJEeZYzoZd*OZKDeEZXVIw}Xgqw6i*MNibl#;B<58{uqlTfs^r-Ed zsGn7CbHrRfh6pFMM-Ezq*5!r~PrRIG(rH}kxx^bt`+}8GOU}|om*!X9T%>@LolTsq zx8C{G_Z!MfoKHGG$aNR1Pt>EE(|tJ6Q?gM7?SGfYPk=1bZ{f2&Sk9jFv8D^YFifZO zquo(24qt@pLVW=?UtfXINZqn?u#FK4_N!yS$@>d^24|McSL?4 zF0ys{l!68K?U%pO^Iu_J@>a{|{I80vx3BbbqIyqo0`(^UWsgZtw9isG)am8&C9_Y ziGmvmdZ+Q*ysB-Hsg!w z3D8fU`j`3koc9;5CxT9W|79}X4Ja?U)DTQw zj=zog?o@t0_A=(3)VGuZgzUF+h9KX2Ui(u z)mZ1d#QD`7tfmYD{ruzyeGj5IKJ3AHm^q|tAHUXlvmxlo-Y>3OxN*S_D^Kq~=sR(Z zRf8EF1T4O%+qmF1r}yq%=GXczH+;3#-muN@4ZB@SIr&(gfAL)~>LG|KWLT(>{NciSTTU>E)F@x$x2kRNXt9a?9}^U?XSSiN6ez^DDjr&kul&mcbk zeY9t>;6b@Z^t|ji$+OmT7s~N6lVj>Jx;Y999PxY#c!aOK1QBqxt6abI^l4t>zT`YF z*HrD4>wo!`rv54=&wD)_T_XJ((~V>Tc}%~Ce;ip)fR4R!{;b)~qmE}a0O1Dudd;8P zV4jUZe}jCodx?jVQrqD2!Z2N3c%$C={Dd4BbU@9g^ezYW;mUVYOSmJS?n8m%SH}Mv zJzevb?kK0S+vBN+SicPZ8jqJf%W)FP&_fRIje z*8H+(b)Nxd>o=kQbgs0x@4lzA=5(xZJi{0#wz9l&FOTz$3d6yFwj((=RK*{-e(?G8 z@Jse;F6;~BXZqxF{D2$Af8rbRV78ye^%kxJ$le;-j{qF!1E=BFCce+Y8NLfa2psJy z!?(hMcAt_zNvECryUOE**WZu75cK&xhW+_BzeEb%aUA*q{M0!Nw(~Xl{uXwL&XX*5 zk8{tG*Ue6l9P1nc@m+~_&g5q_$^{+d`1l1Mnr95@n1NsGblIc6Cd+J`MAxi!x_~jB zRke6vsKvO%ctSLGx}H?~Hp_*w7Q7SWsR&)+@0c{(evbsTYd)_K^4dh=A4Z~v#l zcr?1+u=qT@>&>QqKK->k{A&x~KLq~g?XK4G=+y=Aqku0ZKkD&*JPYq8`MrVceDM3? 
zJ*OqU-ZFtk0gJP9qdM;*yFb0n#5#LN-{ch~Od}sPFZBgoW2f^$_iSa)*0kQT%i(0_ z_pI=F%;XxDj`D*Q-7z1X>~(nBzm%t*xVKu0d!MykKCh`g`Yr>M9xL;pE}ZD%IwaUK zpP%25L6bWS%b$+Pa;rCRf z#}u!7e1hk^la}3a(+AW`-q$Pd0WvtQM^xdT41e!4U$S;ShB%hjbA{8T^%|{1OzuV^ zoW={zD{VXrfA2-)F4AM&p@dxuhHvlp{=Asp;dry7Fy6d)x~VTXZ#w`yoVQU1MzPt;D0Xnj=Aoe>t234_y=8f$lv{1Ge67Wh{|^%H|Fr_9SN5AvtMn(=gV7PD6eASh2{P5 z>1KYGw>HZA>0CU=iAnUctly}eL09GCznP~TRFQ8weBwcIcK)$~0hRS-xOM~odYHc6 z3!pszVF`HPR}s&3Ec$HMKg4g^RhD1pM-isex+10d*LNU4{3`xWhflkUwc~n&ks+-Ke4fWep;M0ZF1-X3w9+&bBqr6jS#|7m%nYa)3tMjK`?9BQB;rTt3 z#o<3Q|NXCaT6yGe7yc~&)>r4LhpGrqhu`1v>U{UN9F9em5?}Y$dEEZazewi6!@1xW z+n@DMpFB)(yyCple60g@hd!fyN;1GquvSZ4*RyY0~LWQCBI|-*nQ?D<2Sl5d>MYo3Ck-l=`dgGGwc^tWU2N2 zbIZsu-M9~TNk8nStq7NrCnDYjop7Uwsr5Z(@y&eHzTQ7(;MZ0^!XWU`ibXu~9fn`) zo55cX(;wf04h_eCbpn2E^-nU0c<x2*Pf|!vE#K}e z%pc=hHh382ov?hP79QW?4QlnXe78Fs%BLIO!qL;qqX5-bzu&^+Ti)sMtRLh1TmXrO zZhXt2`Q*!><-5hg<6GYC@vNWa8_L4`F}`Johmmi_^8JW~$G7~b$0Ofi%NKB2d2YB^ z`&m9OXMD@8S-w-2Z(A1TkMS+Hc^Ks-z*Sp4VBzsC|HR`FS7m(Pby=7{#e<9!9<6B(530Gx&-}gHl!gS+XX3V$tvwUv9j&J#h#{+KE z@?DyR`D1*`$2^R*8O!Grv+*teEQ332`J%sN*6l|T0hWw zKtJNw0FL84G0!>qN%tTEaAVM?bi)4_>4Q@_dLQb*O&+iF7v;Ej-QVHo$2A_ams>er z-K2Y&=NtFt&CYLV9U^;=@42w^k(u?I8}@wE%V0$^aOuSsp>xI)z{_&9|Jv=d)`%=~>5-`NO4u5seRc=Hp6fyL{iD%cuP#od?i*RDM1^8g!6i%JpIRv_t!0kfmb` zd3jgDudVQYa3;D>y%|y7y-J6}RhdrDg-DTna&Kc|PW7m58`shbv zPkC7HR%*X~{5>Ae={Zr0PwX!YPvcLO@emdEt8&=WdN(wU@2ZH;>5EQR`8*(m(>}lE z)%~3xa(x6o7$0X1%pYwubN-~yJHJYa+t0LT zt(-d?pXgBg>-@Xm8moe@2b**O1>~UW{NCg7XO)lHW`3(z^mFMK>FJ*Ss|`@=0Mskw zyVgfD`3Sq2^DU;Amxg?VJU5>3dP^^8+(}>Z`bf?tNBte|bUATzFIgY_84G#ne#YtV z?sI>9llvT3SU%|_U#OoMo`$?s8HW6uoW#2ZnVh^7^c@5coZ73r#N|YCq<4V&JJ!0K zpb%@nBc6})Y5ZK_;qmBq>8!U;dc^y4cdw_XM?zj=JxKjoa;5Lt7u%Eih_2@*5CB)w z{fPn{>5Ja)bnhas@3>_sd2L*B*znAanwPC(_1xs^Q0jMMQ=TrqmY2jkV9^fjm*|Fg zk0$E1(a*6ruJf?g;dGw!OnUYHKWpk$-AmEAA?d?934;@VO35}0>%3tZ??==S!*?XO z?oGN$pT7s|@<0x1oGN!+ZE{0=7b3nE4{D0vT3;JRJ(;h*#2c7$#jx;e{#owgXovnX zYo5s^mbgP*bZOkv;J8smSp7u%(Hb`>H^f+5eaiF*uz;?9yo<|n 
z*C3tsq#monpAKL8uE;;Ve#k*^=KNwGv6yJnHBwS{<^m{ms z{yO&F4HlvH)qBl#ya&i~(p#fFZ}xm_uRlP8s$8uvvK{D{{s#6}RG#>za~PzHd?_Vv zKXyZZ(#;(<&&J;ww6FFTr*<8%1ogAxJvgl|a9kt_{iXBmJgxLD_2LZTN#8;EwTr(8 ze_h^Y;M2Z3v5i{Y9qIoCd{31=$e5oS^mq>2Jf};!-2lJV;p@l@GfnH-D)%um4^H0$ z&G!eZUyMJZuc7azz2N!s`huuP=LhreprAf%Z_@dH;C$lWXMTU|E%N(X)cfy}-!pCa zJ?7$UdfDZ>Yz_HV<(u=uDsoEBxqfHiTMeDre*pQe$oc!viSK{x`Yw}me}DV$Le6jg z?}YDoQtBM^OOx*|?|*lEzq}3KQy1@?y!(LiR^>e}=O)*fr%=yIzN8L{pa0{x$!C}6zdJs^mgBSD z!y3DaqdMenwRyJQhM^Vr%-lAh9a%-#&zo@kiFgdoRFJY?DR*B)SnGt0r=$G(xIpXN z;myzDSsH8%$RXVVd#GyBNoEfLuh}Qj4~LPDbjTi?23k3J7rD338efLwHdUzEd7!7nA-@wc74ty?6XyR?u)B7s{}4wr{r zE;)?(!CRL*H=MM8t#c&9DBI)@bZo%y+IjK*IKnP}{{HxPPUqR=?d;>thP=E*e_XsC zG2WmatfSr$Q2+efl)E)=N$wWYgIEZF%icZC$=^*@zZ)!FdT`-*##M@@d@f86d3>vS z(Tlb752gptJ6Amz@_1IeA>=XBhar#OF`eJJJpMk;(|^bIPp>sg^0z6EpUtlupwZS) zF2GNIAE8#R8bKTf=%g>MML%M?*2mUB4O)1Y!&OGz*S^p&((V0f_`8W`!qeAudAPDa z;E%b_^{wCKdc;j0=6(c!a@9Gtp;1Ch(W=?Cbo(HP2PI1}Xg=7bN5G$rsuawKB0&kN7?-;mj_0!J-qc zPaFL=L>S=Ir&FP)blyt*G&=%xUW?Qz3{uGUdsuE^+C|X zeZKtqhtadm)9-RGxrfN$WWVUVo66yRu@?M6|5@?Z9q&CcJ}pLv#_!{xgLSP0ecAfW z3yw$g0*>eT?~_O_+3$WbC&#icb>Cg~0O5{;FQ0U@*}JS`7e+a*hHRg>iYU#KPJ#~% z3(uaCpNo-xJqdnqMf%G@zv@9gG9Slh`YivMGn6mIbD_{BHU@dN}(>AH8B!Nq!Jy)(w6F)t$fuE^?@7QynD{dRP^ zC?71#$}icePslC!gnwAS^Y!2CUGz5Z6My})i{s1QE5G|RdKKzRq;OO5o_V|T()O^ z-P6(gcX}tf*583##ODL!H0WTA_QSP5KY=)igxOiEbM2}7bYr7Pgd?2M4Zn*2)8Xs= zg4+kZJiTAQ^Un~w`PzA3Fg-r^05ZUp;`>b8pJEQZBhlZv#qjD}&ICl!=mLN8bWy(Y zVcqK>pMM7#SZ)P)$9Azar?U*djC({?__9mMZ#wl?o)00tV^b&)j{Nd`9;O{jdb)R8 zax!*<`)>bL65W?O1|Z@QpUX=wF~CaLQ#?=qQl8I_KHJ|d+8wM9o$%|sr2U<5OMV+= zW&5yqIGoJ2_b>t2DnO>gudTojIM$o(P#gE}n{|&ecHwxS`8MU6eCGK2N}-=W z@}=B7s@L%lo%@KB~Yw9lqA9C6{_1a(#?P`ri0@ zH*2zXpGN(OXFUZRPT!yHe%!;J*he;)Toevxa967>yUelf4s_zu({UgB(|1>gH zk%4kT4%Jq8gJt9Ny^f9J(Z5Gq&*49v@UUFwGdtN*_$OCx)rfog(%ZT(uXCts z2hu}Kx_>$(dEV$$FrD>p7f;pFE}m6y7mq45<3nF%<==`#rpG)>_E}cG;&XIW2`_rL zF*o`v=ry_opNluTBA$Had&bN|{}^~ny{hqI8kGh}G7|F`jeF9EOt<>1CI;~DdiNP$ z#m}LLr`+&8WTw(rza3e_7#{}!l8p3OFzLS5{bQ(v(X-vd8ejEpM`e@8OAj%f^;Ukp 
z4>Phh@*~m83A(y>ME*x2UHRi2FZC(kXJ#t>k%%7&`Z&(y-!oZ%ljB(*c=>J+vot<1 zXnL)2x1VpTmz*H2|D-SYxkRncQooY#LmEGKIbOz-{&w-Kce{9y&^F%MtquNa4*nR@ zjsLmwO&*vTuA=;-7Q}ZJNEBaY(_eU+O&@Azx+ z&H&TtYVY$Ov_t87_IOzH-eCk8qw?w<|G@y#S(nR7@LTD8Kgr-DpX#N1pDI`R(pjfV z^(!U6V))e#MLv>$BcP|0IR8{Hy>Co=w~BPxM_OmxI)+F%ohMws-NTiu-JgJ5SDEf; zv-5(wuQ>O9hua$Vgw7u>z=KXeJkr;~t9y-__iP<^e0mRJ{caCeZix72-B&$^+#JvD zJr7AvZgDutMJf524#qStuxVGeZcsVsa7BKy-K$8dU1Zt7*AEz=*2~$hgNUho)S~Sj zyaV`|UtgW!`1W2;*LRI*SNsNKArH47a5@()`$OYQKHtAVzI1lK>7De`Gv;rV-8Spx zbb;Jmz$dvAKFP7(TNhtQ57!m*^o)}3u(QsT?-ArnXGfi$7j83O=MiJq_u2anh^OOx)6-oq>-}xU(`lZYzU1jWai4N-zo#pIc0V}sr7zuS<##_AaG|g3 z`raMFbm=k2Te-=@jfXs}@va`<|I&K5?n9){N4~HV8aq9l9`$<4o=FcUMn{H+g9BV+ zo%eU07uUT3y#v@-=lEnVsedcIs7KPH4&NR2S|$3a?9)WUg_+UhA->i(eAZg zSCGO}N+;ea`uI5dHTfd_S9^vb57SSNzT)`9 z{>=0t?s?NOKb^kUrSY%Q@A1;JTyJC!tqbYh0IkC_ov0L_zW9cfTiFnJ`rMy}{sm|< zbk)Lmvq#&+gB5MztGRfspXKcb(#!cwem*#|9g%Pqm9y5voEPQibF$OfA6T!EfbR}? z$t}}aAH{QC*fPHoe#E!Tt0G?WDXr7hOZ&aObNn*CuXZ|zG(U`bke{TlUA*zR4g9i2 z;Em61;Hx?K#pbW<-Yt9vU3A%ec87--miLPz zhun|xQsYPMmgpa3B3w_bi|bvgVU~mVdi0}GveV%7T@Jlpqx)X6v!oaF4wm#9?Ihx> z`EZ%7>+0Qz^jP%!2dx;pZwB>Y@Aq52$AaFlFDh3!TrJ)yVmWk`8^e$BI-QMiF4k?N zcQyXcX&j32uapcJ0_s_oD7~Y473VjyLu6;vZVfs^K6-BOaB;p!=Cb+HYCr^32-U-t> ztL$FUo6#Bdu3R7a!oJYDq4=fupVJq;-L;OItuIIYGyKmM>igmjlj~ZnCuZ#*dR=;6 zc9Z6R>5Cx`L06+ntpGdeNvF55%LnqtD78p)KlZpM;~xLG{nEUO;{qv@T`&~y6d=UJ|tq#X|qUo>q zd06{Bnin&jC>781NiE~VZqHYIhkN~f9xuC){Gv)LclKF=)-R@E#~7d2I=t5Z)qk08 zd_Lsq`FPUR4&M0O2HyDGCcc`(a}1fOpYr7xUqJ_5rI3Fl<_*--#>d+nPVzqYNe@q> z9gL3wKXRAHuTLW#_&4Kc2^zRi6d6>TDCFFY*;Yzf3 z5%0cTga&>V4!1ApuRQGGfvddZH+soc^d~Q-d7c6E(P{lu`w7@95f(YTakse-r6^2k2~E@TS#4>&YY0j=G<${eQ_}#-B^P zUitadDbPc_e3vhsJ!N>)BQCeP=i7*U!bAA~9lTIj@2{vIYd)cOn;!!#9rZEY-uV8C z>?QUquqd;OWFOK_qSLt{t(T~uJm$&C-qZoJ_-OyEoJSl`c9*`-l;OYM)4R*=KNfl= z!yD;EzUALFz8Cy^IS3LTvwPy+eY1UK)m!Ha#GkCZtDFw4`#z@nsGgY}>hr}$$hY)l z)-G2Bo|WOxxG%nG-EQwh@GsIcK56|;?J(~7)eae5${*>nOQ(^KS+%Y;6#UitHq$8@ z?c&*k6wiK;pC{$>b7T-)J{}^P#dAK}Vuys?&ho1{xMQKG^LR+V_FJ_6uXXj2_aG5& 
z8u?g{ksBwx3<_ZVK+0rWgZssv_m*V)|a^ApNoE!*)P$*hMxBF z^Y)GLw@tk9w@tk9vrWA5vrT*zMXKCvJ>g2HL-f%O%(tKHmP!F%#!nD!{Jh@rbO%1Y zi<7M%MErvFgNR>W;2+1(RcFXO>ina4B(tR6TXBmGplGfO8X`ub%vo{SX}G z^DA4v`7K6X|3e=gB>Z0@Zcp-y3*dh1UqAFYM*cwGM^6y$BJ`b1Pj`8}wcg$;XNP|w zPtW&It{vRucolw+U-0#L;?C1`^M=}I8MpjBk6C%Rl9`OnI^5o_-(*q?{I?$dfjx}7 zsr;8)N#9M7F`l1-XXkYu{B#U<2-Tx)gB;Se_UB|!<6Zh`=Re_36QA%PFCF7Q&VAzG zQ(kVrMEEg>%k%YTS&x8k?RTfGo-Z<8@)YeN`PO`h<4+Z+%bhn^0h0SY(4(9OX&+AK z8$^fTIA7|7-K=`<&Gz#-x*~s0-YvrZj4ok)UnSo}U+}XFalZkaTsNu$mi*j|`>)Dd zyW~pyc_zG^*9fo9E1pFA5#OHkJCgnQ&N1}>{Vw=VgNE#$+L)Kr%Fj~=vh~om^*MEb z_4!}gl$)1_jvSx#P7`$)=`#I?_S)O)>kIAc=jA+=D<_Y?6MT|=s`?Ty+i~wx_`z~e zUu)Ok_XKd69puRd!}Fk`Pjs+;59i9&dP6pDk9pQ2-iT=vWal4;kq)-#zIsz$yQ?z8`q|LjNaf?w(L_+f-u?tu*+ z-UEHk@*neHHgC`P0z6HJddIoo*78qpudVR0o%x85=`HnN5%q5SC-z_byF3{h^bqwh^QqmmPO@*crymdcvvnAR={O&%!k-Rb@;iqxo%l4==V9@? ze~HQ6@+O}^IGo125^fnYOuE3gLGhRJRz-Zs`3!!V(|msj`iD;P#`ZuK=zCv(6CcZX zACpe)N%{$=_HMUt&nmZzC*|$pX(F_XCyDLitGW1ND2(kvxyaAM)gPu&DCyNcvi8{) zteXVibsm87!+MNdk3Vp_hoki^rn5eZC!br^Jp!NU5iEQV2iHH=M?Cq+b&q`;nd0(6 zc@N|of)$JT4*o5hdr{x;q&W6 zIYg!X{L``dgB6LSJroh^nFvVYoG}W zcJE+z@1eCFmS_7!Lt4)~==883m+_PPI`8=N^X}I=9oxT(c=dCgYvp})eQrEu z{9_~vZUTN4KIM#l{yVT0gp28t!}TFA)KAR!82-~u;C~hW6~CSVIED=${Az^h&w)<5 z^{gD6=+^plu|Deqo_du1qNq=qpXGGHr<@e?>)wIr*E*=qbBmAa52}~+Y>`h3>br}z zXx3ljR~Pt;=}XHwFRsfSM?L7+u5`2q*W!IT#&;mxD({u-eb(c^$MmdUAxy`0$fm1i z?^9&uKpv?t(!(yVdcWd0;@J; zQcC>&S>20}T%1JtRbUXm_rBNbmA>F~@4La!CkLFIOfRp0*yD8`j`&DX?NO&o{HES- z*1J1fXXidA`8{;0odqHVm{DQ)mjCAPy;&BY%Jc_{7|NUu^)lHQTfBNY+{$M$U5-|4sLME!?) 
ziJ&6^U%ao&bk;}l3+$A@x4=$`c(pI}>t^6#d(tAU1f5-or(PL`zZJgDIp|$_^gnMW z&@qVgvd;su``V<>+9~XyOuj?jRKIrd)=s@Z3YWK=tex7#TRXLhuOh$j9|JM$x8z5Q zJ$W%pfqqA!07*txTQGaCW`R8!^viza_yUy4$TrWHtt0e$nCaF|k&p72Zx{I1F0nt6AD6JqGcWnWyT0EdC6qVmEeoSv6^4I6_U6cYnF{z8eS-Pp zI5B{Z;E&Ej74g<~J3%_9Dm_s?kFA9KedcA~FW<9dskaNFMGqnN_WF7S^P{RsPp|6} zy%Sk1ckc>v+2z>b`IqB~`=Vn4aaH_38U82X(oWso7aYdK!){(I|qjQL5gfh9vOw%L4| z_8cWjawB_0_dT?4xBy?D5c~<&p7-pLIJV)<@8fx%%Yu7m}uaNO{?cu+|eee_}fATdqH+hut1& z+PRk^6|&M&KiIj^&ZT6Br*mGJwNK8zjfM}l8!UR}fm+E263M~HGoDn-&W%$Vi{--Z zE7*J4ds(2L`wUI`|74zaI{8Y{_f9zlt@C==qap8^J-Wt<1^=q{qg5_*c4t=qwGQXm z;&0yG>~E6}J8ns@$Fp-c-OR`p9?&{~>{{9Hl_^i}UvinXqs>de_hHnP?W%sD`}q6r zbNJ#s@qp|MZ-(SR@ImJjbna36O!~fd(JroS^np*~XY>2N=tyv+i~R4t*Miyj(uI06 zUiqXSdb0NSj@K$T;9JShLAJI_Q~$g?=H+O;R^MxsT$D@xU9R#H|9-B{y>j1!u2s&L zC$*csfofw{TY4!uXgY}caS;E@=h=F0c7BrMkkxCufoJ23#;f8um5+zIZ3_<-)i$0Y zsPQY0r<#k`xU<0C-T)xDY5XU>nZ1qvLPvOxH~dY1F5m@6JwndN{?Yd`w2sSk)<^M_ zn-)7R@G-u{j?2-t8Tr{RW@ktHK%Cs3gHL(i$3ozAp1+j*n70q`4kEtX8ST?YDAbqb z!Iyog^EY|@$a=MlxAuvClZUtVX%oME5j@pgd}e2Zt`@`tU}_ulo$TPo3=_Mf*(F9naLQO?S!#3~yhd0GOalgaM zZc(}HM@GlBp04u=I&ZG?kKL)KPyUSiQ-|EwxU=`B*e5}`)-IbU_04gJ>%Xi=SJ2gc zy(MSw@94f4+kt#bS9^M6yZam`SRcxl`2Zr_dpf>3Pxy1??{Sn*w?KY!@k|$gx(69T zc#Zpd=WK!eAb*Se+#K?==_@YpW08NG`S{lAkYpoBhV$aaRA} z{u$f+!y8?Z7UgTJ%Ts8_~saluq`# z`e|d>uZ@ZV1~=a#6b`^hwA=rl+F) z(+53cdYAjRKkk5h@6FmD%r!sSuXe*b&ukFwsk%MzNo&Tr?YkbkkeNCdHIa)AL%`Xm0%b7?QZ!@ZS-mT zsrzz*tBw8(kMDCx-5-@*l7)Lco%2xYO@?g#9Q2Ls_H^0H(+DzYm~fD3t_gOz{MY@d%DonC=fGR+;eL-Fx!--Q&yEDV)|s{c$aL06@tnW3*u#O3@h$o? 
z;#m*cTgQ=Z_Hfjb<2pra82(oH`)T$c(@+tQ+N+&5K-k)*u|kgc%F{+p4;W{ zL!<8NTq@I1bkZ)~+Nlk^wNsmTYo|8x)uf647=Bwjq1+a`80|xswbO)$^=>}T>$kKM z^3#nx;6CM>^%&XjVZF;L{mFD|r^v_t*wRk9_?C8xc=E~G3Hhv@M)3oVfGQ&G`pLvPSf0dFSv9_9Bh`y-sPf?Uopwz?uldYJxn%&`1c~%^?cxoVu6naSlTNL- z42Aute#><0Z*AhOzqN_C{?;bm`dgd$D#};6$Aaz!dXbso*e^)0=KmE2;MhM}`djcR z+m{2J^|yfUj&z+npuAasi+J*{rN8CkTl!nXlTYk#)PtNKR6;KfBTV_o^x=&Nz_qq3 z){SC5yQfq8`pCEd4(pu(r*)Q6vfkim$FgdBd-<305%aFq7PNT}<$?1ulh-{SuYGme zY0O!V{b|{4m0pJ%Mk4baxWvP{_s?;FF5hoWZvY?5W15Zg!6)sLOd#JTq-i|VIxzb? zRcrs!71kaaKly!cphUec#9!zI&_jB)Z>Rbe^+C_ogknBwL3|Hlg?WYZ8E+QOF(xyz zLwf!{>fQt%$}f5tpM9}YC`+bN$iDB{g^)sN5yK3|GNYNXwGpC4NN6ug_KFf^ZB-&# zNJ%@DvQ=7b|NA^MWBJy1{r%q0|9z)Vo;i2F=bm%!x#yhw4EJ6*@BG1&KfH1A_Nfd` zvlDhiS)EaM?&4UEfa0|$K5=e%4Ey(Zc1L#%N00X6jt`6AytjG!d=N2bwKyF?`rj20 z$THEd4JRF(bZ)d@695lpaMA(vP!4vZX{n^kNVg3Q!B1@@JyaoZf0K#Zs z`WvYfZZ=Fnae^bqpLxBxShyLJ6W%jtSAEq&htsJ)XE!{zf3$A#+VxWqYecf2Xu~NC zEDww3uZ4U1^<{rp%ClLPp4DB!cRI?YMx5d<2`Y@${7`(9y?cZ^M{buE!AvTd5;fTl z1((+*Ia_7j^0`S3j;0PDqdhwM35T;stzSQRyo*EOfBiR@=EEd2qttyFOsYDI>8s8P zrukCUL#QFX0ZcU}3!$CL+@mAv9aYmHS~re(((0skr|H{~+?NQ0mr%r>{g3_JCtve66L@454H zqOn*sy8jC5hAb@kvbh9pKS`3TFh4(P7k6pBIq#MVER=LVNd|lXmmD} zNhb%BsGM@31{KVT6h;Tj=Nmxw38s>K85Am+WKCm*29u*mw2;tXD%3K}IwU3)Or8#E zkm;N@KvimFC_n&}5*(!-zzCtLGsEbBlhh*^%pi5p2HTIp3{hn>7{RPRf?#z(X%<_Z zx7aZMa#owo1ZaoW8I2Vw z#Am)yHiJp_r;=EqKw!C0nIRAjG3eG*It>Ud zI-AChBGDOak{?8sN~DMYK#>r2g9($oEhrQw;CG8~AON8CpmAgy$q&c_Iyr=DNe(8{ zeW@hSXb73@8{o?T*oNgJSR59WP3CYSAbCiPP$>Zn7JKUYGRbr>G%B4!;&2GBd`K7@ zbQd*Ik3t3Y0G~|bATo&^KdckZ4m{yTSiB$R@6T>9u zvzh9lWHv;)fFP`Bp#fkJ)JQgo%%rgcLa1z-FLyjs^1#pmb3wF6r2u?_8AAe;6v|`( zc>s_GH^2g9O(l`J+XR#uP6OMDYiKZy{ntgI^_$ouz)CU!Iwy82J2EpILLcZU0*o+# z8cZRv89`J!3p~Ke(P7L|Xe?-fDHcE;aTl30x+yJzCj7vnk|M~giP3Sk2B;-}Dqu8d z25`N}bPCm%85Iif15MNq%m~mNV0t!KdzK%S$(yiEBpVC~T1}YmI2;ZQn8XODG9v(T z0hs~zB!#i4u;^p~`h(;r*nyp(Nq|{%aR*V#l#K`)9FP#aY56ijd}u&!LR|rzfTiQ~ z%i;<vw4?V6H$SLaP8NH6(~s zfnosKfzUvqKzQP`3?YRa#tvWr+(!NTra~bC1@uayP$89tP5swR_y#k;T0*1XVgbT0 
z#Nr&m4oRS3atNgPz@!S}=wEl@1GWf=W|$;DXy=1@0O}(Q+DZQ>qn{w)f7Lj|DZLs}jzWFfX> zGowgke=>~@tszX$lN|UfP1u6=L&<=NsbIHZk*2H;EEl2vR3X(0h;6Wo!eSTTi~(=w z@ODgsMmVJffGTJxR^(vcFrW*8DM1A&3@}ni=5XrpG$<&~2@kn-Oov1h|0@T7c~) zPJMu90Aj%MaB1Y1I)9)x2#H`lf!v4+g=rYpdAPNlsS%U=GpEEKs`EwwFFAy^$?crm zhW(@foTPNFvIKAQkSYLV0Y`3eD*>H_1{MvHKz=|+fy9O%lfj`Qm|l1VxaD~Z0_F|M zZKTq<>244v;FdWufGcwSm{jVd8Bjdzqcv8=5O3OTfKZQVyjChHw!0rGaeq5kq66 z!X!uhZ9%T$`M2e4z#$}xOZk6W7)%X1T>R7GaC`p15cW?ixQBwn7p&dHuWz4EATdVO24^>SDCw|xA^IsbVp|8(|~i~c9Lf|LFKejP(;UjERbefqkE z(%_T)?-t7HcX^z}_}ktnR6jDXn0t{q$7wHkx1ZLtHKzm@neZ+=CkSWLuKF|PL_0&xkIw$f9a85knyg#u^^Nwaf+hzk(7#Uc{ zpz}S?rURSA01S+{VAF=xJ{nI`hfhE%BsvvvDrj=5NzaKpAQ$L>MP*K#JXdi3MTvj0 z3gGa-@q11DTaO@pWm zI;%tWU<(cu0VX<$O1N$S8PfZwU|5yA1XD0PrEyr8@dh##DO#LAg_?i+gbSEB>7@lyBp#hY z{(KgTdlZ|tb4+n;yYPl-&9$9RYie>Cz;T4PSkTc1n&`BW;BW&rMrZg0{Y&PG3Gq6u z&SdLQ4J*jBIU$>-6?CUrz*cBC2In~~lb6LSG%a(o3wxe{cNyg1m{w>BmCfZ~$b;+( zmS9@;WE;>hwf-cJ+k*DAY}Y8dF9#>nk|$f_)|%cZWbNZJ{j_RO_il{e<#10azsusS z+Hdm$$PwHTOzX%NwhH|&hqFi=t!`QsXw1_RID?&*1SIqC*LMXq9I(Z-vfLR>OXE#t zTH+)wc`+J)lKs2sOsNcT>ptPL=a{eomrl4W{zRg{iY*WlaLggc*UkCM^S|?+Q0xQ= z{6}-LC;s{Z#RG7>_cCB>Wzda`!sro9a;Tybuu}tli3E8ysNN(H0ulCSk{<*KfB|rI z3t%^b^e(O!3kVn*Pt5|I{8R7aWCOo9uR)56?-bj~bUy^>6!HiBYxfw}BgmwJ;0WNz zpQb*B)5wrd>fg8di))bdUz_^HMaXSv!j1QT(pC7!zW$H64F7p2T;nm=u&5mOFQng7 zxPE(RZvzVjD;xjYzCk|Hln+FPQ#?`s+)wzw=&iH(J6`!;bJKCPI@nM4&m2hJ;XHy)&k z;;>`s4|V>+Z8T*c`v?9ah~I!t%5f_2oI;$9<8OUOJaj{j1K4Tw&%8yvY7z6!~myxfOy0CbJx$WB_@`K z>+`Xga9TpX9b3p{0|!q9N5MMkd*(pSUl6yjqr#Xopw{)JnH6M9IaveY4} zRpmrCsls{)HxLo1`#|rQEEme4Oa`Qa9AFz_l2yqZRYWxe7;0Eh;3WkFtHME-|KmV7 z7Ms%{jlpe^6MP4$Ih-J^2;g4=K3#wW;CzPz^PoHsF&GRYnI^OTHwOa6g|fip|1XLZ z<&lIB2sH(a1tL^f;8Y5q*`Ww$IJ%a^Q9pmlRwF@T#g7IY1+evt6ZZ!8eGO>G*PNm- zgJ5tP88UT2@x?|UJO~O?{(JI)t_J+84+4ooW3V_pfm@5n$1fl#BrGy6DmG)L__&1R ztl3i1GO}~zNSp$5=gpU2uuwr!X_4}{imIBrhNhObj;@}*fgzkf0a;jDS=-nyv2$4J z=;Z9Oe1)r<`^r@wtJjcye1S3BKY$h(6hdc&ZUC4F3y+A5+6d7yl@3J(gXQLF@hhk- zVD|o_7EdQZVI^EGo+t4C7+4Cm$7L2T5XQ>@t#Y~mkvgz3kee{&Ou|h8K2PB91b{oN 
zjB#ijj=TdJ9TW%!nrxGFU8C3J56!2O=m1;yU0(2XR=yjqVGFCPVBAOF32);A}=1 z6BP9cV@1KKaI>H%Dg?2jz>XS1+7Je`a$x!6TonOi09plciFD{2OfFDajsS(uagY%T zY{P&!xS|5`5b(a!=-_V3l(;Ss3P=TZb2b#@&KVbo9Hj(L?LL^n2!*Br)&vAWaU!(g zU_z22Rn1?G#EJ@mX9|^50!Af@DjfSZWdfiwFO(b%jTRQdX$Y(YtXe`brcsb-4X8a( z2Ou5ZGAF^w^@w1Eff!^Th!B8GgaXic?8%AT@?k>eI#MVN#2#{IMFHUyJU<59E-xaT zmk5qOU^j=dfW`^Mpi==6P6VTqK>#{P;k7p9hQX9VOsXGff&!TkIWh9Uh6n}811SOp zZnIb*jD8u5q{smAw@?yEY0AwU4o|}A?o)|)8AQH-!k76$9wlbfr0}+3_}NbqChyLK zBcMyb=Z=6Xxm;-r2m_)$km>|z)Da?4D~Lp?ENhrX!4)DNRYGMg0T}{`2-76=9ZG{* zga{J)&LPO<5IsV+O6cYiU`KEf2&O|<&UFFXu;AcTP|WojK#bghKxR7bS9^#8fr^T-2q3daQ4vn-~jO#-VRGO>iHgsZaIz;S(+ zFm<>9;^6je4QRsx2w8i&I}1dAK<;f00dT(n3y%e7{kav~Coh=TfeQ&!z{wf_gH&Ei zpn(+}+JH&H2tLU|k(?8^xVRDjD!dB{91(>;a5m&9b+{y~r-6_PuW+g_5bM5Va3cdE zeki^jirOMW&MlyH!r^~FivwZ6e|O>K*Y^EF*r07#+QP&L`Oaa94N>gGcqVQ$07V+; zhQ3;U!EERW1-)^090UPLKof8xL=iy|v00L{WM|0(Uz|3{gk-DeqPSMkUxTF)qmgWI z#GuHa%Am>osri68%3j=F$$s&2*X6Y3G0P9EE?M2M`Zal!Jj+KX&?PV=aCcxKvw_*e z9^899g)Fu{$WvABnpc|VbS1$gudVj;2WBV z0AG5T&PJiYEmx=_xYmm1)J7w)NIa(vBxsv}C%`S?aVR31 zh=H3!6Ob4j5s!i!1wY`5M`H*$G~6`k4~NH~kvO=03?4^7ei+})?iJ%}n3^X3hl86A+F*qcVJ6Qq_g~MP$1K@|bh*-|NiD)Ef9*YOXuoygn zg8~eg7!HXdfR`-c@n|9!qhK5aGy;b~8i4NGuwKB%p9afCwapgCuCu zcpMHvM4~YOCE%AhIQT+=*`q;2kd}x>gHhnX2%uF#Vz6Mn0M0=-I2;%X0fFP75s$)S z!GLi{fITz@g(V`;XbwivI5eJ!ClJ6Y5|MZ$iikymE;+~rbV(!-F<@OWSOOM@!DB(M z96S>dL<|vwM_UG=c;-i{bfT03I-SG!{W1;J{iSK`U51kC0F(JQ@#H7>mK6 z0X_iE0n)(WI17P9qHzRh#qlVxLO6hafIZMQm+Y`O0tOAHK)``l0}&8NKr}=gfk@;M zBZ`1QfnO;kVgPQyKX6qXU=xEO;<+S>1lb4z7#%R8&Dbnk3^y{C@@|QPhhbqJQyQ{FmMwakRB0@1&o0N>%d)1JRXMz;{=or?&*We z#Q+6(P#3HOKqXB41Ofqx2XqX^1(ba(zycZ#Dr3=TK)0MF#$YfA0z}#b&0Br5U?Hw1IQha7hn!Fgc7Je8Vv}7z~vus!yf^NAA!O^yo|+R0b-yQz#e5IRJIM1ZIUkT$gFfW&}Q0P_Kq4Q9>Z zS3vwkFi$|3kZ6D?7*GRXT>@YrJP@3KJGjIHqzFI{AX7k1FhK(;1q1{JuoRjIqzqse z4s#R0HyjcWA|Nc7yuoF7!2Ey|5TGB>4Y+&AVR??u4gEeB=bOAa=ee5mG*96?cUv803YC0``wT+!6#DPcYEM7n_na ze%}r)$_wnrWN0G=qeX)M&|9~myPVKHPUsFNbbk}Ny9wXhgzjvD`4#AUTCI?L|pyAk4V=Auh_W4_>?dolmfp+IZ(WPtwo4xKj2H=2Pqd= 
zN9i-5_wCQ?82Vw>gXaB63axs** zd0=TG>6Kv`E*f{4q>!o$neeu|I$aI?@HhCL__Q zr{(VMQ5orl;kZNYnu_a|Kd>Dcg5e7dxu;W#HykC8jKFZ??*91^^NzgVG=hQOml5I` zX};IK=}hK`01U5cF87QQGl?x7nF+)CD~Fx*(nYQ|j>y39vIBMojQ5$TZ${){_@+_p zg2wIacEqR(3=i$E+kdAeAxnBx2Zp{OM+h{NhFZ_7z*}m_uJ`aqtVfbRqcwzqO3m5ptHpB34ht0uQFQ3E(WAQNTs&Pff zd%U{AY%B?e-4|BwP1^39xn?XKhHJJeg~~_#c+MU>0>dd6yDuKEd7hgxmIK4-Ti82V zyb8V-j1|Lh%i;W#eWBKuu8)<$aFDog-S#=-gLMA zmD$0W<4rJ(Gge-7U1VLC+ITArKS*QI4$8|H+K+d_u+VKp+@}kPqrT%^F#JJEG>t;& zuHHJ{3&Tx2To}Zr>ZF6?LolpDo&Cx~vf|$P@evs2u)q0{=J81Z16?71H_mx~2?F6$ zeB(ppX*4Xk5QunU`u9jfvu$$_GH})N;`oM5&HFDKAmm{pIJ`D3VQoShG6{xFJu`3J zaMit4j7*2&^I7|Ut}9!VaT9q2hF^PS9jrNS`|Kq$2Zk3U89vdudic}`vKWTf9Y1iA zd8q!Y1gZ>%n`?XATi~G9Y)C1Qb*#K}mCOPj`;UVPmua3?CIZl0AG_ zRD$Qv`@_8i4H;=EG{{*8?CNm5#IK@%E+oXk)&Lu{ z6qR^3=6?_=@@re%9c{{}{&K<`56!R5fDx_L@C19G&pHK-bqc>009QQOtjV26M?+0b zRapbrvz^J#zm}f1OMz>?V8>EYQ{%kp1vYSSzB+|*?jQlXCa{zOhXm|ofG)Zkg@Sip zK-W5H(sp+rXc`E`-B%4HH()U=LplJv$IB9BWX=!WeXz6O{Y3XYjXc=m2XlrU~fw!9JXzOb0 zY3pknXdCKi=xFL_>1gZd=;-R`>FDbi=osp1=xXX}>1ylh=<4d~>FVnm=o;#2=xOR{ z>1pff=;`X|>FMhk=o#v3=xge0>1*rj=Feto=o=bn7-$-38E6~m80Z@48R#1r z7#JFYAsT}24MEd}pqe2_`t2k+nTL`*TcZXy!TNX5y49KZ0}Y@q9G zfE2*%6u2;Fz2Ta0fGSJ~;JOhEaBbe4n%rtY@0oB)bM6vD@9p5dYhilPUuFz6M0MCJ z2K&e$zX$Xpxc@&tJmK-*pQyk5fHU#54>(_g6ZYhX23#@F3va-U%`mo~ssYCr$l|(v zI04yQp5!_SIK?JPfCc45&rdD8R!vRK6S5C-`59tp=zR#FFz3ZY@aqJ8sQ$pB4!k+g zyIB;WGLUH{hr ztU3am#(|v*1y0L&q9~sX|7-y%K@lMVVT=e`Ol$^G0x5}^g`AC+!b>A%P;wF^v@%+i zUk#~&)|&w6f|(eZ%c%UESjdjHsBBrjEXmv8la-XM8fqEUT=lzunx@)s4Ui3cgiYkF5`&Db#t@ol(_v|eIRcu&` zJ6lnAv!(s{A@ltQG*aa5w>FPEFI%$~PY@PSQ12h0GxSZ(ENxQvy84IRYPfsv(c`yc zqMOX~)4vZNxe85Bk ztYyGHgcbm{BYqrGNF3{om&UIGb{jV(b- z;bKS8tML-VS;UzFGX(;1d^joGYP>SmmQMvEfI*@)`BgAdIDT|YG00NabVA4E5KPd* zXcN2vK^Yr6E;@^#E~<*2D?C>?CJ7U}e>T7Pj&!U#))$kG_#}vqn3!F-q8tH9 zjAk}wTgO7Y02UjQE3)ki9!Xk+1G#%JF|}xEw6Guom}tS|fI$k60)`?KAC?~_gb_iC zqQtN>L}wz!QIe?Hf-+cHf*evF8Hfo&okf?Ts!=Vdd#F}{HsV86JE{Zu0{as67V{3( zPa4F0NBu+(BLx(U7dtOYId<&$rrrB8Pn@evD8=E4dZvq4e!hDTGjo=n{>oL+g=fyz 
z=)Mq3+?jlAf*2uMbY4cGt}CyQmcbMF_-9J$85-pmJbFyjPu-i3=QCdHM@vZ)Wq8%~ z53KPS8X0$WJ9Jo0T|v?PXjb;goV@(gmDSgA`~u>#MrJn4bMu<+WZ`E^&0Db8>}~JB z_>FoDY5szRirNN7b`Fj%uI?)#+Vl3M`USBfw?yyCDLh+p`R< z9~!By785Ir))bb($P?#am9f?sp+zx;IC+dbMv zbOs$*3(m&#;p_+s#4vu##fxypSU%ixU=I{QV`Fyv%&{Zz#pJG^XT#5j6P#g$*sJQJ$h!s{5XU8ONTY}zhDJ&lE zB8S7pv?*g3&qao+qNOmXZRT>KMp)#wyNhDq#C%h7#PDHIaiZ3arZHDdfbr2CE3Jdt zCZvL)2(09bIb$Fzr~>TEC?Q{k_#&H(;5@B*-3b0{=_C|1*i zr1Kl256ATnOg@M;4~^3vx(@Z58E4~^3e7~nQ>oH^WgZ#$8}d1;Z{KtXuUmW~`sJBQ z`>(2kgVj-10KTsHi*|O2=L)SI&C0m-cSyuEWy!s2{c)a`bLFm&d<) z799J^ZhysrJx;q#N;iuwPE%pAUQN9$pWAEav#;wh#RM#8m)|@pwvU%lz364`KGjdv^-}Y1 z$i@}weA_wq!zSgnDyuTFZ(Z}-kYT+&r&Nup?u8{mhv<3><~C@bTk?Hp?fcdvxGQ|| zbXwKrV+X@k=nMF)?LG=FxNY@56nFXd6UwTDMRzh4SFI0B!(j}n#TNG|gvC^U>ivSz zCt&M?_V0POAua2JT7Y)FwL`+UlUqGB6W3brlb!owII#cxM#p1kMgy4Q7b!}QDzWdq zSM%&@HQLEaiSm~iML`-kpDTA$R=)Ti)mr}GRAxeTV&%u{=YwZO9N3v-mtvA%q@<)) z{vz>N`dHB&DeVRJ{FRS8HzdvEmyO^y927B=iCrY(u_H20K zLO%NSvC6v!I`-`GVoime_t(ucBFJdg^6YrHp+Y-8-#a~Jj*%=ZY+bN=*|@8YLLXA2pMcoq7RdoRzF3HqJ=B6Zs) zD*j1MQ>;^8>aF8@9=n+A-%k~5jy{>Cc%c&iQMg-R%jj6<6|bgSOrHY#KG!)syWvLR zsu@S#y<1R9)ca=IQh2<$$1RDxw=_aF2^}UFmvK{4K5*dDm!oY97}odizTWsl_Ox}7 zA`81T;znScPjl~KtzJH>AO5eM+rn++hmr5Dn&sWu_s!zSyj`s;`=U@MJ`0Opi40p| z74*=0rB0HC&4Gf#$e^7w_~LkWO4qiP^+5-k=BG5)iw5cS_T}&Dtw^Y?Q!zA?`G7ic zKE2@KXRVp--!|cwy*?GU>%}G4rV_KqOjmrY z&6c}*iAWHcuR1)QV!dShgnm*04bZ5BCr zUb6Z6gSk!yWy0=_%7@*evyLLa+68*sX1gxhI{YGiSv=~>!4S)lMr(G5n&Iq?z6-20 zFB4pc$J|Bt{@iigqq={e>ELaJ%B@B2x=|N=D|Ev)FU)-Ap`*Ur{mRl4CJW-yq)VzK z7|7wi!qQ_rJ9FplD^UgF>utRx!@_rEWT!REncchd&QE?lPqCSOKkX|v`N{eB#{6iG zACg#|A0^W*W&Sos;bleaeUCHx#3s!YksYZe7kkg$!w($X&Tlq$PQ0+J^G%*yLPH?J zzqz^Q`JvXA`(EtL^RKb=Mp$G&-2Fm6WSvUYtrtAI_AL4i*0-LY9(DnlsbAJccO2Up z=n|G=cvcm;eq+?ZlpW|y%M}f{_>jnLUmmqbpD&CvevrF~(0%LSORbhWUfyjKz8$HD z9Oeu!yc3=M!g$^GXq@|@to~2y7?+O4)&tqppq#TepHeEj?ay+!8=VHAyG5d%``61oQt@TMV^8Vj_ zmNua8`y#J>w;pNV8*YABw#C!Z_t}dmJ+9rp%kqoZi1_LQ*OfQEw7Qi3>FB<5lAG2t z7Oz>`8+`lok^PoOTKZKEV)Hba3WOSs_!`%^xJ8lU^)5Benhw1t-MxwwdEJ$D-*fct 
ztv@rvRyh4&t=`Lw2h?uG2jTc56a+p@D;z2m)<-`X`e{!`Uu1+GD18}WU@!yzAUIyHZInXXtbw%Y#PgPN|dM+UvM_pEzd zRn)G!fzo?a=emkvOv(9{!c3>AJvII}EWSi6$?D9Cb05iSAMEv(w*OJ6(zVl?dEw~w z1c@O-pTiYx`kuvY>-ADavT_{1UrkgY@=k`EIGEZ1{HUL4Lx1O*HDrxaXMqKkzF`Z>yan5#+Qo4+sZx;S*zNKyAP-J-}t;+6)P%v&_TvYRs5-RSovUo zq;1=}I#1uM$Th7#_q(`$7oE+wBsT0EP7o^ID0Dm_CL_zq^voMw&sMQjv+vvHzo;Z; zH?8)LWAAJ)KAzxl|3J^u2D4CuoO{Z+rul9WapQYio+{X$#QAO_)Tt=BQ=-c)+e#%J zZy8J7?vG#vn#y(Ub2ujJRe77V_{*XlJ_-AhkMzefHCDO*4PFfb7{*^PPnV2^oq+_Uwe21|B}^PjP4wJUWglFtlJ!p-jQ z*zuP?2h4UKjqk6ExQNJ_uy-Rquks(jxER6D^o+`n-bZr za{e>~>DrQTo?nUe!U8e$xL1kT5^+0$IN~+r8aPi(F4#&n4wJ^@fs}J$~Sffg2+h2YhFju^??E`5(t|rbUOA-D zc$MB9s&dHv?gmV281STi;zw8@q4*7mSO8|3JW1U)aW9u+pO z=Ix4}OpB2Nc^LhQ`39nPDp`3aN0u8$-@Ac1rIH}N=G%3NYT55x}7d;5gtbl}hz zu3xb7!;AZOy%%3Of3HHM-z>8!f8jwq_QxJgx%%$(o|dZd6`|4gwh4Q{YY** zuAMv3xBGqh*sWOC&GgFKH!r6I^{AbqjBWA_kmjS+{3v17pWN6yYO1<;?($r+#1Uu0 zx?F+mTHM7wv~J;_(${?WKDR8hr`z%TkUyISuAO|cb4^9KQP8?4AqQk$gsrhv%KT29 zNlegQv9or*$Bd8r&sTkoUcKA@a;|c}ike5?maIVbfMkWaf!&kzPMK9@&N)}}KRoSn zv6T|5oY4hdyX^Ei%};zzcY9XmJ4xAFKPpKj5#D#S1`_1g?^P3%?PSkv=HvN2D_6?T zNISSq@Qrs}o7n96uW7-3`@fFA6iGR<|3=ZkmpxZ}hvxh=+jIMT+-P}b5Ucdu=F;5n zDH=6dUDcG$ws)UCd*XTb>-o*aT|!@L1yxS$3Mw$N+&FkA=xEw`HPH%()Agi$ z^TD;N&U-{;#~m8IKQ3()(TlG%T<)=m`Mi1TV&9yxbgNT}JE;9-&ik(}Rw&3d-I(62 z^?pprXPzl5@uJcWzb2V&UMtMjY|+IL@)znXiW&FK{Av$j zNBlQ`v@kGJ8F^J~)f!1EuDaZ(rxcfW_^6;`Vf@ncrEQu8@t=M7qHc&jZ>6q}I>xu< z0d=ri*XnDVgv0m+Nn0ni95abbH*TzYEs5*cSYfR%Xo9!*SL-7Sul6$#|C zY$|_WaMkVSsub1vVh*vk1;;5=x6eJ_RnPw@%IutXVBZp+-}>9i>V3*vBUkGU%&rfA zKfHTQxx2{|=MSsUL0J(qyMLbfI6sI#ZQh%{>)Ur^tUh+Bqp#N{S+B^e=df$o=r>f^ zj+td2W}zCNh;GZjKXL*6vT%0x!sAzKRy;hh0>AmB^}#kT`$1dnkhe=u?Tc4^b31zb z6V;Uh<~gB^WUill#ibx&mphJ)=N8K4;ZN_so0n%hBL_j+svgO@CEv6@_LibRk{_Zq z*?48@Pu1wJCw^)PtH&UAGH(azME#gk)_7_BzEPIB^0MlLZS55cv&Zu5676q^e0`-i zRPsjaqWtlO_94|Rw;u~yX?kXvz3Hp35@bfnn7Zuc`Q;bwZQk$em=m_S+3v#2C$z(r zQZE9y~MPh7PqO7(C1bnTLvQ&1p>rByvi#+jE*aZ>j;Kk!YR6cw{HeZFG2cOI zu&uZ3vlPEx`5A*bS}UHCg83C+mAu?@&9|d|ftTy9^!fPJdxTCKtl4UJWR?EA;;3G` 
zvn9(P)ejzB6Q^oDZ`J_?=_j4kikA&8~uS|cxqN*vOr|jx+u_ZkRPW2d`Qg|PoFYj}HN&A8M zWcbnlX1oJ7>$4y*V)R+YbpxgC*`!OVofZs<$Lks z9Gs$XjbcM-`l5A&_39yAsAAF4(yZ%3N6(yT4qUsN?>;3A^XyjG z3hOm%Vr@RD?ZMTiZ5JOF^KTm*eD$#3bk_CqO{~5nLEhUlA3E8(JI)EqS#wwMdcwtp zCXO%n7e1$^Ic>mbf64dLSbF@iXEhYl!&`{rllJLE*oK)ijr`pVW)*Ue&8D?9}~W?lAnKfB@C z(E#c8p4>7K`kcXwg@X7ZpSc?rro9WjjhT^o{`S3F?o~9wlJa?iqc`?%@;1eC<2V9c zHdkypbTR2QX6>PkyDlYLo;#hE?EN`Hrv9V(>^}c<59X}aKW8xLeo|j;uh%=|^BsQb zH=fxB2i3o-t~Ayp?-tK8HC|dge}05?aKWxEhG(P*f_&@u_ZGR&yXo53dGqmHhtwq9 znzxx@Ve8yxzL(kIrq&T*@z7=KeQw-H&!xL<_xQV?CPv)7R;PNpmGaJdX}$CMj4}tG zj{PRR@ry+Y@MqIp>*!uI*SS@kwOQ-9#=^1az3U)>X)T%i?E-o?pwplh+ zl<(_Or2bK1;YaI;x}EZ68A+dyU5Sr(ogq}V)N+Q}%SuKG?$ar%Vk0*Wr`1Nyx+2l+ z$5^3!Bmek^v-a(V;-_dS-)h_XYMRI1JdttQu|~+6bz%RhGCd2g(4CqZ8Q*vC1!#^m zljB7rnu->0uGCW&ygFKKxv{$~z3!otGz}A;s%3hC7JL?4qn{vBdgZG9A+y~1@k^wa z6yL~mlVxgm#Vwp7*zV?%$rr=Kw{5RCJ ziTk<3eR7?SF_X?m_*>Q=U?{Y4>gVTF07N@-f@UTXU9L@hg}=yif>d~k6F+*vO(0d* z^0MHkVo?7y_y>yjb4+lud4*Nw)O;weX)3(vZT#LTaLf?t&4np&mS%z1>nSj%u0c9^ z3VcJY`b6m@Yz{O!{%4c$yPcT%H>dn|3@46iB0pxMWW~bvM-y;rFck^G; zYsaT#b77Zf&iIQC@=1f(*N%E(v-NLq zV8!5i=@-{lK9B#(fwgX~6Ad=X4{yH)YY0n7;a6NGqgkS zg3H11ObCv#o;gGFgUhQNvzgTpoaO#la&$qrRPwUfA0U`e)Z$sI-*Wfjy@50c?!KM9 z<}?U~SEw(l0HFCX@B==xZ(7xtRAhDo_%8O&kq`zqE)BpZ<|AUec7N`YimmfJ4|?0B z5_94yUE^4`kGk#)5S4pcpTys0^9*(BsOj7!5YOB+Z)l;-!wf4S>>j1F2!utna%pI$ z4$?{<5TyqnFJo(4|UgfE&2dVg1EiOx2?uN8j ze0yCsG=AK=Z<|=kw%DAxJ2wh_{9bo6BVbF#iQqxr|ecF)^3uBHB49>gxW$UGi`$kTdyW3vG zE1&-G{oJ!WYU0ip_an9oXag_BVf7C?jt{vv`L^Y@jN8jaUL9T#baO*coyo4$Ytb?a z5}!8?*cdw74(wiO)X}oxQQ!XJ3yrS_cZcooy>-_r*}11i@?@<{U;heIUYy_4rTxD7 zg?WzOWrpRwY}Tg-4zL#&+*%ZV)}FTtD2@-_N|a}A$gh6l z?O-JPzE-#^^^WG4ZIAoT>Zdz$a?Z?*tahmw)G0!qaE+c4i*n4oZQf7 zTBVpps}`L)Xei%J9^-A_ecOU*Ik`@)N3{P`lPquzgE)C#ke z*`>EH<`}G=wf7=2bk)L^BI5edH+K>k>n|)sIrc@8`RmuLFNrC`uGRF|w#@CJVRFRB zu+FTxH%m>=fLmdXw>Gqe?3>ZhV=(8=Y@=$lmr=8WeWb_m{F_wsEW0AirmoA)s!Ju0 zh95CgqzHcS;l)j+$1T&UxqHeOK^5E=L1ll5qP*VPYn^hNJ&}QIj-PYW>*Ba5qEDf$4GYV|JCC zBKdZKr*+l-&g3&T^j(VLkB(Pu1(m@?vF*>e-TxaxZ(1do{#=#L<>*G 
z;8s8C?cv6SW~4Wf>=;7t&UqxHj_FwJc-k2~a}SoN`K&)rXL0QELh;t)s|RhD)?~iF zcjeBj?pCP=d}#5kW5LI?<{!L#T6lZOQ%rqO%PX|acu<|jLDyUv@q`N7xF_)vmGij}96LYp<_;E#qEb+H}`jddu`Uh?$^5SfVMXw#OsjXB>U+?y)K{oK^#)qHB9)>6l z$yVto99!`!Nx;v;E4S<5tgaqpsm)udl9Xt*Z!Mme*LzYdWhJxccCzGt4q1hThF5O2 zXP&`i?96wSiE^;V1=5~95$dq{S(FywD|z%uA_o01EzPC<`@?ce6VLej1vb36-g5;u zm1*;L@oQDYe$os0ls9K)dq6~SaPl~1{#k3(z`cc!1*|gK@mg|n;eCN*-MEYRclI@J ztfSxNKFGgu36+ye^f?ktlJLq-CCtzdK;IEg+ITH7|A`)DO?S||x$|On?pNt>v$wxG zTJj7RRIn&3xTZp+@bHsQyg1^U#og)Yv1yE?p(>+_e9NCdB;_g%=SMWQM_!0$7SN6i z(_Lp`!WWGTw!d1?G9&(4r9;5%?K8fN-Ah_KaCH`Sh~|0K%g^K7R+PcPwkY+w1LTd5 zhs`3?2ha)Q4j#k(wLc%V{1kQ{E{ylD^Hi%W>^EqM`{BB%GUFUKZaI$pX%l-*`qSq- zObyxOu*$9<{f9(1{79GlkhJ99C9xjQi-pLQcl0;*i(PD*S5~yw=se#Q_ZL3Jg_3Wg zm9r(viQZ0{K?gRiNDzMk!*I`Oi)1`2zIH~6$4r^?Z#Gh9 z-!HwDvf#|4bM)1`IO(d-XC5be-DdO~V`3i8@7?9*A!v7L)3XIV4p>r?O5W+fGa`rb zq*k8IIf_#p|0ePn(~%ytJX={hVIC%G{7_+{CBbv>zV&dbqO#4ZI*W_~R|j@6UcyNC zQ<#LRdfLwi@7C?zzw_`8%L<~}X3xW$<3&3v$8^aVG+tbGMk;M<+@+sZY6Ez&tLIQH zRYU0YUH(7Pw|uc)dg&~A+n30zmHjWIWjnPbj^#;LW@|~ZXp0{EQ1;aw-oE#Q;_b1{ z7Iaw3rY(_YQEzn{&y6f@y=J|F)v>PVG`R#>H*hqM@-uM84uX-y?2R&4&jj7LJ9^P( zR^cwfT5cS;jj6(_V{WH+9rQKJO(dBdTAC2~^_5$(UbIO|N`YeI<)?WPWzvDG;*axR z`~2Y3=wRD&nd}qR;C+*CS4Up2IF`BKd}&U%1LgQka%koSs%>?@+%v)Bo~DVbqXb6g zjj#IfKF|Fg?t(zUqsGLiVdE0}KJ93;l>eAV| z_RjJCD^(M}2Hz8_s;xK3dqSoni@z(D-Q3;l+D_lSeA|? 
z*%o!VH{QF%QiADpG8O6hqa8JBLNvMBqu2i>dLoT+`w5~Vc0VMTV>28{n!i^ZfG zT6Wa&ga`3G^*#vUdrko=*NGlC0|;+UMsB*i!iF5HuPdPTBX-yGrj>_6^BOmoT4vv@ zKD@V_-#pwqcgNTh&wV?86kXNL5xTqYdrRQh3f=D?zODRhP}abH97%{juKn>rQ)$telxF zSh~bZC2?{70`{!1^q_+uU7pU4_@?MArhZSo!Q}OL=89d6!b6WkgKAi24$GUFxPfn*PVdiRD({|xpi{O*p5d(M z%~>)!`8FaCT(XE_qIS>bICbqM?Nojrr-iXC;_GwR=j~Y@5LDikW42v#rBlA__e(>s zEpc74&B=a8i@e1a=zWfC?uk2dKW4VKWzc5j_A45amR<5|@Y!(=UV~?cEI< zW(`Yb?xgNW+dqmE7`3fL+g@aSiq6&l81T7r(av4GdnVgetx9NN%BqLYef1CgG9#8X zq9o*_WPHTZYY2Jc(}5Eyo67c9ezuRB5wXECT-~gZb!e_$L8y#;d#m6B*I}VC5oBT9 zgQWR=xKrdqD>Qe|!Wifc*)_k~yt-^tecRp6#n^hQ(vEG?os4X$?|j(wy^QizH(%BnbDTcPbPUthflppCjN%TzSYRvk_iM0 z`u-V@w!c7QYujS0kLEetv#*)uocG8&L3QZ~}ab7p;8u8OwzQn)2?^v>HY7`HS5Uc$J8>*+LFXy&=EbFGhJ- zp`*tlv-3xM+1EbR81Jt?N|#gaSGgMGzGTr1v%a$;*Pq+!>#p+}zvtb{`IJp|{7<~r zR|g_H?Mr+*R7?*^HShfv7IXvYx~_9rB(ljo+akPD$yfU2`Aa*G@-;@^!t@jEtN)5h0L`{V6OVI{5h4D-`Vq9ivLTz**FTCh4g!PBm(sVKS6J~XM{e&+vI+MU3= zQPq6|pWNHtrW*}gNmT})2v1mq=YMX_xp#iQ&&~6^@8=yqn0){L|2cDJl1y^b zd(TO4TwQt8XOA55Ztj@xYPUn#}EDY z_Tl{ZcWA!1a>31qfBAy@s$bvZ{uMvo^Oxzhc75`V9hbiH%m%+tnZFbI$v2LE>iDOF zb52@)@SL5#e0XK!<91m6;N+M7!{Osgs|9#=t=h-iM zubpCV_1=Q|ua=zu%#*WMUi!=xd&xh#Y_#x&12X>pV~X! 
z@L6`Jf5%?hDK>4Bo#)MbY3DoEShmY{$yK|gH(chvH+R*6&Z1>r_taGfAGBhVL#}^t z)gjhX%MLy9=~ahXf8OLUtNqg9cfY*sOK<*tlOvm*RY%&^+eda|7aV2fWfp>M<&P|e z_quB?xCUBq|IV;H#mCwu2ZMRk}@rT{MPg2t%pAs-zpiica{b+he2$3 z%Cw=4V!Nv5^4IKBW7CG#j|~m&BEKFj6BrB+$L7kP?oU^iKjNPhTW4st{PMI^OW!ir z!0@!8xv|}(?F?y|lJ2F~;mPv5)l-J1kM^pDBs(2a-f(Ca8K2q9Mlo0Z;$AwJ=>QIn z4NaL5KThU|m_B8{p$(+JSZwDt)U*MUr^n_^jh!?}rYVp=!Ba zQ!Bi!PMgemU?ma>OL}=ZcD0&dL1sTm$=~+}jcy(MM`z zXn28~aBGU3kk}!!Whm0ixiZaz+{#&W)vjbBi0MO%MrV4kVk5IG`AvQ+w&$dSC2lPk zT2JOgk$LB2YK19tCtWpc?L29Rcx;W>?8&iN((9U|Jx)F$R*^QltI0K{ET3X!gZ{UU z);iS@YfX8XsqG2*Z~M`E^7r}EEi0+-$MvncX>HI)L*EuBYfHV^M*nD0Bj=2a-n&Tl zbC{03Hr2A$(DxhC&h&3?RVr!vU%y{i)winNY|;JH%l&~2$`^FryM4r3*K{Da(Q$nu zUC7Wzw=1O1*Z1qPqs{gGRrifpd+Yn#$?cXC#P=BxSC_NhCa6DuK>ck4>hF^H{0aJb zYC!$tat)nmzkiFwpTB>^+E@3vvE1&j??>Z%MdBMjFk&syHM_`dTHlYxuaNU*&QCOc zPbB`22S==(bz56*7wP+lKQw-x)v=oXqjA;F#5^?=O_NdHtx+0pk8EpJSO9rEjC+A9Gx;F+TV*(w2dx|#$T71 znj1uI(fC_pGp?r}8-G0JN!%P?H10&=kFFlE%(+%^^B8;}@h!w^Q~$8F`RF~%ipFn} z*jf|Be?6f7xdHY2$n)47Z?ymYC2p=G(fE-A>c1@UnG=j>eL2zUT&OMD&*uijuamgC zCaEo2f49WV^)?#+iNwvdKN`PpK>Wc0@xMrXMt`@k-@i)SytbqL|3l&nC#Zi{;^x{M ztzTEJQ|9s7=$9j7*S=p$y?$G;7E4^cUe%^vpXPCi#=kA`om7Er(fF0()MymruSo2G z3F1A8na3;IkNSr~^LmQLH<8EJ(fu@JoU`u|Kc=OfRfX#B4d zH}Bukcv8-jqxTt&&zJZ_=gA#Vze3_WOwi9M^7u?N&aX*)qW&L{_(bD*ctHKOlPo<` z20lJ#N_^W1#`86aPxN?RD)EW-`~3m+_YJ6jPU7bMB6>bQlK7qzj7QA`Hqm(IM&f^_ z=fNvtc{s4;93JT>I-V~}e4=qK9}xeh#3y>Z?vc2y_Z{v3rxKq(LHs;< zqnqe;dB%YF=@LI`f__ev_z@GtmksFW&;kAIIiNl>AbyR+_nlzBKaluDkK>OeZr-ol zaKuudC)B2%n`@8Wv#e-*@qoCxK222r{Q>d2C2p>Z(f;olQ2)+=_=gf-PySO|v>*9N zYOMd3e14B^Psnj@qyy?2Y2I^Rlh4f?MhYk2F*4S-3U4Sj=SulNv7_$j?K@)gUams- zQ~m4W=;x}TJ4eQ@d8&S)SbfejTSvxeuHh=S@HgY1bCv%l^_!_XdKj`pAY?TEEJlD}BIEaF$iCq{h06L`G4+%q!vp0CQzk-U2R&Gx$F4>pk7 z-D)G&rV+38yAkUP5ib>=67eSYj=z7Z@V;U{lD|Q`Rm7n<7q#?f-n&)!W3jo$C~x=V zh_#oxqqoj&BUZvdZu64YKiZ8(FBNYc$^TqzKEtT+E7afSzaJU9=Bxan;%NOh#p>q~ zvppb=KA&C_N1q?_pBn%9SGCzmyt!#Cx5tRhd#VawCSE6!UnMrz1Qq^`*t~|7Ul2$4 zmu%wsT`Z1%zV^k@$ManBbz<{bPW6Agc!s*8x1WonvAKq*{LH8E 
z@yLkH@m(f)^ZpQxe^=t>^KMq^&9z^(GoSl&GHHi--Nq$uw$`WerYYN?yrt*lGIgCD zri|9tq(8Ma^=(b}Q`Qdk?X@zVLq;ntYnEKXHqrOrl(@N0nQNa)#h)2}en!vRW)e5A z`*)>ortW9ZxZ5&2rP^$n#Z_&+;r?w_k4wem@;bJe?@8{C>?is?iE3xgo4Hku3o(Rfbk&GRrCKV9PH=Za{2xx~k= zO)L86O@;!GOZ#{Ic(mQCQe&$^wGC)D_I=YI@O?8b_2&FVAICtZK~`IIKk7Pfu8p_J zc`?_OeI-6e70VWlA1yXD4@&)-qZO95OpeoBb2GAkvtM(o%Ire_IG>bu=COEK+L`Cg zn-Y&+2VWGM=hur;XX|}!B*$y6XLBWHYNBy<{N_0qjjQ*SO(y7PuSh=&rGN9BxIyCP zc@~YYC*x7iLA5o0Kk@N)p6nm*pnT$g$S0-W|E~Y~|Iogv^PNBb_t(WidA=@Mnf`S# z{ipu8Jbi1fi%D%)-*)MFnf}Y9vFE*#@6-URV#$(1|Zb$ohetbM8Zf;dRt;dt=P2Ar4u-`r}d9{`F z?V$SJ2mQKve;I$hsdjO(d4Cxh8l7=kg)P;O*`mi|uKzh{XU@|J($3b!gZ5wl&whV{ z@)eygOTXr|qOL3Eel)&J;^ws*jh`fOGp1-SgOo8yX(=R=8`&oI&WM-n&J>1aGA z=Tlvi)fSCUllVl(JAFWW#(?w4=l+StqaH8w`ihQI zoqzK@iNJ4!x!zSa26ds+1Of}8s1 z#eTGZ8(-bO9dx`^r+>T$NdM-u^@(y!>*(Ve)Gn|4E$Z9Cw@#k4i`Y`YRm&$&l&$sKvo9cb2o7C<2{SD_Ik$STa zRsRREd9EwJEH>9p<^L3$pJ|k9^nBg)?eW*&HIg^igMI#q-dG49>Wgaujoif^kd0g^0jB@NVn)!L+qt8BDp)w==^0D($ z*ry*bkE40r_Fpmb81387o9~z=>w}9v&iy_9-~GR8zLuP!;+_2~QbPBqo);=!`hGv2 z(gRo5IdcrrJ*dwp6UEgzoG89%K-?J+KXyR;!U6H}fVld+FwuBc4~Vx0#MS5YiTddc zh+D^xF3q@fbb@S47Pr%@NLr%{&{=1$IDP5qD^6ZIdPmj>JAQe7?7l2F*1s=pmYyW@ z`YzQANUAj}WR1ZyM;o0iRT4j5eBud9&phQgS+(zES-DbXPd;mORll(+nYa1))0XB= zRI_f624(d?SyoTloctAOdWH;iG_3a9UqpR$cE_>yvhupLm&M{_jnT27tb({Q8dJxR zKWFS1mdjFROXbv9DgDM|t_u!Xeh+SRGx<Y^F0;S( zm#}@09QQHrle0%+-aZOlBqxr>e4bn;*U6rG!;{U7zd>%3Ep?%e)Vt&gxlQ($Vf#Ee zAXksWdgo-cPtK7`IjnD=hVGn>&MZfl$vtx7D_C!n9dedjAtzU0f7LuX`Bik9?2wyh zV7>cwbe5bam&grrmu#Ph{k!A>**_oa^W=b>`zF@A7oiK}kX*YM>zm}{hJ$?>Z&pCo6;v+SLp1o(FJmm zTqP$%Y@a4O@EzueVWf1P!)eL;Ou9NC|({^2#^ z8aX7l$mt}ucga3EAjjum`x3cBw${h`1UcJZ-fQgmLvowkC0FKR|Lu*@2^*ay+nZv( zy$!m%0A1N0o!S-cldI(19#~%{hvYW7OSbpK{ycJ?Tp@>hVf*@Gw7)kxPY%cxa=gFX z+Suc3lhfqZzPP{ULFkN!&XLRHklZEP@&{hg*OR0E;ZrvA`pz7R&XJ3UVZK7Hk<&ib zhex7&EIyeDcBL^p8KB;~oD004Ba^h6XS5HH?$W`?V zLy`WR73e(KQU3rLsc)Tuj-QEctwiV5KU7BASIGGS=Hut0Gi2-Qm@l7)4#}kpFrWGc z+9#(j#C+o-beCMe1oK`I9gyAsjrsPa=)`x>?aR=G%h6SG;R?(ru0%WJ_|=%Nd=K3u 
zSIU^rT#L?;dq2Q@;(Bzvg3gi4WcMbluaaBj;>}p!At!%?`S2EW>Q=N*E>-S>5@MCn9T>2^I-JhWg$n8fkUumG5Wa}}^*U24n@N24n5?y^3U1$FR z^UXh_+vNBQn73a|_3p*!>Lus~xk+w+7werX(c#tT z#P`tgYtZHE(E01p#UG+8L zv$4M-xlHbm(;H%Ym+X_18)3aO58a!O?rx4wZ;5uvKDo6O)`#1mdt`fC%zNY-IV88p zx$UsO@@ z(@QX)aM1;FazD&>$j<(l56B@oc>vbe$e9B%UnbYd=`Ub?i5!wUT+zK zBUi~CvR%UVKDk5=$<`IvK10rvtK=3r{$1*ioFkXXA-P9RU5WksRLrz?S{b$Hoa*IYcwlZ)g!xkGlZ!~SyQBDqQq$sKb1dh9<% zcF8$%kz6H*+E4*xn%*$YpY!+#>hL$y;!L8M03K zy|W=Yu@SmJ_U2-~NbZn}n_zumQ*?JTbZS1jz9rh-8eJyW$PIE~8*E?O7M)mtF35D! zYBP^_au0NyoZA!gu7hrpt9xNSy%-(e8(kuoKact3KIk6V+8^`w0qES3=rTDZ=Z~iP zW6HUxf9+#psaSA(vKR zeU)5i--7k-&(Kw}dmrYrHw zZjs|JWBUv_M=p~a&ayn_2rkzI0* zTqIY>4RV`oy^8y{$qqS74#*X9gWUcH?l0a!+vGgCNbZr-uVH@yxlV49yX3^{)E~J{ zZjuvkVEYu=dK2?Wvi%n3J#y!5%xC_I&XL_N=CkAqxkZk@gY6x1o?Ia}$=18rUz(gH zm&grrmu$a>{d?p*xkRp#Lvn{4{}=8rMRv(Ka*AB_1`>joO$aQj3&oyV-x5&v)@%Yl@Jh`i9Lo)ppV*LpTj7#JSxvuB9H1$n#hwSLN zicGyvuBwUEBR_w%$f>EA&yb7ckldSr?Nc++wFJ6NPOOP}dtJ0Q8(o}(POXpbkn0;@ z-rpQ;Z-Ms7-j**XpTv&k8< zPtKD|oF$jY?L7A9eia=*58eF+I-wR^h+MzYWS5*H2jmL5PHvIA z z&^2=RO3Zs#p>yQ4EN&Nl{xrzt?_u7$2Aw2l$R4>$ZjeiQ4ngyN-6Y%J$NlHX4RYaH ztZ$P2A7H*jZj*D@VSPYull|+lJ|yRFz*N;MdKl|% zazJ(-!TLJ6(!jjC8r{%yZJO(o^DE5zzd`rN)jwlCd=Z^&qdjtgY`ui_0lE4L=G)}X zKQN!^pljs9JD9J$i!OYK?tO&LeT?pqeYJq5Z07v7$nL)}ACl9bVm_~Df;9cvlSiO` zcRD&pZjrlWZw9vaW}-`EYYohM}!LbZc$2yAC=a*T`+MzaF;F zlbd8KiS<=-hiuKk`Xt#W=gEc5uzi_aBd6zKeQ`^4Vq0{b94x?mYCCj~TuEa-zdbs+ z1G-5L7GgfL2;Ct!cfx#OXSA~mx=U{EhWX%gXm@vXK=${*e4Xs7h1Mdk|3J?rYhJ(B zUYPgDb+V`DoHp$%yI+VegCWgC*z^*>f?U+z;)N8!Ip$Y&r$b zXPI0h*U1fXNN$rmWNS0rf0AsIQ{*(+C41x?IZqDAWpa%ilH24SIWZ59H$`^HJ~>Y= zk}Kpoxk>Jjt@${iNUo6UBOU{xD zj=gCEKgua$;K?e~RpoeR7^$Bv;6Fa+BO4TMKY}NphO( zlC$IjxkRp#>*OZ6P41Gd?Qr}FvQ18t9kNHxlJn$%Tq0M&#uC0pCm@sn+Gn(UB0a+aJY2jmjDLavb;SlN$!xXopJn0a+>Utv*ZFfAeYG% za-G~Dx5#aBk8IXVHP@r0UPIN`CTGYF*(G~qpPVJ<$a!*sTqIY>RdRzIlH24C+1dr4 z4{>smoFZq)9@!`7$pvzWTqak@RdRzIl3V01Ile2Nj|4eIcE~O{OU{u4a+zEshvYW7 zM^5a9$CDyEWS^WT7s(ZJo!lgM$kyj@d`WVe?2@zO0=Y!4k{jd}xl4}kj^nk-8L~&t 
zkpps>TqB3%Hn~Sm?1AG=ksY#6&XbGe3b{^hk~?H;PaI#8oF=>EEV)1~k*nkexkc`h zEeFS&Alu|L*&%!6EICgul1t<&xke7jO>&#uC0l#Z@sn+Gn(UB0a+aJY7s(}ZnOq^) z$qjOg+$HzO)?z##ak5R$kUesa9FR-o3b{sZkelQ-xl6Y8#^XznQ{)WUCHv$Yxj-(G z%j7D#P7cW}a);a_$3Kt9pCqTrE;&mskW1t;xke83-vg-iG1bhndOCGWS^Z_Vp3ZvA zx&Cx|V-EECfu=s8*9$bRkv+Ze?^vE(-&A%go5_dd^gPTvvdWOc9UXRc?P4>wFxk_%5D|)>`(|_pT_`N0Q$`{euL(#TgPtWvM zUWWPLWOPQazh>(FuVTKV*H1I~qF(RJxJqu3<9fX_Q=cJc$t806d>l{kO?37mbcx*i z7Ut7>eK9khnqFVb*w*WT8GGb_TqC#0alM|H=`XF<_cCsg3wpgTlaJqsuDL+^?F&x9di5;>@P#kk;~*dxkGL|hW%$(qf6w#$COhl5swX`PBO8_(tgZTy%Up zbg&1yMvm`^`3yNnE|TlyvR<#p9FO~X?7yqm<1u;9!+b!lk=x|>!Pq`S_7B0lcPKg_ zhlgR__0es;o{kxR;wa3wjz;H?MOVoArI?TF^>j@C8M1Xe)|XE}hvc$eU&r*9&S8C) zoYL#-nEJ+PSl=Z#mSf&ufi9B$uVTJ?2HHLo-Cc<;o{g@Piv`Tv=b}Aw^6Qwdori9d zs~2G2(d+4$^OYw%dVL*}Z(W4#;}@fwdVL*JpVJ=zjmzY$USG%LdzWH=DZReVSpG81 zmo7&)$fYYVpSlw5lkKZ9U;iGuL#~%G?_G-y$cgJPpSm7xSI_~uM$YQ>bIkc^kh|o{ z&Dg)C*V8fW)8w{ZU&rJfy`GM7o?NM7|Gr*N$J7_eb-lih$rtXx_EmD}F3fv+Jss0N zAlL4}`g{#tAvf>EeCfyN2D$oE%xCp_I%Yg2azU@JWAd3gw$G8vdVL*J@86I0MRHxQ zuVd;94`O|l+>kV{=oYd>*nDI2p)@p3u{uMg@EV}k0y469K_4+hs ze<8W2*Q+u4`Wx83Lr&`TXiR;J9Df_zXa0%qkyBmF`|qHW@1k9D;XTY}^!hYrd^vJi zuUBL8-Urw|AlLMIHKsn_!}Cymty_-CQUv$gsu*gr=VNpcpUQ?vQI9OD^sz3a}BhYKxfuM=g1{;gIt@9 z?bCD6MRI;a%s1wuLvnsg%y;$r73O@mx50d3TXcq;BX`NccG$kQ5Zxvx7Gd5YXUQdU zV@GUX)9WLciK+)^BZIP zj*WKaqjM>AYio32TXbpxy0;&?dl=d}9332qu94g1qMmQo9A8b(H*1_d4g0GvM|a3c zJ^!m|?~)7T9@#z<`%jY_D>2_BJ7;6wBe%bX`7XJk=c6^_FPww*+4IpYay`I&gWS6a z^NEYmnIgJPZmz<7>PP4@*}DVt4RYo#%$I(OuIc${&G~Tk{Itduvh@JAZ;+jbFyDR{ zookREMRy-VcYcYkK926_`E1R2^C9L_PocdQxzCf_0_ z^n7wA?~(&@ot)ko_t)DGojCyAAP0K>IJ5s6xlK;!`Q%K!L(Y@ahvEJcIdta~w5#Wb zGyNCIHFBGrJPrHv$VGC4+#`FZV}Av5m7L9EeUEIPhxxdkU(FnEiku~v$xU)x&yQyM zbI1j9jocyIdVV$2Uw8?Qzxo}ttLHZ}^?7oY+$JYW*k9=ibV!b0h53M-*7J{<@#M%g zJ)f7!JJ(}>_6_I`xv&cJg`3c}p8v@7U()mY7?)qd`qDe-x}JZ>)R*-9JH~B2|Bi7( z&$naTB-iwOJ0{;CSM+>4CSTO^?HFe!_vfE5uIl-5jLUld8siQ*spqdT`M931#@Htp z_53m>-ytXUd@?3q((}C-hh$gJ|6=kLJ^zbwO3(LVT-Niw7$@|6F2+SYpNnxv&);I~ 
z>G@fVJ9>T=V_(m|V%*X5uNbHG{2#_yvVY+C@0X}|cIOM|!jb6AQRoUebu{LiOZ?yP=zl(e=ph@2LHC4#Ip&uYazd|0>_o>xUc1o$>D{RNmeTom-4< zlbyXW@9u*R_C>eJ?h?$WTy%+?-VgJZUhmZ$k54X>+vM~C*k4HYBHwSS^V1}|k?%KE z-ufc;=aTKn_n)dhPxd`*Upg3VN4_r|ZBKR%#rlwJ>GkT&`O4mf_4&xpL#qGcUE}|L zpx#--*U*_Sjki~MJMw+3a`Jf0mo7sWuNi+oRQ18vN6+Hu?=P!Nc=dQ*-AhG&zMdlP z-85bw7dLK3S8o~jRLRHh9=AFNwTP#Sy(h-=GsL;yjeDlpx^jFxYY4MfjeC~LFIahE zVWqX;gmb=f=E^hVcG=2h3(h$4^aW?Eutsks(a)Z7nt7Mz)r>XplmlbRK( ziR$V9Yg1PN^zp>GmMOPveV zzW->;iK5cTW-I-xy|3uZ>OH~ dYBT#c(&[ + let account_size = ExtensionType::try_calculate_account_len::(&[ ExtensionType::ImmutableOwner, ExtensionType::MemoTransfer, - ]); + ]) + .unwrap(); let mut account_data = vec![0; account_size]; let mut account_state = StateWithExtensionsMut::::unpack_uninitialized(&mut account_data) @@ -7466,8 +7467,10 @@ pub mod tests { bank.store_account(&token_account_pubkey, &token_account); // Add the mint - let mint_size = - ExtensionType::get_account_len::(&[ExtensionType::MintCloseAuthority]); + let mint_size = ExtensionType::try_calculate_account_len::(&[ + ExtensionType::MintCloseAuthority, + ]) + .unwrap(); let mint_base = Mint { mint_authority: COption::Some(owner), supply: 500, @@ -7931,10 +7934,11 @@ pub mod tests { delegated_amount: 30, close_authority: COption::Some(owner), }; - let account_size = ExtensionType::get_account_len::(&[ + let account_size = ExtensionType::try_calculate_account_len::(&[ ExtensionType::ImmutableOwner, ExtensionType::MemoTransfer, - ]); + ]) + .unwrap(); let mut account_data = vec![0; account_size]; let mut account_state = StateWithExtensionsMut::::unpack_uninitialized(&mut account_data) @@ -7957,8 +7961,10 @@ pub mod tests { }); bank.store_account(&token_account_pubkey, &token_account); - let mint_size = - ExtensionType::get_account_len::(&[ExtensionType::MintCloseAuthority]); + let mint_size = ExtensionType::try_calculate_account_len::(&[ + ExtensionType::MintCloseAuthority, + ]) + .unwrap(); let mint_base = Mint { mint_authority: 
COption::Some(owner), supply: 500, diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index bbdbc6b0bd6926..3c830f591403fe 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -13,8 +13,7 @@ edition = { workspace = true } Inflector = { workspace = true } base64 = { workspace = true } bincode = { workspace = true } -# NOTE: Use the workspace version once spl-associated-token-account uses borsh 0.10. -borsh0-9 = { package = "borsh", version = "0.9.3" } +borsh = { workspace = true } bs58 = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } diff --git a/transaction-status/src/parse_associated_token.rs b/transaction-status/src/parse_associated_token.rs index f14eab32897c8d..e03fd185a6002d 100644 --- a/transaction-status/src/parse_associated_token.rs +++ b/transaction-status/src/parse_associated_token.rs @@ -2,7 +2,7 @@ use { crate::parse_instruction::{ check_num_accounts, ParsableProgram, ParseInstructionError, ParsedInstructionEnum, }, - borsh0_9::BorshDeserialize, + borsh::BorshDeserialize, serde_json::json, solana_sdk::{instruction::CompiledInstruction, message::AccountKeys, pubkey::Pubkey}, spl_associated_token_account::instruction::AssociatedTokenAccountInstruction, diff --git a/transaction-status/src/parse_token.rs b/transaction-status/src/parse_token.rs index 33a29ff1403678..ce57111c958bef 100644 --- a/transaction-status/src/parse_token.rs +++ b/transaction-status/src/parse_token.rs @@ -3,9 +3,10 @@ use { check_num_accounts, ParsableProgram, ParseInstructionError, ParsedInstructionEnum, }, extension::{ - confidential_transfer::*, cpi_guard::*, default_account_state::*, interest_bearing_mint::*, - memo_transfer::*, mint_close_authority::*, permanent_delegate::*, reallocate::*, - transfer_fee::*, + confidential_transfer::*, confidential_transfer_fee::*, cpi_guard::*, + default_account_state::*, interest_bearing_mint::*, memo_transfer::*, metadata_pointer::*, + 
mint_close_authority::*, permanent_delegate::*, reallocate::*, transfer_fee::*, + transfer_hook::*, }, serde_json::{json, Map, Value}, solana_account_decoder::parse_token::{token_amount_to_ui_amount, UiAccountState}, @@ -229,7 +230,10 @@ pub fn parse_token( | AuthorityType::CloseMint | AuthorityType::InterestRate | AuthorityType::PermanentDelegate - | AuthorityType::ConfidentialTransferMint => "mint", + | AuthorityType::ConfidentialTransferMint + | AuthorityType::TransferHookProgramId + | AuthorityType::ConfidentialTransferFeeConfig + | AuthorityType::MetadataPointer => "mint", AuthorityType::AccountOwner | AuthorityType::CloseAccount => "account", }; let mut value = json!({ @@ -590,6 +594,62 @@ pub fn parse_token( account_keys, ) } + TokenInstruction::TransferHookExtension => { + if instruction.data.len() < 2 { + return Err(ParseInstructionError::InstructionNotParsable( + ParsableProgram::SplToken, + )); + } + parse_transfer_hook_instruction( + &instruction.data[1..], + &instruction.accounts, + account_keys, + ) + } + TokenInstruction::ConfidentialTransferFeeExtension => { + if instruction.data.len() < 2 { + return Err(ParseInstructionError::InstructionNotParsable( + ParsableProgram::SplToken, + )); + } + parse_confidential_transfer_fee_instruction( + &instruction.data[1..], + &instruction.accounts, + account_keys, + ) + } + TokenInstruction::WithdrawExcessLamports => { + check_num_token_accounts(&instruction.accounts, 3)?; + let mut value = json!({ + "source": account_keys[instruction.accounts[0] as usize].to_string(), + "destination": account_keys[instruction.accounts[1] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 2, + account_keys, + &instruction.accounts, + "authority", + "multisigAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawExcessLamports".to_string(), + info: value, + }) + } + TokenInstruction::MetadataPointerExtension => { + if instruction.data.len() < 2 { + return 
Err(ParseInstructionError::InstructionNotParsable( + ParsableProgram::SplToken, + )); + } + parse_metadata_pointer_instruction( + &instruction.data[1..], + &instruction.accounts, + account_keys, + ) + } } } @@ -606,6 +666,9 @@ pub enum UiAuthorityType { InterestRate, PermanentDelegate, ConfidentialTransferMint, + TransferHookProgramId, + ConfidentialTransferFeeConfig, + MetadataPointer, } impl From for UiAuthorityType { @@ -621,6 +684,11 @@ impl From for UiAuthorityType { AuthorityType::InterestRate => UiAuthorityType::InterestRate, AuthorityType::PermanentDelegate => UiAuthorityType::PermanentDelegate, AuthorityType::ConfidentialTransferMint => UiAuthorityType::ConfidentialTransferMint, + AuthorityType::TransferHookProgramId => UiAuthorityType::TransferHookProgramId, + AuthorityType::ConfidentialTransferFeeConfig => { + UiAuthorityType::ConfidentialTransferFeeConfig + } + AuthorityType::MetadataPointer => UiAuthorityType::MetadataPointer, } } } @@ -642,6 +710,12 @@ pub enum UiExtensionType { CpiGuard, PermanentDelegate, NonTransferableAccount, + TransferHook, + TransferHookAccount, + ConfidentialTransferFeeConfig, + ConfidentialTransferFeeAmount, + MetadataPointer, + TokenMetadata, } impl From for UiExtensionType { @@ -663,6 +737,16 @@ impl From for UiExtensionType { ExtensionType::CpiGuard => UiExtensionType::CpiGuard, ExtensionType::PermanentDelegate => UiExtensionType::PermanentDelegate, ExtensionType::NonTransferableAccount => UiExtensionType::NonTransferableAccount, + ExtensionType::TransferHook => UiExtensionType::TransferHook, + ExtensionType::TransferHookAccount => UiExtensionType::TransferHookAccount, + ExtensionType::ConfidentialTransferFeeConfig => { + UiExtensionType::ConfidentialTransferFeeConfig + } + ExtensionType::ConfidentialTransferFeeAmount => { + UiExtensionType::ConfidentialTransferFeeAmount + } + ExtensionType::MetadataPointer => UiExtensionType::MetadataPointer, + ExtensionType::TokenMetadata => UiExtensionType::TokenMetadata, } } } diff 
--git a/transaction-status/src/parse_token/extension/confidential_transfer.rs b/transaction-status/src/parse_token/extension/confidential_transfer.rs index 44384f119318f9..138e885fb33bad 100644 --- a/transaction-status/src/parse_token/extension/confidential_transfer.rs +++ b/transaction-status/src/parse_token/extension/confidential_transfer.rs @@ -192,8 +192,8 @@ pub(in crate::parse_token) fn parse_confidential_transfer_instruction( let proof_instruction_offset: i8 = transfer_data.proof_instruction_offset; let mut value = json!({ "source": account_keys[account_indexes[0] as usize].to_string(), - "destination": account_keys[account_indexes[1] as usize].to_string(), - "mint": account_keys[account_indexes[2] as usize].to_string(), + "mint": account_keys[account_indexes[1] as usize].to_string(), + "destination": account_keys[account_indexes[2] as usize].to_string(), "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), "newSourceDecryptableAvailableBalance": format!("{}", transfer_data.new_source_decryptable_available_balance), "proofInstructionOffset": proof_instruction_offset, @@ -322,85 +322,37 @@ pub(in crate::parse_token) fn parse_confidential_transfer_instruction( info: value, }) } - ConfidentialTransferInstruction::WithdrawWithheldTokensFromMint => { - check_num_token_accounts(account_indexes, 4)?; - let withdraw_withheld_data: WithdrawWithheldTokensFromMintData = - *decode_instruction_data(instruction_data).map_err(|_| { - ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) - })?; - let proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset; - let mut value = json!({ - "mint": account_keys[account_indexes[0] as usize].to_string(), - "feeRecipient": account_keys[account_indexes[1] as usize].to_string(), - "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(), - "proofInstructionOffset": proof_instruction_offset, - - }); - let map = value.as_object_mut().unwrap(); - 
parse_signers( - map, - 3, - account_keys, - account_indexes, - "withdrawWithheldAuthority", - "multisigWithdrawWithheldAuthority", - ); - Ok(ParsedInstructionEnum { - instruction_type: "withdrawWithheldConfidentialTransferTokensFromMint".to_string(), - info: value, - }) - } - ConfidentialTransferInstruction::WithdrawWithheldTokensFromAccounts => { - let withdraw_withheld_data: WithdrawWithheldTokensFromAccountsData = + ConfidentialTransferInstruction::TransferWithSplitProofs => { + check_num_token_accounts(account_indexes, 7)?; + let transfer_data: TransferWithSplitProofsInstructionData = *decode_instruction_data(instruction_data).map_err(|_| { ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) })?; - let num_token_accounts = withdraw_withheld_data.num_token_accounts; - check_num_token_accounts(account_indexes, 4 + num_token_accounts as usize)?; - let proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset; - let mut value = json!({ - "mint": account_keys[account_indexes[0] as usize].to_string(), - "feeRecipient": account_keys[account_indexes[1] as usize].to_string(), - "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(), - "proofInstructionOffset": proof_instruction_offset, - }); - let map = value.as_object_mut().unwrap(); - let mut source_accounts: Vec = vec![]; - let first_source_account_index = account_indexes - .len() - .saturating_sub(num_token_accounts as usize); - for i in account_indexes[first_source_account_index..].iter() { - source_accounts.push(account_keys[*i as usize].to_string()); - } - map.insert("sourceAccounts".to_string(), json!(source_accounts)); - parse_signers( - map, - 3, - account_keys, - &account_indexes[..first_source_account_index], - "withdrawWithheldAuthority", - "multisigWithdrawWithheldAuthority", - ); - Ok(ParsedInstructionEnum { - instruction_type: "withdrawWithheldConfidentialTransferTokensFromAccounts" - .to_string(), - info: value, - }) - } - 
ConfidentialTransferInstruction::HarvestWithheldTokensToMint => { - check_num_token_accounts(account_indexes, 1)?; let mut value = json!({ - "mint": account_keys[account_indexes[0] as usize].to_string(), - + "source": account_keys[account_indexes[0] as usize].to_string(), + "mint": account_keys[account_indexes[1] as usize].to_string(), + "destination": account_keys[account_indexes[2] as usize].to_string(), + "ciphertextCommitmentEqualityContext": account_keys[account_indexes[3] as usize].to_string(), + "batchedGroupedCiphertext2HandlesValidityContext": account_keys[account_indexes[4] as usize].to_string(), + "batchedRangeProofContext": account_keys[account_indexes[5] as usize].to_string(), + "owner": account_keys[account_indexes[6] as usize].to_string(), + "newSourceDecryptableAvailableBalance": format!("{}", transfer_data.new_source_decryptable_available_balance), + "noOpOnUninitializedSplitContextState": bool::from(transfer_data.no_op_on_uninitialized_split_context_state), + "closeSplitContextStateOnExecution": bool::from(transfer_data.close_split_context_state_on_execution), }); let map = value.as_object_mut().unwrap(); - let mut source_accounts: Vec = vec![]; - for i in account_indexes.iter().skip(1) { - source_accounts.push(account_keys[*i as usize].to_string()); + if transfer_data.close_split_context_state_on_execution.into() { + map.insert( + "lamportDestination".to_string(), + json!(account_keys[account_indexes[7] as usize].to_string()), + ); + map.insert( + "contextStateOwner".to_string(), + json!(account_keys[account_indexes[8] as usize].to_string()), + ); } - map.insert("sourceAccounts".to_string(), json!(source_accounts)); Ok(ParsedInstructionEnum { - instruction_type: "harvestWithheldConfidentialTransferTokensToMint".to_string(), + instruction_type: "confidentialTransferWithSplitProofs".to_string(), info: value, }) } diff --git a/transaction-status/src/parse_token/extension/confidential_transfer_fee.rs 
b/transaction-status/src/parse_token/extension/confidential_transfer_fee.rs new file mode 100644 index 00000000000000..f35fad62c095fe --- /dev/null +++ b/transaction-status/src/parse_token/extension/confidential_transfer_fee.rs @@ -0,0 +1,159 @@ +use { + super::*, + solana_account_decoder::parse_token_extension::UiConfidentialTransferFeeConfig, + spl_token_2022::{ + extension::confidential_transfer_fee::{instruction::*, ConfidentialTransferFeeConfig}, + instruction::{decode_instruction_data, decode_instruction_type}, + }, +}; + +pub(in crate::parse_token) fn parse_confidential_transfer_fee_instruction( + instruction_data: &[u8], + account_indexes: &[u8], + account_keys: &AccountKeys, +) -> Result { + match decode_instruction_type(instruction_data) + .map_err(|_| ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken))? + { + ConfidentialTransferFeeInstruction::InitializeConfidentialTransferFeeConfig => { + check_num_token_accounts(account_indexes, 1)?; + let confidential_transfer_mint: ConfidentialTransferFeeConfig = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let confidential_transfer_mint: UiConfidentialTransferFeeConfig = + confidential_transfer_mint.into(); + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + map.append(json!(confidential_transfer_mint).as_object_mut().unwrap()); + Ok(ParsedInstructionEnum { + instruction_type: "initializeConfidentialTransferFeeConfig".to_string(), + info: value, + }) + } + ConfidentialTransferFeeInstruction::WithdrawWithheldTokensFromMint => { + check_num_token_accounts(account_indexes, 4)?; + let withdraw_withheld_data: WithdrawWithheldTokensFromMintData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let 
proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + "feeRecipient": account_keys[account_indexes[1] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 3, + account_keys, + account_indexes, + "withdrawWithheldAuthority", + "multisigWithdrawWithheldAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawWithheldConfidentialTransferTokensFromMint".to_string(), + info: value, + }) + } + ConfidentialTransferFeeInstruction::WithdrawWithheldTokensFromAccounts => { + let withdraw_withheld_data: WithdrawWithheldTokensFromAccountsData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let num_token_accounts = withdraw_withheld_data.num_token_accounts; + check_num_token_accounts(account_indexes, 4 + num_token_accounts as usize)?; + let proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + "feeRecipient": account_keys[account_indexes[1] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(), + "proofInstructionOffset": proof_instruction_offset, + }); + let map = value.as_object_mut().unwrap(); + let mut source_accounts: Vec = vec![]; + let first_source_account_index = account_indexes + .len() + .saturating_sub(num_token_accounts as usize); + for i in account_indexes[first_source_account_index..].iter() { + source_accounts.push(account_keys[*i as usize].to_string()); + } + map.insert("sourceAccounts".to_string(), json!(source_accounts)); + parse_signers( + map, + 3, + account_keys, + 
&account_indexes[..first_source_account_index], + "withdrawWithheldAuthority", + "multisigWithdrawWithheldAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawWithheldConfidentialTransferTokensFromAccounts" + .to_string(), + info: value, + }) + } + ConfidentialTransferFeeInstruction::HarvestWithheldTokensToMint => { + check_num_token_accounts(account_indexes, 1)?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + let mut source_accounts: Vec = vec![]; + for i in account_indexes.iter().skip(1) { + source_accounts.push(account_keys[*i as usize].to_string()); + } + map.insert("sourceAccounts".to_string(), json!(source_accounts)); + Ok(ParsedInstructionEnum { + instruction_type: "harvestWithheldConfidentialTransferTokensToMint".to_string(), + info: value, + }) + } + ConfidentialTransferFeeInstruction::EnableHarvestToMint => { + check_num_token_accounts(account_indexes, 2)?; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "enableConfidentialTransferFeeHarvestToMint".to_string(), + info: value, + }) + } + ConfidentialTransferFeeInstruction::DisableHarvestToMint => { + check_num_token_accounts(account_indexes, 2)?; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "disableConfidentialTransferFeeHarvestToMint".to_string(), + info: value, + }) + } + } +} diff --git a/transaction-status/src/parse_token/extension/metadata_pointer.rs 
b/transaction-status/src/parse_token/extension/metadata_pointer.rs new file mode 100644 index 00000000000000..e88a14732fd89b --- /dev/null +++ b/transaction-status/src/parse_token/extension/metadata_pointer.rs @@ -0,0 +1,192 @@ +use { + super::*, + spl_token_2022::{ + extension::metadata_pointer::instruction::*, + instruction::{decode_instruction_data, decode_instruction_type}, + }, +}; + +pub(in crate::parse_token) fn parse_metadata_pointer_instruction( + instruction_data: &[u8], + account_indexes: &[u8], + account_keys: &AccountKeys, +) -> Result { + match decode_instruction_type(instruction_data) + .map_err(|_| ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken))? + { + MetadataPointerInstruction::Initialize => { + check_num_token_accounts(account_indexes, 1)?; + let InitializeInstructionData { + authority, + metadata_address, + } = *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + if let Some(authority) = Option::::from(authority) { + map.insert("authority".to_string(), json!(authority.to_string())); + } + if let Some(metadata_address) = Option::::from(metadata_address) { + map.insert( + "metadataAddress".to_string(), + json!(metadata_address.to_string()), + ); + } + Ok(ParsedInstructionEnum { + instruction_type: "initializeMetadataPointer".to_string(), + info: value, + }) + } + MetadataPointerInstruction::Update => { + check_num_token_accounts(account_indexes, 2)?; + let UpdateInstructionData { metadata_address } = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + if let 
Some(metadata_address) = Option::::from(metadata_address) { + map.insert( + "metadataAddress".to_string(), + json!(metadata_address.to_string()), + ); + } + parse_signers( + map, + 1, + account_keys, + account_indexes, + "authority", + "multisigAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "updateMetadataPointer".to_string(), + info: value, + }) + } + } +} + +#[cfg(test)] +mod test { + use { + super::*, crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, + spl_token_2022::solana_program::message::Message, + }; + + #[test] + fn test_parse_metadata_pointer_instruction() { + let mint_pubkey = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let metadata_address = Pubkey::new_unique(); + + // Initialize variations + let init_ix = initialize( + &spl_token_2022::id(), + &mint_pubkey, + Some(authority), + Some(metadata_address), + ) + .unwrap(); + let message = Message::new(&[init_ix], None); + let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + assert_eq!( + parse_token( + &compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "initializeMetadataPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "authority": authority.to_string(), + "metadataAddress": metadata_address.to_string(), + }) + } + ); + + let init_ix = initialize(&spl_token_2022::id(), &mint_pubkey, None, None).unwrap(); + let message = Message::new(&[init_ix], None); + let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + assert_eq!( + parse_token( + &compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "initializeMetadataPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + }) + } + ); + + // Single owner Update + let update_ix = update( + &spl_token_2022::id(), + &mint_pubkey, + &authority, + &[], + 
Some(metadata_address), + ) + .unwrap(); + let message = Message::new(&[update_ix], None); + let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + assert_eq!( + parse_token( + &compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "updateMetadataPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "authority": authority.to_string(), + "metadataAddress": metadata_address.to_string(), + }) + } + ); + + // Multisig Update + let multisig_pubkey = Pubkey::new_unique(); + let multisig_signer0 = Pubkey::new_unique(); + let multisig_signer1 = Pubkey::new_unique(); + let update_ix = update( + &spl_token_2022::id(), + &mint_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], + Some(metadata_address), + ) + .unwrap(); + let message = Message::new(&[update_ix], None); + let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + assert_eq!( + parse_token( + &compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "updateMetadataPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "metadataAddress": metadata_address.to_string(), + "multisigAuthority": multisig_pubkey.to_string(), + "signers": vec![ + multisig_signer0.to_string(), + multisig_signer1.to_string(), + ], + }) + } + ); + } +} diff --git a/transaction-status/src/parse_token/extension/mod.rs b/transaction-status/src/parse_token/extension/mod.rs index 1dd2a829332d28..8e65ddfcfc691f 100644 --- a/transaction-status/src/parse_token/extension/mod.rs +++ b/transaction-status/src/parse_token/extension/mod.rs @@ -1,11 +1,14 @@ use super::*; pub(super) mod confidential_transfer; +pub(super) mod confidential_transfer_fee; pub(super) mod cpi_guard; pub(super) mod default_account_state; pub(super) mod interest_bearing_mint; pub(super) mod memo_transfer; 
+pub(super) mod metadata_pointer; pub(super) mod mint_close_authority; pub(super) mod permanent_delegate; pub(super) mod reallocate; pub(super) mod transfer_fee; +pub(super) mod transfer_hook; diff --git a/transaction-status/src/parse_token/extension/transfer_hook.rs b/transaction-status/src/parse_token/extension/transfer_hook.rs new file mode 100644 index 00000000000000..e6b33c058f96de --- /dev/null +++ b/transaction-status/src/parse_token/extension/transfer_hook.rs @@ -0,0 +1,186 @@ +use { + super::*, + spl_token_2022::{ + extension::transfer_hook::instruction::*, + instruction::{decode_instruction_data, decode_instruction_type}, + }, +}; + +pub(in crate::parse_token) fn parse_transfer_hook_instruction( + instruction_data: &[u8], + account_indexes: &[u8], + account_keys: &AccountKeys, +) -> Result { + match decode_instruction_type(instruction_data) + .map_err(|_| ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken))? + { + TransferHookInstruction::Initialize => { + check_num_token_accounts(account_indexes, 1)?; + let InitializeInstructionData { + authority, + program_id, + } = *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + if let Some(authority) = Option::::from(authority) { + map.insert("authority".to_string(), json!(authority.to_string())); + } + if let Some(program_id) = Option::::from(program_id) { + map.insert("programId".to_string(), json!(program_id.to_string())); + } + Ok(ParsedInstructionEnum { + instruction_type: "initializeTransferHook".to_string(), + info: value, + }) + } + TransferHookInstruction::Update => { + check_num_token_accounts(account_indexes, 2)?; + let UpdateInstructionData { program_id } = *decode_instruction_data(instruction_data) + .map_err(|_| { + 
ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + if let Some(program_id) = Option::::from(program_id) { + map.insert("programId".to_string(), json!(program_id.to_string())); + } + parse_signers( + map, + 1, + account_keys, + account_indexes, + "authority", + "multisigAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "updateTransferHook".to_string(), + info: value, + }) + } + } +} + +#[cfg(test)] +mod test { + use { + super::*, crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, + spl_token_2022::solana_program::message::Message, + }; + + #[test] + fn test_parse_transfer_hook_instruction() { + let mint_pubkey = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let program_id = Pubkey::new_unique(); + + // Initialize variations + let init_ix = initialize( + &spl_token_2022::id(), + &mint_pubkey, + Some(authority), + Some(program_id), + ) + .unwrap(); + let message = Message::new(&[init_ix], None); + let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + assert_eq!( + parse_token( + &compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "initializeTransferHook".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "authority": authority.to_string(), + "programId": program_id.to_string(), + }) + } + ); + + let init_ix = initialize(&spl_token_2022::id(), &mint_pubkey, None, None).unwrap(); + let message = Message::new(&[init_ix], None); + let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + assert_eq!( + parse_token( + &compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "initializeTransferHook".to_string(), + info: json!({ + "mint": 
mint_pubkey.to_string(), + }) + } + ); + + // Single owner Update + let update_ix = update( + &spl_token_2022::id(), + &mint_pubkey, + &authority, + &[], + Some(program_id), + ) + .unwrap(); + let message = Message::new(&[update_ix], None); + let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + assert_eq!( + parse_token( + &compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "updateTransferHook".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "authority": authority.to_string(), + "programId": program_id.to_string(), + }) + } + ); + + // Multisig Update + let multisig_pubkey = Pubkey::new_unique(); + let multisig_signer0 = Pubkey::new_unique(); + let multisig_signer1 = Pubkey::new_unique(); + let update_ix = update( + &spl_token_2022::id(), + &mint_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], + Some(program_id), + ) + .unwrap(); + let message = Message::new(&[update_ix], None); + let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + assert_eq!( + parse_token( + &compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "updateTransferHook".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "programId": program_id.to_string(), + "multisigAuthority": multisig_pubkey.to_string(), + "signers": vec![ + multisig_signer0.to_string(), + multisig_signer1.to_string(), + ], + }) + } + ); + } +} From bdc4cbba4774f77ee7ed3839861cf22aa2b55a0c Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sat, 30 Sep 2023 01:46:54 +0800 Subject: [PATCH 215/407] ci: fix Windows build (#33467) ci: fix windows build --- .github/workflows/release-artifacts.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 
e89cf87b1dc2cf..3e5ab89fe33c57 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -48,7 +48,16 @@ jobs: shell: bash run: | choco install openssl - export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64" + if [[ -d "C:\Program Files\OpenSSL" ]]; then + echo "OPENSSL_DIR: C:\Program Files\OpenSSL" + export OPENSSL_DIR="C:\Program Files\OpenSSL" + elif [[ -d "C:\Program Files\OpenSSL-Win64" ]]; then + echo "OPENSSL_DIR: C:\Program Files\OpenSSL-Win64" + export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64" + else + echo "can't determine OPENSSL_DIR" + exit 1 + fi choco install protoc export PROTOC="C:\ProgramData\chocolatey\lib\protoc\tools\bin\protoc.exe" source /tmp/env.sh From ec2e1241a1c4b27360476f40ab3ece2105423be1 Mon Sep 17 00:00:00 2001 From: carllin Date: Fri, 29 Sep 2023 15:11:25 -0700 Subject: [PATCH 216/407] Cleanup select_vote_and_reset_forks() (#33421) --- core/src/replay_stage.rs | 93 +++++++++++++++++++++++++++++++--------- 1 file changed, 73 insertions(+), 20 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index b7d7db0dcec595..37067ce38f556d 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -3440,8 +3440,17 @@ impl ReplayStage { // 3) The best "selected" bank is on a different fork, // switch_threshold succeeds let mut failure_reasons = vec![]; - let selected_fork = { - let switch_fork_decision = tower.check_switch_threshold( + struct CandidateVoteAndResetBanks<'a> { + // A bank that the validator will vote on given it passes all + // remaining vote checks + candidate_vote_bank: Option<&'a Arc>, + // A bank that the validator will reset its PoH to regardless + // of voting behavior + reset_bank: Option<&'a Arc>, + switch_fork_decision: SwitchForkDecision, + } + let candidate_vote_and_reset_banks = { + let switch_fork_decision: SwitchForkDecision = tower.check_switch_threshold( heaviest_bank.slot(), ancestors, descendants, @@ -3456,9 +3465,8 @@ impl 
ReplayStage { match switch_fork_decision { SwitchForkDecision::FailedSwitchThreshold(switch_proof_stake, total_stake) => { - let reset_bank = heaviest_bank_on_same_voted_fork; let final_switch_fork_decision = Self::select_forks_failed_switch_threshold( - reset_bank.map(|bank| bank.as_ref()), + heaviest_bank_on_same_voted_fork.map(|bank| bank.as_ref()), progress, tower, heaviest_bank.slot(), @@ -3467,7 +3475,22 @@ impl ReplayStage { total_stake, switch_fork_decision, ); - reset_bank.map(|b| (b, final_switch_fork_decision)) + let candidate_vote_bank = if final_switch_fork_decision.can_vote() { + // The only time we would still vote despite `!switch_fork_decision.can_vote()` + // is if we switched the vote candidate to `heaviest_bank_on_same_voted_fork` + // because we needed to refresh the vote to the tip of our last voted fork. + heaviest_bank_on_same_voted_fork + } else { + // Otherwise, we should just return the original vote candidate, the heaviest bank + // for logging purposes, namely to check if there are any additional voting failures + // besides the switch threshold + Some(heaviest_bank) + }; + CandidateVoteAndResetBanks { + candidate_vote_bank, + reset_bank: heaviest_bank_on_same_voted_fork, + switch_fork_decision: final_switch_fork_decision, + } } SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor) => { // If we can't switch and our last vote was on an unconfirmed, duplicate slot, @@ -3517,13 +3540,29 @@ impl ReplayStage { 0, // In this case we never actually performed the switch check, 0 for now 0, )); - reset_bank.map(|b| (b, switch_fork_decision)) + CandidateVoteAndResetBanks { + candidate_vote_bank: None, + reset_bank, + switch_fork_decision, + } } - _ => Some((heaviest_bank, switch_fork_decision)), + _ => CandidateVoteAndResetBanks { + candidate_vote_bank: Some(heaviest_bank), + reset_bank: Some(heaviest_bank), + switch_fork_decision, + }, } }; - if let Some((bank, switch_fork_decision)) = selected_fork { + let 
CandidateVoteAndResetBanks { + candidate_vote_bank, + reset_bank, + switch_fork_decision, + } = candidate_vote_and_reset_banks; + + if let Some(candidate_vote_bank) = candidate_vote_bank { + // If there's a bank to potentially vote on, then make the remaining + // checks let ( is_locked_out, vote_threshold, @@ -3533,8 +3572,10 @@ impl ReplayStage { total_threshold_stake, total_epoch_stake, ) = { - let fork_stats = progress.get_fork_stats(bank.slot()).unwrap(); - let propagated_stats = &progress.get_propagated_stats(bank.slot()).unwrap(); + let fork_stats = progress.get_fork_stats(candidate_vote_bank.slot()).unwrap(); + let propagated_stats = &progress + .get_propagated_stats(candidate_vote_bank.slot()) + .unwrap(); ( fork_stats.is_locked_out, fork_stats.vote_threshold, @@ -3548,22 +3589,22 @@ impl ReplayStage { let propagation_confirmed = is_leader_slot || progress - .get_leader_propagation_slot_must_exist(bank.slot()) + .get_leader_propagation_slot_must_exist(candidate_vote_bank.slot()) .0; if is_locked_out { - failure_reasons.push(HeaviestForkFailures::LockedOut(bank.slot())); + failure_reasons.push(HeaviestForkFailures::LockedOut(candidate_vote_bank.slot())); } if let ThresholdDecision::FailedThreshold(fork_stake) = vote_threshold { failure_reasons.push(HeaviestForkFailures::FailedThreshold( - bank.slot(), + candidate_vote_bank.slot(), fork_stake, total_threshold_stake, )); } if !propagation_confirmed { failure_reasons.push(HeaviestForkFailures::NoPropagatedConfirmation( - bank.slot(), + candidate_vote_bank.slot(), propagated_stake, total_epoch_stake, )); @@ -3574,19 +3615,25 @@ impl ReplayStage { && propagation_confirmed && switch_fork_decision.can_vote() { - info!("voting: {} {}", bank.slot(), fork_weight); + info!("voting: {} {}", candidate_vote_bank.slot(), fork_weight); SelectVoteAndResetForkResult { - vote_bank: Some((bank.clone(), switch_fork_decision)), - reset_bank: Some(bank.clone()), + vote_bank: Some((candidate_vote_bank.clone(), 
switch_fork_decision)), + reset_bank: Some(candidate_vote_bank.clone()), heaviest_fork_failures: failure_reasons, } } else { SelectVoteAndResetForkResult { vote_bank: None, - reset_bank: Some(bank.clone()), + reset_bank: reset_bank.cloned(), heaviest_fork_failures: failure_reasons, } } + } else if reset_bank.is_some() { + SelectVoteAndResetForkResult { + vote_bank: None, + reset_bank: reset_bank.cloned(), + heaviest_fork_failures: failure_reasons, + } } else { SelectVoteAndResetForkResult { vote_bank: None, @@ -8147,7 +8194,10 @@ pub(crate) mod tests { assert_eq!(reset_fork, Some(6)); assert_eq!( failures, - vec![HeaviestForkFailures::FailedSwitchThreshold(4, 0, 30000),] + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 30000), + HeaviestForkFailures::LockedOut(4) + ] ); let (vote_fork, reset_fork, failures) = run_compute_and_select_forks( @@ -8163,7 +8213,10 @@ pub(crate) mod tests { assert_eq!(reset_fork, Some(6)); assert_eq!( failures, - vec![HeaviestForkFailures::FailedSwitchThreshold(4, 0, 30000),] + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 30000), + HeaviestForkFailures::LockedOut(4) + ] ); } From 4866789b6737ba512770a7a57b677f9f56795805 Mon Sep 17 00:00:00 2001 From: steviez Date: Sat, 30 Sep 2023 00:14:40 +0200 Subject: [PATCH 217/407] Fix off-by-one bug in Blockstore::purge_exact() (#33463) --- ledger/src/blockstore/blockstore_purge.rs | 24 +++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 7a58d78750446e..090096d17e970a 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -354,8 +354,7 @@ impl Blockstore { ) -> Result<()> { let mut index0 = self.transaction_status_index_cf.get(0)?.unwrap_or_default(); let mut index1 = self.transaction_status_index_cf.get(1)?.unwrap_or_default(); - let to_slot = to_slot.saturating_add(1); - for slot in 
from_slot..to_slot { + for slot in from_slot..=to_slot { let slot_entries = self.get_any_valid_slot_entries(slot, 0); let transactions = slot_entries .into_iter() @@ -1100,6 +1099,27 @@ pub mod tests { assert_eq!(entry.0, 2); // Buffer entry, no index 1 entries remaining drop(status_entry_iterator); + // Purge up to but not including index0_max_slot + clear_and_repopulate_transaction_statuses_for_test( + &blockstore, + index0_max_slot, + index1_max_slot, + ); + blockstore + .run_purge(0, index0_max_slot - 1, PurgeType::Exact) + .unwrap(); + assert_eq!( + blockstore + .transaction_status_index_cf + .get(0) + .unwrap() + .unwrap(), + TransactionStatusIndexMeta { + max_slot: index0_max_slot, + frozen: true, + } + ); + // Test purge all clear_and_repopulate_transaction_statuses_for_test( &blockstore, From d5195921a9916a0917a1407a8e55e9ac3feaeabd Mon Sep 17 00:00:00 2001 From: Jon Cinque Date: Sat, 30 Sep 2023 00:40:48 +0200 Subject: [PATCH 218/407] transaction-status: Remove conversions between spl re-exports and local sdk (#33456) * transaction-status: Remove `convert_pubkey` Ran `git g -l convert_pubkey | xargs sed -i'' -re 's/convert_pubkey\(([^)]+)\)/\1/g'` * Remove convert_compiled_instruction Ran `git g -l convert_compiled_instruction | xargs sed -i'' -re 's/convert_compiled_instruction\(([^)]+)\)/\1/g'` * Cleanup + clippy * Remove instruction conversions in new extensions * Run clippy --fix --- .../src/parse_associated_token.rs | 116 +-- transaction-status/src/parse_token.rs | 890 +++++++----------- .../src/parse_token/extension/cpi_guard.rs | 53 +- .../extension/default_account_state.rs | 28 +- .../parse_token/extension/memo_transfer.rs | 43 +- .../parse_token/extension/metadata_pointer.rs | 29 +- .../extension/mint_close_authority.rs | 21 +- .../extension/permanent_delegate.rs | 13 +- .../src/parse_token/extension/reallocate.rs | 26 +- .../src/parse_token/extension/transfer_fee.rs | 129 ++- .../parse_token/extension/transfer_hook.rs | 29 +- 11 files changed, 
528 insertions(+), 849 deletions(-) diff --git a/transaction-status/src/parse_associated_token.rs b/transaction-status/src/parse_associated_token.rs index e03fd185a6002d..868f453f1ce22b 100644 --- a/transaction-status/src/parse_associated_token.rs +++ b/transaction-status/src/parse_associated_token.rs @@ -94,48 +94,26 @@ mod test { use spl_associated_token_account::create_associated_token_account as create_associated_token_account_deprecated; use { super::*, + solana_sdk::{message::Message, sysvar}, spl_associated_token_account::{ get_associated_token_address, get_associated_token_address_with_program_id, instruction::{ create_associated_token_account, create_associated_token_account_idempotent, recover_nested, }, - solana_program::{ - instruction::CompiledInstruction as SplAssociatedTokenCompiledInstruction, - message::Message, pubkey::Pubkey as SplAssociatedTokenPubkey, sysvar, - }, }, }; - fn convert_pubkey(pubkey: Pubkey) -> SplAssociatedTokenPubkey { - SplAssociatedTokenPubkey::new_from_array(pubkey.to_bytes()) - } - - fn convert_compiled_instruction( - instruction: &SplAssociatedTokenCompiledInstruction, - ) -> CompiledInstruction { - CompiledInstruction { - program_id_index: instruction.program_id_index, - accounts: instruction.accounts.clone(), - data: instruction.data.clone(), - } - } - #[test] fn test_parse_create_deprecated() { let funder = Pubkey::new_unique(); let wallet_address = Pubkey::new_unique(); let mint = Pubkey::new_unique(); - let associated_account_address = - get_associated_token_address(&convert_pubkey(wallet_address), &convert_pubkey(mint)); + let associated_account_address = get_associated_token_address(&wallet_address, &mint); #[allow(deprecated)] - let create_ix = create_associated_token_account_deprecated( - &convert_pubkey(funder), - &convert_pubkey(wallet_address), - &convert_pubkey(mint), - ); - let message = Message::new(&[create_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + 
let create_ix = create_associated_token_account_deprecated(&funder, &wallet_address, &mint); + let mut message = Message::new(&[create_ix], None); + let compiled_instruction = &mut message.instructions[0]; let expected_parsed_ix = ParsedInstructionEnum { instruction_type: "create".to_string(), info: json!({ @@ -149,7 +127,7 @@ mod test { }; assert_eq!( parse_associated_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -165,7 +143,7 @@ mod test { compiled_instruction.accounts.remove(rent_account_index); assert_eq!( parse_associated_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -175,7 +153,7 @@ mod test { // after popping another account, parsing should fail compiled_instruction.accounts.pop(); assert!(parse_associated_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .is_err()); @@ -187,22 +165,15 @@ mod test { let wallet_address = Pubkey::new_unique(); let mint = Pubkey::new_unique(); let token_program_id = Pubkey::new_unique(); - let associated_account_address = get_associated_token_address_with_program_id( - &convert_pubkey(wallet_address), - &convert_pubkey(mint), - &convert_pubkey(token_program_id), - ); - let create_ix = create_associated_token_account( - &convert_pubkey(funder), - &convert_pubkey(wallet_address), - &convert_pubkey(mint), - &convert_pubkey(token_program_id), - ); - let message = Message::new(&[create_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let associated_account_address = + get_associated_token_address_with_program_id(&wallet_address, &mint, &token_program_id); + let create_ix = + create_associated_token_account(&funder, &wallet_address, &mint, &token_program_id); + let mut message = Message::new(&[create_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( 
parse_associated_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -220,7 +191,7 @@ mod test { ); compiled_instruction.accounts.pop(); assert!(parse_associated_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .is_err()); @@ -232,22 +203,19 @@ mod test { let wallet_address = Pubkey::new_unique(); let mint = Pubkey::new_unique(); let token_program_id = Pubkey::new_unique(); - let associated_account_address = get_associated_token_address_with_program_id( - &convert_pubkey(wallet_address), - &convert_pubkey(mint), - &convert_pubkey(token_program_id), - ); + let associated_account_address = + get_associated_token_address_with_program_id(&wallet_address, &mint, &token_program_id); let create_ix = create_associated_token_account_idempotent( - &convert_pubkey(funder), - &convert_pubkey(wallet_address), - &convert_pubkey(mint), - &convert_pubkey(token_program_id), + &funder, + &wallet_address, + &mint, + &token_program_id, ); - let message = Message::new(&[create_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[create_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( parse_associated_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -265,7 +233,7 @@ mod test { ); compiled_instruction.accounts.pop(); assert!(parse_associated_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .is_err()); @@ -278,31 +246,31 @@ mod test { let nested_mint = Pubkey::new_unique(); let token_program_id = Pubkey::new_unique(); let owner_associated_account_address = get_associated_token_address_with_program_id( - &convert_pubkey(wallet_address), - &convert_pubkey(owner_mint), - &convert_pubkey(token_program_id), + &wallet_address, + 
&owner_mint, + &token_program_id, ); let nested_associated_account_address = get_associated_token_address_with_program_id( &owner_associated_account_address, - &convert_pubkey(nested_mint), - &convert_pubkey(token_program_id), + &nested_mint, + &token_program_id, ); let destination_associated_account_address = get_associated_token_address_with_program_id( - &convert_pubkey(wallet_address), - &convert_pubkey(nested_mint), - &convert_pubkey(token_program_id), + &wallet_address, + &nested_mint, + &token_program_id, ); let recover_ix = recover_nested( - &convert_pubkey(wallet_address), - &convert_pubkey(owner_mint), - &convert_pubkey(nested_mint), - &convert_pubkey(token_program_id), + &wallet_address, + &owner_mint, + &nested_mint, + &token_program_id, ); - let message = Message::new(&[recover_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[recover_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( parse_associated_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -321,7 +289,7 @@ mod test { ); compiled_instruction.accounts.pop(); assert!(parse_associated_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .is_err()); diff --git a/transaction-status/src/parse_token.rs b/transaction-status/src/parse_token.rs index ce57111c958bef..ee9a04db3a7184 100644 --- a/transaction-status/src/parse_token.rs +++ b/transaction-status/src/parse_token.rs @@ -809,32 +809,12 @@ fn map_coption_pubkey(pubkey: COption) -> Option { mod test { use { super::*, - solana_sdk::{instruction::CompiledInstruction, pubkey::Pubkey}, - spl_token_2022::{ - instruction::*, - solana_program::{ - instruction::CompiledInstruction as SplTokenCompiledInstruction, message::Message, - pubkey::Pubkey as SplTokenPubkey, - }, - }, - std::{iter::repeat_with, str::FromStr}, + 
solana_sdk::{message::Message, pubkey::Pubkey}, + spl_token_2022::instruction::*, + std::iter::repeat_with, }; - pub(super) fn convert_pubkey(pubkey: Pubkey) -> SplTokenPubkey { - SplTokenPubkey::from_str(&pubkey.to_string()).unwrap() - } - - pub(super) fn convert_compiled_instruction( - instruction: &SplTokenCompiledInstruction, - ) -> CompiledInstruction { - CompiledInstruction { - program_id_index: instruction.program_id_index, - accounts: instruction.accounts.clone(), - data: instruction.data.clone(), - } - } - - fn test_parse_token(program_id: &SplTokenPubkey) { + fn test_parse_token(program_id: &Pubkey) { let mint_pubkey = Pubkey::new_unique(); let mint_authority = Pubkey::new_unique(); let freeze_authority = Pubkey::new_unique(); @@ -843,17 +823,17 @@ mod test { // Test InitializeMint variations let initialize_mint_ix = initialize_mint( program_id, - &convert_pubkey(mint_pubkey), - &convert_pubkey(mint_authority), - Some(&convert_pubkey(freeze_authority)), + &mint_pubkey, + &mint_authority, + Some(&freeze_authority), 2, ) .unwrap(); let message = Message::new(&[initialize_mint_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -869,19 +849,13 @@ mod test { } ); - let initialize_mint_ix = initialize_mint( - program_id, - &convert_pubkey(mint_pubkey), - &convert_pubkey(mint_authority), - None, - 2, - ) - .unwrap(); + let initialize_mint_ix = + initialize_mint(program_id, &mint_pubkey, &mint_authority, None, 2).unwrap(); let message = Message::new(&[initialize_mint_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, 
None) ) .unwrap(), @@ -899,17 +873,17 @@ mod test { // Test InitializeMint2 let initialize_mint_ix = initialize_mint2( program_id, - &convert_pubkey(mint_pubkey), - &convert_pubkey(mint_authority), - Some(&convert_pubkey(freeze_authority)), + &mint_pubkey, + &mint_authority, + Some(&freeze_authority), 2, ) .unwrap(); let message = Message::new(&[initialize_mint_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -927,18 +901,13 @@ mod test { // Test InitializeAccount let account_pubkey = Pubkey::new_unique(); let owner = Pubkey::new_unique(); - let initialize_account_ix = initialize_account( - program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(owner), - ) - .unwrap(); + let initialize_account_ix = + initialize_account(program_id, &account_pubkey, &mint_pubkey, &owner).unwrap(); let message = Message::new(&[initialize_account_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -954,18 +923,13 @@ mod test { ); // Test InitializeAccount2 - let initialize_account_ix = initialize_account2( - program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(owner), - ) - .unwrap(); + let initialize_account_ix = + initialize_account2(program_id, &account_pubkey, &mint_pubkey, &owner).unwrap(); let message = Message::new(&[initialize_account_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - 
&compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -981,18 +945,13 @@ mod test { ); // Test InitializeAccount3 - let initialize_account_ix = initialize_account3( - program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(owner), - ) - .unwrap(); + let initialize_account_ix = + initialize_account3(program_id, &account_pubkey, &mint_pubkey, &owner).unwrap(); let message = Message::new(&[initialize_account_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1013,20 +972,16 @@ mod test { let multisig_signer2 = Pubkey::new_unique(); let initialize_multisig_ix = initialize_multisig( program_id, - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - &convert_pubkey(multisig_signer2), - ], + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1, &multisig_signer2], 2, ) .unwrap(); let message = Message::new(&[initialize_multisig_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1048,20 +1003,16 @@ mod test { // Test InitializeMultisig2 let initialize_multisig_ix = initialize_multisig2( program_id, - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - &convert_pubkey(multisig_signer2), - ], + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1, &multisig_signer2], 2, ) .unwrap(); let message = Message::new(&[initialize_multisig_ix], None); - let compiled_instruction = 
convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1082,20 +1033,13 @@ mod test { // Test Transfer, incl multisig let recipient = Pubkey::new_unique(); #[allow(deprecated)] - let transfer_ix = transfer( - program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(owner), - &[], - 42, - ) - .unwrap(); + let transfer_ix = + transfer(program_id, &account_pubkey, &recipient, &owner, &[], 42).unwrap(); let message = Message::new(&[transfer_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1113,21 +1057,18 @@ mod test { #[allow(deprecated)] let transfer_ix = transfer( program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &recipient, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], 42, ) .unwrap(); let message = Message::new(&[transfer_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1147,20 +1088,12 @@ mod test { ); // Test Approve, incl multisig - let approve_ix = approve( - program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(owner), - &[], - 42, - ) - .unwrap(); + let approve_ix = approve(program_id, &account_pubkey, &recipient, &owner, &[], 
42).unwrap(); let message = Message::new(&[approve_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1177,21 +1110,18 @@ mod test { let approve_ix = approve( program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &recipient, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], 42, ) .unwrap(); let message = Message::new(&[approve_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1211,18 +1141,12 @@ mod test { ); // Test Revoke - let revoke_ix = revoke( - program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(owner), - &[], - ) - .unwrap(); + let revoke_ix = revoke(program_id, &account_pubkey, &owner, &[]).unwrap(); let message = Message::new(&[revoke_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1239,18 +1163,18 @@ mod test { let new_freeze_authority = Pubkey::new_unique(); let set_authority_ix = set_authority( program_id, - &convert_pubkey(mint_pubkey), - Some(&convert_pubkey(new_freeze_authority)), + &mint_pubkey, + Some(&new_freeze_authority), AuthorityType::FreezeAccount, - &convert_pubkey(freeze_authority), + &freeze_authority, &[], ) .unwrap(); let message = 
Message::new(&[set_authority_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1267,19 +1191,19 @@ mod test { let set_authority_ix = set_authority( program_id, - &convert_pubkey(account_pubkey), + &account_pubkey, None, AuthorityType::CloseAccount, - &convert_pubkey(owner), + &owner, &[], ) .unwrap(); let message = Message::new(&[set_authority_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; let new_authority: Option = None; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1297,18 +1221,18 @@ mod test { // Test MintTo let mint_to_ix = mint_to( program_id, - &convert_pubkey(mint_pubkey), - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_authority), + &mint_pubkey, + &account_pubkey, + &mint_authority, &[], 42, ) .unwrap(); let message = Message::new(&[mint_to_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1324,20 +1248,12 @@ mod test { ); // Test Burn - let burn_ix = burn( - program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(owner), - &[], - 42, - ) - .unwrap(); + let burn_ix = burn(program_id, &account_pubkey, &mint_pubkey, &owner, &[], 42).unwrap(); let message = Message::new(&[burn_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( 
parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1353,19 +1269,13 @@ mod test { ); // Test CloseAccount - let close_account_ix = close_account( - program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(owner), - &[], - ) - .unwrap(); + let close_account_ix = + close_account(program_id, &account_pubkey, &recipient, &owner, &[]).unwrap(); let message = Message::new(&[close_account_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1382,17 +1292,17 @@ mod test { // Test FreezeAccount let freeze_account_ix = freeze_account( program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(freeze_authority), + &account_pubkey, + &mint_pubkey, + &freeze_authority, &[], ) .unwrap(); let message = Message::new(&[freeze_account_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1409,17 +1319,17 @@ mod test { // Test ThawAccount let thaw_account_ix = thaw_account( program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(freeze_authority), + &account_pubkey, + &mint_pubkey, + &freeze_authority, &[], ) .unwrap(); let message = Message::new(&[thaw_account_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) 
.unwrap(), @@ -1436,20 +1346,20 @@ mod test { // Test TransferChecked, incl multisig let transfer_ix = transfer_checked( program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(owner), + &account_pubkey, + &mint_pubkey, + &recipient, + &owner, &[], 42, 2, ) .unwrap(); let message = Message::new(&[transfer_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1472,23 +1382,20 @@ mod test { let transfer_ix = transfer_checked( program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &mint_pubkey, + &recipient, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], 42, 2, ) .unwrap(); let message = Message::new(&[transfer_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1516,20 +1423,20 @@ mod test { // Test ApproveChecked, incl multisig let approve_ix = approve_checked( program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(owner), + &account_pubkey, + &mint_pubkey, + &recipient, + &owner, &[], 42, 2, ) .unwrap(); let message = Message::new(&[approve_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + 
compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1552,23 +1459,20 @@ mod test { let approve_ix = approve_checked( program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &mint_pubkey, + &recipient, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], 42, 2, ) .unwrap(); let message = Message::new(&[approve_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1596,19 +1500,19 @@ mod test { // Test MintToChecked let mint_to_ix = mint_to_checked( program_id, - &convert_pubkey(mint_pubkey), - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_authority), + &mint_pubkey, + &account_pubkey, + &mint_authority, &[], 42, 2, ) .unwrap(); let message = Message::new(&[mint_to_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1631,19 +1535,19 @@ mod test { // Test BurnChecked let burn_ix = burn_checked( program_id, - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(owner), + &account_pubkey, + &mint_pubkey, + &owner, &[], 42, 2, ) .unwrap(); let message = Message::new(&[burn_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, 
&AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1664,12 +1568,12 @@ mod test { ); // Test SyncNative - let sync_native_ix = sync_native(program_id, &convert_pubkey(account_pubkey)).unwrap(); + let sync_native_ix = sync_native(program_id, &account_pubkey).unwrap(); let message = Message::new(&[sync_native_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1683,12 +1587,12 @@ mod test { // Test InitializeImmutableOwner let init_immutable_owner_ix = - initialize_immutable_owner(program_id, &convert_pubkey(account_pubkey)).unwrap(); + initialize_immutable_owner(program_id, &account_pubkey).unwrap(); let message = Message::new(&[init_immutable_owner_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1703,15 +1607,15 @@ mod test { // Test GetAccountDataSize let get_account_data_size_ix = get_account_data_size( program_id, - &convert_pubkey(mint_pubkey), + &mint_pubkey, &[], // This emulates the packed data of spl_token::instruction::get_account_data_size ) .unwrap(); let message = Message::new(&[get_account_data_size_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1725,15 +1629,15 @@ mod test { let get_account_data_size_ix = get_account_data_size( program_id, - &convert_pubkey(mint_pubkey), + &mint_pubkey, &[ExtensionType::ImmutableOwner, 
ExtensionType::MemoTransfer], ) .unwrap(); let message = Message::new(&[get_account_data_size_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1750,13 +1654,12 @@ mod test { ); // Test AmountToUiAmount - let amount_to_ui_amount_ix = - amount_to_ui_amount(program_id, &convert_pubkey(mint_pubkey), 4242).unwrap(); + let amount_to_ui_amount_ix = amount_to_ui_amount(program_id, &mint_pubkey, 4242).unwrap(); let message = Message::new(&[amount_to_ui_amount_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1771,12 +1674,12 @@ mod test { // Test UiAmountToAmount let ui_amount_to_amount_ix = - ui_amount_to_amount(program_id, &convert_pubkey(mint_pubkey), "42.42").unwrap(); + ui_amount_to_amount(program_id, &mint_pubkey, "42.42").unwrap(); let message = Message::new(&[ui_amount_to_amount_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1803,13 +1706,12 @@ mod test { #[test] fn test_create_native_mint() { let payer = Pubkey::new_unique(); - let create_native_mint_ix = - create_native_mint(&spl_token_2022::id(), &convert_pubkey(payer)).unwrap(); + let create_native_mint_ix = create_native_mint(&spl_token_2022::id(), &payer).unwrap(); let message = Message::new(&[create_native_mint_ix], None); - let compiled_instruction = 
convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -1824,479 +1726,349 @@ mod test { ); } - fn test_token_ix_not_enough_keys(program_id: &SplTokenPubkey) { + fn test_token_ix_not_enough_keys(program_id: &Pubkey) { let keys: Vec = repeat_with(solana_sdk::pubkey::new_rand).take(10).collect(); // Test InitializeMint variations - let initialize_mint_ix = initialize_mint( - program_id, - &convert_pubkey(keys[0]), - &convert_pubkey(keys[1]), - Some(&convert_pubkey(keys[2])), - 2, - ) - .unwrap(); - let message = Message::new(&[initialize_mint_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); + let initialize_mint_ix = + initialize_mint(program_id, &keys[0], &keys[1], Some(&keys[2]), 2).unwrap(); + let mut message = Message::new(&[initialize_mint_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); - let initialize_mint_ix = initialize_mint( - program_id, - &convert_pubkey(keys[0]), - &convert_pubkey(keys[1]), - None, - 2, - ) - .unwrap(); - let message = Message::new(&[initialize_mint_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); + let initialize_mint_ix = initialize_mint(program_id, &keys[0], 
&keys[1], None, 2).unwrap(); + let mut message = Message::new(&[initialize_mint_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test InitializeMint2 - let initialize_mint_ix = initialize_mint2( - program_id, - &convert_pubkey(keys[0]), - &convert_pubkey(keys[1]), - Some(&convert_pubkey(keys[2])), - 2, - ) - .unwrap(); - let message = Message::new(&[initialize_mint_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..0], None)).is_err()); + let initialize_mint_ix = + initialize_mint2(program_id, &keys[0], &keys[1], Some(&keys[2]), 2).unwrap(); + let mut message = Message::new(&[initialize_mint_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..0], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test InitializeAccount - let initialize_account_ix = initialize_account( - program_id, - &convert_pubkey(keys[0]), - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - ) - .unwrap(); - let message = Message::new(&[initialize_account_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, 
&AccountKeys::new(&keys[0..3], None)).is_err()); + let initialize_account_ix = + initialize_account(program_id, &keys[0], &keys[1], &keys[2]).unwrap(); + let mut message = Message::new(&[initialize_account_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..3], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test InitializeAccount2 - let initialize_account_ix = initialize_account2( - program_id, - &convert_pubkey(keys[0]), - &convert_pubkey(keys[1]), - &convert_pubkey(keys[3]), - ) - .unwrap(); - let message = Message::new(&[initialize_account_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let initialize_account_ix = + initialize_account2(program_id, &keys[0], &keys[1], &keys[3]).unwrap(); + let mut message = Message::new(&[initialize_account_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test InitializeAccount3 - let initialize_account_ix = initialize_account3( - program_id, - &convert_pubkey(keys[0]), - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - ) - .unwrap(); - let message = Message::new(&[initialize_account_ix], None); - let mut compiled_instruction = 
convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); + let initialize_account_ix = + initialize_account3(program_id, &keys[0], &keys[1], &keys[2]).unwrap(); + let mut message = Message::new(&[initialize_account_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test InitializeMultisig - let initialize_multisig_ix = initialize_multisig( - program_id, - &convert_pubkey(keys[0]), - &[ - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[3]), - ], - 2, - ) - .unwrap(); - let message = Message::new(&[initialize_multisig_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..4], None)).is_err()); + let initialize_multisig_ix = + initialize_multisig(program_id, &keys[0], &[&keys[1], &keys[2], &keys[3]], 2).unwrap(); + let mut message = Message::new(&[initialize_multisig_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..4], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 3].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test InitializeMultisig2 - let initialize_multisig_ix = initialize_multisig2( - program_id, - &convert_pubkey(keys[0]), 
- &[ - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[3]), - ], - 2, - ) - .unwrap(); - let message = Message::new(&[initialize_multisig_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..3], None)).is_err()); + let initialize_multisig_ix = + initialize_multisig2(program_id, &keys[0], &[&keys[1], &keys[2], &keys[3]], 2).unwrap(); + let mut message = Message::new(&[initialize_multisig_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..3], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 3].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test Transfer, incl multisig #[allow(deprecated)] - let transfer_ix = transfer( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[0]), - &[], - 42, - ) - .unwrap(); - let message = Message::new(&[transfer_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let transfer_ix = transfer(program_id, &keys[1], &keys[2], &keys[0], &[], 42).unwrap(); + let mut message = Message::new(&[transfer_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, 
&AccountKeys::new(&keys, None)).is_err()); #[allow(deprecated)] let transfer_ix = transfer( program_id, - &convert_pubkey(keys[2]), - &convert_pubkey(keys[3]), - &convert_pubkey(keys[4]), - &[&convert_pubkey(keys[0]), &convert_pubkey(keys[1])], + &keys[2], + &keys[3], + &keys[4], + &[&keys[0], &keys[1]], 42, ) .unwrap(); - let message = Message::new(&[transfer_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..4], None)).is_err()); + let mut message = Message::new(&[transfer_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..4], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 3].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test Approve, incl multisig - let approve_ix = approve( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[0]), - &[], - 42, - ) - .unwrap(); - let message = Message::new(&[approve_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let approve_ix = approve(program_id, &keys[1], &keys[2], &keys[0], &[], 42).unwrap(); + let mut message = Message::new(&[approve_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + 
assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); let approve_ix = approve( program_id, - &convert_pubkey(keys[2]), - &convert_pubkey(keys[3]), - &convert_pubkey(keys[4]), - &[&convert_pubkey(keys[0]), &convert_pubkey(keys[1])], + &keys[2], + &keys[3], + &keys[4], + &[&keys[0], &keys[1]], 42, ) .unwrap(); - let message = Message::new(&[approve_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..4], None)).is_err()); + let mut message = Message::new(&[approve_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..4], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 3].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test Revoke - let revoke_ix = revoke( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[0]), - &[], - ) - .unwrap(); - let message = Message::new(&[revoke_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); + let revoke_ix = revoke(program_id, &keys[1], &keys[0], &[]).unwrap(); + let mut message = Message::new(&[revoke_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, 
None)).is_err()); // Test SetAuthority let set_authority_ix = set_authority( program_id, - &convert_pubkey(keys[1]), - Some(&convert_pubkey(keys[2])), + &keys[1], + Some(&keys[2]), AuthorityType::FreezeAccount, - &convert_pubkey(keys[0]), + &keys[0], &[], ) .unwrap(); - let message = Message::new(&[set_authority_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); + let mut message = Message::new(&[set_authority_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..1], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test MintTo - let mint_to_ix = mint_to( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[0]), - &[], - 42, - ) - .unwrap(); - let message = Message::new(&[mint_to_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let mint_to_ix = mint_to(program_id, &keys[1], &keys[2], &keys[0], &[], 42).unwrap(); + let mut message = Message::new(&[mint_to_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, 
None)).is_err()); // Test Burn - let burn_ix = burn( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[0]), - &[], - 42, - ) - .unwrap(); - let message = Message::new(&[burn_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let burn_ix = burn(program_id, &keys[1], &keys[2], &keys[0], &[], 42).unwrap(); + let mut message = Message::new(&[burn_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test CloseAccount - let close_account_ix = close_account( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[0]), - &[], - ) - .unwrap(); - let message = Message::new(&[close_account_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let close_account_ix = + close_account(program_id, &keys[1], &keys[2], &keys[0], &[]).unwrap(); + let mut message = Message::new(&[close_account_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, 
None)).is_err()); // Test FreezeAccount - let freeze_account_ix = freeze_account( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[0]), - &[], - ) - .unwrap(); - let message = Message::new(&[freeze_account_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let freeze_account_ix = + freeze_account(program_id, &keys[1], &keys[2], &keys[0], &[]).unwrap(); + let mut message = Message::new(&[freeze_account_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test ThawAccount - let thaw_account_ix = thaw_account( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[0]), - &[], - ) - .unwrap(); - let message = Message::new(&[thaw_account_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let thaw_account_ix = thaw_account(program_id, &keys[1], &keys[2], &keys[0], &[]).unwrap(); + let mut message = Message::new(&[thaw_account_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + 
assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test TransferChecked, incl multisig let transfer_ix = transfer_checked( program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[3]), - &convert_pubkey(keys[0]), + &keys[1], + &keys[2], + &keys[3], + &keys[0], &[], 42, 2, ) .unwrap(); - let message = Message::new(&[transfer_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..3], None)).is_err()); + let mut message = Message::new(&[transfer_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..3], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); let transfer_ix = transfer_checked( program_id, - &convert_pubkey(keys[2]), - &convert_pubkey(keys[3]), - &convert_pubkey(keys[4]), - &convert_pubkey(keys[5]), - &[&convert_pubkey(keys[0]), &convert_pubkey(keys[1])], + &keys[2], + &keys[3], + &keys[4], + &keys[5], + &[&keys[0], &keys[1]], 42, 2, ) .unwrap(); - let message = Message::new(&[transfer_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..5], None)).is_err()); + let mut message = Message::new(&[transfer_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..5], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 3].to_vec(); - 
assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test ApproveChecked, incl multisig let approve_ix = approve_checked( program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[3]), - &convert_pubkey(keys[0]), + &keys[1], + &keys[2], + &keys[3], + &keys[0], &[], 42, 2, ) .unwrap(); - let message = Message::new(&[approve_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..3], None)).is_err()); + let mut message = Message::new(&[approve_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..3], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); let approve_ix = approve_checked( program_id, - &convert_pubkey(keys[2]), - &convert_pubkey(keys[3]), - &convert_pubkey(keys[4]), - &convert_pubkey(keys[5]), - &[&convert_pubkey(keys[0]), &convert_pubkey(keys[1])], + &keys[2], + &keys[3], + &keys[4], + &keys[5], + &[&keys[0], &keys[1]], 42, 2, ) .unwrap(); - let message = Message::new(&[approve_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..5], None)).is_err()); + let mut message = Message::new(&[approve_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..5], None)).is_err()); compiled_instruction.accounts = 
compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 3].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test MintToChecked - let mint_to_ix = mint_to_checked( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[0]), - &[], - 42, - 2, - ) - .unwrap(); - let message = Message::new(&[mint_to_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let mint_to_ix = + mint_to_checked(program_id, &keys[1], &keys[2], &keys[0], &[], 42, 2).unwrap(); + let mut message = Message::new(&[mint_to_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test BurnChecked - let burn_ix = burn_checked( - program_id, - &convert_pubkey(keys[1]), - &convert_pubkey(keys[2]), - &convert_pubkey(keys[0]), - &[], - 42, - 2, - ) - .unwrap(); - let message = Message::new(&[burn_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); + let burn_ix = burn_checked(program_id, &keys[1], &keys[2], &keys[0], &[], 42, 2).unwrap(); + let mut message = Message::new(&[burn_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys[0..2], None)).is_err()); 
compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test SyncNative - let sync_native_ix = sync_native(program_id, &convert_pubkey(keys[0])).unwrap(); - let message = Message::new(&[sync_native_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&[], None)).is_err()); + let sync_native_ix = sync_native(program_id, &keys[0]).unwrap(); + let mut message = Message::new(&[sync_native_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&[], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test InitializeImmutableOwner - let init_immutable_owner_ix = - initialize_immutable_owner(program_id, &convert_pubkey(keys[0])).unwrap(); - let message = Message::new(&[init_immutable_owner_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&[], None)).is_err()); + let init_immutable_owner_ix = initialize_immutable_owner(program_id, &keys[0]).unwrap(); + let mut message = Message::new(&[init_immutable_owner_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&[], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - 
assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test GetAccountDataSize - let get_account_data_size_ix = - get_account_data_size(program_id, &convert_pubkey(keys[0]), &[]).unwrap(); - let message = Message::new(&[get_account_data_size_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&[], None)).is_err()); + let get_account_data_size_ix = get_account_data_size(program_id, &keys[0], &[]).unwrap(); + let mut message = Message::new(&[get_account_data_size_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&[], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test AmountToUiAmount - let amount_to_ui_amount_ix = - amount_to_ui_amount(program_id, &convert_pubkey(keys[0]), 4242).unwrap(); - let message = Message::new(&[amount_to_ui_amount_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&[], None)).is_err()); + let amount_to_ui_amount_ix = amount_to_ui_amount(program_id, &keys[0], 4242).unwrap(); + let mut message = Message::new(&[amount_to_ui_amount_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&[], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, 
&AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); // Test UiAmountToAmount - let ui_amount_to_amount_ix = - ui_amount_to_amount(program_id, &convert_pubkey(keys[0]), "42.42").unwrap(); - let message = Message::new(&[ui_amount_to_amount_ix], None); - let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&[], None)).is_err()); + let ui_amount_to_amount_ix = ui_amount_to_amount(program_id, &keys[0], "42.42").unwrap(); + let mut message = Message::new(&[ui_amount_to_amount_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert!(parse_token(compiled_instruction, &AccountKeys::new(&[], None)).is_err()); compiled_instruction.accounts = compiled_instruction.accounts[0..compiled_instruction.accounts.len() - 1].to_vec(); - assert!(parse_token(&compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); + assert!(parse_token(compiled_instruction, &AccountKeys::new(&keys, None)).is_err()); } #[test] diff --git a/transaction-status/src/parse_token/extension/cpi_guard.rs b/transaction-status/src/parse_token/extension/cpi_guard.rs index 5a7cba0aa48060..a6a8f26dfb47aa 100644 --- a/transaction-status/src/parse_token/extension/cpi_guard.rs +++ b/transaction-status/src/parse_token/extension/cpi_guard.rs @@ -40,7 +40,6 @@ pub(in crate::parse_token) fn parse_cpi_guard_instruction( mod test { use { super::*, - crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, spl_token_2022::{ extension::cpi_guard::instruction::{disable_cpi_guard, enable_cpi_guard}, @@ -54,18 +53,13 @@ mod test { // Enable, single owner let owner_pubkey = Pubkey::new_unique(); - let enable_cpi_guard_ix = enable_cpi_guard( - &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(owner_pubkey), - &[], - ) - .unwrap(); + let enable_cpi_guard_ix = + enable_cpi_guard(&spl_token_2022::id(), 
&account_pubkey, &owner_pubkey, &[]).unwrap(); let message = Message::new(&[enable_cpi_guard_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -84,19 +78,16 @@ mod test { let multisig_signer1 = Pubkey::new_unique(); let enable_cpi_guard_ix = enable_cpi_guard( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], ) .unwrap(); let message = Message::new(&[enable_cpi_guard_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -114,18 +105,13 @@ mod test { ); // Disable, single owner - let enable_cpi_guard_ix = disable_cpi_guard( - &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(owner_pubkey), - &[], - ) - .unwrap(); + let enable_cpi_guard_ix = + disable_cpi_guard(&spl_token_2022::id(), &account_pubkey, &owner_pubkey, &[]).unwrap(); let message = Message::new(&[enable_cpi_guard_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -144,19 +130,16 @@ mod test { let multisig_signer1 = Pubkey::new_unique(); let enable_cpi_guard_ix = disable_cpi_guard( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(multisig_pubkey), - 
&[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], ) .unwrap(); let message = Message::new(&[enable_cpi_guard_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), diff --git a/transaction-status/src/parse_token/extension/default_account_state.rs b/transaction-status/src/parse_token/extension/default_account_state.rs index d0063e61701700..aa7a3da01bb2b2 100644 --- a/transaction-status/src/parse_token/extension/default_account_state.rs +++ b/transaction-status/src/parse_token/extension/default_account_state.rs @@ -53,7 +53,6 @@ pub(in crate::parse_token) fn parse_default_account_state_instruction( mod test { use { super::*, - crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, spl_token_2022::{ extension::default_account_state::instruction::{ @@ -69,15 +68,15 @@ mod test { let mint_pubkey = Pubkey::new_unique(); let init_default_account_state_ix = initialize_default_account_state( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), + &mint_pubkey, &AccountState::Frozen, ) .unwrap(); let message = Message::new(&[init_default_account_state_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -94,17 +93,17 @@ mod test { let mint_freeze_authority = Pubkey::new_unique(); let update_default_account_state_ix = update_default_account_state( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &convert_pubkey(mint_freeze_authority), + &mint_pubkey, + &mint_freeze_authority, &[], 
&AccountState::Initialized, ) .unwrap(); let message = Message::new(&[update_default_account_state_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -124,20 +123,17 @@ mod test { let multisig_signer1 = Pubkey::new_unique(); let update_default_account_state_ix = update_default_account_state( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &mint_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], &AccountState::Initialized, ) .unwrap(); let message = Message::new(&[update_default_account_state_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), diff --git a/transaction-status/src/parse_token/extension/memo_transfer.rs b/transaction-status/src/parse_token/extension/memo_transfer.rs index 78fcd5b48e1a64..6106e2fab64abe 100644 --- a/transaction-status/src/parse_token/extension/memo_transfer.rs +++ b/transaction-status/src/parse_token/extension/memo_transfer.rs @@ -40,7 +40,6 @@ pub(in crate::parse_token) fn parse_memo_transfer_instruction( mod test { use { super::*, - crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, spl_token_2022::{ extension::memo_transfer::instruction::{ @@ -58,16 +57,16 @@ mod test { let owner_pubkey = Pubkey::new_unique(); let enable_memo_transfers_ix = enable_required_transfer_memos( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(owner_pubkey), + &account_pubkey, + &owner_pubkey, &[], ) .unwrap(); let 
message = Message::new(&[enable_memo_transfers_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -86,19 +85,16 @@ mod test { let multisig_signer1 = Pubkey::new_unique(); let enable_memo_transfers_ix = enable_required_transfer_memos( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], ) .unwrap(); let message = Message::new(&[enable_memo_transfers_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -118,16 +114,16 @@ mod test { // Disable, single owner let enable_memo_transfers_ix = disable_required_transfer_memos( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(owner_pubkey), + &account_pubkey, + &owner_pubkey, &[], ) .unwrap(); let message = Message::new(&[enable_memo_transfers_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -146,19 +142,16 @@ mod test { let multisig_signer1 = Pubkey::new_unique(); let enable_memo_transfers_ix = disable_required_transfer_memos( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - 
&convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], ) .unwrap(); let message = Message::new(&[enable_memo_transfers_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), diff --git a/transaction-status/src/parse_token/extension/metadata_pointer.rs b/transaction-status/src/parse_token/extension/metadata_pointer.rs index e88a14732fd89b..657078ca80af53 100644 --- a/transaction-status/src/parse_token/extension/metadata_pointer.rs +++ b/transaction-status/src/parse_token/extension/metadata_pointer.rs @@ -74,10 +74,7 @@ pub(in crate::parse_token) fn parse_metadata_pointer_instruction( #[cfg(test)] mod test { - use { - super::*, crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, - spl_token_2022::solana_program::message::Message, - }; + use {super::*, solana_sdk::pubkey::Pubkey, spl_token_2022::solana_program::message::Message}; #[test] fn test_parse_metadata_pointer_instruction() { @@ -93,11 +90,11 @@ mod test { Some(metadata_address), ) .unwrap(); - let message = Message::new(&[init_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[init_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -112,11 +109,11 @@ mod test { ); let init_ix = initialize(&spl_token_2022::id(), &mint_pubkey, None, None).unwrap(); - let message = Message::new(&[init_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[init_ix], None); + let compiled_instruction = &mut 
message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -137,11 +134,11 @@ mod test { Some(metadata_address), ) .unwrap(); - let message = Message::new(&[update_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[update_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -167,11 +164,11 @@ mod test { Some(metadata_address), ) .unwrap(); - let message = Message::new(&[update_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[update_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), diff --git a/transaction-status/src/parse_token/extension/mint_close_authority.rs b/transaction-status/src/parse_token/extension/mint_close_authority.rs index 71b6f737e9ebea..6108796122ada9 100644 --- a/transaction-status/src/parse_token/extension/mint_close_authority.rs +++ b/transaction-status/src/parse_token/extension/mint_close_authority.rs @@ -22,7 +22,6 @@ pub(in crate::parse_token) fn parse_initialize_mint_close_authority_instruction( mod test { use { super::*, - crate::parse_token::test::*, serde_json::Value, solana_sdk::pubkey::Pubkey, spl_token_2022::{instruction::*, solana_program::message::Message}, @@ -34,15 +33,15 @@ mod test { let close_authority = Pubkey::new_unique(); let mint_close_authority_ix = initialize_mint_close_authority( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - Some(&convert_pubkey(close_authority)), + &mint_pubkey, + Some(&close_authority), ) .unwrap(); let message = 
Message::new(&[mint_close_authority_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -55,17 +54,13 @@ mod test { } ); - let mint_close_authority_ix = initialize_mint_close_authority( - &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - None, - ) - .unwrap(); + let mint_close_authority_ix = + initialize_mint_close_authority(&spl_token_2022::id(), &mint_pubkey, None).unwrap(); let message = Message::new(&[mint_close_authority_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), diff --git a/transaction-status/src/parse_token/extension/permanent_delegate.rs b/transaction-status/src/parse_token/extension/permanent_delegate.rs index 11af94b1a25db1..262d5ec04afd81 100644 --- a/transaction-status/src/parse_token/extension/permanent_delegate.rs +++ b/transaction-status/src/parse_token/extension/permanent_delegate.rs @@ -19,7 +19,6 @@ pub(in crate::parse_token) fn parse_initialize_permanent_delegate_instruction( mod test { use { super::*, - crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, spl_token_2022::{instruction::*, solana_program::message::Message}, }; @@ -28,17 +27,13 @@ mod test { fn test_parse_initialize_permanent_delegate_instruction() { let mint_pubkey = Pubkey::new_unique(); let delegate = Pubkey::new_unique(); - let permanent_delegate_ix = initialize_permanent_delegate( - &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &convert_pubkey(delegate), - ) - .unwrap(); + let permanent_delegate_ix = + initialize_permanent_delegate(&spl_token_2022::id(), &mint_pubkey, 
&delegate).unwrap(); let message = Message::new(&[permanent_delegate_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), diff --git a/transaction-status/src/parse_token/extension/reallocate.rs b/transaction-status/src/parse_token/extension/reallocate.rs index 2c43f68166d0f6..623f8e1b5c81eb 100644 --- a/transaction-status/src/parse_token/extension/reallocate.rs +++ b/transaction-status/src/parse_token/extension/reallocate.rs @@ -31,7 +31,6 @@ pub(in crate::parse_token) fn parse_reallocate_instruction( mod test { use { super::*, - crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, spl_token_2022::{instruction::reallocate, solana_program::message::Message}, }; @@ -50,18 +49,18 @@ mod test { let owner_pubkey = Pubkey::new_unique(); let reallocate_ix = reallocate( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(payer_pubkey), - &convert_pubkey(owner_pubkey), + &account_pubkey, + &payer_pubkey, + &owner_pubkey, &[], &extension_types, ) .unwrap(); let message = Message::new(&[reallocate_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -83,21 +82,18 @@ mod test { let multisig_signer1 = Pubkey::new_unique(); let reallocate_ix = reallocate( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(payer_pubkey), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &payer_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], &extension_types, ) .unwrap(); 
let message = Message::new(&[reallocate_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), diff --git a/transaction-status/src/parse_token/extension/transfer_fee.rs b/transaction-status/src/parse_token/extension/transfer_fee.rs index 9cdce193c302f4..468e15c5463efe 100644 --- a/transaction-status/src/parse_token/extension/transfer_fee.rs +++ b/transaction-status/src/parse_token/extension/transfer_fee.rs @@ -158,7 +158,6 @@ pub(in crate::parse_token) fn parse_transfer_fee_instruction( mod test { use { super::*, - crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, spl_token_2022::{ extension::transfer_fee::instruction::*, solana_program::message::Message, @@ -176,18 +175,18 @@ mod test { // InitializeTransferFeeConfig variations let init_transfer_fee_config_ix = initialize_transfer_fee_config( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - Some(&convert_pubkey(transfer_fee_config_authority)), - Some(&convert_pubkey(withdraw_withheld_authority)), + &mint_pubkey, + Some(&transfer_fee_config_authority), + Some(&withdraw_withheld_authority), transfer_fee_basis_points, maximum_fee, ) .unwrap(); let message = Message::new(&[init_transfer_fee_config_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -205,7 +204,7 @@ mod test { let init_transfer_fee_config_ix = initialize_transfer_fee_config( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), + &mint_pubkey, None, None, transfer_fee_basis_points, @@ -213,10 +212,10 @@ mod test { ) .unwrap(); let message = 
Message::new(&[init_transfer_fee_config_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -239,10 +238,10 @@ mod test { let fee = 5; let transfer_checked_with_fee_ix = transfer_checked_with_fee( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(owner), + &account_pubkey, + &mint_pubkey, + &recipient, + &owner, &[], amount, decimals, @@ -250,10 +249,10 @@ mod test { ) .unwrap(); let message = Message::new(&[transfer_checked_with_fee_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -286,24 +285,21 @@ mod test { let multisig_signer1 = Pubkey::new_unique(); let transfer_checked_with_fee_ix = transfer_checked_with_fee( &spl_token_2022::id(), - &convert_pubkey(account_pubkey), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &account_pubkey, + &mint_pubkey, + &recipient, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], amount, decimals, fee, ) .unwrap(); let message = Message::new(&[transfer_checked_with_fee_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -337,17 +333,17 @@ mod test { // Single authority 
WithdrawWithheldTokensFromMint let withdraw_withheld_tokens_from_mint_ix = withdraw_withheld_tokens_from_mint( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(withdraw_withheld_authority), + &mint_pubkey, + &recipient, + &withdraw_withheld_authority, &[], ) .unwrap(); let message = Message::new(&[withdraw_withheld_tokens_from_mint_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -364,20 +360,17 @@ mod test { // Multisig WithdrawWithheldTokensFromMint let withdraw_withheld_tokens_from_mint_ix = withdraw_withheld_tokens_from_mint( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &mint_pubkey, + &recipient, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], ) .unwrap(); let message = Message::new(&[withdraw_withheld_tokens_from_mint_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -400,18 +393,18 @@ mod test { let fee_account1 = Pubkey::new_unique(); let withdraw_withheld_tokens_from_accounts_ix = withdraw_withheld_tokens_from_accounts( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(withdraw_withheld_authority), + &mint_pubkey, + &recipient, + &withdraw_withheld_authority, &[], - &[&convert_pubkey(fee_account0), &convert_pubkey(fee_account1)], + &[&fee_account0, &fee_account1], ) .unwrap(); let message = 
Message::new(&[withdraw_withheld_tokens_from_accounts_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -432,21 +425,18 @@ mod test { // Multisig WithdrawWithheldTokensFromAccounts let withdraw_withheld_tokens_from_accounts_ix = withdraw_withheld_tokens_from_accounts( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &convert_pubkey(recipient), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], - &[&convert_pubkey(fee_account0), &convert_pubkey(fee_account1)], + &mint_pubkey, + &recipient, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], + &[&fee_account0, &fee_account1], ) .unwrap(); let message = Message::new(&[withdraw_withheld_tokens_from_accounts_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -471,15 +461,15 @@ mod test { // HarvestWithheldTokensToMint let harvest_withheld_tokens_to_mint_ix = harvest_withheld_tokens_to_mint( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &[&convert_pubkey(fee_account0), &convert_pubkey(fee_account1)], + &mint_pubkey, + &[&fee_account0, &fee_account1], ) .unwrap(); let message = Message::new(&[harvest_withheld_tokens_to_mint_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -498,18 +488,18 @@ mod test { // Single authority 
SetTransferFee let set_transfer_fee_ix = set_transfer_fee( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &convert_pubkey(transfer_fee_config_authority), + &mint_pubkey, + &transfer_fee_config_authority, &[], transfer_fee_basis_points, maximum_fee, ) .unwrap(); let message = Message::new(&[set_transfer_fee_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -527,21 +517,18 @@ mod test { // Multisig WithdrawWithheldTokensFromMint let set_transfer_fee_ix = set_transfer_fee( &spl_token_2022::id(), - &convert_pubkey(mint_pubkey), - &convert_pubkey(multisig_pubkey), - &[ - &convert_pubkey(multisig_signer0), - &convert_pubkey(multisig_signer1), - ], + &mint_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], transfer_fee_basis_points, maximum_fee, ) .unwrap(); let message = Message::new(&[set_transfer_fee_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let compiled_instruction = &message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), diff --git a/transaction-status/src/parse_token/extension/transfer_hook.rs b/transaction-status/src/parse_token/extension/transfer_hook.rs index e6b33c058f96de..68e3bab2b78cea 100644 --- a/transaction-status/src/parse_token/extension/transfer_hook.rs +++ b/transaction-status/src/parse_token/extension/transfer_hook.rs @@ -68,10 +68,7 @@ pub(in crate::parse_token) fn parse_transfer_hook_instruction( #[cfg(test)] mod test { - use { - super::*, crate::parse_token::test::*, solana_sdk::pubkey::Pubkey, - spl_token_2022::solana_program::message::Message, - }; + use {super::*, solana_sdk::pubkey::Pubkey, 
spl_token_2022::solana_program::message::Message}; #[test] fn test_parse_transfer_hook_instruction() { @@ -87,11 +84,11 @@ mod test { Some(program_id), ) .unwrap(); - let message = Message::new(&[init_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[init_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -106,11 +103,11 @@ mod test { ); let init_ix = initialize(&spl_token_2022::id(), &mint_pubkey, None, None).unwrap(); - let message = Message::new(&[init_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[init_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -131,11 +128,11 @@ mod test { Some(program_id), ) .unwrap(); - let message = Message::new(&[update_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[update_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), @@ -161,11 +158,11 @@ mod test { Some(program_id), ) .unwrap(); - let message = Message::new(&[update_ix], None); - let compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let mut message = Message::new(&[update_ix], None); + let compiled_instruction = &mut message.instructions[0]; assert_eq!( parse_token( - &compiled_instruction, + compiled_instruction, &AccountKeys::new(&message.account_keys, None) ) .unwrap(), From bb19ebed491de314a387b1590cc70d25b4661f03 Mon Sep 17 00:00:00 
2001 From: HaoranYi Date: Fri, 29 Sep 2023 21:12:50 -0500 Subject: [PATCH 219/407] Convert tuple into dedup result struct (#33450) convert tupe into dedup result struct Co-authored-by: HaoranYi --- accounts-db/src/accounts_hash.rs | 69 +++++++++++++++----------------- 1 file changed, 33 insertions(+), 36 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index f5c0a78ffa2380..66a77c81883300 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -781,47 +781,44 @@ impl<'a> AccountsHasher<'a> { // b. lamports let _guard = self.active_stats.activate(ActiveStatItem::HashDeDup); + #[derive(Default)] + struct DedupResult { + hashes_files: Vec, + hashes_count: usize, + lamports_sum: u64, + } + let mut zeros = Measure::start("eliminate zeros"); - let (hashes, hash_total, lamports_total) = (0..max_bin) + let DedupResult { + hashes_files: hashes, + hashes_count: hash_total, + lamports_sum: lamports_total, + } = (0..max_bin) .into_par_iter() - .fold( - || { - ( - /*hashes files*/ Vec::with_capacity(max_bin), - /*hashes count*/ 0_usize, - /*lamports sum*/ 0_u64, - ) - }, - |mut accum, bin| { - let (hashes_file, lamports_bin) = self.de_dup_accounts_in_parallel( - sorted_data_by_pubkey, - bin, - max_bin, - stats, - ); - accum.2 = accum - .2 - .checked_add(lamports_bin) - .expect("summing capitalization cannot overflow"); - accum.1 += hashes_file.count(); - accum.0.push(hashes_file); - accum - }, - ) + .fold(DedupResult::default, |mut accum, bin| { + let (hashes_file, lamports_bin) = + self.de_dup_accounts_in_parallel(sorted_data_by_pubkey, bin, max_bin, stats); + + accum.lamports_sum = accum + .lamports_sum + .checked_add(lamports_bin) + .expect("summing capitalization cannot overflow"); + accum.hashes_count += hashes_file.count(); + accum.hashes_files.push(hashes_file); + accum + }) .reduce( - || { - ( - /*hashes files*/ Vec::with_capacity(max_bin), - /*hashes count*/ 0, - /*lamports sum*/ 0, - ) + || 
DedupResult { + hashes_files: Vec::with_capacity(max_bin), + ..Default::default() }, |mut a, mut b| { - a.2 = - a.2.checked_add(b.2) - .expect("summing capitalization cannot overflow"); - a.1 += b.1; - a.0.append(&mut b.0); + a.lamports_sum = a + .lamports_sum + .checked_add(b.lamports_sum) + .expect("summing capitalization cannot overflow"); + a.hashes_count += b.hashes_count; + a.hashes_files.append(&mut b.hashes_files); a }, ); From 9f6ef2fe629d59d93d227d4561d8f7d5a2fd5f2f Mon Sep 17 00:00:00 2001 From: ch9xy <67190054+ch9xy@users.noreply.github.com> Date: Sat, 30 Sep 2023 09:05:05 +0300 Subject: [PATCH 220/407] Typo fix in instruction.rs (#33473) Typo fix --- sdk/program/src/instruction.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index 0aef0008667a5e..e68fc198a36642 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -152,7 +152,7 @@ pub enum InstructionError { ExecutableDataModified, /// Executable account's lamports modified - #[error("instruction changed the balance of a executable account")] + #[error("instruction changed the balance of an executable account")] ExecutableLamportChange, /// Executable accounts must be rent exempt From 3508b7d84ee3d05107c908ff2bb4772f3524e22e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:47:51 +0000 Subject: [PATCH 221/407] build(deps): bump regex from 1.9.5 to 1.9.6 (#33481) * build(deps): bump regex from 1.9.5 to 1.9.6 Bumps [regex](https://github.com/rust-lang/regex) from 1.9.5 to 1.9.6. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.9.5...1.9.6) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 12 ++++++------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3737cdc37424a0..c892fd686a6757 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -709,7 +709,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" dependencies = [ "borsh-derive 0.10.3", - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -4374,13 +4374,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-automata 0.3.8", + "regex-automata 0.3.9", "regex-syntax 0.7.5", ] @@ -4392,9 +4392,9 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" dependencies = [ "aho-corasick 1.0.1", "memchr", diff --git a/Cargo.toml b/Cargo.toml index c0f31b9e377840..a93092f2e892c0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -275,7 +275,7 @@ raptorq = "1.7.0" rayon = "1.7.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" -regex = "1.9.5" +regex = "1.9.6" rolling-file = "0.2.0" reqwest = { version = "0.11.20", default-features = false } rpassword = 
"7.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index babcf314e76e19..3a113d5336a333 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3816,9 +3816,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -3828,9 +3828,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" dependencies = [ "aho-corasick 1.0.1", "memchr", From 660e41a8e182faadec2861db74b86b8dfe1679f5 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 2 Oct 2023 09:03:12 -0700 Subject: [PATCH 222/407] Remove entry shuffling (#33378) --- core/benches/banking_stage.rs | 17 ++----- ledger/src/blockstore_processor.rs | 72 ++++++++---------------------- rpc/src/rpc.rs | 1 - 3 files changed, 22 insertions(+), 68 deletions(-) diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 6219f4abb9a265..2526c2a6369c5a 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -390,7 +390,6 @@ fn bench_banking_stage_multi_programs_with_voting(bencher: &mut Bencher) { } fn simulate_process_entries( - randomize_txs: bool, mint_keypair: &Keypair, mut tx_vector: Vec, genesis_config: &GenesisConfig, @@ -423,10 +422,11 @@ fn simulate_process_entries( hash: next_hash(&bank.last_blockhash(), 1, &tx_vector), transactions: tx_vector, }; - process_entries_for_tests(&bank, vec![entry], randomize_txs, None, None).unwrap(); + process_entries_for_tests(&bank, vec![entry], 
None, None).unwrap(); } -fn bench_process_entries(randomize_txs: bool, bencher: &mut Bencher) { +#[bench] +fn bench_process_entries(bencher: &mut Bencher) { // entropy multiplier should be big enough to provide sufficient entropy // but small enough to not take too much time while executing the test. let entropy_multiplier: usize = 25; @@ -446,7 +446,6 @@ fn bench_process_entries(randomize_txs: bool, bencher: &mut Bencher) { bencher.iter(|| { simulate_process_entries( - randomize_txs, &mint_keypair, tx_vector.clone(), &genesis_config, @@ -456,13 +455,3 @@ fn bench_process_entries(randomize_txs: bool, bencher: &mut Bencher) { ); }); } - -#[bench] -fn bench_process_entries_without_order_shuffeling(bencher: &mut Bencher) { - bench_process_entries(false, bencher); -} - -#[bench] -fn bench_process_entries_with_order_shuffeling(bencher: &mut Bencher) { - bench_process_entries(true, bencher); -} diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 488ada14666c55..f5a8836087d3d4 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -13,7 +13,6 @@ use { crossbeam_channel::Sender, itertools::Itertools, log::*, - rand::{seq::SliceRandom, thread_rng}, rayon::{prelude::*, ThreadPool}, scopeguard::defer, solana_accounts_db::{ @@ -421,7 +420,6 @@ fn execute_batches( pub fn process_entries_for_tests( bank: &Arc, entries: Vec, - randomize: bool, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, ) -> Result<()> { @@ -453,7 +451,6 @@ pub fn process_entries_for_tests( let result = process_entries( bank, &mut replay_entries, - randomize, transaction_status_sender, replay_vote_sender, &mut batch_timing, @@ -465,11 +462,9 @@ pub fn process_entries_for_tests( result } -// Note: If randomize is true this will shuffle entries' transactions in-place. 
fn process_entries( bank: &Arc, entries: &mut [ReplayEntry], - randomize: bool, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, batch_timing: &mut BatchExecutionTiming, @@ -479,7 +474,6 @@ fn process_entries( // accumulator for entries that can be processed in parallel let mut batches = vec![]; let mut tick_hashes = vec![]; - let mut rng = thread_rng(); for ReplayEntry { entry, @@ -511,18 +505,8 @@ fn process_entries( } EntryType::Transactions(transactions) => { let starting_index = *starting_index; - let transaction_indexes = if randomize { - let mut transactions_and_indexes: Vec<(SanitizedTransaction, usize)> = - transactions.drain(..).zip(starting_index..).collect(); - transactions_and_indexes.shuffle(&mut rng); - let (txs, indexes): (Vec<_>, Vec<_>) = - transactions_and_indexes.into_iter().unzip(); - *transactions = txs; - indexes - } else { - (starting_index..starting_index.saturating_add(transactions.len())).collect() - }; - + let transaction_indexes = + (starting_index..starting_index.saturating_add(transactions.len())).collect(); loop { // try to lock the accounts let batch = bank.prepare_sanitized_batch(transactions); @@ -1249,11 +1233,9 @@ fn confirm_slot_entries( starting_index: tx_starting_index, }) .collect(); - // Note: This will shuffle entries' transactions in-place. let process_result = process_entries( bank, &mut replay_entries, - true, // shuffle transactions. transaction_status_sender, replay_vote_sender, batch_execute_timing, @@ -2614,7 +2596,7 @@ pub mod tests { ); // Now ensure the TX is accepted despite pointing to the ID of an empty entry. 
- process_entries_for_tests(&bank, slot_entries, true, None, None).unwrap(); + process_entries_for_tests(&bank, slot_entries, None, None).unwrap(); assert_eq!(bank.process_transaction(&tx), Ok(())); } @@ -2749,7 +2731,7 @@ pub mod tests { assert_eq!(bank.tick_height(), 0); let tick = next_entry(&genesis_config.hash(), 1, vec![]); assert_eq!( - process_entries_for_tests(&bank, vec![tick], true, None, None), + process_entries_for_tests(&bank, vec![tick], None, None), Ok(()) ); assert_eq!(bank.tick_height(), 1); @@ -2784,7 +2766,7 @@ pub mod tests { ); let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]); assert_eq!( - process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None), + process_entries_for_tests(&bank, vec![entry_1, entry_2], None, None), Ok(()) ); assert_eq!(bank.get_balance(&keypair1.pubkey()), 2); @@ -2843,7 +2825,6 @@ pub mod tests { process_entries_for_tests( &bank, vec![entry_1_to_mint, entry_2_to_3_mint_to_1], - false, None, None, ), @@ -2915,7 +2896,6 @@ pub mod tests { assert!(process_entries_for_tests( &bank, vec![entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()], - false, None, None, ) @@ -3031,7 +3011,7 @@ pub mod tests { let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]); let bank = Arc::new(bank); - let result = process_entries_for_tests(&bank, vec![entry], false, None, None); + let result = process_entries_for_tests(&bank, vec![entry], None, None); bank.freeze(); let blockhash_ok = bank.last_blockhash(); let bankhash_ok = bank.hash(); @@ -3072,7 +3052,7 @@ pub mod tests { let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]); let bank = Arc::new(bank); - let _result = process_entries_for_tests(&bank, vec![entry], false, None, None); + let _result = process_entries_for_tests(&bank, vec![entry], None, None); bank.freeze(); assert_eq!(blockhash_ok, bank.last_blockhash()); @@ -3171,7 +3151,6 @@ pub mod tests { entry_2_to_3_and_1_to_mint, entry_conflict_itself, ], - false, None, None, ) @@ -3221,7 +3200,7 @@ 
pub mod tests { system_transaction::transfer(&keypair2, &keypair4.pubkey(), 1, bank.last_blockhash()); let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]); assert_eq!( - process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None), + process_entries_for_tests(&bank, vec![entry_1, entry_2], None, None), Ok(()) ); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); @@ -3282,7 +3261,7 @@ pub mod tests { }) .collect(); assert_eq!( - process_entries_for_tests(&bank, entries, true, None, None), + process_entries_for_tests(&bank, entries, None, None), Ok(()) ); } @@ -3345,7 +3324,7 @@ pub mod tests { // Transfer lamports to each other let entry = next_entry(&bank.last_blockhash(), 1, tx_vector); assert_eq!( - process_entries_for_tests(&bank, vec![entry], true, None, None), + process_entries_for_tests(&bank, vec![entry], None, None), Ok(()) ); bank.squash(); @@ -3405,13 +3384,7 @@ pub mod tests { system_transaction::transfer(&keypair1, &keypair4.pubkey(), 1, bank.last_blockhash()); let entry_2 = next_entry(&tick.hash, 1, vec![tx]); assert_eq!( - process_entries_for_tests( - &bank, - vec![entry_1, tick, entry_2.clone()], - true, - None, - None, - ), + process_entries_for_tests(&bank, vec![entry_1, tick, entry_2.clone()], None, None,), Ok(()) ); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); @@ -3422,7 +3395,7 @@ pub mod tests { system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, bank.last_blockhash()); let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]); assert_eq!( - process_entries_for_tests(&bank, vec![entry_3], true, None, None), + process_entries_for_tests(&bank, vec![entry_3], None, None), Err(TransactionError::AccountNotFound) ); } @@ -3502,7 +3475,7 @@ pub mod tests { ); assert_eq!( - process_entries_for_tests(&bank, vec![entry_1_to_mint], false, None, None), + process_entries_for_tests(&bank, vec![entry_1_to_mint], None, None), Err(TransactionError::AccountInUse) ); @@ -3705,7 +3678,7 @@ pub mod tests { }) .collect(); info!("paying 
iteration {}", i); - process_entries_for_tests(&bank, entries, true, None, None).expect("paying failed"); + process_entries_for_tests(&bank, entries, None, None).expect("paying failed"); let entries: Vec<_> = (0..NUM_TRANSFERS) .step_by(NUM_TRANSFERS_PER_ENTRY) @@ -3728,7 +3701,7 @@ pub mod tests { .collect(); info!("refunding iteration {}", i); - process_entries_for_tests(&bank, entries, true, None, None).expect("refunding failed"); + process_entries_for_tests(&bank, entries, None, None).expect("refunding failed"); // advance to next block process_entries_for_tests( @@ -3736,7 +3709,6 @@ pub mod tests { (0..bank.ticks_per_slot()) .map(|_| next_entry_mut(&mut hash, 1, vec![])) .collect::>(), - true, None, None, ) @@ -3778,7 +3750,7 @@ pub mod tests { let entry = next_entry(&new_blockhash, 1, vec![tx]); entries.push(entry); - process_entries_for_tests(&bank0, entries, true, None, None).unwrap(); + process_entries_for_tests(&bank0, entries, None, None).unwrap(); assert_eq!(bank0.get_balance(&keypair.pubkey()), 1) } @@ -3944,8 +3916,7 @@ pub mod tests { .collect(); let entry = next_entry(&bank_1_blockhash, 1, vote_txs); let (replay_vote_sender, replay_vote_receiver) = crossbeam_channel::unbounded(); - let _ = - process_entries_for_tests(&bank1, vec![entry], true, None, Some(&replay_vote_sender)); + let _ = process_entries_for_tests(&bank1, vec![entry], None, Some(&replay_vote_sender)); let successes: BTreeSet = replay_vote_receiver .try_iter() .map(|(vote_pubkey, ..)| vote_pubkey) @@ -4309,9 +4280,7 @@ pub mod tests { if let TransactionStatusMessage::Batch(batch) = batch { assert_eq!(batch.transactions.len(), 2); assert_eq!(batch.transaction_indexes.len(), 2); - // Assert contains instead of the actual vec due to randomize - assert!(batch.transaction_indexes.contains(&0)); - assert!(batch.transaction_indexes.contains(&1)); + assert_eq!(batch.transaction_indexes, [0, 1]); } else { panic!("batch should have been sent"); } @@ -4355,10 +4324,7 @@ pub mod tests { if let 
TransactionStatusMessage::Batch(batch) = batch { assert_eq!(batch.transactions.len(), 3); assert_eq!(batch.transaction_indexes.len(), 3); - // Assert contains instead of the actual vec due to randomize - assert!(batch.transaction_indexes.contains(&2)); - assert!(batch.transaction_indexes.contains(&3)); - assert!(batch.transaction_indexes.contains(&4)); + assert_eq!(batch.transaction_indexes, [2, 3, 4]); } else { panic!("batch should have been sent"); } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index d37a6f8823b0c4..7ff2ffa42b5f3e 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -4599,7 +4599,6 @@ pub fn populate_blockstore_for_tests( solana_ledger::blockstore_processor::process_entries_for_tests( &bank, entries, - true, Some( &solana_ledger::blockstore_processor::TransactionStatusSender { sender: transaction_status_sender, From 8033be333e57d058cc8d5981ee5ca3990270da25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 2 Oct 2023 19:01:23 +0200 Subject: [PATCH 223/407] Refactor - `LoadedPrograms` (#33482) * Adds type ProgramRuntimeEnvironment. * Moves LoadedPrograms::remove_expired_entries() into LoadedPrograms::prune(). * Adds Stats::prunes_environment and renames Stats::prunes_orphan and Stats::prunes_expired. * Adds LoadedPrograms::latest_root_epoch. 
* Typo fix, authored-by: Dmitri Makarov --- program-runtime/src/loaded_programs.rs | 147 ++++++++++++++----------- runtime/src/bank_forks.rs | 2 +- 2 files changed, 81 insertions(+), 68 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index ed34ca523cd12f..43061f19a0758e 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -13,8 +13,11 @@ use { vm::{BuiltinProgram, Config}, }, solana_sdk::{ - bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock::Slot, loader_v4, - pubkey::Pubkey, saturating_add_assign, + bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, + clock::{Epoch, Slot}, + loader_v4, + pubkey::Pubkey, + saturating_add_assign, }, std::{ collections::HashMap, @@ -26,7 +29,8 @@ use { }, }; -const MAX_LOADED_ENTRY_COUNT: usize = 256; +pub type ProgramRuntimeEnvironment = Arc>>; +pub const MAX_LOADED_ENTRY_COUNT: usize = 256; pub const DELAY_VISIBILITY_SLOT_OFFSET: Slot = 1; /// Relationship between two fork IDs @@ -62,17 +66,17 @@ pub trait WorkingSlot { #[derive(Default)] pub enum LoadedProgramType { /// Tombstone for undeployed, closed or unloadable programs - FailedVerification(Arc>>), + FailedVerification(ProgramRuntimeEnvironment), #[default] Closed, DelayVisibility, /// Successfully verified but not currently compiled, used to track usage statistics when a compiled program is evicted from memory. 
- Unloaded(Arc>>), + Unloaded(ProgramRuntimeEnvironment), LegacyV0(Executable>), LegacyV1(Executable>), Typed(Executable>), #[cfg(test)] - TestLoaded(Arc>>), + TestLoaded(ProgramRuntimeEnvironment), Builtin(BuiltinProgram>), } @@ -121,8 +125,9 @@ pub struct Stats { pub insertions: AtomicU64, pub replacements: AtomicU64, pub one_hit_wonders: AtomicU64, - pub prunes: AtomicU64, - pub expired: AtomicU64, + pub prunes_orphan: AtomicU64, + pub prunes_expired: AtomicU64, + pub prunes_environment: AtomicU64, pub empty_entries: AtomicU64, } @@ -135,8 +140,9 @@ impl Stats { let replacements = self.replacements.load(Ordering::Relaxed); let one_hit_wonders = self.one_hit_wonders.load(Ordering::Relaxed); let evictions: u64 = self.evictions.values().sum(); - let prunes = self.prunes.load(Ordering::Relaxed); - let expired = self.expired.load(Ordering::Relaxed); + let prunes_orphan = self.prunes_orphan.load(Ordering::Relaxed); + let prunes_expired = self.prunes_expired.load(Ordering::Relaxed); + let prunes_environment = self.prunes_environment.load(Ordering::Relaxed); let empty_entries = self.empty_entries.load(Ordering::Relaxed); datapoint_info!( "loaded-programs-cache-stats", @@ -147,13 +153,14 @@ impl Stats { ("insertions", insertions, i64), ("replacements", replacements, i64), ("one_hit_wonders", one_hit_wonders, i64), - ("prunes", prunes, i64), - ("evict_expired", expired, i64), - ("evict_empty_entries", empty_entries, i64), + ("prunes_orphan", prunes_orphan, i64), + ("prunes_expired", prunes_expired, i64), + ("prunes_environment", prunes_environment, i64), + ("empty_entries", empty_entries, i64), ); debug!( - "Loaded Programs Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Insertions: {}, Replacements: {}, One-Hit-Wonders: {}, Prunes: {}, Expired: {}, Empty: {}", - hits, misses, evictions, insertions, replacements, one_hit_wonders, prunes, expired, empty_entries + "Loaded Programs Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Insertions: {}, Replacements: {}, 
One-Hit-Wonders: {}, Prunes-Orphan: {}, Prunes-Expired: {}, Prunes-Environment: {}, Empty: {}", + hits, misses, evictions, insertions, replacements, one_hit_wonders, prunes_orphan, prunes_expired, prunes_environment, empty_entries ); if log_enabled!(log::Level::Trace) && !self.evictions.is_empty() { let mut evictions = self.evictions.iter().collect::>(); @@ -221,7 +228,7 @@ impl LoadedProgram { /// Creates a new user program pub fn new( loader_key: &Pubkey, - program_runtime_environment: Arc>>, + program_runtime_environment: ProgramRuntimeEnvironment, deployment_slot: Slot, effective_slot: Slot, maybe_expiration_slot: Option, @@ -407,10 +414,10 @@ impl LoadedProgram { #[derive(Clone, Debug)] pub struct ProgramRuntimeEnvironments { - /// Globally shared RBPF config and syscall registry - pub program_runtime_v1: Arc>>, + /// Globally shared RBPF config and syscall registry for runtime V1 + pub program_runtime_v1: ProgramRuntimeEnvironment, /// Globally shared RBPF config and syscall registry for runtime V2 - pub program_runtime_v2: Arc>>, + pub program_runtime_v2: ProgramRuntimeEnvironment, } impl Default for ProgramRuntimeEnvironments { @@ -432,8 +439,12 @@ pub struct LoadedPrograms { /// /// Pubkey is the address of a program, multiple versions can coexists simultaneously under the same address (in different slots). 
entries: HashMap>>, + /// The slot of the last rerooting + pub latest_root_slot: Slot, + /// The epoch of the last rerooting + pub latest_root_epoch: Epoch, + /// Environments of the current epoch pub environments: ProgramRuntimeEnvironments, - latest_root: Slot, pub stats: Stats, } @@ -605,7 +616,9 @@ impl LoadedPrograms { _ => false, }; if !retain { - self.stats.prunes.fetch_add(1, Ordering::Relaxed); + self.stats + .prunes_environment + .fetch_add(1, Ordering::Relaxed); } retain }); @@ -625,39 +638,54 @@ impl LoadedPrograms { self.remove_programs_with_no_entries(); } - /// Before rerooting the blockstore this removes all programs of orphan forks - pub fn prune(&mut self, fork_graph: &F, new_root: Slot) { - let previous_root = self.latest_root; - self.entries.retain(|_key, second_level| { + /// Before rerooting the blockstore this removes all superfluous entries + pub fn prune( + &mut self, + fork_graph: &F, + new_root_slot: Slot, + new_root_epoch: Epoch, + ) { + for second_level in self.entries.values_mut() { + // Remove entries un/re/deployed on orphan forks let mut first_ancestor_found = false; *second_level = second_level .iter() .rev() .filter(|entry| { - let relation = fork_graph.relationship(entry.deployment_slot, new_root); - if entry.deployment_slot >= new_root { + let relation = fork_graph.relationship(entry.deployment_slot, new_root_slot); + if entry.deployment_slot >= new_root_slot { matches!(relation, BlockRelation::Equal | BlockRelation::Descendant) } else if !first_ancestor_found && (matches!(relation, BlockRelation::Ancestor) - || entry.deployment_slot <= previous_root) + || entry.deployment_slot <= self.latest_root_slot) { first_ancestor_found = true; first_ancestor_found } else { - self.stats.prunes.fetch_add(1, Ordering::Relaxed); + self.stats.prunes_orphan.fetch_add(1, Ordering::Relaxed); false } }) + .filter(|entry| { + // Remove expired + if let Some(expiration) = entry.maybe_expiration_slot { + if expiration <= new_root_slot { + 
self.stats.prunes_expired.fetch_add(1, Ordering::Relaxed); + return false; + } + } + true + }) .cloned() .collect(); second_level.reverse(); - !second_level.is_empty() - }); - - self.remove_expired_entries(new_root); + } self.remove_programs_with_no_entries(); - - self.latest_root = std::cmp::max(self.latest_root, new_root); + debug_assert!(self.latest_root_slot <= new_root_slot); + self.latest_root_slot = new_root_slot; + if self.latest_root_epoch < new_root_epoch { + self.latest_root_epoch = new_root_epoch; + } } fn matches_loaded_program_criteria( @@ -706,7 +734,7 @@ impl LoadedPrograms { if let Some(second_level) = self.entries.get(&key) { for entry in second_level.iter().rev() { let current_slot = working_slot.current_slot(); - if entry.deployment_slot <= self.latest_root + if entry.deployment_slot <= self.latest_root_slot || entry.deployment_slot == current_slot || working_slot.is_ancestor(entry.deployment_slot) { @@ -826,24 +854,6 @@ impl LoadedPrograms { } } - fn remove_expired_entries(&mut self, current_slot: Slot) { - for entry in self.entries.values_mut() { - entry.retain(|program| { - program - .maybe_expiration_slot - .map(|expiration| { - if expiration > current_slot { - true - } else { - self.stats.expired.fetch_add(1, Ordering::Relaxed); - false - } - }) - .unwrap_or(true) - }); - } - } - fn unload_program(&mut self, id: &Pubkey) { if let Some(entries) = self.entries.get_mut(id) { entries.iter_mut().for_each(|entry| { @@ -1316,40 +1326,43 @@ mod tests { relation: BlockRelation::Unrelated, }; - cache.prune(&fork_graph, 0); + cache.prune(&fork_graph, 0, 0); assert!(cache.entries.is_empty()); - cache.prune(&fork_graph, 10); + cache.prune(&fork_graph, 10, 0); assert!(cache.entries.is_empty()); + let mut cache = LoadedPrograms::default(); let fork_graph = TestForkGraph { relation: BlockRelation::Ancestor, }; - cache.prune(&fork_graph, 0); + cache.prune(&fork_graph, 0, 0); assert!(cache.entries.is_empty()); - cache.prune(&fork_graph, 10); + 
cache.prune(&fork_graph, 10, 0); assert!(cache.entries.is_empty()); + let mut cache = LoadedPrograms::default(); let fork_graph = TestForkGraph { relation: BlockRelation::Descendant, }; - cache.prune(&fork_graph, 0); + cache.prune(&fork_graph, 0, 0); assert!(cache.entries.is_empty()); - cache.prune(&fork_graph, 10); + cache.prune(&fork_graph, 10, 0); assert!(cache.entries.is_empty()); + let mut cache = LoadedPrograms::default(); let fork_graph = TestForkGraph { relation: BlockRelation::Unknown, }; - cache.prune(&fork_graph, 0); + cache.prune(&fork_graph, 0, 0); assert!(cache.entries.is_empty()); - cache.prune(&fork_graph, 10); + cache.prune(&fork_graph, 10, 0); assert!(cache.entries.is_empty()); } @@ -1760,7 +1773,7 @@ mod tests { programs.pop(); } - cache.prune(&fork_graph, 5); + cache.prune(&fork_graph, 5, 0); // Fork graph after pruning // 0 @@ -1825,7 +1838,7 @@ mod tests { assert!(match_slot(&found, &program3, 25, 27)); assert!(match_slot(&found, &program4, 5, 27)); - cache.prune(&fork_graph, 15); + cache.prune(&fork_graph, 15, 0); // Fork graph after pruning // 0 @@ -2176,7 +2189,7 @@ mod tests { ); // New root 5 should not evict the expired entry for program1 - cache.prune(&fork_graph, 5); + cache.prune(&fork_graph, 5, 0); assert_eq!( cache .entries @@ -2187,7 +2200,7 @@ mod tests { ); // New root 15 should evict the expired entry for program1 - cache.prune(&fork_graph, 15); + cache.prune(&fork_graph, 15, 0); assert!(cache.entries.get(&program1).is_none()); } @@ -2213,7 +2226,7 @@ mod tests { assert!(!cache.replenish(program1, new_test_loaded_program(0, 1)).0); assert!(!cache.replenish(program1, new_test_loaded_program(5, 6)).0); - cache.prune(&fork_graph, 10); + cache.prune(&fork_graph, 10, 0); let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); let ExtractedPrograms { diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index b56f4d774d1c76..ec69df9dded953 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -419,7 
+419,7 @@ impl BankForks { .loaded_programs_cache .write() .unwrap() - .prune(self, root); + .prune(self, root, root_bank.epoch()); let set_root_start = Instant::now(); let (removed_banks, set_root_metrics) = self.do_set_root_return_metrics( root, From ae4e33efc9a2ca0c58b268720e23558f4380c7b3 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Mon, 2 Oct 2023 12:31:06 -0500 Subject: [PATCH 224/407] Update mergify backport actions for new minor version (#33490) --- .mergify.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.mergify.yml b/.mergify.yml index 1fca661dddda29..ab81476816764c 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -73,9 +73,9 @@ pull_request_rules: - automerge comment: message: automerge label removed due to a CI failure - - name: v1.14 feature-gate backport + - name: v1.16 feature-gate backport conditions: - - label=v1.14 + - label=v1.16 - label=feature-gate actions: backport: @@ -86,10 +86,10 @@ pull_request_rules: labels: - feature-gate branches: - - v1.14 - - name: v1.14 non-feature-gate backport + - v1.16 + - name: v1.16 non-feature-gate backport conditions: - - label=v1.14 + - label=v1.16 - label!=feature-gate actions: backport: @@ -97,10 +97,10 @@ pull_request_rules: title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})" ignore_conflicts: true branches: - - v1.14 - - name: v1.16 feature-gate backport + - v1.16 + - name: v1.17 feature-gate backport conditions: - - label=v1.16 + - label=v1.17 - label=feature-gate actions: backport: @@ -110,10 +110,10 @@ pull_request_rules: labels: - feature-gate branches: - - v1.16 - - name: v1.16 non-feature-gate backport + - v1.17 + - name: v1.17 non-feature-gate backport conditions: - - label=v1.16 + - label=v1.17 - label!=feature-gate actions: backport: @@ -121,7 +121,7 @@ pull_request_rules: title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})" ignore_conflicts: true branches: - - v1.16 + - v1.17 commands_restrictions: 
# The author of copied PRs is the Mergify user. From 73e9e6dd70a60b27586f7ea1d2e6c2f83a99b29d Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 2 Oct 2023 19:40:31 +0200 Subject: [PATCH 225/407] Deprecate bz2/gzip/none/tar snapshot compression types (#33484) These options are now disallowed on the command line for solana-validator and solana-ledger-tool, which effectively means no more snapshots will be created with this types in normal usecases. However, support for reading the deprecated types is still in place. --- runtime/src/snapshot_utils/archive_format.rs | 20 ++++++++------------ scripts/run.sh | 1 - 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/runtime/src/snapshot_utils/archive_format.rs b/runtime/src/snapshot_utils/archive_format.rs index 4ef271cbb5bf3a..d807f4447a2b7b 100644 --- a/runtime/src/snapshot_utils/archive_format.rs +++ b/runtime/src/snapshot_utils/archive_format.rs @@ -3,7 +3,13 @@ use { strum::Display, }; -pub const SUPPORTED_ARCHIVE_COMPRESSION: &[&str] = &["bz2", "gzip", "zstd", "lz4", "tar", "none"]; +// SUPPORTED_ARCHIVE_COMPRESSION lists the compression types that can be +// specified on the command line. "zstd" and "lz4" are valid whereas "gzip", +// "bz2", "tar" and "none" have been deprecated. Thus, all newly created +// snapshots will either use "zstd" or "lz4". By keeping the deprecated types +// in the ArchiveFormat enum, pre-existing snapshot archives with the +// deprecated compression types can still be read. 
+pub const SUPPORTED_ARCHIVE_COMPRESSION: &[&str] = &["zstd", "lz4"]; pub const DEFAULT_ARCHIVE_COMPRESSION: &str = "zstd"; pub const TAR_BZIP2_EXTENSION: &str = "tar.bz2"; @@ -36,11 +42,8 @@ impl ArchiveFormat { pub fn from_cli_arg(archive_format_str: &str) -> Option { match archive_format_str { - "bz2" => Some(ArchiveFormat::TarBzip2), - "gzip" => Some(ArchiveFormat::TarGzip), "zstd" => Some(ArchiveFormat::TarZstd), "lz4" => Some(ArchiveFormat::TarLz4), - "tar" | "none" => Some(ArchiveFormat::Tar), _ => None, } } @@ -158,14 +161,7 @@ mod tests { #[test] fn test_from_cli_arg() { - let golden = [ - Some(ArchiveFormat::TarBzip2), - Some(ArchiveFormat::TarGzip), - Some(ArchiveFormat::TarZstd), - Some(ArchiveFormat::TarLz4), - Some(ArchiveFormat::Tar), - Some(ArchiveFormat::Tar), - ]; + let golden = [Some(ArchiveFormat::TarZstd), Some(ArchiveFormat::TarLz4)]; for (arg, expected) in zip(SUPPORTED_ARCHIVE_COMPRESSION.iter(), golden.into_iter()) { assert_eq!(ArchiveFormat::from_cli_arg(arg), expected); diff --git a/scripts/run.sh b/scripts/run.sh index a890aa10c17474..699bfce3e253e3 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -110,7 +110,6 @@ args=( --enable-rpc-transaction-history --enable-extended-tx-metadata-storage --init-complete-file "$dataDir"/init-completed - --snapshot-compression none --require-tower --no-wait-for-vote-to-start-leader --no-os-network-limits-test From 3eae9802930cb4e8fb12a7971fd76c350a7d79c5 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 2 Oct 2023 20:25:30 +0200 Subject: [PATCH 226/407] Minor cleanup on some snapshot related tests (#33485) --- core/src/snapshot_packager_service.rs | 2 +- download-utils/src/lib.rs | 2 +- rpc/src/rpc_service.rs | 4 +++- runtime/src/bank/serde_snapshot.rs | 2 +- runtime/src/snapshot_bank_utils.rs | 2 +- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs index 4c90a8bd18d793..4840e118231b1d 100644 --- 
a/core/src/snapshot_packager_service.rs +++ b/core/src/snapshot_packager_service.rs @@ -264,7 +264,7 @@ mod tests { let bank_snapshot_info = snapshot_utils::get_highest_bank_snapshot(&bank_snapshots_dir).unwrap(); let snapshot_storages = bank.get_snapshot_storages(None); - let archive_format = ArchiveFormat::TarBzip2; + let archive_format = ArchiveFormat::Tar; let full_archive = snapshot_bank_utils::package_and_archive_full_snapshot( &bank, diff --git a/download-utils/src/lib.rs b/download-utils/src/lib.rs index c42166437cf8ee..17d50d31b55628 100644 --- a/download-utils/src/lib.rs +++ b/download-utils/src/lib.rs @@ -288,7 +288,7 @@ pub fn download_snapshot_archive( ArchiveFormat::TarGzip, ArchiveFormat::TarBzip2, ArchiveFormat::TarLz4, - ArchiveFormat::Tar, // `solana-test-validator` creates uncompressed snapshots + ArchiveFormat::Tar, ] { let destination_path = match snapshot_kind { SnapshotKind::FullSnapshot => snapshot_utils::build_full_snapshot_archive_path( diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 4fdde57c31a9fe..c38e3b7444b6a4 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -745,7 +745,9 @@ mod tests { assert!(!rrm.is_file_get_path("//genesis.tar.bz2")); assert!(!rrm.is_file_get_path("/../genesis.tar.bz2")); - assert!(!rrm.is_file_get_path("/snapshot.tar.bz2")); // This is a redirect + // These two are redirects + assert!(!rrm.is_file_get_path("/snapshot.tar.bz2")); + assert!(!rrm.is_file_get_path("/incremental-snapshot.tar.bz2")); assert!(!rrm.is_file_get_path( "/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2" diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 2e0bdd3ecc7ab6..671a6dc6d738e5 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -474,7 +474,7 @@ mod tests { None, full_snapshot_archives_dir.path(), incremental_snapshot_archives_dir.path(), - ArchiveFormat::TarBzip2, + ArchiveFormat::Tar, 
NonZeroUsize::new(1).unwrap(), NonZeroUsize::new(1).unwrap(), ) diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 82d6f354a4e998..43b0ef5a364563 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -1408,7 +1408,7 @@ mod tests { let bank_snapshots_dir = tempfile::TempDir::new().unwrap(); let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap(); let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap(); - let snapshot_archive_format = ArchiveFormat::TarGzip; + let snapshot_archive_format = ArchiveFormat::Tar; let full_snapshot_archive_info = bank_to_full_snapshot_archive( bank_snapshots_dir.path(), From 7adab97ffdd636de3ee4b10b6fb829c048a93a67 Mon Sep 17 00:00:00 2001 From: Tyera Date: Mon, 2 Oct 2023 20:22:51 -0600 Subject: [PATCH 227/407] Add test for compaction filter purge (#33494) * Add Database::compact_range_cf method * Add test of CompactionFilter purge --- ledger/src/blockstore/blockstore_purge.rs | 84 +++++++++++++++++++++++ ledger/src/blockstore_db.rs | 5 ++ 2 files changed, 89 insertions(+) diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 090096d17e970a..71d20720ff4dcc 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -1190,4 +1190,88 @@ pub mod tests { .purge_special_columns_exact(&mut write_batch, slot, slot + 1) .unwrap(); } + + #[test] + fn test_purge_special_columns_compaction_filter() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + let index0_max_slot = 9; + let index1_max_slot = 19; + + clear_and_repopulate_transaction_statuses_for_test( + &blockstore, + index0_max_slot, + index1_max_slot, + ); + let first_index = { + let mut status_entry_iterator = blockstore + .db + .iter::(IteratorMode::Start) + .unwrap(); + status_entry_iterator.next().unwrap().0 + }; + 
let last_index = { + let mut status_entry_iterator = blockstore + .db + .iter::(IteratorMode::End) + .unwrap(); + status_entry_iterator.next().unwrap().0 + }; + + let oldest_slot = 3; + blockstore.db.set_oldest_slot(oldest_slot); + blockstore.db.compact_range_cf::( + &cf::TransactionStatus::key(first_index), + &cf::TransactionStatus::key(last_index), + ); + + let status_entry_iterator = blockstore + .db + .iter::(IteratorMode::Start) + .unwrap(); + let mut count = 0; + for ((_primary_index, _signature, slot), _value) in status_entry_iterator { + assert!(slot >= oldest_slot); + count += 1; + } + assert_eq!(count, index1_max_slot - (oldest_slot - 1)); + + clear_and_repopulate_transaction_statuses_for_test( + &blockstore, + index0_max_slot, + index1_max_slot, + ); + let first_index = { + let mut status_entry_iterator = blockstore + .db + .iter::(IteratorMode::Start) + .unwrap(); + status_entry_iterator.next().unwrap().0 + }; + let last_index = { + let mut status_entry_iterator = blockstore + .db + .iter::(IteratorMode::End) + .unwrap(); + status_entry_iterator.next().unwrap().0 + }; + + let oldest_slot = 12; + blockstore.db.set_oldest_slot(oldest_slot); + blockstore.db.compact_range_cf::( + &cf::TransactionStatus::key(first_index), + &cf::TransactionStatus::key(last_index), + ); + + let status_entry_iterator = blockstore + .db + .iter::(IteratorMode::Start) + .unwrap(); + let mut count = 0; + for ((_primary_index, _signature, slot), _value) in status_entry_iterator { + assert!(slot >= oldest_slot); + count += 1; + } + assert_eq!(count, index1_max_slot - (oldest_slot - 1)); + } } diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 25f68b8ef65381..184df713ef3a65 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -1287,6 +1287,11 @@ impl Database { pub fn live_files_metadata(&self) -> Result> { self.backend.live_files_metadata() } + + pub fn compact_range_cf(&self, from: &[u8], to: &[u8]) { + let cf = 
self.cf_handle::(); + self.backend.db.compact_range_cf(cf, Some(from), Some(to)); + } } impl LedgerColumn From b87c8d163d5df7ea0b401918035699641bb7d8fa Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 3 Oct 2023 12:37:36 +0800 Subject: [PATCH 228/407] ci: remove stable-perf from PR and push CI (#33479) ci: move stable-perf to nightly pipeline --- ci/buildkite-pipeline-in-disk.sh | 27 --------------------------- ci/buildkite-pipeline.sh | 27 --------------------------- ci/buildkite-solana-private.sh | 27 --------------------------- 3 files changed, 81 deletions(-) diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh index c9816e30a6df9b..113b009aa4452e 100755 --- a/ci/buildkite-pipeline-in-disk.sh +++ b/ci/buildkite-pipeline-in-disk.sh @@ -196,33 +196,6 @@ EOF "Stable-SBF skipped as no relevant files were modified" fi - # Perf test suite - if affects \ - .rs$ \ - Cargo.lock$ \ - Cargo.toml$ \ - ^ci/rust-version.sh \ - ^ci/test-stable-perf.sh \ - ^ci/test-stable.sh \ - ^ci/test-local-cluster.sh \ - ^core/build.rs \ - ^fetch-perf-libs.sh \ - ^programs/ \ - ^sdk/ \ - ; then - cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-perf.sh" - name: "stable-perf" - timeout_in_minutes: 35 - artifact_paths: "log-*.txt" - agents: - queue: "cuda" -EOF - else - annotate --style info \ - "Stable-perf skipped as no relevant files were modified" - fi - # Downstream backwards compatibility if affects \ .rs$ \ diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index e130c585ad6dde..8535905bfee4d0 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -194,33 +194,6 @@ EOF "Stable-SBF skipped as no relevant files were modified" fi - # Perf test suite - if affects \ - .rs$ \ - Cargo.lock$ \ - Cargo.toml$ \ - ^ci/rust-version.sh \ - ^ci/test-stable-perf.sh \ - ^ci/test-stable.sh \ - ^ci/test-local-cluster.sh \ - ^core/build.rs \ - ^fetch-perf-libs.sh \ - ^programs/ \ - ^sdk/ \ - ; then - cat >> "$output_file" <<"EOF" - 
- command: "ci/test-stable-perf.sh" - name: "stable-perf" - timeout_in_minutes: 35 - artifact_paths: "log-*.txt" - agents: - queue: "cuda" -EOF - else - annotate --style info \ - "Stable-perf skipped as no relevant files were modified" - fi - # Downstream backwards compatibility if affects \ .rs$ \ diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index 57a3d3de3b2049..ede70e6229d5f8 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -180,33 +180,6 @@ EOF "Stable-SBF skipped as no relevant files were modified" fi - # Perf test suite - if affects \ - .rs$ \ - Cargo.lock$ \ - Cargo.toml$ \ - ^ci/rust-version.sh \ - ^ci/test-stable-perf.sh \ - ^ci/test-stable.sh \ - ^ci/test-local-cluster.sh \ - ^core/build.rs \ - ^fetch-perf-libs.sh \ - ^programs/ \ - ^sdk/ \ - ; then - cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-perf.sh" - name: "stable-perf" - timeout_in_minutes: 35 - artifact_paths: "log-*.txt" - agents: - queue: "sol-private" -EOF - else - annotate --style info \ - "Stable-perf skipped as no relevant files were modified" - fi - # Downstream backwards compatibility if affects \ .rs$ \ From c6ee69e0dcb6e952fa17716a7b241f544e37e8cd Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Tue, 3 Oct 2023 10:20:24 -0500 Subject: [PATCH 229/407] Update version to v1.18.0 (#33487) --- Cargo.lock | 216 ++++++++-------- Cargo.toml | 150 +++++------ programs/sbf/Cargo.lock | 242 +++++++++--------- programs/sbf/Cargo.toml | 48 ++-- .../tests/crates/fail/Cargo.toml | 4 +- .../tests/crates/noop/Cargo.toml | 4 +- 6 files changed, 332 insertions(+), 332 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c892fd686a6757..78eee9369d79ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2134,7 +2134,7 @@ dependencies = [ [[package]] name = "gen-headers" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "regex", @@ -2142,7 +2142,7 @@ dependencies = [ [[package]] name = "gen-syscall-list" -version = 
"1.17.0" +version = "1.18.0" dependencies = [ "regex", ] @@ -4061,7 +4061,7 @@ dependencies = [ [[package]] name = "proto" -version = "1.17.0" +version = "1.18.0" dependencies = [ "protobuf-src", "tonic-build", @@ -4306,7 +4306,7 @@ dependencies = [ [[package]] name = "rbpf-cli" -version = "1.17.0" +version = "1.18.0" [[package]] name = "rcgen" @@ -5095,7 +5095,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "1.17.0" +version = "1.18.0" dependencies = [ "Inflector", "assert_matches", @@ -5119,7 +5119,7 @@ dependencies = [ [[package]] name = "solana-accounts-bench" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 2.33.3", "log", @@ -5133,7 +5133,7 @@ dependencies = [ [[package]] name = "solana-accounts-cluster-bench" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 2.33.3", "log", @@ -5163,7 +5163,7 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "1.17.0" +version = "1.18.0" dependencies = [ "arrayref", "assert_matches", @@ -5227,7 +5227,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "bytemuck", @@ -5246,7 +5246,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program-tests" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -5257,7 +5257,7 @@ dependencies = [ [[package]] name = "solana-banking-bench" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 3.2.23", "crossbeam-channel", @@ -5281,7 +5281,7 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "borsh 0.10.3", "futures 0.3.28", @@ -5298,7 +5298,7 @@ dependencies = [ [[package]] name = "solana-banks-interface" -version = "1.17.0" +version = "1.18.0" dependencies = [ "serde", "solana-sdk", @@ -5307,7 +5307,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "1.17.0" +version = 
"1.18.0" dependencies = [ "bincode", "crossbeam-channel", @@ -5325,7 +5325,7 @@ dependencies = [ [[package]] name = "solana-bench-streamer" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 3.2.23", "crossbeam-channel", @@ -5336,7 +5336,7 @@ dependencies = [ [[package]] name = "solana-bench-tps" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 2.33.3", "crossbeam-channel", @@ -5377,7 +5377,7 @@ dependencies = [ [[package]] name = "solana-bloom" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bv", "fnv", @@ -5394,7 +5394,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -5414,7 +5414,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program-tests" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -5425,7 +5425,7 @@ dependencies = [ [[package]] name = "solana-bucket-map" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bv", "bytemuck", @@ -5444,7 +5444,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-bpf" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "solana-logger", @@ -5452,7 +5452,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-sbf" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_cmd", "bzip2", @@ -5473,11 +5473,11 @@ dependencies = [ [[package]] name = "solana-cargo-test-bpf" -version = "1.17.0" +version = "1.18.0" [[package]] name = "solana-cargo-test-sbf" -version = "1.17.0" +version = "1.18.0" dependencies = [ "cargo_metadata", "clap 3.2.23", @@ -5488,7 +5488,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "chrono", @@ -5505,7 +5505,7 @@ dependencies = [ [[package]] name = "solana-clap-v3-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "chrono", @@ -5523,7 +5523,7 @@ dependencies = 
[ [[package]] name = "solana-cli" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -5576,7 +5576,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.17.0" +version = "1.18.0" dependencies = [ "anyhow", "dirs-next", @@ -5591,7 +5591,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.17.0" +version = "1.18.0" dependencies = [ "Inflector", "base64 0.21.4", @@ -5617,7 +5617,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-trait", "bincode", @@ -5649,7 +5649,7 @@ dependencies = [ [[package]] name = "solana-client-test" -version = "1.17.0" +version = "1.18.0" dependencies = [ "futures-util", "rand 0.8.5", @@ -5679,7 +5679,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -5687,7 +5687,7 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "chrono", @@ -5700,7 +5700,7 @@ dependencies = [ [[package]] name = "solana-connection-cache" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-trait", "bincode", @@ -5724,7 +5724,7 @@ dependencies = [ [[package]] name = "solana-core" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "base64 0.21.4", @@ -5807,7 +5807,7 @@ dependencies = [ [[package]] name = "solana-cost-model" -version = "1.17.0" +version = "1.18.0" dependencies = [ "lazy_static", "log", @@ -5832,7 +5832,7 @@ dependencies = [ [[package]] name = "solana-dos" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "clap 3.2.23", @@ -5862,7 +5862,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "console", "indicatif", @@ -5874,7 +5874,7 @@ dependencies = [ [[package]] name = 
"solana-ed25519-program-tests" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "ed25519-dalek", @@ -5885,7 +5885,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -5907,7 +5907,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "byteorder", @@ -5929,7 +5929,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.17.0" +version = "1.18.0" dependencies = [ "ahash 0.8.3", "blake3", @@ -5958,7 +5958,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.17.0" +version = "1.18.0" dependencies = [ "proc-macro2", "quote", @@ -5968,7 +5968,7 @@ dependencies = [ [[package]] name = "solana-genesis" -version = "1.17.0" +version = "1.18.0" dependencies = [ "base64 0.21.4", "bincode", @@ -5993,7 +5993,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "solana-accounts-db", @@ -6004,7 +6004,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-interface" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "solana-sdk", @@ -6014,7 +6014,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bs58", "crossbeam-channel", @@ -6039,7 +6039,7 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -6090,7 +6090,7 @@ dependencies = [ [[package]] name = "solana-install" -version = "1.17.0" +version = "1.18.0" dependencies = [ "atty", "bincode", @@ -6125,7 +6125,7 @@ dependencies = [ [[package]] name = "solana-keygen" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bs58", "clap 3.2.23", @@ -6142,7 +6142,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "1.17.0" 
+version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -6210,7 +6210,7 @@ dependencies = [ [[package]] name = "solana-ledger-tool" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_cmd", "bs58", @@ -6259,7 +6259,7 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "log", @@ -6271,7 +6271,7 @@ dependencies = [ [[package]] name = "solana-local-cluster" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "crossbeam-channel", @@ -6310,7 +6310,7 @@ dependencies = [ [[package]] name = "solana-log-analyzer" -version = "1.17.0" +version = "1.18.0" dependencies = [ "byte-unit", "clap 3.2.23", @@ -6322,7 +6322,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.17.0" +version = "1.18.0" dependencies = [ "env_logger", "lazy_static", @@ -6331,7 +6331,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "solana-sdk", @@ -6339,11 +6339,11 @@ dependencies = [ [[package]] name = "solana-memory-management" -version = "1.17.0" +version = "1.18.0" [[package]] name = "solana-merkle-root-bench" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 2.33.3", "log", @@ -6356,7 +6356,7 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "1.17.0" +version = "1.18.0" dependencies = [ "fast-math", "hex", @@ -6365,7 +6365,7 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "1.17.0" +version = "1.18.0" dependencies = [ "crossbeam-channel", "env_logger", @@ -6381,7 +6381,7 @@ dependencies = [ [[package]] name = "solana-net-shaper" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 3.2.23", "rand 0.8.5", @@ -6392,7 +6392,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "clap 3.2.23", @@ -6412,7 +6412,7 @@ dependencies = [ [[package]] name = 
"solana-notifier" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "reqwest", @@ -6422,7 +6422,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.17.0" +version = "1.18.0" dependencies = [ "ahash 0.8.3", "assert_matches", @@ -6450,7 +6450,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -6471,7 +6471,7 @@ dependencies = [ [[package]] name = "solana-poh-bench" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 3.2.23", "log", @@ -6486,7 +6486,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "anyhow", "ark-bn254", @@ -6543,7 +6543,7 @@ dependencies = [ [[package]] name = "solana-program-runtime" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "base64 0.21.4", @@ -6572,7 +6572,7 @@ dependencies = [ [[package]] name = "solana-program-test" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "async-trait", @@ -6600,7 +6600,7 @@ dependencies = [ [[package]] name = "solana-pubsub-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "anyhow", "crossbeam-channel", @@ -6624,7 +6624,7 @@ dependencies = [ [[package]] name = "solana-quic-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-mutex", "async-trait", @@ -6652,7 +6652,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.17.0" +version = "1.18.0" dependencies = [ "lazy_static", "num_cpus", @@ -6660,7 +6660,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "console", @@ -6679,7 +6679,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "1.17.0" +version = "1.18.0" dependencies = [ "base64 0.21.4", "bincode", @@ -6738,7 +6738,7 @@ dependencies = [ [[package]] name = "solana-rpc-client" -version = "1.17.0" 
+version = "1.18.0" dependencies = [ "assert_matches", "async-trait", @@ -6767,7 +6767,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "1.17.0" +version = "1.18.0" dependencies = [ "base64 0.21.4", "bs58", @@ -6787,7 +6787,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-nonce-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "anyhow", "clap 2.33.3", @@ -6804,7 +6804,7 @@ dependencies = [ [[package]] name = "solana-rpc-test" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "bs58", @@ -6831,7 +6831,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.17.0" +version = "1.18.0" dependencies = [ "arrayref", "assert_matches", @@ -6914,7 +6914,7 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.17.0" +version = "1.18.0" dependencies = [ "anyhow", "assert_matches", @@ -6972,7 +6972,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bs58", "proc-macro2", @@ -6983,7 +6983,7 @@ dependencies = [ [[package]] name = "solana-send-transaction-service" -version = "1.17.0" +version = "1.18.0" dependencies = [ "crossbeam-channel", "log", @@ -6998,7 +6998,7 @@ dependencies = [ [[package]] name = "solana-stake-accounts" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 2.33.3", "solana-clap-utils", @@ -7014,7 +7014,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -7031,7 +7031,7 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.17.0" +version = "1.18.0" dependencies = [ "backoff", "bincode", @@ -7063,7 +7063,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "bs58", @@ -7079,7 +7079,7 @@ dependencies = [ [[package]] name = "solana-store-tool" -version = "1.17.0" +version = "1.18.0" 
dependencies = [ "clap 2.33.3", "log", @@ -7091,7 +7091,7 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "async-channel", @@ -7123,7 +7123,7 @@ dependencies = [ [[package]] name = "solana-system-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -7137,7 +7137,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "1.17.0" +version = "1.18.0" dependencies = [ "base64 0.21.4", "bincode", @@ -7167,7 +7167,7 @@ dependencies = [ [[package]] name = "solana-thin-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "log", @@ -7181,7 +7181,7 @@ dependencies = [ [[package]] name = "solana-tokens" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -7214,7 +7214,7 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-trait", "bincode", @@ -7236,7 +7236,7 @@ dependencies = [ [[package]] name = "solana-transaction-dos" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "clap 2.33.3", @@ -7263,7 +7263,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.17.0" +version = "1.18.0" dependencies = [ "Inflector", "base64 0.21.4", @@ -7286,7 +7286,7 @@ dependencies = [ [[package]] name = "solana-turbine" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -7323,7 +7323,7 @@ dependencies = [ [[package]] name = "solana-udp-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-trait", "solana-connection-cache", @@ -7336,7 +7336,7 @@ dependencies = [ [[package]] name = "solana-upload-perf" -version = "1.17.0" +version = "1.18.0" dependencies = [ "serde_json", "solana-metrics", @@ -7344,7 +7344,7 @@ dependencies = [ [[package]] name = "solana-validator" -version = "1.17.0" +version = "1.18.0" dependencies = [ 
"chrono", "clap 2.33.3", @@ -7408,7 +7408,7 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "rustc_version 0.4.0", @@ -7422,7 +7422,7 @@ dependencies = [ [[package]] name = "solana-vote" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "crossbeam-channel", @@ -7441,7 +7441,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -7464,7 +7464,7 @@ dependencies = [ [[package]] name = "solana-watchtower" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 2.33.3", "humantime", @@ -7483,7 +7483,7 @@ dependencies = [ [[package]] name = "solana-zk-keygen" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bs58", "clap 3.2.23", @@ -7502,7 +7502,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bytemuck", "criterion", @@ -7516,7 +7516,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program-tests" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bytemuck", "curve25519-dalek", @@ -7528,7 +7528,7 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.17.0" +version = "1.18.0" dependencies = [ "aes-gcm-siv", "base64 0.21.4", diff --git a/Cargo.toml b/Cargo.toml index a93092f2e892c0..fa624857e059f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -123,7 +123,7 @@ exclude = [ resolver = "2" [workspace.package] -version = "1.17.0" +version = "1.18.0" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" homepage = "https://solanalabs.com/" @@ -299,80 +299,80 @@ smpl_jwt = "0.7.1" socket2 = "0.5.4" soketto = "0.7" solana_rbpf = "=0.7.2" -solana-account-decoder = { path = "account-decoder", version = "=1.17.0" } -solana-accounts-db = { path = "accounts-db", version = "=1.17.0" } -solana-address-lookup-table-program = { path 
= "programs/address-lookup-table", version = "=1.17.0" } -solana-banks-client = { path = "banks-client", version = "=1.17.0" } -solana-banks-interface = { path = "banks-interface", version = "=1.17.0" } -solana-banks-server = { path = "banks-server", version = "=1.17.0" } -solana-bench-tps = { path = "bench-tps", version = "=1.17.0" } -solana-bloom = { path = "bloom", version = "=1.17.0" } -solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.17.0" } -solana-bucket-map = { path = "bucket_map", version = "=1.17.0" } -solana-connection-cache = { path = "connection-cache", version = "=1.17.0", default-features = false } -solana-clap-utils = { path = "clap-utils", version = "=1.17.0" } -solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.17.0" } -solana-cli = { path = "cli", version = "=1.17.0" } -solana-cli-config = { path = "cli-config", version = "=1.17.0" } -solana-cli-output = { path = "cli-output", version = "=1.17.0" } -solana-client = { path = "client", version = "=1.17.0" } -solana-compute-budget-program = { path = "programs/compute-budget", version = "=1.17.0" } -solana-config-program = { path = "programs/config", version = "=1.17.0" } -solana-core = { path = "core", version = "=1.17.0" } -solana-cost-model = { path = "cost-model", version = "=1.17.0" } -solana-download-utils = { path = "download-utils", version = "=1.17.0" } -solana-entry = { path = "entry", version = "=1.17.0" } -solana-faucet = { path = "faucet", version = "=1.17.0" } -solana-frozen-abi = { path = "frozen-abi", version = "=1.17.0" } -solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=1.17.0" } -solana-genesis = { path = "genesis", version = "=1.17.0" } -solana-genesis-utils = { path = "genesis-utils", version = "=1.17.0" } -solana-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.17.0" } -solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=1.17.0" } -solana-gossip = { path = "gossip", version 
= "=1.17.0" } -solana-loader-v4-program = { path = "programs/loader-v4", version = "=1.17.0" } -solana-ledger = { path = "ledger", version = "=1.17.0" } -solana-local-cluster = { path = "local-cluster", version = "=1.17.0" } -solana-logger = { path = "logger", version = "=1.17.0" } -solana-measure = { path = "measure", version = "=1.17.0" } -solana-merkle-tree = { path = "merkle-tree", version = "=1.17.0" } -solana-metrics = { path = "metrics", version = "=1.17.0" } -solana-net-utils = { path = "net-utils", version = "=1.17.0" } -solana-notifier = { path = "notifier", version = "=1.17.0" } -solana-perf = { path = "perf", version = "=1.17.0" } -solana-poh = { path = "poh", version = "=1.17.0" } -solana-program = { path = "sdk/program", version = "=1.17.0" } -solana-program-runtime = { path = "program-runtime", version = "=1.17.0" } -solana-program-test = { path = "program-test", version = "=1.17.0" } -solana-pubsub-client = { path = "pubsub-client", version = "=1.17.0" } -solana-quic-client = { path = "quic-client", version = "=1.17.0" } -solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=1.17.0" } -solana-remote-wallet = { path = "remote-wallet", version = "=1.17.0", default-features = false } -solana-rpc = { path = "rpc", version = "=1.17.0" } -solana-rpc-client = { path = "rpc-client", version = "=1.17.0", default-features = false } -solana-rpc-client-api = { path = "rpc-client-api", version = "=1.17.0" } -solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=1.17.0" } -solana-runtime = { path = "runtime", version = "=1.17.0" } -solana-sdk = { path = "sdk", version = "=1.17.0" } -solana-sdk-macro = { path = "sdk/macro", version = "=1.17.0" } -solana-send-transaction-service = { path = "send-transaction-service", version = "=1.17.0" } -solana-stake-program = { path = "programs/stake", version = "=1.17.0" } -solana-storage-bigtable = { path = "storage-bigtable", version = "=1.17.0" } -solana-storage-proto = { path = 
"storage-proto", version = "=1.17.0" } -solana-streamer = { path = "streamer", version = "=1.17.0" } -solana-system-program = { path = "programs/system", version = "=1.17.0" } -solana-test-validator = { path = "test-validator", version = "=1.17.0" } -solana-thin-client = { path = "thin-client", version = "=1.17.0" } -solana-tpu-client = { path = "tpu-client", version = "=1.17.0", default-features = false } -solana-transaction-status = { path = "transaction-status", version = "=1.17.0" } -solana-turbine = { path = "turbine", version = "=1.17.0" } -solana-udp-client = { path = "udp-client", version = "=1.17.0" } -solana-version = { path = "version", version = "=1.17.0" } -solana-vote = { path = "vote", version = "=1.17.0" } -solana-vote-program = { path = "programs/vote", version = "=1.17.0" } -solana-zk-keygen = { path = "zk-keygen", version = "=1.17.0" } -solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.17.0" } -solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.17.0" } +solana-account-decoder = { path = "account-decoder", version = "=1.18.0" } +solana-accounts-db = { path = "accounts-db", version = "=1.18.0" } +solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=1.18.0" } +solana-banks-client = { path = "banks-client", version = "=1.18.0" } +solana-banks-interface = { path = "banks-interface", version = "=1.18.0" } +solana-banks-server = { path = "banks-server", version = "=1.18.0" } +solana-bench-tps = { path = "bench-tps", version = "=1.18.0" } +solana-bloom = { path = "bloom", version = "=1.18.0" } +solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.18.0" } +solana-bucket-map = { path = "bucket_map", version = "=1.18.0" } +solana-connection-cache = { path = "connection-cache", version = "=1.18.0", default-features = false } +solana-clap-utils = { path = "clap-utils", version = "=1.18.0" } +solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.18.0" 
} +solana-cli = { path = "cli", version = "=1.18.0" } +solana-cli-config = { path = "cli-config", version = "=1.18.0" } +solana-cli-output = { path = "cli-output", version = "=1.18.0" } +solana-client = { path = "client", version = "=1.18.0" } +solana-compute-budget-program = { path = "programs/compute-budget", version = "=1.18.0" } +solana-config-program = { path = "programs/config", version = "=1.18.0" } +solana-core = { path = "core", version = "=1.18.0" } +solana-cost-model = { path = "cost-model", version = "=1.18.0" } +solana-download-utils = { path = "download-utils", version = "=1.18.0" } +solana-entry = { path = "entry", version = "=1.18.0" } +solana-faucet = { path = "faucet", version = "=1.18.0" } +solana-frozen-abi = { path = "frozen-abi", version = "=1.18.0" } +solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=1.18.0" } +solana-genesis = { path = "genesis", version = "=1.18.0" } +solana-genesis-utils = { path = "genesis-utils", version = "=1.18.0" } +solana-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=1.18.0" } +solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=1.18.0" } +solana-gossip = { path = "gossip", version = "=1.18.0" } +solana-loader-v4-program = { path = "programs/loader-v4", version = "=1.18.0" } +solana-ledger = { path = "ledger", version = "=1.18.0" } +solana-local-cluster = { path = "local-cluster", version = "=1.18.0" } +solana-logger = { path = "logger", version = "=1.18.0" } +solana-measure = { path = "measure", version = "=1.18.0" } +solana-merkle-tree = { path = "merkle-tree", version = "=1.18.0" } +solana-metrics = { path = "metrics", version = "=1.18.0" } +solana-net-utils = { path = "net-utils", version = "=1.18.0" } +solana-notifier = { path = "notifier", version = "=1.18.0" } +solana-perf = { path = "perf", version = "=1.18.0" } +solana-poh = { path = "poh", version = "=1.18.0" } +solana-program = { path = "sdk/program", version = "=1.18.0" } 
+solana-program-runtime = { path = "program-runtime", version = "=1.18.0" } +solana-program-test = { path = "program-test", version = "=1.18.0" } +solana-pubsub-client = { path = "pubsub-client", version = "=1.18.0" } +solana-quic-client = { path = "quic-client", version = "=1.18.0" } +solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=1.18.0" } +solana-remote-wallet = { path = "remote-wallet", version = "=1.18.0", default-features = false } +solana-rpc = { path = "rpc", version = "=1.18.0" } +solana-rpc-client = { path = "rpc-client", version = "=1.18.0", default-features = false } +solana-rpc-client-api = { path = "rpc-client-api", version = "=1.18.0" } +solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=1.18.0" } +solana-runtime = { path = "runtime", version = "=1.18.0" } +solana-sdk = { path = "sdk", version = "=1.18.0" } +solana-sdk-macro = { path = "sdk/macro", version = "=1.18.0" } +solana-send-transaction-service = { path = "send-transaction-service", version = "=1.18.0" } +solana-stake-program = { path = "programs/stake", version = "=1.18.0" } +solana-storage-bigtable = { path = "storage-bigtable", version = "=1.18.0" } +solana-storage-proto = { path = "storage-proto", version = "=1.18.0" } +solana-streamer = { path = "streamer", version = "=1.18.0" } +solana-system-program = { path = "programs/system", version = "=1.18.0" } +solana-test-validator = { path = "test-validator", version = "=1.18.0" } +solana-thin-client = { path = "thin-client", version = "=1.18.0" } +solana-tpu-client = { path = "tpu-client", version = "=1.18.0", default-features = false } +solana-transaction-status = { path = "transaction-status", version = "=1.18.0" } +solana-turbine = { path = "turbine", version = "=1.18.0" } +solana-udp-client = { path = "udp-client", version = "=1.18.0" } +solana-version = { path = "version", version = "=1.18.0" } +solana-vote = { path = "vote", version = "=1.18.0" } +solana-vote-program = { path = 
"programs/vote", version = "=1.18.0" } +solana-zk-keygen = { path = "zk-keygen", version = "=1.18.0" } +solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.18.0" } +solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.18.0" } spl-associated-token-account = "=2.2.0" spl-instruction-padding = "0.1" spl-memo = "=4.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3a113d5336a333..fa80a2be2250be 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4446,7 +4446,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "1.17.0" +version = "1.18.0" dependencies = [ "Inflector", "base64 0.21.4", @@ -4468,7 +4468,7 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "1.17.0" +version = "1.18.0" dependencies = [ "arrayref", "bincode", @@ -4525,7 +4525,7 @@ dependencies = [ [[package]] name = "solana-address-lookup-table-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "bytemuck", @@ -4544,7 +4544,7 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "borsh 0.10.3", "futures 0.3.28", @@ -4559,7 +4559,7 @@ dependencies = [ [[package]] name = "solana-banks-interface" -version = "1.17.0" +version = "1.18.0" dependencies = [ "serde", "solana-sdk", @@ -4568,7 +4568,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "crossbeam-channel", @@ -4586,7 +4586,7 @@ dependencies = [ [[package]] name = "solana-bloom" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bv", "fnv", @@ -4603,7 +4603,7 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "byteorder 1.4.3", @@ -4620,7 +4620,7 @@ dependencies = [ [[package]] name = "solana-bpf-rust-big-mod-exp" -version = "1.17.0" +version = "1.18.0" dependencies = [ 
"array-bytes", "serde", @@ -4630,7 +4630,7 @@ dependencies = [ [[package]] name = "solana-bucket-map" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bv", "bytemuck", @@ -4646,7 +4646,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "chrono", "clap 2.33.3", @@ -4661,7 +4661,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.17.0" +version = "1.18.0" dependencies = [ "dirs-next", "lazy_static", @@ -4675,7 +4675,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.17.0" +version = "1.18.0" dependencies = [ "Inflector", "base64 0.21.4", @@ -4700,7 +4700,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-trait", "bincode", @@ -4731,7 +4731,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -4739,7 +4739,7 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "chrono", @@ -4751,7 +4751,7 @@ dependencies = [ [[package]] name = "solana-connection-cache" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-trait", "bincode", @@ -4771,7 +4771,7 @@ dependencies = [ [[package]] name = "solana-core" -version = "1.17.0" +version = "1.18.0" dependencies = [ "base64 0.21.4", "bincode", @@ -4843,7 +4843,7 @@ dependencies = [ [[package]] name = "solana-cost-model" -version = "1.17.0" +version = "1.18.0" dependencies = [ "lazy_static", "log", @@ -4865,7 +4865,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "console", "indicatif", @@ -4877,7 +4877,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "crossbeam-channel", @@ -4897,7 +4897,7 @@ 
dependencies = [ [[package]] name = "solana-faucet" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "byteorder 1.4.3", @@ -4919,7 +4919,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.17.0" +version = "1.18.0" dependencies = [ "ahash 0.8.3", "blake3", @@ -4947,7 +4947,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.17.0" +version = "1.18.0" dependencies = [ "proc-macro2", "quote", @@ -4957,7 +4957,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "solana-accounts-db", @@ -4968,7 +4968,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-interface" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "solana-sdk", @@ -4978,7 +4978,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bs58", "crossbeam-channel", @@ -5003,7 +5003,7 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -5051,7 +5051,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "bincode", @@ -5115,7 +5115,7 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "solana-measure", @@ -5126,7 +5126,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.17.0" +version = "1.18.0" dependencies = [ "env_logger", "lazy_static", @@ -5135,7 +5135,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "solana-sdk", @@ -5143,7 +5143,7 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "1.17.0" +version = "1.18.0" dependencies = [ "fast-math", "solana-program", @@ -5151,7 +5151,7 @@ dependencies = [ [[package]] name = 
"solana-metrics" -version = "1.17.0" +version = "1.18.0" dependencies = [ "crossbeam-channel", "gethostname", @@ -5164,7 +5164,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "clap 3.1.6", @@ -5184,7 +5184,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.17.0" +version = "1.18.0" dependencies = [ "ahash 0.8.3", "bincode", @@ -5208,7 +5208,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "1.17.0" +version = "1.18.0" dependencies = [ "core_affinity", "crossbeam-channel", @@ -5224,7 +5224,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "ark-bn254", "ark-ec", @@ -5276,7 +5276,7 @@ dependencies = [ [[package]] name = "solana-program-runtime" -version = "1.17.0" +version = "1.18.0" dependencies = [ "base64 0.21.4", "bincode", @@ -5302,7 +5302,7 @@ dependencies = [ [[package]] name = "solana-program-test" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "async-trait", @@ -5329,7 +5329,7 @@ dependencies = [ [[package]] name = "solana-pubsub-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "crossbeam-channel", "futures-util", @@ -5352,7 +5352,7 @@ dependencies = [ [[package]] name = "solana-quic-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-mutex", "async-trait", @@ -5377,7 +5377,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.17.0" +version = "1.18.0" dependencies = [ "lazy_static", "num_cpus", @@ -5385,7 +5385,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.17.0" +version = "1.18.0" dependencies = [ "console", "dialoguer", @@ -5402,7 +5402,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "1.17.0" +version = "1.18.0" dependencies = [ "base64 0.21.4", "bincode", @@ -5457,7 +5457,7 @@ dependencies = [ [[package]] name = "solana-rpc-client" -version = 
"1.17.0" +version = "1.18.0" dependencies = [ "async-trait", "base64 0.21.4", @@ -5481,7 +5481,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "1.17.0" +version = "1.18.0" dependencies = [ "base64 0.21.4", "bs58", @@ -5501,7 +5501,7 @@ dependencies = [ [[package]] name = "solana-rpc-client-nonce-utils" -version = "1.17.0" +version = "1.18.0" dependencies = [ "clap 2.33.3", "solana-clap-utils", @@ -5512,7 +5512,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.17.0" +version = "1.18.0" dependencies = [ "arrayref", "base64 0.21.4", @@ -5587,7 +5587,7 @@ dependencies = [ [[package]] name = "solana-sbf-programs" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "byteorder 1.4.3", @@ -5616,7 +5616,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-128bit" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-sbf-rust-128bit-dep", @@ -5624,21 +5624,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-128bit-dep" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-alloc" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-alt-bn128" -version = "1.17.0" +version = "1.18.0" dependencies = [ "array-bytes", "solana-program", @@ -5646,7 +5646,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-alt-bn128-compression" -version = "1.17.0" +version = "1.18.0" dependencies = [ "array-bytes", "solana-program", @@ -5654,21 +5654,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-call-depth" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-caller-access" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-curve25519" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-zk-token-sdk", @@ 
-5676,14 +5676,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-custom-heap" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-dep-crate" -version = "1.17.0" +version = "1.18.0" dependencies = [ "byteorder 1.4.3", "solana-program", @@ -5691,21 +5691,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-deprecated-loader" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-dup-accounts" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-error-handling" -version = "1.17.0" +version = "1.18.0" dependencies = [ "num-derive 0.3.0", "num-traits", @@ -5715,42 +5715,42 @@ dependencies = [ [[package]] name = "solana-sbf-rust-external-spend" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-finalize" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-get-minimum-delegation" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-inner_instruction_alignment_check" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-instruction-introspection" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke" -version = "1.17.0" +version = "1.18.0" dependencies = [ "rustversion", "solana-program", @@ -5760,49 +5760,49 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoke-and-error" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke-and-ok" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoke-and-return" -version = "1.17.0" +version = "1.18.0" 
dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-invoked" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-iter" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-log-data" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-many-args" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-sbf-rust-many-args-dep", @@ -5810,14 +5810,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-many-args-dep" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-mem" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -5827,7 +5827,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-membuiltins" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-sbf-rust-mem", @@ -5835,21 +5835,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-noop" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-panic" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-param-passing" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-sbf-rust-param-passing-dep", @@ -5857,14 +5857,14 @@ dependencies = [ [[package]] name = "solana-sbf-rust-param-passing-dep" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-poseidon" -version = "1.17.0" +version = "1.18.0" dependencies = [ "array-bytes", "solana-program", @@ -5872,7 +5872,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-rand" -version = "1.17.0" +version = "1.18.0" dependencies = [ "getrandom 0.2.10", "rand 0.8.5", @@ -5881,14 +5881,14 
@@ dependencies = [ [[package]] name = "solana-sbf-rust-realloc" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-realloc-invoke" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-sbf-rust-realloc", @@ -5896,7 +5896,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-remaining-compute-units" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -5906,21 +5906,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-ro-account_modify" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-ro-modify" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sanity" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -5930,7 +5930,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-secp256k1-recover" -version = "1.17.0" +version = "1.18.0" dependencies = [ "libsecp256k1 0.7.0", "solana-program", @@ -5938,7 +5938,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sha" -version = "1.17.0" +version = "1.18.0" dependencies = [ "blake3", "solana-program", @@ -5946,21 +5946,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sibling-instructions" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sibling_inner-instructions" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-simulation" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-logger", "solana-program", @@ -5971,21 +5971,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-spoof1" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-spoof1-system" -version = "1.17.0" +version = 
"1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-sysvar" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", "solana-program-runtime", @@ -5995,21 +5995,21 @@ dependencies = [ [[package]] name = "solana-sbf-rust-upgradeable" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sbf-rust-upgraded" -version = "1.17.0" +version = "1.18.0" dependencies = [ "solana-program", ] [[package]] name = "solana-sdk" -version = "1.17.0" +version = "1.18.0" dependencies = [ "assert_matches", "base64 0.21.4", @@ -6061,7 +6061,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bs58", "proc-macro2", @@ -6072,7 +6072,7 @@ dependencies = [ [[package]] name = "solana-send-transaction-service" -version = "1.17.0" +version = "1.18.0" dependencies = [ "crossbeam-channel", "log", @@ -6086,7 +6086,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "log", @@ -6099,7 +6099,7 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.17.0" +version = "1.18.0" dependencies = [ "backoff", "bincode", @@ -6131,7 +6131,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "bs58", @@ -6146,7 +6146,7 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-channel", "bytes", @@ -6176,7 +6176,7 @@ dependencies = [ [[package]] name = "solana-system-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "log", @@ -6188,7 +6188,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "1.17.0" +version = "1.18.0" dependencies = [ "base64 0.21.4", "bincode", @@ -6218,7 +6218,7 @@ dependencies = [ [[package]] name = "solana-thin-client" -version = "1.17.0" 
+version = "1.18.0" dependencies = [ "bincode", "log", @@ -6231,7 +6231,7 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-trait", "bincode", @@ -6253,7 +6253,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.17.0" +version = "1.18.0" dependencies = [ "Inflector", "base64 0.21.4", @@ -6276,7 +6276,7 @@ dependencies = [ [[package]] name = "solana-turbine" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "bytes", @@ -6311,7 +6311,7 @@ dependencies = [ [[package]] name = "solana-udp-client" -version = "1.17.0" +version = "1.18.0" dependencies = [ "async-trait", "solana-connection-cache", @@ -6324,7 +6324,7 @@ dependencies = [ [[package]] name = "solana-validator" -version = "1.17.0" +version = "1.18.0" dependencies = [ "chrono", "clap 2.33.3", @@ -6386,7 +6386,7 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.17.0" +version = "1.18.0" dependencies = [ "log", "rustc_version", @@ -6400,7 +6400,7 @@ dependencies = [ [[package]] name = "solana-vote" -version = "1.17.0" +version = "1.18.0" dependencies = [ "crossbeam-channel", "itertools", @@ -6417,7 +6417,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bincode", "log", @@ -6437,7 +6437,7 @@ dependencies = [ [[package]] name = "solana-zk-token-proof-program" -version = "1.17.0" +version = "1.18.0" dependencies = [ "bytemuck", "num-derive 0.3.0", @@ -6449,7 +6449,7 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.17.0" +version = "1.18.0" dependencies = [ "aes-gcm-siv", "base64 0.21.4", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 5e2370576d4b20..509d750cd41d29 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.17.0" +version = "1.18.0" description = "Solana SBF test program written in Rust" 
authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -26,29 +26,29 @@ rustversion = "1.0.14" serde = "1.0.112" serde_json = "1.0.56" solana_rbpf = "=0.7.2" -solana-account-decoder = { path = "../../account-decoder", version = "=1.17.0" } -solana-accounts-db = { path = "../../accounts-db", version = "=1.17.0" } -solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.17.0" } -solana-cli-output = { path = "../../cli-output", version = "=1.17.0" } -solana-ledger = { path = "../../ledger", version = "=1.17.0" } -solana-logger = { path = "../../logger", version = "=1.17.0" } -solana-measure = { path = "../../measure", version = "=1.17.0" } -solana-program = { path = "../../sdk/program", version = "=1.17.0" } -solana-program-runtime = { path = "../../program-runtime", version = "=1.17.0" } -solana-program-test = { path = "../../program-test", version = "=1.17.0" } -solana-runtime = { path = "../../runtime", version = "=1.17.0" } -solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=1.17.0" } -solana-sbf-rust-invoke = { path = "rust/invoke", version = "=1.17.0" } -solana-sbf-rust-invoked = { path = "rust/invoked", version = "=1.17.0", default-features = false } -solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", version = "=1.17.0" } -solana-sbf-rust-mem = { path = "rust/mem", version = "=1.17.0" } -solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=1.17.0" } -solana-sbf-rust-realloc = { path = "rust/realloc", version = "=1.17.0", default-features = false } -solana-sbf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.17.0" } -solana-sdk = { path = "../../sdk", version = "=1.17.0" } -solana-transaction-status = { path = "../../transaction-status", version = "=1.17.0" } -solana-validator = { path = "../../validator", version = "=1.17.0" } -solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.17.0" } +solana-account-decoder = { 
path = "../../account-decoder", version = "=1.18.0" } +solana-accounts-db = { path = "../../accounts-db", version = "=1.18.0" } +solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.18.0" } +solana-cli-output = { path = "../../cli-output", version = "=1.18.0" } +solana-ledger = { path = "../../ledger", version = "=1.18.0" } +solana-logger = { path = "../../logger", version = "=1.18.0" } +solana-measure = { path = "../../measure", version = "=1.18.0" } +solana-program = { path = "../../sdk/program", version = "=1.18.0" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.18.0" } +solana-program-test = { path = "../../program-test", version = "=1.18.0" } +solana-runtime = { path = "../../runtime", version = "=1.18.0" } +solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=1.18.0" } +solana-sbf-rust-invoke = { path = "rust/invoke", version = "=1.18.0" } +solana-sbf-rust-invoked = { path = "rust/invoked", version = "=1.18.0", default-features = false } +solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", version = "=1.18.0" } +solana-sbf-rust-mem = { path = "rust/mem", version = "=1.18.0" } +solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=1.18.0" } +solana-sbf-rust-realloc = { path = "rust/realloc", version = "=1.18.0", default-features = false } +solana-sbf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.18.0" } +solana-sdk = { path = "../../sdk", version = "=1.18.0" } +solana-transaction-status = { path = "../../transaction-status", version = "=1.18.0" } +solana-validator = { path = "../../validator", version = "=1.18.0" } +solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.18.0" } static_assertions = "1.1.0" thiserror = "1.0" diff --git a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml index a5ecf6a3a38d38..00fdb7e5330ea7 100644 --- 
a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fail" -version = "1.17.0" +version = "1.18.0" description = "Solana SBF test program written in Rust" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=1.17.0" } +solana-program = { path = "../../../../program", version = "=1.18.0" } [lib] crate-type = ["cdylib"] diff --git a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml index a05351e377201a..c4fb1364393d35 100644 --- a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "noop" -version = "1.17.0" +version = "1.18.0" description = "Solana SBF test program written in Rust" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=1.17.0" } +solana-program = { path = "../../../../program", version = "=1.18.0" } [lib] crate-type = ["cdylib"] From 3008cd8ac156acd1d9752494abc3e7991417d9f0 Mon Sep 17 00:00:00 2001 From: mvines Date: Tue, 3 Oct 2023 09:32:13 -0700 Subject: [PATCH 230/407] Add SIMD field to feature gate issue template (#33495) --- .github/ISSUE_TEMPLATE/2-feature-gate.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/2-feature-gate.yml b/.github/ISSUE_TEMPLATE/2-feature-gate.yml index a230ec5d8ec0b4..649727680b4f46 100644 --- a/.github/ISSUE_TEMPLATE/2-feature-gate.yml +++ b/.github/ISSUE_TEMPLATE/2-feature-gate.yml @@ -24,6 +24,14 @@ body: `solana_sdk::declare_id!()` within the module. Additionally, add an entry to `FEATURE_NAMES` map. 3. 
Add desired logic to check for and switch on feature availability. + - type: input + id: simd + attributes: + label: SIMD + description: Solana IMprovement Document (SIMD) + placeholder: Link to the https://github.com/solana-foundation/solana-improvement-documents document for this feature + validations: + required: true - type: textarea id: description attributes: From 9761e6f251ab110cd1a7d0d6df294d39658d34ac Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 3 Oct 2023 22:53:55 +0200 Subject: [PATCH 231/407] Remove dummy entries in Blockstore special columns (#33511) These entries were found to improve compaction performance when LedgerCleanupService was performing direct compactions on each primary index. Cleaning by primary index has been deprecated for a while, and as such, these dummy entries are no longer needed and can be removed. --- ledger/src/blockstore.rs | 39 +++++++++++++++-------- ledger/src/blockstore/blockstore_purge.rs | 13 ++------ 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index b4426aa3678501..93de196cc10a85 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2127,14 +2127,29 @@ impl Blockstore { .put(0, &TransactionStatusIndexMeta::default())?; self.transaction_status_index_cf .put(1, &TransactionStatusIndexMeta::default())?; - // This dummy status improves compaction performance - let default_status = TransactionStatusMeta::default().into(); - self.transaction_status_cf - .put_protobuf(cf::TransactionStatus::as_index(2), &default_status)?; - self.address_signatures_cf.put( - cf::AddressSignatures::as_index(2), - &AddressSignatureMeta::default(), - ) + + // If present, delete dummy entries inserted by old software + // https://github.com/solana-labs/solana/blob/bc2b372/ledger/src/blockstore.rs#L2130-L2137 + let transaction_status_dummy_key = cf::TransactionStatus::as_index(2); + if self + .transaction_status_cf + 
.get_protobuf_or_bincode::(transaction_status_dummy_key)? + .is_some() + { + self.transaction_status_cf + .delete(transaction_status_dummy_key)?; + }; + let address_signatures_dummy_key = cf::AddressSignatures::as_index(2); + if self + .address_signatures_cf + .get(address_signatures_dummy_key)? + .is_some() + { + self.address_signatures_cf + .delete(address_signatures_dummy_key)?; + }; + + Ok(()) } /// Toggles the active primary index between `0` and `1`, and clears the @@ -7669,8 +7684,6 @@ pub mod tests { fn test_get_transaction_status() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - - // TransactionStatus column opens initialized with one entry at index 2 let transaction_status_cf = &blockstore.transaction_status_cf; let pre_balances_vec = vec![1, 2, 3]; @@ -7849,13 +7862,13 @@ pub mod tests { .get_transaction_status_with_counter(signature7, &[].into()) .unwrap(); assert_eq!(status, None); - assert_eq!(counter, 2); + assert_eq!(counter, 1); let (status, counter) = blockstore .get_transaction_status_with_counter(signature7, &[3].into()) .unwrap(); assert_eq!(status, None); - assert_eq!(counter, 2); + assert_eq!(counter, 1); } fn do_test_lowest_cleanup_slot_and_special_cfs(simulate_ledger_cleanup_service: bool) { @@ -7863,8 +7876,6 @@ pub mod tests { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - - // TransactionStatus column opens initialized with one entry at index 2 let transaction_status_cf = &blockstore.transaction_status_cf; let pre_balances_vec = vec![1, 2, 3]; diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 71d20720ff4dcc..473e0de1f5927e 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -649,9 +649,6 @@ pub mod tests { IteratorDirection::Forward, )) .unwrap(); - let padding_entry = 
status_entry_iterator.next().unwrap().0; - assert_eq!(padding_entry.0, 2); - assert_eq!(padding_entry.2, 0); assert!(status_entry_iterator.next().is_none()); let mut address_transactions_iterator = blockstore .db @@ -660,10 +657,8 @@ pub mod tests { IteratorDirection::Forward, )) .unwrap(); - let padding_entry = address_transactions_iterator.next().unwrap().0; - assert_eq!(padding_entry.0, 2); - assert_eq!(padding_entry.2, 0); assert!(address_transactions_iterator.next().is_none()); + assert_eq!( transaction_status_index_cf.get(0).unwrap().unwrap(), TransactionStatusIndexMeta { @@ -1095,8 +1090,6 @@ pub mod tests { frozen: false, } ); - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 2); // Buffer entry, no index 1 entries remaining drop(status_entry_iterator); // Purge up to but not including index0_max_slot @@ -1135,6 +1128,8 @@ pub mod tests { IteratorDirection::Forward, )) .unwrap(); + assert!(status_entry_iterator.next().is_none()); + assert_eq!( blockstore .transaction_status_index_cf @@ -1157,8 +1152,6 @@ pub mod tests { frozen: false, } ); - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 2); // Buffer entry, no index 0 or index 1 entries remaining } #[test] From eb262aabe36a2c0d9609e2a3431e06c7f1f0e5d7 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Wed, 4 Oct 2023 09:01:28 +0900 Subject: [PATCH 232/407] Enable the banking trace by default (#33497) --- core/src/validator.rs | 4 ++-- validator/src/cli.rs | 14 +++++++++++--- validator/src/main.rs | 19 ++++++++++--------- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index a0c39da764239b..e2b763f202f89b 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1134,11 +1134,11 @@ impl Validator { .map_err(|err| format!("{} [{:?}]", &err, &err))?; if banking_tracer.is_enabled() { info!( - "Enabled banking tracer (dir_byte_limit: {})", + "Enabled banking trace (dir_byte_limit: {})", 
config.banking_trace_dir_byte_limit ); } else { - info!("Disabled banking tracer"); + info!("Disabled banking trace"); } let entry_notification_sender = entry_notifier_service diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 1eef34535511ff..403a922c421112 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1351,9 +1351,17 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { // explicitly given, similar to --limit-ledger-size. // see configure_banking_trace_dir_byte_limit() for this. .default_value(&default_args.banking_trace_dir_byte_limit) - .help("Write trace files for simulate-leader-blocks, retaining \ - up to the default or specified total bytes in the \ - ledger") + .help("Enables the banking trace explicitly, which is enabled by default and \ + writes trace files for simulate-leader-blocks, retaining up to the default \ + or specified total bytes in the ledger. This flag can be used to override \ + its byte limit.") + ) + .arg( + Arg::with_name("disable_banking_trace") + .long("disable-banking-trace") + .conflicts_with("banking_trace_dir_byte_limit") + .takes_value(false) + .help("Disables the banking trace") ) .arg( Arg::with_name("block_verification_method") diff --git a/validator/src/main.rs b/validator/src/main.rs index db9e8396108dea..8d37486d7d8057 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -448,15 +448,16 @@ fn configure_banking_trace_dir_byte_limit( validator_config: &mut ValidatorConfig, matches: &ArgMatches, ) { - validator_config.banking_trace_dir_byte_limit = - if matches.occurrences_of("banking_trace_dir_byte_limit") == 0 { - // disable with no explicit flag; then, this effectively becomes `opt-in` even if we're - // specifying a default value in clap configuration. 
- DISABLED_BAKING_TRACE_DIR - } else { - // BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT or user-supplied override value - value_t_or_exit!(matches, "banking_trace_dir_byte_limit", u64) - }; + validator_config.banking_trace_dir_byte_limit = if matches.is_present("disable_banking_trace") { + // disable with an explicit flag; This effectively becomes `opt-out` by reseting to + // DISABLED_BAKING_TRACE_DIR, while allowing us to specify a default sensible limit in clap + // configuration for cli help. + DISABLED_BAKING_TRACE_DIR + } else { + // a default value in clap configuration (BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT) or + // explicit user-supplied override value + value_t_or_exit!(matches, "banking_trace_dir_byte_limit", u64) + }; } pub fn main() { From 144e6d6eec4b15d86ee9afd8bd2d823db9a8911b Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 3 Oct 2023 18:58:30 -0600 Subject: [PATCH 233/407] Blockstore special columns: minimize deletes in PurgeType::Exact (#33498) * Adjust test_purge_transaction_status_exact to test slots that cross primary indexes * Minimize deletes by checking the primary-index range * Fix test_purge_special_columns_compaction_filter --- ledger/src/blockstore/blockstore_purge.rs | 84 ++++++++++++++++++++--- 1 file changed, 76 insertions(+), 8 deletions(-) diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 473e0de1f5927e..677a34d3295602 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -354,15 +354,32 @@ impl Blockstore { ) -> Result<()> { let mut index0 = self.transaction_status_index_cf.get(0)?.unwrap_or_default(); let mut index1 = self.transaction_status_index_cf.get(1)?.unwrap_or_default(); + let slot_indexes = |slot: Slot| -> Vec { + let mut indexes = vec![]; + if slot <= index0.max_slot && (index0.frozen || slot >= index1.max_slot) { + indexes.push(0); + } + if slot <= index1.max_slot && (index1.frozen || slot >= index0.max_slot) { + 
indexes.push(1); + } + indexes + }; + for slot in from_slot..=to_slot { + let primary_indexes = slot_indexes(slot); + if primary_indexes.is_empty() { + continue; + } + let slot_entries = self.get_any_valid_slot_entries(slot, 0); let transactions = slot_entries .into_iter() .flat_map(|entry| entry.transactions); for transaction in transactions { if let Some(&signature) = transaction.signatures.get(0) { - batch.delete::((0, signature, slot))?; - batch.delete::((1, signature, slot))?; + for primary_index in &primary_indexes { + batch.delete::((*primary_index, signature, slot))?; + } let meta = self.read_transaction_status((signature, slot))?; let loaded_addresses = meta.map(|meta| meta.loaded_addresses); @@ -372,8 +389,14 @@ impl Blockstore { ); for pubkey in account_keys.iter() { - batch.delete::((0, *pubkey, slot, signature))?; - batch.delete::((1, *pubkey, slot, signature))?; + for primary_index in &primary_indexes { + batch.delete::(( + *primary_index, + *pubkey, + slot, + signature, + ))?; + } } } } @@ -697,7 +720,7 @@ pub mod tests { blockstore.initialize_transaction_status_index().unwrap(); *blockstore.active_transaction_status_index.write().unwrap() = 0; - for x in 0..index0_max_slot + 1 { + for x in 0..index0_max_slot { let entries = make_slot_entries_with_transactions(1); let shreds = entries_to_test_shreds( &entries, @@ -726,6 +749,36 @@ pub mod tests { ) .unwrap(); } + + // Add slot that crosses primary indexes + let entries = make_slot_entries_with_transactions(2); + let shreds = entries_to_test_shreds( + &entries, + index0_max_slot, // slot + index0_max_slot.saturating_sub(1), // parent_slot + true, // is_full_slot + 0, // version + true, // merkle_variant + ); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let signatures = entries + .iter() + .filter(|entry| !entry.is_tick()) + .cloned() + .flat_map(|entry| entry.transactions) + .map(|transaction| transaction.signatures[0]) + .collect::>(); + let random_bytes: Vec = (0..64).map(|_| 
rand::random::()).collect(); + blockstore + .write_transaction_status( + index0_max_slot, + signatures[0], + vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], + vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], + TransactionStatusMeta::default(), + ) + .unwrap(); + // Freeze index 0 let mut write_batch = blockstore.db.batch().unwrap(); let mut w_active_transaction_status_index = @@ -740,6 +793,19 @@ pub mod tests { drop(w_active_transaction_status_index); blockstore.db.write(write_batch).unwrap(); + let random_bytes: Vec = (0..64).map(|_| rand::random::()).collect(); + blockstore + .write_transaction_status( + index0_max_slot, + signatures[1], + vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], + vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], + TransactionStatusMeta::default(), + ) + .unwrap(); + + // Note: index0_max_slot exists in both indexes + for x in index0_max_slot + 1..index1_max_slot + 1 { let entries = make_slot_entries_with_transactions(1); let shreds = entries_to_test_shreds( @@ -993,7 +1059,7 @@ pub mod tests { for _ in 13..index1_max_slot + 1 { let entry = status_entry_iterator.next().unwrap().0; assert_eq!(entry.0, 1); - assert!(entry.2 > 12); + assert!(entry.2 == index0_max_slot || entry.2 > 12); } drop(status_entry_iterator); @@ -1190,6 +1256,8 @@ pub mod tests { let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let index0_max_slot = 9; let index1_max_slot = 19; + // includes slot 0, and slot 9 has 2 transactions + let num_total_transactions = index1_max_slot + 2; clear_and_repopulate_transaction_statuses_for_test( &blockstore, @@ -1227,7 +1295,7 @@ pub mod tests { assert!(slot >= oldest_slot); count += 1; } - assert_eq!(count, index1_max_slot - (oldest_slot - 1)); + assert_eq!(count, num_total_transactions - oldest_slot); clear_and_repopulate_transaction_statuses_for_test( &blockstore, @@ -1265,6 +1333,6 @@ pub mod tests { assert!(slot >= oldest_slot); count += 1; } - assert_eq!(count, index1_max_slot - 
(oldest_slot - 1)); + assert_eq!(count, num_total_transactions - oldest_slot - 1); // Extra transaction in slot 9 } } From c9d04bcfe6ac4b3f284f7207d43ab98dc20749b1 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 4 Oct 2023 12:00:25 +0800 Subject: [PATCH 234/407] chore(solana-frozen-abi): remove unused deps (#33436) * chore: remove unused deps * ci: increase regression of build redundancy --- Cargo.lock | 5 ----- ci/test-stable.sh | 2 +- frozen-abi/Cargo.toml | 7 ------- programs/sbf/Cargo.lock | 5 ----- 4 files changed, 1 insertion(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78eee9369d79ea..b5394925adefb4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5931,13 +5931,9 @@ dependencies = [ name = "solana-frozen-abi" version = "1.18.0" dependencies = [ - "ahash 0.8.3", - "blake3", "block-buffer 0.10.4", "bs58", "bv", - "byteorder", - "cc", "either", "generic-array 0.14.7", "im", @@ -5948,7 +5944,6 @@ dependencies = [ "serde", "serde_bytes", "serde_derive", - "serde_json", "sha2 0.10.7", "solana-frozen-abi-macro", "solana-logger", diff --git a/ci/test-stable.sh b/ci/test-stable.sh index e5f8b65326d99e..f521a6c17c6417 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -93,7 +93,7 @@ test-stable-sbf) # latest mainbeta release version. solana_program_count=$(grep -c 'solana-program v' cargo.log) rm -f cargo.log - if ((solana_program_count > 18)); then + if ((solana_program_count > 20)); then echo "Regression of build redundancy ${solana_program_count}." echo "Review dependency features that trigger redundant rebuilds of solana-program." 
exit 1 diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 3121b6968ebdf5..2965dd17a368d7 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -17,25 +17,18 @@ log = { workspace = true, features = ["std"] } serde = { workspace = true, features = ["derive", "rc"] } serde_bytes = { workspace = true } serde_derive = { workspace = true } -serde_json = { workspace = true } sha2 = { workspace = true } solana-frozen-abi-macro = { workspace = true } thiserror = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dependencies] -ahash = { workspace = true } -blake3 = { workspace = true, features = ["digest", "traits-preview"] } block-buffer = { workspace = true } -byteorder = { workspace = true, features = ["i128"] } either = { workspace = true, features = ["use_std"] } generic-array = { workspace = true, features = ["serde", "more_lengths"] } im = { workspace = true, features = ["rayon", "serde"] } memmap2 = { workspace = true } subtle = { workspace = true } -[target.'cfg(any(unix, windows))'.dependencies] -cc = { workspace = true, features = ["jobserver", "parallel"] } - [target.'cfg(not(target_os = "solana"))'.dev-dependencies] solana-logger = { workspace = true } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index fa80a2be2250be..3e749d42692ff0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4921,13 +4921,9 @@ dependencies = [ name = "solana-frozen-abi" version = "1.18.0" dependencies = [ - "ahash 0.8.3", - "blake3", "block-buffer 0.10.4", "bs58", "bv", - "byteorder 1.4.3", - "cc", "either", "generic-array 0.14.7", "im", @@ -4938,7 +4934,6 @@ dependencies = [ "serde", "serde_bytes", "serde_derive", - "serde_json", "sha2 0.10.7", "solana-frozen-abi-macro", "subtle", From 05ddbf3b91285d1303a1c2a5a3585ef3adf432d8 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Wed, 4 Oct 2023 23:09:43 +0900 Subject: [PATCH 235/407] Define generic AbiExample for OnceLock (#33520) * Define generic AbiExample for 
OnceLock * Guard with cfg... cargo-test-sbf (governance/addin-mock/program, governance/program): error[E0658]: use of unstable library feature 'once_cell' --> src/abi_example.rs:559:36 | 559 | impl AbiExample for std::sync::OnceLock { | ^^^^^^^^^^^^^^^^^^^^^^ | = note: see issue #74465 for more information = help: add `#![feature(once_cell)]` to the crate attributes to enable --- frozen-abi/src/abi_example.rs | 7 +++++++ vote/src/vote_account.rs | 26 ++------------------------ 2 files changed, 9 insertions(+), 24 deletions(-) diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index c7765c4a573544..976668d487b3e0 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -555,3 +555,10 @@ impl AbiEnumVisitor for Result { digester.create_child() } } + +#[cfg(not(target_os = "solana"))] +impl AbiExample for std::sync::OnceLock { + fn example() -> Self { + Self::from(T::example()) + } +} diff --git a/vote/src/vote_account.rs b/vote/src/vote_account.rs index cd4d538b2ccc80..c4ddd43b2051c9 100644 --- a/vote/src/vote_account.rs +++ b/vote/src/vote_account.rs @@ -1,5 +1,3 @@ -#[cfg(RUSTC_WITH_SPECIALIZATION)] -use solana_frozen_abi::abi_example::AbiExample; use { itertools::Itertools, serde::ser::{Serialize, Serializer}, @@ -30,7 +28,7 @@ pub enum Error { InvalidOwner(/*owner:*/ Pubkey), } -#[derive(Debug)] +#[derive(Debug, AbiExample)] struct VoteAccountInner { account: AccountSharedData, vote_state: OnceLock>, @@ -38,7 +36,7 @@ struct VoteAccountInner { pub type VoteAccountsHashMap = HashMap; -#[derive(Clone, Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize, AbiExample)] #[serde(from = "Arc")] pub struct VoteAccounts { vote_accounts: Arc, @@ -321,26 +319,6 @@ impl Serialize for VoteAccounts { } } -#[cfg(RUSTC_WITH_SPECIALIZATION)] -impl AbiExample for VoteAccountInner { - fn example() -> Self { - Self { - account: AccountSharedData::example(), - vote_state: OnceLock::from(Result::::example()), - } - } -} - 
-#[cfg(RUSTC_WITH_SPECIALIZATION)] -impl AbiExample for VoteAccounts { - fn example() -> Self { - Self { - vote_accounts: Arc::::example(), - staked_nodes: OnceLock::from(Arc::>::example()), - } - } -} - #[cfg(test)] mod tests { use { From 5a95e5676ee322276b974014c63804152eba5df9 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 4 Oct 2023 08:04:43 -0700 Subject: [PATCH 236/407] Manually add lookup table addresses instead of sanitizing (#33273) --- ledger-tool/src/main.rs | 18 +++-- ledger/src/blockstore.rs | 67 +++++++++++++------ ledger/src/lib.rs | 1 + ...ransaction_address_lookup_table_scanner.rs | 44 ++++++++++++ sdk/program/src/lib.rs | 9 ++- 5 files changed, 110 insertions(+), 29 deletions(-) create mode 100644 ledger/src/transaction_address_lookup_table_scanner.rs diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 2fa26528497860..1cce5ad2789371 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1024,13 +1024,14 @@ fn get_latest_optimistic_slots( /// Finds the accounts needed to replay slots `snapshot_slot` to `ending_slot`. /// Removes all other accounts from accounts_db, and updates the accounts hash /// and capitalization. This is used by the --minimize option in create-snapshot +/// Returns true if the minimized snapshot may be incomplete. fn minimize_bank_for_snapshot( blockstore: &Blockstore, bank: &Bank, snapshot_slot: Slot, ending_slot: Slot, -) { - let (transaction_account_set, transaction_accounts_measure) = measure!( +) -> bool { + let ((transaction_account_set, possibly_incomplete), transaction_accounts_measure) = measure!( blockstore.get_accounts_used_in_range(bank, snapshot_slot, ending_slot), "get transaction accounts" ); @@ -1038,6 +1039,7 @@ fn minimize_bank_for_snapshot( info!("Added {total_accounts_len} accounts from transactions. 
{transaction_accounts_measure}"); SnapshotMinimizer::minimize(bank, snapshot_slot, ending_slot, transaction_account_set); + possibly_incomplete } fn assert_capitalization(bank: &Bank) { @@ -3158,14 +3160,16 @@ fn main() { bank }; - if is_minimized { + let minimize_snapshot_possibly_incomplete = if is_minimized { minimize_bank_for_snapshot( &blockstore, &bank, snapshot_slot, ending_slot.unwrap(), - ); - } + ) + } else { + false + }; println!( "Creating a version {} {}snapshot of slot {}", @@ -3245,6 +3249,10 @@ fn main() { warn!("Minimized snapshot range crosses epoch boundary ({} to {}). Bank hashes after {} will not match replays from a full snapshot", starting_epoch, ending_epoch, bank.epoch_schedule().get_last_slot_in_epoch(starting_epoch)); } + + if minimize_snapshot_possibly_incomplete { + warn!("Minimized snapshot may be incomplete due to missing accounts from CPI'd address lookup table extensions. This may lead to mismatched bank hashes while replaying."); + } } } diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 93de196cc10a85..0e8709c018e0ba 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -21,6 +21,7 @@ use { Shred, ShredData, ShredId, ShredType, Shredder, }, slot_stats::{ShredSource, SlotsStats}, + transaction_address_lookup_table_scanner::scan_transaction, }, assert_matches::debug_assert_matches, bincode::{deserialize, serialize}, @@ -44,13 +45,15 @@ use { solana_rayon_threadlimit::get_max_thread_count, solana_runtime::bank::Bank, solana_sdk::{ + account::ReadableAccount, + address_lookup_table::state::AddressLookupTable, clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND}, genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE, DEFAULT_GENESIS_FILE}, hash::Hash, pubkey::Pubkey, signature::{Keypair, Signature, Signer}, timing::timestamp, - transaction::VersionedTransaction, + transaction::{SanitizedVersionedTransaction, VersionedTransaction}, }, solana_storage_proto::{StoredExtendedRewards, 
StoredTransactionStatusMeta}, solana_transaction_status::{ @@ -2930,14 +2933,23 @@ impl Blockstore { } /// Gets accounts used in transactions in the slot range [starting_slot, ending_slot]. + /// Additionally returns a bool indicating if the set may be incomplete. /// Used by ledger-tool to create a minimized snapshot pub fn get_accounts_used_in_range( &self, bank: &Bank, starting_slot: Slot, ending_slot: Slot, - ) -> DashSet { + ) -> (DashSet, bool) { let result = DashSet::new(); + let lookup_tables = DashSet::new(); + let possible_cpi_alt_extend = AtomicBool::new(false); + + fn add_to_set<'a>(set: &DashSet, iter: impl IntoIterator) { + iter.into_iter().for_each(|key| { + set.insert(*key); + }); + } (starting_slot..=ending_slot) .into_par_iter() @@ -2945,31 +2957,44 @@ impl Blockstore { if let Ok(entries) = self.get_slot_entries(slot, 0) { entries.into_par_iter().for_each(|entry| { entry.transactions.into_iter().for_each(|tx| { - if let Some(lookups) = tx.message.address_table_lookups() { - lookups.iter().for_each(|lookup| { - result.insert(lookup.account_key); - }); + // Attempt to verify transaction and load addresses from the current bank, + // or manually scan the transaction for addresses if the transaction. + if let Ok(tx) = bank.fully_verify_transaction(tx.clone()) { + add_to_set(&result, tx.message().account_keys().iter()); + } else { + add_to_set(&result, tx.message.static_account_keys()); + if let Some(lookups) = tx.message.address_table_lookups() { + add_to_set( + &lookup_tables, + lookups.iter().map(|lookup| &lookup.account_key), + ); + } + + let tx = SanitizedVersionedTransaction::try_from(tx) + .expect("transaction failed to sanitize"); + + let alt_scan_extensions = scan_transaction(&tx); + add_to_set(&result, &alt_scan_extensions.accounts); + if alt_scan_extensions.possibly_incomplete { + possible_cpi_alt_extend.store(true, Ordering::Relaxed); + } } - // howdy, anybody who reached here from the panic messsage! 
- // the .unwrap() below could indicate there was an odd error or there - // could simply be a tx with a new ALT, which is just created/updated - // in this range. too bad... this edge case isn't currently supported. - // see: https://github.com/solana-labs/solana/issues/30165 - // for casual use, please choose different slot range. - let sanitized_tx = bank.fully_verify_transaction(tx).unwrap(); - sanitized_tx - .message() - .account_keys() - .iter() - .for_each(|&pubkey| { - result.insert(pubkey); - }); }); }); } }); - result + // For each unique lookup table add all accounts to the minimized set. + lookup_tables.into_par_iter().for_each(|lookup_table_key| { + bank.get_account(&lookup_table_key) + .map(|lookup_table_account| { + AddressLookupTable::deserialize(lookup_table_account.data()).map(|t| { + add_to_set(&result, &t.addresses[..]); + }) + }); + }); + + (result, possible_cpi_alt_extend.into_inner()) } fn get_completed_ranges( diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index a8f81be486cead..0f311ca1216ec4 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -28,6 +28,7 @@ pub mod sigverify_shreds; pub mod slot_stats; mod staking_utils; pub mod token_balances; +mod transaction_address_lookup_table_scanner; pub mod use_snapshot_archives_at_startup; #[macro_use] diff --git a/ledger/src/transaction_address_lookup_table_scanner.rs b/ledger/src/transaction_address_lookup_table_scanner.rs new file mode 100644 index 00000000000000..357a9c6e497306 --- /dev/null +++ b/ledger/src/transaction_address_lookup_table_scanner.rs @@ -0,0 +1,44 @@ +use { + bincode::deserialize, + lazy_static::lazy_static, + solana_sdk::{ + address_lookup_table::{self, instruction::ProgramInstruction}, + pubkey::Pubkey, + sdk_ids::SDK_IDS, + transaction::SanitizedVersionedTransaction, + }, + std::collections::HashSet, +}; + +lazy_static! 
{ + static ref SDK_IDS_SET: HashSet = SDK_IDS.iter().cloned().collect(); +} + +pub struct ScannedLookupTableExtensions { + pub possibly_incomplete: bool, + pub accounts: Vec, // empty if no extensions found +} + +pub fn scan_transaction( + transaction: &SanitizedVersionedTransaction, +) -> ScannedLookupTableExtensions { + // Accumulate accounts from account lookup table extension instructions + let mut accounts = Vec::new(); + let mut native_only = true; + for (program_id, instruction) in transaction.get_message().program_instructions_iter() { + if address_lookup_table::program::check_id(program_id) { + if let Ok(ProgramInstruction::ExtendLookupTable { new_addresses }) = + deserialize::(&instruction.data) + { + accounts.extend(new_addresses); + } + } else { + native_only &= SDK_IDS_SET.contains(program_id); + } + } + + ScannedLookupTableExtensions { + possibly_incomplete: !native_only, + accounts, + } +} diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index edcc2e3cb8c961..0dfc8c3247cc03 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -564,9 +564,9 @@ pub mod config { pub mod sdk_ids { use { crate::{ - bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, config, ed25519_program, - feature, incinerator, secp256k1_program, solana_program::pubkey::Pubkey, stake, - system_program, sysvar, vote, + address_lookup_table, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, + config, ed25519_program, feature, incinerator, loader_v4, secp256k1_program, + solana_program::pubkey::Pubkey, stake, system_program, sysvar, vote, }, lazy_static::lazy_static, }; @@ -585,6 +585,9 @@ pub mod sdk_ids { vote::program::id(), feature::id(), bpf_loader_deprecated::id(), + address_lookup_table::program::id(), + loader_v4::id(), + stake::program::id(), #[allow(deprecated)] stake::config::id(), ]; From d4941cc420458d869ef87547703051f1f1ec9f2b Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 4 Oct 2023 08:47:47 -0700 Subject: [PATCH 237/407] 
initialize with new VoteState for tests (#33518) --- programs/vote/src/vote_state/mod.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index e83171d06e0844..871f4696c1a078 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -1095,8 +1095,6 @@ pub fn do_process_vote_state_update( // a. In many tests. // b. In the genesis tool that initializes a cluster to create the bootstrap validator. // c. In the ledger tool when creating bootstrap vote accounts. -// In all cases, initializing with the 1_14_11 version of VoteState is safest, as this version will in-place upgrade -// the first time it is altered by a vote transaction. pub fn create_account_with_authorized( node_pubkey: &Pubkey, authorized_voter: &Pubkey, @@ -1104,7 +1102,7 @@ pub fn create_account_with_authorized( commission: u8, lamports: u64, ) -> AccountSharedData { - let mut vote_account = AccountSharedData::new(lamports, VoteState1_14_11::size_of(), &id()); + let mut vote_account = AccountSharedData::new(lamports, VoteState::size_of(), &id()); let vote_state = VoteState::new( &VoteInit { @@ -1116,8 +1114,11 @@ pub fn create_account_with_authorized( &Clock::default(), ); - let version1_14_11 = VoteStateVersions::V1_14_11(Box::new(VoteState1_14_11::from(vote_state))); - VoteState::serialize(&version1_14_11, vote_account.data_as_mut_slice()).unwrap(); + VoteState::serialize( + &VoteStateVersions::Current(Box::new(vote_state)), + vote_account.data_as_mut_slice(), + ) + .unwrap(); vote_account } From daaeb7410eb01d31d754d9c91d626075ad599e9a Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 4 Oct 2023 11:56:39 -0400 Subject: [PATCH 238/407] Adds solana-nohash-hasher (#33521) --- Cargo.lock | 7 +++++++ Cargo.toml | 1 + accounts-db/Cargo.toml | 1 + programs/sbf/Cargo.lock | 7 +++++++ 4 files changed, 16 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 
b5394925adefb4..cc1d90de5ebb13 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5211,6 +5211,7 @@ dependencies = [ "solana-logger", "solana-measure", "solana-metrics", + "solana-nohash-hasher", "solana-program-runtime", "solana-rayon-threadlimit", "solana-sdk", @@ -6405,6 +6406,12 @@ dependencies = [ "url 2.4.1", ] +[[package]] +name = "solana-nohash-hasher" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" + [[package]] name = "solana-notifier" version = "1.18.0" diff --git a/Cargo.toml b/Cargo.toml index fa624857e059f6..c8a91758a33272 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -338,6 +338,7 @@ solana-measure = { path = "measure", version = "=1.18.0" } solana-merkle-tree = { path = "merkle-tree", version = "=1.18.0" } solana-metrics = { path = "metrics", version = "=1.18.0" } solana-net-utils = { path = "net-utils", version = "=1.18.0" } +solana-nohash-hasher = "0.2.1" solana-notifier = { path = "notifier", version = "=1.18.0" } solana-perf = { path = "perf", version = "=1.18.0" } solana-poh = { path = "poh", version = "=1.18.0" } diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index a19708768fdd08..36ddf7d0e75f10 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -48,6 +48,7 @@ solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } +solana-nohash-hasher = { workspace = true } solana-program-runtime = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3e749d42692ff0..59b18c18cb09eb 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4509,6 +4509,7 @@ dependencies = [ "solana-frozen-abi-macro", "solana-measure", "solana-metrics", + "solana-nohash-hasher", "solana-program-runtime", 
"solana-rayon-threadlimit", "solana-sdk", @@ -5177,6 +5178,12 @@ dependencies = [ "url 2.4.1", ] +[[package]] +name = "solana-nohash-hasher" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" + [[package]] name = "solana-perf" version = "1.18.0" From a25bb2e3039b006b32a235380259fb22174cd5cc Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Wed, 4 Oct 2023 18:17:42 +0200 Subject: [PATCH 239/407] Add error messages for BlockstoreError (#33427) * Add error messages for BlockstoreError * display underlying errors * address PR comments: remove unnecessary error from msg Co-authored-by: steviez * fix typo Co-authored-by: steviez --------- Co-authored-by: steviez --- ledger/src/blockstore_db.rs | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 184df713ef3a65..f51e577c7f0543 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -106,35 +106,49 @@ const OPTIMISTIC_SLOTS_CF: &str = "optimistic_slots"; #[derive(Error, Debug)] pub enum BlockstoreError { + #[error("shred for index exists")] ShredForIndexExists, + #[error("invalid shred data")] InvalidShredData(Box), + #[error("RocksDB error: {0}")] RocksDb(#[from] rocksdb::Error), + #[error("slot is not rooted")] SlotNotRooted, + #[error("dead slot")] DeadSlot, + #[error("io error: {0}")] Io(#[from] std::io::Error), + #[error("serialization error: {0}")] Serialize(#[from] Box), + #[error("fs extra error: {0}")] FsExtraError(#[from] fs_extra::error::Error), + #[error("slot cleaned up")] SlotCleanedUp, + #[error("unpack error: {0}")] UnpackError(#[from] UnpackError), + #[error("unable to set open file descriptor limit")] UnableToSetOpenFileDescriptorLimit, + #[error("transaction status slot mismatch")] TransactionStatusSlotMismatch, + #[error("empty epoch stakes")] EmptyEpochStakes, + 
#[error("no vote timestamps in range")] NoVoteTimestampsInRange, + #[error("protobuf encode error: {0}")] ProtobufEncodeError(#[from] prost::EncodeError), + #[error("protobuf decode error: {0}")] ProtobufDecodeError(#[from] prost::DecodeError), + #[error("parent entries unavailable")] ParentEntriesUnavailable, + #[error("slot unavailable")] SlotUnavailable, + #[error("unsupported transaction version")] UnsupportedTransactionVersion, + #[error("missing transaction metadata")] MissingTransactionMetadata, } pub type Result = std::result::Result; -impl std::fmt::Display for BlockstoreError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "blockstore error") - } -} - pub enum IteratorMode { Start, End, From d41fa346cc85282830549d1976501d4c8f8e94fa Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 4 Oct 2023 13:07:35 -0400 Subject: [PATCH 240/407] Uses `IntSet` for `ShrinkCandidates` (#33522) --- accounts-db/src/accounts_db.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index dad1f152f36de2..f028ad0daef810 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -80,6 +80,7 @@ use { rayon::{prelude::*, ThreadPool}, serde::{Deserialize, Serialize}, solana_measure::{measure::Measure, measure_us}, + solana_nohash_hasher::IntSet, solana_rayon_threadlimit::get_thread_count, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, @@ -767,7 +768,7 @@ type AccountSlots = HashMap>; type SlotOffsets = HashMap>; type ReclaimResult = (AccountSlots, SlotOffsets); type PubkeysRemovedFromAccountsIndex = HashSet; -type ShrinkCandidates = HashSet; +type ShrinkCandidates = IntSet; // Some hints for applicability of additional sanity checks for the do_load fast-path; // Slower fallback code path will be taken if the fast path has failed over the retry @@ -2488,7 +2489,7 @@ impl AccountsDb { 
recycle_stores: RwLock::new(RecycleStores::default()), uncleaned_pubkeys: DashMap::new(), next_id: AtomicAppendVecId::new(0), - shrink_candidate_slots: Mutex::new(ShrinkCandidates::new()), + shrink_candidate_slots: Mutex::new(ShrinkCandidates::default()), write_cache_limit_bytes: None, write_version: AtomicU64::new(0), paths: vec![], @@ -4323,7 +4324,7 @@ impl AccountsDb { // Working from the beginning of store_usage which are the most sparse and see when we can stop // shrinking while still achieving the overall goals. let mut shrink_slots = HashMap::new(); - let mut shrink_slots_next_batch = ShrinkCandidates::new(); + let mut shrink_slots_next_batch = ShrinkCandidates::default(); for usage in &store_usage { let store = &usage.store; let alive_ratio = (total_alive_bytes as f64) / (total_bytes as f64); @@ -8176,7 +8177,7 @@ impl AccountsDb { assert!(self.storage.no_shrink_in_progress()); let mut dead_slots = HashSet::new(); - let mut new_shrink_candidates = ShrinkCandidates::new(); + let mut new_shrink_candidates = ShrinkCandidates::default(); let mut measure = Measure::start("remove"); for (slot, account_info) in reclaims { // No cached accounts should make it here @@ -13325,7 +13326,7 @@ pub mod tests { fn test_select_candidates_by_total_usage_no_candidates() { // no input candidates -- none should be selected solana_logger::setup(); - let candidates: ShrinkCandidates = ShrinkCandidates::new(); + let candidates = ShrinkCandidates::default(); let db = AccountsDb::new_single_for_tests(); let (selected_candidates, next_candidates) = @@ -13339,7 +13340,7 @@ pub mod tests { fn test_select_candidates_by_total_usage_3_way_split_condition() { // three candidates, one selected for shrink, one is put back to the candidate list and one is ignored solana_logger::setup(); - let mut candidates = ShrinkCandidates::new(); + let mut candidates = ShrinkCandidates::default(); let db = AccountsDb::new_single_for_tests(); let common_store_path = Path::new(""); @@ -13413,7 +13414,7 @@ 
pub mod tests { // three candidates, 2 are selected for shrink, one is ignored solana_logger::setup(); let db = AccountsDb::new_single_for_tests(); - let mut candidates = ShrinkCandidates::new(); + let mut candidates = ShrinkCandidates::default(); let common_store_path = Path::new(""); let slot_id_1 = 12; @@ -13479,7 +13480,7 @@ pub mod tests { // 2 candidates, they must be selected to achieve the target alive ratio solana_logger::setup(); let db = AccountsDb::new_single_for_tests(); - let mut candidates = ShrinkCandidates::new(); + let mut candidates = ShrinkCandidates::default(); let slot1 = 12; let common_store_path = Path::new(""); From 5a9956824f807682c1ee5d8879c1eae14581b9e1 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 4 Oct 2023 13:26:31 -0400 Subject: [PATCH 241/407] Uses `IntSet` for `RootsTracker::uncleaned_roots` (#33524) --- accounts-db/src/accounts_index.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index f1c0b4e90972fc..cd37df61693248 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -20,6 +20,7 @@ use { ThreadPool, }, solana_measure::measure::Measure, + solana_nohash_hasher::IntSet, solana_sdk::{ account::ReadableAccount, clock::{BankId, Slot}, @@ -457,7 +458,7 @@ pub struct RootsTracker { /// Updated every time we add a new root or clean/shrink an append vec into irrelevancy. /// Range is approximately the last N slots where N is # slots per epoch. 
pub alive_roots: RollingBitField, - uncleaned_roots: HashSet, + uncleaned_roots: IntSet, } impl Default for RootsTracker { @@ -473,7 +474,7 @@ impl RootsTracker { pub fn new(max_width: u64) -> Self { Self { alive_roots: RollingBitField::new(max_width), - uncleaned_roots: HashSet::new(), + uncleaned_roots: IntSet::default(), } } @@ -1994,7 +1995,7 @@ impl + Into> AccountsIndex { tracker.alive_roots.get_all() } - pub fn clone_uncleaned_roots(&self) -> HashSet { + pub fn clone_uncleaned_roots(&self) -> IntSet { self.roots_tracker.read().unwrap().uncleaned_roots.clone() } From 25460f76e709a9768b2d68b2f17428209c410c24 Mon Sep 17 00:00:00 2001 From: Joe C Date: Wed, 4 Oct 2023 19:54:59 +0200 Subject: [PATCH 242/407] Bank: Add function to replace empty account with upgradeable program on feature activation (#32783) * replace program account * modify for all cases * remove non-data swap * address tests & conditional feedback * get the rent involved * mix in owner & executable * feature-related cases * stripped back to feature-specific case only * added feature * address initial feedback * added more lamport checks * condense tests * using test_case * add fail cases to tests * more cleanup * add verifiably built program * update program account state * cleaned up serializing logic * use full word capitalization * rename old & new to dst & src * swap src and dst in parameters * add warnings and errors * rename feature to programify * test suite description clarity * remove strings from datapoints * spell out source and destination * more verbose comments in account replace functions * move lamport calculation * swap lamport check for state check * move replace functions to helper module * make replace_account methods fallible * refactor error handling * add test for source program state --- runtime/src/bank.rs | 56 +-- runtime/src/bank/replace_account.rs | 191 ++++++++++ runtime/src/bank/tests.rs | 417 +++++++++++++++++++-- runtime/src/inline_feature_gate_program.rs | 5 + 
runtime/src/lib.rs | 1 + sdk/src/feature_set.rs | 5 + 6 files changed, 613 insertions(+), 62 deletions(-) create mode 100644 runtime/src/bank/replace_account.rs create mode 100644 runtime/src/inline_feature_gate_program.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index adc9da3a4b07bb..68e5492186ff9d 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -42,6 +42,7 @@ use { builtins::{BuiltinPrototype, BUILTINS}, epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, + inline_feature_gate_program, runtime_config::RuntimeConfig, serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_hash::SnapshotHash, @@ -215,6 +216,7 @@ pub mod bank_hash_details; mod builtin_programs; pub mod epoch_accounts_hash_utils; mod metrics; +mod replace_account; mod serde_snapshot; mod sysvar_cache; #[cfg(test)] @@ -8054,6 +8056,24 @@ impl Bank { if new_feature_activations.contains(&feature_set::update_hashes_per_tick::id()) { self.apply_updated_hashes_per_tick(DEFAULT_HASHES_PER_TICK); } + + if new_feature_activations.contains(&feature_set::programify_feature_gate_program::id()) { + let datapoint_name = "bank-progamify_feature_gate_program"; + if let Err(e) = replace_account::replace_empty_account_with_upgradeable_program( + self, + &feature::id(), + &inline_feature_gate_program::noop_program::id(), + datapoint_name, + ) { + warn!( + "{}: Failed to replace empty account {} with upgradeable program: {}", + datapoint_name, + feature::id(), + e + ); + datapoint_warn!(datapoint_name, ("slot", self.slot(), i64),); + } + } } fn apply_updated_hashes_per_tick(&mut self, hashes_per_tick: u64) { @@ -8196,42 +8216,6 @@ impl Bank { } } - /// Use to replace programs by feature activation - #[allow(dead_code)] - fn replace_program_account( - &mut self, - old_address: &Pubkey, - new_address: &Pubkey, - datapoint_name: &'static str, - ) { - if let Some(old_account) = self.get_account_with_fixed_root(old_address) { - if let 
Some(new_account) = self.get_account_with_fixed_root(new_address) { - datapoint_info!(datapoint_name, ("slot", self.slot, i64)); - - // Burn lamports in the old account - self.capitalization - .fetch_sub(old_account.lamports(), Relaxed); - - // Transfer new account to old account - self.store_account(old_address, &new_account); - - // Clear new account - self.store_account(new_address, &AccountSharedData::default()); - - // Unload a program from the bank's cache - self.loaded_programs_cache - .write() - .unwrap() - .remove_programs([*old_address].into_iter()); - - self.calculate_and_update_accounts_data_size_delta_off_chain( - old_account.data().len(), - new_account.data().len(), - ); - } - } - } - /// Get all the accounts for this bank and calculate stats pub fn get_total_accounts_stats(&self) -> ScanResult { let accounts = self.get_all_accounts()?; diff --git a/runtime/src/bank/replace_account.rs b/runtime/src/bank/replace_account.rs new file mode 100644 index 00000000000000..8d650aeebe7e87 --- /dev/null +++ b/runtime/src/bank/replace_account.rs @@ -0,0 +1,191 @@ +use { + super::Bank, + log::*, + solana_accounts_db::accounts_index::ZeroLamport, + solana_sdk::{ + account::{Account, AccountSharedData, ReadableAccount}, + bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + pubkey::Pubkey, + }, + std::sync::atomic::Ordering::Relaxed, + thiserror::Error, +}; + +/// Errors returned by `replace_account` methods +#[derive(Debug, Error)] +pub enum ReplaceAccountError { + /// Account not found + #[error("Account not found: {0:?}")] + AccountNotFound(Pubkey), + /// Account exists + #[error("Account exists: {0:?}")] + AccountExists(Pubkey), + #[error("Bincode Error: {0}")] + BincodeError(#[from] bincode::Error), + /// Not an upgradeable program + #[error("Not an upgradeable program")] + NotAnUpgradeableProgram, +} + +/// Moves one account in place of another +/// `source`: the account to replace with +/// `destination`: the account to be replaced +fn move_account( + 
bank: &Bank, + source_address: &Pubkey, + source_account: &V, + destination_address: &Pubkey, + destination_account: Option<&U>, +) where + U: ReadableAccount + Sync + ZeroLamport, + V: ReadableAccount + Sync + ZeroLamport, +{ + let (destination_lamports, destination_len) = match destination_account { + Some(destination_account) => ( + destination_account.lamports(), + destination_account.data().len(), + ), + None => (0, 0), + }; + + // Burn lamports in the destination account + bank.capitalization.fetch_sub(destination_lamports, Relaxed); + + // Transfer source account to destination account + bank.store_account(destination_address, source_account); + + // Clear source account + bank.store_account(source_address, &AccountSharedData::default()); + + bank.calculate_and_update_accounts_data_size_delta_off_chain( + destination_len, + source_account.data().len(), + ); +} + +/// Use to replace non-upgradeable programs by feature activation +/// `source`: the non-upgradeable program account to replace with +/// `destination`: the non-upgradeable program account to be replaced +#[allow(dead_code)] +pub(crate) fn replace_non_upgradeable_program_account( + bank: &Bank, + source_address: &Pubkey, + destination_address: &Pubkey, + datapoint_name: &'static str, +) -> Result<(), ReplaceAccountError> { + let destination_account = bank + .get_account_with_fixed_root(destination_address) + .ok_or(ReplaceAccountError::AccountNotFound(*destination_address))?; + let source_account = bank + .get_account_with_fixed_root(source_address) + .ok_or(ReplaceAccountError::AccountNotFound(*source_address))?; + + datapoint_info!(datapoint_name, ("slot", bank.slot, i64)); + + move_account( + bank, + source_address, + &source_account, + destination_address, + Some(&destination_account), + ); + + // Unload a program from the bank's cache + bank.loaded_programs_cache + .write() + .unwrap() + .remove_programs([*destination_address].into_iter()); + + Ok(()) +} + +/// Use to replace an empty account 
with a program by feature activation +/// Note: The upgradeable program should have both: +/// - Program account +/// - Program data account +/// `source`: the upgradeable program account to replace with +/// `destination`: the empty account to be replaced +pub(crate) fn replace_empty_account_with_upgradeable_program( + bank: &Bank, + source_address: &Pubkey, + destination_address: &Pubkey, + datapoint_name: &'static str, +) -> Result<(), ReplaceAccountError> { + // Must be attempting to replace an empty account with a program + // account _and_ data account + let source_account = bank + .get_account_with_fixed_root(source_address) + .ok_or(ReplaceAccountError::AccountNotFound(*source_address))?; + + let (destination_data_address, _) = Pubkey::find_program_address( + &[destination_address.as_ref()], + &bpf_loader_upgradeable::id(), + ); + let (source_data_address, _) = + Pubkey::find_program_address(&[source_address.as_ref()], &bpf_loader_upgradeable::id()); + + // Make sure the data within the source account is the PDA of its + // data account. This also means it has at least the necessary + // lamports for rent. + let source_state = bincode::deserialize::(source_account.data())?; + if !matches!(source_state, UpgradeableLoaderState::Program { .. }) { + return Err(ReplaceAccountError::NotAnUpgradeableProgram); + } + + let source_data_account = bank + .get_account_with_fixed_root(&source_data_address) + .ok_or(ReplaceAccountError::AccountNotFound(source_data_address))?; + + // Make sure the destination account is empty + // We aren't going to check that there isn't a data account at + // the known program-derived address (ie. 
`destination_data_address`), + // because if it exists, it will be overwritten + if bank + .get_account_with_fixed_root(destination_address) + .is_some() + { + return Err(ReplaceAccountError::AccountExists(*destination_address)); + } + let state = UpgradeableLoaderState::Program { + programdata_address: destination_data_address, + }; + let data = bincode::serialize(&state)?; + let lamports = bank.get_minimum_balance_for_rent_exemption(data.len()); + let created_program_account = Account { + lamports, + data, + owner: bpf_loader_upgradeable::id(), + executable: true, + rent_epoch: source_account.rent_epoch(), + }; + + datapoint_info!(datapoint_name, ("slot", bank.slot, i64)); + let change_in_capitalization = source_account.lamports().saturating_sub(lamports); + + // Replace the destination data account with the source one + // If the destination data account does not exist, it will be created + // If it does exist, it will be overwritten + move_account( + bank, + &source_data_address, + &source_data_account, + &destination_data_address, + bank.get_account_with_fixed_root(&destination_data_address) + .as_ref(), + ); + + // Write the source data account's PDA into the destination program account + move_account( + bank, + source_address, + &created_program_account, + destination_address, + None::<&AccountSharedData>, + ); + + // Any remaining lamports in the source program account are burnt + bank.capitalization + .fetch_sub(change_in_capitalization, Relaxed); + + Ok(()) +} diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 3263eb9c41db7c..58ce790d43d0d4 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -8,6 +8,10 @@ use { }, crate::{ accounts_background_service::{PrunedBanksRequestHandler, SendDroppedBankCallback}, + bank::replace_account::{ + replace_empty_account_with_upgradeable_program, + replace_non_upgradeable_program_account, ReplaceAccountError, + }, bank_client::BankClient, 
epoch_rewards_hasher::hash_rewards_into_partitions, genesis_utils::{ @@ -8003,42 +8007,403 @@ fn test_compute_active_feature_set() { assert!(feature_set.is_active(&test_feature)); } +fn test_program_replace_set_up_account( + bank: &Bank, + pubkey: &Pubkey, + lamports: u64, + state: &T, + owner: &Pubkey, + executable: bool, +) -> AccountSharedData { + let data_len = bincode::serialized_size(state).unwrap() as usize; + let mut account = AccountSharedData::from(Account { + lamports, + owner: *owner, + executable, + data: vec![0u8; data_len], + ..Account::default() + }); + account.serialize_data(state).unwrap(); + bank.store_account_and_update_capitalization(pubkey, &account); + assert_eq!(bank.get_balance(pubkey), lamports); + account +} + #[test] -fn test_program_replacement() { - let mut bank = create_simple_test_bank(0); +fn test_replace_non_upgradeable_program_account() { + // Non-upgradeable program + // - Destination: [Destination program data] + // - Source: [*Source program data] + // + // Should replace the destination program account with the source program account: + // - Destination: [*Source program data] + let bpf_id = bpf_loader::id(); + let bank = create_simple_test_bank(0); - // Setup original program account - let old_address = Pubkey::new_unique(); - let new_address = Pubkey::new_unique(); - bank.store_account_and_update_capitalization( - &old_address, - &AccountSharedData::from(Account { - lamports: 100, - ..Account::default() - }), + let destination = Pubkey::new_unique(); + let destination_state = vec![0u8; 4]; + let destination_lamports = bank.get_minimum_balance_for_rent_exemption(destination_state.len()); + test_program_replace_set_up_account( + &bank, + &destination, + destination_lamports, + &destination_state, + &bpf_id, + true, ); - assert_eq!(bank.get_balance(&old_address), 100); - // Setup new program account - let new_program_account = AccountSharedData::from(Account { - lamports: 123, - ..Account::default() - }); - 
bank.store_account_and_update_capitalization(&new_address, &new_program_account); - assert_eq!(bank.get_balance(&new_address), 123); + let source = Pubkey::new_unique(); + let source_state = vec![6; 30]; + let source_lamports = bank.get_minimum_balance_for_rent_exemption(source_state.len()); + let check_source_account = test_program_replace_set_up_account( + &bank, + &source, + source_lamports, + &source_state, + &bpf_id, + true, + ); + let check_data_account_data = check_source_account.data().to_vec(); + + let original_capitalization = bank.capitalization(); + + replace_non_upgradeable_program_account( + &bank, + &source, + &destination, + "bank-apply_program_replacement", + ) + .unwrap(); + + // Destination program account balance is now the source program account's balance + assert_eq!(bank.get_balance(&destination), source_lamports); + + // Source program account is now empty + assert_eq!(bank.get_balance(&source), 0); + + // Destination program account now holds the source program data, ie: + // - Destination: [*Source program data] + let destination_account = bank.get_account(&destination).unwrap(); + assert_eq!(destination_account.data(), &check_data_account_data); + + // Ownership & executable match the source program account + assert_eq!(destination_account.owner(), &bpf_id); + assert!(destination_account.executable()); + + // The destination account's original lamports balance was burnt + assert_eq!( + bank.capitalization(), + original_capitalization - destination_lamports + ); +} + +#[test_case( + Pubkey::new_unique(), + None; + "Empty destination account _without_ corresponding data account" +)] +#[test_case( + Pubkey::new_unique(), + Some(vec![4; 40]); + "Empty destination account _with_ corresponding data account" +)] +#[test_case( + feature::id(), // `Feature11111111` + None; + "Native destination account _without_ corresponding data account" +)] +#[test_case( + feature::id(), // `Feature11111111` + Some(vec![4; 40]); + "Native destination account 
_with_ corresponding data account" +)] +fn test_replace_empty_account_with_upgradeable_program_success( + destination: Pubkey, + maybe_destination_data_state: Option>, // Inner data of the destination program _data_ account +) { + // Ensures a program account and data account are created when replacing an + // empty account, ie: + // - Destination: PDA(DestinationData) + // - DestinationData: [Destination program data] + // + // If the destination data account exists, it will be overwritten + let bpf_upgradeable_id = bpf_loader_upgradeable::id(); + let bank = create_simple_test_bank(0); + + // Create the test source accounts, one for program and one for data + let source = Pubkey::new_unique(); + let (source_data, _) = Pubkey::find_program_address(&[source.as_ref()], &bpf_upgradeable_id); + let source_state = UpgradeableLoaderState::Program { + programdata_address: source_data, + }; + let source_lamports = + bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); + let source_data_state = vec![6; 30]; + let source_data_lamports = bank.get_minimum_balance_for_rent_exemption(source_data_state.len()); + test_program_replace_set_up_account( + &bank, + &source, + source_lamports, + &source_state, + &bpf_upgradeable_id, + true, + ); + let check_source_data_account = test_program_replace_set_up_account( + &bank, + &source_data, + source_data_lamports, + &source_data_state, + &bpf_upgradeable_id, + false, + ); + let check_data_account_data = check_source_data_account.data().to_vec(); + + // Derive the well-known PDA address for the destination data account + let (destination_data, _) = + Pubkey::find_program_address(&[destination.as_ref()], &bpf_upgradeable_id); + + // Determine the lamports that will be burnt after the replacement + let burnt_after_rent = if let Some(destination_data_state) = maybe_destination_data_state { + // Create the data account if necessary + let destination_data_lamports = + 
bank.get_minimum_balance_for_rent_exemption(destination_data_state.len()); + test_program_replace_set_up_account( + &bank, + &destination_data, + destination_data_lamports, + &destination_data_state, + &bpf_upgradeable_id, + false, + ); + destination_data_lamports + source_lamports + - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()) + } else { + source_lamports + - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()) + }; + + let original_capitalization = bank.capitalization(); + + // Do the replacement + replace_empty_account_with_upgradeable_program( + &bank, + &source, + &destination, + "bank-apply_empty_account_replacement_for_program", + ) + .unwrap(); + + // Destination program account was created and funded to pay for minimum rent + // for the PDA + assert_eq!( + bank.get_balance(&destination), + bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()), + ); + + // Destination data account was created, now holds the source data account's balance + assert_eq!(bank.get_balance(&destination_data), source_data_lamports); + + // Source program accounts are now empty + assert_eq!(bank.get_balance(&source), 0); + assert_eq!(bank.get_balance(&source_data), 0); + + // Destination program account holds the PDA, ie: + // - Destination: PDA(DestinationData) + let destination_account = bank.get_account(&destination).unwrap(); + assert_eq!( + destination_account.data(), + &bincode::serialize(&UpgradeableLoaderState::Program { + programdata_address: destination_data + }) + .unwrap(), + ); + + // Destination data account holds the source data, ie: + // - DestinationData: [*Source program data] + let destination_data_account = bank.get_account(&destination_data).unwrap(); + assert_eq!(destination_data_account.data(), &check_data_account_data); + + // Ownership & executable match the source program accounts + assert_eq!(destination_account.owner(), &bpf_upgradeable_id); + 
assert!(destination_account.executable()); + assert_eq!(destination_data_account.owner(), &bpf_upgradeable_id); + assert!(!destination_data_account.executable()); + + // The remaining lamports from both program accounts minus the rent-exempt + // minimum were burnt + assert_eq!( + bank.capitalization(), + original_capitalization - burnt_after_rent + ); +} + +#[test_case( + None; + "Existing destination account _without_ corresponding data account" +)] +#[test_case( + Some(vec![4; 40]); + "Existing destination account _with_ corresponding data account" +)] +fn test_replace_empty_account_with_upgradeable_program_fail_when_account_exists( + maybe_destination_data_state: Option>, // Inner data of the destination program _data_ account +) { + // Should not be allowed to execute replacement + let bpf_upgradeable_id = bpf_loader_upgradeable::id(); + let bank = create_simple_test_bank(0); + + // Create the test destination account with some arbitrary data and lamports balance + let destination = Pubkey::new_unique(); + let destination_state = vec![0, 0, 0, 0]; // Arbitrary bytes, doesn't matter + let destination_lamports = bank.get_minimum_balance_for_rent_exemption(destination_state.len()); + let destination_account = test_program_replace_set_up_account( + &bank, + &destination, + destination_lamports, + &destination_state, + &bpf_upgradeable_id, + true, + ); + + // Create the test source accounts, one for program and one for data + let source = Pubkey::new_unique(); + let (source_data, _) = Pubkey::find_program_address(&[source.as_ref()], &bpf_upgradeable_id); + let source_state = UpgradeableLoaderState::Program { + programdata_address: source_data, + }; + let source_lamports = + bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); + let source_data_state = vec![6; 30]; + let source_data_lamports = bank.get_minimum_balance_for_rent_exemption(source_data_state.len()); + let source_account = test_program_replace_set_up_account( + &bank, + 
&source, + source_lamports, + &source_state, + &bpf_upgradeable_id, + true, + ); + let source_data_account = test_program_replace_set_up_account( + &bank, + &source_data, + source_data_lamports, + &source_data_state, + &bpf_upgradeable_id, + false, + ); + + // Derive the well-known PDA address for the destination data account + let (destination_data, _) = + Pubkey::find_program_address(&[destination.as_ref()], &bpf_upgradeable_id); + + // Create the data account if necessary + let destination_data_account = + if let Some(destination_data_state) = maybe_destination_data_state { + let destination_data_lamports = + bank.get_minimum_balance_for_rent_exemption(destination_data_state.len()); + let destination_data_account = test_program_replace_set_up_account( + &bank, + &destination_data, + destination_data_lamports, + &destination_data_state, + &bpf_upgradeable_id, + false, + ); + Some(destination_data_account) + } else { + None + }; let original_capitalization = bank.capitalization(); - bank.replace_program_account(&old_address, &new_address, "bank-apply_program_replacement"); + // Attempt the replacement + assert_matches!( + replace_empty_account_with_upgradeable_program( + &bank, + &source, + &destination, + "bank-apply_empty_account_replacement_for_program", + ) + .unwrap_err(), + ReplaceAccountError::AccountExists(..) 
+ ); + + // Everything should be unchanged + assert_eq!(bank.get_account(&destination).unwrap(), destination_account); + if let Some(destination_data_account) = destination_data_account { + assert_eq!( + bank.get_account(&destination_data).unwrap(), + destination_data_account + ); + } + assert_eq!(bank.get_account(&source).unwrap(), source_account); + assert_eq!(bank.get_account(&source_data).unwrap(), source_data_account); + assert_eq!(bank.capitalization(), original_capitalization); +} + +#[test] +fn test_replace_empty_account_with_upgradeable_program_fail_when_not_upgradeable_program() { + // Should not be allowed to execute replacement + let bpf_upgradeable_id = bpf_loader_upgradeable::id(); + let bank = create_simple_test_bank(0); + + // Create the test destination account with some arbitrary data and lamports balance + let destination = Pubkey::new_unique(); + let destination_state = vec![0, 0, 0, 0]; // Arbitrary bytes, doesn't matter + let destination_lamports = bank.get_minimum_balance_for_rent_exemption(destination_state.len()); + let destination_account = test_program_replace_set_up_account( + &bank, + &destination, + destination_lamports, + &destination_state, + &bpf_upgradeable_id, + true, + ); + + // Create the test source accounts, one for program and one for data + let source = Pubkey::new_unique(); + let (source_data, _) = Pubkey::find_program_address(&[source.as_ref()], &bpf_upgradeable_id); + let source_state = [0, 0, 0, 0]; // Arbitrary bytes, NOT an upgradeable program + let source_lamports = + bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); + let source_data_state = vec![6; 30]; + let source_data_lamports = bank.get_minimum_balance_for_rent_exemption(source_data_state.len()); + let source_account = test_program_replace_set_up_account( + &bank, + &source, + source_lamports, + &source_state, + &bpf_upgradeable_id, + true, + ); + let source_data_account = test_program_replace_set_up_account( + &bank, + 
&source_data, + source_data_lamports, + &source_data_state, + &bpf_upgradeable_id, + false, + ); - // New program account is now empty - assert_eq!(bank.get_balance(&new_address), 0); + let original_capitalization = bank.capitalization(); - // Old program account holds the new program account - assert_eq!(bank.get_account(&old_address), Some(new_program_account)); + // Attempt the replacement + assert_matches!( + replace_empty_account_with_upgradeable_program( + &bank, + &source, + &destination, + "bank-apply_empty_account_replacement_for_program", + ) + .unwrap_err(), + ReplaceAccountError::NotAnUpgradeableProgram + ); - // Lamports in the old token account were burnt - assert_eq!(bank.capitalization(), original_capitalization - 100); + // Everything should be unchanged + assert_eq!(bank.get_account(&destination).unwrap(), destination_account); + assert_eq!(bank.get_account(&source).unwrap(), source_account); + assert_eq!(bank.get_account(&source_data).unwrap(), source_data_account); + assert_eq!(bank.capitalization(), original_capitalization); } fn min_rent_exempt_balance_for_sysvars(bank: &Bank, sysvar_ids: &[Pubkey]) -> u64 { diff --git a/runtime/src/inline_feature_gate_program.rs b/runtime/src/inline_feature_gate_program.rs new file mode 100644 index 00000000000000..125dc74df243d6 --- /dev/null +++ b/runtime/src/inline_feature_gate_program.rs @@ -0,0 +1,5 @@ +//! 
Contains replacement program IDs for the feature gate program + +pub(crate) mod noop_program { + solana_sdk::declare_id!("2rqZsQBbacRbuAuTSuJ7n49UQT9fzes8RLggFcmB9YuN"); +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index ff94a68c69fa1e..503d24410e8cdc 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -14,6 +14,7 @@ pub mod commitment; mod epoch_rewards_hasher; pub mod epoch_stakes; pub mod genesis_utils; +pub mod inline_feature_gate_program; pub mod inline_spl_associated_token_account; pub mod loader_utils; pub mod non_circulating_supply; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index b414a5f6ab4551..9ec56b03e0e3bf 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -700,6 +700,10 @@ pub mod better_error_codes_for_tx_lamport_check { solana_sdk::declare_id!("Ffswd3egL3tccB6Rv3XY6oqfdzn913vUcjCSnpvCKpfx"); } +pub mod programify_feature_gate_program { + solana_sdk::declare_id!("8GdovDzVwWU5edz2G697bbB7GZjrUc6aQZLWyNNAtHdg"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -870,6 +874,7 @@ lazy_static! 
{ (require_rent_exempt_split_destination::id(), "Require stake split destination account to be rent exempt"), (better_error_codes_for_tx_lamport_check::id(), "better error codes for tx lamport check #33353"), (enable_alt_bn128_compression_syscall::id(), "add alt_bn128 compression syscalls"), + (programify_feature_gate_program::id(), "move feature gate activation logic to an on-chain program #32783"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From f714a44c2ac277c7d3d91ab4ec73625a1efede36 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 4 Oct 2023 15:04:28 -0400 Subject: [PATCH 243/407] Uses `IntSet` for `RollingBitField::excess` (#33523) --- accounts-db/src/rolling_bit_field.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/rolling_bit_field.rs b/accounts-db/src/rolling_bit_field.rs index 1b520e4b5eef63..cfbfe820a176c7 100644 --- a/accounts-db/src/rolling_bit_field.rs +++ b/accounts-db/src/rolling_bit_field.rs @@ -2,7 +2,7 @@ //! Relies on there being a sliding window of key values. The key values continue to increase. //! Old key values are removed from the lesser values and do not accumulate. -use {bv::BitVec, solana_sdk::clock::Slot, std::collections::HashSet}; +use {bv::BitVec, solana_nohash_hasher::IntSet, solana_sdk::clock::Slot}; #[derive(Debug, Default, AbiExample, Clone)] pub struct RollingBitField { @@ -15,7 +15,7 @@ pub struct RollingBitField { // They would cause us to exceed max_width if we stored them in our bit field. // We only expect these items in conditions where there is some other bug in the system // or in testing when large ranges are created. 
- excess: HashSet, + excess: IntSet, } impl PartialEq for RollingBitField { @@ -47,7 +47,7 @@ impl RollingBitField { count: 0, min: 0, max_exclusive: 0, - excess: HashSet::new(), + excess: IntSet::default(), } } @@ -290,7 +290,7 @@ impl RollingBitField { #[cfg(test)] pub mod tests { - use {super::*, log::*, solana_measure::measure::Measure}; + use {super::*, log::*, solana_measure::measure::Measure, std::collections::HashSet}; impl RollingBitField { pub fn clear(&mut self) { From befc903993a172847f497d51002f7ea6195c2b41 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 4 Oct 2023 16:21:06 -0400 Subject: [PATCH 244/407] Uses `IntSet` in AccountsDb::calc_delete_dependencies() (#33528) --- accounts-db/src/accounts_db.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f028ad0daef810..c5310f059851c6 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2816,7 +2816,7 @@ impl AccountsDb { // Another pass to check if there are some filtered accounts which // do not match the criteria of deleting all appendvecs which contain them // then increment their storage count. - let mut already_counted = HashSet::new(); + let mut already_counted = IntSet::default(); for (pubkey, (account_infos, ref_count_from_storage)) in purges.iter() { let mut failed_slot = None; let all_stores_being_deleted = @@ -2861,7 +2861,7 @@ impl AccountsDb { } // increment store_counts to non-zero for all stores that can not be deleted. 
- let mut pending_stores = HashSet::new(); + let mut pending_stores = IntSet::default(); for (slot, _account_info) in account_infos { if !already_counted.contains(slot) { pending_stores.insert(*slot); From 3b9304140113fa4aeb36125b23b3d545f288c484 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 4 Oct 2023 16:21:44 -0400 Subject: [PATCH 245/407] Uses `IntSet` for dead slots (#33529) --- accounts-db/src/accounts_db.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index c5310f059851c6..fd552397381d6d 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3793,7 +3793,7 @@ impl AccountsDb { /// and should not be unref'd. If they exist in the accounts index, they are NEW. fn process_dead_slots( &self, - dead_slots: &HashSet, + dead_slots: &IntSet, purged_account_slots: Option<&mut AccountSlots>, purge_stats: &PurgeStats, pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex, @@ -8170,13 +8170,13 @@ impl AccountsDb { expected_slot: Option, mut reclaimed_offsets: Option<&mut SlotOffsets>, reset_accounts: bool, - ) -> HashSet + ) -> IntSet where I: Iterator, { assert!(self.storage.no_shrink_in_progress()); - let mut dead_slots = HashSet::new(); + let mut dead_slots = IntSet::default(); let mut new_shrink_candidates = ShrinkCandidates::default(); let mut measure = Measure::start("remove"); for (slot, account_info) in reclaims { @@ -8394,7 +8394,7 @@ impl AccountsDb { /// and should not be unref'd. If they exist in the accounts index, they are NEW. 
fn clean_stored_dead_slots( &self, - dead_slots: &HashSet, + dead_slots: &IntSet, purged_account_slots: Option<&mut AccountSlots>, pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex, ) { @@ -13242,7 +13242,7 @@ pub mod tests { #[test] fn test_clean_stored_dead_slots_empty() { let accounts = AccountsDb::new_single_for_tests(); - let mut dead_slots = HashSet::new(); + let mut dead_slots = IntSet::default(); dead_slots.insert(10); accounts.clean_stored_dead_slots(&dead_slots, None, &HashSet::default()); } From c66af12bdba62ace3191f562b266bd71b9d72c76 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 4 Oct 2023 16:34:18 -0400 Subject: [PATCH 246/407] Uses `IntSet` for `RemoveUnrootedSlotsSynchronization::slots_under_contention` (#33530) --- accounts-db/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index fd552397381d6d..4516643fcc35f8 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1416,7 +1416,7 @@ impl RecycleStores { #[derive(Debug, Default)] struct RemoveUnrootedSlotsSynchronization { // slots being flushed from the cache or being purged - slots_under_contention: Mutex>, + slots_under_contention: Mutex>, signal: Condvar, } From feeff68fd69c612bf7ce756657e716e818f36c04 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 12:55:05 +0000 Subject: [PATCH 247/407] build(deps): bump rayon from 1.7.0 to 1.8.0 (#33346) * build(deps): bump rayon from 1.7.0 to 1.8.0 Bumps [rayon](https://github.com/rayon-rs/rayon) from 1.7.0 to 1.8.0. - [Changelog](https://github.com/rayon-rs/rayon/blob/master/RELEASES.md) - [Commits](https://github.com/rayon-rs/rayon/compare/rayon-core-v1.7.0...rayon-core-v1.8.0) --- updated-dependencies: - dependency-name: rayon dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 10 ++++------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 10 ++++------ 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc1d90de5ebb13..693db73691fbe4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4284,9 +4284,9 @@ checksum = "655b020bbf5c89791160a30f0d4706d8ec7aa5718d6a198f6df19c400e4f4470" [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -4294,14 +4294,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c8a91758a33272..57fb36201cee77 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -272,7 +272,7 @@ rand = "0.8.5" rand_chacha = "0.3.1" rand_core = "0.6.4" raptorq = "1.7.0" -rayon = "1.7.0" +rayon = "1.8.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" regex = "1.9.6" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 59b18c18cb09eb..9a3dadb8e9fe0e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3733,9 +3733,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -3743,14 +3743,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] From 2353cc531326e19957430d7f66ea4b0fbd9a6c2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 13:06:31 +0000 Subject: [PATCH 248/407] build(deps): bump bytecount from 0.6.3 to 0.6.4 (#33502) Bumps [bytecount](https://github.com/llogiq/bytecount) from 0.6.3 to 0.6.4. - [Commits](https://github.com/llogiq/bytecount/commits) --- updated-dependencies: - dependency-name: bytecount dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 693db73691fbe4..283fa60517a80e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -864,9 +864,9 @@ dependencies = [ [[package]] name = "bytecount" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" +checksum = "ad152d03a2c813c80bb94fedbf3a3f02b28f793e39e7c214c8a0bcc196343de7" [[package]] name = "bytemuck" diff --git a/Cargo.toml b/Cargo.toml index 57fb36201cee77..5ed44239dcc8ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -156,7 +156,7 @@ borsh = "0.10.3" bs58 = "0.4.0" bv = "0.11.1" byte-unit = "4.0.19" -bytecount = "0.6.3" +bytecount = "0.6.4" bytemuck = "1.14.0" byteorder = "1.4.3" bytes = "1.5" From abb1f2301d2cf3d87c34c73c95d02f9e79e56741 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 14:05:26 +0000 Subject: [PATCH 249/407] build(deps): bump sha2 from 0.10.7 to 0.10.8 (#33422) * build(deps): bump sha2 from 0.10.7 to 0.10.8 Bumps [sha2](https://github.com/RustCrypto/hashes) from 0.10.7 to 0.10.8. - [Commits](https://github.com/RustCrypto/hashes/compare/sha2-v0.10.7...sha2-v0.10.8) --- updated-dependencies: - dependency-name: sha2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 18 +++++++++--------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 283fa60517a80e..4b60782e00cf45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1746,7 +1746,7 @@ dependencies = [ "derivation-path", "ed25519-dalek", "hmac 0.12.1", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -4929,9 +4929,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -5943,7 +5943,7 @@ dependencies = [ "serde", "serde_bytes", "serde_derive", - "sha2 0.10.7", + "sha2 0.10.8", "solana-frozen-abi-macro", "solana-logger", "subtle", @@ -6166,7 +6166,7 @@ dependencies = [ "scopeguard", "serde", "serde_bytes", - "sha2 0.10.7", + "sha2 0.10.8", "solana-account-decoder", "solana-accounts-db", "solana-bpf-loader-program", @@ -6528,7 +6528,7 @@ dependencies = [ "serde_bytes", "serde_derive", "serde_json", - "sha2 0.10.7", + "sha2 0.10.8", "sha3 0.10.4", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -6955,7 +6955,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_with", - "sha2 0.10.7", + "sha2 0.10.8", "sha3 0.10.4", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -7642,7 +7642,7 @@ checksum = "0e5f2044ca42c8938d54d1255ce599c79a1ffd86b677dfab695caa20f9ffc3f2" dependencies = [ "proc-macro2", "quote", - "sha2 0.10.7", + "sha2 0.10.8", "syn 2.0.37", "thiserror", ] @@ -7700,7 +7700,7 @@ checksum = 
"ab5269c8e868da17b6552ef35a51355a017bd8e0eae269c201fef830d35fa52c" dependencies = [ "proc-macro2", "quote", - "sha2 0.10.7", + "sha2 0.10.8", "syn 2.0.37", ] diff --git a/Cargo.toml b/Cargo.toml index 5ed44239dcc8ce..0a6a55f1b23895 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -291,7 +291,7 @@ serde_json = "1.0.107" serde_yaml = "0.9.25" serial_test = "2.0.0" serde_with = { version = "2.3.3", default-features = false } -sha2 = "0.10.7" +sha2 = "0.10.8" sha3 = "0.10.4" signal-hook = "0.3.17" siphasher = "0.3.11" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9a3dadb8e9fe0e..76d3c4bc32f9f5 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1433,7 +1433,7 @@ dependencies = [ "derivation-path", "ed25519-dalek", "hmac 0.12.1", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -4280,9 +4280,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -4933,7 +4933,7 @@ dependencies = [ "serde", "serde_bytes", "serde_derive", - "sha2 0.10.7", + "sha2 0.10.8", "solana-frozen-abi-macro", "subtle", "thiserror", @@ -5074,7 +5074,7 @@ dependencies = [ "scopeguard", "serde", "serde_bytes", - "sha2 0.10.7", + "sha2 0.10.8", "solana-account-decoder", "solana-accounts-db", "solana-bpf-loader-program", @@ -5263,7 +5263,7 @@ dependencies = [ "serde_bytes", "serde_derive", "serde_json", - "sha2 0.10.7", + "sha2 0.10.8", "sha3 0.10.4", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -6047,7 +6047,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_with", - "sha2 0.10.7", + "sha2 0.10.8", "sha3 0.10.4", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -6561,7 +6561,7 @@ checksum = 
"0e5f2044ca42c8938d54d1255ce599c79a1ffd86b677dfab695caa20f9ffc3f2" dependencies = [ "proc-macro2", "quote", - "sha2 0.10.7", + "sha2 0.10.8", "syn 2.0.37", "thiserror", ] @@ -6609,7 +6609,7 @@ checksum = "ab5269c8e868da17b6552ef35a51355a017bd8e0eae269c201fef830d35fa52c" dependencies = [ "proc-macro2", "quote", - "sha2 0.10.7", + "sha2 0.10.8", "syn 2.0.37", ] From 9c2663f7a51763de969a4d15d33030b3a8bb0f72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 14:46:10 +0000 Subject: [PATCH 250/407] build(deps): bump num_enum from 0.6.1 to 0.7.0 (#33480) * build(deps): bump num_enum from 0.6.1 to 0.7.0 Bumps [num_enum](https://github.com/illicitonion/num_enum) from 0.6.1 to 0.7.0. - [Commits](https://github.com/illicitonion/num_enum/compare/0.6.1...0.7.0) --- updated-dependencies: - dependency-name: num_enum dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * updates programs/sbf/Cargo.lock --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: behzad nouri --- Cargo.lock | 12 ++++++------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 12 ++++++------ 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b60782e00cf45..44f4a34ce8a140 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5190,7 +5190,7 @@ dependencies = [ "num-derive 0.3.3", "num-traits", "num_cpus", - "num_enum 0.6.1", + "num_enum 0.7.0", "ouroboros", "percentage", "qualifier_attr", @@ -5432,7 +5432,7 @@ dependencies = [ "log", "memmap2", "modular-bitfield", - "num_enum 0.6.1", + "num_enum 0.7.0", "rand 0.8.5", "rayon", "solana-logger", @@ -5743,7 +5743,7 @@ dependencies = [ "log", "lru", "min-max-heap", - "num_enum 0.6.1", + "num_enum 0.7.0", "quinn", "rand 0.8.5", "rand_chacha 0.3.1", @@ -6155,7 +6155,7 @@ dependencies = [ "log", "lru", "num_cpus", - 
"num_enum 0.6.1", + "num_enum 0.7.0", "prost", "rand 0.8.5", "rand_chacha 0.3.1", @@ -6863,7 +6863,7 @@ dependencies = [ "num-derive 0.3.3", "num-traits", "num_cpus", - "num_enum 0.6.1", + "num_enum 0.7.0", "ouroboros", "percentage", "qualifier_attr", @@ -6942,7 +6942,7 @@ dependencies = [ "memmap2", "num-derive 0.3.3", "num-traits", - "num_enum 0.6.1", + "num_enum 0.7.0", "pbkdf2 0.11.0", "qstring", "qualifier_attr", diff --git a/Cargo.toml b/Cargo.toml index 0a6a55f1b23895..d02e2f1ca00fbe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -245,7 +245,7 @@ modular-bitfield = "0.11.2" nix = "0.26.4" num-bigint = "0.4.4" num_cpus = "1.16.0" -num_enum = "0.6.1" +num_enum = "0.7.0" num-derive = "0.3" num-traits = "0.2" openssl = "0.10" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 76d3c4bc32f9f5..eecd73cfcf86af 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4491,7 +4491,7 @@ dependencies = [ "num-derive 0.3.0", "num-traits", "num_cpus", - "num_enum 0.6.1", + "num_enum 0.7.0", "ouroboros", "percentage", "qualifier_attr", @@ -4636,7 +4636,7 @@ dependencies = [ "log", "memmap2", "modular-bitfield", - "num_enum 0.6.1", + "num_enum 0.7.0", "rand 0.8.5", "solana-measure", "solana-sdk", @@ -4788,7 +4788,7 @@ dependencies = [ "log", "lru", "min-max-heap", - "num_enum 0.6.1", + "num_enum 0.7.0", "quinn", "rand 0.8.5", "rand_chacha 0.3.1", @@ -5063,7 +5063,7 @@ dependencies = [ "log", "lru", "num_cpus", - "num_enum 0.6.1", + "num_enum 0.7.0", "prost", "rand 0.8.5", "rand_chacha 0.3.1", @@ -5540,7 +5540,7 @@ dependencies = [ "num-derive 0.3.0", "num-traits", "num_cpus", - "num_enum 0.6.1", + "num_enum 0.7.0", "ouroboros", "percentage", "qualifier_attr", @@ -6034,7 +6034,7 @@ dependencies = [ "memmap2", "num-derive 0.3.0", "num-traits", - "num_enum 0.6.1", + "num_enum 0.7.0", "pbkdf2 0.11.0", "qstring", "qualifier_attr", From 2a17be0eeac6f092b1449d6e9eaf86c6ff9cf81a Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 5 Oct 2023 
09:20:24 -0700 Subject: [PATCH 251/407] Bugfix: MultiIterator batch priority guard (#33454) --- core/src/banking_stage/consumer.rs | 123 ++++++++++++- .../banking_stage/read_write_account_set.rs | 164 +++++------------- .../unprocessed_transaction_storage.rs | 30 ++-- 3 files changed, 177 insertions(+), 140 deletions(-) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index ba915bc767efc8..af7b5b93e40501 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -757,13 +757,17 @@ mod tests { self, state::{AddressLookupTable, LookupTableMeta}, }, + compute_budget, instruction::InstructionError, - message::{v0, v0::MessageAddressTableLookup, MessageHeader, VersionedMessage}, + message::{ + v0::{self, MessageAddressTableLookup}, + Message, MessageHeader, VersionedMessage, + }, poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, signer::Signer, - system_transaction, + system_instruction, system_transaction, transaction::{MessageHash, Transaction, VersionedTransaction}, }, solana_transaction_status::{TransactionStatusMeta, VersionedTransactionWithStatusMeta}, @@ -862,10 +866,11 @@ mod tests { Arc, Arc>, Receiver, + GenesisConfigInfo, JoinHandle<()>, ) { Blockstore::destroy(ledger_path).unwrap(); - let genesis_config_info = create_slow_genesis_config(10_000); + let genesis_config_info = create_slow_genesis_config(100_000_000); let GenesisConfigInfo { genesis_config, mint_keypair, @@ -905,6 +910,7 @@ mod tests { bank, poh_recorder, entry_receiver, + genesis_config_info, poh_simulator, ) } @@ -1830,9 +1836,9 @@ mod tests { fn test_consume_buffered_packets() { let ledger_path = get_tmp_ledger_path_auto_delete!(); { - let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) = + let (transactions, bank, poh_recorder, _entry_receiver, _, poh_simulator) = setup_conflicting_transactions(ledger_path.path()); - let recorder = poh_recorder.read().unwrap().new_recorder(); + let recorder: 
TransactionRecorder = poh_recorder.read().unwrap().new_recorder(); let num_conflicting_transactions = transactions.len(); let deserialized_packets = transactions_to_deserialized_packets(&transactions).unwrap(); assert_eq!(deserialized_packets.len(), num_conflicting_transactions); @@ -1903,7 +1909,7 @@ mod tests { fn test_consume_buffered_packets_sanitization_error() { let ledger_path = get_tmp_ledger_path_auto_delete!(); { - let (mut transactions, bank, poh_recorder, _entry_receiver, poh_simulator) = + let (mut transactions, bank, poh_recorder, _entry_receiver, _, poh_simulator) = setup_conflicting_transactions(ledger_path.path()); let duplicate_account_key = transactions[0].message.account_keys[0]; transactions[0] @@ -1959,7 +1965,7 @@ mod tests { fn test_consume_buffered_packets_retryable() { let ledger_path = get_tmp_ledger_path_auto_delete!(); { - let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) = + let (transactions, bank, poh_recorder, _entry_receiver, _, poh_simulator) = setup_conflicting_transactions(ledger_path.path()); let recorder = poh_recorder.read().unwrap().new_recorder(); let num_conflicting_transactions = transactions.len(); @@ -2048,6 +2054,109 @@ mod tests { Blockstore::destroy(ledger_path.path()).unwrap(); } + #[test] + fn test_consume_buffered_packets_batch_priority_guard() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + { + let (_, bank, poh_recorder, _entry_receiver, genesis_config_info, poh_simulator) = + setup_conflicting_transactions(ledger_path.path()); + let recorder = poh_recorder.read().unwrap().new_recorder(); + + // Setup transactions: + // [(AB), (BC), (CD)] + // (AB) and (BC) are conflicting, and cannot go into the same batch. + // (AB) and (CD) are not conflict. However, (CD) should not be able to take locks needed by (BC). 
+ let keypair_a = Keypair::new(); + let keypair_b = Keypair::new(); + let keypair_c = Keypair::new(); + let keypair_d = Keypair::new(); + for keypair in &[&keypair_a, &keypair_b, &keypair_c, &keypair_d] { + bank.transfer(5_000, &genesis_config_info.mint_keypair, &keypair.pubkey()) + .unwrap(); + } + + let make_prioritized_transfer = + |from: &Keypair, to, lamports, priority| -> Transaction { + let ixs = vec![ + system_instruction::transfer(&from.pubkey(), to, lamports), + compute_budget::ComputeBudgetInstruction::set_compute_unit_price(priority), + ]; + let message = Message::new(&ixs, Some(&from.pubkey())); + Transaction::new(&[from], message, bank.last_blockhash()) + }; + + let transactions = vec![ + make_prioritized_transfer(&keypair_a, &keypair_b.pubkey(), 1, 3), + make_prioritized_transfer(&keypair_b, &keypair_c.pubkey(), 1, 2), + make_prioritized_transfer(&keypair_c, &keypair_d.pubkey(), 1, 1), + ]; + + let num_conflicting_transactions = transactions.len(); + let deserialized_packets = transactions_to_deserialized_packets(&transactions).unwrap(); + assert_eq!(deserialized_packets.len(), num_conflicting_transactions); + let mut buffered_packet_batches = + UnprocessedTransactionStorage::new_transaction_storage( + UnprocessedPacketBatches::from_iter( + deserialized_packets, + num_conflicting_transactions, + ), + ThreadType::Transactions, + ); + + let (replay_vote_sender, _replay_vote_receiver) = unbounded(); + let committer = Committer::new( + None, + replay_vote_sender, + Arc::new(PrioritizationFeeCache::new(0u64)), + ); + let consumer = Consumer::new(committer, recorder, QosService::new(1), None); + + // When the working bank in poh_recorder is None, no packets should be processed (consume will not be called) + assert!(!poh_recorder.read().unwrap().has_bank()); + assert_eq!(buffered_packet_batches.len(), num_conflicting_transactions); + // When the working bank in poh_recorder is Some, all packets should be processed. 
+ // Multi-Iterator will process them 1-by-1 if all txs are conflicting. + poh_recorder.write().unwrap().set_bank(bank, false); + let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); + let banking_stage_stats = BankingStageStats::default(); + consumer.consume_buffered_packets( + &bank_start, + &mut buffered_packet_batches, + &banking_stage_stats, + &mut LeaderSlotMetricsTracker::new(0), + ); + + // Check that all packets were processed without retrying + assert!(buffered_packet_batches.is_empty()); + assert_eq!( + banking_stage_stats + .consumed_buffered_packets_count + .load(Ordering::Relaxed), + num_conflicting_transactions + ); + assert_eq!( + banking_stage_stats + .rebuffered_packets_count + .load(Ordering::Relaxed), + 0 + ); + // Use bank to check the number of entries (batches) + assert_eq!(bank_start.working_bank.transactions_per_entry_max(), 1); + assert_eq!( + bank_start.working_bank.transaction_entries_count(), + 4 + num_conflicting_transactions as u64 // 4 for funding transfers + ); + + poh_recorder + .read() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + let _ = poh_simulator.join(); + } + Blockstore::destroy(ledger_path.path()).unwrap(); + } + #[test] fn test_accumulate_execute_units_and_time() { let mut execute_timings = ExecuteTimings::default(); diff --git a/core/src/banking_stage/read_write_account_set.rs b/core/src/banking_stage/read_write_account_set.rs index 691f81d0f58f67..7a2117675b31d0 100644 --- a/core/src/banking_stage/read_write_account_set.rs +++ b/core/src/banking_stage/read_write_account_set.rs @@ -1,12 +1,9 @@ use { - solana_sdk::{ - message::{SanitizedMessage, VersionedMessage}, - pubkey::Pubkey, - }, + solana_sdk::{message::SanitizedMessage, pubkey::Pubkey}, std::collections::HashSet, }; -/// Wrapper struct to check account locks for a batch of transactions. +/// Wrapper struct to accumulate locks for a batch of transactions. 
#[derive(Debug, Default)] pub struct ReadWriteAccountSet { /// Set of accounts that are locked for read @@ -16,66 +13,42 @@ pub struct ReadWriteAccountSet { } impl ReadWriteAccountSet { - /// Check static account locks for a transaction message. - pub fn check_static_account_locks(&self, message: &VersionedMessage) -> bool { - !message - .static_account_keys() - .iter() - .enumerate() - .any(|(index, pubkey)| { - if message.is_maybe_writable(index) { - !self.can_write(pubkey) - } else { - !self.can_read(pubkey) - } - }) - } - - /// Check all account locks and if they are available, lock them. - /// Returns true if all account locks are available and false otherwise. - pub fn try_locking(&mut self, message: &SanitizedMessage) -> bool { - if self.check_sanitized_message_account_locks(message) { - self.add_sanitized_message_account_locks(message); - true - } else { - false - } - } - - /// Clears the read and write sets - pub fn clear(&mut self) { - self.read_set.clear(); - self.write_set.clear(); - } - - /// Check if a sanitized message's account locks are available. - fn check_sanitized_message_account_locks(&self, message: &SanitizedMessage) -> bool { - !message + /// Returns true if all account locks were available and false otherwise. + #[allow(dead_code)] + pub fn check_locks(&self, message: &SanitizedMessage) -> bool { + message .account_keys() .iter() .enumerate() - .any(|(index, pubkey)| { + .all(|(index, pubkey)| { if message.is_writable(index) { - !self.can_write(pubkey) + self.can_write(pubkey) } else { - !self.can_read(pubkey) + self.can_read(pubkey) } }) } - /// Insert the read and write locks for a sanitized message. - fn add_sanitized_message_account_locks(&mut self, message: &SanitizedMessage) { + /// Add all account locks. + /// Returns true if all account locks were available and false otherwise. 
+ pub fn take_locks(&mut self, message: &SanitizedMessage) -> bool { message .account_keys() .iter() .enumerate() - .for_each(|(index, pubkey)| { + .fold(true, |all_available, (index, pubkey)| { if message.is_writable(index) { - self.add_write(pubkey); + all_available & self.add_write(pubkey) } else { - self.add_read(pubkey); + all_available & self.add_read(pubkey) } - }); + }) + } + + /// Clears the read and write sets + pub fn clear(&mut self) { + self.read_set.clear(); + self.write_set.clear(); } /// Check if an account can be read-locked @@ -89,15 +62,21 @@ impl ReadWriteAccountSet { } /// Add an account to the read-set. - /// Should only be called after `can_read()` returns true - fn add_read(&mut self, pubkey: &Pubkey) { + /// Returns true if the lock was available. + fn add_read(&mut self, pubkey: &Pubkey) -> bool { + let can_read = self.can_read(pubkey); self.read_set.insert(*pubkey); + + can_read } /// Add an account to the write-set. - /// Should only be called after `can_write()` returns true - fn add_write(&mut self, pubkey: &Pubkey) { + /// Returns true if the lock was available. + fn add_write(&mut self, pubkey: &Pubkey) -> bool { + let can_write = self.can_write(pubkey); self.write_set.insert(*pubkey); + + can_write } } @@ -197,58 +176,12 @@ mod tests { Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)) } - // Helper function (could potentially use test_case in future). 
- // conflict_index = 0 means write lock conflict - // conflict_index = 1 means read lock conflict - fn test_check_static_account_locks(conflict_index: usize, add_write: bool, expectation: bool) { - let message = - create_test_versioned_message(&[Pubkey::new_unique()], &[Pubkey::new_unique()], vec![]); - - let mut account_locks = ReadWriteAccountSet::default(); - assert!(account_locks.check_static_account_locks(&message)); - - let conflict_key = message.static_account_keys().get(conflict_index).unwrap(); - if add_write { - account_locks.add_write(conflict_key); - } else { - account_locks.add_read(conflict_key); - } - assert_eq!( - expectation, - account_locks.check_static_account_locks(&message) - ); - } - - #[test] - fn test_check_static_account_locks_write_write_conflict() { - test_check_static_account_locks(0, true, false); - } - - #[test] - fn test_check_static_account_locks_read_write_conflict() { - test_check_static_account_locks(0, false, false); - } - - #[test] - fn test_check_static_account_locks_write_read_conflict() { - test_check_static_account_locks(1, true, false); - } - - #[test] - fn test_check_static_account_locks_read_read_non_conflict() { - test_check_static_account_locks(1, false, true); - } - // Helper function (could potentially use test_case in future). 
// conflict_index = 0 means write lock conflict with static key // conflict_index = 1 means read lock conflict with static key // conflict_index = 2 means write lock conflict with address table key // conflict_index = 3 means read lock conflict with address table key - fn test_check_sanitized_message_account_locks( - conflict_index: usize, - add_write: bool, - expectation: bool, - ) { + fn test_check_and_take_locks(conflict_index: usize, add_write: bool, expectation: bool) { let bank = create_test_bank(); let (bank, table_address) = create_test_address_lookup_table(bank, 2); let tx = create_test_sanitized_transaction( @@ -264,7 +197,6 @@ mod tests { let message = tx.message(); let mut account_locks = ReadWriteAccountSet::default(); - assert!(account_locks.check_sanitized_message_account_locks(message)); let conflict_key = message.account_keys().get(conflict_index).unwrap(); if add_write { @@ -272,34 +204,32 @@ mod tests { } else { account_locks.add_read(conflict_key); } - assert_eq!( - expectation, - account_locks.check_sanitized_message_account_locks(message) - ); + assert_eq!(expectation, account_locks.check_locks(message)); + assert_eq!(expectation, account_locks.take_locks(message)); } #[test] - fn test_check_sanitized_message_account_locks_write_write_conflict() { - test_check_sanitized_message_account_locks(0, true, false); // static key conflict - test_check_sanitized_message_account_locks(2, true, false); // lookup key conflict + fn test_check_and_take_locks_write_write_conflict() { + test_check_and_take_locks(0, true, false); // static key conflict + test_check_and_take_locks(2, true, false); // lookup key conflict } #[test] - fn test_check_sanitized_message_account_locks_read_write_conflict() { - test_check_sanitized_message_account_locks(0, false, false); // static key conflict - test_check_sanitized_message_account_locks(2, false, false); // lookup key conflict + fn test_check_and_take_locks_read_write_conflict() { + test_check_and_take_locks(0, false, 
false); // static key conflict + test_check_and_take_locks(2, false, false); // lookup key conflict } #[test] - fn test_check_sanitized_message_account_locks_write_read_conflict() { - test_check_sanitized_message_account_locks(1, true, false); // static key conflict - test_check_sanitized_message_account_locks(3, true, false); // lookup key conflict + fn test_check_and_take_locks_write_read_conflict() { + test_check_and_take_locks(1, true, false); // static key conflict + test_check_and_take_locks(3, true, false); // lookup key conflict } #[test] - fn test_check_sanitized_message_account_locks_read_read_non_conflict() { - test_check_sanitized_message_account_locks(1, false, true); // static key conflict - test_check_sanitized_message_account_locks(3, false, true); // lookup key conflict + fn test_check_and_take_locks_read_read_non_conflict() { + test_check_and_take_locks(1, false, true); // static key conflict + test_check_and_take_locks(3, false, true); // lookup key conflict } #[test] diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 80ce0875323819..03b3e583326a71 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -16,7 +16,7 @@ use { }, itertools::Itertools, min_max_heap::MinMaxHeap, - solana_measure::measure, + solana_measure::{measure, measure_us}, solana_runtime::bank::Bank, solana_sdk::{ clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, feature_set::FeatureSet, hash::Hash, @@ -149,18 +149,11 @@ fn consume_scan_should_process_packet( return ProcessingDecision::Now; } - // Before sanitization, let's quickly check the static keys (performance optimization) - let message = &packet.transaction().get_message().message; - if !payload.account_locks.check_static_account_locks(message) { - return ProcessingDecision::Later; - } - - // Try to deserialize the packet - let 
(maybe_sanitized_transaction, sanitization_time) = measure!( + // Try to sanitize the packet + let (maybe_sanitized_transaction, sanitization_time_us) = measure_us!( packet.build_sanitized_transaction(&bank.feature_set, bank.vote_only_bank(), bank) ); - let sanitization_time_us = sanitization_time.as_us(); payload .slot_metrics_tracker .increment_transactions_from_packets_us(sanitization_time_us); @@ -181,13 +174,18 @@ fn consume_scan_should_process_packet( payload .message_hash_to_transaction .remove(packet.message_hash()); - ProcessingDecision::Never - } else if payload.account_locks.try_locking(message) { - payload.sanitized_transactions.push(sanitized_transaction); - ProcessingDecision::Now - } else { - ProcessingDecision::Later + return ProcessingDecision::Never; } + + // Always take locks during batch creation. + // This prevents lower-priority transactions from taking locks + // needed by higher-priority txs that were skipped by this check. + if !payload.account_locks.take_locks(message) { + return ProcessingDecision::Later; + } + + payload.sanitized_transactions.push(sanitized_transaction); + ProcessingDecision::Now } else { payload .message_hash_to_transaction From 93d8bcca4b32d314e219798bd5b2716eda1f0815 Mon Sep 17 00:00:00 2001 From: Joe C Date: Thu, 5 Oct 2023 18:28:44 +0200 Subject: [PATCH 252/407] update feature gate no-op program id (#33535) --- runtime/src/inline_feature_gate_program.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/inline_feature_gate_program.rs b/runtime/src/inline_feature_gate_program.rs index 125dc74df243d6..a2c647bbda22a0 100644 --- a/runtime/src/inline_feature_gate_program.rs +++ b/runtime/src/inline_feature_gate_program.rs @@ -1,5 +1,5 @@ //! 
Contains replacement program IDs for the feature gate program pub(crate) mod noop_program { - solana_sdk::declare_id!("2rqZsQBbacRbuAuTSuJ7n49UQT9fzes8RLggFcmB9YuN"); + solana_sdk::declare_id!("37Yr1mVPdfUuy6oC2yPjWtg8xyyVi33TYYqyNQocsAkT"); } From e0091d6995e0ce659f3201fbb34eba122169ce38 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 5 Oct 2023 12:41:33 -0400 Subject: [PATCH 253/407] Removes unnecessary borrow (#33539) --- runtime/src/snapshot_utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 74c9b2421f4c99..0cf1aab09daea2 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1469,7 +1469,7 @@ pub fn build_storage_from_snapshot_dir( let accounts_hardlinks = bank_snapshot_dir.join(SNAPSHOT_ACCOUNTS_HARDLINKS); let account_run_paths: HashSet<_> = HashSet::from_iter(account_paths); - for dir_entry in fs_err::read_dir(&accounts_hardlinks)? { + for dir_entry in fs_err::read_dir(accounts_hardlinks)? 
{ let symlink_path = dir_entry?.path(); // The symlink point to /snapshot/ which contain the account files hardlinks // The corresponding run path should be /run/ From 83b49daf29bdd7027e972eaa4403aba0d479f12c Mon Sep 17 00:00:00 2001 From: sakridge Date: Thu, 5 Oct 2023 13:23:41 -0400 Subject: [PATCH 254/407] Fix CLI help text for `solana stake-account` (#33387) Fix help text --- cli/src/stake.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 96c1b50b3576e6..337b2843ff2229 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -710,7 +710,7 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("csv") .long("csv") .takes_value(false) - .help("Format stake account data in csv") + .help("Format stake rewards data in csv") ) .arg( Arg::with_name("num_rewards_epochs") From 402e9a5fffb5dbf1d6dd1c28800c8b9e95f113b6 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 5 Oct 2023 13:13:09 -0500 Subject: [PATCH 255/407] Use copy_from_slice() over clone_from_slice() for u8 slice copies (#33536) clone_from_slice() would hypothetically visit each item in the slice and clone it whereas copy_from_slice() can memcpy the whole slice in one go. Technically, Rust does the right thing for us by making clone_from_slice() defer to copy_from_slice() for types that implement Copy trait. However, we should still use the more efficient method directly to show intent. 
--- ledger/src/blockstore_db.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index f51e577c7f0543..b2c4020e3b1938 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -750,7 +750,7 @@ impl Column for columns::TransactionStatus { fn key((index, signature, slot): (u64, Signature, Slot)) -> Vec { let mut key = vec![0; 8 + 64 + 8]; // size_of u64 + size_of Signature + size_of Slot BigEndian::write_u64(&mut key[0..8], index); - key[8..72].clone_from_slice(&signature.as_ref()[0..64]); + key[8..72].copy_from_slice(&signature.as_ref()[0..64]); BigEndian::write_u64(&mut key[72..80], slot); key } @@ -791,9 +791,9 @@ impl Column for columns::AddressSignatures { fn key((index, pubkey, slot, signature): (u64, Pubkey, Slot, Signature)) -> Vec { let mut key = vec![0; 8 + 32 + 8 + 64]; // size_of u64 + size_of Pubkey + size_of Slot + size_of Signature BigEndian::write_u64(&mut key[0..8], index); - key[8..40].clone_from_slice(&pubkey.as_ref()[0..32]); + key[8..40].copy_from_slice(&pubkey.as_ref()[0..32]); BigEndian::write_u64(&mut key[40..48], slot); - key[48..112].clone_from_slice(&signature.as_ref()[0..64]); + key[48..112].copy_from_slice(&signature.as_ref()[0..64]); key } @@ -826,7 +826,7 @@ impl Column for columns::TransactionMemos { fn key(signature: Signature) -> Vec { let mut key = vec![0; 64]; // size_of Signature - key[0..64].clone_from_slice(&signature.as_ref()[0..64]); + key[0..64].copy_from_slice(&signature.as_ref()[0..64]); key } @@ -919,7 +919,7 @@ impl Column for columns::ProgramCosts { fn key(pubkey: Pubkey) -> Vec { let mut key = vec![0; 32]; // size_of Pubkey - key[0..32].clone_from_slice(&pubkey.as_ref()[0..32]); + key[0..32].copy_from_slice(&pubkey.as_ref()[0..32]); key } From 6b96a2259f45f21a7bf957a3a143aacf850366a8 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 5 Oct 2023 13:14:09 -0500 Subject: [PATCH 256/407] Remove unused code in Blockstore 
underlying impl (#33538) * Remove LedgerColumn::delete_slot() method * Remove primary_index() function from Trait column --- ledger/src/blockstore_db.rs | 69 +++---------------------------------- 1 file changed, 5 insertions(+), 64 deletions(-) diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index b2c4020e3b1938..3fd33fa12acea3 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -677,12 +677,8 @@ pub trait Column { fn key(index: Self::Index) -> Vec; fn index(key: &[u8]) -> Self::Index; - // this return Slot or some u64 - fn primary_index(index: Self::Index) -> u64; fn as_index(slot: Slot) -> Self::Index; - fn slot(index: Self::Index) -> Slot { - Self::primary_index(index) - } + fn slot(index: Self::Index) -> Slot; } pub trait ColumnName { @@ -733,8 +729,7 @@ impl Column for T { BigEndian::read_u64(&key[..8]) } - /// Obtains the primary index from the specified index. - fn primary_index(index: u64) -> Slot { + fn slot(index: Self::Index) -> Slot { index } @@ -766,10 +761,6 @@ impl Column for columns::TransactionStatus { } } - fn primary_index(index: Self::Index) -> u64 { - index.0 - } - fn slot(index: Self::Index) -> Slot { index.2 } @@ -805,10 +796,6 @@ impl Column for columns::AddressSignatures { (index, pubkey, slot, signature) } - fn primary_index(index: Self::Index) -> u64 { - index.0 - } - fn slot(index: Self::Index) -> Slot { index.2 } @@ -834,10 +821,6 @@ impl Column for columns::TransactionMemos { Signature::try_from(&key[..64]).unwrap() } - fn primary_index(_index: Self::Index) -> u64 { - unimplemented!() - } - fn slot(_index: Self::Index) -> Slot { unimplemented!() } @@ -863,10 +846,6 @@ impl Column for columns::TransactionStatusIndex { BigEndian::read_u64(&key[..8]) } - fn primary_index(index: u64) -> u64 { - index - } - fn slot(_index: Self::Index) -> Slot { unimplemented!() } @@ -927,10 +906,6 @@ impl Column for columns::ProgramCosts { Pubkey::try_from(&key[..32]).unwrap() } - fn primary_index(_index: 
Self::Index) -> u64 { - unimplemented!() - } - fn slot(_index: Self::Index) -> Slot { unimplemented!() } @@ -951,7 +926,7 @@ impl Column for columns::ShredCode { columns::ShredData::index(key) } - fn primary_index(index: Self::Index) -> Slot { + fn slot(index: Self::Index) -> Slot { index.0 } @@ -979,7 +954,7 @@ impl Column for columns::ShredData { (slot, index) } - fn primary_index(index: Self::Index) -> Slot { + fn slot(index: Self::Index) -> Slot { index.0 } @@ -1064,7 +1039,7 @@ impl Column for columns::ErasureMeta { key } - fn primary_index(index: Self::Index) -> Slot { + fn slot(index: Self::Index) -> Slot { index.0 } @@ -1375,40 +1350,6 @@ where })) } - pub fn delete_slot( - &self, - batch: &mut WriteBatch, - from: Option, - to: Option, - ) -> Result - where - C::Index: PartialOrd + Copy + ColumnName, - { - let mut end = true; - let iter_config = match from { - Some(s) => IteratorMode::From(C::as_index(s), IteratorDirection::Forward), - None => IteratorMode::Start, - }; - let iter = self.iter(iter_config)?; - for (index, _) in iter { - if let Some(to) = to { - if C::primary_index(index) > to { - end = false; - break; - } - }; - if let Err(e) = batch.delete::(index) { - error!( - "Error: {:?} while adding delete from_slot {:?} to batch {:?}", - e, - from, - C::NAME - ) - } - } - Ok(end) - } - pub fn compact_range(&self, from: Slot, to: Slot) -> Result where C::Index: PartialOrd + Copy, From fac0c3c0fc58e529e47e922aa476292084bc8b41 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 5 Oct 2023 13:15:24 -0500 Subject: [PATCH 257/407] Make Blockstore::purge_special_columns_exact() bail if columns empty (#33534) The special columns, TransactionStatus and AddressSignatures, are only populated if --enable-rpc-transaction-history is passed. Cleaning these columns for a range of slots is very expensive, as the block for each slot must be read, deserialized, and then parsed to extract all of the transaction signatures and address pubkeys. 
This change adds a simple check to see if there are any values at all in the special columns. If there are not, then the whole process described above can be skipped for nodes that are not storing the special columns. --- ledger/src/blockstore/blockstore_purge.rs | 72 +++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 677a34d3295602..be57b4c6cf8671 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -339,6 +339,26 @@ impl Blockstore { .is_ok() } + /// Returns true if the special columns, TransactionStatus and + /// AddressSignatures, are both empty. + /// + /// It should not be the case that one is empty and the other is not, but + /// just return false in this case. + fn special_columns_empty(&self) -> Result { + let transaction_status_empty = self + .transaction_status_cf + .iter(IteratorMode::Start)? + .next() + .is_none(); + let address_signatures_empty = self + .address_signatures_cf + .iter(IteratorMode::Start)? + .next() + .is_none(); + + Ok(transaction_status_empty && address_signatures_empty) + } + /// Purges special columns (using a non-Slot primary-index) exactly, by /// deserializing each slot being purged and iterating through all /// transactions to determine the keys of individual records. @@ -352,6 +372,10 @@ impl Blockstore { from_slot: Slot, to_slot: Slot, ) -> Result<()> { + if self.special_columns_empty()? 
{ + return Ok(()); + } + let mut index0 = self.transaction_status_index_cf.get(0)?.unwrap_or_default(); let mut index1 = self.transaction_status_index_cf.get(1)?.unwrap_or_default(); let slot_indexes = |slot: Slot| -> Vec { @@ -859,6 +883,54 @@ pub mod tests { ); } + #[test] + fn test_special_columns_empty() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + // Nothing has been inserted yet + assert!(blockstore.special_columns_empty().unwrap()); + + let num_entries = 1; + let max_slot = 9; + for slot in 0..=max_slot { + let entries = make_slot_entries_with_transactions(num_entries); + let shreds = entries_to_test_shreds( + &entries, + slot, + slot.saturating_sub(1), + true, // is_full_slot + 0, // version + true, // merkle_variant + ); + blockstore.insert_shreds(shreds, None, false).unwrap(); + + for transaction in entries.into_iter().flat_map(|entry| entry.transactions) { + assert_eq!(transaction.signatures.len(), 1); + blockstore + .write_transaction_status( + slot, + transaction.signatures[0], + transaction.message.static_account_keys().iter().collect(), + vec![], + TransactionStatusMeta::default(), + ) + .unwrap(); + } + } + assert!(!blockstore.special_columns_empty().unwrap()); + + // Partially purge and ensure special columns are non-empty + blockstore + .run_purge(0, max_slot - 5, PurgeType::Exact) + .unwrap(); + assert!(!blockstore.special_columns_empty().unwrap()); + + // Purge the rest and ensure the special columns are empty once again + blockstore.run_purge(0, max_slot, PurgeType::Exact).unwrap(); + assert!(blockstore.special_columns_empty().unwrap()); + } + #[test] #[allow(clippy::cognitive_complexity)] fn test_purge_transaction_status_exact() { From 666ce9b3befbf5e8bab7bc827d375d018a96adad Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 5 Oct 2023 13:34:04 -0500 Subject: [PATCH 258/407] Fix blockstore-purge delete_files_in_range_us metric (#33512) This field was being 
filled with the wrong value --- ledger/src/blockstore/blockstore_purge.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index be57b4c6cf8671..f7e8aab3db3ad7 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -50,7 +50,7 @@ impl Blockstore { ("write_batch_us", purge_stats.write_batch as i64, i64), ( "delete_files_in_range_us", - purge_stats.write_batch as i64, + purge_stats.delete_files_in_range as i64, i64 ) ); From 6f1922b4fd04092dd2b0c802a87b1c778c8f3de4 Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 5 Oct 2023 13:57:35 -0600 Subject: [PATCH 259/407] Add early return to Blockstore::find_address_signatures methods (#33545) Add early return to find_address_signatures methods --- ledger/src/blockstore.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 0e8709c018e0ba..ec78883548a3a4 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2474,8 +2474,10 @@ impl Blockstore { end_slot: Slot, ) -> Result> { let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); - let mut signatures: Vec<(Slot, Signature)> = vec![]; + if end_slot < lowest_available_slot { + return Ok(signatures); + } for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( ( @@ -2511,12 +2513,15 @@ impl Blockstore { ) -> Result> { let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); let mut signatures: Vec<(Slot, Signature)> = vec![]; + if slot < lowest_available_slot { + return Ok(signatures); + } for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( ( transaction_status_cf_primary_index, pubkey, - slot.max(lowest_available_slot), + slot, Signature::default(), ), 
IteratorDirection::Forward, From 64b36135cba574cb6c3b5d0404b52da2c6cd9c6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 18:39:28 -0700 Subject: [PATCH 260/407] build(deps): bump postcss from 8.4.21 to 8.4.31 in /docs (#33505) Bumps [postcss](https://github.com/postcss/postcss) from 8.4.21 to 8.4.31. - [Release notes](https://github.com/postcss/postcss/releases) - [Changelog](https://github.com/postcss/postcss/blob/main/CHANGELOG.md) - [Commits](https://github.com/postcss/postcss/compare/8.4.21...8.4.31) --- updated-dependencies: - dependency-name: postcss dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/package-lock.json | 40 +++++++++++++++++++++++++--------------- docs/package.json | 2 +- 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index baeb044d5ebb8b..8f577415e69674 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -17,7 +17,7 @@ "clsx": "^1.2.1", "eslint": "^7.3.1", "eslint-plugin-react": "^7.20.0", - "postcss": "^8.2.13", + "postcss": "^8.4.31", "postcss-loader": "^4.2.0", "prettier": "^2.0.5", "react": "^16.8.4", @@ -9470,9 +9470,15 @@ } }, "node_modules/nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==", + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -10125,9 +10131,9 @@ } }, "node_modules/postcss": { - "version": "8.4.21", - "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz", - "integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "funding": [ { "type": "opencollective", @@ -10136,10 +10142,14 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, @@ -21113,9 +21123,9 @@ } }, "nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==" + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==" }, "natural-compare": { "version": "1.4.0", @@ -21585,11 +21595,11 @@ } }, "postcss": { - "version": "8.4.21", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz", - "integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "requires": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } diff --git a/docs/package.json b/docs/package.json index 2be6040c558f80..7279aa3e160586 100644 --- a/docs/package.json +++ b/docs/package.json @@ -29,7 +29,7 @@ "clsx": "^1.2.1", "eslint": "^7.3.1", 
"eslint-plugin-react": "^7.20.0", - "postcss": "^8.2.13", + "postcss": "^8.4.31", "postcss-loader": "^4.2.0", "prettier": "^2.0.5", "react": "^16.8.4", From ef47c977dc07735f413f9bee6b17320e3f653640 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 6 Oct 2023 03:53:55 -0700 Subject: [PATCH 261/407] Decouple program-v4 command processors from CliConfig (#33554) --- cli/src/cli.rs | 8 +- cli/src/program_v4.rs | 213 ++++++++++++++++++++---------------------- 2 files changed, 105 insertions(+), 116 deletions(-) diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 8252e13bbbd6a2..5e4e8b95cb2460 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1682,7 +1682,7 @@ pub fn request_and_confirm_airdrop( Ok(signature) } -fn common_error_adapter(ix_error: &InstructionError) -> Option +pub fn common_error_adapter(ix_error: &InstructionError) -> Option where E: 'static + std::error::Error + DecodeError + FromPrimitive, { @@ -1700,12 +1700,12 @@ pub fn log_instruction_custom_error( where E: 'static + std::error::Error + DecodeError + FromPrimitive, { - log_instruction_custom_error_ex::(result, config, common_error_adapter) + log_instruction_custom_error_ex::(result, &config.output_format, common_error_adapter) } pub fn log_instruction_custom_error_ex( result: ClientResult, - config: &CliConfig, + output_format: &OutputFormat, error_adapter: F, ) -> ProcessResult where @@ -1726,7 +1726,7 @@ where let signature = CliSignature { signature: sig.clone().to_string(), }; - Ok(config.output_format.formatted_string(&signature)) + Ok(output_format.formatted_string(&signature)) } } } diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index 07b82636bf0871..b1e0d60fa9a39c 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -2,8 +2,8 @@ use { crate::{ checks::*, cli::{ - log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, - ProcessResult, + common_error_adapter, log_instruction_custom_error_ex, CliCommand, CliCommandInfo, + CliConfig, 
CliError, ProcessResult, }, program::calculate_max_chunk_size, }, @@ -14,7 +14,7 @@ use { input_validators::is_valid_signer, keypair::{DefaultSigner, SignerIndex}, }, - solana_cli_output::CliProgramId, + solana_cli_output::{CliProgramId, OutputFormat}, solana_client::{ connection_cache::ConnectionCache, send_and_confirm_transactions_in_parallel::{ @@ -29,6 +29,7 @@ use { solana_rpc_client_api::config::RpcSendTransactionConfig, solana_sdk::{ account::Account, + commitment_config::CommitmentConfig, hash::Hash, instruction::Instruction, loader_v4::{ @@ -334,6 +335,28 @@ fn read_and_verify_elf(program_location: &str) -> Result, Box { + pub websocket_url: &'a str, + pub commitment: CommitmentConfig, + pub payer: &'a dyn Signer, + pub authority: &'a dyn Signer, + pub output_format: &'a OutputFormat, + pub use_quic: bool, +} + +impl<'a> ProgramV4CommandConfig<'a> { + fn new_from_cli_config(config: &'a CliConfig, auth_signer_index: &SignerIndex) -> Self { + ProgramV4CommandConfig { + websocket_url: &config.websocket_url, + commitment: config.commitment, + payer: config.signers[0], + authority: config.signers[*auth_signer_index], + output_format: &config.output_format, + use_quic: config.use_quic, + } + } +} + pub fn process_program_v4_subcommand( rpc_client: Arc, config: &CliConfig, @@ -350,12 +373,11 @@ pub fn process_program_v4_subcommand( process_deploy_program( rpc_client, - config, + &ProgramV4CommandConfig::new_from_cli_config(config, authority_signer_index), &program_data, program_len, &config.signers[*program_signer_index].pubkey(), Some(config.signers[*program_signer_index]), - config.signers[*authority_signer_index], ) } ProgramV4CliCommand::Redeploy { @@ -370,12 +392,11 @@ pub fn process_program_v4_subcommand( process_deploy_program( rpc_client, - config, + &ProgramV4CommandConfig::new_from_cli_config(config, authority_signer_index), &program_data, program_len, program_address, buffer_signer, - config.signers[*authority_signer_index], ) } 
ProgramV4CliCommand::Undeploy { @@ -383,18 +404,16 @@ pub fn process_program_v4_subcommand( authority_signer_index, } => process_undeploy_program( rpc_client, - config, + &ProgramV4CommandConfig::new_from_cli_config(config, authority_signer_index), program_address, - config.signers[*authority_signer_index], ), ProgramV4CliCommand::Finalize { program_address, authority_signer_index, } => process_finalize_program( rpc_client, - config, + &ProgramV4CommandConfig::new_from_cli_config(config, authority_signer_index), program_address, - config.signers[*authority_signer_index], ), } } @@ -410,15 +429,14 @@ pub fn process_program_v4_subcommand( // (program_address must contain program ID and must NOT be same as buffer_signer.pubkey()) fn process_deploy_program( rpc_client: Arc, - config: &CliConfig, + config: &ProgramV4CommandConfig, program_data: &[u8], program_data_len: u32, program_address: &Pubkey, buffer_signer: Option<&dyn Signer>, - authority_signer: &dyn Signer, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; - let payer_pubkey = config.signers[0].pubkey(); + let payer_pubkey = config.payer.pubkey(); let (initial_messages, balance_needed, buffer_address) = if let Some(buffer_signer) = buffer_signer { @@ -428,8 +446,6 @@ fn process_deploy_program( config, program_address, &buffer_address, - &payer_pubkey, - &authority_signer.pubkey(), program_data_len, &blockhash, )?; @@ -445,7 +461,6 @@ fn process_deploy_program( config, program_data_len, program_address, - authority_signer, ) .map(|(messages, balance_needed)| (messages, balance_needed, *program_address))? 
}; @@ -453,7 +468,7 @@ fn process_deploy_program( // Create and add write messages let create_msg = |offset: u32, bytes: Vec| { let instruction = - loader_v4::write(&buffer_address, &authority_signer.pubkey(), offset, bytes); + loader_v4::write(&buffer_address, &config.authority.pubkey(), offset, bytes); Message::new_with_blockhash(&[instruction], Some(&payer_pubkey), &blockhash) }; @@ -469,14 +484,13 @@ fn process_deploy_program( config, program_address, &buffer_address, - authority_signer, )? } else { // Create and add deploy message vec![Message::new_with_blockhash( &[loader_v4::deploy( program_address, - &authority_signer.pubkey(), + &config.authority.pubkey(), )], Some(&payer_pubkey), &blockhash, @@ -499,7 +513,6 @@ fn process_deploy_program( &write_messages, &final_messages, buffer_signer, - authority_signer, )?; let program_id = CliProgramId { @@ -510,12 +523,11 @@ fn process_deploy_program( fn process_undeploy_program( rpc_client: Arc, - config: &CliConfig, + config: &ProgramV4CommandConfig, program_address: &Pubkey, - authority_signer: &dyn Signer, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; - let payer_pubkey = config.signers[0].pubkey(); + let payer_pubkey = config.payer.pubkey(); let Some(program_account) = rpc_client .get_account_with_commitment(program_address, config.commitment)? 
@@ -527,7 +539,7 @@ fn process_undeploy_program( let retract_instruction = build_retract_instruction( &program_account, program_address, - &authority_signer.pubkey(), + &config.authority.pubkey(), )?; let mut initial_messages = if let Some(instruction) = retract_instruction { @@ -542,7 +554,7 @@ fn process_undeploy_program( let truncate_instruction = loader_v4::truncate( program_address, - &authority_signer.pubkey(), + &config.authority.pubkey(), 0, &payer_pubkey, ); @@ -555,15 +567,7 @@ fn process_undeploy_program( check_payer(&rpc_client, config, 0, &initial_messages, &[], &[])?; - send_messages( - rpc_client, - config, - &initial_messages, - &[], - &[], - None, - authority_signer, - )?; + send_messages(rpc_client, config, &initial_messages, &[], &[], None)?; let program_id = CliProgramId { program_id: program_address.to_string(), @@ -573,33 +577,23 @@ fn process_undeploy_program( fn process_finalize_program( rpc_client: Arc, - config: &CliConfig, + config: &ProgramV4CommandConfig, program_address: &Pubkey, - authority_signer: &dyn Signer, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; - let payer_pubkey = config.signers[0].pubkey(); let message = [Message::new_with_blockhash( &[loader_v4::transfer_authority( program_address, - &authority_signer.pubkey(), + &config.authority.pubkey(), None, )], - Some(&payer_pubkey), + Some(&config.payer.pubkey()), &blockhash, )]; check_payer(&rpc_client, config, 0, &message, &[], &[])?; - send_messages( - rpc_client, - config, - &message, - &[], - &[], - None, - authority_signer, - )?; + send_messages(rpc_client, config, &message, &[], &[], None)?; let program_id = CliProgramId { program_id: program_address.to_string(), @@ -609,7 +603,7 @@ fn process_finalize_program( fn check_payer( rpc_client: &RpcClient, - config: &CliConfig, + config: &ProgramV4CommandConfig, balance_needed: u64, initial_messages: &[Message], write_messages: &[Message], @@ -630,7 +624,7 @@ fn check_payer( } 
check_account_for_spend_and_fee_with_commitment( rpc_client, - &config.signers[0].pubkey(), + &config.payer.pubkey(), balance_needed, fee, config.commitment, @@ -640,15 +634,12 @@ fn check_payer( fn send_messages( rpc_client: Arc, - config: &CliConfig, + config: &ProgramV4CommandConfig, initial_messages: &[Message], write_messages: &[Message], final_messages: &[Message], program_signer: Option<&dyn Signer>, - authority_signer: &dyn Signer, ) -> Result<(), Box> { - let payer_signer = config.signers[0]; - for message in initial_messages { if message.header.num_required_signatures == 3 { // The initial message that creates the account and truncates it to the required size requires @@ -658,11 +649,15 @@ fn send_messages( let mut initial_transaction = Transaction::new_unsigned(message.clone()); initial_transaction - .try_sign(&[payer_signer, initial_signer, authority_signer], blockhash)?; + .try_sign(&[config.payer, initial_signer, config.authority], blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); - log_instruction_custom_error::(result, config) - .map_err(|err| format!("Account allocation failed: {err}"))?; + log_instruction_custom_error_ex::( + result, + config.output_format, + common_error_adapter, + ) + .map_err(|err| format!("Account allocation failed: {err}"))?; } else { return Err("Buffer account not created yet, must provide a key pair".into()); } @@ -671,10 +666,14 @@ fn send_messages( let blockhash = rpc_client.get_latest_blockhash()?; let mut initial_transaction = Transaction::new_unsigned(message.clone()); - initial_transaction.try_sign(&[payer_signer, authority_signer], blockhash)?; + initial_transaction.try_sign(&[config.payer, config.authority], blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); - log_instruction_custom_error::(result, config) - .map_err(|err| format!("Failed to send initial message: {err}"))?; + log_instruction_custom_error_ex::( + 
result, + config.output_format, + common_error_adapter, + ) + .map_err(|err| format!("Failed to send initial message: {err}"))?; } else { return Err("Initial message requires incorrect number of signatures".into()); } @@ -690,19 +689,19 @@ fn send_messages( let transaction_errors = match connection_cache { ConnectionCache::Udp(cache) => TpuClient::new_with_connection_cache( rpc_client.clone(), - &config.websocket_url, + config.websocket_url, TpuClientConfig::default(), cache, )? .send_and_confirm_messages_with_spinner( write_messages, - &[payer_signer, authority_signer], + &[config.payer, config.authority], ), ConnectionCache::Quic(cache) => { let tpu_client_fut = solana_client::nonblocking::tpu_client::TpuClient::new_with_connection_cache( rpc_client.get_inner_client().clone(), - config.websocket_url.as_str(), + config.websocket_url, solana_client::tpu_client::TpuClientConfig::default(), cache, ); @@ -715,7 +714,7 @@ fn send_messages( rpc_client.clone(), Some(tpu_client), write_messages, - &[payer_signer, authority_signer], + &[config.payer, config.authority], SendAndConfirmConfig { resign_txs_count: Some(5), with_spinner: true, @@ -739,7 +738,7 @@ fn send_messages( for message in final_messages { let blockhash = rpc_client.get_latest_blockhash()?; let mut final_tx = Transaction::new_unsigned(message.clone()); - final_tx.try_sign(&[payer_signer, authority_signer], blockhash)?; + final_tx.try_sign(&[config.payer, config.authority], blockhash)?; rpc_client .send_and_confirm_transaction_with_spinner_and_config( &final_tx, @@ -758,11 +757,9 @@ fn send_messages( fn build_create_buffer_message( rpc_client: Arc, - config: &CliConfig, + config: &ProgramV4CommandConfig, program_address: &Pubkey, buffer_address: &Pubkey, - payer_address: &Pubkey, - authority: &Pubkey, program_data_length: u32, blockhash: &Hash, ) -> Result<(Option, u64), Box> { @@ -786,17 +783,16 @@ fn build_create_buffer_message( let (truncate_instructions, balance_needed) = build_truncate_instructions( 
rpc_client.clone(), - payer_address, + config, &account, buffer_address, - authority, program_data_length, )?; if !truncate_instructions.is_empty() { Ok(( Some(Message::new_with_blockhash( &truncate_instructions, - Some(payer_address), + Some(&config.payer.pubkey()), blockhash, )), balance_needed, @@ -811,14 +807,14 @@ fn build_create_buffer_message( Ok(( Some(Message::new_with_blockhash( &loader_v4::create_buffer( - payer_address, + &config.payer.pubkey(), buffer_address, lamports_required, - authority, + &config.authority.pubkey(), program_data_length, - payer_address, + &config.payer.pubkey(), ), - Some(payer_address), + Some(&config.payer.pubkey()), blockhash, )), lamports_required, @@ -828,12 +824,10 @@ fn build_create_buffer_message( fn build_retract_and_truncate_messages( rpc_client: Arc, - config: &CliConfig, + config: &ProgramV4CommandConfig, program_data_len: u32, program_address: &Pubkey, - authority_signer: &dyn Signer, ) -> Result<(Vec, u64), Box> { - let payer_pubkey = config.signers[0].pubkey(); let blockhash = rpc_client.get_latest_blockhash()?; let Some(program_account) = rpc_client .get_account_with_commitment(program_address, config.commitment)? 
@@ -845,13 +839,13 @@ fn build_retract_and_truncate_messages( let retract_instruction = build_retract_instruction( &program_account, program_address, - &authority_signer.pubkey(), + &config.authority.pubkey(), )?; let mut messages = if let Some(instruction) = retract_instruction { vec![Message::new_with_blockhash( &[instruction], - Some(&payer_pubkey), + Some(&config.payer.pubkey()), &blockhash, )] } else { @@ -860,17 +854,16 @@ fn build_retract_and_truncate_messages( let (truncate_instructions, balance_needed) = build_truncate_instructions( rpc_client.clone(), - &payer_pubkey, + config, &program_account, program_address, - &authority_signer.pubkey(), program_data_len, )?; if !truncate_instructions.is_empty() { messages.push(Message::new_with_blockhash( &truncate_instructions, - Some(&payer_pubkey), + Some(&config.payer.pubkey()), &blockhash, )); } @@ -880,13 +873,11 @@ fn build_retract_and_truncate_messages( fn build_retract_and_deploy_messages( rpc_client: Arc, - config: &CliConfig, + config: &ProgramV4CommandConfig, program_address: &Pubkey, buffer_address: &Pubkey, - authority_signer: &dyn Signer, ) -> Result, Box> { let blockhash = rpc_client.get_latest_blockhash()?; - let payer_pubkey = config.signers[0].pubkey(); let Some(program_account) = rpc_client .get_account_with_commitment(program_address, config.commitment)? 
@@ -898,13 +889,13 @@ fn build_retract_and_deploy_messages( let retract_instruction = build_retract_instruction( &program_account, program_address, - &authority_signer.pubkey(), + &config.authority.pubkey(), )?; let mut messages = if let Some(instruction) = retract_instruction { vec![Message::new_with_blockhash( &[instruction], - Some(&payer_pubkey), + Some(&config.payer.pubkey()), &blockhash, )] } else { @@ -915,10 +906,10 @@ fn build_retract_and_deploy_messages( messages.push(Message::new_with_blockhash( &[loader_v4::deploy_from_source( program_address, - &authority_signer.pubkey(), + &config.authority.pubkey(), buffer_address, )], - Some(&payer_pubkey), + Some(&config.payer.pubkey()), &blockhash, )); Ok(messages) @@ -957,16 +948,18 @@ fn build_retract_instruction( fn build_truncate_instructions( rpc_client: Arc, - payer: &Pubkey, + config: &ProgramV4CommandConfig, account: &Account, buffer_address: &Pubkey, - authority: &Pubkey, program_data_length: u32, ) -> Result<(Vec, u64), Box> { if !loader_v4::check_id(&account.owner) { return Err("Buffer account passed is already in use by another program".into()); } + let payer = &config.payer.pubkey(); + let authority = &config.authority.pubkey(); + let truncate_instruction = if account.data.is_empty() { loader_v4::truncate_uninitialized(buffer_address, authority, program_data_length, payer) } else { @@ -1125,6 +1118,9 @@ mod tests { let authority_signer = program_authority(); config.signers.push(&payer); + config.signers.push(&authority_signer); + + let config = ProgramV4CommandConfig::new_from_cli_config(&config, &1); assert!(process_deploy_program( Arc::new(rpc_client_no_existing_program()), @@ -1133,7 +1129,6 @@ mod tests { data.len() as u32, &program_signer.pubkey(), Some(&program_signer), - &authority_signer, ) .is_ok()); @@ -1144,7 +1139,6 @@ mod tests { data.len() as u32, &program_signer.pubkey(), Some(&program_signer), - &authority_signer, ) .is_err()); @@ -1155,7 +1149,6 @@ mod tests { data.len() as u32, 
&program_signer.pubkey(), Some(&program_signer), - &authority_signer, ) .is_err()); } @@ -1170,6 +1163,9 @@ mod tests { let authority_signer = program_authority(); config.signers.push(&payer); + config.signers.push(&authority_signer); + + let config = ProgramV4CommandConfig::new_from_cli_config(&config, &1); // Redeploying a non-existent program should fail assert!(process_deploy_program( @@ -1179,7 +1175,6 @@ mod tests { data.len() as u32, &program_address, None, - &authority_signer, ) .is_err()); @@ -1190,7 +1185,6 @@ mod tests { data.len() as u32, &program_address, None, - &authority_signer, ) .is_ok()); @@ -1201,7 +1195,6 @@ mod tests { data.len() as u32, &program_address, None, - &authority_signer, ) .is_ok()); @@ -1212,7 +1205,6 @@ mod tests { data.len() as u32, &program_address, None, - &authority_signer, ) .is_err()); @@ -1223,7 +1215,6 @@ mod tests { data.len() as u32, &program_address, None, - &authority_signer, ) .is_err()); @@ -1234,7 +1225,6 @@ mod tests { data.len() as u32, &program_address, None, - &authority_signer, ) .is_err()); } @@ -1250,6 +1240,9 @@ mod tests { let authority_signer = program_authority(); config.signers.push(&payer); + config.signers.push(&authority_signer); + + let config = ProgramV4CommandConfig::new_from_cli_config(&config, &1); // Redeploying a non-existent program should fail assert!(process_deploy_program( @@ -1259,7 +1252,6 @@ mod tests { data.len() as u32, &program_address, Some(&buffer_signer), - &authority_signer, ) .is_err()); @@ -1270,7 +1262,6 @@ mod tests { data.len() as u32, &program_address, Some(&buffer_signer), - &authority_signer, ) .is_err()); @@ -1281,7 +1272,6 @@ mod tests { data.len() as u32, &program_address, Some(&buffer_signer), - &authority_signer, ) .is_err()); } @@ -1295,12 +1285,14 @@ mod tests { let authority_signer = program_authority(); config.signers.push(&payer); + config.signers.push(&authority_signer); + + let config = ProgramV4CommandConfig::new_from_cli_config(&config, &1); 
assert!(process_undeploy_program( Arc::new(rpc_client_no_existing_program()), &config, &program_signer.pubkey(), - &authority_signer, ) .is_err()); @@ -1308,7 +1300,6 @@ mod tests { Arc::new(rpc_client_with_program_retracted()), &config, &program_signer.pubkey(), - &authority_signer, ) .is_ok()); @@ -1316,7 +1307,6 @@ mod tests { Arc::new(rpc_client_with_program_deployed()), &config, &program_signer.pubkey(), - &authority_signer, ) .is_ok()); @@ -1324,7 +1314,6 @@ mod tests { Arc::new(rpc_client_with_program_finalized()), &config, &program_signer.pubkey(), - &authority_signer, ) .is_err()); @@ -1332,7 +1321,6 @@ mod tests { Arc::new(rpc_client_wrong_account_owner()), &config, &program_signer.pubkey(), - &authority_signer, ) .is_err()); @@ -1340,7 +1328,6 @@ mod tests { Arc::new(rpc_client_wrong_authority()), &config, &program_signer.pubkey(), - &authority_signer, ) .is_err()); } @@ -1354,12 +1341,14 @@ mod tests { let authority_signer = program_authority(); config.signers.push(&payer); + config.signers.push(&authority_signer); + + let config = ProgramV4CommandConfig::new_from_cli_config(&config, &1); assert!(process_finalize_program( Arc::new(rpc_client_with_program_deployed()), &config, &program_signer.pubkey(), - &authority_signer, ) .is_ok()); } From 35a0295376f112a57b2b2ba2137a2bc87cdb2743 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 6 Oct 2023 07:51:49 -0400 Subject: [PATCH 262/407] Improves error text when snapshot intervals are incompatible (#33551) --- validator/src/cli.rs | 3 ++- validator/src/main.rs | 9 +++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 403a922c421112..72e82ca13b56d9 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -468,7 +468,8 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("NUMBER") .takes_value(true) .default_value(&default_args.full_snapshot_archive_interval_slots) - .help("Number of slots between 
generating full snapshots") + .help("Number of slots between generating full snapshots. \ + Must be a multiple of the incremental snapshot interval.") ) .arg( Arg::with_name("maximum_full_snapshots_to_retain") diff --git a/validator/src/main.rs b/validator/src/main.rs index 8d37486d7d8057..b97789061c9e3b 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1588,17 +1588,14 @@ pub fn main() { validator_config.accounts_hash_interval_slots, ) { eprintln!("Invalid snapshot configuration provided: snapshot intervals are incompatible. \ - \n\t- full snapshot interval MUST be a multiple of accounts hash interval (if enabled) \ - \n\t- incremental snapshot interval MUST be a multiple of accounts hash interval (if enabled) \ + \n\t- full snapshot interval MUST be a multiple of incremental snapshot interval (if enabled) \ \n\t- full snapshot interval MUST be larger than incremental snapshot interval (if enabled) \ \nSnapshot configuration values: \ \n\tfull snapshot interval: {} \ - \n\tincremental snapshot interval: {} \ - \n\taccounts hash interval: {}", + \n\tincremental snapshot interval: {}", if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { "disabled".to_string() } else { full_snapshot_archive_interval_slots.to_string() }, if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { "disabled".to_string() } else { incremental_snapshot_archive_interval_slots.to_string() }, - validator_config.accounts_hash_interval_slots); - + ); exit(1); } From 88c1317535edece0ab7bc058bea1dd50449e07b3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 13:19:45 +0000 Subject: [PATCH 263/407] build(deps): bump indexmap from 2.0.1 to 2.0.2 (#33559) * build(deps): bump indexmap from 2.0.1 to 2.0.2 Bumps [indexmap](https://github.com/bluss/indexmap) from 2.0.1 to 2.0.2. 
- [Changelog](https://github.com/bluss/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/bluss/indexmap/compare/2.0.1...2.0.2) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 22 +++++++++++----------- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 44f4a34ce8a140..98718e52a33557 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2321,9 +2321,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" [[package]] name = "headers" @@ -2626,12 +2626,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad227c3af19d4914570ad36d30409928b75967c298feb9ea1969db3a610bb14e" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.1", "rayon", ] @@ -4835,7 +4835,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.1", + "indexmap 2.0.2", "itoa", "ryu", "serde", @@ -5624,7 +5624,7 @@ dependencies = [ "dashmap 4.0.2", "futures 0.3.28", "futures-util", - "indexmap 2.0.1", + "indexmap 2.0.2", "indicatif", "log", "quinn", 
@@ -5705,7 +5705,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.0.1", + "indexmap 2.0.2", "indicatif", "log", "rand 0.8.5", @@ -6041,7 +6041,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.0.1", + "indexmap 2.0.2", "itertools", "log", "lru", @@ -7099,7 +7099,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.0.1", + "indexmap 2.0.2", "itertools", "libc", "log", @@ -7190,7 +7190,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.0.1", + "indexmap 2.0.2", "indicatif", "pickledb", "serde", @@ -7219,7 +7219,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.0.1", + "indexmap 2.0.2", "indicatif", "log", "rayon", diff --git a/Cargo.toml b/Cargo.toml index d02e2f1ca00fbe..e2c2c350136076 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -215,7 +215,7 @@ hyper = "0.14.27" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.7" -indexmap = "2.0.1" +indexmap = "2.0.2" indicatif = "0.17.7" Inflector = "0.11.4" itertools = "0.10.5" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index eecd73cfcf86af..f900477ef15acf 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1945,9 +1945,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" [[package]] name = "headers" @@ -2232,12 +2232,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad227c3af19d4914570ad36d30409928b75967c298feb9ea1969db3a610bb14e" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 
0.14.1", "rayon", ] @@ -4211,7 +4211,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.1", + "indexmap 2.0.2", "itoa", "ryu", "serde", @@ -4706,7 +4706,7 @@ dependencies = [ "dashmap", "futures 0.3.28", "futures-util", - "indexmap 2.0.1", + "indexmap 2.0.2", "indicatif", "log", "quinn", @@ -4756,7 +4756,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.0.1", + "indexmap 2.0.2", "log", "rand 0.8.5", "rayon", @@ -5005,7 +5005,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.0.1", + "indexmap 2.0.2", "itertools", "log", "lru", @@ -6153,7 +6153,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.0.1", + "indexmap 2.0.2", "itertools", "libc", "log", @@ -6236,7 +6236,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.0.1", + "indexmap 2.0.2", "indicatif", "log", "rayon", From 8b1377cbb4e67fd32a7080a9e6ae4bfee02764d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 14:25:15 +0000 Subject: [PATCH 264/407] build(deps): bump reqwest from 0.11.20 to 0.11.22 (#33561) * build(deps): bump reqwest from 0.11.20 to 0.11.22 Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.11.20 to 0.11.22. - [Release notes](https://github.com/seanmonstar/reqwest/releases) - [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md) - [Commits](https://github.com/seanmonstar/reqwest/compare/v0.11.20...v0.11.22) --- updated-dependencies: - dependency-name: reqwest dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 26 ++++++++++++++++++++++++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 26 ++++++++++++++++++++++++-- 3 files changed, 49 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98718e52a33557..b0bb813bfebacc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4413,9 +4413,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "reqwest" -version = "0.11.20" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "async-compression", "base64 0.21.4", @@ -4442,6 +4442,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-native-tls", "tokio-rustls", @@ -7924,6 +7925,27 @@ dependencies = [ "walkdir", ] +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "systemstat" version = "0.2.3" diff --git a/Cargo.toml b/Cargo.toml index e2c2c350136076..e5ad097510628d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -277,7 +277,7 @@ rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" regex = "1.9.6" 
rolling-file = "0.2.0" -reqwest = { version = "0.11.20", default-features = false } +reqwest = { version = "0.11.22", default-features = false } rpassword = "7.2" rustc_version = "0.4" rustls = { version = "0.21.7", default-features = false, features = ["quic"] } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f900477ef15acf..9703dab3458643 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3843,9 +3843,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "reqwest" -version = "0.11.20" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "async-compression", "base64 0.21.4", @@ -3872,6 +3872,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-native-tls", "tokio-rustls", @@ -6833,6 +6834,27 @@ dependencies = [ "walkdir", ] +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tar" version = "0.4.40" From 2d1449f71412a9e8bc170895564918604ef2abbb Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 6 Oct 2023 10:47:51 -0400 Subject: [PATCH 265/407] Upgrades Rust to 1.73.0 (#33546) --- ci/docker-rust-nightly/Dockerfile | 2 +- ci/docker-rust/Dockerfile | 2 +- ci/rust-version.sh | 2 +- 
rust-toolchain.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index 23262061e4f2a3..a5d933b2a2d79f 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.72.1 +FROM solanalabs/rust:1.73.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index e5d80f9e04bfbf..8dfc347d54d697 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.72.1 +FROM rust:1.73.0 ARG NODE_MAJOR=18 diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 76f929277ba757..a38910accda10b 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -29,7 +29,7 @@ fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2023-09-20 + nightly_version=2023-10-05 fi diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 7eb23c42c2af84..8142c3012694b1 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.72.1" +channel = "1.73.0" From 446d89e84a24aa6b5de549ae994e947ec820c73f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 14:57:54 +0000 Subject: [PATCH 266/407] build(deps): bump csv from 1.2.2 to 1.3.0 (#33560) Bumps [csv](https://github.com/BurntSushi/rust-csv) from 1.2.2 to 1.3.0. - [Commits](https://github.com/BurntSushi/rust-csv/compare/1.2.2...1.3.0) --- updated-dependencies: - dependency-name: csv dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b0bb813bfebacc..031d068f411552 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1423,9 +1423,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", @@ -1435,9 +1435,9 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index e5ad097510628d..ed39154543f8e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -175,7 +175,7 @@ core_affinity = "0.5.10" criterion = "0.5.1" criterion-stats = "0.3.0" crossbeam-channel = "0.5.8" -csv = "1.2.2" +csv = "1.3.0" ctrlc = "3.4.1" curve25519-dalek = "3.2.1" dashmap = "4.0.2" From 973df825b7c347b8a056377fd341b25646a92b88 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 6 Oct 2023 09:46:26 -0700 Subject: [PATCH 267/407] log signature after successful feature activation (#33488) --- cli/src/feature.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cli/src/feature.rs b/cli/src/feature.rs index 8c065d78feec91..d55f3dee88a7d0 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -1,6 +1,9 @@ use { crate::{ - cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult}, + cli::{ + log_instruction_custom_error, CliCommand, CliCommandInfo, 
CliConfig, CliError, + ProcessResult, + }, spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, }, clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand}, @@ -23,6 +26,7 @@ use { message::Message, pubkey::Pubkey, stake_history::Epoch, + system_instruction::SystemError, transaction::Transaction, }, std::{cmp::Ordering, collections::HashMap, fmt, rc::Rc, str::FromStr}, @@ -957,6 +961,6 @@ fn process_activate( FEATURE_NAMES.get(&feature_id).unwrap(), feature_id ); - rpc_client.send_and_confirm_transaction_with_spinner(&transaction)?; - Ok("".to_string()) + let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction); + log_instruction_custom_error::(result, config) } From c8d545c5019507fc735893d6de36168c73deb792 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 6 Oct 2023 13:45:14 -0400 Subject: [PATCH 268/407] Uses stable `u64::next_multiple_of()` (#33549) --- core/tests/epoch_accounts_hash.rs | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 1f6eb702769d3e..718e62688b8c4c 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -529,7 +529,7 @@ fn test_background_services_request_handling_for_epoch_accounts_hash() { // Based on the EAH start and snapshot interval, pick a slot to mass-root all the banks in // this range such that an EAH request will be sent and also a snapshot request. let eah_start_slot = epoch_accounts_hash_utils::calculation_start(&bank); - let set_root_slot = next_multiple_of(eah_start_slot, FULL_SNAPSHOT_INTERVAL); + let set_root_slot = eah_start_slot.next_multiple_of(FULL_SNAPSHOT_INTERVAL); if bank.block_height() == set_root_slot { info!("Calling set_root() on bank {}...", bank.slot()); @@ -661,16 +661,3 @@ fn test_epoch_accounts_hash_and_warping() { .wait_get_epoch_accounts_hash(); info!("Waiting for epoch accounts hash... 
DONE"); } - -// Copy the impl of `next_multiple_of` since it is nightly-only experimental. -// https://doc.rust-lang.org/std/primitive.u64.html#method.next_multiple_of -// https://github.com/rust-lang/rust/issues/88581 -// https://github.com/rust-lang/rust/pull/88582 -// https://github.com/jhpratt/rust/blob/727a4fc7e3f836938dfeb4a2ab237cfca612222d/library/core/src/num/uint_macros.rs#L1811-L1837 -const fn next_multiple_of(lhs: u64, rhs: u64) -> u64 { - #![allow(clippy::arithmetic_side_effects)] - match lhs % rhs { - 0 => lhs, - r => lhs + (rhs - r), - } -} From 937cf5312da0db05aa399e0ec1e666b99713c268 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 18:11:42 +0000 Subject: [PATCH 269/407] build(deps): bump syn from 2.0.37 to 2.0.38 (#33558) * build(deps): bump syn from 2.0.37 to 2.0.38 Bumps [syn](https://github.com/dtolnay/syn) from 2.0.37 to 2.0.38. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.37...2.0.38) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 58 ++++++++++++++++++++--------------------- programs/sbf/Cargo.lock | 56 +++++++++++++++++++-------------------- 2 files changed, 57 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 031d068f411552..988d11f7f5631b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,7 +436,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -590,7 +590,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1496,7 +1496,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1507,7 +1507,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1699,7 +1699,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1799,7 +1799,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2074,7 +2074,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3307,7 +3307,7 @@ checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3411,7 +3411,7 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 
2.0.37", + "syn 2.0.38", ] [[package]] @@ -3423,7 +3423,7 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3930,7 +3930,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4093,7 +4093,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4770,7 +4770,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4815,7 +4815,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4865,7 +4865,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -5958,7 +5958,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6979,7 +6979,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7632,7 +7632,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7644,7 +7644,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.37", + "syn 2.0.38", "thiserror", ] @@ -7702,7 +7702,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7875,9 +7875,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.37" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -8053,7 +8053,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -8065,7 +8065,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "test-case-core", ] @@ -8101,7 +8101,7 @@ checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -8240,7 +8240,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -8731,7 +8731,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -8765,7 +8765,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9102,7 +9102,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9703dab3458643..438ecaaaff9e15 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -410,7 +410,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -564,7 +564,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1206,7 +1206,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1217,7 +1217,7 @@ checksum = 
"29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1392,7 +1392,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1495,7 +1495,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1744,7 +1744,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2926,7 +2926,7 @@ checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3008,7 +3008,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3020,7 +3020,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3456,7 +3456,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3591,7 +3591,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4158,7 +4158,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4203,7 +4203,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4947,7 +4947,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 
2.0.37", + "syn 2.0.38", ] [[package]] @@ -6068,7 +6068,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6551,7 +6551,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6563,7 +6563,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.37", + "syn 2.0.38", "thiserror", ] @@ -6611,7 +6611,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6784,9 +6784,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.37" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -6942,7 +6942,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6954,7 +6954,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "test-case-core", ] @@ -6990,7 +6990,7 @@ checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7113,7 +7113,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7590,7 +7590,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -7624,7 +7624,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7952,7 +7952,7 @@ 
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] From ecb1f8a9d739213cca027660d74b3b090c0d5d3e Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 6 Oct 2023 11:54:02 -0700 Subject: [PATCH 270/407] [TieredStorage] Include Hot Account in StoredAccountMeta and ReadableAccount (#33544) #### Problem All account storage formats are required to implement both StoredAccountMeta and ReadableAccount, but the implementation for the hot account format is missing. #### Summary of Changes This PR includes hot account format into StoredAccountMeta and ReadableAccount enum. This will allow the TieredStorageReader in the future PRs to return hot account format in its `get_account` implementation. --- accounts-db/src/account_storage/meta.rs | 31 ++++++++++++++++++++++++- accounts-db/src/append_vec.rs | 18 ++++++++++---- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index dba672292310f5..21b117c0a15d3a 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -1,5 +1,9 @@ use { - crate::{append_vec::AppendVecStoredAccountMeta, storable_accounts::StorableAccounts}, + crate::{ + append_vec::AppendVecStoredAccountMeta, + storable_accounts::StorableAccounts, + tiered_storage::{hot::HotAccountMeta, readable::TieredReadableAccount}, + }, solana_sdk::{account::ReadableAccount, hash::Hash, pubkey::Pubkey, stake_history::Epoch}, std::{borrow::Borrow, marker::PhantomData}, }; @@ -12,6 +16,10 @@ pub struct StoredAccountInfo { pub size: usize, } +lazy_static! { + static ref DEFAULT_ACCOUNT_HASH: Hash = Hash::default(); +} + /// Goal is to eliminate copies and data reshaping given various code paths that store accounts. 
/// This struct contains what is needed to store accounts to a storage /// 1. account & pubkey (StorableAccounts) @@ -100,66 +108,82 @@ impl<'a: 'b, 'b, T: ReadableAccount + Sync + 'b, U: StorableAccounts<'a, T>, V: #[derive(PartialEq, Eq, Debug)] pub enum StoredAccountMeta<'storage> { AppendVec(AppendVecStoredAccountMeta<'storage>), + Hot(TieredReadableAccount<'storage, HotAccountMeta>), } impl<'storage> StoredAccountMeta<'storage> { pub fn pubkey(&self) -> &'storage Pubkey { match self { Self::AppendVec(av) => av.pubkey(), + Self::Hot(hot) => hot.address(), } } pub fn hash(&self) -> &'storage Hash { match self { Self::AppendVec(av) => av.hash(), + Self::Hot(hot) => hot.hash().unwrap_or(&DEFAULT_ACCOUNT_HASH), } } pub fn stored_size(&self) -> usize { match self { Self::AppendVec(av) => av.stored_size(), + Self::Hot(_) => unimplemented!(), } } pub fn offset(&self) -> usize { match self { Self::AppendVec(av) => av.offset(), + Self::Hot(hot) => hot.index(), } } pub fn data(&self) -> &'storage [u8] { match self { Self::AppendVec(av) => av.data(), + Self::Hot(hot) => hot.data(), } } pub fn data_len(&self) -> u64 { match self { Self::AppendVec(av) => av.data_len(), + Self::Hot(hot) => hot.data().len() as u64, } } pub fn write_version(&self) -> StoredMetaWriteVersion { match self { Self::AppendVec(av) => av.write_version(), + Self::Hot(hot) => hot.write_version().unwrap_or_default(), } } pub fn meta(&self) -> &StoredMeta { match self { Self::AppendVec(av) => av.meta(), + // Hot account does not support this API as it does not + // use the same in-memory layout as StoredMeta. + Self::Hot(_) => unreachable!(), } } pub fn set_meta(&mut self, meta: &'storage StoredMeta) { match self { Self::AppendVec(av) => av.set_meta(meta), + // Hot account does not support this API as it does not + // use the same in-memory layout as StoredMeta. 
+ Self::Hot(_) => unreachable!(), } } pub(crate) fn sanitize(&self) -> bool { match self { Self::AppendVec(av) => av.sanitize(), + // Hot account currently doesn't have the concept of sanitization. + Self::Hot(_) => unimplemented!(), } } } @@ -168,26 +192,31 @@ impl<'storage> ReadableAccount for StoredAccountMeta<'storage> { fn lamports(&self) -> u64 { match self { Self::AppendVec(av) => av.lamports(), + Self::Hot(hot) => hot.lamports(), } } fn data(&self) -> &[u8] { match self { Self::AppendVec(av) => av.data(), + Self::Hot(hot) => hot.data(), } } fn owner(&self) -> &Pubkey { match self { Self::AppendVec(av) => av.owner(), + Self::Hot(hot) => hot.owner(), } } fn executable(&self) -> bool { match self { Self::AppendVec(av) => av.executable(), + Self::Hot(hot) => hot.executable(), } } fn rent_epoch(&self) -> Epoch { match self { Self::AppendVec(av) => av.rent_epoch(), + Self::Hot(hot) => hot.rent_epoch(), } } } diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 941c0a9afe298b..fce45672f2a9bd 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -686,6 +686,8 @@ pub mod tests { pub(crate) fn ref_executable_byte(&self) -> &u8 { match self { Self::AppendVec(av) => av.ref_executable_byte(), + // Tests currently only cover AppendVec. 
+ Self::Hot(_) => unreachable!(), } } } @@ -1181,7 +1183,9 @@ pub mod tests { av.append_account_test(&create_test_account(10)).unwrap(); let accounts = av.accounts(0); - let StoredAccountMeta::AppendVec(account) = accounts.first().unwrap(); + let StoredAccountMeta::AppendVec(account) = accounts.first().unwrap() else { + panic!("StoredAccountMeta can only be AppendVec in this test."); + }; account.set_data_len_unsafe(crafted_data_len); assert_eq!(account.data_len(), crafted_data_len); @@ -1209,7 +1213,9 @@ pub mod tests { av.append_account_test(&create_test_account(10)).unwrap(); let accounts = av.accounts(0); - let StoredAccountMeta::AppendVec(account) = accounts.first().unwrap(); + let StoredAccountMeta::AppendVec(account) = accounts.first().unwrap() else { + panic!("StoredAccountMeta can only be AppendVec in this test."); + }; account.set_data_len_unsafe(too_large_data_len); assert_eq!(account.data_len(), too_large_data_len); @@ -1245,14 +1251,18 @@ pub mod tests { assert_eq!(*accounts[0].ref_executable_byte(), 0); assert_eq!(*accounts[1].ref_executable_byte(), 1); - let StoredAccountMeta::AppendVec(account) = &accounts[0]; + let StoredAccountMeta::AppendVec(account) = &accounts[0] else { + panic!("StoredAccountMeta can only be AppendVec in this test."); + }; let crafted_executable = u8::max_value() - 1; account.set_executable_as_byte(crafted_executable); // reload crafted accounts let accounts = av.accounts(0); - let StoredAccountMeta::AppendVec(account) = accounts.first().unwrap(); + let StoredAccountMeta::AppendVec(account) = accounts.first().unwrap() else { + panic!("StoredAccountMeta can only be AppendVec in this test."); + }; // upper 7-bits are not 0, so sanitization should fail assert!(!account.sanitize_executable()); From bb27bd88d426cce591446d364cef226bbd316157 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 6 Oct 2023 16:19:35 -0400 Subject: [PATCH 271/407] Removes write version from tiered storage (#33566) --- accounts-db/src/account_storage/meta.rs 
| 4 +- accounts-db/src/tiered_storage.rs | 4 +- accounts-db/src/tiered_storage/byte_block.rs | 30 +--- accounts-db/src/tiered_storage/hot.rs | 49 ++---- accounts-db/src/tiered_storage/meta.rs | 155 ++++++------------- accounts-db/src/tiered_storage/readable.rs | 18 +-- 6 files changed, 70 insertions(+), 190 deletions(-) diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index 21b117c0a15d3a..57a5e556aaa93f 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -157,7 +157,9 @@ impl<'storage> StoredAccountMeta<'storage> { pub fn write_version(&self) -> StoredMetaWriteVersion { match self { Self::AppendVec(av) => av.write_version(), - Self::Hot(hot) => hot.write_version().unwrap_or_default(), + // Hot account does not support this API as it does not + // use a write version. + Self::Hot(_) => StoredMetaWriteVersion::default(), } } diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 549528f22be6d4..43d34f1561cca4 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -300,8 +300,6 @@ mod tests { } /// Create a test account based on the specified seed. - /// The created test account might have default rent_epoch - /// and write_version. 
fn create_account(seed: u64) -> (StoredMeta, AccountSharedData) { let data_byte = seed as u8; let account = Account { @@ -317,7 +315,7 @@ mod tests { }; let stored_meta = StoredMeta { - write_version_obsolete: u64::MAX, + write_version_obsolete: StoredMetaWriteVersion::default(), pubkey: Pubkey::new_unique(), data_len: seed, }; diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs index 8795aa7b48cf63..53af0a71374c85 100644 --- a/accounts-db/src/tiered_storage/byte_block.rs +++ b/accounts-db/src/tiered_storage/byte_block.rs @@ -78,9 +78,6 @@ impl ByteBlockWriter { if let Some(hash) = opt_fields.account_hash { size += self.write_type(&hash)?; } - if let Some(write_version) = opt_fields.write_version { - size += self.write_type(&write_version)?; - } debug_assert_eq!(size, opt_fields.size()); @@ -154,7 +151,6 @@ impl ByteBlockReader { mod tests { use { super::*, - crate::account_storage::meta::StoredMetaWriteVersion, solana_sdk::{hash::Hash, stake_history::Epoch}, }; @@ -307,7 +303,6 @@ mod tests { fn write_optional_fields(format: AccountBlockFormat) { let mut test_epoch = 5432312; - let mut test_write_version = 231; let mut writer = ByteBlockWriter::new(format); let mut opt_fields_vec = vec![]; @@ -317,18 +312,12 @@ mod tests { // of Some and None. 
for rent_epoch in [None, Some(test_epoch)] { for account_hash in [None, Some(Hash::new_unique())] { - for write_version in [None, Some(test_write_version)] { - some_count += rent_epoch.map_or(0, |_| 1) - + account_hash.map_or(0, |_| 1) - + write_version.map_or(0, |_| 1); - - opt_fields_vec.push(AccountMetaOptionalFields { - rent_epoch, - account_hash, - write_version, - }); - test_write_version += 1; - } + some_count += rent_epoch.iter().count() + account_hash.iter().count(); + + opt_fields_vec.push(AccountMetaOptionalFields { + rent_epoch, + account_hash, + }); } test_epoch += 1; } @@ -367,13 +356,6 @@ mod tests { verified_count += 1; offset += std::mem::size_of::(); } - if let Some(expected_write_version) = opt_fields.write_version { - let write_version = - read_type::(&decoded_buffer, offset).unwrap(); - assert_eq!(*write_version, expected_write_version); - verified_count += 1; - offset += std::mem::size_of::(); - } } // make sure the number of Some fields matches the number of fields we diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 0b17ec1b8469b4..68c0e705976385 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -2,17 +2,12 @@ //! The account meta and related structs for hot accounts. 
use { - crate::{ - account_storage::meta::StoredMetaWriteVersion, - tiered_storage::{ - byte_block, - footer::{ - AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter, - }, - index::AccountIndexFormat, - meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, - TieredStorageFormat, TieredStorageResult, - }, + crate::tiered_storage::{ + byte_block, + footer::{AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter}, + index::AccountIndexFormat, + meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, + TieredStorageFormat, TieredStorageResult, }, memmap2::{Mmap, MmapOptions}, modular_bitfield::prelude::*, @@ -167,19 +162,6 @@ impl TieredAccountMeta for HotAccountMeta { .flatten() } - /// Returns the write version by parsing the specified account block. None - /// will be returned if this account does not persist this optional field. - fn write_version(&self, account_block: &[u8]) -> Option { - self.flags - .has_write_version() - .then(|| { - let offset = self.optional_fields_offset(account_block) - + AccountMetaOptionalFields::write_version_offset(self.flags()); - byte_block::read_type::(account_block, offset).copied() - }) - .flatten() - } - /// Returns the offset of the optional fields based on the specified account /// block. 
fn optional_fields_offset(&self, account_block: &[u8]) -> usize { @@ -239,13 +221,10 @@ impl HotStorageReader { pub mod tests { use { super::*, - crate::{ - account_storage::meta::StoredMetaWriteVersion, - tiered_storage::{ - byte_block::ByteBlockWriter, - footer::AccountBlockFormat, - meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, - }, + crate::tiered_storage::{ + byte_block::ByteBlockWriter, + footer::AccountBlockFormat, + meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, }, ::solana_sdk::{hash::Hash, stake_history::Epoch}, memoffset::offset_of, @@ -311,7 +290,6 @@ pub mod tests { let optional_fields = AccountMetaOptionalFields { rent_epoch: Some(TEST_RENT_EPOCH), account_hash: Some(Hash::new_unique()), - write_version: None, }; let flags = AccountMetaFlags::new_from(&optional_fields); @@ -335,12 +313,10 @@ pub mod tests { const TEST_LAMPORT: u64 = 2314232137; const OWNER_INDEX: u32 = 0x1fef_1234; const TEST_RENT_EPOCH: Epoch = 7; - const TEST_WRITE_VERSION: StoredMetaWriteVersion = 0; let optional_fields = AccountMetaOptionalFields { rent_epoch: Some(TEST_RENT_EPOCH), account_hash: Some(Hash::new_unique()), - write_version: Some(TEST_WRITE_VERSION), }; let flags = AccountMetaFlags::new_from(&optional_fields); @@ -361,7 +337,6 @@ pub mod tests { assert_eq!(expected_meta, *meta); assert!(meta.flags().has_rent_epoch()); assert!(meta.flags().has_account_hash()); - assert!(meta.flags().has_write_version()); assert_eq!(meta.account_data_padding() as usize, padding.len()); let account_block = &buffer[std::mem::size_of::()..]; @@ -378,9 +353,5 @@ pub mod tests { *(meta.account_hash(account_block).unwrap()), optional_fields.account_hash.unwrap() ); - assert_eq!( - meta.write_version(account_block), - optional_fields.write_version - ); } } diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index 15a4d7aefbfef0..20147bdaf141ce 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ 
b/accounts-db/src/tiered_storage/meta.rs @@ -1,7 +1,6 @@ #![allow(dead_code)] //! The account meta and related structs for the tiered storage. use { - crate::account_storage::meta::StoredMetaWriteVersion, ::solana_sdk::{hash::Hash, stake_history::Epoch}, modular_bitfield::prelude::*, }; @@ -15,10 +14,8 @@ pub struct AccountMetaFlags { pub has_rent_epoch: bool, /// whether the account meta has account hash pub has_account_hash: bool, - /// whether the account meta has write version - pub has_write_version: bool, /// the reserved bits. - reserved: B29, + reserved: B30, } /// A trait that allows different implementations of the account meta that @@ -70,10 +67,6 @@ pub trait TieredAccountMeta: Sized { /// will be returned if this account does not persist this optional field. fn account_hash<'a>(&self, _account_block: &'a [u8]) -> Option<&'a Hash>; - /// Returns the write version by parsing the specified account block. None - /// will be returned if this account does not persist this optional field. - fn write_version(&self, _account_block: &[u8]) -> Option; - /// Returns the offset of the optional fields based on the specified account /// block. fn optional_fields_offset(&self, _account_block: &[u8]) -> usize; @@ -92,7 +85,6 @@ impl AccountMetaFlags { let mut flags = AccountMetaFlags::default(); flags.set_has_rent_epoch(optional_fields.rent_epoch.is_some()); flags.set_has_account_hash(optional_fields.account_hash.is_some()); - flags.set_has_write_version(optional_fields.write_version.is_some()); flags } } @@ -107,9 +99,6 @@ pub struct AccountMetaOptionalFields { pub rent_epoch: Option, /// the hash of its associated account pub account_hash: Option, - /// Order of stores of its associated account to an accounts file will - /// determine 'latest' account data per pubkey. 
- pub write_version: Option, } impl AccountMetaOptionalFields { @@ -117,9 +106,6 @@ impl AccountMetaOptionalFields { pub fn size(&self) -> usize { self.rent_epoch.map_or(0, |_| std::mem::size_of::()) + self.account_hash.map_or(0, |_| std::mem::size_of::()) - + self - .write_version - .map_or(0, |_| std::mem::size_of::()) } /// Given the specified AccountMetaFlags, returns the size of its @@ -132,9 +118,6 @@ impl AccountMetaOptionalFields { if flags.has_account_hash() { fields_size += std::mem::size_of::(); } - if flags.has_write_version() { - fields_size += std::mem::size_of::(); - } fields_size } @@ -155,17 +138,6 @@ impl AccountMetaOptionalFields { } offset } - - /// Given the specified AccountMetaFlags, returns the relative offset - /// of its write_version field to the offset of its optional fields entry. - pub fn write_version_offset(flags: &AccountMetaFlags) -> usize { - let mut offset = Self::account_hash_offset(flags); - // account hash is the previous field to write version - if flags.has_account_hash() { - offset += std::mem::size_of::(); - } - offset - } } #[cfg(test)] @@ -178,7 +150,6 @@ pub mod tests { assert!(!flags.has_rent_epoch()); assert!(!flags.has_account_hash()); - assert!(!flags.has_write_version()); assert_eq!(flags.reserved(), 0u32); assert_eq!( @@ -199,21 +170,12 @@ pub mod tests { assert!(flags.has_rent_epoch()); assert!(!flags.has_account_hash()); - assert!(!flags.has_write_version()); verify_flags_serialization(&flags); flags.set_has_account_hash(true); assert!(flags.has_rent_epoch()); assert!(flags.has_account_hash()); - assert!(!flags.has_write_version()); - verify_flags_serialization(&flags); - - flags.set_has_write_version(true); - - assert!(flags.has_rent_epoch()); - assert!(flags.has_account_hash()); - assert!(flags.has_write_version()); verify_flags_serialization(&flags); // make sure the reserved bits are untouched. 
@@ -224,27 +186,19 @@ pub mod tests { let flags: AccountMetaFlags = AccountMetaFlags::new_from(opt_fields); assert_eq!(flags.has_rent_epoch(), opt_fields.rent_epoch.is_some()); assert_eq!(flags.has_account_hash(), opt_fields.account_hash.is_some()); - assert_eq!( - flags.has_write_version(), - opt_fields.write_version.is_some() - ); assert_eq!(flags.reserved(), 0u32); } #[test] fn test_optional_fields_update_flags() { let test_epoch = 5432312; - let test_write_version = 231; for rent_epoch in [None, Some(test_epoch)] { for account_hash in [None, Some(Hash::new_unique())] { - for write_version in [None, Some(test_write_version)] { - update_and_verify_flags(&AccountMetaOptionalFields { - rent_epoch, - account_hash, - write_version, - }); - } + update_and_verify_flags(&AccountMetaOptionalFields { + rent_epoch, + account_hash, + }); } } } @@ -252,30 +206,24 @@ pub mod tests { #[test] fn test_optional_fields_size() { let test_epoch = 5432312; - let test_write_version = 231; for rent_epoch in [None, Some(test_epoch)] { for account_hash in [None, Some(Hash::new_unique())] { - for write_version in [None, Some(test_write_version)] { - let opt_fields = AccountMetaOptionalFields { - rent_epoch, - account_hash, - write_version, - }; - assert_eq!( - opt_fields.size(), - rent_epoch.map_or(0, |_| std::mem::size_of::()) - + account_hash.map_or(0, |_| std::mem::size_of::()) - + write_version - .map_or(0, |_| std::mem::size_of::()) - ); - assert_eq!( - opt_fields.size(), - AccountMetaOptionalFields::size_from_flags(&AccountMetaFlags::new_from( - &opt_fields - )) - ); - } + let opt_fields = AccountMetaOptionalFields { + rent_epoch, + account_hash, + }; + assert_eq!( + opt_fields.size(), + rent_epoch.map_or(0, |_| std::mem::size_of::()) + + account_hash.map_or(0, |_| std::mem::size_of::()) + ); + assert_eq!( + opt_fields.size(), + AccountMetaOptionalFields::size_from_flags(&AccountMetaFlags::new_from( + &opt_fields + )) + ); } } } @@ -283,47 +231,34 @@ pub mod tests { #[test] fn 
test_optional_fields_offset() { let test_epoch = 5432312; - let test_write_version = 231; for rent_epoch in [None, Some(test_epoch)] { - let rent_epoch_offset = 0; for account_hash in [None, Some(Hash::new_unique())] { - let mut account_hash_offset = rent_epoch_offset; - if rent_epoch.is_some() { - account_hash_offset += std::mem::size_of::(); - } - for write_version in [None, Some(test_write_version)] { - let mut write_version_offset = account_hash_offset; - if account_hash.is_some() { - write_version_offset += std::mem::size_of::(); - } - let opt_fields = AccountMetaOptionalFields { - rent_epoch, - account_hash, - write_version, - }; - let flags = AccountMetaFlags::new_from(&opt_fields); - assert_eq!( - AccountMetaOptionalFields::rent_epoch_offset(&flags), - rent_epoch_offset - ); - assert_eq!( - AccountMetaOptionalFields::account_hash_offset(&flags), - account_hash_offset - ); - assert_eq!( - AccountMetaOptionalFields::write_version_offset(&flags), - write_version_offset - ); - let mut derived_size = AccountMetaOptionalFields::write_version_offset(&flags); - if flags.has_write_version() { - derived_size += std::mem::size_of::(); - } - assert_eq!( - AccountMetaOptionalFields::size_from_flags(&flags), - derived_size - ); - } + let rent_epoch_offset = 0; + let account_hash_offset = + rent_epoch_offset + rent_epoch.as_ref().map(std::mem::size_of_val).unwrap_or(0); + let derived_size = account_hash_offset + + account_hash + .as_ref() + .map(std::mem::size_of_val) + .unwrap_or(0); + let opt_fields = AccountMetaOptionalFields { + rent_epoch, + account_hash, + }; + let flags = AccountMetaFlags::new_from(&opt_fields); + assert_eq!( + AccountMetaOptionalFields::rent_epoch_offset(&flags), + rent_epoch_offset + ); + assert_eq!( + AccountMetaOptionalFields::account_hash_offset(&flags), + account_hash_offset + ); + assert_eq!( + AccountMetaOptionalFields::size_from_flags(&flags), + derived_size + ); } } } diff --git a/accounts-db/src/tiered_storage/readable.rs 
b/accounts-db/src/tiered_storage/readable.rs index 686f622ea041a3..426da02ccbd260 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -1,12 +1,9 @@ use { - crate::{ - account_storage::meta::StoredMetaWriteVersion, - tiered_storage::{ - footer::{AccountMetaFormat, TieredStorageFooter}, - hot::HotStorageReader, - meta::TieredAccountMeta, - TieredStorageResult, - }, + crate::tiered_storage::{ + footer::{AccountMetaFormat, TieredStorageFooter}, + hot::HotStorageReader, + meta::TieredAccountMeta, + TieredStorageResult, }, solana_sdk::{account::ReadableAccount, hash::Hash, pubkey::Pubkey, stake_history::Epoch}, std::path::Path, @@ -44,11 +41,6 @@ impl<'accounts_file, M: TieredAccountMeta> TieredReadableAccount<'accounts_file, self.index } - /// Returns the write version of the account. - pub fn write_version(&self) -> Option { - self.meta.write_version(self.account_block) - } - /// Returns the data associated to this account. pub fn data(&self) -> &'accounts_file [u8] { self.meta.account_data(self.account_block) From 77632daca52060bf5e5bc60cb91949a8aa1d1b72 Mon Sep 17 00:00:00 2001 From: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> Date: Fri, 6 Oct 2023 16:24:28 -0400 Subject: [PATCH 272/407] [docs] removed >600 unused icon files (#33569) refactor: removed icons since none are used --- docs/src/icons/Archivers.inline.svg | 11 --------- docs/src/icons/Bulb.inline.svg | 13 ----------- docs/src/icons/Chat.inline.svg | 10 -------- docs/src/icons/Clipboard.inline.svg | 13 ----------- docs/src/icons/Cloudbreak.inline.svg | 11 --------- docs/src/icons/Code.inline.svg | 11 --------- docs/src/icons/Fire.inline.svg | 10 -------- docs/src/icons/Gamepad.inline.svg | 11 --------- docs/src/icons/Globe.inline.svg | 1 - docs/src/icons/Gulfstream.inline.svg | 11 --------- docs/src/icons/History.inline.svg | 11 --------- docs/src/icons/Money.inline.svg | 11 --------- docs/src/icons/Pipeline.inline.svg | 15 
------------ docs/src/icons/PoH.inline.svg | 11 --------- docs/src/icons/Sealevel.inline.svg | 11 --------- docs/src/icons/Tools.inline.svg | 11 --------- docs/src/icons/Tower.inline.svg | 11 --------- docs/src/icons/Turbine.inline.svg | 12 ---------- .../icons/duotone-icons/Clothes/Brassiere.svg | 10 -------- .../icons/duotone-icons/Clothes/Briefcase.svg | 11 --------- docs/src/icons/duotone-icons/Clothes/Cap.svg | 11 --------- .../src/icons/duotone-icons/Clothes/Crown.svg | 11 --------- .../src/icons/duotone-icons/Clothes/Dress.svg | 11 --------- .../icons/duotone-icons/Clothes/Hanger.svg | 10 -------- docs/src/icons/duotone-icons/Clothes/Hat.svg | 11 --------- .../icons/duotone-icons/Clothes/Panties.svg | 10 -------- .../src/icons/duotone-icons/Clothes/Shirt.svg | 11 --------- .../src/icons/duotone-icons/Clothes/Shoes.svg | 11 --------- .../icons/duotone-icons/Clothes/Shorts.svg | 10 -------- .../icons/duotone-icons/Clothes/Sneakers.svg | 11 --------- .../src/icons/duotone-icons/Clothes/Socks.svg | 11 --------- .../duotone-icons/Clothes/Sun-glasses.svg | 11 --------- .../icons/duotone-icons/Clothes/T-Shirt.svg | 10 -------- docs/src/icons/duotone-icons/Clothes/Tie.svg | 11 --------- .../icons/duotone-icons/Code/Backspace.svg | 11 --------- docs/src/icons/duotone-icons/Code/CMD.svg | 10 -------- .../icons/duotone-icons/Code/Code.inline.svg | 11 --------- docs/src/icons/duotone-icons/Code/Commit.svg | 11 --------- .../duotone-icons/Code/Compiling.inline.svg | 11 --------- docs/src/icons/duotone-icons/Code/Control.svg | 10 -------- .../icons/duotone-icons/Code/Done-circle.svg | 11 --------- .../icons/duotone-icons/Code/Error-circle.svg | 11 --------- docs/src/icons/duotone-icons/Code/Git#1.svg | 12 ---------- docs/src/icons/duotone-icons/Code/Git#2.svg | 15 ------------ docs/src/icons/duotone-icons/Code/Git#3.svg | 13 ----------- docs/src/icons/duotone-icons/Code/Git#4.svg | 13 ----------- docs/src/icons/duotone-icons/Code/Github.svg | 11 --------- 
.../icons/duotone-icons/Code/Info-circle.svg | 12 ---------- .../icons/duotone-icons/Code/Left-circle.svg | 11 --------- docs/src/icons/duotone-icons/Code/Loading.svg | 12 ---------- .../icons/duotone-icons/Code/Lock-circle.svg | 11 --------- .../duotone-icons/Code/Lock-overturning.svg | 11 --------- docs/src/icons/duotone-icons/Code/Minus.svg | 11 --------- docs/src/icons/duotone-icons/Code/Option.svg | 11 --------- docs/src/icons/duotone-icons/Code/Plus.svg | 11 --------- docs/src/icons/duotone-icons/Code/Puzzle.svg | 10 -------- .../duotone-icons/Code/Question-circle.svg | 11 --------- .../icons/duotone-icons/Code/Right-circle.svg | 11 --------- .../icons/duotone-icons/Code/Settings#4.svg | 11 --------- docs/src/icons/duotone-icons/Code/Shift.svg | 10 -------- docs/src/icons/duotone-icons/Code/Spy.svg | 11 --------- docs/src/icons/duotone-icons/Code/Stop.svg | 10 -------- .../src/icons/duotone-icons/Code/Terminal.svg | 11 --------- .../duotone-icons/Code/Thunder-circle.svg | 11 --------- .../duotone-icons/Code/Time-schedule.svg | 11 --------- .../duotone-icons/Code/Warning-1-circle.svg | 12 ---------- .../icons/duotone-icons/Code/Warning-2.svg | 12 ---------- .../Communication/Active-call.svg | 11 --------- .../duotone-icons/Communication/Add-user.svg | 11 --------- .../Communication/Address-card.svg | 10 -------- .../Communication/Adress-book#1.svg | 11 --------- .../Communication/Adress-book#2.svg | 11 --------- .../duotone-icons/Communication/Archive.svg | 10 -------- .../duotone-icons/Communication/Call#1.svg | 10 -------- .../duotone-icons/Communication/Call.svg | 10 -------- .../duotone-icons/Communication/Chat#1.svg | 11 --------- .../duotone-icons/Communication/Chat#2.svg | 11 --------- .../duotone-icons/Communication/Chat#4.svg | 10 -------- .../duotone-icons/Communication/Chat#5.svg | 11 --------- .../duotone-icons/Communication/Chat#6.svg | 11 --------- .../Communication/Chat-check.svg | 11 --------- .../Communication/Chat-error.svg | 11 --------- 
.../Communication/Chat-locked.svg | 11 --------- .../Communication/Chat-smile.svg | 11 --------- .../Communication/Clipboard-check.svg | 12 ---------- .../Communication/Clipboard-list.svg | 17 -------------- .../duotone-icons/Communication/Contact#1.svg | 11 --------- .../Communication/Delete-user.svg | 11 --------- .../Communication/Dial-numbers.svg | 18 --------------- .../duotone-icons/Communication/Flag.svg | 11 --------- .../duotone-icons/Communication/Forward.svg | 10 -------- .../Communication/Group-chat.svg | 11 --------- .../duotone-icons/Communication/Group.svg | 11 --------- .../Communication/Incoming-box.svg | 12 ---------- .../Communication/Incoming-call.svg | 11 --------- .../Communication/Incoming-mail.svg | 11 --------- .../duotone-icons/Communication/Mail-@.svg | 10 -------- .../Communication/Mail-attachment.svg | 11 --------- .../duotone-icons/Communication/Mail-box.svg | 11 --------- .../Communication/Mail-error.svg | 11 --------- .../Communication/Mail-heart.svg | 11 --------- .../Communication/Mail-locked.svg | 11 --------- .../Communication/Mail-notification.svg | 11 --------- .../Communication/Mail-opened.svg | 11 --------- .../Communication/Mail-unocked.svg | 11 --------- .../duotone-icons/Communication/Mail.svg | 10 -------- .../Communication/Missed-call.svg | 11 --------- .../Communication/Outgoing-box.svg | 12 ---------- .../Communication/Outgoing-call.svg | 11 --------- .../Communication/Outgoing-mail.svg | 11 --------- .../icons/duotone-icons/Communication/RSS.svg | 12 ---------- .../Communication/Readed-mail.svg | 11 --------- .../duotone-icons/Communication/Reply-all.svg | 11 --------- .../duotone-icons/Communication/Reply.svg | 10 -------- .../duotone-icons/Communication/Right.svg | 10 -------- .../duotone-icons/Communication/Safe-chat.svg | 11 --------- .../duotone-icons/Communication/Send.svg | 10 -------- .../Communication/Sending mail.svg | 11 --------- .../duotone-icons/Communication/Sending.svg | 11 --------- 
.../duotone-icons/Communication/Share.svg | 11 --------- .../Communication/Shield-thunder.svg | 11 --------- .../Communication/Shield-user.svg | 12 ---------- .../Communication/Snoozed-mail.svg | 11 --------- .../duotone-icons/Communication/Spam.svg | 10 -------- .../duotone-icons/Communication/Thumbtack.svg | 11 --------- .../Communication/Urgent-mail.svg | 11 --------- .../duotone-icons/Communication/Write.svg | 11 --------- .../duotone-icons/Cooking/Baking-glove.svg | 11 --------- docs/src/icons/duotone-icons/Cooking/Bowl.svg | 11 --------- docs/src/icons/duotone-icons/Cooking/Chef.svg | 11 --------- .../duotone-icons/Cooking/Cooking-book.svg | 17 -------------- .../duotone-icons/Cooking/Cooking-pot.svg | 11 --------- .../duotone-icons/Cooking/Cutting board.svg | 11 --------- .../icons/duotone-icons/Cooking/Dinner.svg | 14 ----------- docs/src/icons/duotone-icons/Cooking/Dish.svg | 11 --------- .../icons/duotone-icons/Cooking/Dishes.svg | 11 --------- .../Cooking/Fork-spoon-knife.svg | 15 ------------ .../duotone-icons/Cooking/Fork-spoon.svg | 13 ----------- docs/src/icons/duotone-icons/Cooking/Fork.svg | 11 --------- .../duotone-icons/Cooking/Frying-pan.svg | 11 --------- .../icons/duotone-icons/Cooking/Grater.svg | 11 --------- .../duotone-icons/Cooking/Kitchen-scale.svg | 11 --------- .../icons/duotone-icons/Cooking/Knife#1.svg | 11 --------- .../icons/duotone-icons/Cooking/Knife#2.svg | 11 --------- .../duotone-icons/Cooking/Knife&fork#1.svg | 13 ----------- .../duotone-icons/Cooking/Knife&fork#2.svg | 13 ----------- .../src/icons/duotone-icons/Cooking/Ladle.svg | 11 --------- .../duotone-icons/Cooking/Rolling-pin.svg | 11 --------- .../icons/duotone-icons/Cooking/Saucepan.svg | 11 --------- .../icons/duotone-icons/Cooking/Shovel.svg | 11 --------- .../src/icons/duotone-icons/Cooking/Sieve.svg | 11 --------- .../src/icons/duotone-icons/Cooking/Spoon.svg | 12 ---------- .../src/icons/duotone-icons/Design/Adjust.svg | 10 -------- 
.../Design/Anchor-center-down.svg | 14 ----------- .../duotone-icons/Design/Anchor-center-up.svg | 14 ----------- .../duotone-icons/Design/Anchor-center.svg | 11 --------- .../duotone-icons/Design/Anchor-left-down.svg | 14 ----------- .../duotone-icons/Design/Anchor-left-up.svg | 14 ----------- .../duotone-icons/Design/Anchor-left.svg | 14 ----------- .../Design/Anchor-right-down.svg | 14 ----------- .../duotone-icons/Design/Anchor-right-up.svg | 14 ----------- .../duotone-icons/Design/Anchor-right.svg | 14 ----------- .../src/icons/duotone-icons/Design/Arrows.svg | 11 --------- .../duotone-icons/Design/Bezier-curve.svg | 11 --------- .../src/icons/duotone-icons/Design/Border.svg | 10 -------- docs/src/icons/duotone-icons/Design/Brush.svg | 11 --------- .../src/icons/duotone-icons/Design/Bucket.svg | 11 --------- docs/src/icons/duotone-icons/Design/Cap-1.svg | 11 --------- docs/src/icons/duotone-icons/Design/Cap-2.svg | 10 -------- docs/src/icons/duotone-icons/Design/Cap-3.svg | 11 --------- .../src/icons/duotone-icons/Design/Circle.svg | 10 -------- .../duotone-icons/Design/Color-profile.svg | 11 --------- docs/src/icons/duotone-icons/Design/Color.svg | 10 -------- .../icons/duotone-icons/Design/Component.svg | 10 -------- docs/src/icons/duotone-icons/Design/Crop.svg | 11 --------- .../icons/duotone-icons/Design/Difference.svg | 11 --------- docs/src/icons/duotone-icons/Design/Edit.svg | 11 --------- .../src/icons/duotone-icons/Design/Eraser.svg | 10 -------- .../icons/duotone-icons/Design/Flatten.svg | 11 --------- .../duotone-icons/Design/Flip-horizontal.svg | 12 ---------- .../duotone-icons/Design/Flip-vertical.svg | 12 ---------- .../icons/duotone-icons/Design/Horizontal.svg | 11 --------- docs/src/icons/duotone-icons/Design/Image.svg | 10 -------- .../duotone-icons/Design/Interselect.svg | 11 --------- .../src/icons/duotone-icons/Design/Join-1.svg | 11 --------- .../src/icons/duotone-icons/Design/Join-2.svg | 11 --------- 
.../src/icons/duotone-icons/Design/Join-3.svg | 11 --------- .../src/icons/duotone-icons/Design/Layers.svg | 11 --------- docs/src/icons/duotone-icons/Design/Line.svg | 12 ---------- docs/src/icons/duotone-icons/Design/Magic.svg | 11 --------- docs/src/icons/duotone-icons/Design/Mask.svg | 11 --------- docs/src/icons/duotone-icons/Design/Patch.svg | 12 ---------- .../icons/duotone-icons/Design/Pen&ruller.svg | 11 --------- .../duotone-icons/Design/Pen-tool-vector.svg | 11 --------- .../src/icons/duotone-icons/Design/Pencil.svg | 11 --------- .../src/icons/duotone-icons/Design/Picker.svg | 11 --------- .../src/icons/duotone-icons/Design/Pixels.svg | 15 ------------ .../icons/duotone-icons/Design/Polygon.svg | 10 -------- .../icons/duotone-icons/Design/Position.svg | 10 -------- .../icons/duotone-icons/Design/Rectangle.svg | 10 -------- .../icons/duotone-icons/Design/Saturation.svg | 11 --------- .../src/icons/duotone-icons/Design/Select.svg | 11 --------- .../src/icons/duotone-icons/Design/Sketch.svg | 11 --------- docs/src/icons/duotone-icons/Design/Stamp.svg | 11 --------- .../icons/duotone-icons/Design/Substract.svg | 11 --------- .../src/icons/duotone-icons/Design/Target.svg | 11 --------- .../icons/duotone-icons/Design/Triangle.svg | 10 -------- docs/src/icons/duotone-icons/Design/Union.svg | 10 -------- .../icons/duotone-icons/Design/Vertical.svg | 11 --------- .../icons/duotone-icons/Design/Zoom minus.svg | 12 ---------- .../icons/duotone-icons/Design/Zoom plus.svg | 12 ---------- .../icons/duotone-icons/Devices/Airpods.svg | 13 ----------- .../icons/duotone-icons/Devices/Android.svg | 11 --------- .../duotone-icons/Devices/Apple-Watch.svg | 11 --------- .../Devices/Battery-charging.svg | 12 ---------- .../duotone-icons/Devices/Battery-empty.svg | 11 --------- .../duotone-icons/Devices/Battery-full.svg | 11 --------- .../duotone-icons/Devices/Battery-half.svg | 11 --------- .../icons/duotone-icons/Devices/Bluetooth.svg | 11 --------- 
.../src/icons/duotone-icons/Devices/CPU#1.svg | 17 -------------- .../src/icons/duotone-icons/Devices/CPU#2.svg | 23 ------------------- .../icons/duotone-icons/Devices/Camera.svg | 12 ---------- .../duotone-icons/Devices/Cardboard-vr.svg | 11 --------- .../icons/duotone-icons/Devices/Cassete.svg | 12 ---------- .../duotone-icons/Devices/Diagnostics.svg | 12 ---------- .../icons/duotone-icons/Devices/Display#1.svg | 11 --------- .../icons/duotone-icons/Devices/Display#2.svg | 12 ---------- .../icons/duotone-icons/Devices/Display#3.svg | 12 ---------- .../icons/duotone-icons/Devices/Gameboy.svg | 11 --------- .../icons/duotone-icons/Devices/Gamepad#1.svg | 11 --------- .../icons/duotone-icons/Devices/Gamepad#2.svg | 11 --------- .../icons/duotone-icons/Devices/Generator.svg | 13 ----------- .../duotone-icons/Devices/Hard-drive.svg | 11 --------- .../duotone-icons/Devices/Headphones.svg | 11 --------- .../icons/duotone-icons/Devices/Homepod.svg | 11 --------- .../icons/duotone-icons/Devices/Keyboard.svg | 11 --------- .../src/icons/duotone-icons/Devices/LTE#1.svg | 11 --------- .../src/icons/duotone-icons/Devices/LTE#2.svg | 11 --------- .../duotone-icons/Devices/Laptop-macbook.svg | 11 --------- .../icons/duotone-icons/Devices/Laptop.svg | 11 --------- docs/src/icons/duotone-icons/Devices/Mic.svg | 11 --------- docs/src/icons/duotone-icons/Devices/Midi.svg | 16 ------------- .../src/icons/duotone-icons/Devices/Mouse.svg | 12 ---------- .../src/icons/duotone-icons/Devices/Phone.svg | 12 ---------- .../icons/duotone-icons/Devices/Printer.svg | 11 --------- .../src/icons/duotone-icons/Devices/Radio.svg | 14 ----------- .../icons/duotone-icons/Devices/Router#1.svg | 11 --------- .../icons/duotone-icons/Devices/Router#2.svg | 11 --------- .../icons/duotone-icons/Devices/SD-card.svg | 10 -------- .../icons/duotone-icons/Devices/Server.svg | 12 ---------- .../icons/duotone-icons/Devices/Speaker.svg | 11 --------- docs/src/icons/duotone-icons/Devices/TV#1.svg | 12 
---------- docs/src/icons/duotone-icons/Devices/TV#2.svg | 11 --------- .../icons/duotone-icons/Devices/Tablet.svg | 11 --------- docs/src/icons/duotone-icons/Devices/USB.svg | 12 ---------- .../duotone-icons/Devices/Usb-storage.svg | 11 --------- .../duotone-icons/Devices/Video-camera.svg | 11 --------- .../icons/duotone-icons/Devices/Watch#1.svg | 13 ----------- .../icons/duotone-icons/Devices/Watch#2.svg | 13 ----------- .../src/icons/duotone-icons/Devices/Wi-fi.svg | 11 --------- docs/src/icons/duotone-icons/Devices/iMac.svg | 12 ---------- .../icons/duotone-icons/Devices/iPhone-X.svg | 11 --------- .../duotone-icons/Devices/iPhone-back.svg | 10 -------- .../duotone-icons/Devices/iPhone-x-back.svg | 10 -------- .../Electric/Air-conditioning.svg | 11 --------- .../icons/duotone-icons/Electric/Blender.svg | 10 -------- docs/src/icons/duotone-icons/Electric/Fan.svg | 12 ---------- .../icons/duotone-icons/Electric/Fridge.svg | 10 -------- .../duotone-icons/Electric/Gas-stove.svg | 11 --------- .../duotone-icons/Electric/Hair-dryer.svg | 11 --------- .../duotone-icons/Electric/Highvoltage.svg | 10 -------- .../src/icons/duotone-icons/Electric/Iron.svg | 11 --------- .../icons/duotone-icons/Electric/Kettle.svg | 11 --------- .../icons/duotone-icons/Electric/Mixer.svg | 11 --------- .../icons/duotone-icons/Electric/Outlet.svg | 11 --------- .../duotone-icons/Electric/Range-hood.svg | 11 --------- .../icons/duotone-icons/Electric/Shutdown.svg | 11 --------- .../duotone-icons/Electric/Socket-eu.svg | 10 -------- .../duotone-icons/Electric/Socket-us.svg | 10 -------- .../icons/duotone-icons/Electric/Washer.svg | 11 --------- .../duotone-icons/Files/Cloud-download.svg | 11 --------- .../duotone-icons/Files/Cloud-upload.svg | 11 --------- .../icons/duotone-icons/Files/Compilation.svg | 14 ----------- .../duotone-icons/Files/Compiled-file.svg | 14 ----------- .../duotone-icons/Files/Deleted-file.svg | 11 --------- .../duotone-icons/Files/Deleted-folder.svg | 11 --------- 
.../duotone-icons/Files/Download.inline.svg | 12 ---------- .../duotone-icons/Files/Downloaded file.svg | 11 --------- .../duotone-icons/Files/Downloads-folder.svg | 11 --------- docs/src/icons/duotone-icons/Files/Export.svg | 12 ---------- .../icons/duotone-icons/Files/File-cloud.svg | 11 --------- .../icons/duotone-icons/Files/File-done.svg | 11 --------- .../icons/duotone-icons/Files/File-minus.svg | 11 --------- .../icons/duotone-icons/Files/File-plus.svg | 11 --------- docs/src/icons/duotone-icons/Files/File.svg | 12 ---------- .../duotone-icons/Files/Folder-check.svg | 11 --------- .../duotone-icons/Files/Folder-cloud.svg | 11 --------- .../duotone-icons/Files/Folder-error.svg | 11 --------- .../duotone-icons/Files/Folder-heart.svg | 11 --------- .../duotone-icons/Files/Folder-minus.svg | 11 --------- .../icons/duotone-icons/Files/Folder-plus.svg | 11 --------- .../duotone-icons/Files/Folder-solid.svg | 10 -------- .../icons/duotone-icons/Files/Folder-star.svg | 11 --------- .../duotone-icons/Files/Folder-thunder.svg | 11 --------- docs/src/icons/duotone-icons/Files/Folder.svg | 10 -------- .../duotone-icons/Files/Group-folders.svg | 11 --------- docs/src/icons/duotone-icons/Files/Import.svg | 12 ---------- .../duotone-icons/Files/Locked-folder.svg | 11 --------- .../duotone-icons/Files/Media-folder.svg | 11 --------- docs/src/icons/duotone-icons/Files/Media.svg | 11 --------- docs/src/icons/duotone-icons/Files/Music.svg | 11 --------- .../icons/duotone-icons/Files/Pictures#1.svg | 13 ----------- .../icons/duotone-icons/Files/Pictures#2.svg | 15 ------------ .../duotone-icons/Files/Protected-file.svg | 11 --------- .../duotone-icons/Files/Selected-file.svg | 11 --------- docs/src/icons/duotone-icons/Files/Share.svg | 13 ----------- .../duotone-icons/Files/Upload-folder.svg | 11 --------- docs/src/icons/duotone-icons/Files/Upload.svg | 12 ---------- .../duotone-icons/Files/Uploaded-file.svg | 11 --------- .../icons/duotone-icons/Files/User-folder.svg | 12 
---------- docs/src/icons/duotone-icons/Food/Beer.svg | 14 ----------- .../src/icons/duotone-icons/Food/Bottle#1.svg | 11 --------- .../src/icons/duotone-icons/Food/Bottle#2.svg | 11 --------- docs/src/icons/duotone-icons/Food/Bread.svg | 11 --------- docs/src/icons/duotone-icons/Food/Bucket.svg | 11 --------- docs/src/icons/duotone-icons/Food/Burger.svg | 12 ---------- docs/src/icons/duotone-icons/Food/Cake.svg | 12 ---------- docs/src/icons/duotone-icons/Food/Carrot.svg | 13 ----------- docs/src/icons/duotone-icons/Food/Cheese.svg | 11 --------- docs/src/icons/duotone-icons/Food/Chicken.svg | 13 ----------- .../src/icons/duotone-icons/Food/Coffee#1.svg | 14 ----------- .../src/icons/duotone-icons/Food/Coffee#2.svg | 12 ---------- docs/src/icons/duotone-icons/Food/Cookie.svg | 13 ----------- docs/src/icons/duotone-icons/Food/Dinner.svg | 11 --------- docs/src/icons/duotone-icons/Food/Fish.svg | 11 --------- .../icons/duotone-icons/Food/French Bread.svg | 10 -------- .../duotone-icons/Food/Glass-martini.svg | 11 --------- .../icons/duotone-icons/Food/Ice-cream#1.svg | 11 --------- .../icons/duotone-icons/Food/Ice-cream#2.svg | 11 --------- .../icons/duotone-icons/Food/Miso-soup.svg | 11 --------- docs/src/icons/duotone-icons/Food/Orange.svg | 11 --------- docs/src/icons/duotone-icons/Food/Pizza.svg | 13 ----------- docs/src/icons/duotone-icons/Food/Sushi.svg | 11 --------- .../icons/duotone-icons/Food/Two-bottles.svg | 11 --------- docs/src/icons/duotone-icons/Food/Wine.svg | 11 --------- .../duotone-icons/General/Attachment#1.svg | 11 --------- .../duotone-icons/General/Attachment#2.svg | 13 ----------- .../icons/duotone-icons/General/Binocular.svg | 10 -------- .../icons/duotone-icons/General/Bookmark.svg | 10 -------- docs/src/icons/duotone-icons/General/Clip.svg | 10 -------- .../icons/duotone-icons/General/Clipboard.svg | 13 ----------- .../icons/duotone-icons/General/Cursor.svg | 10 -------- .../icons/duotone-icons/General/Dislike.svg | 11 --------- 
.../icons/duotone-icons/General/Duplicate.svg | 11 --------- docs/src/icons/duotone-icons/General/Edit.svg | 10 -------- .../duotone-icons/General/Expand-arrows.svg | 11 --------- docs/src/icons/duotone-icons/General/Fire.svg | 10 -------- .../icons/duotone-icons/General/Folder.svg | 10 -------- .../duotone-icons/General/Half-heart.svg | 11 --------- .../icons/duotone-icons/General/Half-star.svg | 11 --------- .../src/icons/duotone-icons/General/Heart.svg | 10 -------- .../icons/duotone-icons/General/Hidden.svg | 12 ---------- docs/src/icons/duotone-icons/General/Like.svg | 11 --------- docs/src/icons/duotone-icons/General/Lock.svg | 16 ------------- .../duotone-icons/General/Notification#2.svg | 11 --------- .../duotone-icons/General/Notifications#1.svg | 10 -------- .../icons/duotone-icons/General/Other#1.svg | 12 ---------- .../icons/duotone-icons/General/Other#2.svg | 12 ---------- docs/src/icons/duotone-icons/General/Sad.svg | 11 --------- docs/src/icons/duotone-icons/General/Save.svg | 11 --------- .../src/icons/duotone-icons/General/Scale.svg | 11 --------- .../icons/duotone-icons/General/Scissors.svg | 11 --------- .../icons/duotone-icons/General/Search.svg | 11 --------- .../duotone-icons/General/Settings#3.svg | 11 --------- .../General/Settings-1.inline.svg | 11 --------- .../duotone-icons/General/Settings-2.svg | 10 -------- .../duotone-icons/General/Shield-check.svg | 11 --------- .../duotone-icons/General/Shield-disabled.svg | 11 --------- .../General/Shield-protected.svg | 11 --------- docs/src/icons/duotone-icons/General/Size.svg | 11 --------- .../src/icons/duotone-icons/General/Smile.svg | 11 --------- docs/src/icons/duotone-icons/General/Star.svg | 10 -------- .../duotone-icons/General/Thunder-move.svg | 11 --------- .../icons/duotone-icons/General/Thunder.svg | 10 -------- .../src/icons/duotone-icons/General/Trash.svg | 11 --------- .../icons/duotone-icons/General/Unlock.svg | 16 ------------- .../icons/duotone-icons/General/Update.svg | 10 
-------- docs/src/icons/duotone-icons/General/User.svg | 11 --------- .../icons/duotone-icons/General/Visible.svg | 11 --------- .../icons/duotone-icons/Home/Air-ballon.svg | 11 --------- .../icons/duotone-icons/Home/Alarm-clock.svg | 12 ---------- .../src/icons/duotone-icons/Home/Armchair.svg | 11 --------- .../icons/duotone-icons/Home/Bag-chair.svg | 11 --------- docs/src/icons/duotone-icons/Home/Bath.svg | 11 --------- docs/src/icons/duotone-icons/Home/Bed.svg | 12 ---------- .../icons/duotone-icons/Home/Book-open.svg | 11 --------- docs/src/icons/duotone-icons/Home/Book.svg | 16 ------------- docs/src/icons/duotone-icons/Home/Box.svg | 11 --------- docs/src/icons/duotone-icons/Home/Broom.svg | 11 --------- .../src/icons/duotone-icons/Home/Building.svg | 12 ---------- docs/src/icons/duotone-icons/Home/Bulb#1.svg | 13 ----------- docs/src/icons/duotone-icons/Home/Bulb#2.svg | 13 ----------- docs/src/icons/duotone-icons/Home/Chair#1.svg | 11 --------- docs/src/icons/duotone-icons/Home/Chair#2.svg | 11 --------- docs/src/icons/duotone-icons/Home/Clock.svg | 11 --------- .../icons/duotone-icons/Home/Commode#1.svg | 11 --------- .../icons/duotone-icons/Home/Commode#2.svg | 11 --------- docs/src/icons/duotone-icons/Home/Couch.svg | 11 --------- .../src/icons/duotone-icons/Home/Cupboard.svg | 11 --------- .../src/icons/duotone-icons/Home/Curtains.svg | 11 --------- docs/src/icons/duotone-icons/Home/Deer.svg | 11 --------- .../icons/duotone-icons/Home/Door-open.svg | 11 --------- docs/src/icons/duotone-icons/Home/Earth.svg | 11 --------- .../icons/duotone-icons/Home/Fireplace.svg | 11 --------- .../icons/duotone-icons/Home/Flashlight.svg | 11 --------- .../src/icons/duotone-icons/Home/Flower#1.svg | 13 ----------- .../src/icons/duotone-icons/Home/Flower#2.svg | 14 ----------- .../src/icons/duotone-icons/Home/Flower#3.svg | 12 ---------- docs/src/icons/duotone-icons/Home/Globe.svg | 11 --------- .../icons/duotone-icons/Home/Home-heart.svg | 11 --------- 
docs/src/icons/duotone-icons/Home/Home.svg | 10 -------- docs/src/icons/duotone-icons/Home/Key.svg | 11 --------- docs/src/icons/duotone-icons/Home/Ladder.svg | 11 --------- docs/src/icons/duotone-icons/Home/Lamp#1.svg | 13 ----------- docs/src/icons/duotone-icons/Home/Lamp#2.svg | 12 ---------- docs/src/icons/duotone-icons/Home/Library.svg | 11 --------- docs/src/icons/duotone-icons/Home/Mailbox.svg | 11 --------- docs/src/icons/duotone-icons/Home/Mirror.svg | 11 --------- docs/src/icons/duotone-icons/Home/Picture.svg | 13 ----------- docs/src/icons/duotone-icons/Home/Ruller.svg | 10 -------- docs/src/icons/duotone-icons/Home/Stairs.svg | 10 -------- docs/src/icons/duotone-icons/Home/Timer.svg | 13 ----------- docs/src/icons/duotone-icons/Home/Toilet.svg | 11 --------- docs/src/icons/duotone-icons/Home/Towel.svg | 10 -------- docs/src/icons/duotone-icons/Home/Trash.svg | 11 --------- .../icons/duotone-icons/Home/Water-mixer.svg | 12 ---------- .../src/icons/duotone-icons/Home/Weight#1.svg | 11 --------- .../src/icons/duotone-icons/Home/Weight#2.svg | 11 --------- docs/src/icons/duotone-icons/Home/Wood#1.svg | 11 --------- docs/src/icons/duotone-icons/Home/Wood#2.svg | 11 --------- .../icons/duotone-icons/Home/Wood-horse.svg | 10 -------- .../icons/duotone-icons/Layout/Layout-3d.svg | 11 --------- .../duotone-icons/Layout/Layout-4-blocks.svg | 11 --------- .../Layout/Layout-arrange.inline.svg | 11 --------- .../duotone-icons/Layout/Layout-grid.svg | 11 --------- .../Layout/Layout-horizontal.svg | 11 --------- .../Layout/Layout-left-panel-1.svg | 11 --------- .../Layout/Layout-left-panel-2.svg | 11 --------- .../Layout/Layout-right-panel-1.svg | 11 --------- .../Layout/Layout-right-panel-2.svg | 11 --------- .../Layout/Layout-top-panel-1.svg | 11 --------- .../Layout/Layout-top-panel-2.svg | 11 --------- .../Layout/Layout-top-panel-3.svg | 11 --------- .../Layout/Layout-top-panel-4.svg | 11 --------- .../Layout/Layout-top-panel-5.svg | 11 --------- 
.../Layout/Layout-top-panel-6.svg | 11 --------- .../duotone-icons/Layout/Layout-vertical.svg | 11 --------- docs/src/icons/duotone-icons/Map/Compass.svg | 10 -------- .../icons/duotone-icons/Map/Direction#1.svg | 10 -------- .../icons/duotone-icons/Map/Direction#2.svg | 10 -------- .../duotone-icons/Map/Location-arrow.svg | 10 -------- docs/src/icons/duotone-icons/Map/Marker#1.svg | 10 -------- docs/src/icons/duotone-icons/Map/Marker#2.svg | 10 -------- docs/src/icons/duotone-icons/Map/Position.svg | 12 ---------- .../icons/duotone-icons/Media/Add-music.svg | 11 --------- .../duotone-icons/Media/Airplay-video.svg | 11 --------- .../src/icons/duotone-icons/Media/Airplay.svg | 11 --------- docs/src/icons/duotone-icons/Media/Back.svg | 11 --------- .../icons/duotone-icons/Media/Backward.svg | 11 --------- docs/src/icons/duotone-icons/Media/CD.svg | 11 --------- docs/src/icons/duotone-icons/Media/DVD.svg | 11 --------- docs/src/icons/duotone-icons/Media/Eject.svg | 11 --------- .../icons/duotone-icons/Media/Equalizer.svg | 13 ----------- .../src/icons/duotone-icons/Media/Forward.svg | 11 --------- .../duotone-icons/Media/Media-library#1.svg | 13 ----------- .../duotone-icons/Media/Media-library#2.svg | 11 --------- .../duotone-icons/Media/Media-library#3.svg | 17 -------------- .../duotone-icons/Media/Movie-Lane #2.svg | 11 --------- .../duotone-icons/Media/Movie-lane#1.svg | 11 --------- .../icons/duotone-icons/Media/Music-cloud.svg | 11 --------- .../icons/duotone-icons/Media/Music-note.svg | 10 -------- docs/src/icons/duotone-icons/Media/Music.svg | 10 -------- docs/src/icons/duotone-icons/Media/Mute.svg | 11 --------- docs/src/icons/duotone-icons/Media/Next.svg | 11 --------- docs/src/icons/duotone-icons/Media/Pause.svg | 10 -------- docs/src/icons/duotone-icons/Media/Play.svg | 10 -------- .../icons/duotone-icons/Media/Playlist#1.svg | 11 --------- .../icons/duotone-icons/Media/Playlist#2.svg | 11 --------- docs/src/icons/duotone-icons/Media/Rec.svg | 10 -------- 
.../icons/duotone-icons/Media/Repeat-one.svg | 11 --------- docs/src/icons/duotone-icons/Media/Repeat.svg | 11 --------- .../src/icons/duotone-icons/Media/Shuffle.svg | 11 --------- .../icons/duotone-icons/Media/Volume-down.svg | 11 --------- .../icons/duotone-icons/Media/Volume-full.svg | 11 --------- .../icons/duotone-icons/Media/Volume-half.svg | 11 --------- .../icons/duotone-icons/Media/Volume-up.svg | 11 --------- docs/src/icons/duotone-icons/Media/Vynil.svg | 11 --------- .../src/icons/duotone-icons/Media/Youtube.svg | 11 --------- .../Navigation/Angle-double-down.svg | 11 --------- .../Navigation/Angle-double-left.svg | 11 --------- .../Navigation/Angle-double-right.svg | 11 --------- .../Navigation/Angle-double-up.svg | 11 --------- .../duotone-icons/Navigation/Angle-down.svg | 10 -------- .../duotone-icons/Navigation/Angle-left.svg | 10 -------- .../duotone-icons/Navigation/Angle-right.svg | 10 -------- .../duotone-icons/Navigation/Angle-up.svg | 10 -------- .../duotone-icons/Navigation/Arrow-down.svg | 11 --------- .../Navigation/Arrow-from-bottom.svg | 12 ---------- .../Navigation/Arrow-from-left.svg | 12 ---------- .../Navigation/Arrow-from-right.svg | 12 ---------- .../Navigation/Arrow-from-top.svg | 12 ---------- .../duotone-icons/Navigation/Arrow-left.svg | 11 --------- .../duotone-icons/Navigation/Arrow-right.svg | 11 --------- .../Navigation/Arrow-to-bottom.svg | 12 ---------- .../Navigation/Arrow-to-left.svg | 12 ---------- .../Navigation/Arrow-to-right.svg | 12 ---------- .../duotone-icons/Navigation/Arrow-to-up.svg | 12 ---------- .../duotone-icons/Navigation/Arrow-up.svg | 11 --------- .../duotone-icons/Navigation/Arrows-h.svg | 12 ---------- .../duotone-icons/Navigation/Arrows-v.svg | 12 ---------- .../icons/duotone-icons/Navigation/Check.svg | 10 -------- .../icons/duotone-icons/Navigation/Close.svg | 12 ---------- .../duotone-icons/Navigation/Double-check.svg | 11 --------- .../icons/duotone-icons/Navigation/Down-2.svg | 11 --------- 
.../duotone-icons/Navigation/Down-left.svg | 11 --------- .../duotone-icons/Navigation/Down-right.svg | 11 --------- .../duotone-icons/Navigation/Exchange.svg | 13 ----------- .../icons/duotone-icons/Navigation/Left 3.svg | 11 --------- .../icons/duotone-icons/Navigation/Left-2.svg | 11 --------- .../icons/duotone-icons/Navigation/Minus.svg | 9 -------- .../icons/duotone-icons/Navigation/Plus.svg | 10 -------- .../duotone-icons/Navigation/Right 3.svg | 11 --------- .../duotone-icons/Navigation/Right-2.svg | 11 --------- .../icons/duotone-icons/Navigation/Route.svg | 12 ---------- .../duotone-icons/Navigation/Sign-in.svg | 12 ---------- .../duotone-icons/Navigation/Sign-out.svg | 12 ---------- .../icons/duotone-icons/Navigation/Up-2.svg | 11 --------- .../duotone-icons/Navigation/Up-down.svg | 13 ----------- .../duotone-icons/Navigation/Up-left.svg | 11 --------- .../duotone-icons/Navigation/Up-right.svg | 11 --------- .../duotone-icons/Navigation/Waiting.svg | 10 -------- docs/src/icons/duotone-icons/Shopping/ATM.svg | 11 --------- .../icons/duotone-icons/Shopping/Bag#1.svg | 11 --------- .../icons/duotone-icons/Shopping/Bag#2.svg | 11 --------- .../duotone-icons/Shopping/Barcode-read.svg | 11 --------- .../duotone-icons/Shopping/Barcode-scan.svg | 12 ---------- .../icons/duotone-icons/Shopping/Barcode.svg | 11 --------- .../icons/duotone-icons/Shopping/Bitcoin.svg | 12 ---------- .../icons/duotone-icons/Shopping/Box#1.svg | 11 --------- .../icons/duotone-icons/Shopping/Box#3.svg | 11 --------- .../duotone-icons/Shopping/Box2.inline.svg | 11 --------- .../duotone-icons/Shopping/Calculator.svg | 11 --------- .../icons/duotone-icons/Shopping/Cart#1.svg | 11 --------- .../icons/duotone-icons/Shopping/Cart#2.svg | 11 --------- .../icons/duotone-icons/Shopping/Cart#3.svg | 11 --------- .../duotone-icons/Shopping/Chart-bar#1.svg | 13 ----------- .../duotone-icons/Shopping/Chart-bar#2.svg | 13 ----------- .../duotone-icons/Shopping/Chart-bar#3.svg | 13 ----------- 
.../duotone-icons/Shopping/Chart-line#1.svg | 11 --------- .../duotone-icons/Shopping/Chart-line#2.svg | 11 --------- .../duotone-icons/Shopping/Chart-pie.svg | 11 --------- .../duotone-icons/Shopping/Credit-card.svg | 12 ---------- .../icons/duotone-icons/Shopping/Dollar.svg | 12 ---------- .../src/icons/duotone-icons/Shopping/Euro.svg | 11 --------- .../src/icons/duotone-icons/Shopping/Gift.svg | 11 --------- .../icons/duotone-icons/Shopping/Loader.svg | 12 ---------- docs/src/icons/duotone-icons/Shopping/MC.svg | 11 --------- .../icons/duotone-icons/Shopping/Money.svg | 11 --------- .../icons/duotone-icons/Shopping/Pound.svg | 11 --------- .../icons/duotone-icons/Shopping/Price #1.svg | 10 -------- .../icons/duotone-icons/Shopping/Price #2.svg | 14 ----------- .../icons/duotone-icons/Shopping/Rouble.svg | 11 --------- .../src/icons/duotone-icons/Shopping/Safe.svg | 11 --------- .../icons/duotone-icons/Shopping/Sale#1.svg | 11 --------- .../icons/duotone-icons/Shopping/Sale#2.svg | 13 ----------- .../icons/duotone-icons/Shopping/Settings.svg | 11 --------- .../icons/duotone-icons/Shopping/Sort#1.svg | 11 --------- .../icons/duotone-icons/Shopping/Sort#2.svg | 10 -------- .../icons/duotone-icons/Shopping/Sort#3.svg | 10 -------- .../icons/duotone-icons/Shopping/Ticket.svg | 10 -------- .../icons/duotone-icons/Shopping/Wallet#2.svg | 11 --------- .../icons/duotone-icons/Shopping/Wallet#3.svg | 12 ---------- .../icons/duotone-icons/Shopping/Wallet.svg | 12 ---------- .../icons/duotone-icons/Text/Align-auto.svg | 11 --------- .../icons/duotone-icons/Text/Align-center.svg | 11 --------- .../duotone-icons/Text/Align-justify.svg | 11 --------- .../icons/duotone-icons/Text/Align-left.svg | 12 ---------- .../icons/duotone-icons/Text/Align-right.svg | 11 --------- docs/src/icons/duotone-icons/Text/Article.svg | 11 --------- docs/src/icons/duotone-icons/Text/Bold.svg | 10 -------- .../icons/duotone-icons/Text/Bullet-list.svg | 11 --------- 
docs/src/icons/duotone-icons/Text/Code.svg | 10 -------- .../icons/duotone-icons/Text/Edit-text.svg | 11 --------- docs/src/icons/duotone-icons/Text/Filter.svg | 10 -------- docs/src/icons/duotone-icons/Text/Font.svg | 11 --------- docs/src/icons/duotone-icons/Text/H1.svg | 11 --------- docs/src/icons/duotone-icons/Text/H2.svg | 11 --------- docs/src/icons/duotone-icons/Text/Itallic.svg | 10 -------- docs/src/icons/duotone-icons/Text/Menu.svg | 11 --------- .../icons/duotone-icons/Text/Paragraph.svg | 10 -------- docs/src/icons/duotone-icons/Text/Quote#1.svg | 11 --------- docs/src/icons/duotone-icons/Text/Quote#2.svg | 11 --------- docs/src/icons/duotone-icons/Text/Redo.svg | 10 -------- .../duotone-icons/Text/Strikethrough.svg | 11 --------- .../icons/duotone-icons/Text/Text-height.svg | 11 --------- .../icons/duotone-icons/Text/Text-width.svg | 11 --------- docs/src/icons/duotone-icons/Text/Text.svg | 10 -------- .../icons/duotone-icons/Text/Underline.svg | 11 --------- docs/src/icons/duotone-icons/Text/Undo.svg | 10 -------- .../duotone-icons/Tools/Angle Grinder.svg | 12 ---------- docs/src/icons/duotone-icons/Tools/Axe.svg | 11 --------- docs/src/icons/duotone-icons/Tools/Brush.svg | 11 --------- .../src/icons/duotone-icons/Tools/Compass.svg | 11 --------- .../icons/duotone-icons/Tools/Hummer#2.svg | 12 ---------- docs/src/icons/duotone-icons/Tools/Hummer.svg | 11 --------- .../src/icons/duotone-icons/Tools/Pantone.svg | 12 ---------- .../icons/duotone-icons/Tools/Road-Cone.svg | 11 --------- docs/src/icons/duotone-icons/Tools/Roller.svg | 12 ---------- .../icons/duotone-icons/Tools/Roulette.svg | 11 --------- .../icons/duotone-icons/Tools/Screwdriver.svg | 11 --------- docs/src/icons/duotone-icons/Tools/Shovel.svg | 11 --------- .../src/icons/duotone-icons/Tools/Spatula.svg | 11 --------- .../icons/duotone-icons/Tools/Swiss-knife.svg | 11 --------- docs/src/icons/duotone-icons/Tools/Tools.svg | 11 --------- .../icons/duotone-icons/Weather/Celcium.svg | 11 
--------- .../icons/duotone-icons/Weather/Cloud#1.svg | 10 -------- .../icons/duotone-icons/Weather/Cloud#2.svg | 11 --------- .../icons/duotone-icons/Weather/Cloud-fog.svg | 11 --------- .../icons/duotone-icons/Weather/Cloud-sun.svg | 11 --------- .../duotone-icons/Weather/Cloud-wind.svg | 12 ---------- .../duotone-icons/Weather/Cloudy-night.svg | 11 --------- .../icons/duotone-icons/Weather/Cloudy.svg | 11 --------- .../icons/duotone-icons/Weather/Day-rain.svg | 11 --------- .../duotone-icons/Weather/Fahrenheit.svg | 11 --------- docs/src/icons/duotone-icons/Weather/Fog.svg | 15 ------------ docs/src/icons/duotone-icons/Weather/Moon.svg | 10 -------- .../icons/duotone-icons/Weather/Night-fog.svg | 11 --------- .../duotone-icons/Weather/Night-rain.svg | 11 --------- .../icons/duotone-icons/Weather/Rain#1.svg | 11 --------- .../icons/duotone-icons/Weather/Rain#2.svg | 11 --------- .../icons/duotone-icons/Weather/Rain#5.svg | 11 --------- .../icons/duotone-icons/Weather/Rainbow.svg | 12 ---------- .../icons/duotone-icons/Weather/Snow#1.svg | 11 --------- .../icons/duotone-icons/Weather/Snow#2.svg | 11 --------- .../icons/duotone-icons/Weather/Snow#3.svg | 11 --------- docs/src/icons/duotone-icons/Weather/Snow.svg | 11 --------- .../src/icons/duotone-icons/Weather/Storm.svg | 11 --------- .../icons/duotone-icons/Weather/Sun-fog.svg | 11 --------- docs/src/icons/duotone-icons/Weather/Sun.svg | 11 --------- .../icons/duotone-icons/Weather/Suset#1.svg | 11 --------- .../icons/duotone-icons/Weather/Suset#2.svg | 11 --------- .../Weather/Temperature-empty.svg | 10 -------- .../Weather/Temperature-full.svg | 10 -------- .../Weather/Temperature-half.svg | 10 -------- .../duotone-icons/Weather/Thunder-night.svg | 11 --------- .../icons/duotone-icons/Weather/Thunder.svg | 11 --------- .../icons/duotone-icons/Weather/Umbrella.svg | 11 --------- docs/src/icons/duotone-icons/Weather/Wind.svg | 11 --------- docs/src/icons/social/facebook.svg | 10 -------- 
docs/src/icons/social/instagram.svg | 10 -------- docs/src/icons/social/pinterest.svg | 3 --- docs/src/icons/social/twitter.svg | 10 -------- 662 files changed, 7437 deletions(-) delete mode 100644 docs/src/icons/Archivers.inline.svg delete mode 100755 docs/src/icons/Bulb.inline.svg delete mode 100755 docs/src/icons/Chat.inline.svg delete mode 100644 docs/src/icons/Clipboard.inline.svg delete mode 100644 docs/src/icons/Cloudbreak.inline.svg delete mode 100644 docs/src/icons/Code.inline.svg delete mode 100755 docs/src/icons/Fire.inline.svg delete mode 100644 docs/src/icons/Gamepad.inline.svg delete mode 100644 docs/src/icons/Globe.inline.svg delete mode 100644 docs/src/icons/Gulfstream.inline.svg delete mode 100644 docs/src/icons/History.inline.svg delete mode 100644 docs/src/icons/Money.inline.svg delete mode 100644 docs/src/icons/Pipeline.inline.svg delete mode 100644 docs/src/icons/PoH.inline.svg delete mode 100644 docs/src/icons/Sealevel.inline.svg delete mode 100644 docs/src/icons/Tools.inline.svg delete mode 100644 docs/src/icons/Tower.inline.svg delete mode 100644 docs/src/icons/Turbine.inline.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Brassiere.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Briefcase.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Cap.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Crown.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Dress.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Hanger.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Hat.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Panties.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Shirt.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Shoes.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Shorts.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Sneakers.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Socks.svg delete 
mode 100644 docs/src/icons/duotone-icons/Clothes/Sun-glasses.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/T-Shirt.svg delete mode 100644 docs/src/icons/duotone-icons/Clothes/Tie.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Backspace.svg delete mode 100644 docs/src/icons/duotone-icons/Code/CMD.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Code.inline.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Commit.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Compiling.inline.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Control.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Done-circle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Error-circle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Git#1.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Git#2.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Git#3.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Git#4.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Github.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Info-circle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Left-circle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Loading.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Lock-circle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Lock-overturning.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Minus.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Option.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Plus.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Puzzle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Question-circle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Right-circle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Settings#4.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Shift.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Spy.svg delete mode 
100644 docs/src/icons/duotone-icons/Code/Stop.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Terminal.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Thunder-circle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Time-schedule.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Warning-1-circle.svg delete mode 100644 docs/src/icons/duotone-icons/Code/Warning-2.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Active-call.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Add-user.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Address-card.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Adress-book#1.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Adress-book#2.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Archive.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Call#1.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Call.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Chat#1.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Chat#2.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Chat#4.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Chat#5.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Chat#6.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Chat-check.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Chat-error.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Chat-locked.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Chat-smile.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Clipboard-check.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Clipboard-list.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Contact#1.svg delete mode 100644 
docs/src/icons/duotone-icons/Communication/Delete-user.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Dial-numbers.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Flag.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Forward.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Group-chat.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Group.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Incoming-box.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Incoming-call.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Incoming-mail.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail-@.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail-attachment.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail-box.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail-error.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail-heart.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail-locked.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail-notification.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail-opened.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail-unocked.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Mail.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Missed-call.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Outgoing-box.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Outgoing-call.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Outgoing-mail.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/RSS.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Readed-mail.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Reply-all.svg delete mode 
100644 docs/src/icons/duotone-icons/Communication/Reply.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Right.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Safe-chat.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Send.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Sending mail.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Sending.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Share.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Shield-thunder.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Shield-user.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Snoozed-mail.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Spam.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Thumbtack.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Urgent-mail.svg delete mode 100644 docs/src/icons/duotone-icons/Communication/Write.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Baking-glove.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Bowl.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Chef.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Cooking-book.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Cooking-pot.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Cutting board.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Dinner.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Dish.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Dishes.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Fork-spoon-knife.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Fork-spoon.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Fork.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Frying-pan.svg delete mode 100644 
docs/src/icons/duotone-icons/Cooking/Grater.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Kitchen-scale.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Knife#1.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Knife#2.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Knife&fork#1.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Knife&fork#2.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Ladle.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Rolling-pin.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Saucepan.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Shovel.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Sieve.svg delete mode 100644 docs/src/icons/duotone-icons/Cooking/Spoon.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Adjust.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Anchor-center-down.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Anchor-center-up.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Anchor-center.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Anchor-left-down.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Anchor-left-up.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Anchor-left.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Anchor-right-down.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Anchor-right-up.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Anchor-right.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Arrows.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Bezier-curve.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Border.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Brush.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Bucket.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Cap-1.svg delete mode 100644 
docs/src/icons/duotone-icons/Design/Cap-2.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Cap-3.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Circle.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Color-profile.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Color.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Component.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Crop.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Difference.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Edit.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Eraser.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Flatten.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Flip-horizontal.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Flip-vertical.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Horizontal.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Image.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Interselect.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Join-1.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Join-2.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Join-3.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Layers.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Line.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Magic.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Mask.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Patch.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Pen&ruller.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Pen-tool-vector.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Pencil.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Picker.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Pixels.svg delete mode 100644 
docs/src/icons/duotone-icons/Design/Polygon.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Position.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Rectangle.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Saturation.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Select.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Sketch.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Stamp.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Substract.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Target.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Triangle.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Union.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Vertical.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Zoom minus.svg delete mode 100644 docs/src/icons/duotone-icons/Design/Zoom plus.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Airpods.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Android.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Apple-Watch.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Battery-charging.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Battery-empty.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Battery-full.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Battery-half.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Bluetooth.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/CPU#1.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/CPU#2.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Camera.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Cardboard-vr.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Cassete.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Diagnostics.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Display#1.svg delete mode 
100644 docs/src/icons/duotone-icons/Devices/Display#2.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Display#3.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Gameboy.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Gamepad#1.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Gamepad#2.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Generator.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Hard-drive.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Headphones.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Homepod.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Keyboard.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/LTE#1.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/LTE#2.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Laptop-macbook.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Laptop.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Mic.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Midi.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Mouse.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Phone.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Printer.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Radio.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Router#1.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Router#2.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/SD-card.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Server.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Speaker.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/TV#1.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/TV#2.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Tablet.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/USB.svg delete mode 100644 
docs/src/icons/duotone-icons/Devices/Usb-storage.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Video-camera.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Watch#1.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Watch#2.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/Wi-fi.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/iMac.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/iPhone-X.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/iPhone-back.svg delete mode 100644 docs/src/icons/duotone-icons/Devices/iPhone-x-back.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Air-conditioning.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Blender.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Fan.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Fridge.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Gas-stove.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Hair-dryer.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Highvoltage.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Iron.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Kettle.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Mixer.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Outlet.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Range-hood.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Shutdown.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Socket-eu.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Socket-us.svg delete mode 100644 docs/src/icons/duotone-icons/Electric/Washer.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Cloud-download.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Cloud-upload.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Compilation.svg delete mode 100644 
docs/src/icons/duotone-icons/Files/Compiled-file.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Deleted-file.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Deleted-folder.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Download.inline.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Downloaded file.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Downloads-folder.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Export.svg delete mode 100644 docs/src/icons/duotone-icons/Files/File-cloud.svg delete mode 100644 docs/src/icons/duotone-icons/Files/File-done.svg delete mode 100644 docs/src/icons/duotone-icons/Files/File-minus.svg delete mode 100644 docs/src/icons/duotone-icons/Files/File-plus.svg delete mode 100644 docs/src/icons/duotone-icons/Files/File.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder-check.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder-cloud.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder-error.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder-heart.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder-minus.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder-plus.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder-solid.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder-star.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder-thunder.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Folder.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Group-folders.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Import.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Locked-folder.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Media-folder.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Media.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Music.svg delete mode 100644 
docs/src/icons/duotone-icons/Files/Pictures#1.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Pictures#2.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Protected-file.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Selected-file.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Share.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Upload-folder.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Upload.svg delete mode 100644 docs/src/icons/duotone-icons/Files/Uploaded-file.svg delete mode 100644 docs/src/icons/duotone-icons/Files/User-folder.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Beer.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Bottle#1.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Bottle#2.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Bread.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Bucket.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Burger.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Cake.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Carrot.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Cheese.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Chicken.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Coffee#1.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Coffee#2.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Cookie.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Dinner.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Fish.svg delete mode 100644 docs/src/icons/duotone-icons/Food/French Bread.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Glass-martini.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Ice-cream#1.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Ice-cream#2.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Miso-soup.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Orange.svg delete mode 100644 
docs/src/icons/duotone-icons/Food/Pizza.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Sushi.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Two-bottles.svg delete mode 100644 docs/src/icons/duotone-icons/Food/Wine.svg delete mode 100644 docs/src/icons/duotone-icons/General/Attachment#1.svg delete mode 100644 docs/src/icons/duotone-icons/General/Attachment#2.svg delete mode 100644 docs/src/icons/duotone-icons/General/Binocular.svg delete mode 100644 docs/src/icons/duotone-icons/General/Bookmark.svg delete mode 100644 docs/src/icons/duotone-icons/General/Clip.svg delete mode 100644 docs/src/icons/duotone-icons/General/Clipboard.svg delete mode 100644 docs/src/icons/duotone-icons/General/Cursor.svg delete mode 100644 docs/src/icons/duotone-icons/General/Dislike.svg delete mode 100644 docs/src/icons/duotone-icons/General/Duplicate.svg delete mode 100644 docs/src/icons/duotone-icons/General/Edit.svg delete mode 100644 docs/src/icons/duotone-icons/General/Expand-arrows.svg delete mode 100644 docs/src/icons/duotone-icons/General/Fire.svg delete mode 100644 docs/src/icons/duotone-icons/General/Folder.svg delete mode 100644 docs/src/icons/duotone-icons/General/Half-heart.svg delete mode 100644 docs/src/icons/duotone-icons/General/Half-star.svg delete mode 100644 docs/src/icons/duotone-icons/General/Heart.svg delete mode 100644 docs/src/icons/duotone-icons/General/Hidden.svg delete mode 100644 docs/src/icons/duotone-icons/General/Like.svg delete mode 100644 docs/src/icons/duotone-icons/General/Lock.svg delete mode 100644 docs/src/icons/duotone-icons/General/Notification#2.svg delete mode 100644 docs/src/icons/duotone-icons/General/Notifications#1.svg delete mode 100644 docs/src/icons/duotone-icons/General/Other#1.svg delete mode 100644 docs/src/icons/duotone-icons/General/Other#2.svg delete mode 100644 docs/src/icons/duotone-icons/General/Sad.svg delete mode 100644 docs/src/icons/duotone-icons/General/Save.svg delete mode 100644 
docs/src/icons/duotone-icons/General/Scale.svg delete mode 100644 docs/src/icons/duotone-icons/General/Scissors.svg delete mode 100644 docs/src/icons/duotone-icons/General/Search.svg delete mode 100644 docs/src/icons/duotone-icons/General/Settings#3.svg delete mode 100644 docs/src/icons/duotone-icons/General/Settings-1.inline.svg delete mode 100644 docs/src/icons/duotone-icons/General/Settings-2.svg delete mode 100644 docs/src/icons/duotone-icons/General/Shield-check.svg delete mode 100644 docs/src/icons/duotone-icons/General/Shield-disabled.svg delete mode 100644 docs/src/icons/duotone-icons/General/Shield-protected.svg delete mode 100644 docs/src/icons/duotone-icons/General/Size.svg delete mode 100644 docs/src/icons/duotone-icons/General/Smile.svg delete mode 100644 docs/src/icons/duotone-icons/General/Star.svg delete mode 100644 docs/src/icons/duotone-icons/General/Thunder-move.svg delete mode 100644 docs/src/icons/duotone-icons/General/Thunder.svg delete mode 100644 docs/src/icons/duotone-icons/General/Trash.svg delete mode 100644 docs/src/icons/duotone-icons/General/Unlock.svg delete mode 100644 docs/src/icons/duotone-icons/General/Update.svg delete mode 100644 docs/src/icons/duotone-icons/General/User.svg delete mode 100644 docs/src/icons/duotone-icons/General/Visible.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Air-ballon.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Alarm-clock.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Armchair.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Bag-chair.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Bath.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Bed.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Book-open.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Book.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Box.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Broom.svg delete mode 100644 
docs/src/icons/duotone-icons/Home/Building.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Bulb#1.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Bulb#2.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Chair#1.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Chair#2.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Clock.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Commode#1.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Commode#2.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Couch.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Cupboard.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Curtains.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Deer.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Door-open.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Earth.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Fireplace.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Flashlight.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Flower#1.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Flower#2.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Flower#3.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Globe.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Home-heart.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Home.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Key.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Ladder.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Lamp#1.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Lamp#2.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Library.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Mailbox.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Mirror.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Picture.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Ruller.svg delete 
mode 100644 docs/src/icons/duotone-icons/Home/Stairs.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Timer.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Toilet.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Towel.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Trash.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Water-mixer.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Weight#1.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Weight#2.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Wood#1.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Wood#2.svg delete mode 100644 docs/src/icons/duotone-icons/Home/Wood-horse.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-3d.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-4-blocks.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-arrange.inline.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-grid.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-horizontal.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-left-panel-1.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-left-panel-2.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-right-panel-1.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-right-panel-2.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-top-panel-1.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-top-panel-2.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-top-panel-3.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-top-panel-4.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-top-panel-5.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-top-panel-6.svg delete mode 100644 docs/src/icons/duotone-icons/Layout/Layout-vertical.svg delete mode 100644 
docs/src/icons/duotone-icons/Map/Compass.svg delete mode 100644 docs/src/icons/duotone-icons/Map/Direction#1.svg delete mode 100644 docs/src/icons/duotone-icons/Map/Direction#2.svg delete mode 100644 docs/src/icons/duotone-icons/Map/Location-arrow.svg delete mode 100644 docs/src/icons/duotone-icons/Map/Marker#1.svg delete mode 100644 docs/src/icons/duotone-icons/Map/Marker#2.svg delete mode 100644 docs/src/icons/duotone-icons/Map/Position.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Add-music.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Airplay-video.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Airplay.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Back.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Backward.svg delete mode 100644 docs/src/icons/duotone-icons/Media/CD.svg delete mode 100644 docs/src/icons/duotone-icons/Media/DVD.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Eject.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Equalizer.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Forward.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Media-library#1.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Media-library#2.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Media-library#3.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Movie-Lane #2.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Movie-lane#1.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Music-cloud.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Music-note.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Music.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Mute.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Next.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Pause.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Play.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Playlist#1.svg 
delete mode 100644 docs/src/icons/duotone-icons/Media/Playlist#2.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Rec.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Repeat-one.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Repeat.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Shuffle.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Volume-down.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Volume-full.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Volume-half.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Volume-up.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Vynil.svg delete mode 100644 docs/src/icons/duotone-icons/Media/Youtube.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Angle-double-down.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Angle-double-left.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Angle-double-right.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Angle-double-up.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Angle-down.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Angle-left.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Angle-right.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Angle-up.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-down.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-from-bottom.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-from-left.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-from-right.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-from-top.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-left.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-right.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-to-bottom.svg delete mode 100644 
docs/src/icons/duotone-icons/Navigation/Arrow-to-left.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-to-right.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-to-up.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrow-up.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrows-h.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Arrows-v.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Check.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Close.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Double-check.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Down-2.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Down-left.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Down-right.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Exchange.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Left 3.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Left-2.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Minus.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Plus.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Right 3.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Right-2.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Route.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Sign-in.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Sign-out.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Up-2.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Up-down.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Up-left.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Up-right.svg delete mode 100644 docs/src/icons/duotone-icons/Navigation/Waiting.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/ATM.svg delete mode 100644 
docs/src/icons/duotone-icons/Shopping/Bag#1.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Bag#2.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Barcode-read.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Barcode-scan.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Barcode.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Bitcoin.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Box#1.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Box#3.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Box2.inline.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Calculator.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Cart#1.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Cart#2.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Cart#3.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Chart-bar#1.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Chart-bar#2.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Chart-bar#3.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Chart-line#1.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Chart-line#2.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Chart-pie.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Credit-card.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Dollar.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Euro.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Gift.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Loader.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/MC.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Money.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Pound.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Price #1.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Price 
#2.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Rouble.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Safe.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Sale#1.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Sale#2.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Settings.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Sort#1.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Sort#2.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Sort#3.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Ticket.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Wallet#2.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Wallet#3.svg delete mode 100644 docs/src/icons/duotone-icons/Shopping/Wallet.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Align-auto.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Align-center.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Align-justify.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Align-left.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Align-right.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Article.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Bold.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Bullet-list.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Code.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Edit-text.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Filter.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Font.svg delete mode 100644 docs/src/icons/duotone-icons/Text/H1.svg delete mode 100644 docs/src/icons/duotone-icons/Text/H2.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Itallic.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Menu.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Paragraph.svg delete mode 100644 
docs/src/icons/duotone-icons/Text/Quote#1.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Quote#2.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Redo.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Strikethrough.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Text-height.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Text-width.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Text.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Underline.svg delete mode 100644 docs/src/icons/duotone-icons/Text/Undo.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Angle Grinder.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Axe.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Brush.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Compass.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Hummer#2.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Hummer.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Pantone.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Road-Cone.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Roller.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Roulette.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Screwdriver.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Shovel.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Spatula.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Swiss-knife.svg delete mode 100644 docs/src/icons/duotone-icons/Tools/Tools.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Celcium.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Cloud#1.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Cloud#2.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Cloud-fog.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Cloud-sun.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Cloud-wind.svg delete mode 
100644 docs/src/icons/duotone-icons/Weather/Cloudy-night.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Cloudy.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Day-rain.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Fahrenheit.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Fog.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Moon.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Night-fog.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Night-rain.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Rain#1.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Rain#2.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Rain#5.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Rainbow.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Snow#1.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Snow#2.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Snow#3.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Snow.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Storm.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Sun-fog.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Sun.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Suset#1.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Suset#2.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Temperature-empty.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Temperature-full.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Temperature-half.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Thunder-night.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Thunder.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Umbrella.svg delete mode 100644 docs/src/icons/duotone-icons/Weather/Wind.svg delete mode 100644 docs/src/icons/social/facebook.svg delete mode 100644 
docs/src/icons/social/instagram.svg delete mode 100644 docs/src/icons/social/pinterest.svg delete mode 100644 docs/src/icons/social/twitter.svg diff --git a/docs/src/icons/Archivers.inline.svg b/docs/src/icons/Archivers.inline.svg deleted file mode 100644 index 81f60545aa7205..00000000000000 --- a/docs/src/icons/Archivers.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Shovel - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/Bulb.inline.svg b/docs/src/icons/Bulb.inline.svg deleted file mode 100755 index d31dea745e8a2b..00000000000000 --- a/docs/src/icons/Bulb.inline.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Home / Bulb#2 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/Chat.inline.svg b/docs/src/icons/Chat.inline.svg deleted file mode 100755 index 0130933e022557..00000000000000 --- a/docs/src/icons/Chat.inline.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Chat#4 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/Clipboard.inline.svg b/docs/src/icons/Clipboard.inline.svg deleted file mode 100644 index b3128284c75505..00000000000000 --- a/docs/src/icons/Clipboard.inline.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / General / Clipboard - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/Cloudbreak.inline.svg b/docs/src/icons/Cloudbreak.inline.svg deleted file mode 100644 index 66e5d5a74c29f3..00000000000000 --- a/docs/src/icons/Cloudbreak.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Cloud-sun - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/Code.inline.svg b/docs/src/icons/Code.inline.svg deleted file mode 100644 index 4122ae435c9017..00000000000000 --- a/docs/src/icons/Code.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Code - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/Fire.inline.svg b/docs/src/icons/Fire.inline.svg deleted file mode 100755 index 0d53792dfb19a6..00000000000000 --- a/docs/src/icons/Fire.inline.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Fire - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/Gamepad.inline.svg b/docs/src/icons/Gamepad.inline.svg deleted file mode 100644 index 2bde1e19ed59dc..00000000000000 --- a/docs/src/icons/Gamepad.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Gamepad#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/Globe.inline.svg b/docs/src/icons/Globe.inline.svg deleted file mode 100644 index 325aae1c324ff7..00000000000000 --- a/docs/src/icons/Globe.inline.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/docs/src/icons/Gulfstream.inline.svg b/docs/src/icons/Gulfstream.inline.svg deleted file mode 100644 index f9a178f93be8ed..00000000000000 --- a/docs/src/icons/Gulfstream.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Wind - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/History.inline.svg b/docs/src/icons/History.inline.svg deleted file mode 100644 index 00572b37fb55f5..00000000000000 --- a/docs/src/icons/History.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Time-schedule - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/Money.inline.svg b/docs/src/icons/Money.inline.svg deleted file mode 100644 index 067f79763c0345..00000000000000 --- a/docs/src/icons/Money.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Money - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/Pipeline.inline.svg b/docs/src/icons/Pipeline.inline.svg deleted file mode 100644 index 9129b0695b419c..00000000000000 --- a/docs/src/icons/Pipeline.inline.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - Stockholm-icons / Weather / Fog - Created with Sketch. - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/PoH.inline.svg b/docs/src/icons/PoH.inline.svg deleted file mode 100644 index 961f298fe36b2b..00000000000000 --- a/docs/src/icons/PoH.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Clock - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/Sealevel.inline.svg b/docs/src/icons/Sealevel.inline.svg deleted file mode 100644 index ba575e3ae21120..00000000000000 --- a/docs/src/icons/Sealevel.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Suset#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/Tools.inline.svg b/docs/src/icons/Tools.inline.svg deleted file mode 100644 index e155753427b106..00000000000000 --- a/docs/src/icons/Tools.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Tools - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/Tower.inline.svg b/docs/src/icons/Tower.inline.svg deleted file mode 100644 index 20300316ea0bd9..00000000000000 --- a/docs/src/icons/Tower.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / LTE#2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/Turbine.inline.svg b/docs/src/icons/Turbine.inline.svg deleted file mode 100644 index 0ccb1c20c3fdbb..00000000000000 --- a/docs/src/icons/Turbine.inline.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Electric / Fan - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Brassiere.svg b/docs/src/icons/duotone-icons/Clothes/Brassiere.svg deleted file mode 100644 index 00f8a5edcc4f2b..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Brassiere.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Clothes / Brassiere - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Briefcase.svg b/docs/src/icons/duotone-icons/Clothes/Briefcase.svg deleted file mode 100644 index 9b9bd8e8b2be0c..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Briefcase.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Briefcase - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Cap.svg b/docs/src/icons/duotone-icons/Clothes/Cap.svg deleted file mode 100644 index b6eee169db15b8..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Cap.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Cap - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Crown.svg b/docs/src/icons/duotone-icons/Clothes/Crown.svg deleted file mode 100644 index c9de1151f0eb48..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Crown.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Crown - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Dress.svg b/docs/src/icons/duotone-icons/Clothes/Dress.svg deleted file mode 100644 index f1d5576057c277..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Dress.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Dress - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Hanger.svg b/docs/src/icons/duotone-icons/Clothes/Hanger.svg deleted file mode 100644 index 89552533f89cb9..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Hanger.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Clothes / Hanger - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Hat.svg b/docs/src/icons/duotone-icons/Clothes/Hat.svg deleted file mode 100644 index b24782f870361f..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Hat.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Hat - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Panties.svg b/docs/src/icons/duotone-icons/Clothes/Panties.svg deleted file mode 100644 index 5aa26b23d95350..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Panties.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Clothes / Panties - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Shirt.svg b/docs/src/icons/duotone-icons/Clothes/Shirt.svg deleted file mode 100644 index 6e891dcffaae76..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Shirt.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Shirt - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Shoes.svg b/docs/src/icons/duotone-icons/Clothes/Shoes.svg deleted file mode 100644 index 9591cb203c25be..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Shoes.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Shoes - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Shorts.svg b/docs/src/icons/duotone-icons/Clothes/Shorts.svg deleted file mode 100644 index 6c06284ca3f00a..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Shorts.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Clothes / Shorts - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Sneakers.svg b/docs/src/icons/duotone-icons/Clothes/Sneakers.svg deleted file mode 100644 index bc739b5fb12045..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Sneakers.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Sneakers - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Socks.svg b/docs/src/icons/duotone-icons/Clothes/Socks.svg deleted file mode 100644 index a6d9f6e21b1fe8..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Socks.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Socks - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Sun-glasses.svg b/docs/src/icons/duotone-icons/Clothes/Sun-glasses.svg deleted file mode 100644 index 78cfc6e5a6bd61..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Sun-glasses.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Sun-glasses - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/T-Shirt.svg b/docs/src/icons/duotone-icons/Clothes/T-Shirt.svg deleted file mode 100644 index 77cb35e9687b45..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/T-Shirt.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Clothes / T-Shirt - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Clothes/Tie.svg b/docs/src/icons/duotone-icons/Clothes/Tie.svg deleted file mode 100644 index 3b5e815af93890..00000000000000 --- a/docs/src/icons/duotone-icons/Clothes/Tie.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Clothes / Tie - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Backspace.svg b/docs/src/icons/duotone-icons/Code/Backspace.svg deleted file mode 100644 index fd53a328cb9c70..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Backspace.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Backspace - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/CMD.svg b/docs/src/icons/duotone-icons/Code/CMD.svg deleted file mode 100644 index c58f6447295987..00000000000000 --- a/docs/src/icons/duotone-icons/Code/CMD.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Code / CMD - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Code.inline.svg b/docs/src/icons/duotone-icons/Code/Code.inline.svg deleted file mode 100644 index 4122ae435c9017..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Code.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Code - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Commit.svg b/docs/src/icons/duotone-icons/Code/Commit.svg deleted file mode 100644 index fc9bd2fc6a76ee..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Commit.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Commit - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Compiling.inline.svg b/docs/src/icons/duotone-icons/Code/Compiling.inline.svg deleted file mode 100644 index 71212f8bae0d70..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Compiling.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Compiling - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Control.svg b/docs/src/icons/duotone-icons/Code/Control.svg deleted file mode 100644 index d646b653b8e7a1..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Control.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Code / Control - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Done-circle.svg b/docs/src/icons/duotone-icons/Code/Done-circle.svg deleted file mode 100644 index f29d1b79fe1992..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Done-circle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Done-circle - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Error-circle.svg b/docs/src/icons/duotone-icons/Code/Error-circle.svg deleted file mode 100644 index 9c8026f0527bbd..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Error-circle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Error-circle - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Git#1.svg b/docs/src/icons/duotone-icons/Code/Git#1.svg deleted file mode 100644 index 8aed6415a16820..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Git#1.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Code / Git#1 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Git#2.svg b/docs/src/icons/duotone-icons/Code/Git#2.svg deleted file mode 100644 index 41b5c99eec1800..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Git#2.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - Stockholm-icons / Code / Git#2 - Created with Sketch. - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Git#3.svg b/docs/src/icons/duotone-icons/Code/Git#3.svg deleted file mode 100644 index 3e66f10c7425bb..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Git#3.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Code / Git#3 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Git#4.svg b/docs/src/icons/duotone-icons/Code/Git#4.svg deleted file mode 100644 index f317f38ad215d6..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Git#4.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Code / Git#4 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Github.svg b/docs/src/icons/duotone-icons/Code/Github.svg deleted file mode 100644 index 1c184600080f72..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Github.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Github - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Info-circle.svg b/docs/src/icons/duotone-icons/Code/Info-circle.svg deleted file mode 100644 index 5115975656115d..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Info-circle.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Code / Info-circle - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Left-circle.svg b/docs/src/icons/duotone-icons/Code/Left-circle.svg deleted file mode 100644 index 54894b4e34b235..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Left-circle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Left-circle - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Loading.svg b/docs/src/icons/duotone-icons/Code/Loading.svg deleted file mode 100644 index 25b5015b78ee8b..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Loading.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Code / Loading - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Lock-circle.svg b/docs/src/icons/duotone-icons/Code/Lock-circle.svg deleted file mode 100644 index 9c418376353d0e..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Lock-circle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Lock-circle - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Lock-overturning.svg b/docs/src/icons/duotone-icons/Code/Lock-overturning.svg deleted file mode 100644 index d4affd232b320e..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Lock-overturning.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Lock-overturning - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Minus.svg b/docs/src/icons/duotone-icons/Code/Minus.svg deleted file mode 100644 index 7ac349268cde61..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Minus.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Minus - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Option.svg b/docs/src/icons/duotone-icons/Code/Option.svg deleted file mode 100644 index a8104929d047fb..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Option.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Option - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Plus.svg b/docs/src/icons/duotone-icons/Code/Plus.svg deleted file mode 100644 index a864ad86acb9c6..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Plus.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Plus - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Puzzle.svg b/docs/src/icons/duotone-icons/Code/Puzzle.svg deleted file mode 100644 index ec83ca6ce29720..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Puzzle.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Code / Puzzle - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Question-circle.svg b/docs/src/icons/duotone-icons/Code/Question-circle.svg deleted file mode 100644 index d0b13e1c3b92df..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Question-circle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Question-circle - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Right-circle.svg b/docs/src/icons/duotone-icons/Code/Right-circle.svg deleted file mode 100644 index 6874f7981bc854..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Right-circle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Right-circle - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Settings#4.svg b/docs/src/icons/duotone-icons/Code/Settings#4.svg deleted file mode 100644 index 48a3b4f831c0fd..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Settings#4.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Settings#4 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Shift.svg b/docs/src/icons/duotone-icons/Code/Shift.svg deleted file mode 100644 index 2a513e8fd4965b..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Shift.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Code / Shift - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Spy.svg b/docs/src/icons/duotone-icons/Code/Spy.svg deleted file mode 100644 index 0c0e6e61bacd07..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Spy.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Spy - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Stop.svg b/docs/src/icons/duotone-icons/Code/Stop.svg deleted file mode 100644 index ec3fae0cb4751d..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Stop.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Code / Stop - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Terminal.svg b/docs/src/icons/duotone-icons/Code/Terminal.svg deleted file mode 100644 index 94692a39d2b11b..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Terminal.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Terminal - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Thunder-circle.svg b/docs/src/icons/duotone-icons/Code/Thunder-circle.svg deleted file mode 100644 index 69e66df8525c78..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Thunder-circle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Thunder-circle - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Time-schedule.svg b/docs/src/icons/duotone-icons/Code/Time-schedule.svg deleted file mode 100644 index 00572b37fb55f5..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Time-schedule.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Code / Time-schedule - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Warning-1-circle.svg b/docs/src/icons/duotone-icons/Code/Warning-1-circle.svg deleted file mode 100644 index 93939dcbecaf00..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Warning-1-circle.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Code / Warning-1-circle - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Code/Warning-2.svg b/docs/src/icons/duotone-icons/Code/Warning-2.svg deleted file mode 100644 index 7e1c781d3e5a41..00000000000000 --- a/docs/src/icons/duotone-icons/Code/Warning-2.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Code / Warning-2 - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Active-call.svg b/docs/src/icons/duotone-icons/Communication/Active-call.svg deleted file mode 100644 index 6e2ccfacd4765f..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Active-call.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Active-call - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Add-user.svg b/docs/src/icons/duotone-icons/Communication/Add-user.svg deleted file mode 100644 index bc5bd362c26202..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Add-user.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Add-user - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Address-card.svg b/docs/src/icons/duotone-icons/Communication/Address-card.svg deleted file mode 100644 index a231007e9b6af3..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Address-card.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Address-card - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Adress-book#1.svg b/docs/src/icons/duotone-icons/Communication/Adress-book#1.svg deleted file mode 100644 index 35499c9e90dbf0..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Adress-book#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Adress-book#1 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Adress-book#2.svg b/docs/src/icons/duotone-icons/Communication/Adress-book#2.svg deleted file mode 100644 index f5d5dcf84bb90a..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Adress-book#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Adress-book#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Archive.svg b/docs/src/icons/duotone-icons/Communication/Archive.svg deleted file mode 100644 index 28ee2368749c01..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Archive.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Archive - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Call#1.svg b/docs/src/icons/duotone-icons/Communication/Call#1.svg deleted file mode 100644 index a443afd860675e..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Call#1.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Call#1 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Call.svg b/docs/src/icons/duotone-icons/Communication/Call.svg deleted file mode 100644 index e235d5af408118..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Call.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Call - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Chat#1.svg b/docs/src/icons/duotone-icons/Communication/Chat#1.svg deleted file mode 100644 index c8b8a596713b6f..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Chat#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Chat#1 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Chat#2.svg b/docs/src/icons/duotone-icons/Communication/Chat#2.svg deleted file mode 100644 index 92ba45ffbe73e2..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Chat#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Chat#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Chat#4.svg b/docs/src/icons/duotone-icons/Communication/Chat#4.svg deleted file mode 100644 index 361e0d285d6484..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Chat#4.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Chat#4 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Chat#5.svg b/docs/src/icons/duotone-icons/Communication/Chat#5.svg deleted file mode 100644 index 80def23819ec44..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Chat#5.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Chat#5 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Chat#6.svg b/docs/src/icons/duotone-icons/Communication/Chat#6.svg deleted file mode 100644 index 49833a3a1bb3a9..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Chat#6.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Chat#6 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Chat-check.svg b/docs/src/icons/duotone-icons/Communication/Chat-check.svg deleted file mode 100644 index 1376c7ccb53cd5..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Chat-check.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Chat-check - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Chat-error.svg b/docs/src/icons/duotone-icons/Communication/Chat-error.svg deleted file mode 100644 index 7d6bcb9814a79f..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Chat-error.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Chat-error - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Chat-locked.svg b/docs/src/icons/duotone-icons/Communication/Chat-locked.svg deleted file mode 100644 index 5a16c3d7caa8d7..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Chat-locked.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Chat-locked - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Chat-smile.svg b/docs/src/icons/duotone-icons/Communication/Chat-smile.svg deleted file mode 100644 index e757c32e4d28ba..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Chat-smile.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Chat-smile - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Clipboard-check.svg b/docs/src/icons/duotone-icons/Communication/Clipboard-check.svg deleted file mode 100644 index 0dc8297ccbe792..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Clipboard-check.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Communication / Clipboard-check - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Clipboard-list.svg b/docs/src/icons/duotone-icons/Communication/Clipboard-list.svg deleted file mode 100644 index e39d03e80ec48e..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Clipboard-list.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - Stockholm-icons / Communication / Clipboard-list - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Contact#1.svg b/docs/src/icons/duotone-icons/Communication/Contact#1.svg deleted file mode 100644 index 4cf2c5fed2ef3d..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Contact#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Contact#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Delete-user.svg b/docs/src/icons/duotone-icons/Communication/Delete-user.svg deleted file mode 100644 index 5c1b0709ac6a1b..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Delete-user.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Delete-user - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Dial-numbers.svg b/docs/src/icons/duotone-icons/Communication/Dial-numbers.svg deleted file mode 100644 index 96b1f1352904e5..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Dial-numbers.svg +++ /dev/null @@ -1,18 +0,0 @@ - - - - Stockholm-icons / Communication / Dial-numbers - Created with Sketch. 
- - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Flag.svg b/docs/src/icons/duotone-icons/Communication/Flag.svg deleted file mode 100644 index 9e1b91fc13e580..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Flag.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Flag - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Forward.svg b/docs/src/icons/duotone-icons/Communication/Forward.svg deleted file mode 100644 index 693cfa47854086..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Forward.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Forward - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Group-chat.svg b/docs/src/icons/duotone-icons/Communication/Group-chat.svg deleted file mode 100644 index 3535848bb3d6ee..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Group-chat.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Group-chat - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Group.svg b/docs/src/icons/duotone-icons/Communication/Group.svg deleted file mode 100644 index 911bf455be8e27..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Group.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Group - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Incoming-box.svg b/docs/src/icons/duotone-icons/Communication/Incoming-box.svg deleted file mode 100644 index 5e52048f83a9b7..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Incoming-box.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Communication / Incoming-box - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Incoming-call.svg b/docs/src/icons/duotone-icons/Communication/Incoming-call.svg deleted file mode 100644 index fb359471c444c4..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Incoming-call.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Incoming-call - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Incoming-mail.svg b/docs/src/icons/duotone-icons/Communication/Incoming-mail.svg deleted file mode 100644 index e2b07afe82152f..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Incoming-mail.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Incoming-mail - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail-@.svg b/docs/src/icons/duotone-icons/Communication/Mail-@.svg deleted file mode 100644 index bd227eb0134754..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail-@.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Mail-@ - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail-attachment.svg b/docs/src/icons/duotone-icons/Communication/Mail-attachment.svg deleted file mode 100644 index ab8f7c41c43e4c..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail-attachment.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Mail-attachment - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail-box.svg b/docs/src/icons/duotone-icons/Communication/Mail-box.svg deleted file mode 100644 index 084f4d7409441f..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail-box.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Mail-box - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail-error.svg b/docs/src/icons/duotone-icons/Communication/Mail-error.svg deleted file mode 100644 index 3b8b990de4ba19..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail-error.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Mail-error - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail-heart.svg b/docs/src/icons/duotone-icons/Communication/Mail-heart.svg deleted file mode 100644 index 899163c31e91b1..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail-heart.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Mail-heart - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail-locked.svg b/docs/src/icons/duotone-icons/Communication/Mail-locked.svg deleted file mode 100644 index 1bc317acddb9d8..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail-locked.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Mail-locked - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail-notification.svg b/docs/src/icons/duotone-icons/Communication/Mail-notification.svg deleted file mode 100644 index 0117f08299c80d..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail-notification.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Mail-notification - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail-opened.svg b/docs/src/icons/duotone-icons/Communication/Mail-opened.svg deleted file mode 100644 index 96b317462f4d86..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail-opened.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Mail-opened - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail-unocked.svg b/docs/src/icons/duotone-icons/Communication/Mail-unocked.svg deleted file mode 100644 index 4d4194dc236541..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail-unocked.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Mail-unocked - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Mail.svg b/docs/src/icons/duotone-icons/Communication/Mail.svg deleted file mode 100644 index 41083245ff6a45..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Mail.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Mail - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Missed-call.svg b/docs/src/icons/duotone-icons/Communication/Missed-call.svg deleted file mode 100644 index 1eb62873748488..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Missed-call.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Missed-call - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Outgoing-box.svg b/docs/src/icons/duotone-icons/Communication/Outgoing-box.svg deleted file mode 100644 index 945f6b3abac6de..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Outgoing-box.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Communication / Outgoing-box - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Outgoing-call.svg b/docs/src/icons/duotone-icons/Communication/Outgoing-call.svg deleted file mode 100644 index ff64737d33e4f3..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Outgoing-call.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Outgoing-call - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Outgoing-mail.svg b/docs/src/icons/duotone-icons/Communication/Outgoing-mail.svg deleted file mode 100644 index c0fc7c4c3997ae..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Outgoing-mail.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Outgoing-mail - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/RSS.svg b/docs/src/icons/duotone-icons/Communication/RSS.svg deleted file mode 100644 index 3e8596dc0899f6..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/RSS.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Communication / RSS - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Readed-mail.svg b/docs/src/icons/duotone-icons/Communication/Readed-mail.svg deleted file mode 100644 index 9b0fd39bd4a0ab..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Readed-mail.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Readed-mail - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Reply-all.svg b/docs/src/icons/duotone-icons/Communication/Reply-all.svg deleted file mode 100644 index 371022743ee56e..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Reply-all.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Reply-all - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Reply.svg b/docs/src/icons/duotone-icons/Communication/Reply.svg deleted file mode 100644 index 59dc333a432d71..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Reply.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Reply - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Right.svg b/docs/src/icons/duotone-icons/Communication/Right.svg deleted file mode 100644 index ced9b74f5e0177..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Right.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Right - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Safe-chat.svg b/docs/src/icons/duotone-icons/Communication/Safe-chat.svg deleted file mode 100644 index 7ba85e2d5cc0e6..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Safe-chat.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Safe-chat - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Send.svg b/docs/src/icons/duotone-icons/Communication/Send.svg deleted file mode 100644 index 146f856916f847..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Send.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Send - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Sending mail.svg b/docs/src/icons/duotone-icons/Communication/Sending mail.svg deleted file mode 100644 index 1330dc8077d539..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Sending mail.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Sending mail - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Sending.svg b/docs/src/icons/duotone-icons/Communication/Sending.svg deleted file mode 100644 index dd33edc56e51a5..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Sending.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Sending - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Share.svg b/docs/src/icons/duotone-icons/Communication/Share.svg deleted file mode 100644 index 654a8f3e905ea3..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Share.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Share - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Shield-thunder.svg b/docs/src/icons/duotone-icons/Communication/Shield-thunder.svg deleted file mode 100644 index 8f0111efcfe048..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Shield-thunder.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Shield-thunder - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Shield-user.svg b/docs/src/icons/duotone-icons/Communication/Shield-user.svg deleted file mode 100644 index c8213c72a08f39..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Shield-user.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Communication / Shield-user - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Snoozed-mail.svg b/docs/src/icons/duotone-icons/Communication/Snoozed-mail.svg deleted file mode 100644 index eb18ca6440c25b..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Snoozed-mail.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Snoozed-mail - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Spam.svg b/docs/src/icons/duotone-icons/Communication/Spam.svg deleted file mode 100644 index f2979426fdc2a5..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Spam.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Communication / Spam - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Thumbtack.svg b/docs/src/icons/duotone-icons/Communication/Thumbtack.svg deleted file mode 100644 index cc9fa488790fe5..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Thumbtack.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Thumbtack - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Urgent-mail.svg b/docs/src/icons/duotone-icons/Communication/Urgent-mail.svg deleted file mode 100644 index dcdd8226b312e9..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Urgent-mail.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Urgent-mail - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Communication/Write.svg b/docs/src/icons/duotone-icons/Communication/Write.svg deleted file mode 100644 index 8d436d59c632d1..00000000000000 --- a/docs/src/icons/duotone-icons/Communication/Write.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Communication / Write - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Baking-glove.svg b/docs/src/icons/duotone-icons/Cooking/Baking-glove.svg deleted file mode 100644 index 9c2320b9cdb735..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Baking-glove.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Baking-glove - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Bowl.svg b/docs/src/icons/duotone-icons/Cooking/Bowl.svg deleted file mode 100644 index bf1ca919dc7dee..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Bowl.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Bowl - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Chef.svg b/docs/src/icons/duotone-icons/Cooking/Chef.svg deleted file mode 100644 index 4b654334c57344..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Chef.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Chef - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Cooking-book.svg b/docs/src/icons/duotone-icons/Cooking/Cooking-book.svg deleted file mode 100644 index 31723a92e9d468..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Cooking-book.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - Stockholm-icons / Cooking / Cooking-book - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Cooking-pot.svg b/docs/src/icons/duotone-icons/Cooking/Cooking-pot.svg deleted file mode 100644 index f26d816be057aa..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Cooking-pot.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Cooking-pot - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Cutting board.svg b/docs/src/icons/duotone-icons/Cooking/Cutting board.svg deleted file mode 100644 index dbf196e3b50279..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Cutting board.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Cutting board - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Dinner.svg b/docs/src/icons/duotone-icons/Cooking/Dinner.svg deleted file mode 100644 index 85225af684b098..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Dinner.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Cooking / Dinner - Created with Sketch. 
- - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Dish.svg b/docs/src/icons/duotone-icons/Cooking/Dish.svg deleted file mode 100644 index 4d88dea5c2d98d..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Dish.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Dish - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Dishes.svg b/docs/src/icons/duotone-icons/Cooking/Dishes.svg deleted file mode 100644 index 4cf5070b9b1934..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Dishes.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Dishes - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Fork-spoon-knife.svg b/docs/src/icons/duotone-icons/Cooking/Fork-spoon-knife.svg deleted file mode 100644 index 3fd1f460b78c32..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Fork-spoon-knife.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - Stockholm-icons / Cooking / Fork-spoon-knife - Created with Sketch. - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Fork-spoon.svg b/docs/src/icons/duotone-icons/Cooking/Fork-spoon.svg deleted file mode 100644 index 69a7adac874c14..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Fork-spoon.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Cooking / Fork-spoon - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Fork.svg b/docs/src/icons/duotone-icons/Cooking/Fork.svg deleted file mode 100644 index c526ba5f6c79d2..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Fork.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Fork - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Frying-pan.svg b/docs/src/icons/duotone-icons/Cooking/Frying-pan.svg deleted file mode 100644 index 4473128a31b6de..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Frying-pan.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Frying-pan - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Grater.svg b/docs/src/icons/duotone-icons/Cooking/Grater.svg deleted file mode 100644 index 555e811d6ee5a3..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Grater.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Grater - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Kitchen-scale.svg b/docs/src/icons/duotone-icons/Cooking/Kitchen-scale.svg deleted file mode 100644 index e4586f62fc3947..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Kitchen-scale.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Kitchen-scale - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Knife#1.svg b/docs/src/icons/duotone-icons/Cooking/Knife#1.svg deleted file mode 100644 index 591d39ab55b064..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Knife#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Knife#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Knife#2.svg b/docs/src/icons/duotone-icons/Cooking/Knife#2.svg deleted file mode 100644 index f8a6952f8f3f39..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Knife#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Knife#2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Knife&fork#1.svg b/docs/src/icons/duotone-icons/Cooking/Knife&fork#1.svg deleted file mode 100644 index 02eef7bbb6cb76..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Knife&fork#1.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Cooking / Knife&fork#1 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Knife&fork#2.svg b/docs/src/icons/duotone-icons/Cooking/Knife&fork#2.svg deleted file mode 100644 index f7095a0cd54fbc..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Knife&fork#2.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Cooking / Knife&fork#2 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Ladle.svg b/docs/src/icons/duotone-icons/Cooking/Ladle.svg deleted file mode 100644 index 7d756c6d5836cc..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Ladle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Ladle - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Rolling-pin.svg b/docs/src/icons/duotone-icons/Cooking/Rolling-pin.svg deleted file mode 100644 index e60e9b9c3c74d1..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Rolling-pin.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Rolling-pin - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Saucepan.svg b/docs/src/icons/duotone-icons/Cooking/Saucepan.svg deleted file mode 100644 index e83e7db7e49ac7..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Saucepan.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Saucepan - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Shovel.svg b/docs/src/icons/duotone-icons/Cooking/Shovel.svg deleted file mode 100644 index 49b0e5bc9af390..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Shovel.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Shovel - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Sieve.svg b/docs/src/icons/duotone-icons/Cooking/Sieve.svg deleted file mode 100644 index 0251d666593c83..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Sieve.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Cooking / Sieve - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Cooking/Spoon.svg b/docs/src/icons/duotone-icons/Cooking/Spoon.svg deleted file mode 100644 index 3aaa85aada2f35..00000000000000 --- a/docs/src/icons/duotone-icons/Cooking/Spoon.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Cooking / Spoon - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Adjust.svg b/docs/src/icons/duotone-icons/Design/Adjust.svg deleted file mode 100644 index 29b0fb63e90dd3..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Adjust.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Adjust - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Anchor-center-down.svg b/docs/src/icons/duotone-icons/Design/Anchor-center-down.svg deleted file mode 100644 index 156138726a4f32..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Anchor-center-down.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Design / Anchor-center-down - Created with Sketch. 
- - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Anchor-center-up.svg b/docs/src/icons/duotone-icons/Design/Anchor-center-up.svg deleted file mode 100644 index 0b2deba41130f7..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Anchor-center-up.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Design / Anchor-center-up - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Anchor-center.svg b/docs/src/icons/duotone-icons/Design/Anchor-center.svg deleted file mode 100644 index 1d071d2e8e2f81..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Anchor-center.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Anchor-center - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Anchor-left-down.svg b/docs/src/icons/duotone-icons/Design/Anchor-left-down.svg deleted file mode 100644 index 7d4270ad8e7194..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Anchor-left-down.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Design / Anchor-left-down - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Anchor-left-up.svg b/docs/src/icons/duotone-icons/Design/Anchor-left-up.svg deleted file mode 100644 index 97559c3013c849..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Anchor-left-up.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Design / Anchor-left-up - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Anchor-left.svg b/docs/src/icons/duotone-icons/Design/Anchor-left.svg deleted file mode 100644 index 6711c34916b66c..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Anchor-left.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Design / Anchor-left - Created with Sketch. 
- - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Anchor-right-down.svg b/docs/src/icons/duotone-icons/Design/Anchor-right-down.svg deleted file mode 100644 index 04995b30ffd9f0..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Anchor-right-down.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Design / Anchor-right-down - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Anchor-right-up.svg b/docs/src/icons/duotone-icons/Design/Anchor-right-up.svg deleted file mode 100644 index 331f99f4f5e517..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Anchor-right-up.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Design / Anchor-right-up - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Anchor-right.svg b/docs/src/icons/duotone-icons/Design/Anchor-right.svg deleted file mode 100644 index 510ecd004adec4..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Anchor-right.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Design / Anchor-right - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Arrows.svg b/docs/src/icons/duotone-icons/Design/Arrows.svg deleted file mode 100644 index bfa9fa9875bcc2..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Arrows.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Arrows - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Bezier-curve.svg b/docs/src/icons/duotone-icons/Design/Bezier-curve.svg deleted file mode 100644 index 69763d8e59ddc5..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Bezier-curve.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Bezier-curve - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Border.svg b/docs/src/icons/duotone-icons/Design/Border.svg deleted file mode 100644 index c9702d0a431c60..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Border.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Border - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Brush.svg b/docs/src/icons/duotone-icons/Design/Brush.svg deleted file mode 100644 index cbb8f25ea367b1..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Brush.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Brush - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Bucket.svg b/docs/src/icons/duotone-icons/Design/Bucket.svg deleted file mode 100644 index 55e14c036bea27..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Bucket.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Bucket - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Cap-1.svg b/docs/src/icons/duotone-icons/Design/Cap-1.svg deleted file mode 100644 index 505b1637470f0a..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Cap-1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Cap-1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Cap-2.svg b/docs/src/icons/duotone-icons/Design/Cap-2.svg deleted file mode 100644 index e6047489393fc4..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Cap-2.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Cap-2 - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Cap-3.svg b/docs/src/icons/duotone-icons/Design/Cap-3.svg deleted file mode 100644 index 4f2e1cb95aa111..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Cap-3.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Cap-3 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Circle.svg b/docs/src/icons/duotone-icons/Design/Circle.svg deleted file mode 100644 index c45788f845f120..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Circle.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Circle - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Color-profile.svg b/docs/src/icons/duotone-icons/Design/Color-profile.svg deleted file mode 100644 index 775ce6185e33a5..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Color-profile.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Color-profile - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Color.svg b/docs/src/icons/duotone-icons/Design/Color.svg deleted file mode 100644 index 31f976b4bdb4e2..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Color.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Color - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Component.svg b/docs/src/icons/duotone-icons/Design/Component.svg deleted file mode 100644 index 97d01502fe9f26..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Component.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Component - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Crop.svg b/docs/src/icons/duotone-icons/Design/Crop.svg deleted file mode 100644 index be9749d8473183..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Crop.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Crop - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Difference.svg b/docs/src/icons/duotone-icons/Design/Difference.svg deleted file mode 100644 index 5f740ecb081a30..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Difference.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Difference - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Edit.svg b/docs/src/icons/duotone-icons/Design/Edit.svg deleted file mode 100644 index 262ccd0a1515e4..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Edit.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Edit - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Eraser.svg b/docs/src/icons/duotone-icons/Design/Eraser.svg deleted file mode 100644 index fdb01e53b605a5..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Eraser.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Eraser - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Flatten.svg b/docs/src/icons/duotone-icons/Design/Flatten.svg deleted file mode 100644 index 9c86432bc10851..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Flatten.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Flatten - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Flip-horizontal.svg b/docs/src/icons/duotone-icons/Design/Flip-horizontal.svg deleted file mode 100644 index 7ea377a023f3bf..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Flip-horizontal.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Design / Flip-horizontal - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Flip-vertical.svg b/docs/src/icons/duotone-icons/Design/Flip-vertical.svg deleted file mode 100644 index 8efc83799786ad..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Flip-vertical.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Design / Flip-vertical - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Horizontal.svg b/docs/src/icons/duotone-icons/Design/Horizontal.svg deleted file mode 100644 index 61c285417fd335..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Horizontal.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Horizontal - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Image.svg b/docs/src/icons/duotone-icons/Design/Image.svg deleted file mode 100644 index 6a58f1c136ca5f..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Image.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Image - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Interselect.svg b/docs/src/icons/duotone-icons/Design/Interselect.svg deleted file mode 100644 index 24cea1dd53f65d..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Interselect.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Interselect - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Join-1.svg b/docs/src/icons/duotone-icons/Design/Join-1.svg deleted file mode 100644 index b3aa95db0376c1..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Join-1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Join-1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Join-2.svg b/docs/src/icons/duotone-icons/Design/Join-2.svg deleted file mode 100644 index c10fb0c2aa5077..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Join-2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Join-2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Join-3.svg b/docs/src/icons/duotone-icons/Design/Join-3.svg deleted file mode 100644 index ffc60d9d0d9f87..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Join-3.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Join-3 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Layers.svg b/docs/src/icons/duotone-icons/Design/Layers.svg deleted file mode 100644 index 8b6feb4f6b6eef..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Layers.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Layers - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Line.svg b/docs/src/icons/duotone-icons/Design/Line.svg deleted file mode 100644 index cc1cfff354f00e..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Line.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Design / Line - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Magic.svg b/docs/src/icons/duotone-icons/Design/Magic.svg deleted file mode 100644 index ed8260ba05ed49..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Magic.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Magic - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Mask.svg b/docs/src/icons/duotone-icons/Design/Mask.svg deleted file mode 100644 index e61397a62b9ac5..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Mask.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Mask - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Patch.svg b/docs/src/icons/duotone-icons/Design/Patch.svg deleted file mode 100644 index 72088678489b8b..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Patch.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Design / Patch - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Pen&ruller.svg b/docs/src/icons/duotone-icons/Design/Pen&ruller.svg deleted file mode 100644 index 2b5133c1fde727..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Pen&ruller.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Pen&ruller - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Pen-tool-vector.svg b/docs/src/icons/duotone-icons/Design/Pen-tool-vector.svg deleted file mode 100644 index 3113dd140ae649..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Pen-tool-vector.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Pen-tool-vector - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Pencil.svg b/docs/src/icons/duotone-icons/Design/Pencil.svg deleted file mode 100644 index 3e36126a721099..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Pencil.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Pencil - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Picker.svg b/docs/src/icons/duotone-icons/Design/Picker.svg deleted file mode 100644 index d268793348b428..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Picker.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Picker - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Pixels.svg b/docs/src/icons/duotone-icons/Design/Pixels.svg deleted file mode 100644 index 2abe6044c558c8..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Pixels.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - Stockholm-icons / Design / Pixels - Created with Sketch. - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Polygon.svg b/docs/src/icons/duotone-icons/Design/Polygon.svg deleted file mode 100644 index 4a2511fdf5eda9..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Polygon.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Polygon - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Position.svg b/docs/src/icons/duotone-icons/Design/Position.svg deleted file mode 100644 index ab985c33a41b5e..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Position.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Position - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Rectangle.svg b/docs/src/icons/duotone-icons/Design/Rectangle.svg deleted file mode 100644 index 58ec2abf6eb135..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Rectangle.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Rectangle - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Saturation.svg b/docs/src/icons/duotone-icons/Design/Saturation.svg deleted file mode 100644 index 94a9091413c39d..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Saturation.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Saturation - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Select.svg b/docs/src/icons/duotone-icons/Design/Select.svg deleted file mode 100644 index 4203a4c951cb3c..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Select.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Select - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Sketch.svg b/docs/src/icons/duotone-icons/Design/Sketch.svg deleted file mode 100644 index b645b29fe2493d..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Sketch.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Sketch - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Stamp.svg b/docs/src/icons/duotone-icons/Design/Stamp.svg deleted file mode 100644 index 3ed4ee75855648..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Stamp.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Stamp - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Substract.svg b/docs/src/icons/duotone-icons/Design/Substract.svg deleted file mode 100644 index e350dc3d311fa6..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Substract.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Substract - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Target.svg b/docs/src/icons/duotone-icons/Design/Target.svg deleted file mode 100644 index 479e5bda5f4405..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Target.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Target - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Triangle.svg b/docs/src/icons/duotone-icons/Design/Triangle.svg deleted file mode 100644 index ef163688bb5ee6..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Triangle.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Triangle - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Union.svg b/docs/src/icons/duotone-icons/Design/Union.svg deleted file mode 100644 index d810f46504a7e3..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Union.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Design / Union - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Vertical.svg b/docs/src/icons/duotone-icons/Design/Vertical.svg deleted file mode 100644 index e5c921bdb93e3a..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Vertical.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Design / Vertical - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Zoom minus.svg b/docs/src/icons/duotone-icons/Design/Zoom minus.svg deleted file mode 100644 index 7d53f41aaba4ed..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Zoom minus.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Design / Zoom minus - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Design/Zoom plus.svg b/docs/src/icons/duotone-icons/Design/Zoom plus.svg deleted file mode 100644 index ea522975ca0b1f..00000000000000 --- a/docs/src/icons/duotone-icons/Design/Zoom plus.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Design / Zoom plus - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Airpods.svg b/docs/src/icons/duotone-icons/Devices/Airpods.svg deleted file mode 100644 index b8124dde145080..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Airpods.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Devices / Airpods - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Android.svg b/docs/src/icons/duotone-icons/Devices/Android.svg deleted file mode 100644 index 7248d78c0209e4..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Android.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Android - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Apple-Watch.svg b/docs/src/icons/duotone-icons/Devices/Apple-Watch.svg deleted file mode 100644 index a59f62bdccfffe..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Apple-Watch.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Apple-Watch - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Battery-charging.svg b/docs/src/icons/duotone-icons/Devices/Battery-charging.svg deleted file mode 100644 index 4ffa1a87025c85..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Battery-charging.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / Battery-charging - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Battery-empty.svg b/docs/src/icons/duotone-icons/Devices/Battery-empty.svg deleted file mode 100644 index 2fcff4ff13ba73..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Battery-empty.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Battery-empty - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Battery-full.svg b/docs/src/icons/duotone-icons/Devices/Battery-full.svg deleted file mode 100644 index 3fe4664756554d..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Battery-full.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Battery-full - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Battery-half.svg b/docs/src/icons/duotone-icons/Devices/Battery-half.svg deleted file mode 100644 index 39189e9ed0ec9e..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Battery-half.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Battery-half - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Bluetooth.svg b/docs/src/icons/duotone-icons/Devices/Bluetooth.svg deleted file mode 100644 index cd0b09e10ed0dc..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Bluetooth.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Bluetooth - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/CPU#1.svg b/docs/src/icons/duotone-icons/Devices/CPU#1.svg deleted file mode 100644 index 78af45decb7a1d..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/CPU#1.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - Stockholm-icons / Devices / CPU#1 - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/CPU#2.svg b/docs/src/icons/duotone-icons/Devices/CPU#2.svg deleted file mode 100644 index d44fd3e355969a..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/CPU#2.svg +++ /dev/null @@ -1,23 +0,0 @@ - - - - Stockholm-icons / Devices / CPU#2 - Created with Sketch. - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Camera.svg b/docs/src/icons/duotone-icons/Devices/Camera.svg deleted file mode 100644 index 553651b1f559e0..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Camera.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / Camera - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Cardboard-vr.svg b/docs/src/icons/duotone-icons/Devices/Cardboard-vr.svg deleted file mode 100644 index d14096453a6dbe..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Cardboard-vr.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Cardboard-vr - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Cassete.svg b/docs/src/icons/duotone-icons/Devices/Cassete.svg deleted file mode 100644 index 34c9bd44e0fc5b..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Cassete.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / Cassete - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Diagnostics.svg b/docs/src/icons/duotone-icons/Devices/Diagnostics.svg deleted file mode 100644 index 14d4353dc88447..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Diagnostics.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / Diagnostics - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Display#1.svg b/docs/src/icons/duotone-icons/Devices/Display#1.svg deleted file mode 100644 index 0cbc2edbcdbf87..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Display#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Display#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Display#2.svg b/docs/src/icons/duotone-icons/Devices/Display#2.svg deleted file mode 100644 index 846f252726db88..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Display#2.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / Display#2 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Display#3.svg b/docs/src/icons/duotone-icons/Devices/Display#3.svg deleted file mode 100644 index d31c6919dd21a1..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Display#3.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / Display#3 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Gameboy.svg b/docs/src/icons/duotone-icons/Devices/Gameboy.svg deleted file mode 100644 index f0035226e2e756..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Gameboy.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Gameboy - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Gamepad#1.svg b/docs/src/icons/duotone-icons/Devices/Gamepad#1.svg deleted file mode 100644 index bdf4c3c4fb4ca1..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Gamepad#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Gamepad#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Gamepad#2.svg b/docs/src/icons/duotone-icons/Devices/Gamepad#2.svg deleted file mode 100644 index 2bde1e19ed59dc..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Gamepad#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Gamepad#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Generator.svg b/docs/src/icons/duotone-icons/Devices/Generator.svg deleted file mode 100644 index 02640f7c03c067..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Generator.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Devices / Generator - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Hard-drive.svg b/docs/src/icons/duotone-icons/Devices/Hard-drive.svg deleted file mode 100644 index ba2645dc836c2e..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Hard-drive.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Hard-drive - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Headphones.svg b/docs/src/icons/duotone-icons/Devices/Headphones.svg deleted file mode 100644 index c25056d0a6f498..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Headphones.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Headphones - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Homepod.svg b/docs/src/icons/duotone-icons/Devices/Homepod.svg deleted file mode 100644 index 5a95c39bcc845d..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Homepod.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Homepod - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Keyboard.svg b/docs/src/icons/duotone-icons/Devices/Keyboard.svg deleted file mode 100644 index dc9a8da8c62a63..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Keyboard.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Keyboard - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/LTE#1.svg b/docs/src/icons/duotone-icons/Devices/LTE#1.svg deleted file mode 100644 index 9f327b772ce143..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/LTE#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / LTE#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/LTE#2.svg b/docs/src/icons/duotone-icons/Devices/LTE#2.svg deleted file mode 100644 index 20300316ea0bd9..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/LTE#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / LTE#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Laptop-macbook.svg b/docs/src/icons/duotone-icons/Devices/Laptop-macbook.svg deleted file mode 100644 index 8c59e60c234ea9..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Laptop-macbook.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Laptop-macbook - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Laptop.svg b/docs/src/icons/duotone-icons/Devices/Laptop.svg deleted file mode 100644 index 8aed93b5ca4060..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Laptop.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Laptop - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Mic.svg b/docs/src/icons/duotone-icons/Devices/Mic.svg deleted file mode 100644 index 104508c028a734..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Mic.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Mic - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Midi.svg b/docs/src/icons/duotone-icons/Devices/Midi.svg deleted file mode 100644 index 68305b09e59c0c..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Midi.svg +++ /dev/null @@ -1,16 +0,0 @@ - - - - Stockholm-icons / Devices / Midi - Created with Sketch. - - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Mouse.svg b/docs/src/icons/duotone-icons/Devices/Mouse.svg deleted file mode 100644 index 098fd7d4f672f5..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Mouse.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / Mouse - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Phone.svg b/docs/src/icons/duotone-icons/Devices/Phone.svg deleted file mode 100644 index 17041aa093c055..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Phone.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / Phone - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Printer.svg b/docs/src/icons/duotone-icons/Devices/Printer.svg deleted file mode 100644 index b56af3c7cd7d8f..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Printer.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Printer - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Radio.svg b/docs/src/icons/duotone-icons/Devices/Radio.svg deleted file mode 100644 index c7f7acfd76a3ce..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Radio.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Devices / Radio - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Router#1.svg b/docs/src/icons/duotone-icons/Devices/Router#1.svg deleted file mode 100644 index eb964dab5b105e..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Router#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Router#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Router#2.svg b/docs/src/icons/duotone-icons/Devices/Router#2.svg deleted file mode 100644 index 09fb6e2e57f303..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Router#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Router#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/SD-card.svg b/docs/src/icons/duotone-icons/Devices/SD-card.svg deleted file mode 100644 index e805378837102e..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/SD-card.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Devices / SD-card - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Server.svg b/docs/src/icons/duotone-icons/Devices/Server.svg deleted file mode 100644 index 56329b7054f219..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Server.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / Server - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Speaker.svg b/docs/src/icons/duotone-icons/Devices/Speaker.svg deleted file mode 100644 index 80c05d2bc6f375..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Speaker.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Speaker - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/TV#1.svg b/docs/src/icons/duotone-icons/Devices/TV#1.svg deleted file mode 100644 index bb28d44e9eaf81..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/TV#1.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / TV#1 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/TV#2.svg b/docs/src/icons/duotone-icons/Devices/TV#2.svg deleted file mode 100644 index 0b298d4d4cbe54..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/TV#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / TV#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Tablet.svg b/docs/src/icons/duotone-icons/Devices/Tablet.svg deleted file mode 100644 index 99b14997a667d8..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Tablet.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Tablet - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/USB.svg b/docs/src/icons/duotone-icons/Devices/USB.svg deleted file mode 100644 index 4b0de0105a4513..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/USB.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / USB - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Usb-storage.svg b/docs/src/icons/duotone-icons/Devices/Usb-storage.svg deleted file mode 100644 index 642700e503d0b4..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Usb-storage.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Usb-storage - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Video-camera.svg b/docs/src/icons/duotone-icons/Devices/Video-camera.svg deleted file mode 100644 index 760cce56c761ee..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Video-camera.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Video-camera - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Watch#1.svg b/docs/src/icons/duotone-icons/Devices/Watch#1.svg deleted file mode 100644 index 079b243187acbe..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Watch#1.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Devices / Watch#1 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Watch#2.svg b/docs/src/icons/duotone-icons/Devices/Watch#2.svg deleted file mode 100644 index 0b416ad766448f..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Watch#2.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Devices / Watch#2 - Created with Sketch. 
- - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/Wi-fi.svg b/docs/src/icons/duotone-icons/Devices/Wi-fi.svg deleted file mode 100644 index c93111a792b934..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/Wi-fi.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / Wi-fi - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/iMac.svg b/docs/src/icons/duotone-icons/Devices/iMac.svg deleted file mode 100644 index 021cfdc12fdfde..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/iMac.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Devices / iMac - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/iPhone-X.svg b/docs/src/icons/duotone-icons/Devices/iPhone-X.svg deleted file mode 100644 index 28112b7864264d..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/iPhone-X.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Devices / iPhone-X - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/iPhone-back.svg b/docs/src/icons/duotone-icons/Devices/iPhone-back.svg deleted file mode 100644 index 2b96c3c9dd8e6a..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/iPhone-back.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Devices / iPhone-back - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Devices/iPhone-x-back.svg b/docs/src/icons/duotone-icons/Devices/iPhone-x-back.svg deleted file mode 100644 index 16c3d369231929..00000000000000 --- a/docs/src/icons/duotone-icons/Devices/iPhone-x-back.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Devices / iPhone-x-back - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Air-conditioning.svg b/docs/src/icons/duotone-icons/Electric/Air-conditioning.svg deleted file mode 100644 index b701190a6d12f6..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Air-conditioning.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Air-conditioning - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Blender.svg b/docs/src/icons/duotone-icons/Electric/Blender.svg deleted file mode 100644 index 188d81a9be6114..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Blender.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Electric / Blender - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Fan.svg b/docs/src/icons/duotone-icons/Electric/Fan.svg deleted file mode 100644 index 0ccb1c20c3fdbb..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Fan.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Electric / Fan - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Fridge.svg b/docs/src/icons/duotone-icons/Electric/Fridge.svg deleted file mode 100644 index 31f4efa91d6f32..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Fridge.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Electric / Fridge - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Gas-stove.svg b/docs/src/icons/duotone-icons/Electric/Gas-stove.svg deleted file mode 100644 index acbb4f90c1285e..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Gas-stove.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Gas-stove - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Hair-dryer.svg b/docs/src/icons/duotone-icons/Electric/Hair-dryer.svg deleted file mode 100644 index 37ca87cfbccf9b..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Hair-dryer.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Нair-dryer - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Highvoltage.svg b/docs/src/icons/duotone-icons/Electric/Highvoltage.svg deleted file mode 100644 index 4872e1ef7b4268..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Highvoltage.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Electric / Highvoltage - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Iron.svg b/docs/src/icons/duotone-icons/Electric/Iron.svg deleted file mode 100644 index 7acb0ab889e81d..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Iron.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Iron - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Kettle.svg b/docs/src/icons/duotone-icons/Electric/Kettle.svg deleted file mode 100644 index 00fed6a7929aa1..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Kettle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Kettle - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Mixer.svg b/docs/src/icons/duotone-icons/Electric/Mixer.svg deleted file mode 100644 index fa5d163c61d4f2..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Mixer.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Mixer - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Outlet.svg b/docs/src/icons/duotone-icons/Electric/Outlet.svg deleted file mode 100644 index b92cbdf8346247..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Outlet.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Outlet - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Range-hood.svg b/docs/src/icons/duotone-icons/Electric/Range-hood.svg deleted file mode 100644 index 298d5eb46dc519..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Range-hood.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Range-hood - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Shutdown.svg b/docs/src/icons/duotone-icons/Electric/Shutdown.svg deleted file mode 100644 index 756ce2725f3fd8..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Shutdown.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Shutdown - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Socket-eu.svg b/docs/src/icons/duotone-icons/Electric/Socket-eu.svg deleted file mode 100644 index 961b9bccf7ece0..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Socket-eu.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Electric / Socket-eu - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Socket-us.svg b/docs/src/icons/duotone-icons/Electric/Socket-us.svg deleted file mode 100644 index db19813f5d05e9..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Socket-us.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Electric / Socket-us - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Electric/Washer.svg b/docs/src/icons/duotone-icons/Electric/Washer.svg deleted file mode 100644 index 10f94848b1397b..00000000000000 --- a/docs/src/icons/duotone-icons/Electric/Washer.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Electric / Washer - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Cloud-download.svg b/docs/src/icons/duotone-icons/Files/Cloud-download.svg deleted file mode 100644 index f8bd620c804878..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Cloud-download.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Cloud-download - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Cloud-upload.svg b/docs/src/icons/duotone-icons/Files/Cloud-upload.svg deleted file mode 100644 index a0c9e52286043f..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Cloud-upload.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Cloud-upload - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Compilation.svg b/docs/src/icons/duotone-icons/Files/Compilation.svg deleted file mode 100644 index 1301f3ce6a6daf..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Compilation.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Files / Compilation - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Compiled-file.svg b/docs/src/icons/duotone-icons/Files/Compiled-file.svg deleted file mode 100644 index 9137d2ce6b2980..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Compiled-file.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Files / Compiled-file - Created with Sketch. 
- - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Deleted-file.svg b/docs/src/icons/duotone-icons/Files/Deleted-file.svg deleted file mode 100644 index d091043a887da7..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Deleted-file.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Deleted-file - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Deleted-folder.svg b/docs/src/icons/duotone-icons/Files/Deleted-folder.svg deleted file mode 100644 index bd0890b91cee4c..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Deleted-folder.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Deleted-folder - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Download.inline.svg b/docs/src/icons/duotone-icons/Files/Download.inline.svg deleted file mode 100644 index 43a64ee1c8d81a..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Download.inline.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Files / Download - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Downloaded file.svg b/docs/src/icons/duotone-icons/Files/Downloaded file.svg deleted file mode 100644 index 0db13260c0072f..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Downloaded file.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Downloaded file - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Downloads-folder.svg b/docs/src/icons/duotone-icons/Files/Downloads-folder.svg deleted file mode 100644 index 3de6fc92ede19d..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Downloads-folder.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Downloads-folder - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Export.svg b/docs/src/icons/duotone-icons/Files/Export.svg deleted file mode 100644 index 609b3499d8b893..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Export.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Files / Export - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/File-cloud.svg b/docs/src/icons/duotone-icons/Files/File-cloud.svg deleted file mode 100644 index b13faa993b6225..00000000000000 --- a/docs/src/icons/duotone-icons/Files/File-cloud.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / File-cloud - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/File-done.svg b/docs/src/icons/duotone-icons/Files/File-done.svg deleted file mode 100644 index 7d77fae00f3253..00000000000000 --- a/docs/src/icons/duotone-icons/Files/File-done.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / File-done - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/File-minus.svg b/docs/src/icons/duotone-icons/Files/File-minus.svg deleted file mode 100644 index 3fe6671fa0f130..00000000000000 --- a/docs/src/icons/duotone-icons/Files/File-minus.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / File-minus - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/File-plus.svg b/docs/src/icons/duotone-icons/Files/File-plus.svg deleted file mode 100644 index e210da7f6cd46d..00000000000000 --- a/docs/src/icons/duotone-icons/Files/File-plus.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / File-plus - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/File.svg b/docs/src/icons/duotone-icons/Files/File.svg deleted file mode 100644 index b5c0b13809eb59..00000000000000 --- a/docs/src/icons/duotone-icons/Files/File.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Files / File - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder-check.svg b/docs/src/icons/duotone-icons/Files/Folder-check.svg deleted file mode 100644 index ba05c9d98b885c..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder-check.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Folder-check - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder-cloud.svg b/docs/src/icons/duotone-icons/Files/Folder-cloud.svg deleted file mode 100644 index 876665e8fcb289..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder-cloud.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Folder-cloud - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder-error.svg b/docs/src/icons/duotone-icons/Files/Folder-error.svg deleted file mode 100644 index d4c30a85b71499..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder-error.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Folder-error - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder-heart.svg b/docs/src/icons/duotone-icons/Files/Folder-heart.svg deleted file mode 100644 index b6b0b4941d8a81..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder-heart.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Folder-heart - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder-minus.svg b/docs/src/icons/duotone-icons/Files/Folder-minus.svg deleted file mode 100644 index 157647ca141eb2..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder-minus.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Folder-minus - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder-plus.svg b/docs/src/icons/duotone-icons/Files/Folder-plus.svg deleted file mode 100644 index ef4c4143809f47..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder-plus.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Folder-plus - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder-solid.svg b/docs/src/icons/duotone-icons/Files/Folder-solid.svg deleted file mode 100644 index a8198d9d470aab..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder-solid.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Files / Folder-solid - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder-star.svg b/docs/src/icons/duotone-icons/Files/Folder-star.svg deleted file mode 100644 index 32ce2532252926..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder-star.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Folder-star - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder-thunder.svg b/docs/src/icons/duotone-icons/Files/Folder-thunder.svg deleted file mode 100644 index cd961efea10c9e..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder-thunder.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Folder-thunder - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Folder.svg b/docs/src/icons/duotone-icons/Files/Folder.svg deleted file mode 100644 index afb8a30ebd3027..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Folder.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Files / Folder - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Group-folders.svg b/docs/src/icons/duotone-icons/Files/Group-folders.svg deleted file mode 100644 index 58148ac263d28f..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Group-folders.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Group-folders - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Import.svg b/docs/src/icons/duotone-icons/Files/Import.svg deleted file mode 100644 index 9d233a9bc5cfb1..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Import.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Files / Import - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Locked-folder.svg b/docs/src/icons/duotone-icons/Files/Locked-folder.svg deleted file mode 100644 index cfb32384263f5b..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Locked-folder.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Locked-folder - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Media-folder.svg b/docs/src/icons/duotone-icons/Files/Media-folder.svg deleted file mode 100644 index fba811a6f37045..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Media-folder.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Media-folder - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Media.svg b/docs/src/icons/duotone-icons/Files/Media.svg deleted file mode 100644 index a8015fc6ea7ddd..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Media.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Media - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Music.svg b/docs/src/icons/duotone-icons/Files/Music.svg deleted file mode 100644 index 319e3b67849d30..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Music.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Music - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Pictures#1.svg b/docs/src/icons/duotone-icons/Files/Pictures#1.svg deleted file mode 100644 index 4c989a7a4a5382..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Pictures#1.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Files / Pictures#1 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Pictures#2.svg b/docs/src/icons/duotone-icons/Files/Pictures#2.svg deleted file mode 100644 index b0dd9b6fc68887..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Pictures#2.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - Stockholm-icons / Files / Pictures#2 - Created with Sketch. - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Protected-file.svg b/docs/src/icons/duotone-icons/Files/Protected-file.svg deleted file mode 100644 index 160a3ab752a92f..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Protected-file.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Protected-file - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Selected-file.svg b/docs/src/icons/duotone-icons/Files/Selected-file.svg deleted file mode 100644 index 1aa8eca1f37afd..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Selected-file.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Selected-file - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Share.svg b/docs/src/icons/duotone-icons/Files/Share.svg deleted file mode 100644 index d0c2d7da06eaf4..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Share.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Files / Share - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Upload-folder.svg b/docs/src/icons/duotone-icons/Files/Upload-folder.svg deleted file mode 100644 index 7f7e0d69ea31e6..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Upload-folder.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Upload-folder - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Upload.svg b/docs/src/icons/duotone-icons/Files/Upload.svg deleted file mode 100644 index 4a6b0ff6ba4f6d..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Upload.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Files / Upload - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/Uploaded-file.svg b/docs/src/icons/duotone-icons/Files/Uploaded-file.svg deleted file mode 100644 index c09f9161b26ba6..00000000000000 --- a/docs/src/icons/duotone-icons/Files/Uploaded-file.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Files / Uploaded-file - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Files/User-folder.svg b/docs/src/icons/duotone-icons/Files/User-folder.svg deleted file mode 100644 index 22ba00787e3ef8..00000000000000 --- a/docs/src/icons/duotone-icons/Files/User-folder.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Files / User-folder - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Beer.svg b/docs/src/icons/duotone-icons/Food/Beer.svg deleted file mode 100644 index c1b837772c4ae3..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Beer.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Food / Beer - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Bottle#1.svg b/docs/src/icons/duotone-icons/Food/Bottle#1.svg deleted file mode 100644 index 6dc19123c71a92..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Bottle#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Bottle#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Bottle#2.svg b/docs/src/icons/duotone-icons/Food/Bottle#2.svg deleted file mode 100644 index 8e5a3950ff2f79..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Bottle#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Bottle#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Bread.svg b/docs/src/icons/duotone-icons/Food/Bread.svg deleted file mode 100644 index 4108420b127645..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Bread.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Bread - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Bucket.svg b/docs/src/icons/duotone-icons/Food/Bucket.svg deleted file mode 100644 index 962a8208df2fd4..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Bucket.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Bucket - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Burger.svg b/docs/src/icons/duotone-icons/Food/Burger.svg deleted file mode 100644 index fffd1f0b58dca1..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Burger.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Food / Burger - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Cake.svg b/docs/src/icons/duotone-icons/Food/Cake.svg deleted file mode 100644 index 53c91b17c6fac5..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Cake.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Food / Cake - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Carrot.svg b/docs/src/icons/duotone-icons/Food/Carrot.svg deleted file mode 100644 index 6b239805e8e3b0..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Carrot.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Food / Carrot - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Cheese.svg b/docs/src/icons/duotone-icons/Food/Cheese.svg deleted file mode 100644 index 24b48f380ac6e0..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Cheese.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Cheese - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Chicken.svg b/docs/src/icons/duotone-icons/Food/Chicken.svg deleted file mode 100644 index 5ff51028c8d1f3..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Chicken.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Food / Chicken - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Coffee#1.svg b/docs/src/icons/duotone-icons/Food/Coffee#1.svg deleted file mode 100644 index f6364e9cfcaaaf..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Coffee#1.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Food / Coffee#1 - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Coffee#2.svg b/docs/src/icons/duotone-icons/Food/Coffee#2.svg deleted file mode 100644 index 121da1d2f76bab..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Coffee#2.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Food / Coffee#2 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Cookie.svg b/docs/src/icons/duotone-icons/Food/Cookie.svg deleted file mode 100644 index f0cdfc49c04179..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Cookie.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Food / Cookie - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Dinner.svg b/docs/src/icons/duotone-icons/Food/Dinner.svg deleted file mode 100644 index f41921be0fd4dd..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Dinner.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Dinner - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Fish.svg b/docs/src/icons/duotone-icons/Food/Fish.svg deleted file mode 100644 index 7ddf19ca913357..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Fish.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Fish - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/French Bread.svg b/docs/src/icons/duotone-icons/Food/French Bread.svg deleted file mode 100644 index 2ab67fd7873637..00000000000000 --- a/docs/src/icons/duotone-icons/Food/French Bread.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Food / French Bread - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Glass-martini.svg b/docs/src/icons/duotone-icons/Food/Glass-martini.svg deleted file mode 100644 index 1ffcbbc71281a4..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Glass-martini.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Glass-martini - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Ice-cream#1.svg b/docs/src/icons/duotone-icons/Food/Ice-cream#1.svg deleted file mode 100644 index 4a2af89c8da39e..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Ice-cream#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Ice-cream#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Ice-cream#2.svg b/docs/src/icons/duotone-icons/Food/Ice-cream#2.svg deleted file mode 100644 index 20d638a1d1ef4f..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Ice-cream#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Ice-cream#2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Miso-soup.svg b/docs/src/icons/duotone-icons/Food/Miso-soup.svg deleted file mode 100644 index 6ecd7ae1c7c894..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Miso-soup.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Miso-soup - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Orange.svg b/docs/src/icons/duotone-icons/Food/Orange.svg deleted file mode 100644 index fec58c21a1e9b4..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Orange.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Orange - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Pizza.svg b/docs/src/icons/duotone-icons/Food/Pizza.svg deleted file mode 100644 index c86845ccdc9fbe..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Pizza.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Food / Pizza - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Sushi.svg b/docs/src/icons/duotone-icons/Food/Sushi.svg deleted file mode 100644 index b327f67bfe2b22..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Sushi.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Sushi - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Two-bottles.svg b/docs/src/icons/duotone-icons/Food/Two-bottles.svg deleted file mode 100644 index 87be9dabba498c..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Two-bottles.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Two-bottles - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Food/Wine.svg b/docs/src/icons/duotone-icons/Food/Wine.svg deleted file mode 100644 index afe20c63061b54..00000000000000 --- a/docs/src/icons/duotone-icons/Food/Wine.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Food / Wine - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Attachment#1.svg b/docs/src/icons/duotone-icons/General/Attachment#1.svg deleted file mode 100644 index f46bfea52f6a47..00000000000000 --- a/docs/src/icons/duotone-icons/General/Attachment#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Attachment#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Attachment#2.svg b/docs/src/icons/duotone-icons/General/Attachment#2.svg deleted file mode 100644 index 8c14511c674c16..00000000000000 --- a/docs/src/icons/duotone-icons/General/Attachment#2.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / General / Attachment#2 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Binocular.svg b/docs/src/icons/duotone-icons/General/Binocular.svg deleted file mode 100644 index dc3983b51b4941..00000000000000 --- a/docs/src/icons/duotone-icons/General/Binocular.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Binocular - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Bookmark.svg b/docs/src/icons/duotone-icons/General/Bookmark.svg deleted file mode 100644 index 3ca53b8284fea6..00000000000000 --- a/docs/src/icons/duotone-icons/General/Bookmark.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Bookmark - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Clip.svg b/docs/src/icons/duotone-icons/General/Clip.svg deleted file mode 100644 index 56af0dd3068f7c..00000000000000 --- a/docs/src/icons/duotone-icons/General/Clip.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Clip - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Clipboard.svg b/docs/src/icons/duotone-icons/General/Clipboard.svg deleted file mode 100644 index 4b13d114dcb0ce..00000000000000 --- a/docs/src/icons/duotone-icons/General/Clipboard.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / General / Clipboard - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Cursor.svg b/docs/src/icons/duotone-icons/General/Cursor.svg deleted file mode 100644 index c6bd29d2888e1f..00000000000000 --- a/docs/src/icons/duotone-icons/General/Cursor.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Cursor - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Dislike.svg b/docs/src/icons/duotone-icons/General/Dislike.svg deleted file mode 100644 index 6fdad8029235ad..00000000000000 --- a/docs/src/icons/duotone-icons/General/Dislike.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Dislike - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Duplicate.svg b/docs/src/icons/duotone-icons/General/Duplicate.svg deleted file mode 100644 index b4e54da78d9bbd..00000000000000 --- a/docs/src/icons/duotone-icons/General/Duplicate.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Duplicate - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Edit.svg b/docs/src/icons/duotone-icons/General/Edit.svg deleted file mode 100644 index 6bba81b5573e05..00000000000000 --- a/docs/src/icons/duotone-icons/General/Edit.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Edit - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Expand-arrows.svg b/docs/src/icons/duotone-icons/General/Expand-arrows.svg deleted file mode 100644 index a217b81ba58ab3..00000000000000 --- a/docs/src/icons/duotone-icons/General/Expand-arrows.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Expand-arrows - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Fire.svg b/docs/src/icons/duotone-icons/General/Fire.svg deleted file mode 100644 index de50370b38f2b9..00000000000000 --- a/docs/src/icons/duotone-icons/General/Fire.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Fire - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Folder.svg b/docs/src/icons/duotone-icons/General/Folder.svg deleted file mode 100644 index 507ecae7576e66..00000000000000 --- a/docs/src/icons/duotone-icons/General/Folder.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Folder - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Half-heart.svg b/docs/src/icons/duotone-icons/General/Half-heart.svg deleted file mode 100644 index 8f9c45c4816a28..00000000000000 --- a/docs/src/icons/duotone-icons/General/Half-heart.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Half-heart - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Half-star.svg b/docs/src/icons/duotone-icons/General/Half-star.svg deleted file mode 100644 index abc709d3457b1f..00000000000000 --- a/docs/src/icons/duotone-icons/General/Half-star.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Half-star - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Heart.svg b/docs/src/icons/duotone-icons/General/Heart.svg deleted file mode 100644 index 2b8d33d94e00ce..00000000000000 --- a/docs/src/icons/duotone-icons/General/Heart.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Heart - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Hidden.svg b/docs/src/icons/duotone-icons/General/Hidden.svg deleted file mode 100644 index 2f64bd24a7c72a..00000000000000 --- a/docs/src/icons/duotone-icons/General/Hidden.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / General / Hidden - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Like.svg b/docs/src/icons/duotone-icons/General/Like.svg deleted file mode 100644 index 53898127cc8d4a..00000000000000 --- a/docs/src/icons/duotone-icons/General/Like.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Like - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Lock.svg b/docs/src/icons/duotone-icons/General/Lock.svg deleted file mode 100644 index c64d5dbf5ab64d..00000000000000 --- a/docs/src/icons/duotone-icons/General/Lock.svg +++ /dev/null @@ -1,16 +0,0 @@ - - - - Stockholm-icons / General / Lock - Created with Sketch. 
- - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Notification#2.svg b/docs/src/icons/duotone-icons/General/Notification#2.svg deleted file mode 100644 index 0b5534d3b8a15a..00000000000000 --- a/docs/src/icons/duotone-icons/General/Notification#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Notification#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Notifications#1.svg b/docs/src/icons/duotone-icons/General/Notifications#1.svg deleted file mode 100644 index c5061171ca6ec6..00000000000000 --- a/docs/src/icons/duotone-icons/General/Notifications#1.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Notifications#1 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Other#1.svg b/docs/src/icons/duotone-icons/General/Other#1.svg deleted file mode 100644 index d49b2547cc7931..00000000000000 --- a/docs/src/icons/duotone-icons/General/Other#1.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / General / Other#1 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Other#2.svg b/docs/src/icons/duotone-icons/General/Other#2.svg deleted file mode 100644 index 1eb3d06efdb6fd..00000000000000 --- a/docs/src/icons/duotone-icons/General/Other#2.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / General / Other#2 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Sad.svg b/docs/src/icons/duotone-icons/General/Sad.svg deleted file mode 100644 index 1a13070fe52b7a..00000000000000 --- a/docs/src/icons/duotone-icons/General/Sad.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Sad - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Save.svg b/docs/src/icons/duotone-icons/General/Save.svg deleted file mode 100644 index ebf726ab77b1b3..00000000000000 --- a/docs/src/icons/duotone-icons/General/Save.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Save - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Scale.svg b/docs/src/icons/duotone-icons/General/Scale.svg deleted file mode 100644 index 79f44c449bf5e7..00000000000000 --- a/docs/src/icons/duotone-icons/General/Scale.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Scale - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Scissors.svg b/docs/src/icons/duotone-icons/General/Scissors.svg deleted file mode 100644 index eb7e1ad192a616..00000000000000 --- a/docs/src/icons/duotone-icons/General/Scissors.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Scissors - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Search.svg b/docs/src/icons/duotone-icons/General/Search.svg deleted file mode 100644 index ea5a3206b9bfb5..00000000000000 --- a/docs/src/icons/duotone-icons/General/Search.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Search - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Settings#3.svg b/docs/src/icons/duotone-icons/General/Settings#3.svg deleted file mode 100644 index a3cfc7a849fdd8..00000000000000 --- a/docs/src/icons/duotone-icons/General/Settings#3.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Settings#3 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Settings-1.inline.svg b/docs/src/icons/duotone-icons/General/Settings-1.inline.svg deleted file mode 100644 index 680f892ad5cc52..00000000000000 --- a/docs/src/icons/duotone-icons/General/Settings-1.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Settings-1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Settings-2.svg b/docs/src/icons/duotone-icons/General/Settings-2.svg deleted file mode 100644 index eac95a6c30a7f7..00000000000000 --- a/docs/src/icons/duotone-icons/General/Settings-2.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Settings-2 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Shield-check.svg b/docs/src/icons/duotone-icons/General/Shield-check.svg deleted file mode 100644 index 2b499a90e19224..00000000000000 --- a/docs/src/icons/duotone-icons/General/Shield-check.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Shield-check - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Shield-disabled.svg b/docs/src/icons/duotone-icons/General/Shield-disabled.svg deleted file mode 100644 index 3b7606a1ca2840..00000000000000 --- a/docs/src/icons/duotone-icons/General/Shield-disabled.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Shield-disabled - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Shield-protected.svg b/docs/src/icons/duotone-icons/General/Shield-protected.svg deleted file mode 100644 index 60a53f4adcba14..00000000000000 --- a/docs/src/icons/duotone-icons/General/Shield-protected.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Shield-protected - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Size.svg b/docs/src/icons/duotone-icons/General/Size.svg deleted file mode 100644 index 5aae16495963d5..00000000000000 --- a/docs/src/icons/duotone-icons/General/Size.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Size - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Smile.svg b/docs/src/icons/duotone-icons/General/Smile.svg deleted file mode 100644 index 4658752c7a8647..00000000000000 --- a/docs/src/icons/duotone-icons/General/Smile.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Smile - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Star.svg b/docs/src/icons/duotone-icons/General/Star.svg deleted file mode 100644 index f49dee46a13e74..00000000000000 --- a/docs/src/icons/duotone-icons/General/Star.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Star - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Thunder-move.svg b/docs/src/icons/duotone-icons/General/Thunder-move.svg deleted file mode 100644 index 8345b470f5190f..00000000000000 --- a/docs/src/icons/duotone-icons/General/Thunder-move.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Thunder-move - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Thunder.svg b/docs/src/icons/duotone-icons/General/Thunder.svg deleted file mode 100644 index 7ce62b4d70a66d..00000000000000 --- a/docs/src/icons/duotone-icons/General/Thunder.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Thunder - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Trash.svg b/docs/src/icons/duotone-icons/General/Trash.svg deleted file mode 100644 index 456e9c4ec23877..00000000000000 --- a/docs/src/icons/duotone-icons/General/Trash.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Trash - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Unlock.svg b/docs/src/icons/duotone-icons/General/Unlock.svg deleted file mode 100644 index 3acae6d8664a18..00000000000000 --- a/docs/src/icons/duotone-icons/General/Unlock.svg +++ /dev/null @@ -1,16 +0,0 @@ - - - - Stockholm-icons / General / Unlock - Created with Sketch. - - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Update.svg b/docs/src/icons/duotone-icons/General/Update.svg deleted file mode 100644 index 51ed8657772d9d..00000000000000 --- a/docs/src/icons/duotone-icons/General/Update.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / General / Update - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/User.svg b/docs/src/icons/duotone-icons/General/User.svg deleted file mode 100644 index 0f94533bbf93bc..00000000000000 --- a/docs/src/icons/duotone-icons/General/User.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / User - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/General/Visible.svg b/docs/src/icons/duotone-icons/General/Visible.svg deleted file mode 100644 index 9718a63a6621f9..00000000000000 --- a/docs/src/icons/duotone-icons/General/Visible.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / General / Visible - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Air-ballon.svg b/docs/src/icons/duotone-icons/Home/Air-ballon.svg deleted file mode 100644 index 778cd19f452901..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Air-ballon.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Air-ballon - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Alarm-clock.svg b/docs/src/icons/duotone-icons/Home/Alarm-clock.svg deleted file mode 100644 index f853284f089958..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Alarm-clock.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Home / Alarm-clock - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Armchair.svg b/docs/src/icons/duotone-icons/Home/Armchair.svg deleted file mode 100644 index 490ff58ed114dd..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Armchair.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Armchair - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Bag-chair.svg b/docs/src/icons/duotone-icons/Home/Bag-chair.svg deleted file mode 100644 index 5f55b1bc71c8b6..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Bag-chair.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Bag-chair - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Bath.svg b/docs/src/icons/duotone-icons/Home/Bath.svg deleted file mode 100644 index e90c78c809e0c9..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Bath.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Bath - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Bed.svg b/docs/src/icons/duotone-icons/Home/Bed.svg deleted file mode 100644 index 9c19e25b575378..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Bed.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Home / Bed - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Book-open.svg b/docs/src/icons/duotone-icons/Home/Book-open.svg deleted file mode 100644 index 051692b4eb31e5..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Book-open.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Book-open - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Book.svg b/docs/src/icons/duotone-icons/Home/Book.svg deleted file mode 100644 index 6d4eb8056de274..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Book.svg +++ /dev/null @@ -1,16 +0,0 @@ - - - - Stockholm-icons / Home / Book - Created with Sketch. - - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Box.svg b/docs/src/icons/duotone-icons/Home/Box.svg deleted file mode 100644 index ca0bc3d36bbbd2..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Box.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Box - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Broom.svg b/docs/src/icons/duotone-icons/Home/Broom.svg deleted file mode 100644 index 8e91173477a2a0..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Broom.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Broom - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Building.svg b/docs/src/icons/duotone-icons/Home/Building.svg deleted file mode 100644 index 3fdea9e6c4a9c7..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Building.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Home / Building - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Bulb#1.svg b/docs/src/icons/duotone-icons/Home/Bulb#1.svg deleted file mode 100644 index 445c43f9bf2b1e..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Bulb#1.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Home / Bulb#1 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Bulb#2.svg b/docs/src/icons/duotone-icons/Home/Bulb#2.svg deleted file mode 100644 index 04895a06091a23..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Bulb#2.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Home / Bulb#2 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Chair#1.svg b/docs/src/icons/duotone-icons/Home/Chair#1.svg deleted file mode 100644 index ba31844e693d93..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Chair#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Chair#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Chair#2.svg b/docs/src/icons/duotone-icons/Home/Chair#2.svg deleted file mode 100644 index fe3ae8cabbb226..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Chair#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Chair#2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Clock.svg b/docs/src/icons/duotone-icons/Home/Clock.svg deleted file mode 100644 index 961f298fe36b2b..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Clock.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Clock - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Commode#1.svg b/docs/src/icons/duotone-icons/Home/Commode#1.svg deleted file mode 100644 index 346fb2c1f18d94..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Commode#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Сommode#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Commode#2.svg b/docs/src/icons/duotone-icons/Home/Commode#2.svg deleted file mode 100644 index dda84759da6f06..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Commode#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Сommode#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Couch.svg b/docs/src/icons/duotone-icons/Home/Couch.svg deleted file mode 100644 index d5a050874761fa..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Couch.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Couch - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Cupboard.svg b/docs/src/icons/duotone-icons/Home/Cupboard.svg deleted file mode 100644 index 564a4804dbb142..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Cupboard.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Сupboard - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Curtains.svg b/docs/src/icons/duotone-icons/Home/Curtains.svg deleted file mode 100644 index 51f39fdd520f57..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Curtains.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Сurtains - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Deer.svg b/docs/src/icons/duotone-icons/Home/Deer.svg deleted file mode 100644 index be78e7a812de8f..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Deer.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Deer - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Door-open.svg b/docs/src/icons/duotone-icons/Home/Door-open.svg deleted file mode 100644 index 68c2642c1e8f2c..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Door-open.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Door-open - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Earth.svg b/docs/src/icons/duotone-icons/Home/Earth.svg deleted file mode 100644 index 9e8f3d295b514d..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Earth.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Earth - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Fireplace.svg b/docs/src/icons/duotone-icons/Home/Fireplace.svg deleted file mode 100644 index f9ad684811fba3..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Fireplace.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Fireplace - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Flashlight.svg b/docs/src/icons/duotone-icons/Home/Flashlight.svg deleted file mode 100644 index 11f7dbdbc844c1..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Flashlight.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Flashlight - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Flower#1.svg b/docs/src/icons/duotone-icons/Home/Flower#1.svg deleted file mode 100644 index 0fa9df0334f310..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Flower#1.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Home / Flower#1 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Flower#2.svg b/docs/src/icons/duotone-icons/Home/Flower#2.svg deleted file mode 100644 index b813b73dd39829..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Flower#2.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Home / Flower#2 - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Flower#3.svg b/docs/src/icons/duotone-icons/Home/Flower#3.svg deleted file mode 100644 index 2be4f87d8158a7..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Flower#3.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Home / Flower#3 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Globe.svg b/docs/src/icons/duotone-icons/Home/Globe.svg deleted file mode 100644 index 2b65a0925f9686..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Globe.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Globe - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Home-heart.svg b/docs/src/icons/duotone-icons/Home/Home-heart.svg deleted file mode 100644 index c82bde6a12c60c..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Home-heart.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Home-heart - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Home.svg b/docs/src/icons/duotone-icons/Home/Home.svg deleted file mode 100644 index 4fb8d17a672e6f..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Home.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Home / Home - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Key.svg b/docs/src/icons/duotone-icons/Home/Key.svg deleted file mode 100644 index 99b8ebb1eec21f..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Key.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Key - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Ladder.svg b/docs/src/icons/duotone-icons/Home/Ladder.svg deleted file mode 100644 index 4aaf6fa074d727..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Ladder.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Ladder - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Lamp#1.svg b/docs/src/icons/duotone-icons/Home/Lamp#1.svg deleted file mode 100644 index 42338e35531b9d..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Lamp#1.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Home / Lamp#1 - Created with Sketch. 
- - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Lamp#2.svg b/docs/src/icons/duotone-icons/Home/Lamp#2.svg deleted file mode 100644 index f837b2c8a3aa17..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Lamp#2.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Home / Lamp#2 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Library.svg b/docs/src/icons/duotone-icons/Home/Library.svg deleted file mode 100644 index 887db6ae97e289..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Library.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Library - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Mailbox.svg b/docs/src/icons/duotone-icons/Home/Mailbox.svg deleted file mode 100644 index c0376e12194f56..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Mailbox.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Mailbox - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Mirror.svg b/docs/src/icons/duotone-icons/Home/Mirror.svg deleted file mode 100644 index fc4a2e83445306..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Mirror.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Mirror - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Picture.svg b/docs/src/icons/duotone-icons/Home/Picture.svg deleted file mode 100644 index 077ff7b7191537..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Picture.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Home / Picture - Created with Sketch. 
- - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Ruller.svg b/docs/src/icons/duotone-icons/Home/Ruller.svg deleted file mode 100644 index f9af2cedc49feb..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Ruller.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Home / Ruller - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Stairs.svg b/docs/src/icons/duotone-icons/Home/Stairs.svg deleted file mode 100644 index 3f215891af8588..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Stairs.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Home / Stairs - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Timer.svg b/docs/src/icons/duotone-icons/Home/Timer.svg deleted file mode 100644 index bc461bc714a5c7..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Timer.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Home / Timer - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Toilet.svg b/docs/src/icons/duotone-icons/Home/Toilet.svg deleted file mode 100644 index 4c54e2408c988a..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Toilet.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Toilet - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Towel.svg b/docs/src/icons/duotone-icons/Home/Towel.svg deleted file mode 100644 index ce10ca4e6daaae..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Towel.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Home / Towel - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Trash.svg b/docs/src/icons/duotone-icons/Home/Trash.svg deleted file mode 100644 index 7e734d8f1d3a07..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Trash.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Trash - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Water-mixer.svg b/docs/src/icons/duotone-icons/Home/Water-mixer.svg deleted file mode 100644 index e0f18e1de1edf4..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Water-mixer.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Home / Water-mixer - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Weight#1.svg b/docs/src/icons/duotone-icons/Home/Weight#1.svg deleted file mode 100644 index c404c3fe5e06c9..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Weight#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Weight#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Weight#2.svg b/docs/src/icons/duotone-icons/Home/Weight#2.svg deleted file mode 100644 index 2db7ca08165bac..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Weight#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Weight#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Wood#1.svg b/docs/src/icons/duotone-icons/Home/Wood#1.svg deleted file mode 100644 index f24924d6ca3c8f..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Wood#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Wood#1 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Wood#2.svg b/docs/src/icons/duotone-icons/Home/Wood#2.svg deleted file mode 100644 index 847ea3178e3298..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Wood#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Home / Wood#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Home/Wood-horse.svg b/docs/src/icons/duotone-icons/Home/Wood-horse.svg deleted file mode 100644 index 3c224e0a473bed..00000000000000 --- a/docs/src/icons/duotone-icons/Home/Wood-horse.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Home / Wood-horse - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-3d.svg b/docs/src/icons/duotone-icons/Layout/Layout-3d.svg deleted file mode 100644 index 6a2b716c093892..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-3d.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-3d - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-4-blocks.svg b/docs/src/icons/duotone-icons/Layout/Layout-4-blocks.svg deleted file mode 100644 index 406da3f25695e4..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-4-blocks.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-4-blocks - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-arrange.inline.svg b/docs/src/icons/duotone-icons/Layout/Layout-arrange.inline.svg deleted file mode 100644 index 1eacead343dde0..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-arrange.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-arrange - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-grid.svg b/docs/src/icons/duotone-icons/Layout/Layout-grid.svg deleted file mode 100644 index 45c0712c8b091e..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-grid.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-grid - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-horizontal.svg b/docs/src/icons/duotone-icons/Layout/Layout-horizontal.svg deleted file mode 100644 index 82c3dfcd265e82..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-horizontal.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-horizontal - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-left-panel-1.svg b/docs/src/icons/duotone-icons/Layout/Layout-left-panel-1.svg deleted file mode 100644 index 2ef4d65265f031..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-left-panel-1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-left-panel-1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-left-panel-2.svg b/docs/src/icons/duotone-icons/Layout/Layout-left-panel-2.svg deleted file mode 100644 index 0cd4cea44c828e..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-left-panel-2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-left-panel-2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-right-panel-1.svg b/docs/src/icons/duotone-icons/Layout/Layout-right-panel-1.svg deleted file mode 100644 index ba590aace4444d..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-right-panel-1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-right-panel-1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-right-panel-2.svg b/docs/src/icons/duotone-icons/Layout/Layout-right-panel-2.svg deleted file mode 100644 index b126046864c9c5..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-right-panel-2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-right-panel-2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-1.svg b/docs/src/icons/duotone-icons/Layout/Layout-top-panel-1.svg deleted file mode 100644 index 0c069849a7f05d..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-top-panel-1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-2.svg b/docs/src/icons/duotone-icons/Layout/Layout-top-panel-2.svg deleted file mode 100644 index a455a7441ea579..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-top-panel-2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-3.svg b/docs/src/icons/duotone-icons/Layout/Layout-top-panel-3.svg deleted file mode 100644 index 5238d4a1bbef2f..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-3.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-top-panel-3 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-4.svg b/docs/src/icons/duotone-icons/Layout/Layout-top-panel-4.svg deleted file mode 100644 index 686de9be48fd5d..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-4.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-top-panel-4 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-5.svg b/docs/src/icons/duotone-icons/Layout/Layout-top-panel-5.svg deleted file mode 100644 index 0d28c0311deacc..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-5.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-top-panel-5 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-6.svg b/docs/src/icons/duotone-icons/Layout/Layout-top-panel-6.svg deleted file mode 100644 index b4871cc60cf424..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-top-panel-6.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-top-panel-6 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Layout/Layout-vertical.svg b/docs/src/icons/duotone-icons/Layout/Layout-vertical.svg deleted file mode 100644 index bcd4fa57eb7b35..00000000000000 --- a/docs/src/icons/duotone-icons/Layout/Layout-vertical.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Layout / Layout-vertical - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Map/Compass.svg b/docs/src/icons/duotone-icons/Map/Compass.svg deleted file mode 100644 index ee9f79bc39b776..00000000000000 --- a/docs/src/icons/duotone-icons/Map/Compass.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Map / Compass - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Map/Direction#1.svg b/docs/src/icons/duotone-icons/Map/Direction#1.svg deleted file mode 100644 index 8f839e96750d58..00000000000000 --- a/docs/src/icons/duotone-icons/Map/Direction#1.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Map / Direction#1 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Map/Direction#2.svg b/docs/src/icons/duotone-icons/Map/Direction#2.svg deleted file mode 100644 index f7251b792f416d..00000000000000 --- a/docs/src/icons/duotone-icons/Map/Direction#2.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Map / Direction#2 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Map/Location-arrow.svg b/docs/src/icons/duotone-icons/Map/Location-arrow.svg deleted file mode 100644 index 224d9bb7b0489b..00000000000000 --- a/docs/src/icons/duotone-icons/Map/Location-arrow.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Map / Location-arrow - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Map/Marker#1.svg b/docs/src/icons/duotone-icons/Map/Marker#1.svg deleted file mode 100644 index f368efc00b5440..00000000000000 --- a/docs/src/icons/duotone-icons/Map/Marker#1.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Map / Marker#1 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Map/Marker#2.svg b/docs/src/icons/duotone-icons/Map/Marker#2.svg deleted file mode 100644 index 8ed1508526b577..00000000000000 --- a/docs/src/icons/duotone-icons/Map/Marker#2.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Map / Marker#2 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Map/Position.svg b/docs/src/icons/duotone-icons/Map/Position.svg deleted file mode 100644 index 09e68bcc0a69ce..00000000000000 --- a/docs/src/icons/duotone-icons/Map/Position.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Map / Position - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Add-music.svg b/docs/src/icons/duotone-icons/Media/Add-music.svg deleted file mode 100644 index 4dc5e08f1ec160..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Add-music.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Add-music - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Airplay-video.svg b/docs/src/icons/duotone-icons/Media/Airplay-video.svg deleted file mode 100644 index 26a73c2cc72582..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Airplay-video.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Airplay-video - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Airplay.svg b/docs/src/icons/duotone-icons/Media/Airplay.svg deleted file mode 100644 index aabc77ec21450e..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Airplay.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Airplay - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Back.svg b/docs/src/icons/duotone-icons/Media/Back.svg deleted file mode 100644 index 00b5c51ff71ff2..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Back.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Back - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Backward.svg b/docs/src/icons/duotone-icons/Media/Backward.svg deleted file mode 100644 index 59c752ed32a2b2..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Backward.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Backward - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/CD.svg b/docs/src/icons/duotone-icons/Media/CD.svg deleted file mode 100644 index 57a49fef7c3931..00000000000000 --- a/docs/src/icons/duotone-icons/Media/CD.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / CD - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/DVD.svg b/docs/src/icons/duotone-icons/Media/DVD.svg deleted file mode 100644 index 32da44a2ef8041..00000000000000 --- a/docs/src/icons/duotone-icons/Media/DVD.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / DVD - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Eject.svg b/docs/src/icons/duotone-icons/Media/Eject.svg deleted file mode 100644 index 08abe08779b1f8..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Eject.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Eject - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Equalizer.svg b/docs/src/icons/duotone-icons/Media/Equalizer.svg deleted file mode 100644 index 2c24d6047a9ffc..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Equalizer.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Media / Equalizer - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Forward.svg b/docs/src/icons/duotone-icons/Media/Forward.svg deleted file mode 100644 index 256e889f848b03..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Forward.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Forward - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Media-library#1.svg b/docs/src/icons/duotone-icons/Media/Media-library#1.svg deleted file mode 100644 index e5a48aae4c0880..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Media-library#1.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Media / Media-library#1 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Media-library#2.svg b/docs/src/icons/duotone-icons/Media/Media-library#2.svg deleted file mode 100644 index d71bdc287fb53a..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Media-library#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Media-library#2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Media-library#3.svg b/docs/src/icons/duotone-icons/Media/Media-library#3.svg deleted file mode 100644 index d4a452967fa562..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Media-library#3.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - Stockholm-icons / Media / Media-library#3 - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Movie-Lane #2.svg b/docs/src/icons/duotone-icons/Media/Movie-Lane #2.svg deleted file mode 100644 index 9537c2071ed97c..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Movie-Lane #2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Movie-Lane #2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Movie-lane#1.svg b/docs/src/icons/duotone-icons/Media/Movie-lane#1.svg deleted file mode 100644 index 95f0fda08b9a58..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Movie-lane#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Movie-lane#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Music-cloud.svg b/docs/src/icons/duotone-icons/Media/Music-cloud.svg deleted file mode 100644 index a9d4208101e05b..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Music-cloud.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Music-cloud - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Music-note.svg b/docs/src/icons/duotone-icons/Media/Music-note.svg deleted file mode 100644 index be5a690d4dd80d..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Music-note.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Media / Music-note - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Music.svg b/docs/src/icons/duotone-icons/Media/Music.svg deleted file mode 100644 index 7caad53243f9a8..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Music.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Media / Music - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Mute.svg b/docs/src/icons/duotone-icons/Media/Mute.svg deleted file mode 100644 index 2f466bdedc6f26..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Mute.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Mute - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Next.svg b/docs/src/icons/duotone-icons/Media/Next.svg deleted file mode 100644 index 1dfc7298266628..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Next.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Next - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Pause.svg b/docs/src/icons/duotone-icons/Media/Pause.svg deleted file mode 100644 index bc5cddde37cef9..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Pause.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Media / Pause - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Play.svg b/docs/src/icons/duotone-icons/Media/Play.svg deleted file mode 100644 index 5d8b36aee17933..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Play.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Media / Play - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Playlist#1.svg b/docs/src/icons/duotone-icons/Media/Playlist#1.svg deleted file mode 100644 index 3ff7f91cfbb534..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Playlist#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Playlist#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Playlist#2.svg b/docs/src/icons/duotone-icons/Media/Playlist#2.svg deleted file mode 100644 index 1f08cb98403103..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Playlist#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Playlist#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Rec.svg b/docs/src/icons/duotone-icons/Media/Rec.svg deleted file mode 100644 index 6fe7854d68dd91..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Rec.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Media / Rec - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Repeat-one.svg b/docs/src/icons/duotone-icons/Media/Repeat-one.svg deleted file mode 100644 index ba35fb39df29a0..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Repeat-one.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Repeat-one - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Repeat.svg b/docs/src/icons/duotone-icons/Media/Repeat.svg deleted file mode 100644 index fd633af8698c0e..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Repeat.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Repeat - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Shuffle.svg b/docs/src/icons/duotone-icons/Media/Shuffle.svg deleted file mode 100644 index 91cb42b2360896..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Shuffle.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Shuffle - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Volume-down.svg b/docs/src/icons/duotone-icons/Media/Volume-down.svg deleted file mode 100644 index a6c3dc0976c1fa..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Volume-down.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Volume-down - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Volume-full.svg b/docs/src/icons/duotone-icons/Media/Volume-full.svg deleted file mode 100644 index 809aea1797f721..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Volume-full.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Volume-full - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Volume-half.svg b/docs/src/icons/duotone-icons/Media/Volume-half.svg deleted file mode 100644 index 50645d59433057..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Volume-half.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Volume-half - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Volume-up.svg b/docs/src/icons/duotone-icons/Media/Volume-up.svg deleted file mode 100644 index a9d13b3d784022..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Volume-up.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Volume-up - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Vynil.svg b/docs/src/icons/duotone-icons/Media/Vynil.svg deleted file mode 100644 index 26e06553a2d05e..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Vynil.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Vynil - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Media/Youtube.svg b/docs/src/icons/duotone-icons/Media/Youtube.svg deleted file mode 100644 index c410cfd6e3521e..00000000000000 --- a/docs/src/icons/duotone-icons/Media/Youtube.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Media / Youtube - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Angle-double-down.svg b/docs/src/icons/duotone-icons/Navigation/Angle-double-down.svg deleted file mode 100644 index 31bc6bc7725f27..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Angle-double-down.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Angle-double-down - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Angle-double-left.svg b/docs/src/icons/duotone-icons/Navigation/Angle-double-left.svg deleted file mode 100644 index 3930a6e17fe3c2..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Angle-double-left.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Angle-double-left - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Angle-double-right.svg b/docs/src/icons/duotone-icons/Navigation/Angle-double-right.svg deleted file mode 100644 index 746d7190c412e5..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Angle-double-right.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Angle-double-right - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Angle-double-up.svg b/docs/src/icons/duotone-icons/Navigation/Angle-double-up.svg deleted file mode 100644 index e3aa7119b2a47d..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Angle-double-up.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Angle-double-up - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Angle-down.svg b/docs/src/icons/duotone-icons/Navigation/Angle-down.svg deleted file mode 100644 index 312bbd30bc8cf0..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Angle-down.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Navigation / Angle-down - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Angle-left.svg b/docs/src/icons/duotone-icons/Navigation/Angle-left.svg deleted file mode 100644 index d79cffa3ab917d..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Angle-left.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Navigation / Angle-left - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Angle-right.svg b/docs/src/icons/duotone-icons/Navigation/Angle-right.svg deleted file mode 100644 index c9bc069f48b92c..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Angle-right.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Navigation / Angle-right - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Angle-up.svg b/docs/src/icons/duotone-icons/Navigation/Angle-up.svg deleted file mode 100644 index 08e9f4cb4081f8..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Angle-up.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Navigation / Stockholm-icons / Navigation / Angle-up - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-down.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-down.svg deleted file mode 100644 index 919686a9f712f8..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-down.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-down - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-from-bottom.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-from-bottom.svg deleted file mode 100644 index 333e0ea979de00..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-from-bottom.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-from-bottom - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-from-left.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-from-left.svg deleted file mode 100644 index e5f3ccb181a5e4..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-from-left.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-from-left - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-from-right.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-from-right.svg deleted file mode 100644 index 681214c192fb57..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-from-right.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-from-right - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-from-top.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-from-top.svg deleted file mode 100644 index 0ff46ad767eb68..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-from-top.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-from-top - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-left.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-left.svg deleted file mode 100644 index 3f6355b8d08740..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-left.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-left - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-right.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-right.svg deleted file mode 100644 index bba87e67d57d04..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-right.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-right - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-to-bottom.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-to-bottom.svg deleted file mode 100644 index 76f58333f8bb95..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-to-bottom.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-to-bottom - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-to-left.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-to-left.svg deleted file mode 100644 index f67a0142fdd76e..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-to-left.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-to-left - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-to-right.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-to-right.svg deleted file mode 100644 index 69751e5c9f837c..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-to-right.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-to-right - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-to-up.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-to-up.svg deleted file mode 100644 index 17fa9377398822..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-to-up.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-to-up - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrow-up.svg b/docs/src/icons/duotone-icons/Navigation/Arrow-up.svg deleted file mode 100644 index f8e955e7d68fe2..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrow-up.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrow-up - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrows-h.svg b/docs/src/icons/duotone-icons/Navigation/Arrows-h.svg deleted file mode 100644 index cf92b4f291e35e..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrows-h.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrows-h - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Arrows-v.svg b/docs/src/icons/duotone-icons/Navigation/Arrows-v.svg deleted file mode 100644 index 1b663d46d54a74..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Arrows-v.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Arrows-v - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Check.svg b/docs/src/icons/duotone-icons/Navigation/Check.svg deleted file mode 100644 index 57ea7ee42081e9..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Check.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Navigation / Check - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Close.svg b/docs/src/icons/duotone-icons/Navigation/Close.svg deleted file mode 100644 index 314f3c79c69a5f..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Close.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Close - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Double-check.svg b/docs/src/icons/duotone-icons/Navigation/Double-check.svg deleted file mode 100644 index 618e019a226cd4..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Double-check.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Double-check - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Down-2.svg b/docs/src/icons/duotone-icons/Navigation/Down-2.svg deleted file mode 100644 index 10d01c5dafc0ed..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Down-2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Down-2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Down-left.svg b/docs/src/icons/duotone-icons/Navigation/Down-left.svg deleted file mode 100644 index 9dad4229c32660..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Down-left.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Down-left - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Down-right.svg b/docs/src/icons/duotone-icons/Navigation/Down-right.svg deleted file mode 100644 index 1da82d8fb148c3..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Down-right.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Down-right - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Exchange.svg b/docs/src/icons/duotone-icons/Navigation/Exchange.svg deleted file mode 100644 index 915ece6640f603..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Exchange.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Navigation / Exchange - Created with Sketch. 
- - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Left 3.svg b/docs/src/icons/duotone-icons/Navigation/Left 3.svg deleted file mode 100644 index 3b2e3f3c5c6eab..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Left 3.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Left 3 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Left-2.svg b/docs/src/icons/duotone-icons/Navigation/Left-2.svg deleted file mode 100644 index 899e3e4b756b4d..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Left-2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Left-2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Minus.svg b/docs/src/icons/duotone-icons/Navigation/Minus.svg deleted file mode 100644 index f75d0cda744685..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Minus.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - Stockholm-icons / Navigation / Minus - Created with Sketch. - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Plus.svg b/docs/src/icons/duotone-icons/Navigation/Plus.svg deleted file mode 100644 index c2536bca7d4897..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Plus.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Navigation / Plus - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Right 3.svg b/docs/src/icons/duotone-icons/Navigation/Right 3.svg deleted file mode 100644 index 785e2dc7f2d373..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Right 3.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Right 3 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Right-2.svg b/docs/src/icons/duotone-icons/Navigation/Right-2.svg deleted file mode 100644 index 27b7fc50262ff0..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Right-2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Right-2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Route.svg b/docs/src/icons/duotone-icons/Navigation/Route.svg deleted file mode 100644 index f422e3beb26b0f..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Route.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Route - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Sign-in.svg b/docs/src/icons/duotone-icons/Navigation/Sign-in.svg deleted file mode 100644 index c28eeea51fdbd9..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Sign-in.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Sign-in - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Sign-out.svg b/docs/src/icons/duotone-icons/Navigation/Sign-out.svg deleted file mode 100644 index dffc73f7e82449..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Sign-out.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Navigation / Sign-out - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Up-2.svg b/docs/src/icons/duotone-icons/Navigation/Up-2.svg deleted file mode 100644 index 64320af30a8379..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Up-2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Up-2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Up-down.svg b/docs/src/icons/duotone-icons/Navigation/Up-down.svg deleted file mode 100644 index 90d7612500a379..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Up-down.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Navigation / Up-down - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Up-left.svg b/docs/src/icons/duotone-icons/Navigation/Up-left.svg deleted file mode 100644 index ce5e60ebe7899d..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Up-left.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Up-left - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Up-right.svg b/docs/src/icons/duotone-icons/Navigation/Up-right.svg deleted file mode 100644 index 551fc784fb00a6..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Up-right.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Navigation / Up-right - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Navigation/Waiting.svg b/docs/src/icons/duotone-icons/Navigation/Waiting.svg deleted file mode 100644 index b89ed85ed79c4f..00000000000000 --- a/docs/src/icons/duotone-icons/Navigation/Waiting.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Navigation / Waiting - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/ATM.svg b/docs/src/icons/duotone-icons/Shopping/ATM.svg deleted file mode 100644 index 17d8ed9a2015ee..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/ATM.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / ATM - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Bag#1.svg b/docs/src/icons/duotone-icons/Shopping/Bag#1.svg deleted file mode 100644 index 8b5c65a5e2980d..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Bag#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Bag#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Bag#2.svg b/docs/src/icons/duotone-icons/Shopping/Bag#2.svg deleted file mode 100644 index b80ac67bfdcd1a..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Bag#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Bag#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Barcode-read.svg b/docs/src/icons/duotone-icons/Shopping/Barcode-read.svg deleted file mode 100644 index 859526569f53ff..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Barcode-read.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Barcode-read - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Barcode-scan.svg b/docs/src/icons/duotone-icons/Shopping/Barcode-scan.svg deleted file mode 100644 index 0e8ae00c3e89c9..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Barcode-scan.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Shopping / Barcode-scan - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Barcode.svg b/docs/src/icons/duotone-icons/Shopping/Barcode.svg deleted file mode 100644 index 2a6b7ef4442a0d..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Barcode.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Barcode - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Bitcoin.svg b/docs/src/icons/duotone-icons/Shopping/Bitcoin.svg deleted file mode 100644 index 416c26f54f6e76..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Bitcoin.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Shopping / Bitcoin - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Box#1.svg b/docs/src/icons/duotone-icons/Shopping/Box#1.svg deleted file mode 100644 index bc4e16629d2f03..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Box#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Box#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Box#3.svg b/docs/src/icons/duotone-icons/Shopping/Box#3.svg deleted file mode 100644 index 94e22e544bc0b5..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Box#3.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Box#3 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Box2.inline.svg b/docs/src/icons/duotone-icons/Shopping/Box2.inline.svg deleted file mode 100644 index 194d791c691be5..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Box2.inline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Box#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Calculator.svg b/docs/src/icons/duotone-icons/Shopping/Calculator.svg deleted file mode 100644 index ff579de5488adc..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Calculator.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Calculator - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Cart#1.svg b/docs/src/icons/duotone-icons/Shopping/Cart#1.svg deleted file mode 100644 index 98dcc61f75722b..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Cart#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Cart#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Cart#2.svg b/docs/src/icons/duotone-icons/Shopping/Cart#2.svg deleted file mode 100644 index f7c6b10071fd07..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Cart#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Cart#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Cart#3.svg b/docs/src/icons/duotone-icons/Shopping/Cart#3.svg deleted file mode 100644 index 7dc98999fa41af..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Cart#3.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Cart#3 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Chart-bar#1.svg b/docs/src/icons/duotone-icons/Shopping/Chart-bar#1.svg deleted file mode 100644 index c7953345c33162..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Chart-bar#1.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Shopping / Chart-bar#1 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Chart-bar#2.svg b/docs/src/icons/duotone-icons/Shopping/Chart-bar#2.svg deleted file mode 100644 index 3c39a20b38e475..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Chart-bar#2.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Shopping / Chart-bar#2 - Created with Sketch. 
- - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Chart-bar#3.svg b/docs/src/icons/duotone-icons/Shopping/Chart-bar#3.svg deleted file mode 100644 index 1ae39c08f712be..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Chart-bar#3.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Shopping / Chart-bar#3 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Chart-line#1.svg b/docs/src/icons/duotone-icons/Shopping/Chart-line#1.svg deleted file mode 100644 index a1270123ff2be3..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Chart-line#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Chart-line#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Chart-line#2.svg b/docs/src/icons/duotone-icons/Shopping/Chart-line#2.svg deleted file mode 100644 index 5ce8692cf2028e..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Chart-line#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Chart-line#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Chart-pie.svg b/docs/src/icons/duotone-icons/Shopping/Chart-pie.svg deleted file mode 100644 index 7d64617a77f49f..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Chart-pie.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Chart-pie - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Credit-card.svg b/docs/src/icons/duotone-icons/Shopping/Credit-card.svg deleted file mode 100644 index d03233133bae33..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Credit-card.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Shopping / Credit-card - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Dollar.svg b/docs/src/icons/duotone-icons/Shopping/Dollar.svg deleted file mode 100644 index fbec5a6ea2c402..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Dollar.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Shopping / Dollar - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Euro.svg b/docs/src/icons/duotone-icons/Shopping/Euro.svg deleted file mode 100644 index 12342c26adaa51..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Euro.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Euro - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Gift.svg b/docs/src/icons/duotone-icons/Shopping/Gift.svg deleted file mode 100644 index dd738e7d8d456f..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Gift.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Gift - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Loader.svg b/docs/src/icons/duotone-icons/Shopping/Loader.svg deleted file mode 100644 index d8b4f38d3c9d5c..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Loader.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Shopping / Loader - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/MC.svg b/docs/src/icons/duotone-icons/Shopping/MC.svg deleted file mode 100644 index 52427364bc459a..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/MC.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / MC - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Money.svg b/docs/src/icons/duotone-icons/Shopping/Money.svg deleted file mode 100644 index 067f79763c0345..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Money.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Money - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Pound.svg b/docs/src/icons/duotone-icons/Shopping/Pound.svg deleted file mode 100644 index 2e58b0c653c34e..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Pound.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Pound - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Price #1.svg b/docs/src/icons/duotone-icons/Shopping/Price #1.svg deleted file mode 100644 index b03905f8af62f4..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Price #1.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Shopping / Price #1 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Price #2.svg b/docs/src/icons/duotone-icons/Shopping/Price #2.svg deleted file mode 100644 index f6718b67f98cb7..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Price #2.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Stockholm-icons / Shopping / Price #2 - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Rouble.svg b/docs/src/icons/duotone-icons/Shopping/Rouble.svg deleted file mode 100644 index 202670fec44715..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Rouble.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Rouble - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Safe.svg b/docs/src/icons/duotone-icons/Shopping/Safe.svg deleted file mode 100644 index 6ccf9983efae47..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Safe.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Safe - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Sale#1.svg b/docs/src/icons/duotone-icons/Shopping/Sale#1.svg deleted file mode 100644 index cdf7418eee801a..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Sale#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Sale#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Sale#2.svg b/docs/src/icons/duotone-icons/Shopping/Sale#2.svg deleted file mode 100644 index 0f6a35b5ae7d9a..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Sale#2.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - Stockholm-icons / Shopping / Sale#2 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Settings.svg b/docs/src/icons/duotone-icons/Shopping/Settings.svg deleted file mode 100644 index 2f36fbda2f5805..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Settings.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Settings - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Sort#1.svg b/docs/src/icons/duotone-icons/Shopping/Sort#1.svg deleted file mode 100644 index f3a6fe4448727c..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Sort#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Sort#1 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Sort#2.svg b/docs/src/icons/duotone-icons/Shopping/Sort#2.svg deleted file mode 100644 index fa91bafde482e9..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Sort#2.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Shopping / Sort#2 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Sort#3.svg b/docs/src/icons/duotone-icons/Shopping/Sort#3.svg deleted file mode 100644 index 0a25fdf4ccbaa6..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Sort#3.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Shopping / Sort#3 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Ticket.svg b/docs/src/icons/duotone-icons/Shopping/Ticket.svg deleted file mode 100644 index baf2904bcf6317..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Ticket.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Shopping / Ticket - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Wallet#2.svg b/docs/src/icons/duotone-icons/Shopping/Wallet#2.svg deleted file mode 100644 index 6d36de8dfc0095..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Wallet#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Shopping / Wallet#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Wallet#3.svg b/docs/src/icons/duotone-icons/Shopping/Wallet#3.svg deleted file mode 100644 index 96c021965f3991..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Wallet#3.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Shopping / Wallet#3 - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Shopping/Wallet.svg b/docs/src/icons/duotone-icons/Shopping/Wallet.svg deleted file mode 100644 index 7521cad8db619a..00000000000000 --- a/docs/src/icons/duotone-icons/Shopping/Wallet.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Shopping / Wallet - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Align-auto.svg b/docs/src/icons/duotone-icons/Text/Align-auto.svg deleted file mode 100644 index d1b50d0b2d29a6..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Align-auto.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Align-auto - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Align-center.svg b/docs/src/icons/duotone-icons/Text/Align-center.svg deleted file mode 100644 index 29f3df016d42e1..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Align-center.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Align-center - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Align-justify.svg b/docs/src/icons/duotone-icons/Text/Align-justify.svg deleted file mode 100644 index 920739f13a768b..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Align-justify.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Align-justify - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Align-left.svg b/docs/src/icons/duotone-icons/Text/Align-left.svg deleted file mode 100644 index 636f9d705a248b..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Align-left.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Text / Align-left - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Align-right.svg b/docs/src/icons/duotone-icons/Text/Align-right.svg deleted file mode 100644 index b413f05a5d15c2..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Align-right.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Align-right - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Article.svg b/docs/src/icons/duotone-icons/Text/Article.svg deleted file mode 100644 index e9ab9b7dacd814..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Article.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Article - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Bold.svg b/docs/src/icons/duotone-icons/Text/Bold.svg deleted file mode 100644 index 432722e22d2c8b..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Bold.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Text / Bold - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Bullet-list.svg b/docs/src/icons/duotone-icons/Text/Bullet-list.svg deleted file mode 100644 index 8d44c494d03704..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Bullet-list.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Bullet-list - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Code.svg b/docs/src/icons/duotone-icons/Text/Code.svg deleted file mode 100644 index 1b8ef1bdbd369f..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Code.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Text / Code - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Edit-text.svg b/docs/src/icons/duotone-icons/Text/Edit-text.svg deleted file mode 100644 index 1f4a0353cecc87..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Edit-text.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Edit-text - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Filter.svg b/docs/src/icons/duotone-icons/Text/Filter.svg deleted file mode 100644 index 952e47c80ea39e..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Filter.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Text / Filter - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Font.svg b/docs/src/icons/duotone-icons/Text/Font.svg deleted file mode 100644 index 26decb6c78dfce..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Font.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Font - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/H1.svg b/docs/src/icons/duotone-icons/Text/H1.svg deleted file mode 100644 index db9b0453a9f54e..00000000000000 --- a/docs/src/icons/duotone-icons/Text/H1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / H1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/H2.svg b/docs/src/icons/duotone-icons/Text/H2.svg deleted file mode 100644 index bc4b3263fe4d4b..00000000000000 --- a/docs/src/icons/duotone-icons/Text/H2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / H2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Itallic.svg b/docs/src/icons/duotone-icons/Text/Itallic.svg deleted file mode 100644 index 290852f7ec49c3..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Itallic.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Text / Itallic - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Menu.svg b/docs/src/icons/duotone-icons/Text/Menu.svg deleted file mode 100644 index 86edb0b90e3faa..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Menu.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Menu - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Paragraph.svg b/docs/src/icons/duotone-icons/Text/Paragraph.svg deleted file mode 100644 index 24385211d43730..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Paragraph.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Text / Paragraph - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Quote#1.svg b/docs/src/icons/duotone-icons/Text/Quote#1.svg deleted file mode 100644 index 65c6217d2cbbc4..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Quote#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Quote#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Quote#2.svg b/docs/src/icons/duotone-icons/Text/Quote#2.svg deleted file mode 100644 index 23efc57c5d6026..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Quote#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Quote#2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Redo.svg b/docs/src/icons/duotone-icons/Text/Redo.svg deleted file mode 100644 index dd7f98e7c7f803..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Redo.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Text / Redo - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Strikethrough.svg b/docs/src/icons/duotone-icons/Text/Strikethrough.svg deleted file mode 100644 index 421c622c558f66..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Strikethrough.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Strikethrough - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Text-height.svg b/docs/src/icons/duotone-icons/Text/Text-height.svg deleted file mode 100644 index 16bd2badb21b08..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Text-height.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Text-height - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Text-width.svg b/docs/src/icons/duotone-icons/Text/Text-width.svg deleted file mode 100644 index 09ee00b085a679..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Text-width.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Text-width - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Text.svg b/docs/src/icons/duotone-icons/Text/Text.svg deleted file mode 100644 index 54f2574d230a38..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Text.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Text / Text - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Underline.svg b/docs/src/icons/duotone-icons/Text/Underline.svg deleted file mode 100644 index ef85097f026b8a..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Underline.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Text / Underline - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Text/Undo.svg b/docs/src/icons/duotone-icons/Text/Undo.svg deleted file mode 100644 index 80774fdc37928a..00000000000000 --- a/docs/src/icons/duotone-icons/Text/Undo.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Text / Undo - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Angle Grinder.svg b/docs/src/icons/duotone-icons/Tools/Angle Grinder.svg deleted file mode 100644 index e9c199416acac5..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Angle Grinder.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Tools / Angle Grinder - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Axe.svg b/docs/src/icons/duotone-icons/Tools/Axe.svg deleted file mode 100644 index 1de55a4856024e..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Axe.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Axe - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Brush.svg b/docs/src/icons/duotone-icons/Tools/Brush.svg deleted file mode 100644 index 551ee29fe2943a..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Brush.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Brush - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Compass.svg b/docs/src/icons/duotone-icons/Tools/Compass.svg deleted file mode 100644 index 91173b29a485c7..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Compass.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Compass - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Hummer#2.svg b/docs/src/icons/duotone-icons/Tools/Hummer#2.svg deleted file mode 100644 index fc672f3f1819a7..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Hummer#2.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Tools / Hummer#2 - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Hummer.svg b/docs/src/icons/duotone-icons/Tools/Hummer.svg deleted file mode 100644 index 27c3f313fd3943..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Hummer.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Hummer - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Pantone.svg b/docs/src/icons/duotone-icons/Tools/Pantone.svg deleted file mode 100644 index 3a595019117d70..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Pantone.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Tools / Pantone - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Road-Cone.svg b/docs/src/icons/duotone-icons/Tools/Road-Cone.svg deleted file mode 100644 index ea684b78dbf329..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Road-Cone.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Road-Cone - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Roller.svg b/docs/src/icons/duotone-icons/Tools/Roller.svg deleted file mode 100644 index f7aa36b6eccea3..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Roller.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Tools / Roller - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Roulette.svg b/docs/src/icons/duotone-icons/Tools/Roulette.svg deleted file mode 100644 index 9f2651e2a19ea1..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Roulette.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Roulette - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Screwdriver.svg b/docs/src/icons/duotone-icons/Tools/Screwdriver.svg deleted file mode 100644 index 64f376072c1a3d..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Screwdriver.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Screwdriver - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Shovel.svg b/docs/src/icons/duotone-icons/Tools/Shovel.svg deleted file mode 100644 index 81f60545aa7205..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Shovel.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Shovel - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Spatula.svg b/docs/src/icons/duotone-icons/Tools/Spatula.svg deleted file mode 100644 index cfe2042e58ff7b..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Spatula.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Spatula - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Swiss-knife.svg b/docs/src/icons/duotone-icons/Tools/Swiss-knife.svg deleted file mode 100644 index 15e78d4cc79f50..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Swiss-knife.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Swiss-knife - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Tools/Tools.svg b/docs/src/icons/duotone-icons/Tools/Tools.svg deleted file mode 100644 index e155753427b106..00000000000000 --- a/docs/src/icons/duotone-icons/Tools/Tools.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Tools / Tools - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Celcium.svg b/docs/src/icons/duotone-icons/Weather/Celcium.svg deleted file mode 100644 index 0a5045964a6c0a..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Celcium.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Celcium - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Cloud#1.svg b/docs/src/icons/duotone-icons/Weather/Cloud#1.svg deleted file mode 100644 index 371a6a3a9d6f8e..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Cloud#1.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Weather / Cloud#1 - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Cloud#2.svg b/docs/src/icons/duotone-icons/Weather/Cloud#2.svg deleted file mode 100644 index 81d73fb7be2bdd..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Cloud#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Cloud#2 - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Cloud-fog.svg b/docs/src/icons/duotone-icons/Weather/Cloud-fog.svg deleted file mode 100644 index d62660ffedb430..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Cloud-fog.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Cloud-fog - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Cloud-sun.svg b/docs/src/icons/duotone-icons/Weather/Cloud-sun.svg deleted file mode 100644 index 66e5d5a74c29f3..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Cloud-sun.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Cloud-sun - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Cloud-wind.svg b/docs/src/icons/duotone-icons/Weather/Cloud-wind.svg deleted file mode 100644 index 3c6fd8a5c7fe37..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Cloud-wind.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Weather / Cloud-wind - Created with Sketch. - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Cloudy-night.svg b/docs/src/icons/duotone-icons/Weather/Cloudy-night.svg deleted file mode 100644 index 38976c4ecd2bd1..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Cloudy-night.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Cloudy-night - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Cloudy.svg b/docs/src/icons/duotone-icons/Weather/Cloudy.svg deleted file mode 100644 index 9a5d9f92bd4fef..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Cloudy.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Cloudy - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Day-rain.svg b/docs/src/icons/duotone-icons/Weather/Day-rain.svg deleted file mode 100644 index 8c55a58297962c..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Day-rain.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Day-rain - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Fahrenheit.svg b/docs/src/icons/duotone-icons/Weather/Fahrenheit.svg deleted file mode 100644 index f4e84868adc470..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Fahrenheit.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Fahrenheit - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Fog.svg b/docs/src/icons/duotone-icons/Weather/Fog.svg deleted file mode 100644 index 9129b0695b419c..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Fog.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - Stockholm-icons / Weather / Fog - Created with Sketch. - - - - - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Moon.svg b/docs/src/icons/duotone-icons/Weather/Moon.svg deleted file mode 100644 index bdc4f7a8f0b2e7..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Moon.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Weather / Moon - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Night-fog.svg b/docs/src/icons/duotone-icons/Weather/Night-fog.svg deleted file mode 100644 index f4029889af609f..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Night-fog.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Night-fog - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Night-rain.svg b/docs/src/icons/duotone-icons/Weather/Night-rain.svg deleted file mode 100644 index 28618b58b7b9cf..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Night-rain.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Night-rain - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Rain#1.svg b/docs/src/icons/duotone-icons/Weather/Rain#1.svg deleted file mode 100644 index 1e782ac52c53d2..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Rain#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Rain#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Rain#2.svg b/docs/src/icons/duotone-icons/Weather/Rain#2.svg deleted file mode 100644 index a4c3596a9e3a9e..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Rain#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Rain#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Rain#5.svg b/docs/src/icons/duotone-icons/Weather/Rain#5.svg deleted file mode 100644 index 8a6c3b3cee7655..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Rain#5.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Rain#5 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Rainbow.svg b/docs/src/icons/duotone-icons/Weather/Rainbow.svg deleted file mode 100644 index e3714478a8f4f0..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Rainbow.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Stockholm-icons / Weather / Rainbow - Created with Sketch. 
- - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Snow#1.svg b/docs/src/icons/duotone-icons/Weather/Snow#1.svg deleted file mode 100644 index f0411ec5aa9a79..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Snow#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Snow#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Snow#2.svg b/docs/src/icons/duotone-icons/Weather/Snow#2.svg deleted file mode 100644 index 0ba0a489c27770..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Snow#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Snow#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Snow#3.svg b/docs/src/icons/duotone-icons/Weather/Snow#3.svg deleted file mode 100644 index 6bf44afabe4718..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Snow#3.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Snow#3 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Snow.svg b/docs/src/icons/duotone-icons/Weather/Snow.svg deleted file mode 100644 index ec6ad8fdcd11d4..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Snow.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Snow - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Storm.svg b/docs/src/icons/duotone-icons/Weather/Storm.svg deleted file mode 100644 index 447f02c7c16a70..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Storm.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Storm - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Sun-fog.svg b/docs/src/icons/duotone-icons/Weather/Sun-fog.svg deleted file mode 100644 index ea7ee8e32c1e6f..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Sun-fog.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Sun-fog - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Sun.svg b/docs/src/icons/duotone-icons/Weather/Sun.svg deleted file mode 100644 index 14c70cb8caa6e0..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Sun.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Sun - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Suset#1.svg b/docs/src/icons/duotone-icons/Weather/Suset#1.svg deleted file mode 100644 index ba575e3ae21120..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Suset#1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Suset#1 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Suset#2.svg b/docs/src/icons/duotone-icons/Weather/Suset#2.svg deleted file mode 100644 index 3994120748a0b7..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Suset#2.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Suset#2 - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Temperature-empty.svg b/docs/src/icons/duotone-icons/Weather/Temperature-empty.svg deleted file mode 100644 index 86e435a1b69dd7..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Temperature-empty.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Weather / Temperature-empty - Created with Sketch. 
- - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Temperature-full.svg b/docs/src/icons/duotone-icons/Weather/Temperature-full.svg deleted file mode 100644 index c8bc2eaa5cb528..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Temperature-full.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Weather / Temperature-full - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Temperature-half.svg b/docs/src/icons/duotone-icons/Weather/Temperature-half.svg deleted file mode 100644 index b2f7f4c66a3a4f..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Temperature-half.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Stockholm-icons / Weather / Temperature-half - Created with Sketch. - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Thunder-night.svg b/docs/src/icons/duotone-icons/Weather/Thunder-night.svg deleted file mode 100644 index a665fa63fae3f9..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Thunder-night.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Thunder-night - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Thunder.svg b/docs/src/icons/duotone-icons/Weather/Thunder.svg deleted file mode 100644 index 9c34c416a43fec..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Thunder.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Thunder - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Umbrella.svg b/docs/src/icons/duotone-icons/Weather/Umbrella.svg deleted file mode 100644 index fc1b8104cef63d..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Umbrella.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Umbrella - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/docs/src/icons/duotone-icons/Weather/Wind.svg b/docs/src/icons/duotone-icons/Weather/Wind.svg deleted file mode 100644 index f9a178f93be8ed..00000000000000 --- a/docs/src/icons/duotone-icons/Weather/Wind.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Stockholm-icons / Weather / Wind - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/docs/src/icons/social/facebook.svg b/docs/src/icons/social/facebook.svg deleted file mode 100644 index c7955a6a65ea49..00000000000000 --- a/docs/src/icons/social/facebook.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/docs/src/icons/social/instagram.svg b/docs/src/icons/social/instagram.svg deleted file mode 100644 index 08db6f2cd86623..00000000000000 --- a/docs/src/icons/social/instagram.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/docs/src/icons/social/pinterest.svg b/docs/src/icons/social/pinterest.svg deleted file mode 100644 index 0f2b48c934787e..00000000000000 --- a/docs/src/icons/social/pinterest.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/docs/src/icons/social/twitter.svg b/docs/src/icons/social/twitter.svg deleted file mode 100644 index 4dcd4a9c7e6bac..00000000000000 --- a/docs/src/icons/social/twitter.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - From 41ed9ab6d9d77dd82ad39e3a69ae99dc4b09f38a Mon Sep 17 00:00:00 2001 From: Sammy Harris <41593264+stegaBOB@users.noreply.github.com> Date: Fri, 6 Oct 2023 16:48:12 -0400 Subject: [PATCH 273/407] Fix: handle `-- --target-dir` arg in `cargo build-sbf` (#33555) * fix: handle target dir override in build-sbf cargo args * fix: refactor to canonicalize target arg for workspace absolute paths * fix: nightly linting --- sdk/cargo-build-sbf/src/main.rs | 57 +++++++++++++++++++++++------ sdk/cargo-build-sbf/tests/crates.rs | 40 +++++++++++++++++++- 2 files changed, 84 insertions(+), 13 deletions(-) diff --git a/sdk/cargo-build-sbf/src/main.rs 
b/sdk/cargo-build-sbf/src/main.rs index 1a9e4e1b622261..6ff755d8d1bb21 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -1,5 +1,6 @@ use { bzip2::bufread::BzDecoder, + cargo_metadata::camino::Utf8PathBuf, clap::{crate_description, crate_name, crate_version, Arg}, itertools::Itertools, log::*, @@ -22,7 +23,8 @@ use { #[derive(Debug)] struct Config<'a> { - cargo_args: Option>, + cargo_args: Vec<&'a str>, + target_directory: Option, sbf_out_dir: Option, sbf_sdk: PathBuf, platform_tools_version: &'a str, @@ -43,7 +45,8 @@ struct Config<'a> { impl Default for Config<'_> { fn default() -> Self { Self { - cargo_args: None, + cargo_args: vec![], + target_directory: None, sbf_sdk: env::current_exe() .expect("Unable to get current executable") .parent() @@ -721,11 +724,7 @@ fn build_solana_package( cargo_build_args.push("--jobs"); cargo_build_args.push(jobs); } - if let Some(args) = &config.cargo_args { - for arg in args { - cargo_build_args.push(arg); - } - } + cargo_build_args.append(&mut config.cargo_args.clone()); let output = spawn( &cargo_build, &cargo_build_args, @@ -864,9 +863,14 @@ fn build_solana(config: Config, manifest_path: Option) { exit(1); }); + let target_dir = config + .target_directory + .clone() + .unwrap_or(metadata.target_directory.clone()); + if let Some(root_package) = metadata.root_package() { if !config.workspace { - build_solana_package(&config, metadata.target_directory.as_ref(), root_package); + build_solana_package(&config, target_dir.as_ref(), root_package); return; } } @@ -887,7 +891,7 @@ fn build_solana(config: Config, manifest_path: Option) { .collect::>(); for package in all_sbf_packages { - build_solana_package(&config, metadata.target_directory.as_ref(), package); + build_solana_package(&config, target_dir.as_ref(), package); } } @@ -1050,10 +1054,39 @@ fn main() { } else { platform_tools_version }; + + let mut cargo_args = matches + .values_of("cargo_args") + .map(|vals| vals.collect::>()) + 
.unwrap_or_default(); + + let target_dir_string; + let target_directory = if let Some(target_dir) = cargo_args + .iter_mut() + .skip_while(|x| x != &&"--target-dir") + .nth(1) + { + let target_path = Utf8PathBuf::from(*target_dir); + // Directory needs to exist in order to canonicalize it + fs::create_dir_all(&target_path).unwrap_or_else(|err| { + error!("Unable to create target-dir directory {target_dir}: {err}"); + exit(1); + }); + // Canonicalize the path to avoid issues with relative paths + let canonicalized = target_path.canonicalize_utf8().unwrap_or_else(|err| { + error!("Unable to canonicalize provided target-dir directory {target_path}: {err}"); + exit(1); + }); + target_dir_string = canonicalized.to_string(); + *target_dir = &target_dir_string; + Some(canonicalized) + } else { + None + }; + let config = Config { - cargo_args: matches - .values_of("cargo_args") - .map(|vals| vals.collect::>()), + cargo_args, + target_directory, sbf_sdk: fs::canonicalize(&sbf_sdk).unwrap_or_else(|err| { error!( "Solana SDK path does not exist: {}: {}", diff --git a/sdk/cargo-build-sbf/tests/crates.rs b/sdk/cargo-build-sbf/tests/crates.rs index c63308124b0130..09015eb208e437 100644 --- a/sdk/cargo-build-sbf/tests/crates.rs +++ b/sdk/cargo-build-sbf/tests/crates.rs @@ -2,6 +2,8 @@ use { predicates::prelude::*, std::{ env, fs, + path::PathBuf, + str::FromStr, sync::atomic::{AtomicBool, Ordering}, }, }; @@ -25,7 +27,9 @@ fn run_cargo_build(crate_name: &str, extra_args: &[&str], fail: bool) { for arg in extra_args { args.push(arg); } - args.push("--"); + if !extra_args.contains(&"--") { + args.push("--"); + } args.push("-vv"); let mut cmd = assert_cmd::Command::cargo_bin("cargo-build-sbf").unwrap(); let assert = cmd.env("RUST_LOG", "debug").args(&args).assert(); @@ -88,6 +92,40 @@ fn test_out_dir() { clean_target("noop"); } +#[test] +#[serial] +fn test_target_dir() { + let target_dir = "./temp-target-dir"; + run_cargo_build("noop", &["--", "--target-dir", target_dir], false); + 
let cwd = env::current_dir().expect("Unable to get current working directory"); + let normal_target_dir = cwd.join("tests").join("crates").join("noop").join("target"); + assert!(!normal_target_dir.exists()); + let so_file = PathBuf::from_str(target_dir) + .unwrap() + .join("deploy") + .join("noop.so"); + assert!(so_file.exists()); + fs::remove_dir_all(target_dir).expect("Failed to remove custom target dir"); +} + +#[test] +#[serial] +fn test_target_and_out_dir() { + let target_dir = "./temp-target-dir"; + run_cargo_build( + "noop", + &["--sbf-out-dir", "tmp_out", "--", "--target-dir", target_dir], + false, + ); + let cwd = env::current_dir().expect("Unable to get current working directory"); + let dir = cwd.join("tmp_out"); + assert!(dir.exists()); + fs::remove_dir_all("tmp_out").expect("Failed to remove tmp_out dir"); + let normal_target_dir = cwd.join("tests").join("crates").join("noop").join("target"); + assert!(!normal_target_dir.exists()); + fs::remove_dir_all(target_dir).expect("Failed to remove custom target dir"); +} + #[test] #[serial] fn test_generate_child_script_on_failure() { From f075867ceb0870b4c6741a5324776bbba67c5eff Mon Sep 17 00:00:00 2001 From: Tyera Date: Fri, 6 Oct 2023 15:12:08 -0600 Subject: [PATCH 274/407] Blockstore::get_sigs_for_addr2: ensure lowest_slot >= first_available_block (#33556) * Set empty lowest_slot to first_available_block and remove check in loop * Ensure get_transaction_status on_with_counter returns slots >= first_available_block * Actually cleanup ledger --- ledger/src/blockstore.rs | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index ec78883548a3a4..c23dc240d79f7f 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2325,14 +2325,15 @@ impl Blockstore { confirmed_unrooted_slots: &HashSet, ) -> Result<(Option<(Slot, TransactionStatusMeta)>, u64)> { let mut counter = 0; - let (lock, lowest_available_slot) = 
self.ensure_lowest_cleanup_slot(); + let (lock, _) = self.ensure_lowest_cleanup_slot(); + let first_available_block = self.get_first_available_block()?; for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.transaction_status_cf.iter(IteratorMode::From( ( transaction_status_cf_primary_index, signature, - lowest_available_slot, + first_available_block, ), IteratorDirection::Forward, ))?; @@ -2630,16 +2631,17 @@ impl Blockstore { }; get_before_slot_timer.stop(); + let first_available_block = self.get_first_available_block()?; // Generate a HashSet of signatures that should be excluded from the results based on // `until` signature let mut get_until_slot_timer = Measure::start("get_until_slot_timer"); let (lowest_slot, until_excluded_signatures) = match until { - None => (0, HashSet::new()), + None => (first_available_block, HashSet::new()), Some(until) => { let transaction_status = self.get_transaction_status(until, &confirmed_unrooted_slots)?; match transaction_status { - None => (0, HashSet::new()), + None => (first_available_block, HashSet::new()), Some((slot, _)) => { let mut slot_signatures = self.get_sorted_block_signatures(slot)?; if let Some(pos) = slot_signatures.iter().position(|&x| x == until) { @@ -2654,7 +2656,6 @@ impl Blockstore { get_until_slot_timer.stop(); // Fetch the list of signatures that affect the given address - let first_available_block = self.get_first_available_block()?; let mut address_signatures = vec![]; // Get signatures in `slot` @@ -2695,10 +2696,7 @@ impl Blockstore { if slot == next_max_slot || slot < lowest_slot { break; } - if i == starting_primary_index - && key_address == address - && slot >= first_available_block - { + if i == starting_primary_index && key_address == address { if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) { address_signatures.push((slot, signature)); } @@ -2733,10 +2731,7 @@ impl Blockstore { if slot < lowest_slot { break; } - if i == next_primary_index - && 
key_address == address - && slot >= first_available_block - { + if i == next_primary_index && key_address == address { if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) { address_signatures.push((slot, signature)); } @@ -8017,6 +8012,7 @@ pub mod tests { if simulate_ledger_cleanup_service { *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot; + blockstore.purge_slots(0, lowest_cleanup_slot, PurgeType::CompactionFilter); } let are_missing = check_for_missing(); From 630feeddf2e735c313f15fd7ac653b566c228984 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 6 Oct 2023 15:04:37 -0700 Subject: [PATCH 275/407] Add wen_restart module (#33344) * Add wen_restart module: - Implement reading LastVotedForkSlots from blockstore. - Add proto file to record the intermediate results. - Also link wen_restart into validator. - Move recreation of tower outside replay_stage so we can get last_vote. * Update lock file. * Fix linter errors. * Fix depencies order. * Update wen_restart explanation and small fixes. * Generate tower outside tvu. * Update validator/src/cli.rs Co-authored-by: Tyera * Update wen-restart/protos/wen_restart.proto Co-authored-by: Tyera * Update wen-restart/build.rs Co-authored-by: Tyera * Update wen-restart/src/wen_restart.rs Co-authored-by: Tyera * Rename proto directory. * Rename InitRecord to MyLastVotedForkSlots, add imports. * Update wen-restart/Cargo.toml Co-authored-by: Tyera * Update wen-restart/src/wen_restart.rs Co-authored-by: Tyera * Move prost-build dependency to project toml. * No need to continue if the distance between slot and last_vote is already larger than MAX_SLOTS_ON_VOTED_FORKS. * Use 16k slots instead of 81k slots, a few more wording changes. * Use AncestorIterator which does the same thing. 
* Update Cargo.lock * Update Cargo.lock --------- Co-authored-by: Tyera --- Cargo.lock | 23 ++++ Cargo.toml | 3 + core/Cargo.toml | 1 + core/src/replay_stage.rs | 12 +- core/src/tvu.rs | 9 +- core/src/validator.rs | 36 +++++- gossip/src/epoch_slots.rs | 2 +- local-cluster/src/validator_configs.rs | 1 + programs/sbf/Cargo.lock | 20 ++++ validator/src/cli.rs | 32 ++++++ wen-restart/Cargo.toml | 43 +++++++ wen-restart/build.rs | 41 +++++++ wen-restart/proto/wen_restart.proto | 23 ++++ wen-restart/src/lib.rs | 7 ++ wen-restart/src/wen_restart.rs | 152 +++++++++++++++++++++++++ 15 files changed, 387 insertions(+), 18 deletions(-) create mode 100644 wen-restart/Cargo.toml create mode 100644 wen-restart/build.rs create mode 100644 wen-restart/proto/wen_restart.proto create mode 100644 wen-restart/src/lib.rs create mode 100644 wen-restart/src/wen_restart.rs diff --git a/Cargo.lock b/Cargo.lock index 988d11f7f5631b..f0103dd448109d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5792,6 +5792,7 @@ dependencies = [ "solana-version", "solana-vote", "solana-vote-program", + "solana-wen-restart", "static_assertions", "strum", "strum_macros", @@ -7482,6 +7483,28 @@ dependencies = [ "solana-version", ] +[[package]] +name = "solana-wen-restart" +version = "1.18.0" +dependencies = [ + "log", + "prost", + "prost-build", + "prost-types", + "protobuf-src", + "rustc_version 0.4.0", + "serial_test", + "solana-entry", + "solana-gossip", + "solana-ledger", + "solana-logger", + "solana-program", + "solana-runtime", + "solana-sdk", + "solana-streamer", + "solana-vote-program", +] + [[package]] name = "solana-zk-keygen" version = "1.18.0" diff --git a/Cargo.toml b/Cargo.toml index ed39154543f8e2..f5849604d0e3b3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -111,6 +111,7 @@ members = [ "version", "vote", "watchtower", + "wen-restart", "zk-keygen", "zk-token-sdk", ] @@ -261,6 +262,7 @@ pretty-hex = "0.3.0" proc-macro2 = "1.0.67" proptest = "1.2" prost = "0.11.9" +prost-build = "0.11.9" prost-types = 
"0.11.9" protobuf-src = "1.1.0" qstring = "0.7.2" @@ -371,6 +373,7 @@ solana-udp-client = { path = "udp-client", version = "=1.18.0" } solana-version = { path = "version", version = "=1.18.0" } solana-vote = { path = "vote", version = "=1.18.0" } solana-vote-program = { path = "programs/vote", version = "=1.18.0" } +solana-wen-restart = { path = "wen-restart", version = "=1.18.0" } solana-zk-keygen = { path = "zk-keygen", version = "=1.18.0" } solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.18.0" } solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.18.0" } diff --git a/core/Cargo.toml b/core/Cargo.toml index fcab8ff8775912..c3923613b768a2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -71,6 +71,7 @@ solana-turbine = { workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } +solana-wen-restart = { workspace = true } strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } sys-info = { workspace = true } diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 37067ce38f556d..59036a997039c1 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -29,7 +29,6 @@ use { }, rewards_recorder_service::{RewardsMessage, RewardsRecorderSender}, unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes, - validator::ProcessBlockStore, voting_service::VoteOp, window_service::DuplicateSlotReceiver, }, @@ -483,7 +482,7 @@ impl ReplayStage { ledger_signal_receiver: Receiver, duplicate_slots_receiver: DuplicateSlotReceiver, poh_recorder: Arc>, - maybe_process_blockstore: Option, + mut tower: Tower, vote_tracker: Arc, cluster_slots: Arc, retransmit_slots_sender: Sender, @@ -502,15 +501,6 @@ impl ReplayStage { banking_tracer: Arc, popular_pruned_forks_receiver: PopularPrunedForksReceiver, ) -> Result { - let mut tower = if let Some(process_blockstore) = maybe_process_blockstore { - 
let tower = process_blockstore.process_to_create_tower()?; - info!("Tower state: {:?}", tower); - tower - } else { - warn!("creating default tower...."); - Tower::default() - }; - let ReplayStageConfig { vote_account, authorized_voter_keypairs, diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 0b8358863fbceb..ec444ae4403d7e 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -11,7 +11,7 @@ use { }, cluster_slots_service::{cluster_slots::ClusterSlots, ClusterSlotsService}, completed_data_sets_service::CompletedDataSetsSender, - consensus::tower_storage::TowerStorage, + consensus::{tower_storage::TowerStorage, Tower}, cost_update_service::CostUpdateService, drop_bank_service::DropBankService, ledger_cleanup_service::LedgerCleanupService, @@ -19,7 +19,6 @@ use { replay_stage::{ReplayStage, ReplayStageConfig}, rewards_recorder_service::RewardsRecorderSender, shred_fetch_stage::ShredFetchStage, - validator::ProcessBlockStore, voting_service::VotingService, warm_quic_cache_service::WarmQuicCacheService, window_service::WindowService, @@ -109,7 +108,7 @@ impl Tvu { ledger_signal_receiver: Receiver, rpc_subscriptions: &Arc, poh_recorder: &Arc>, - maybe_process_block_store: Option, + tower: Tower, tower_storage: Arc, leader_schedule_cache: &Arc, exit: Arc, @@ -292,7 +291,7 @@ impl Tvu { ledger_signal_receiver, duplicate_slots_receiver, poh_recorder.clone(), - maybe_process_block_store, + tower, vote_tracker, cluster_slots, retransmit_slots_sender, @@ -463,7 +462,7 @@ pub mod tests { OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), )), &poh_recorder, - None, + Tower::default(), Arc::new(FileTowerStorage::default()), &leader_schedule_cache, exit.clone(), diff --git a/core/src/validator.rs b/core/src/validator.rs index e2b763f202f89b..e5eb3544ab468f 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -119,6 +119,7 @@ use { solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, solana_turbine::{self, 
broadcast_stage::BroadcastStageType}, solana_vote_program::vote_state, + solana_wen_restart::wen_restart::wait_for_wen_restart, std::{ collections::{HashMap, HashSet}, net::SocketAddr, @@ -259,6 +260,7 @@ pub struct ValidatorConfig { pub block_production_method: BlockProductionMethod, pub generator_config: Option, pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup, + pub wen_restart_proto_path: Option, } impl Default for ValidatorConfig { @@ -326,6 +328,7 @@ impl Default for ValidatorConfig { block_production_method: BlockProductionMethod::default(), generator_config: None, use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(), + wen_restart_proto_path: None, } } } @@ -1202,6 +1205,22 @@ impl Validator { ) .unwrap(); + let in_wen_restart = config.wen_restart_proto_path.is_some() && !waited_for_supermajority; + let tower = match process_blockstore.process_to_create_tower() { + Ok(tower) => { + info!("Tower state: {:?}", tower); + tower + } + Err(e) => { + warn!( + "Unable to retrieve tower: {:?} creating default tower....", + e + ); + Tower::default() + } + }; + let last_vote = tower.last_vote(); + let (replay_vote_sender, replay_vote_receiver) = unbounded(); let tvu = Tvu::new( vote_account, @@ -1218,7 +1237,7 @@ impl Validator { ledger_signal_receiver, &rpc_subscriptions, &poh_recorder, - Some(process_blockstore), + tower, config.tower_storage.clone(), &leader_schedule_cache, exit.clone(), @@ -1257,6 +1276,21 @@ impl Validator { repair_quic_endpoint_sender, )?; + if in_wen_restart { + info!("Waiting for wen_restart phase one to finish"); + match wait_for_wen_restart( + &config.wen_restart_proto_path.clone().unwrap(), + last_vote, + blockstore.clone(), + cluster_info.clone(), + ) { + Ok(()) => { + return Err("wen_restart phase one completedy".to_string()); + } + Err(e) => return Err(format!("wait_for_wen_restart failed: {e:?}")), + }; + } + let tpu = Tpu::new( &cluster_info, &poh_recorder, diff --git a/gossip/src/epoch_slots.rs 
b/gossip/src/epoch_slots.rs index dc94380b33e5de..186a17aa6ec255 100644 --- a/gossip/src/epoch_slots.rs +++ b/gossip/src/epoch_slots.rs @@ -13,7 +13,7 @@ use { }, }; -const MAX_SLOTS_PER_ENTRY: usize = 2048 * 8; +pub const MAX_SLOTS_PER_ENTRY: usize = 2048 * 8; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, AbiExample)] pub struct Uncompressed { pub first_slot: Slot, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 70211b5dac666b..d480dc2653567e 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -68,6 +68,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { block_production_method: config.block_production_method.clone(), generator_config: config.generator_config.clone(), use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup, + wen_restart_proto_path: config.wen_restart_proto_path.clone(), } } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 438ecaaaff9e15..167f2e4c4fa8c4 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4831,6 +4831,7 @@ dependencies = [ "solana-version", "solana-vote", "solana-vote-program", + "solana-wen-restart", "strum", "strum_macros", "sys-info", @@ -6436,6 +6437,25 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-wen-restart" +version = "1.18.0" +dependencies = [ + "log", + "prost", + "prost-build", + "prost-types", + "protobuf-src", + "rustc_version", + "solana-gossip", + "solana-ledger", + "solana-logger", + "solana-program", + "solana-runtime", + "solana-sdk", + "solana-vote-program", +] + [[package]] name = "solana-zk-token-proof-program" version = "1.18.0" diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 72e82ca13b56d9..cd3f3323589653 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1382,6 +1382,35 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { 
.possible_values(BlockProductionMethod::cli_names()) .help(BlockProductionMethod::cli_message()) ) + .arg( + Arg::with_name("wen_restart") + .long("wen-restart") + .value_name("DIR") + .takes_value(true) + .required(false) + .default_value(&default_args.wen_restart_path) + .conflicts_with("wait_for_supermajority") + .help( + "When specified, the validator will enter Wen Restart mode which + pauses normal activity. Validators in this mode will gossip their last + vote to reach consensus on a safe restart slot and repair all blocks + on the selected fork. The safe slot will be a descendant of the latest + optimistically confirmed slot to ensure we do not roll back any + optimistically confirmed slots. + + The progress in this mode will be saved in the file location provided. + If consensus is reached, the validator will automatically exit and then + execute wait_for_supermajority logic so the cluster will resume execution. + The progress file will be kept around for future debugging. + + After the cluster resumes normal operation, the validator arguments can + be adjusted to remove --wen_restart and update expected_shred_version to + the new shred_version agreed on in the consensus. + + If wen_restart fails, refer to the progress file (in proto3 format) for + further debugging. 
+ ") + ) .args(&get_deprecated_arguments()) .after_help("The default subcommand is run") .subcommand( @@ -1931,6 +1960,8 @@ pub struct DefaultArgs { pub wait_for_restart_window_max_delinquent_stake: String, pub banking_trace_dir_byte_limit: String, + + pub wen_restart_path: String, } impl DefaultArgs { @@ -2009,6 +2040,7 @@ impl DefaultArgs { wait_for_restart_window_min_idle_time: "10".to_string(), wait_for_restart_window_max_delinquent_stake: "5".to_string(), banking_trace_dir_byte_limit: BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT.to_string(), + wen_restart_path: "wen_restart_progress.proto".to_string(), } } } diff --git a/wen-restart/Cargo.toml b/wen-restart/Cargo.toml new file mode 100644 index 00000000000000..b74871801872af --- /dev/null +++ b/wen-restart/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "solana-wen-restart" +description = "Automatic repair and restart protocol" +documentation = "https://github.com/solana-foundation/solana-improvement-documents/pull/46" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = false + +[dependencies] +log = { workspace = true } +prost = { workspace = true } +prost-types = { workspace = true } +solana-gossip = { workspace = true } +solana-ledger = { workspace = true } +solana-logger = { workspace = true } +solana-program = { workspace = true } +solana-runtime = { workspace = true } +solana-sdk = { workspace = true } +solana-vote-program = { workspace = true } + +[dev-dependencies] +serial_test = { workspace = true } +solana-entry = { workspace = true } +solana-streamer = { workspace = true } + +[build-dependencies] +prost-build = { workspace = true } +rustc_version = { workspace = true } + +# windows users should install the protobuf compiler manually and set the PROTOC +# envar to point to the installed binary +[target."cfg(not(windows))".build-dependencies] +protobuf-src = 
{ workspace = true } + +[lib] +name = "solana_wen_restart" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/wen-restart/build.rs b/wen-restart/build.rs new file mode 100644 index 00000000000000..4360117bb445d4 --- /dev/null +++ b/wen-restart/build.rs @@ -0,0 +1,41 @@ +extern crate rustc_version; + +use { + rustc_version::{version_meta, Channel}, + std::io::Result, +}; + +fn main() -> Result<()> { + const PROTOC_ENVAR: &str = "PROTOC"; + if std::env::var(PROTOC_ENVAR).is_err() { + #[cfg(not(windows))] + std::env::set_var(PROTOC_ENVAR, protobuf_src::protoc()); + } + + // Copied and adapted from + // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example + // Licensed under Apache-2.0 + MIT + match version_meta().unwrap().channel { + Channel::Stable => { + println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); + } + Channel::Beta => { + println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); + } + Channel::Nightly => { + println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); + } + Channel::Dev => { + println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); + // See https://github.com/solana-labs/solana/issues/11055 + // We may be running the custom `rust-bpf-builder` toolchain, + // which currently needs `#![feature(proc_macro_hygiene)]` to + // be applied. + println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); + } + } + + // Generate rust files from protos. 
+ prost_build::compile_protos(&["proto/wen_restart.proto"], &["proto/"])?; + Ok(()) +} diff --git a/wen-restart/proto/wen_restart.proto b/wen-restart/proto/wen_restart.proto new file mode 100644 index 00000000000000..1f6423462b55b0 --- /dev/null +++ b/wen-restart/proto/wen_restart.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; +package solana.wen_restart_proto; + +enum State { + INIT = 0; + LAST_VOTED_FORK_SLOTS = 1; + HEAVIEST_FORK = 2; + GENERATING_SNAPSHOT = 3; + FINISHED_SNAPSHOT = 4; + WAITING_FOR_SUPERMAJORITY = 5; + DONE = 6; +} + +message MyLastVotedForkSlots { + uint64 last_vote_slot = 1; + string last_vote_bankhash = 2; + uint32 shred_version = 3; +} + +message WenRestartProgress { + State state = 1; + optional MyLastVotedForkSlots my_last_voted_fork_slots = 2; +} \ No newline at end of file diff --git a/wen-restart/src/lib.rs b/wen-restart/src/lib.rs new file mode 100644 index 00000000000000..e58a6d04bf831f --- /dev/null +++ b/wen-restart/src/lib.rs @@ -0,0 +1,7 @@ +pub(crate) mod solana { + pub(crate) mod wen_restart_proto { + include!(concat!(env!("OUT_DIR"), "/solana.wen_restart_proto.rs")); + } +} + +pub mod wen_restart; diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs new file mode 100644 index 00000000000000..75e4e21ce9431a --- /dev/null +++ b/wen-restart/src/wen_restart.rs @@ -0,0 +1,152 @@ +//! 
The `wen-restart` module handles automatic repair during a cluster restart + +use { + crate::solana::wen_restart_proto::{ + MyLastVotedForkSlots, State as RestartState, WenRestartProgress, + }, + log::*, + prost::Message, + solana_gossip::{cluster_info::ClusterInfo, epoch_slots::MAX_SLOTS_PER_ENTRY}, + solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore}, + solana_vote_program::vote_state::VoteTransaction, + std::{ + fs::File, + io::{Error, Write}, + path::PathBuf, + sync::Arc, + }, +}; + +pub fn wait_for_wen_restart( + wen_restart_path: &PathBuf, + last_vote: VoteTransaction, + blockstore: Arc, + cluster_info: Arc, +) -> Result<(), Box> { + // repair and restart option does not work without last voted slot. + let last_vote_slot = last_vote + .last_voted_slot() + .expect("wen_restart doesn't work if local tower is wiped"); + let mut last_vote_fork: Vec = AncestorIterator::new_inclusive(last_vote_slot, &blockstore) + .take(MAX_SLOTS_PER_ENTRY) + .collect(); + info!( + "wen_restart last voted fork {} {:?}", + last_vote_slot, last_vote_fork + ); + last_vote_fork.reverse(); + // Todo(wen): add the following back in after Gossip code is checked in. + // cluster_info.push_last_voted_fork_slots(&last_voted_fork, last_vote.hash()); + // The rest of the protocol will be in another PR. 
+ let current_progress = WenRestartProgress { + state: RestartState::Init.into(), + my_last_voted_fork_slots: Some(MyLastVotedForkSlots { + last_vote_slot, + last_vote_bankhash: last_vote.hash().to_string(), + shred_version: cluster_info.my_shred_version() as u32, + }), + }; + write_wen_restart_records(wen_restart_path, current_progress)?; + Ok(()) +} + +fn write_wen_restart_records( + records_path: &PathBuf, + new_progress: WenRestartProgress, +) -> Result<(), Error> { + // overwrite anything if exists + let mut file = File::create(records_path)?; + info!("writing new record {:?}", new_progress); + let mut buf = Vec::with_capacity(new_progress.encoded_len()); + new_progress.encode(&mut buf)?; + file.write_all(&buf)?; + Ok(()) +} +#[cfg(test)] +mod tests { + use { + crate::wen_restart::*, + solana_entry::entry, + solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, + solana_ledger::{blockstore, get_tmp_ledger_path_auto_delete}, + solana_program::{hash::Hash, vote::state::Vote}, + solana_sdk::{ + signature::{Keypair, Signer}, + timing::timestamp, + }, + solana_streamer::socket::SocketAddrSpace, + std::{fs::read, sync::Arc}, + }; + + #[test] + fn test_wen_restart_normal_flow() { + solana_logger::setup(); + let node_keypair = Arc::new(Keypair::new()); + let cluster_info = Arc::new(ClusterInfo::new( + { + let mut contact_info = + ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()); + contact_info.set_shred_version(2); + contact_info + }, + node_keypair, + SocketAddrSpace::Unspecified, + )); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let mut wen_restart_proto_path = ledger_path.path().to_path_buf(); + wen_restart_proto_path.push("wen_restart_status.proto"); + let blockstore = Arc::new(blockstore::Blockstore::open(ledger_path.path()).unwrap()); + let expected_slots = 400; + let last_vote_slot = (MAX_SLOTS_PER_ENTRY + expected_slots).try_into().unwrap(); + let last_parent = (MAX_SLOTS_PER_ENTRY >> 1).try_into().unwrap(); + for 
i in 0..expected_slots { + let entries = entry::create_ticks(1, 0, Hash::default()); + let parent_slot = if i > 0 { + (MAX_SLOTS_PER_ENTRY + i).try_into().unwrap() + } else { + last_parent + }; + let shreds = blockstore::entries_to_test_shreds( + &entries, + (MAX_SLOTS_PER_ENTRY + i + 1).try_into().unwrap(), + parent_slot, + false, + 0, + true, // merkle_variant + ); + blockstore.insert_shreds(shreds, None, false).unwrap(); + } + // link directly to slot 1 whose distance to last_vote > MAX_SLOTS_PER_ENTRY so it will not be included. + let entries = entry::create_ticks(1, 0, Hash::default()); + let shreds = blockstore::entries_to_test_shreds( + &entries, + last_parent, + 1, + false, + 0, + true, // merkle_variant + ); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let last_vote_bankhash = Hash::new_unique(); + assert!(wait_for_wen_restart( + &wen_restart_proto_path, + VoteTransaction::from(Vote::new(vec![last_vote_slot], last_vote_bankhash)), + blockstore, + cluster_info + ) + .is_ok()); + let buffer = read(wen_restart_proto_path).unwrap(); + let progress = WenRestartProgress::decode(&mut std::io::Cursor::new(buffer)).unwrap(); + assert_eq!( + progress, + WenRestartProgress { + state: RestartState::Init.into(), + my_last_voted_fork_slots: Some(MyLastVotedForkSlots { + last_vote_slot, + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: 2, + }), + } + ) + } +} From 95810d876a7cf8bdf9991ff5b887074c8d835de1 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Sat, 7 Oct 2023 13:15:38 +0900 Subject: [PATCH 276/407] Enable frozen_abi on banking trace file (#33501) * Enable frozen_abi on banking trace file * Fix ci with really correct bugfix... * Remove tracker_callers * Fix typo... * Fix AbiExample for Arc/Rc's Weaks * Added comment for AbiExample impl of SystemTime * Simplify and document EvenAsOpaque with new usage * Minor clean-ups * Simplify SystemTime::example() with UNIX_EPOCH... 
* Add comment for AbiExample subtleties --- Cargo.lock | 4 ++ core/src/banking_trace.rs | 7 ++- core/src/sigverify.rs | 2 +- frozen-abi/Cargo.toml | 1 + frozen-abi/src/abi_digester.rs | 54 +++++++++++++--- frozen-abi/src/abi_example.rs | 109 +++++++++++++++++++++++++-------- perf/Cargo.toml | 5 ++ perf/build.rs | 26 ++++++++ perf/src/cuda_runtime.rs | 2 +- perf/src/lib.rs | 4 ++ perf/src/packet.rs | 2 +- perf/src/recycler.rs | 9 +++ programs/sbf/Cargo.lock | 3 + sdk/src/packet.rs | 19 +++++- 14 files changed, 205 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f0103dd448109d..6cb8b7eccad260 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5932,6 +5932,7 @@ dependencies = [ name = "solana-frozen-abi" version = "1.18.0" dependencies = [ + "bitflags 2.3.3", "block-buffer 0.10.4", "bs58", "bv", @@ -6441,7 +6442,10 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", + "rustc_version 0.4.0", "serde", + "solana-frozen-abi", + "solana-frozen-abi-macro", "solana-logger", "solana-metrics", "solana-rayon-threadlimit", diff --git a/core/src/banking_trace.rs b/core/src/banking_trace.rs index 760121dc7c557d..ba76b794ba2919 100644 --- a/core/src/banking_trace.rs +++ b/core/src/banking_trace.rs @@ -62,16 +62,17 @@ pub struct BankingTracer { active_tracer: Option, } -#[derive(Serialize, Deserialize, Debug)] +#[frozen_abi(digest = "Eq6YrAFtTbtPrCEvh6Et1mZZDCARUg1gcK2qiZdqyjUz")] +#[derive(Serialize, Deserialize, Debug, AbiExample)] pub struct TimedTracedEvent(pub std::time::SystemTime, pub TracedEvent); -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, Debug, AbiExample, AbiEnumVisitor)] pub enum TracedEvent { PacketBatch(ChannelLabel, BankingPacketBatch), BlockAndBankHash(Slot, Hash, Hash), } -#[derive(Serialize, Deserialize, Debug, Clone, Copy)] +#[derive(Serialize, Deserialize, Debug, Clone, Copy, AbiExample, AbiEnumVisitor)] pub enum ChannelLabel { NonVote, TpuVote, diff --git a/core/src/sigverify.rs 
b/core/src/sigverify.rs index 8140efac7ec2aa..b496452078d883 100644 --- a/core/src/sigverify.rs +++ b/core/src/sigverify.rs @@ -16,7 +16,7 @@ use { solana_sdk::{packet::Packet, saturating_add_assign}, }; -#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, AbiExample)] pub struct SigverifyTracerPacketStats { pub total_removed_before_sigverify_stage: usize, pub total_tracer_packets_received_in_sigverify_stage: usize, diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 2965dd17a368d7..4a4029ceb843d4 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -31,6 +31,7 @@ subtle = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dev-dependencies] solana-logger = { workspace = true } +bitflags = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/frozen-abi/src/abi_digester.rs b/frozen-abi/src/abi_digester.rs index 0d0886daae7438..b014efd2ba1570 100644 --- a/frozen-abi/src/abi_digester.rs +++ b/frozen-abi/src/abi_digester.rs @@ -17,7 +17,7 @@ pub struct AbiDigester { data_types: std::rc::Rc>>, depth: usize, for_enum: bool, - opaque_scope: Option, + opaque_type_matcher: Option, } pub type DigestResult = Result; @@ -70,7 +70,7 @@ impl AbiDigester { data_types: std::rc::Rc::new(std::cell::RefCell::new(vec![])), for_enum: false, depth: 0, - opaque_scope: None, + opaque_type_matcher: None, } } @@ -81,16 +81,16 @@ impl AbiDigester { data_types: self.data_types.clone(), depth: self.depth, for_enum: false, - opaque_scope: self.opaque_scope.clone(), + opaque_type_matcher: self.opaque_type_matcher.clone(), } } - pub fn create_new_opaque(&self, top_scope: &str) -> Self { + pub fn create_new_opaque(&self, type_matcher: &str) -> Self { Self { data_types: self.data_types.clone(), depth: self.depth, for_enum: false, - opaque_scope: Some(top_scope.to_owned()), + opaque_type_matcher: Some(type_matcher.to_owned()), } } @@ 
-103,7 +103,7 @@ impl AbiDigester { data_types: self.data_types.clone(), depth, for_enum: false, - opaque_scope: self.opaque_scope.clone(), + opaque_type_matcher: self.opaque_type_matcher.clone(), }) } @@ -116,15 +116,15 @@ impl AbiDigester { data_types: self.data_types.clone(), depth, for_enum: true, - opaque_scope: self.opaque_scope.clone(), + opaque_type_matcher: self.opaque_type_matcher.clone(), }) } pub fn digest_data(&mut self, value: &T) -> DigestResult { let type_name = normalize_type_name(type_name::()); if type_name.ends_with("__SerializeWith") - || (self.opaque_scope.is_some() - && type_name.starts_with(self.opaque_scope.as_ref().unwrap())) + || (self.opaque_type_matcher.is_some() + && type_name.contains(self.opaque_type_matcher.as_ref().unwrap())) { // we can't use the AbiEnumVisitor trait for these cases. value.serialize(self.create_new()) @@ -661,6 +661,34 @@ mod tests { #[frozen_abi(digest = "9PMdHRb49BpkywrmPoJyZWMsEmf5E1xgmsFGkGmea5RW")] type TestBitVec = bv::BitVec; + mod bitflags_abi { + use crate::abi_example::{AbiExample, EvenAsOpaque, IgnoreAsHelper}; + + bitflags::bitflags! { + #[frozen_abi(digest = "HhKNkaeAd7AohTb8S8sPKjAWwzxWY2DPz5FvkWmx5bSH")] + #[derive(Serialize, Deserialize)] + struct TestFlags: u8 { + const TestBit = 0b0000_0001; + } + } + + impl AbiExample for TestFlags { + fn example() -> Self { + Self::empty() + } + } + + impl IgnoreAsHelper for TestFlags {} + // This (EvenAsOpaque) marker trait is needed for bitflags-generated types because we can't + // impl AbiExample for its private type: + // thread '...TestFlags_frozen_abi...' 
panicked at ...: + // derive or implement AbiExample/AbiEnumVisitor for + // solana_frozen_abi::abi_digester::tests::_::InternalBitFlags + impl EvenAsOpaque for TestFlags { + const TYPE_NAME_MATCHER: &'static str = "::_::InternalBitFlags"; + } + } + mod skip_should_be_same { #[frozen_abi(digest = "4LbuvQLX78XPbm4hqqZcHFHpseDJcw4qZL9EUZXSi2Ss")] #[derive(Serialize, AbiExample)] @@ -691,4 +719,12 @@ mod tests { Variant2(u8, u16, #[serde(skip)] u32), } } + + #[frozen_abi(digest = "B1PcwZdUfGnxaRid9e6ZwkST3NZ2KUEYobA1DkxWrYLP")] + #[derive(Serialize, AbiExample)] + struct TestArcWeak(std::sync::Weak); + + #[frozen_abi(digest = "4R8uCLR1BVU1aFgkSaNyKcFD1FeM6rGdsjbJBFpnqx4v")] + #[derive(Serialize, AbiExample)] + struct TestRcWeak(std::rc::Weak); } diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index 976668d487b3e0..50a17af715bd1d 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -6,6 +6,24 @@ use { std::any::type_name, }; +// The most important trait for the abi digesting. This trait is used to create any complexities of +// object graph to generate the abi digest. The frozen abi test harness calls T::example() to +// instantiate the tested root type and traverses its fields recursively, abusing the +// serde::serialize(). +// +// This trait applicability is similar to the Default trait. That means all referenced types must +// implement this trait. AbiExample is implemented for almost all common types in this file. +// +// When implementing AbiExample manually, you need to return a _minimally-populated_ value +// from it to actually generate a meaningful digest. This impl semantics is unlike Default, which +// usually returns something empty. See actual impls for inspiration. +// +// The requirement of AbiExample impls even applies to those types of `#[serde(skip)]`-ed fields. 
+// That's because the abi digesting needs a properly initialized object to enter into the +// serde::serialize() to begin with, even knowning they aren't used for serialization and thus abi +// digest. Luckily, `#[serde(skip)]`-ed fields' AbiExample impls can just delegate to T::default(), +// exploiting the nature of this artificial impl requirement as an exception from the usual +// AbiExample semantics. pub trait AbiExample: Sized { fn example() -> Self; } @@ -137,25 +155,12 @@ tuple_example_impls! { } } -// Source: https://github.com/rust-lang/rust/blob/ba18875557aabffe386a2534a1aa6118efb6ab88/src/libcore/array/mod.rs#L417 -macro_rules! array_example_impls { - {$n:expr, $t:ident $($ts:ident)*} => { - impl AbiExample for [T; $n] where T: AbiExample { - fn example() -> Self { - [$t::example(), $($ts::example()),*] - } - } - array_example_impls!{($n - 1), $($ts)*} - }; - {$n:expr,} => { - impl AbiExample for [T; $n] { - fn example() -> Self { [] } - } - }; +impl AbiExample for [T; N] { + fn example() -> Self { + std::array::from_fn(|_| T::example()) + } } -array_example_impls! {32, T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T} - // Source: https://github.com/rust-lang/rust/blob/ba18875557aabffe386a2534a1aa6118efb6ab88/src/libcore/default.rs#L137 macro_rules! example_impls { ($t:ty, $v:expr) => { @@ -232,7 +237,14 @@ impl AbiExample for BitVec { } impl IgnoreAsHelper for BitVec {} -impl EvenAsOpaque for BitVec {} +// This (EvenAsOpaque) marker trait is needed for BitVec because we can't impl AbiExample for its +// private type: +// thread '...TestBitVec_frozen_abi...' 
panicked at ...: +// derive or implement AbiExample/AbiEnumVisitor for +// bv::bit_vec::inner::Inner +impl EvenAsOpaque for BitVec { + const TYPE_NAME_MATCHER: &'static str = "bv::bit_vec::inner::"; +} pub(crate) fn normalize_type_name(type_name: &str) -> String { type_name.chars().filter(|c| *c != '&').collect() @@ -329,6 +341,23 @@ impl AbiExample for std::sync::Arc { } } +// When T is weakly owned by the likes of `std::{sync, rc}::Weak`s, we need to uphold the ownership +// of T in some way at least during abi digesting... However, there's no easy way. Stashing them +// into static is confronted with Send/Sync issue. Stashing them into thread_local is confronted +// with not enough (T + 'static) lifetime bound.. So, just leak the examples. This should be +// tolerated, considering ::example() should ever be called inside tests, not in production code... +fn leak_and_inhibit_drop<'a, T>(t: T) -> &'a mut T { + Box::leak(Box::new(t)) +} + +impl AbiExample for std::sync::Weak { + fn example() -> Self { + info!("AbiExample for (Arc's Weak): {}", type_name::()); + // leaking is needed otherwise Arc::upgrade() will always return None... + std::sync::Arc::downgrade(leak_and_inhibit_drop(std::sync::Arc::new(T::example()))) + } +} + impl AbiExample for std::rc::Rc { fn example() -> Self { info!("AbiExample for (Rc): {}", type_name::()); @@ -336,6 +365,14 @@ impl AbiExample for std::rc::Rc { } } +impl AbiExample for std::rc::Weak { + fn example() -> Self { + info!("AbiExample for (Rc's Weak): {}", type_name::()); + // leaking is needed otherwise Rc::upgrade() will always return None... 
+ std::rc::Rc::downgrade(leak_and_inhibit_drop(std::rc::Rc::new(T::example()))) + } +} + impl AbiExample for std::sync::Mutex { fn example() -> Self { info!("AbiExample for (Mutex): {}", type_name::()); @@ -457,6 +494,13 @@ impl AbiExample for std::path::PathBuf { } } +#[cfg(not(target_os = "solana"))] +impl AbiExample for std::time::SystemTime { + fn example() -> Self { + std::time::SystemTime::UNIX_EPOCH + } +} + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; impl AbiExample for SocketAddr { fn example() -> Self { @@ -470,13 +514,22 @@ impl AbiExample for IpAddr { } } -// This is a control flow indirection needed for digesting all variants of an enum +// This is a control flow indirection needed for digesting all variants of an enum. +// +// All of types (including non-enums) will be processed by this trait, albeit the +// name of this trait. +// User-defined enums usually just need to impl this with namesake derive macro (AbiEnumVisitor). +// +// Note that sometimes this indirection doesn't work for various reasons. For that end, there are +// hacks with marker traits (IgnoreAsHelper/EvenAsOpaque). pub trait AbiEnumVisitor: Serialize { fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult; } pub trait IgnoreAsHelper {} -pub trait EvenAsOpaque {} +pub trait EvenAsOpaque { + const TYPE_NAME_MATCHER: &'static str; +} impl AbiEnumVisitor for T { default fn visit_for_abi(&self, _digester: &mut AbiDigester) -> DigestResult { @@ -489,7 +542,9 @@ impl AbiEnumVisitor for T { impl AbiEnumVisitor for T { default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult { - info!("AbiEnumVisitor for (default): {}", type_name::()); + info!("AbiEnumVisitor for T: {}", type_name::()); + // not calling self.serialize(...) is intentional here as the most generic impl + // consider IgnoreAsHelper and EvenAsOpaque if you're stuck on this.... 
T::example() .serialize(digester.create_new()) .map_err(DigestError::wrap_by_type::) @@ -501,7 +556,7 @@ impl AbiEnumVisitor for T { // relevant test: TestVecEnum impl AbiEnumVisitor for &T { default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult { - info!("AbiEnumVisitor for (&default): {}", type_name::()); + info!("AbiEnumVisitor for &T: {}", type_name::()); // Don't call self.visit_for_abi(...) to avoid the infinite recursion! T::visit_for_abi(self, digester) } @@ -521,9 +576,13 @@ impl AbiEnumVisitor for &T { // inability of implementing AbiExample for private structs from other crates impl AbiEnumVisitor for &T { default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult { - info!("AbiEnumVisitor for (IgnoreAsOpaque): {}", type_name::()); - let top_scope = type_name::().split("::").next().unwrap(); - self.serialize(digester.create_new_opaque(top_scope)) + let type_name = type_name::(); + let matcher = T::TYPE_NAME_MATCHER; + info!( + "AbiEnumVisitor for (EvenAsOpaque): {}: matcher: {}", + type_name, matcher + ); + self.serialize(digester.create_new_opaque(matcher)) .map_err(DigestError::wrap_by_type::) } } diff --git a/perf/Cargo.toml b/perf/Cargo.toml index aea478da078c35..b62484f4249abd 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -21,6 +21,8 @@ log = { workspace = true } rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } +solana-frozen-abi = { workspace = true } +solana-frozen-abi-macro = { workspace = true } solana-metrics = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } @@ -40,6 +42,9 @@ rand_chacha = { workspace = true } solana-logger = { workspace = true } test-case = { workspace = true } +[build-dependencies] +rustc_version = { workspace = true } + [[bench]] name = "sigverify" diff --git a/perf/build.rs b/perf/build.rs index 025c71008f092b..4925ee898eb612 100644 --- a/perf/build.rs +++ b/perf/build.rs @@ -1,3 +1,6 @@ 
+extern crate rustc_version; +use rustc_version::{version_meta, Channel}; + fn main() { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { @@ -8,4 +11,27 @@ fn main() { println!("cargo:rustc-cfg=build_target_feature_avx2"); } } + + // Copied and adapted from + // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example + // Licensed under Apache-2.0 + MIT + match version_meta().unwrap().channel { + Channel::Stable => { + println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); + } + Channel::Beta => { + println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); + } + Channel::Nightly => { + println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); + } + Channel::Dev => { + println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); + // See https://github.com/solana-labs/solana/issues/11055 + // We may be running the custom `rust-bpf-builder` toolchain, + // which currently needs `#![feature(proc_macro_hygiene)]` to + // be applied. + println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); + } + } } diff --git a/perf/src/cuda_runtime.rs b/perf/src/cuda_runtime.rs index a2986af1813680..5b44099aecb36c 100644 --- a/perf/src/cuda_runtime.rs +++ b/perf/src/cuda_runtime.rs @@ -54,7 +54,7 @@ fn unpin(mem: *mut T) { // A vector wrapper where the underlying memory can be // page-pinned. Controlled by flags in case user only wants // to pin in certain circumstances. 
-#[derive(Debug, Default, Serialize, Deserialize)] +#[derive(Debug, Default, Serialize, Deserialize, AbiExample)] pub struct PinnedVec { x: Vec, pinned: bool, diff --git a/perf/src/lib.rs b/perf/src/lib.rs index 8d277d7ad69778..83cefe1f319145 100644 --- a/perf/src/lib.rs +++ b/perf/src/lib.rs @@ -1,3 +1,4 @@ +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] pub mod cuda_runtime; pub mod data_budget; pub mod deduper; @@ -23,6 +24,9 @@ extern crate assert_matches; #[macro_use] extern crate solana_metrics; +#[macro_use] +extern crate solana_frozen_abi_macro; + fn is_rosetta_emulated() -> bool { #[cfg(target_os = "macos")] { diff --git a/perf/src/packet.rs b/perf/src/packet.rs index b030f04dae8ce6..fbb8a437d6bd32 100644 --- a/perf/src/packet.rs +++ b/perf/src/packet.rs @@ -18,7 +18,7 @@ pub const NUM_PACKETS: usize = 1024 * 8; pub const PACKETS_PER_BATCH: usize = 64; pub const NUM_RCVMMSGS: usize = 64; -#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, AbiExample)] pub struct PacketBatch { packets: PinnedVec, } diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs index 27c47d0df45103..87c44399e7fbc8 100644 --- a/perf/src/recycler.rs +++ b/perf/src/recycler.rs @@ -57,6 +57,15 @@ impl Default for RecyclerX { } } +#[cfg(RUSTC_WITH_SPECIALIZATION)] +impl solana_frozen_abi::abi_example::AbiExample + for RecyclerX> +{ + fn example() -> Self { + Self::default() + } +} + pub trait Reset { fn reset(&mut self); fn warm(&mut self, size_hint: usize); diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 167f2e4c4fa8c4..2500080aaf5e5a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5201,7 +5201,10 @@ dependencies = [ "nix", "rand 0.8.5", "rayon", + "rustc_version", "serde", + "solana-frozen-abi", + "solana-frozen-abi-macro", "solana-metrics", "solana-rayon-threadlimit", "solana-sdk", diff --git a/sdk/src/packet.rs b/sdk/src/packet.rs index 
b70d8adae8a4bb..faea9ab4753c67 100644 --- a/sdk/src/packet.rs +++ b/sdk/src/packet.rs @@ -36,7 +36,7 @@ bitflags! { } } -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, AbiExample)] #[repr(C)] pub struct Meta { pub size: usize, @@ -45,6 +45,21 @@ pub struct Meta { pub flags: PacketFlags, } +#[cfg(RUSTC_WITH_SPECIALIZATION)] +impl ::solana_frozen_abi::abi_example::AbiExample for PacketFlags { + fn example() -> Self { + Self::empty() + } +} + +#[cfg(RUSTC_WITH_SPECIALIZATION)] +impl ::solana_frozen_abi::abi_example::IgnoreAsHelper for PacketFlags {} + +#[cfg(RUSTC_WITH_SPECIALIZATION)] +impl ::solana_frozen_abi::abi_example::EvenAsOpaque for PacketFlags { + const TYPE_NAME_MATCHER: &'static str = "::_::InternalBitFlags"; +} + // serde_as is used as a work around because array isn't supported by serde // (and serde_bytes). // @@ -71,7 +86,7 @@ pub struct Meta { // ryoqun's dirty experiments: // https://github.com/ryoqun/serde-array-comparisons #[serde_as] -#[derive(Clone, Eq, Serialize, Deserialize)] +#[derive(Clone, Eq, Serialize, Deserialize, AbiExample)] #[repr(C)] pub struct Packet { // Bytes past Packet.meta.size are not valid to read from. 
From c588f25eca9b0277aea9f524aacfc35552a01b4b Mon Sep 17 00:00:00 2001 From: Brooks Date: Sat, 7 Oct 2023 14:29:42 -0400 Subject: [PATCH 277/407] Stops pushing legacy snapshot hashes to crds (#33576) --- core/src/snapshot_packager_service.rs | 18 ++-------- .../snapshot_gossip_manager.rs | 36 +------------------ 2 files changed, 4 insertions(+), 50 deletions(-) diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs index 4840e118231b1d..162d7405e068e0 100644 --- a/core/src/snapshot_packager_service.rs +++ b/core/src/snapshot_packager_service.rs @@ -2,7 +2,7 @@ mod snapshot_gossip_manager; use { crossbeam_channel::{Receiver, Sender}, snapshot_gossip_manager::SnapshotGossipManager, - solana_gossip::cluster_info::{ClusterInfo, MAX_LEGACY_SNAPSHOT_HASHES}, + solana_gossip::cluster_info::ClusterInfo, solana_measure::measure_us, solana_perf::thread::renice_this_thread, solana_runtime::{ @@ -39,25 +39,13 @@ impl SnapshotPackagerService { snapshot_config: SnapshotConfig, enable_gossip_push: bool, ) -> Self { - let max_full_snapshot_hashes = std::cmp::min( - MAX_LEGACY_SNAPSHOT_HASHES, - snapshot_config - .maximum_full_snapshot_archives_to_retain - .get(), - ); - let t_snapshot_packager = Builder::new() .name("solSnapshotPkgr".to_string()) .spawn(move || { info!("SnapshotPackagerService has started"); renice_this_thread(snapshot_config.packager_thread_niceness_adj).unwrap(); - let mut snapshot_gossip_manager = enable_gossip_push.then(|| { - SnapshotGossipManager::new( - cluster_info, - max_full_snapshot_hashes, - starting_snapshot_hashes, - ) - }); + let mut snapshot_gossip_manager = enable_gossip_push + .then(|| SnapshotGossipManager::new(cluster_info, starting_snapshot_hashes)); loop { if exit.load(Ordering::Relaxed) { diff --git a/core/src/snapshot_packager_service/snapshot_gossip_manager.rs b/core/src/snapshot_packager_service/snapshot_gossip_manager.rs index a2d7239b3197ae..d4ab9863642e09 100644 --- 
a/core/src/snapshot_packager_service/snapshot_gossip_manager.rs +++ b/core/src/snapshot_packager_service/snapshot_gossip_manager.rs @@ -4,7 +4,7 @@ use { snapshot_hash::{ FullSnapshotHash, IncrementalSnapshotHash, SnapshotHash, StartingSnapshotHashes, }, - snapshot_package::{retain_max_n_elements, SnapshotKind}, + snapshot_package::SnapshotKind, }, solana_sdk::{clock::Slot, hash::Hash}, std::sync::Arc, @@ -14,8 +14,6 @@ use { pub struct SnapshotGossipManager { cluster_info: Arc, latest_snapshot_hashes: Option, - max_legacy_full_snapshot_hashes: usize, - legacy_full_snapshot_hashes: Vec, } impl SnapshotGossipManager { @@ -24,14 +22,11 @@ impl SnapshotGossipManager { #[must_use] pub fn new( cluster_info: Arc, - max_legacy_full_snapshot_hashes: usize, starting_snapshot_hashes: Option, ) -> Self { let mut this = SnapshotGossipManager { cluster_info, latest_snapshot_hashes: None, - max_legacy_full_snapshot_hashes, - legacy_full_snapshot_hashes: Vec::default(), }; if let Some(starting_snapshot_hashes) = starting_snapshot_hashes { this.push_starting_snapshot_hashes(starting_snapshot_hashes); @@ -49,10 +44,6 @@ impl SnapshotGossipManager { ); } self.push_latest_snapshot_hashes_to_cluster(); - - // Handle legacy snapshot hashes here too - // Once LegacySnapshotHashes are removed from CRDS, also remove them here - self.push_legacy_full_snapshot_hash(starting_snapshot_hashes.full); } /// Push new snapshot hash to the cluster via CRDS @@ -78,10 +69,6 @@ impl SnapshotGossipManager { fn push_full_snapshot_hash(&mut self, full_snapshot_hash: FullSnapshotHash) { self.update_latest_full_snapshot_hash(full_snapshot_hash); self.push_latest_snapshot_hashes_to_cluster(); - - // Handle legacy snapshot hashes here too - // Once LegacySnapshotHashes are removed from CRDS, also remove them here - self.push_legacy_full_snapshot_hash(full_snapshot_hash); } /// Push new incremental snapshot hash to the cluster via CRDS @@ -146,22 +133,6 @@ impl SnapshotGossipManager { and a new error case has 
been added that has not been handled here.", ); } - - /// Add `full_snapshot_hash` to the vector of full snapshot hashes, then push that vector to - /// the cluster via CRDS. - fn push_legacy_full_snapshot_hash(&mut self, full_snapshot_hash: FullSnapshotHash) { - self.legacy_full_snapshot_hashes.push(full_snapshot_hash); - - retain_max_n_elements( - &mut self.legacy_full_snapshot_hashes, - self.max_legacy_full_snapshot_hashes, - ); - - self.cluster_info - .push_legacy_snapshot_hashes(clone_hashes_for_crds( - self.legacy_full_snapshot_hashes.as_slice(), - )); - } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] @@ -191,8 +162,3 @@ impl AsSnapshotHash for IncrementalSnapshotHash { &self.0 } } - -/// Clones and maps snapshot hashes into what CRDS expects -fn clone_hashes_for_crds(hashes: &[impl AsSnapshotHash]) -> Vec<(Slot, Hash)> { - hashes.iter().map(AsSnapshotHash::clone_for_crds).collect() -} From bd8cfc9923fcce0851ed433d91933898c33580b5 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Mon, 9 Oct 2023 03:12:52 +0900 Subject: [PATCH 278/407] fix typo in latest_validator_votes_for_frozen_banks.rs (#33585) upate -> update --- core/src/consensus/latest_validator_votes_for_frozen_banks.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/consensus/latest_validator_votes_for_frozen_banks.rs b/core/src/consensus/latest_validator_votes_for_frozen_banks.rs index f80183dc4660e2..ea5400aaacd6ec 100644 --- a/core/src/consensus/latest_validator_votes_for_frozen_banks.rs +++ b/core/src/consensus/latest_validator_votes_for_frozen_banks.rs @@ -292,7 +292,7 @@ mod tests { } // Case 6: Adding a vote for a new higher slot that *is* frozen - // should upate the state + // should update the state let frozen_hash = Hash::new_unique(); assert_eq!( latest_validator_votes_for_frozen_banks.check_add_vote( From 7afb11f1e6daa3e9bd93cfe203211de5b14ba56a Mon Sep 17 00:00:00 2001 From: DimAn Date: Mon, 9 Oct 2023 02:33:53 +0300 Subject: [PATCH 279/407] 
validator: skip health check (#33568) * validator: skip health check * keep `healthy` as a boolean --- validator/src/cli.rs | 10 ++++++++++ validator/src/main.rs | 7 ++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index cd3f3323589653..aba402b4257b3b 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1453,6 +1453,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .long("skip-new-snapshot-check") .help("Skip check for a new snapshot") ) + .arg( + Arg::with_name("skip_health_check") + .long("skip-health-check") + .help("Skip health check") + ) ) .subcommand( SubCommand::with_name("authorized-voter") @@ -1668,6 +1673,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .long("skip-new-snapshot-check") .help("Skip check for a new snapshot") ) + .arg( + Arg::with_name("skip_health_check") + .long("skip-health-check") + .help("Skip health check") + ) .after_help("Note: If this command exits with a non-zero status \ then this not a good time for a restart") ). 
diff --git a/validator/src/main.rs b/validator/src/main.rs index b97789061c9e3b..0c998b91c30309 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -118,6 +118,7 @@ fn wait_for_restart_window( min_idle_time_in_minutes: usize, max_delinquency_percentage: u8, skip_new_snapshot_check: bool, + skip_health_check: bool, ) -> Result<(), Box> { let sleep_interval = Duration::from_secs(5); @@ -161,7 +162,7 @@ fn wait_for_restart_window( seen_incremential_snapshot |= snapshot_slot_info_has_incremential; let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::processed())?; - let healthy = rpc_client.get_health().ok().is_some(); + let healthy = skip_health_check || rpc_client.get_health().ok().is_some(); let delinquent_stake_percentage = { let vote_accounts = rpc_client.get_vote_accounts()?; let current_stake: u64 = vote_accounts @@ -649,6 +650,7 @@ pub fn main() { let force = subcommand_matches.is_present("force"); let monitor = subcommand_matches.is_present("monitor"); let skip_new_snapshot_check = subcommand_matches.is_present("skip_new_snapshot_check"); + let skip_health_check = subcommand_matches.is_present("skip_health_check"); let max_delinquent_stake = value_t_or_exit!(subcommand_matches, "max_delinquent_stake", u8); @@ -659,6 +661,7 @@ pub fn main() { min_idle_time, max_delinquent_stake, skip_new_snapshot_check, + skip_health_check, ) .unwrap_or_else(|err| { println!("{err}"); @@ -777,6 +780,7 @@ pub fn main() { let max_delinquent_stake = value_t_or_exit!(subcommand_matches, "max_delinquent_stake", u8); let skip_new_snapshot_check = subcommand_matches.is_present("skip_new_snapshot_check"); + let skip_health_check = subcommand_matches.is_present("skip_health_check"); wait_for_restart_window( &ledger_path, @@ -784,6 +788,7 @@ pub fn main() { min_idle_time, max_delinquent_stake, skip_new_snapshot_check, + skip_health_check, ) .unwrap_or_else(|err| { println!("{err}"); From 1a2c0943db6519dc9519bfc74c2426332ba26850 Mon Sep 17 00:00:00 
2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 13:40:45 +0000 Subject: [PATCH 280/407] build(deps): bump byteorder from 1.4.3 to 1.5.0 (#33590) * build(deps): bump byteorder from 1.4.3 to 1.5.0 Bumps [byteorder](https://github.com/BurntSushi/byteorder) from 1.4.3 to 1.5.0. - [Changelog](https://github.com/BurntSushi/byteorder/blob/master/CHANGELOG.md) - [Commits](https://github.com/BurntSushi/byteorder/compare/1.4.3...1.5.0) --- updated-dependencies: - dependency-name: byteorder dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 38 +++++++++++++++++++------------------- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6cb8b7eccad260..bca7b91d7c420b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -890,9 +890,9 @@ dependencies = [ [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" diff --git a/Cargo.toml b/Cargo.toml index f5849604d0e3b3..d4d7cd9bde50e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -159,7 +159,7 @@ bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.4" bytemuck = "1.14.0" -byteorder = "1.4.3" +byteorder = "1.5.0" bytes = "1.5" bzip2 = "0.4.4" caps = "0.5.5" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2500080aaf5e5a..6bd4234b947cfa 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -613,7 +613,7 @@ checksum = 
"c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ "block-padding 0.1.5", "byte-tools", - "byteorder 1.4.3", + "byteorder 1.5.0", "generic-array 0.12.4", ] @@ -827,9 +827,9 @@ checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -981,7 +981,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" dependencies = [ "ascii", - "byteorder 1.4.3", + "byteorder 1.5.0", "either", "memchr", "unreachable", @@ -1177,7 +1177,7 @@ version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" dependencies = [ - "byteorder 1.4.3", + "byteorder 1.5.0", "digest 0.9.0", "rand_core 0.5.1", "serde", @@ -1913,7 +1913,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" dependencies = [ - "byteorder 1.4.3", + "byteorder 1.5.0", ] [[package]] @@ -2729,7 +2729,7 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" dependencies = [ - "byteorder 1.4.3", + "byteorder 1.5.0", "keccak", "rand_core 0.6.4", "zeroize", @@ -4474,7 +4474,7 @@ dependencies = [ "blake3", "bv", "bytemuck", - "byteorder 1.4.3", + "byteorder 1.5.0", "bzip2", "crossbeam-channel", "dashmap", @@ -4606,7 +4606,7 @@ name = "solana-bpf-loader-program" version = "1.18.0" dependencies = [ "bincode", - "byteorder 1.4.3", + "byteorder 1.5.0", 
"libsecp256k1 0.6.0", "log", "scopeguard", @@ -4901,7 +4901,7 @@ name = "solana-faucet" version = "1.18.0" dependencies = [ "bincode", - "byteorder 1.4.3", + "byteorder 1.5.0", "clap 2.33.3", "crossbeam-channel", "log", @@ -5052,7 +5052,7 @@ dependencies = [ "assert_matches", "bincode", "bitflags 2.3.3", - "byteorder 1.4.3", + "byteorder 1.5.0", "chrono", "chrono-humanize", "crossbeam-channel", @@ -5525,7 +5525,7 @@ dependencies = [ "blake3", "bv", "bytemuck", - "byteorder 1.4.3", + "byteorder 1.5.0", "bzip2", "crossbeam-channel", "dashmap", @@ -5595,7 +5595,7 @@ name = "solana-sbf-programs" version = "1.18.0" dependencies = [ "bincode", - "byteorder 1.4.3", + "byteorder 1.5.0", "elf", "itertools", "log", @@ -5690,7 +5690,7 @@ dependencies = [ name = "solana-sbf-rust-dep-crate" version = "1.18.0" dependencies = [ - "byteorder 1.4.3", + "byteorder 1.5.0", "solana-program", ] @@ -6023,7 +6023,7 @@ dependencies = [ "borsh 0.10.3", "bs58", "bytemuck", - "byteorder 1.4.3", + "byteorder 1.5.0", "chrono", "derivation-path", "digest 0.10.7", @@ -6479,7 +6479,7 @@ dependencies = [ "base64 0.21.4", "bincode", "bytemuck", - "byteorder 1.4.3", + "byteorder 1.5.0", "curve25519-dalek", "getrandom 0.1.14", "itertools", @@ -6504,7 +6504,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "103318aa365ff7caa8cf534f2246b5eb7e5b34668736d52b1266b143f7a21196" dependencies = [ - "byteorder 1.4.3", + "byteorder 1.5.0", "combine", "goblin", "hash32", @@ -6851,7 +6851,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "225e483f02d0ad107168dc57381a8a40c3aeea6abe47f37506931f861643cfa8" dependencies = [ "bitflags 1.3.2", - "byteorder 1.4.3", + "byteorder 1.5.0", "libc", "thiserror", "walkdir", @@ -7391,7 +7391,7 @@ version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ - "byteorder 1.4.3", + 
"byteorder 1.5.0", "bytes", "data-encoding", "http", From d74de6780e2975472796a6a752b362152cd008a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 13:42:44 +0000 Subject: [PATCH 281/407] build(deps): bump proc-macro2 from 1.0.67 to 1.0.69 (#33591) * build(deps): bump proc-macro2 from 1.0.67 to 1.0.69 Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.67 to 1.0.69. - [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.67...1.0.69) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bca7b91d7c420b..1216ba86e7ed92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3978,9 +3978,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] diff --git a/Cargo.toml b/Cargo.toml index d4d7cd9bde50e6..05c6241523883a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -259,7 +259,7 @@ pickledb = { version = "0.5.1", default-features = false } pkcs8 = "0.8.0" predicates = "2.1" pretty-hex = "0.3.0" -proc-macro2 = "1.0.67" +proc-macro2 = "1.0.69" proptest = "1.2" prost = "0.11.9" prost-build = "0.11.9" diff --git a/programs/sbf/Cargo.lock 
b/programs/sbf/Cargo.lock index 6bd4234b947cfa..6fbb354a4818e7 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3504,9 +3504,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] From 46cf79a9c47261a3eb17efafe2069ade9ff7c6db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 14:21:09 +0000 Subject: [PATCH 282/407] build(deps): bump num-traits from 0.2.16 to 0.2.17 (#33589) * build(deps): bump num-traits from 0.2.16 to 0.2.17 Bumps [num-traits](https://github.com/rust-num/num-traits) from 0.2.16 to 0.2.17. - [Changelog](https://github.com/rust-num/num-traits/blob/master/RELEASES.md) - [Commits](https://github.com/rust-num/num-traits/compare/num-traits-0.2.16...num-traits-0.2.17) --- updated-dependencies: - dependency-name: num-traits dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- programs/sbf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1216ba86e7ed92..378a93e91df240 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3345,9 +3345,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", "libm", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 6fbb354a4818e7..25c3c36583bf4a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2964,9 +2964,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", ] From 72574dac02bfecded82a544655e8bfd83154caec Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 9 Oct 2023 09:38:00 -0500 Subject: [PATCH 283/407] Assert acount hash mmap file capacity > 0 (#33575) assert mmap capacity > 0 Co-authored-by: HaoranYi --- accounts-db/src/accounts_hash.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 66a77c81883300..222f2b1a640984 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -97,6 +97,7 @@ impl AccountHashesFile { // Theoretical performance optimization: write a zero to the end of // 
the file so that we won't have to resize it later, which may be // expensive. + assert!(self.capacity > 0); data.seek(SeekFrom::Start((self.capacity - 1) as u64)) .unwrap(); data.write_all(&[0]).unwrap(); From 1d91b60a57b7d000fe0a72761c320d6f855b7c54 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Mon, 9 Oct 2023 15:22:34 +0000 Subject: [PATCH 284/407] removes unused legacy-snapshot-hashes api in gossip (#33593) https://github.com/solana-labs/solana/pull/33576 stops broadcasting legacy snapshot hashes over gossip, and this commit removes unused legacy snapshot hashed code in gossip. --- gossip/src/cluster_info.rs | 75 ++++---------------------------------- gossip/src/crds.rs | 8 ++-- gossip/src/crds_entry.rs | 11 +----- gossip/src/crds_value.rs | 8 ++-- 4 files changed, 17 insertions(+), 85 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index b0b99b1c02dca4..113b387512608d 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -32,9 +32,8 @@ use { CrdsFilter, CrdsTimeouts, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS, }, crds_value::{ - self, AccountsHashes, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, - LegacySnapshotHashes, LowestSlot, NodeInstance, SnapshotHashes, Version, Vote, - MAX_WALLCLOCK, + self, AccountsHashes, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, + NodeInstance, SnapshotHashes, Version, Vote, MAX_WALLCLOCK, }, duplicate_shred::DuplicateShred, epoch_slots::EpochSlots, @@ -118,10 +117,6 @@ pub(crate) const DUPLICATE_SHRED_MAX_PAYLOAD_SIZE: usize = PACKET_DATA_SIZE - 11 /// such that the serialized size of the push/pull message stays below /// PACKET_DATA_SIZE. pub const MAX_ACCOUNTS_HASHES: usize = 16; -/// Maximum number of hashes in LegacySnapshotHashes a node publishes -/// such that the serialized size of the push/pull message stays below -/// PACKET_DATA_SIZE. 
-pub const MAX_LEGACY_SNAPSHOT_HASHES: usize = 16; /// Maximum number of incremental hashes in SnapshotHashes a node publishes /// such that the serialized size of the push/pull message stays below /// PACKET_DATA_SIZE. @@ -997,20 +992,6 @@ impl ClusterInfo { self.push_message(CrdsValue::new_signed(message, &self.keypair())); } - pub fn push_legacy_snapshot_hashes(&self, snapshot_hashes: Vec<(Slot, Hash)>) { - if snapshot_hashes.len() > MAX_LEGACY_SNAPSHOT_HASHES { - warn!( - "snapshot hashes too large, ignored: {}", - snapshot_hashes.len(), - ); - return; - } - - let message = - CrdsData::LegacySnapshotHashes(LegacySnapshotHashes::new(self.id(), snapshot_hashes)); - self.push_message(CrdsValue::new_signed(message, &self.keypair())); - } - pub fn push_snapshot_hashes( &self, full: (Slot, Hash), @@ -1208,15 +1189,6 @@ impl ClusterInfo { .map(map) } - pub fn get_legacy_snapshot_hash_for_node(&self, pubkey: &Pubkey, map: F) -> Option - where - F: FnOnce(&Vec<(Slot, Hash)>) -> Y, - { - let gossip_crds = self.gossip.crds.read().unwrap(); - let hashes = &gossip_crds.get::<&LegacySnapshotHashes>(*pubkey)?.hashes; - Some(map(hashes)) - } - pub fn get_snapshot_hashes_for_node(&self, pubkey: &Pubkey) -> Option { self.gossip .crds @@ -3413,36 +3385,6 @@ mod tests { } } - #[test] - fn test_max_legecy_snapshot_hashes_with_push_messages() { - let mut rng = rand::thread_rng(); - for _ in 0..256 { - let snapshot_hash = LegacySnapshotHashes::new_rand(&mut rng, None); - let crds_value = CrdsValue::new_signed( - CrdsData::LegacySnapshotHashes(snapshot_hash), - &Keypair::new(), - ); - let message = Protocol::PushMessage(Pubkey::new_unique(), vec![crds_value]); - let socket = new_rand_socket_addr(&mut rng); - assert!(Packet::from_data(Some(&socket), message).is_ok()); - } - } - - #[test] - fn test_max_legacy_snapshot_hashes_with_pull_responses() { - let mut rng = rand::thread_rng(); - for _ in 0..256 { - let snapshot_hash = LegacySnapshotHashes::new_rand(&mut rng, None); - let 
crds_value = CrdsValue::new_signed( - CrdsData::LegacySnapshotHashes(snapshot_hash), - &Keypair::new(), - ); - let response = Protocol::PullResponse(Pubkey::new_unique(), vec![crds_value]); - let socket = new_rand_socket_addr(&mut rng); - assert!(Packet::from_data(Some(&socket), response).is_ok()); - } - } - #[test] fn test_max_snapshot_hashes_with_push_messages() { let mut rng = rand::thread_rng(); @@ -4110,16 +4052,15 @@ mod tests { fn test_split_messages_packet_size() { // Test that if a value is smaller than payload size but too large to be wrapped in a vec // that it is still dropped - let mut value = - CrdsValue::new_unsigned(CrdsData::LegacySnapshotHashes(LegacySnapshotHashes { - from: Pubkey::default(), - hashes: vec![], - wallclock: 0, - })); + let mut value = CrdsValue::new_unsigned(CrdsData::AccountsHashes(AccountsHashes { + from: Pubkey::default(), + hashes: vec![], + wallclock: 0, + })); let mut i = 0; while value.size() < PUSH_MESSAGE_MAX_PAYLOAD_SIZE as u64 { - value.data = CrdsData::LegacySnapshotHashes(LegacySnapshotHashes { + value.data = CrdsData::AccountsHashes(AccountsHashes { from: Pubkey::default(), hashes: vec![(0, Hash::default()); i], wallclock: 0, diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs index d8ab6e45b3d593..b20ba9dfb15647 100644 --- a/gossip/src/crds.rs +++ b/gossip/src/crds.rs @@ -759,7 +759,7 @@ fn should_report_message_signature(signature: &Signature) -> bool { mod tests { use { super::*, - crate::crds_value::{new_rand_timestamp, LegacySnapshotHashes, NodeInstance}, + crate::crds_value::{new_rand_timestamp, AccountsHashes, NodeInstance}, rand::{thread_rng, Rng, SeedableRng}, rand_chacha::ChaChaRng, rayon::ThreadPoolBuilder, @@ -1341,8 +1341,8 @@ mod tests { ); assert_eq!(crds.get_shred_version(&pubkey), Some(8)); // Add other crds values with the same pubkey. 
- let val = LegacySnapshotHashes::new_rand(&mut rng, Some(pubkey)); - let val = CrdsData::LegacySnapshotHashes(val); + let val = AccountsHashes::new_rand(&mut rng, Some(pubkey)); + let val = CrdsData::AccountsHashes(val); let val = CrdsValue::new_unsigned(val); assert_eq!( crds.insert(val, timestamp(), GossipRoute::LocalMessage), @@ -1355,7 +1355,7 @@ mod tests { assert_eq!(crds.get::<&ContactInfo>(pubkey), None); assert_eq!(crds.get_shred_version(&pubkey), Some(8)); // Remove the remaining entry with the same pubkey. - crds.remove(&CrdsValueLabel::LegacySnapshotHashes(pubkey), timestamp()); + crds.remove(&CrdsValueLabel::AccountsHashes(pubkey), timestamp()); assert_eq!(crds.get_records(&pubkey).count(), 0); assert_eq!(crds.get_shred_version(&pubkey), None); } diff --git a/gossip/src/crds_entry.rs b/gossip/src/crds_entry.rs index ccb8ed310eea8a..526f04eb56aab8 100644 --- a/gossip/src/crds_entry.rs +++ b/gossip/src/crds_entry.rs @@ -2,8 +2,7 @@ use { crate::{ crds::VersionedCrdsValue, crds_value::{ - CrdsData, CrdsValue, CrdsValueLabel, LegacySnapshotHashes, LegacyVersion, LowestSlot, - SnapshotHashes, Version, + CrdsData, CrdsValue, CrdsValueLabel, LegacyVersion, LowestSlot, SnapshotHashes, Version, }, legacy_contact_info::LegacyContactInfo, }, @@ -57,11 +56,6 @@ impl_crds_entry!(LegacyContactInfo, CrdsData::LegacyContactInfo(node), node); impl_crds_entry!(LegacyVersion, CrdsData::LegacyVersion(version), version); impl_crds_entry!(LowestSlot, CrdsData::LowestSlot(_, slot), slot); impl_crds_entry!(Version, CrdsData::Version(version), version); -impl_crds_entry!( - LegacySnapshotHashes, - CrdsData::LegacySnapshotHashes(snapshot_hashes), - snapshot_hashes -); impl_crds_entry!( SnapshotHashes, CrdsData::SnapshotHashes(snapshot_hashes), @@ -118,9 +112,6 @@ mod tests { CrdsData::LegacyVersion(version) => { assert_eq!(crds.get::<&LegacyVersion>(key), Some(version)) } - CrdsData::LegacySnapshotHashes(hash) => { - assert_eq!(crds.get::<&LegacySnapshotHashes>(key), 
Some(hash)) - } CrdsData::SnapshotHashes(hash) => { assert_eq!(crds.get::<&SnapshotHashes>(key), Some(hash)) } diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 87ba34604e61d2..125555ea51eeb4 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -1,6 +1,6 @@ use { crate::{ - cluster_info::MAX_LEGACY_SNAPSHOT_HASHES, + cluster_info::MAX_ACCOUNTS_HASHES, contact_info::ContactInfo, deprecated, duplicate_shred::{DuplicateShred, DuplicateShredIndex, MAX_DUPLICATE_SHREDS}, @@ -85,7 +85,7 @@ pub enum CrdsData { LegacyContactInfo(LegacyContactInfo), Vote(VoteIndex, Vote), LowestSlot(/*DEPRECATED:*/ u8, LowestSlot), - LegacySnapshotHashes(LegacySnapshotHashes), + LegacySnapshotHashes(LegacySnapshotHashes), // Deprecated AccountsHashes(AccountsHashes), EpochSlots(EpochSlotsIndex, EpochSlots), LegacyVersion(LegacyVersion), @@ -195,7 +195,7 @@ impl AccountsHashes { /// New random AccountsHashes for tests and benchmarks. pub(crate) fn new_rand(rng: &mut R, pubkey: Option) -> Self { - let num_hashes = rng.gen_range(0..MAX_LEGACY_SNAPSHOT_HASHES) + 1; + let num_hashes = rng.gen_range(0..MAX_ACCOUNTS_HASHES) + 1; let hashes = std::iter::repeat_with(|| { let slot = 47825632 + rng.gen_range(0..512); let hash = Hash::new_unique(); @@ -211,7 +211,7 @@ impl AccountsHashes { } } -pub type LegacySnapshotHashes = AccountsHashes; +type LegacySnapshotHashes = AccountsHashes; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, AbiExample)] pub struct SnapshotHashes { From 2d5496a5641c7292c737ca82fd18502e352f4335 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:51:44 -0700 Subject: [PATCH 285/407] Fix wen_restart proto compilation: (#33596) * Fix wen_restart proto compilation: - should recompile when proto changes - no need for customization * There is only one proto file, no need for loop. 
--- wen-restart/build.rs | 32 +++++--------------------------- 1 file changed, 5 insertions(+), 27 deletions(-) diff --git a/wen-restart/build.rs b/wen-restart/build.rs index 4360117bb445d4..30fdc64a9bcfe0 100644 --- a/wen-restart/build.rs +++ b/wen-restart/build.rs @@ -1,9 +1,6 @@ extern crate rustc_version; -use { - rustc_version::{version_meta, Channel}, - std::io::Result, -}; +use std::io::Result; fn main() -> Result<()> { const PROTOC_ENVAR: &str = "PROTOC"; @@ -12,30 +9,11 @@ fn main() -> Result<()> { std::env::set_var(PROTOC_ENVAR, protobuf_src::protoc()); } - // Copied and adapted from - // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example - // Licensed under Apache-2.0 + MIT - match version_meta().unwrap().channel { - Channel::Stable => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Beta => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Nightly => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - Channel::Dev => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - // See https://github.com/solana-labs/solana/issues/11055 - // We may be running the custom `rust-bpf-builder` toolchain, - // which currently needs `#![feature(proc_macro_hygiene)]` to - // be applied. - println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); - } - } + let proto_base_path = std::path::PathBuf::from("proto"); + let proto = proto_base_path.join("wen_restart.proto"); + println!("cargo:rerun-if-changed={}", proto.display()); // Generate rust files from protos. 
- prost_build::compile_protos(&["proto/wen_restart.proto"], &["proto/"])?; + prost_build::compile_protos(&[proto], &[proto_base_path])?; Ok(()) } From c9247190407664352293b593eaff03a96048f032 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 9 Oct 2023 14:16:15 -0400 Subject: [PATCH 286/407] Removes hash param from AccountsCache::store() (#33598) --- accounts-db/src/accounts_cache.rs | 9 ++------- accounts-db/src/accounts_db.rs | 1 - 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/accounts-db/src/accounts_cache.rs b/accounts-db/src/accounts_cache.rs index 48aefc98413311..dcb2c3fdd517cf 100644 --- a/accounts-db/src/accounts_cache.rs +++ b/accounts-db/src/accounts_cache.rs @@ -8,7 +8,6 @@ use { pubkey::Pubkey, }, std::{ - borrow::Borrow, collections::BTreeSet, ops::Deref, sync::{ @@ -70,14 +69,13 @@ impl SlotCacheInner { &self, pubkey: &Pubkey, account: AccountSharedData, - hash: Option>, slot: Slot, include_slot_in_hash: IncludeSlotInHash, ) -> CachedAccount { let data_len = account.data().len() as u64; let item = Arc::new(CachedAccountInner { account, - hash: RwLock::new(hash.map(|h| *h.borrow())), + hash: RwLock::new(None), slot, pubkey: *pubkey, include_slot_in_hash, @@ -233,7 +231,6 @@ impl AccountsCache { slot: Slot, pubkey: &Pubkey, account: AccountSharedData, - hash: Option>, include_slot_in_hash: IncludeSlotInHash, ) -> CachedAccount { let slot_cache = self.slot_cache(slot).unwrap_or_else(|| @@ -247,7 +244,7 @@ impl AccountsCache { .or_insert(self.new_inner()) .clone()); - slot_cache.insert(pubkey, account, hash, slot, include_slot_in_hash) + slot_cache.insert(pubkey, account, slot, include_slot_in_hash) } pub fn load(&self, slot: Slot, pubkey: &Pubkey) -> Option { @@ -351,7 +348,6 @@ pub mod tests { inserted_slot, &Pubkey::new_unique(), AccountSharedData::new(1, 0, &Pubkey::default()), - Some(&Hash::default()), INCLUDE_SLOT_IN_HASH_TESTS, ); // If the cache is told the size limit is 0, it should return the one slot @@ -370,7 +366,6 @@ pub mod 
tests { inserted_slot, &Pubkey::new_unique(), AccountSharedData::new(1, 0, &Pubkey::default()), - Some(&Hash::default()), INCLUDE_SLOT_IN_HASH_TESTS, ); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 4516643fcc35f8..deb04fd9209aac 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6775,7 +6775,6 @@ impl AccountsDb { slot, accounts_and_meta_to_store.pubkey(i), account, - None::<&Hash>, include_slot_in_hash, ); // hash this account in the bg From 052677595c4314d2d6e9a258c2556393575cf70c Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 9 Oct 2023 11:47:39 -0700 Subject: [PATCH 287/407] in hash calc, delete old cache files that will not be used earlier (#33432) * in hash calc, delete old cache files that will not be used earlier * only delete if supposed to * fmt --- accounts-db/src/accounts_db.rs | 179 +++++++++++++++++------------ accounts-db/src/cache_hash_data.rs | 28 +++-- 2 files changed, 121 insertions(+), 86 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index deb04fd9209aac..4291cdfe9a24ad 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -181,6 +181,13 @@ impl<'a> StoreTo<'a> { } } +enum ScanAccountStorageResult { + /// this data has already been scanned and cached + CacheFileAlreadyExists(CacheHashDataFileReference), + /// this data needs to be scanned and cached + CacheFileNeedsToBeCreated((String, Range)), +} + #[derive(Default, Debug)] /// hold alive accounts /// alive means in the accounts index @@ -7222,90 +7229,114 @@ impl AccountsDb { .saturating_sub(slots_per_epoch); stats.scan_chunks = splitter.chunk_count; - (0..splitter.chunk_count) - .into_par_iter() - .map(|chunk| { - let mut scanner = scanner.clone(); + let cache_files = (0..splitter.chunk_count) + .into_par_iter() + .filter_map(|chunk| { let range_this_chunk = splitter.get_slot_range(chunk)?; - let file_name = { - let mut 
load_from_cache = true; - let mut hasher = hash_map::DefaultHasher::new(); - bin_range.start.hash(&mut hasher); - bin_range.end.hash(&mut hasher); - let is_first_scan_pass = bin_range.start == 0; - - // calculate hash representing all storages in this chunk - for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { - if is_first_scan_pass && slot < one_epoch_old { - self.update_old_slot_stats(stats, storage); - } - if !Self::hash_storage_info(&mut hasher, storage, slot) { - load_from_cache = false; - break; - } + let mut load_from_cache = true; + let mut hasher = hash_map::DefaultHasher::new(); + bin_range.start.hash(&mut hasher); + bin_range.end.hash(&mut hasher); + let is_first_scan_pass = bin_range.start == 0; + + // calculate hash representing all storages in this chunk + let mut empty = true; + for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { + empty = false; + if is_first_scan_pass && slot < one_epoch_old { + self.update_old_slot_stats(stats, storage); } - // we have a hash value for the storages in this chunk - // so, build a file name: - let hash = hasher.finish(); - let file_name = format!( - "{}.{}.{}.{}.{:016x}", - range_this_chunk.start, - range_this_chunk.end, - bin_range.start, - bin_range.end, - hash - ); - if load_from_cache { - if let Ok(mapped_file) = - cache_hash_data.get_file_reference_to_map_later(&file_name) - { - return Some(mapped_file); - } + if !Self::hash_storage_info(&mut hasher, storage, slot) { + load_from_cache = false; + break; + } + } + if empty { + return None; + } + // we have a hash value for the storages in this chunk + // so, build a file name: + let hash = hasher.finish(); + let file_name = format!( + "{}.{}.{}.{}.{:016x}", + range_this_chunk.start, + range_this_chunk.end, + bin_range.start, + bin_range.end, + hash + ); + if load_from_cache { + if let Ok(mapped_file) = + cache_hash_data.get_file_reference_to_map_later(&file_name) + { + return 
Some(ScanAccountStorageResult::CacheFileAlreadyExists( + mapped_file, + )); } + } - // fall through and load normally - we failed to load from a cache file - file_name - }; + // fall through and load normally - we failed to load from a cache file but there are storages present + Some(ScanAccountStorageResult::CacheFileNeedsToBeCreated(( + file_name, + range_this_chunk, + ))) + }) + .collect::>(); - let mut init_accum = true; - // load from cache failed, so create the cache file for this chunk - for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { - let ancient = slot < oldest_non_ancient_slot; - let (_, scan_us) = measure_us!(if let Some(storage) = storage { - if init_accum { - let range = bin_range.end - bin_range.start; - scanner.init_accum(range); - init_accum = false; - } - scanner.set_slot(slot); + // deletes the old files that will not be used before creating new ones + cache_hash_data.delete_old_cache_files(); - Self::scan_single_account_storage(storage, &mut scanner); - }); - if ancient { - stats - .sum_ancient_scans_us - .fetch_add(scan_us, Ordering::Relaxed); - stats.count_ancient_scans.fetch_add(1, Ordering::Relaxed); - stats - .longest_ancient_scan_us - .fetch_max(scan_us, Ordering::Relaxed); + cache_files + .into_par_iter() + .map(|chunk| { + match chunk { + ScanAccountStorageResult::CacheFileAlreadyExists(file) => Some(file), + ScanAccountStorageResult::CacheFileNeedsToBeCreated(( + file_name, + range_this_chunk, + )) => { + let mut scanner = scanner.clone(); + let mut init_accum = true; + // load from cache failed, so create the cache file for this chunk + for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { + let ancient = slot < oldest_non_ancient_slot; + let (_, scan_us) = measure_us!(if let Some(storage) = storage { + if init_accum { + let range = bin_range.end - bin_range.start; + scanner.init_accum(range); + init_accum = false; + } + scanner.set_slot(slot); + + Self::scan_single_account_storage(storage, 
&mut scanner); + }); + if ancient { + stats + .sum_ancient_scans_us + .fetch_add(scan_us, Ordering::Relaxed); + stats.count_ancient_scans.fetch_add(1, Ordering::Relaxed); + stats + .longest_ancient_scan_us + .fetch_max(scan_us, Ordering::Relaxed); + } + } + (!init_accum) + .then(|| { + let r = scanner.scanning_complete(); + assert!(!file_name.is_empty()); + (!r.is_empty() && r.iter().any(|b| !b.is_empty())).then(|| { + // error if we can't write this + cache_hash_data.save(&file_name, &r).unwrap(); + cache_hash_data + .get_file_reference_to_map_later(&file_name) + .unwrap() + }) + }) + .flatten() } } - (!init_accum) - .then(|| { - let r = scanner.scanning_complete(); - assert!(!file_name.is_empty()); - (!r.is_empty() && r.iter().any(|b| !b.is_empty())).then(|| { - // error if we can't write this - cache_hash_data.save(&file_name, &r).unwrap(); - cache_hash_data - .get_file_reference_to_map_later(&file_name) - .unwrap() - }) - }) - .flatten() }) .filter_map(|x| x) .collect() diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index 630d650b36f2b9..5ccb478620680c 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -198,9 +198,7 @@ pub(crate) struct CacheHashData { impl Drop for CacheHashData { fn drop(&mut self) { - if self.should_delete_old_cache_files_on_drop { - self.delete_old_cache_files(); - } + self.delete_old_cache_files(); self.stats.report(); } } @@ -224,18 +222,24 @@ impl CacheHashData { result.get_cache_files(); result } - fn delete_old_cache_files(&self) { - let old_cache_files = std::mem::take(&mut *self.pre_existing_cache_files.lock().unwrap()); - if !old_cache_files.is_empty() { - self.stats - .unused_cache_files - .fetch_add(old_cache_files.len(), Ordering::Relaxed); - for file_name in old_cache_files.iter() { - let result = self.cache_dir.join(file_name); - let _ = fs::remove_file(result); + + /// delete all pre-existing files that will not be used + pub(crate) fn 
delete_old_cache_files(&self) { + if self.should_delete_old_cache_files_on_drop { + let old_cache_files = + std::mem::take(&mut *self.pre_existing_cache_files.lock().unwrap()); + if !old_cache_files.is_empty() { + self.stats + .unused_cache_files + .fetch_add(old_cache_files.len(), Ordering::Relaxed); + for file_name in old_cache_files.iter() { + let result = self.cache_dir.join(file_name); + let _ = fs::remove_file(result); + } } } } + fn get_cache_files(&self) { if self.cache_dir.is_dir() { let dir = fs::read_dir(&self.cache_dir); From fc73813db278407da85fab555b835dfffde0f714 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 9 Oct 2023 16:00:52 -0400 Subject: [PATCH 288/407] Adds AccountHash newtype (#33597) --- accounts-db/src/accounts_cache.rs | 12 ++--- accounts-db/src/accounts_db.rs | 65 ++++++++++++++------------- accounts-db/src/accounts_hash.rs | 36 ++++++++++----- runtime/src/bank/bank_hash_details.rs | 11 +++-- 4 files changed, 74 insertions(+), 50 deletions(-) diff --git a/accounts-db/src/accounts_cache.rs b/accounts-db/src/accounts_cache.rs index dcb2c3fdd517cf..04d1ef9d736bcc 100644 --- a/accounts-db/src/accounts_cache.rs +++ b/accounts-db/src/accounts_cache.rs @@ -1,10 +1,12 @@ use { - crate::accounts_db::IncludeSlotInHash, + crate::{ + accounts_db::{AccountsDb, IncludeSlotInHash}, + accounts_hash::AccountHash, + }, dashmap::DashMap, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, clock::Slot, - hash::Hash, pubkey::Pubkey, }, std::{ @@ -141,7 +143,7 @@ pub type CachedAccount = Arc; #[derive(Debug)] pub struct CachedAccountInner { pub account: AccountSharedData, - hash: RwLock>, + hash: RwLock>, slot: Slot, pubkey: Pubkey, /// temporarily here during feature activation @@ -150,13 +152,13 @@ pub struct CachedAccountInner { } impl CachedAccountInner { - pub fn hash(&self) -> Hash { + pub fn hash(&self) -> AccountHash { let hash = self.hash.read().unwrap(); match *hash { Some(hash) => hash, None => { drop(hash); - let hash = 
crate::accounts_db::AccountsDb::hash_account( + let hash = AccountsDb::hash_account( self.slot, &self.account, &self.pubkey, diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 4291cdfe9a24ad..e82a2edf080526 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -33,7 +33,7 @@ use { accounts_cache::{AccountsCache, CachedAccount, SlotCache}, accounts_file::{AccountsFile, AccountsFileError}, accounts_hash::{ - AccountsDeltaHash, AccountsHash, AccountsHashKind, AccountsHasher, + AccountHash, AccountsDeltaHash, AccountsHash, AccountsHashKind, AccountsHasher, CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, IncrementalAccountsHash, SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash, ZeroLamportAccounts, @@ -894,9 +894,9 @@ pub enum LoadedAccount<'a> { } impl<'a> LoadedAccount<'a> { - pub fn loaded_hash(&self) -> Hash { + pub fn loaded_hash(&self) -> AccountHash { match self { - LoadedAccount::Stored(stored_account_meta) => *stored_account_meta.hash(), + LoadedAccount::Stored(stored_account_meta) => AccountHash(*stored_account_meta.hash()), LoadedAccount::Cached(cached_account) => cached_account.hash(), } } @@ -913,7 +913,7 @@ impl<'a> LoadedAccount<'a> { slot: Slot, pubkey: &Pubkey, include_slot: IncludeSlotInHash, - ) -> Hash { + ) -> AccountHash { match self { LoadedAccount::Stored(stored_account_meta) => AccountsDb::hash_account( slot, @@ -2392,7 +2392,7 @@ impl<'a> AppendVecScan for ScanState<'a> { let balance = loaded_account.lamports(); let mut loaded_hash = loaded_account.loaded_hash(); - let hash_is_missing = loaded_hash == Hash::default(); + let hash_is_missing = loaded_hash == AccountHash(Hash::default()); if (self.config.check_hash || hash_is_missing) && !AccountsDb::is_filler_account_helper(pubkey, self.filler_account_suffix) { @@ -2406,13 +2406,13 @@ impl<'a> AppendVecScan for ScanState<'a> { } else if self.config.check_hash && computed_hash != loaded_hash { info!( 
"hash mismatch found: computed: {}, loaded: {}, pubkey: {}", - computed_hash, loaded_hash, pubkey + computed_hash.0, loaded_hash.0, pubkey ); self.mismatch_found.fetch_add(1, Ordering::Relaxed); } } let source_item = CalculateHashIntermediate { - hash: loaded_hash, + hash: loaded_hash.0, lamports: balance, pubkey: *pubkey, }; @@ -2429,7 +2429,7 @@ impl<'a> AppendVecScan for ScanState<'a> { #[derive(Clone, Debug, Eq, PartialEq)] pub struct PubkeyHashAccount { pub pubkey: Pubkey, - pub hash: Hash, + pub hash: AccountHash, pub account: AccountSharedData, } @@ -5567,7 +5567,7 @@ impl AccountsDb { pubkey: &Pubkey, max_root: Option, load_hint: LoadHint, - ) -> Option { + ) -> Option { let (slot, storage_location, _maybe_account_accesor) = self.read_index_for_accessor_or_load_slow(ancestors, pubkey, max_root, false)?; // Notice the subtle `?` at previous line, we bail out pretty early if missing. @@ -6185,7 +6185,7 @@ impl AccountsDb { account: &T, pubkey: &Pubkey, include_slot: IncludeSlotInHash, - ) -> Hash { + ) -> AccountHash { Self::hash_account_data( slot, account.lamports(), @@ -6207,9 +6207,9 @@ impl AccountsDb { data: &[u8], pubkey: &Pubkey, include_slot: IncludeSlotInHash, - ) -> Hash { + ) -> AccountHash { if lamports == 0 { - return Hash::default(); + return AccountHash(Hash::default()); } let mut hasher = blake3::Hasher::new(); @@ -6239,7 +6239,7 @@ impl AccountsDb { hasher.update(owner.as_ref()); hasher.update(pubkey.as_ref()); - Hash::new_from_array(hasher.finalize().into()) + AccountHash(Hash::new_from_array(hasher.finalize().into())) } fn bulk_assign_write_version(&self, count: usize) -> StoredMetaWriteVersion { @@ -6560,7 +6560,7 @@ impl AccountsDb { } } - let (accounts, hashes): (Vec<(&Pubkey, &AccountSharedData)>, Vec) = iter_items + let (accounts, hashes): (Vec<(&Pubkey, &AccountSharedData)>, Vec) = iter_items .iter() .filter_map(|iter_item| { let key = iter_item.key(); @@ -7025,7 +7025,7 @@ impl AccountsDb { |loaded_account| { let mut loaded_hash = 
loaded_account.loaded_hash(); let balance = loaded_account.lamports(); - let hash_is_missing = loaded_hash == Hash::default(); + let hash_is_missing = loaded_hash == AccountHash(Hash::default()); if (config.check_hash || hash_is_missing) && !self.is_filler_account(pubkey) { let computed_hash = loaded_account.compute_hash(*slot, pubkey, config.include_slot_in_hash); @@ -7033,7 +7033,7 @@ impl AccountsDb { loaded_hash = computed_hash; } else if config.check_hash && computed_hash != loaded_hash { - info!("hash mismatch found: computed: {}, loaded: {}, pubkey: {}", computed_hash, loaded_hash, pubkey); + info!("hash mismatch found: computed: {}, loaded: {}, pubkey: {}", computed_hash.0, loaded_hash.0, pubkey); mismatch_found .fetch_add(1, Ordering::Relaxed); return None; @@ -7041,7 +7041,7 @@ impl AccountsDb { } sum += balance as u128; - Some(loaded_hash) + Some(loaded_hash.0) }, ) } else { @@ -7916,17 +7916,20 @@ impl AccountsDb { /// 1. pubkey, hash pairs for the slot /// 2. us spent scanning /// 3. 
Measure started when we began accumulating - pub fn get_pubkey_hash_for_slot(&self, slot: Slot) -> (Vec<(Pubkey, Hash)>, u64, Measure) { + pub fn get_pubkey_hash_for_slot( + &self, + slot: Slot, + ) -> (Vec<(Pubkey, AccountHash)>, u64, Measure) { let mut scan = Measure::start("scan"); - let scan_result: ScanStorageResult<(Pubkey, Hash), DashMap> = self - .scan_account_storage( + let scan_result: ScanStorageResult<(Pubkey, AccountHash), DashMap> = + self.scan_account_storage( slot, |loaded_account: LoadedAccount| { // Cache only has one version per key, don't need to worry about versioning Some((*loaded_account.pubkey(), loaded_account.loaded_hash())) }, - |accum: &DashMap, loaded_account: LoadedAccount| { + |accum: &DashMap, loaded_account: LoadedAccount| { let loaded_hash = loaded_account.loaded_hash(); accum.insert(*loaded_account.pubkey(), loaded_hash); }, @@ -7944,7 +7947,7 @@ impl AccountsDb { /// Return all of the accounts for a given slot pub fn get_pubkey_hash_account_for_slot(&self, slot: Slot) -> Vec { type ScanResult = - ScanStorageResult>; + ScanStorageResult>; let scan_result: ScanResult = self.scan_account_storage( slot, |loaded_account: LoadedAccount| { @@ -7955,7 +7958,8 @@ impl AccountsDb { account: loaded_account.take_account(), }) }, - |accum: &DashMap, loaded_account: LoadedAccount| { + |accum: &DashMap, + loaded_account: LoadedAccount| { // Storage may have duplicates so only keep the latest version for each key accum.insert( *loaded_account.pubkey(), @@ -10482,10 +10486,10 @@ pub mod tests { ]; let expected_hashes = [ - Hash::from_str("5K3NW73xFHwgTWVe4LyCg4QfQda8f88uZj2ypDx2kmmH").unwrap(), - Hash::from_str("84ozw83MZ8oeSF4hRAg7SeW1Tqs9LMXagX1BrDRjtZEx").unwrap(), - Hash::from_str("5XqtnEJ41CG2JWNp7MAg9nxkRUAnyjLxfsKsdrLxQUbC").unwrap(), - Hash::from_str("DpvwJcznzwULYh19Zu5CuAA4AT6WTBe4H6n15prATmqj").unwrap(), + AccountHash(Hash::from_str("5K3NW73xFHwgTWVe4LyCg4QfQda8f88uZj2ypDx2kmmH").unwrap()), + 
AccountHash(Hash::from_str("84ozw83MZ8oeSF4hRAg7SeW1Tqs9LMXagX1BrDRjtZEx").unwrap()), + AccountHash(Hash::from_str("5XqtnEJ41CG2JWNp7MAg9nxkRUAnyjLxfsKsdrLxQUbC").unwrap()), + AccountHash(Hash::from_str("DpvwJcznzwULYh19Zu5CuAA4AT6WTBe4H6n15prATmqj").unwrap()), ]; let mut raw_accounts = Vec::default(); @@ -10505,7 +10509,7 @@ pub mod tests { if slot == 1 && matches!(include_slot_in_hash, IncludeSlotInHash::IncludeSlot) { assert_eq!(hash, expected_hashes[i]); } - raw_expected[i].hash = hash; + raw_expected[i].hash = hash.0; } let to_store = raw_accounts @@ -12696,7 +12700,7 @@ pub mod tests { let account = stored_account.to_account_shared_data(); let expected_account_hash = - Hash::from_str("6VeAL4x4PVkECKL1hD1avwPE1uMCRoWiZJzVMvVNYhTq").unwrap(); + AccountHash(Hash::from_str("6VeAL4x4PVkECKL1hD1avwPE1uMCRoWiZJzVMvVNYhTq").unwrap()); assert_eq!( AccountsDb::hash_account( @@ -18161,7 +18165,7 @@ pub mod tests { // Ensure the zero-lamport accounts are NOT included in the full accounts hash. 
let full_account_hashes = [(2, 0), (3, 0), (4, 1)].into_iter().map(|(index, slot)| { let (pubkey, account) = &accounts[index]; - AccountsDb::hash_account(slot, account, pubkey, INCLUDE_SLOT_IN_HASH_TESTS) + AccountsDb::hash_account(slot, account, pubkey, INCLUDE_SLOT_IN_HASH_TESTS).0 }); let expected_accounts_hash = AccountsHash(compute_merkle_root(full_account_hashes)); assert_eq!(full_accounts_hash.0, expected_accounts_hash); @@ -18243,6 +18247,7 @@ pub mod tests { Hash::new_from_array(hash.into()) } else { AccountsDb::hash_account(slot, account, pubkey, INCLUDE_SLOT_IN_HASH_TESTS) + .0 } }); let expected_accounts_hash = diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 222f2b1a640984..cc6db20fbfbb00 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -740,14 +740,9 @@ impl<'a> AccountsHasher<'a> { ) } - pub fn accumulate_account_hashes(mut hashes: Vec<(Pubkey, Hash)>) -> Hash { - Self::sort_hashes_by_pubkey(&mut hashes); - - Self::compute_merkle_root_loop(hashes, MERKLE_FANOUT, |i| &i.1) - } - - pub fn sort_hashes_by_pubkey(hashes: &mut Vec<(Pubkey, Hash)>) { + pub fn accumulate_account_hashes(mut hashes: Vec<(Pubkey, AccountHash)>) -> Hash { hashes.par_sort_unstable_by(|a, b| a.0.cmp(&b.0)); + Self::compute_merkle_root_loop(hashes, MERKLE_FANOUT, |i| &i.1 .0) } pub fn compare_two_hash_entries( @@ -1206,6 +1201,21 @@ pub enum ZeroLamportAccounts { Included, } +/// Hash of an account +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Pod, Zeroable)] +pub struct AccountHash(pub Hash); + +// Ensure the newtype wrapper never changes size from the underlying Hash +// This also ensures there are no padding bytes, which is required to safely implement Pod +const _: () = assert!(std::mem::size_of::() == std::mem::size_of::()); + +impl Borrow for AccountHash { + fn borrow(&self) -> &Hash { + &self.0 + } +} + /// Hash of accounts +#[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum
AccountsHashKind { @@ -2320,14 +2330,18 @@ mod tests { .collect(); let result = if pass == 0 { - test_hashing_larger(input.clone(), fanout) + test_hashing_larger(input, fanout) } else { // this sorts inside let early_result = AccountsHasher::accumulate_account_hashes( - input.iter().map(|i| (i.0, i.1)).collect::>(), + input + .iter() + .map(|i| (i.0, AccountHash(i.1))) + .collect::>(), ); - AccountsHasher::sort_hashes_by_pubkey(&mut input); - let result = AccountsHasher::compute_merkle_root(input.clone(), fanout); + + input.par_sort_unstable_by(|a, b| a.0.cmp(&b.0)); + let result = AccountsHasher::compute_merkle_root(input, fanout); assert_eq!(early_result, result); result }; diff --git a/runtime/src/bank/bank_hash_details.rs b/runtime/src/bank/bank_hash_details.rs index d4ea3f65651b5c..a1b4fa74f2ff73 100644 --- a/runtime/src/bank/bank_hash_details.rs +++ b/runtime/src/bank/bank_hash_details.rs @@ -8,7 +8,10 @@ use { de::{self, Deserialize, Deserializer}, ser::{Serialize, SerializeSeq, Serializer}, }, - solana_accounts_db::{accounts_db::PubkeyHashAccount, accounts_hash::AccountsDeltaHash}, + solana_accounts_db::{ + accounts_db::PubkeyHashAccount, + accounts_hash::{AccountHash, AccountsDeltaHash}, + }, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount}, clock::{Epoch, Slot}, @@ -124,7 +127,7 @@ impl From<&PubkeyHashAccount> for SerdeAccount { } = pubkey_hash_account; Self { pubkey: pubkey.to_string(), - hash: hash.to_string(), + hash: hash.0.to_string(), owner: account.owner().to_string(), lamports: account.lamports(), rent_epoch: account.rent_epoch(), @@ -139,7 +142,7 @@ impl TryFrom for PubkeyHashAccount { fn try_from(temp_account: SerdeAccount) -> Result { let pubkey = Pubkey::from_str(&temp_account.pubkey).map_err(|err| err.to_string())?; - let hash = Hash::from_str(&temp_account.hash).map_err(|err| err.to_string())?; + let hash = AccountHash(Hash::from_str(&temp_account.hash).map_err(|err| err.to_string())?); let account = 
AccountSharedData::from(Account { lamports: temp_account.lamports, @@ -244,7 +247,7 @@ pub mod tests { rent_epoch: 123, }); let account_pubkey = Pubkey::new_unique(); - let account_hash = hash("account".as_bytes()); + let account_hash = AccountHash(hash("account".as_bytes())); let accounts = BankHashAccounts { accounts: vec![PubkeyHashAccount { pubkey: account_pubkey, From 2d84c1dbba26dd25a66a84dd93b7aefe03c73b61 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Mon, 9 Oct 2023 13:41:35 -0700 Subject: [PATCH 289/407] Fix deployment of program-v4 in freshly started test validator (#33583) --- programs/loader-v4/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 4645b33c26a6e0..6f15096ecc6389 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -366,7 +366,11 @@ pub fn process_instruction_deploy( authority_address, )?; let current_slot = invoke_context.get_sysvar_cache().get_clock()?.slot; - if state.slot.saturating_add(DEPLOYMENT_COOLDOWN_IN_SLOTS) > current_slot { + + // Slot = 0 indicates that the program hasn't been deployed yet. So no need to check for the cooldown slots. + // (Without this check, the program deployment is failing in freshly started test validators. That's + // because at startup current_slot is 0, which is < DEPLOYMENT_COOLDOWN_IN_SLOTS). 
+ if state.slot != 0 && state.slot.saturating_add(DEPLOYMENT_COOLDOWN_IN_SLOTS) > current_slot { ic_logger_msg!( log_collector, "Program was deployed recently, cooldown still in effect" From 4c664a8d31cb745b41c7ea33859b28275553d1fd Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Mon, 9 Oct 2023 14:32:35 -0700 Subject: [PATCH 290/407] Cargo registry service for deploying programs (#33570) --- Cargo.lock | 164 ++++++++++++- Cargo.toml | 5 + cargo-registry/Cargo.toml | 45 ++++ cargo-registry/src/client.rs | 209 ++++++++++++++++ cargo-registry/src/dummy_git_index.rs | 119 ++++++++++ cargo-registry/src/main.rs | 327 ++++++++++++++++++++++++++ cargo-registry/src/publisher.rs | 160 +++++++++++++ cli/src/program_v4.rs | 4 +- 8 files changed, 1029 insertions(+), 4 deletions(-) create mode 100644 cargo-registry/Cargo.toml create mode 100644 cargo-registry/src/client.rs create mode 100644 cargo-registry/src/dummy_git_index.rs create mode 100644 cargo-registry/src/main.rs create mode 100644 cargo-registry/src/publisher.rs diff --git a/Cargo.lock b/Cargo.lock index 378a93e91df240..8afcd1f638451b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2209,6 +2209,21 @@ version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +[[package]] +name = "git2" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf97ba92db08df386e10c8ede66a2a0369bd277090afd8710e19e38de9ec0cd" +dependencies = [ + "bitflags 2.3.3", + "libc", + "libgit2-sys", + "log", + "openssl-probe", + "openssl-sys", + "url 2.4.1", +] + [[package]] name = "glob" version = "0.3.0" @@ -2447,6 +2462,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21dec9db110f5f872ed9699c3ecf50cf16f423502706ba5c72462e28d3157573" + [[package]] name = "httparse" version = 
"1.8.0" @@ -2521,6 +2542,25 @@ dependencies = [ "tokio-rustls", ] +[[package]] +name = "hyper-staticfile" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "318ca89e4827e7fe4ddd2824f52337239796ae8ecc761a663324407dc3d8d7e7" +dependencies = [ + "futures-util", + "http", + "http-range", + "httpdate", + "hyper", + "mime_guess", + "percent-encoding 2.3.0", + "rand 0.8.5", + "tokio", + "url 2.4.1", + "winapi 0.3.9", +] + [[package]] name = "hyper-timeout" version = "0.4.1" @@ -2872,6 +2912,20 @@ version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +[[package]] +name = "libgit2-sys" +version = "0.16.1+1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2a2bb3680b094add03bb3732ec520ece34da31a8cd2d633d1389d0f0fb60d0c" +dependencies = [ + "cc", + "libc", + "libssh2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", +] + [[package]] name = "libloading" version = "0.7.4" @@ -2951,6 +3005,20 @@ dependencies = [ "libsecp256k1-core", ] +[[package]] +name = "libssh2-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dc8a030b787e2119a731f1951d6a773e2280c660f8ec4b0f5e1505a386e71ee" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" version = "1.1.3" @@ -2958,6 +3026,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de5435b8549c16d423ed0c03dbaafe57cf6c3344744f1242520d59c9d8ecec66" dependencies = [ "cc", + "libc", "pkg-config", "vcpkg", ] @@ -3113,6 +3182,16 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "mime_guess" +version = "2.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "min-max-heap" version = "1.3.0" @@ -3939,7 +4018,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ - "toml", + "toml 0.5.8", ] [[package]] @@ -3949,7 +4028,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" dependencies = [ "thiserror", - "toml", + "toml 0.5.8", ] [[package]] @@ -4784,6 +4863,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -5471,6 +5559,35 @@ dependencies = [ "tar", ] +[[package]] +name = "solana-cargo-registry" +version = "1.18.0" +dependencies = [ + "clap 2.33.3", + "flate2", + "git2", + "hyper", + "hyper-staticfile", + "log", + "rustc_version 0.4.0", + "serde", + "serde_json", + "solana-clap-utils", + "solana-cli", + "solana-cli-config", + "solana-cli-output", + "solana-logger", + "solana-remote-wallet", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-version", + "tar", + "tempfile", + "tokio", + "toml 0.8.2", +] + [[package]] name = "solana-cargo-test-bpf" version = "1.18.0" @@ -8371,6 +8488,40 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = 
"toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +dependencies = [ + "indexmap 2.0.2", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.9.2" @@ -9057,6 +9208,15 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "winnow" +version = "0.5.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037711d82167854aff2018dfd193aa0fef5370f456732f0d5a0c59b0f1b4b907" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" diff --git a/Cargo.toml b/Cargo.toml index 05c6241523883a..509ffb6047ac9e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ members = [ "bench-tps", "bloom", "bucket_map", + "cargo-registry", "clap-utils", "clap-v3-utils", "cli", @@ -205,6 +206,7 @@ gag = "1.0.0" generic-array = { version = "0.14.7", default-features = false } gethostname = "0.2.3" getrandom = "0.2.10" +git2 = "0.18.1" goauth = "0.13.1" hex = "0.4.3" hidapi = { version = "2.4.1", default-features = false } @@ -214,6 +216,7 @@ http = "0.2.9" humantime = "2.0.1" hyper = "0.14.27" hyper-proxy = "0.9.1" +hyper-staticfile = "0.9.5" im = "15.1.0" index_list = "0.2.7" indexmap = "2.0.2" @@ -311,6 +314,7 @@ solana-bench-tps = { path = "bench-tps", version = "=1.18.0" } solana-bloom = { path = "bloom", version = "=1.18.0" } solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.18.0" } solana-bucket-map = { path = "bucket_map", version = "=1.18.0" } +solana-cargo-registry = { 
path = "cargo-registry", version = "=1.18.0" } solana-connection-cache = { path = "connection-cache", version = "=1.18.0", default-features = false } solana-clap-utils = { path = "clap-utils", version = "=1.18.0" } solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.18.0" } @@ -405,6 +409,7 @@ tokio-serde = "0.8" tokio-stream = "0.1.14" tokio-tungstenite = "0.20.1" tokio-util = "0.6" +toml = "0.8.0" tonic = "0.9.2" tonic-build = "0.9.2" trees = "0.4.2" diff --git a/cargo-registry/Cargo.toml b/cargo-registry/Cargo.toml new file mode 100644 index 00000000000000..43aed1f4fa2097 --- /dev/null +++ b/cargo-registry/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "solana-cargo-registry" +description = "Solana cargo registry" +documentation = "https://docs.rs/solana-cargo-registry" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +clap = { workspace = true } +flate2 = { workspace = true } +git2 = { workspace = true } +hyper = { workspace = true, features = ["full"] } +hyper-staticfile = { workspace = true } +log = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +solana-clap-utils = { workspace = true } +solana-cli = { workspace = true } +solana-cli-config = { workspace = true } +solana-cli-output = { workspace = true } +solana-logger = { workspace = true } +solana-remote-wallet = { workspace = true, features = ["default"] } +solana-rpc-client = { workspace = true, features = ["default"] } +solana-rpc-client-api = { workspace = true } +solana-sdk = { workspace = true } +solana-version = { workspace = true } +tar = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true, features = ["full"] } +toml = { workspace = true } + +[dev-dependencies] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + 
+[build-dependencies] +rustc_version = { workspace = true } + +[features] +dev-context-only-utils = [] diff --git a/cargo-registry/src/client.rs b/cargo-registry/src/client.rs new file mode 100644 index 00000000000000..17432f0ebe27cd --- /dev/null +++ b/cargo-registry/src/client.rs @@ -0,0 +1,209 @@ +use { + clap::{crate_description, crate_name, value_t_or_exit, App, Arg, ArgMatches}, + solana_clap_utils::{ + hidden_unless_forced, + input_validators::is_url_or_moniker, + keypair::{DefaultSigner, SignerIndex}, + }, + solana_cli::{ + cli::{DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS, DEFAULT_RPC_TIMEOUT_SECONDS}, + program_v4::ProgramV4CommandConfig, + }, + solana_cli_config::{Config, ConfigInput}, + solana_cli_output::OutputFormat, + solana_rpc_client::rpc_client::RpcClient, + solana_sdk::{ + commitment_config, + signature::{read_keypair_file, Keypair}, + }, + std::{error, sync::Arc, time::Duration}, +}; + +pub struct ClientConfig<'a>(pub ProgramV4CommandConfig<'a>); + +impl<'a> ClientConfig<'a> { + pub fn new(client: &'a Client) -> Self { + Self(ProgramV4CommandConfig { + websocket_url: &client.websocket_url, + commitment: client.commitment, + payer: &client.cli_signers[0], + authority: &client.cli_signers[client.authority_signer_index], + output_format: &OutputFormat::Display, + use_quic: true, + }) + } +} + +pub struct Client { + pub rpc_client: Arc, + pub port: u16, + websocket_url: String, + commitment: commitment_config::CommitmentConfig, + cli_signers: Vec, + authority_signer_index: SignerIndex, +} + +impl Client { + fn get_keypair( + matches: &ArgMatches<'_>, + config_path: &str, + name: &str, + ) -> Result> { + let (_, default_signer_path) = ConfigInput::compute_keypair_path_setting( + matches.value_of(name).unwrap_or(""), + config_path, + ); + + let default_signer = DefaultSigner::new(name, default_signer_path); + + read_keypair_file(default_signer.path) + } + + fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, 'v> { + 
App::new(name) + .about(about) + .version(version) + .arg( + Arg::with_name("config_file") + .short("C") + .long("config") + .value_name("FILEPATH") + .takes_value(true) + .global(true) + .help("Configuration file to use"), + ) + .arg( + Arg::with_name("json_rpc_url") + .short("u") + .long("url") + .value_name("URL_OR_MONIKER") + .takes_value(true) + .global(true) + .validator(is_url_or_moniker) + .help( + "URL for Solana's JSON RPC or moniker (or their first letter): \ + [mainnet-beta, testnet, devnet, localhost]", + ), + ) + .arg( + Arg::with_name("keypair") + .short("k") + .long("keypair") + .value_name("KEYPAIR") + .global(true) + .takes_value(true) + .help("Filepath or URL to a keypair"), + ) + .arg( + Arg::with_name("authority") + .short("a") + .long("authority") + .value_name("KEYPAIR") + .global(true) + .takes_value(true) + .help("Filepath or URL to program authority keypair"), + ) + .arg( + Arg::with_name("port") + .short("p") + .long("port") + .value_name("PORT") + .global(true) + .takes_value(true) + .help("Cargo registry's local TCP port. 
The server will bind to this port and wait for requests."), + ) + .arg( + Arg::with_name("commitment") + .long("commitment") + .takes_value(true) + .possible_values(&[ + "processed", + "confirmed", + "finalized", + ]) + .value_name("COMMITMENT_LEVEL") + .hide_possible_values(true) + .global(true) + .help("Return information at the selected commitment level [possible values: processed, confirmed, finalized]"), + ) + .arg( + Arg::with_name("rpc_timeout") + .long("rpc-timeout") + .value_name("SECONDS") + .takes_value(true) + .default_value(DEFAULT_RPC_TIMEOUT_SECONDS) + .global(true) + .hidden(hidden_unless_forced()) + .help("Timeout value for RPC requests"), + ) + .arg( + Arg::with_name("confirm_transaction_initial_timeout") + .long("confirm-timeout") + .value_name("SECONDS") + .takes_value(true) + .default_value(DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS) + .global(true) + .hidden(hidden_unless_forced()) + .help("Timeout value for initial transaction status"), + ) + } + + pub fn new() -> Result> { + let matches = Self::get_clap_app( + crate_name!(), + crate_description!(), + solana_version::version!(), + ) + .get_matches(); + + let config = if let Some(config_file) = matches.value_of("config_file") { + Config::load(config_file).unwrap_or_default() + } else { + Config::default() + }; + + let (_, json_rpc_url) = ConfigInput::compute_json_rpc_url_setting( + matches.value_of("json_rpc_url").unwrap_or(""), + &config.json_rpc_url, + ); + + let (_, websocket_url) = ConfigInput::compute_websocket_url_setting( + matches.value_of("websocket_url").unwrap_or(""), + &config.websocket_url, + matches.value_of("json_rpc_url").unwrap_or(""), + &config.json_rpc_url, + ); + + let (_, commitment) = ConfigInput::compute_commitment_config( + matches.value_of("commitment").unwrap_or(""), + &config.commitment, + ); + + let rpc_timeout = value_t_or_exit!(matches, "rpc_timeout", u64); + let rpc_timeout = Duration::from_secs(rpc_timeout); + + let confirm_transaction_initial_timeout = + 
value_t_or_exit!(matches, "confirm_transaction_initial_timeout", u64); + let confirm_transaction_initial_timeout = + Duration::from_secs(confirm_transaction_initial_timeout); + + let payer_keypair = Self::get_keypair(&matches, &config.keypair_path, "keypair")?; + let authority_keypair = Self::get_keypair(&matches, &config.keypair_path, "authority")?; + + let port = value_t_or_exit!(matches, "port", u16); + + Ok(Client { + rpc_client: Arc::new(RpcClient::new_with_timeouts_and_commitment( + json_rpc_url.to_string(), + rpc_timeout, + commitment, + confirm_transaction_initial_timeout, + )), + port, + websocket_url, + commitment, + cli_signers: vec![payer_keypair, authority_keypair], + authority_signer_index: 1, + }) + } +} diff --git a/cargo-registry/src/dummy_git_index.rs b/cargo-registry/src/dummy_git_index.rs new file mode 100644 index 00000000000000..1b36f485ebff3e --- /dev/null +++ b/cargo-registry/src/dummy_git_index.rs @@ -0,0 +1,119 @@ +use { + git2::{IndexAddOption, Repository}, + serde::{Deserialize, Serialize}, + std::{ + fs::{self, create_dir_all}, + io::ErrorKind, + net::SocketAddr, + path::PathBuf, + process::Command, + }, +}; + +#[derive(Debug, Default, Deserialize, Serialize)] +struct RegistryConfig { + dl: String, + api: Option, +} + +pub struct DummyGitIndex {} + +impl DummyGitIndex { + pub fn create_or_update_git_repo(root_dir: PathBuf, server_addr: &SocketAddr) { + create_dir_all(&root_dir).expect("Failed to create root directory"); + + let expected_config = serde_json::to_string(&RegistryConfig { + dl: format!( + "http://{}/api/v1/crates/{{crate}}/{{version}}/download", + server_addr + ), + api: Some(format!("http://{}", server_addr)), + }) + .expect("Failed to create expected config"); + + let config_path = root_dir.join("config.json"); + let config_written = if let Ok(config) = fs::read_to_string(&config_path) { + if config != expected_config { + fs::write(config_path, expected_config).expect("Failed to update config"); + true + } else { + false 
+ } + } else { + fs::write(config_path, expected_config).expect("Failed to write config"); + true + }; + + #[cfg(unix)] + use std::os::unix::fs::symlink; + #[cfg(windows)] + use std::os::windows::fs::symlink_dir as symlink; + + let new_symlink = match symlink(".", root_dir.join("index")) { + Ok(()) => true, + Err(ref err) if err.kind() == ErrorKind::AlreadyExists => false, + Err(err) => panic!("Failed to create a symlink: {}", err), + }; + + let new_git_symlink = match symlink(".git", root_dir.join("git")) { + Ok(()) => true, + Err(ref err) if err.kind() == ErrorKind::AlreadyExists => false, + Err(err) => panic!("Failed to create git symlink: {}", err), + }; + + let repository = Repository::init(&root_dir).expect("Failed to GIT init"); + + let empty = repository + .is_empty() + .expect("Failed to check if GIT repo is empty"); + + if empty || config_written || new_symlink || new_git_symlink { + let mut index = repository.index().expect("cannot get the Index file"); + index + .add_all(["*"].iter(), IndexAddOption::DEFAULT, None) + .expect("Failed to add modified files to git index"); + index.write().expect("Failed to update the git index"); + + let tree = index + .write_tree() + .and_then(|tree_id| repository.find_tree(tree_id)) + .expect("Failed to get tree"); + + let signature = repository.signature().expect("Failed to get signature"); + + if empty { + repository.commit( + Some("HEAD"), + &signature, + &signature, + "Created new repo", + &tree, + &[], + ) + } else { + let oid = repository + .refname_to_id("HEAD") + .expect("Failed to get HEAD ref"); + let parent = repository + .find_commit(oid) + .expect("Failed to find parent commit"); + + repository.commit( + Some("HEAD"), + &signature, + &signature, + "Updated GIT repo", + &tree, + &[&parent], + ) + } + .expect("Failed to commit the changes"); + } + + Command::new("git") + .current_dir(&root_dir) + .arg("update-server-info") + .status() + .expect("git update-server-info failed"); + } +} diff --git 
a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs new file mode 100644 index 00000000000000..0749875824c072 --- /dev/null +++ b/cargo-registry/src/main.rs @@ -0,0 +1,327 @@ +//! The `registry_service` module implements the Solana cargo registry service. +use { + crate::{ + client::Client, + dummy_git_index::DummyGitIndex, + publisher::{Error, Publisher}, + }, + hyper::{ + body, + service::{make_service_fn, service_fn}, + Method, Server, + }, + hyper_staticfile::Static, + log::*, + std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::PathBuf, + sync::Arc, + }, +}; + +mod client; +mod dummy_git_index; +mod publisher; + +const PATH_PREFIX: &str = "/api/v1/crates"; + +pub struct CargoRegistryService {} + +impl CargoRegistryService { + fn error_response(status: hyper::StatusCode, msg: &str) -> hyper::Response { + error!("{}", msg); + hyper::Response::builder() + .status(status) + .body(hyper::Body::from( + serde_json::json!({ + "errors" : [ + {"details": msg} + ] + }) + .to_string(), + )) + .unwrap() + } + + fn success_response() -> hyper::Response { + hyper::Response::builder() + .status(hyper::StatusCode::OK) + .body(hyper::Body::from("")) + .unwrap() + } + + async fn handle_publish_request( + request: hyper::Request, + client: Arc, + ) -> hyper::Response { + info!("Handling request to publish the crate"); + let bytes = body::to_bytes(request.into_body()).await; + + match bytes { + Ok(data) => { + let Ok(result) = + tokio::task::spawn_blocking(move || Publisher::publish_crate(data, client)) + .await + else { + return Self::error_response( + hyper::StatusCode::INTERNAL_SERVER_ERROR, + "Internal error. Failed to wait for program deployment", + ); + }; + + if result.is_ok() { + info!("Published the crate successfully. {:?}", result); + Self::success_response() + } else { + Self::error_response( + hyper::StatusCode::BAD_REQUEST, + format!("Failed to publish the crate. 
{:?}", result).as_str(), + ) + } + } + Err(_) => Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Failed to receive the crate data from the client.", + ), + } + } + + fn get_crate_name_and_version(path: &str) -> Option<(&str, &str, &str)> { + path.rsplit_once('/').and_then(|(remainder, version)| { + remainder + .rsplit_once('/') + .map(|(remainder, name)| (remainder, name, version)) + }) + } + + fn handle_yank_request( + path: &str, + _request: &hyper::Request, + ) -> hyper::Response { + let Some((path, _crate_name, _version)) = Self::get_crate_name_and_version(path) else { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Failed to parse the request.", + ); + }; + + if path.len() != PATH_PREFIX.len() { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Request length is incorrect", + ); + } + + Self::error_response( + hyper::StatusCode::NOT_IMPLEMENTED, + "This command is not implemented yet", + ) + } + + fn handle_unyank_request( + path: &str, + _request: &hyper::Request, + ) -> hyper::Response { + let Some((path, _crate_name, _version)) = Self::get_crate_name_and_version(path) else { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Failed to parse the request.", + ); + }; + + if path.len() != PATH_PREFIX.len() { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Request length is incorrect", + ); + } + + Self::error_response( + hyper::StatusCode::NOT_IMPLEMENTED, + "This command is not implemented yet", + ) + } + + fn get_crate_name(path: &str) -> Option<(&str, &str)> { + path.rsplit_once('/') + } + + fn handle_get_owners_request( + path: &str, + _request: &hyper::Request, + ) -> hyper::Response { + let Some((path, _crate_name)) = Self::get_crate_name(path) else { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Failed to parse the request.", + ); + }; + + if path.len() != PATH_PREFIX.len() { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + 
"Request length is incorrect", + ); + } + + Self::error_response( + hyper::StatusCode::NOT_IMPLEMENTED, + "This command is not implemented yet", + ) + } + + fn handle_add_owners_request( + path: &str, + _request: &hyper::Request, + ) -> hyper::Response { + let Some((path, _crate_name)) = Self::get_crate_name(path) else { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Failed to parse the request.", + ); + }; + + if path.len() != PATH_PREFIX.len() { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Request length is incorrect", + ); + } + + Self::error_response( + hyper::StatusCode::NOT_IMPLEMENTED, + "This command is not implemented yet", + ) + } + + fn handle_delete_owners_request( + path: &str, + _request: &hyper::Request, + ) -> hyper::Response { + let Some((path, _crate_name)) = Self::get_crate_name(path) else { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Failed to parse the request.", + ); + }; + + if path.len() != PATH_PREFIX.len() { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Request length is incorrect", + ); + } + + Self::error_response( + hyper::StatusCode::NOT_IMPLEMENTED, + "This command is not implemented yet", + ) + } + + fn handle_get_crates_request( + path: &str, + _request: &hyper::Request, + ) -> hyper::Response { + // The endpoint for this type of request is `/api/v1/crates` (same as PATH_PREFIX). + // The `crates` substring has already been extracted out of the endpoint string. + // So the path should only contain `/api/v1". The caller already checked that the + // full path started with PATH_PREFIX. So it's sufficient to check that provided + // path is smaller than PATH_PREFIX. 
+ if path.len() >= PATH_PREFIX.len() { + return Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Request length is incorrect", + ); + } + + Self::error_response( + hyper::StatusCode::NOT_IMPLEMENTED, + "This command is not implemented yet", + ) + } + + async fn handler( + request: hyper::Request, + client: Arc, + ) -> Result, Error> { + let path = request.uri().path(); + if path.starts_with("/git") { + return Static::new("/tmp/dummy-git") + .serve(request) + .await + .or_else(|_| { + Ok(Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Failed to serve git index", + )) + }); + } + + if !path.starts_with(PATH_PREFIX) { + return Ok(Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Invalid path for the request", + )); + } + + let Some((path, endpoint)) = path.rsplit_once('/') else { + return Ok(Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Invalid endpoint in the path", + )); + }; + + Ok(match *request.method() { + Method::PUT => match endpoint { + "new" => { + if path.len() != PATH_PREFIX.len() { + Self::error_response( + hyper::StatusCode::BAD_REQUEST, + "Invalid length of the request.", + ) + } else { + Self::handle_publish_request(request, client.clone()).await + } + } + "unyank" => Self::handle_unyank_request(path, &request), + "owners" => Self::handle_add_owners_request(path, &request), + _ => Self::error_response(hyper::StatusCode::METHOD_NOT_ALLOWED, "Unknown request"), + }, + Method::GET => match endpoint { + "crates" => Self::handle_get_crates_request(path, &request), + "owners" => Self::handle_get_owners_request(path, &request), + _ => Self::error_response(hyper::StatusCode::METHOD_NOT_ALLOWED, "Unknown request"), + }, + Method::DELETE => match endpoint { + "yank" => Self::handle_yank_request(path, &request), + "owners" => Self::handle_delete_owners_request(path, &request), + _ => Self::error_response(hyper::StatusCode::METHOD_NOT_ALLOWED, "Unknown request"), + }, + _ => 
Self::error_response(hyper::StatusCode::METHOD_NOT_ALLOWED, "Unknown request"), + }) + } +} + +#[tokio::main] +async fn main() { + solana_logger::setup_with_default("solana=info"); + let client = Arc::new(Client::new().expect("Failed to get RPC Client instance")); + let port = client.port; + + let registry_service = make_service_fn(move |_| { + let client_inner = client.clone(); + async move { + Ok::<_, Error>(service_fn(move |request| { + CargoRegistryService::handler(request, client_inner.clone()) + })) + } + }); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port); + DummyGitIndex::create_or_update_git_repo(PathBuf::from("/tmp/dummy-git"), &addr); + + let server = Server::bind(&addr).serve(registry_service); + info!("Server running on on http://{}", addr); + + let _ = server.await; +} diff --git a/cargo-registry/src/publisher.rs b/cargo-registry/src/publisher.rs new file mode 100644 index 00000000000000..a712da35895e1c --- /dev/null +++ b/cargo-registry/src/publisher.rs @@ -0,0 +1,160 @@ +use { + crate::client::{Client, ClientConfig}, + flate2::read::GzDecoder, + hyper::body::Bytes, + log::*, + serde::{Deserialize, Serialize}, + serde_json::from_slice, + solana_cli::program_v4::{process_deploy_program, read_and_verify_elf}, + solana_sdk::{ + signature::{Keypair, Signer}, + signer::EncodableKey, + }, + std::{ + collections::BTreeMap, + fs, + mem::size_of, + ops::Deref, + path::{Path, PathBuf}, + sync::Arc, + }, + tar::Archive, + tempfile::{tempdir, TempDir}, +}; + +pub type Error = Box; + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +enum DependencyType { + Dev, + Build, + Normal, +} + +#[allow(dead_code)] +#[derive(Debug, Deserialize)] +struct Dependency { + name: String, + version_req: String, + features: Vec, + optional: bool, + default_features: bool, + target: Option, + kind: DependencyType, + registry: Option, + explicit_name_in_toml: Option, +} + +#[derive(Debug, Deserialize)] +#[allow(unused)] 
+struct PackageMetaData { + name: String, + vers: String, + deps: Vec, + features: BTreeMap>, + authors: Vec, + description: Option, + documentation: Option, + homepage: Option, + readme: Option, + readme_file: Option, + keywords: Vec, + categories: Vec, + license: Option, + license_file: Option, + repository: Option, + badges: BTreeMap>, + links: Option, + rust_version: Option, +} + +impl PackageMetaData { + fn new(bytes: &Bytes) -> serde_json::Result<(PackageMetaData, usize)> { + let (json_length, sizeof_length) = Self::read_u32_length(bytes)?; + let end_of_meta_data = sizeof_length.saturating_add(json_length as usize); + let json_body = bytes.slice(sizeof_length..end_of_meta_data); + from_slice::(json_body.deref()).map(|data| (data, end_of_meta_data)) + } + + fn read_u32_length(bytes: &Bytes) -> serde_json::Result<(u32, usize)> { + let sizeof_length = size_of::(); + let length_le = bytes.slice(0..sizeof_length); + let length = + u32::from_le_bytes(length_le.deref().try_into().expect("Failed to read length")); + Ok((length, sizeof_length)) + } +} + +pub struct Publisher {} + +impl Publisher { + fn make_path>(tempdir: &TempDir, meta: &PackageMetaData, append: P) -> PathBuf { + let mut path = tempdir.path().to_path_buf(); + path.push(format!("{}-{}/", meta.name, meta.vers)); + path.push(append); + path + } + + fn program_library_name(tempdir: &TempDir, meta: &PackageMetaData) -> Result { + let toml_content = fs::read_to_string(Self::make_path(tempdir, meta, "Cargo.toml.orig"))?; + let toml = toml_content.parse::()?; + let library_name = toml + .get("lib") + .and_then(|v| v.get("name")) + .and_then(|v| v.as_str()) + .ok_or("Failed to get module name")?; + Ok(library_name.to_string()) + } + + pub(crate) fn publish_crate(bytes: Bytes, client: Arc) -> Result<(), Error> { + let (meta_data, offset) = PackageMetaData::new(&bytes)?; + + let (_crate_file_length, length_size) = + PackageMetaData::read_u32_length(&bytes.slice(offset..))?; + let crate_bytes = 
bytes.slice(offset.saturating_add(length_size)..); + + let decoder = GzDecoder::new(crate_bytes.as_ref()); + let mut archive = Archive::new(decoder); + + let tempdir = tempdir()?; + archive.unpack(tempdir.path())?; + + let config = ClientConfig::new(client.as_ref()); + + let lib_name = Self::program_library_name(&tempdir, &meta_data)?; + + let program_path = Self::make_path(&tempdir, &meta_data, format!("out/{}.so", lib_name)) + .into_os_string() + .into_string() + .map_err(|_| "Failed to get program file path")?; + + let program_data = read_and_verify_elf(program_path.as_ref()) + .map_err(|e| format!("failed to read the program: {}", e))?; + + let program_keypair = Keypair::read_from_file(Self::make_path( + &tempdir, + &meta_data, + format!("out/{}-keypair.json", lib_name), + )) + .map_err(|e| format!("Failed to get keypair from the file: {}", e))?; + + info!("Deploying program at {:?}", program_keypair.pubkey()); + + process_deploy_program( + client.rpc_client.clone(), + &config.0, + &program_data, + program_data.len() as u32, + &program_keypair.pubkey(), + Some(&program_keypair), + ) + .map_err(|e| { + error!("Failed to deploy the program: {}", e); + format!("Failed to deploy the program: {}", e) + })?; + + info!("Successfully deployed the program"); + Ok(()) + } +} diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index b1e0d60fa9a39c..b676656fedfa11 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -311,7 +311,7 @@ pub fn parse_program_v4_subcommand( Ok(response) } -fn read_and_verify_elf(program_location: &str) -> Result, Box> { +pub fn read_and_verify_elf(program_location: &str) -> Result, Box> { let mut file = File::open(program_location) .map_err(|err| format!("Unable to open program file: {err}"))?; let mut program_data = Vec::new(); @@ -427,7 +427,7 @@ pub fn process_program_v4_subcommand( // * Redeploy a program using a buffer account // - buffer_signer argument must contain the temporary buffer account information // 
(program_address must contain program ID and must NOT be same as buffer_signer.pubkey()) -fn process_deploy_program( +pub fn process_deploy_program( rpc_client: Arc, config: &ProgramV4CommandConfig, program_data: &[u8], From 55f3f203c6367abeb9cd468680eabb03198dc4d9 Mon Sep 17 00:00:00 2001 From: Trent Nelson Date: Mon, 9 Oct 2023 15:47:21 -0600 Subject: [PATCH 291/407] bank: remove ambiguating "helper" method (#33606) --- runtime/src/bank.rs | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 68e5492186ff9d..61fab556e503a4 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1793,14 +1793,10 @@ impl Bank { a corrupted snapshot or bugs in cached accounts or accounts-db.", ); let stakes_accounts_load_duration = now.elapsed(); - fn new() -> T { - T::default() - } - let feature_set = new(); let mut bank = Self { incremental_snapshot_persistence: fields.incremental_snapshot_persistence, rc: bank_rc, - status_cache: new(), + status_cache: Arc::>::default(), blockhash_queue: RwLock::new(fields.blockhash_queue), ancestors, hash: RwLock::new(fields.hash), @@ -1808,10 +1804,10 @@ impl Bank { parent_slot: fields.parent_slot, hard_forks: Arc::new(RwLock::new(fields.hard_forks)), transaction_count: AtomicU64::new(fields.transaction_count), - non_vote_transaction_count_since_restart: new(), - transaction_error_count: new(), - transaction_entries_count: new(), - transactions_per_entry_max: new(), + non_vote_transaction_count_since_restart: AtomicU64::default(), + transaction_error_count: AtomicU64::default(), + transaction_entries_count: AtomicU64::default(), + transactions_per_entry_max: AtomicU64::default(), tick_height: AtomicU64::new(fields.tick_height), signature_count: AtomicU64::new(fields.signature_count), capitalization: AtomicU64::new(fields.capitalization), @@ -1836,16 +1832,17 @@ impl Bank { stakes_cache: StakesCache::new(stakes), epoch_stakes: fields.epoch_stakes, 
is_delta: AtomicBool::new(fields.is_delta), - builtin_programs: new(), + builtin_programs: HashSet::::default(), runtime_config, - rewards: new(), + rewards: RwLock::new(vec![]), cluster_type: Some(genesis_config.cluster_type), - lazy_rent_collection: new(), - rewards_pool_pubkeys: new(), + lazy_rent_collection: AtomicBool::default(), + rewards_pool_pubkeys: Arc::>::default(), transaction_debug_keys: debug_keys, - transaction_log_collector_config: new(), - transaction_log_collector: new(), - feature_set: Arc::clone(&feature_set), + transaction_log_collector_config: Arc::>::default( + ), + transaction_log_collector: Arc::>::default(), + feature_set: Arc::::default(), drop_callback: RwLock::new(OptionalDropCallback(None)), freeze_started: AtomicBool::new(fields.hash != Hash::default()), vote_only_bank: false, From 0a3810854fa4a11b0841c548dcbc0ada311b8830 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Mon, 9 Oct 2023 15:01:50 -0700 Subject: [PATCH 292/407] Add RestartLastVotedForkSlots for wen_restart. (#33239) * Add RestartLastVotedForkSlots and RestartHeaviestFork for wen_restart. * Fix linter errors. * Revert RestartHeaviestFork, it will be added in another PR. * Update frozen abi message. * Fix wrong number in test generation, change to pub(crate) to limit scope. * Separate push_epoch_slots and push_restart_last_voted_fork_slots. * Add RestartLastVotedForkSlots data structure. * Remove unused parts to make PR smaller. * Remove unused clone. * Use CompressedSlotsVec to share code between EpochSlots and RestartLastVotedForkSlots. * Add total_messages to show how many messages are there. * Reduce RestartLastVotedForkSlots to one packet (16k slots). * Replace last_vote_slot with shred_version, revert CompressedSlotsVec. 
--- gossip/src/cluster_info.rs | 7 +- gossip/src/cluster_info_metrics.rs | 20 ++++ gossip/src/crds.rs | 3 +- gossip/src/crds_value.rs | 156 ++++++++++++++++++++++++++++- gossip/src/epoch_slots.rs | 2 +- 5 files changed, 180 insertions(+), 8 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 113b387512608d..8bfe628da8c441 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -267,7 +267,7 @@ pub fn make_accounts_hashes_message( pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; // TODO These messages should go through the gpu pipeline for spam filtering -#[frozen_abi(digest = "EnbW8mYTsPMndq9NkHLTkHJgduXvWSfSD6bBdmqQ8TiF")] +#[frozen_abi(digest = "CVvKB495YW6JN4w1rWwajyZmG5wvNhmD97V99rSv9fGw")] #[derive(Serialize, Deserialize, Debug, AbiEnumVisitor, AbiExample)] #[allow(clippy::large_enum_variant)] pub(crate) enum Protocol { @@ -393,7 +393,8 @@ fn retain_staked(values: &mut Vec, stakes: &HashMap) { CrdsData::AccountsHashes(_) => true, CrdsData::LowestSlot(_, _) | CrdsData::LegacyVersion(_) - | CrdsData::DuplicateShred(_, _) => { + | CrdsData::DuplicateShred(_, _) + | CrdsData::RestartLastVotedForkSlots(_) => { let stake = stakes.get(&value.pubkey()).copied(); stake.unwrap_or_default() >= MIN_STAKE_FOR_GOSSIP } @@ -4020,7 +4021,7 @@ mod tests { ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, values.clone()) .collect(); let self_pubkey = solana_sdk::pubkey::new_rand(); - assert!(splits.len() * 3 < NUM_CRDS_VALUES); + assert!(splits.len() * 2 < NUM_CRDS_VALUES); // Assert that all messages are included in the splits. 
assert_eq!(NUM_CRDS_VALUES, splits.iter().map(Vec::len).sum::()); splits diff --git a/gossip/src/cluster_info_metrics.rs b/gossip/src/cluster_info_metrics.rs index 095848fd2932ca..fbb7365387aad9 100644 --- a/gossip/src/cluster_info_metrics.rs +++ b/gossip/src/cluster_info_metrics.rs @@ -627,6 +627,16 @@ pub(crate) fn submit_gossip_stats( ("SnapshotHashes-pull", crds_stats.pull.counts[10], i64), ("ContactInfo-push", crds_stats.push.counts[11], i64), ("ContactInfo-pull", crds_stats.pull.counts[11], i64), + ( + "RestartLastVotedForkSlots-push", + crds_stats.push.counts[12], + i64 + ), + ( + "RestartLastVotedForkSlots-pull", + crds_stats.pull.counts[12], + i64 + ), ( "all-push", crds_stats.push.counts.iter().sum::(), @@ -664,6 +674,16 @@ pub(crate) fn submit_gossip_stats( ("SnapshotHashes-pull", crds_stats.pull.fails[10], i64), ("ContactInfo-push", crds_stats.push.fails[11], i64), ("ContactInfo-pull", crds_stats.pull.fails[11], i64), + ( + "RestartLastVotedForkSlots-push", + crds_stats.push.fails[12], + i64 + ), + ( + "RestartLastVotedForkSlots-pull", + crds_stats.pull.fails[12], + i64 + ), ("all-push", crds_stats.push.fails.iter().sum::(), i64), ("all-pull", crds_stats.pull.fails.iter().sum::(), i64), ); diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs index b20ba9dfb15647..5ce3cf5ec56065 100644 --- a/gossip/src/crds.rs +++ b/gossip/src/crds.rs @@ -103,7 +103,7 @@ pub enum GossipRoute<'a> { PushMessage(/*from:*/ &'a Pubkey), } -type CrdsCountsArray = [usize; 12]; +type CrdsCountsArray = [usize; 13]; pub(crate) struct CrdsDataStats { pub(crate) counts: CrdsCountsArray, @@ -721,6 +721,7 @@ impl CrdsDataStats { CrdsData::DuplicateShred(_, _) => 9, CrdsData::SnapshotHashes(_) => 10, CrdsData::ContactInfo(_) => 11, + CrdsData::RestartLastVotedForkSlots(_) => 12, // Update CrdsCountsArray if new items are added here. 
} } diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 125555ea51eeb4..63efa141bdf129 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -1,10 +1,10 @@ use { crate::{ - cluster_info::MAX_ACCOUNTS_HASHES, + cluster_info::{MAX_ACCOUNTS_HASHES, MAX_CRDS_OBJECT_SIZE}, contact_info::ContactInfo, deprecated, duplicate_shred::{DuplicateShred, DuplicateShredIndex, MAX_DUPLICATE_SHREDS}, - epoch_slots::EpochSlots, + epoch_slots::{CompressedSlots, EpochSlots, MAX_SLOTS_PER_ENTRY}, legacy_contact_info::LegacyContactInfo, }, bincode::{serialize, serialized_size}, @@ -94,6 +94,7 @@ pub enum CrdsData { DuplicateShred(DuplicateShredIndex, DuplicateShred), SnapshotHashes(SnapshotHashes), ContactInfo(ContactInfo), + RestartLastVotedForkSlots(RestartLastVotedForkSlots), } impl Sanitize for CrdsData { @@ -132,6 +133,7 @@ impl Sanitize for CrdsData { } CrdsData::SnapshotHashes(val) => val.sanitize(), CrdsData::ContactInfo(node) => node.sanitize(), + CrdsData::RestartLastVotedForkSlots(slots) => slots.sanitize(), } } } @@ -145,7 +147,7 @@ pub(crate) fn new_rand_timestamp(rng: &mut R) -> u64 { impl CrdsData { /// New random CrdsData for tests and benchmarks. fn new_rand(rng: &mut R, pubkey: Option) -> CrdsData { - let kind = rng.gen_range(0..7); + let kind = rng.gen_range(0..8); // TODO: Implement other kinds of CrdsData here. // TODO: Assign ranges to each arm proportional to their frequency in // the mainnet crds table. 
@@ -157,6 +159,9 @@ impl CrdsData { 3 => CrdsData::AccountsHashes(AccountsHashes::new_rand(rng, pubkey)), 4 => CrdsData::Version(Version::new_rand(rng, pubkey)), 5 => CrdsData::Vote(rng.gen_range(0..MAX_VOTES), Vote::new_rand(rng, pubkey)), + 6 => CrdsData::RestartLastVotedForkSlots(RestartLastVotedForkSlots::new_rand( + rng, pubkey, + )), _ => CrdsData::EpochSlots( rng.gen_range(0..MAX_EPOCH_SLOTS), EpochSlots::new_rand(rng, pubkey), @@ -485,6 +490,87 @@ impl Sanitize for NodeInstance { } } +#[derive(Serialize, Deserialize, Clone, Default, PartialEq, Eq, AbiExample, Debug)] +pub struct RestartLastVotedForkSlots { + pub from: Pubkey, + pub wallclock: u64, + pub slots: Vec, + pub last_voted_hash: Hash, + pub shred_version: u16, +} + +impl Sanitize for RestartLastVotedForkSlots { + fn sanitize(&self) -> std::result::Result<(), SanitizeError> { + if self.slots.is_empty() { + return Err(SanitizeError::InvalidValue); + } + self.slots.sanitize()?; + self.last_voted_hash.sanitize() + } +} + +impl RestartLastVotedForkSlots { + pub fn new(from: Pubkey, now: u64, last_voted_hash: Hash, shred_version: u16) -> Self { + Self { + from, + wallclock: now, + slots: Vec::new(), + last_voted_hash, + shred_version, + } + } + + /// New random Version for tests and benchmarks. 
+ pub fn new_rand(rng: &mut R, pubkey: Option) -> Self { + let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand); + let mut result = + RestartLastVotedForkSlots::new(pubkey, new_rand_timestamp(rng), Hash::new_unique(), 1); + let num_slots = rng.gen_range(2..20); + let mut slots = std::iter::repeat_with(|| 47825632 + rng.gen_range(0..512)) + .take(num_slots) + .collect::>(); + slots.sort(); + result.fill(&slots); + result + } + + pub fn fill(&mut self, slots: &[Slot]) -> usize { + let slots = &slots[slots.len().saturating_sub(MAX_SLOTS_PER_ENTRY)..]; + let mut num = 0; + let space = self.max_compressed_slot_size(); + if space == 0 { + return 0; + } + while num < slots.len() { + let mut cslot = CompressedSlots::new(space as usize); + num += cslot.add(&slots[num..]); + self.slots.push(cslot); + } + num + } + + pub fn deflate(&mut self) { + for s in self.slots.iter_mut() { + let _ = s.deflate(); + } + } + + pub fn max_compressed_slot_size(&self) -> isize { + let len_header = serialized_size(self).unwrap(); + let len_slot = serialized_size(&CompressedSlots::default()).unwrap(); + MAX_CRDS_OBJECT_SIZE as isize - (len_header + len_slot) as isize + } + + pub fn to_slots(&self, min_slot: Slot) -> Vec { + self.slots + .iter() + .filter(|s| min_slot < s.first_slot() + s.num_slots() as u64) + .filter_map(|s| s.to_slots(min_slot).ok()) + .flatten() + .collect() + } +} + /// Type of the replicated value /// These are labels for values in a record that is associated with `Pubkey` #[derive(PartialEq, Hash, Eq, Clone, Debug)] @@ -501,6 +587,7 @@ pub enum CrdsValueLabel { DuplicateShred(DuplicateShredIndex, Pubkey), SnapshotHashes(Pubkey), ContactInfo(Pubkey), + RestartLastVotedForkSlots(Pubkey), } impl fmt::Display for CrdsValueLabel { @@ -524,6 +611,9 @@ impl fmt::Display for CrdsValueLabel { write!(f, "SnapshotHashes({})", self.pubkey()) } CrdsValueLabel::ContactInfo(_) => write!(f, "ContactInfo({})", self.pubkey()), + CrdsValueLabel::RestartLastVotedForkSlots(_) => { 
+ write!(f, "RestartLastVotedForkSlots({})", self.pubkey()) + } } } } @@ -543,6 +633,7 @@ impl CrdsValueLabel { CrdsValueLabel::DuplicateShred(_, p) => *p, CrdsValueLabel::SnapshotHashes(p) => *p, CrdsValueLabel::ContactInfo(pubkey) => *pubkey, + CrdsValueLabel::RestartLastVotedForkSlots(p) => *p, } } } @@ -593,6 +684,7 @@ impl CrdsValue { CrdsData::DuplicateShred(_, shred) => shred.wallclock, CrdsData::SnapshotHashes(hash) => hash.wallclock, CrdsData::ContactInfo(node) => node.wallclock(), + CrdsData::RestartLastVotedForkSlots(slots) => slots.wallclock, } } pub fn pubkey(&self) -> Pubkey { @@ -609,6 +701,7 @@ impl CrdsValue { CrdsData::DuplicateShred(_, shred) => shred.from, CrdsData::SnapshotHashes(hash) => hash.from, CrdsData::ContactInfo(node) => *node.pubkey(), + CrdsData::RestartLastVotedForkSlots(slots) => slots.from, } } pub fn label(&self) -> CrdsValueLabel { @@ -627,6 +720,9 @@ impl CrdsValue { CrdsData::DuplicateShred(ix, shred) => CrdsValueLabel::DuplicateShred(*ix, shred.from), CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()), CrdsData::ContactInfo(node) => CrdsValueLabel::ContactInfo(*node.pubkey()), + CrdsData::RestartLastVotedForkSlots(_) => { + CrdsValueLabel::RestartLastVotedForkSlots(self.pubkey()) + } } } pub fn contact_info(&self) -> Option<&LegacyContactInfo> { @@ -1073,4 +1169,58 @@ mod test { assert!(node.should_force_push(&pubkey)); assert!(!node.should_force_push(&Pubkey::new_unique())); } + + #[test] + fn test_restart_last_voted_fork_slots() { + let keypair = Keypair::new(); + let slot = 53; + let slot_parent = slot - 5; + let shred_version = 21; + let mut slots = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + Hash::default(), + shred_version, + ); + let original_slots_vec = [slot_parent, slot]; + slots.fill(&original_slots_vec); + let value = + CrdsValue::new_signed(CrdsData::RestartLastVotedForkSlots(slots.clone()), &keypair); + assert_eq!(value.sanitize(), Ok(())); + let label = 
value.label(); + assert_eq!( + label, + CrdsValueLabel::RestartLastVotedForkSlots(keypair.pubkey()) + ); + assert_eq!(label.pubkey(), keypair.pubkey()); + assert_eq!(value.wallclock(), slots.wallclock); + let retrived_slots = slots.to_slots(0); + assert_eq!(retrived_slots.len(), 2); + assert_eq!(retrived_slots[0], slot_parent); + assert_eq!(retrived_slots[1], slot); + + let empty_slots = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + Hash::default(), + shred_version, + ); + let bad_value = + CrdsValue::new_signed(CrdsData::RestartLastVotedForkSlots(empty_slots), &keypair); + assert_eq!(bad_value.sanitize(), Err(SanitizeError::InvalidValue)); + + let last_slot: Slot = (MAX_SLOTS_PER_ENTRY + 10).try_into().unwrap(); + let mut large_slots = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + Hash::default(), + shred_version, + ); + let large_slots_vec: Vec = (0..last_slot + 1).collect(); + large_slots.fill(&large_slots_vec); + let retrived_slots = large_slots.to_slots(0); + assert_eq!(retrived_slots.len(), MAX_SLOTS_PER_ENTRY); + assert_eq!(retrived_slots.first(), Some(&11)); + assert_eq!(retrived_slots.last(), Some(&last_slot)); + } } diff --git a/gossip/src/epoch_slots.rs b/gossip/src/epoch_slots.rs index 186a17aa6ec255..c589e348143f7d 100644 --- a/gossip/src/epoch_slots.rs +++ b/gossip/src/epoch_slots.rs @@ -178,7 +178,7 @@ impl Default for CompressedSlots { } impl CompressedSlots { - fn new(max_size: usize) -> Self { + pub(crate) fn new(max_size: usize) -> Self { CompressedSlots::Uncompressed(Uncompressed::new(max_size)) } From 170478924705c9c62dbeb475c5425b68ba61b375 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Tue, 10 Oct 2023 09:23:18 +0900 Subject: [PATCH 293/407] Define tick related helper test methods (#33537) * Define tick related helper methods * dcou VoteSimulator * blacklist ledger-tool for dcou * fix dcou ci... 
* github --- core/src/replay_stage.rs | 4 +-- core/src/vote_simulator.rs | 5 +-- core/tests/snapshots.rs | 6 ++-- ledger-tool/Cargo.toml | 5 ++- ledger-tool/src/main.rs | 2 +- ledger/Cargo.toml | 1 + ledger/src/blockstore_processor.rs | 2 +- runtime/src/bank.rs | 16 +++++++++ runtime/src/bank/tests.rs | 6 ++-- runtime/src/bank_forks.rs | 2 +- runtime/src/snapshot_bank_utils.rs | 48 ++++++++++++------------- scripts/check-dev-context-only-utils.sh | 1 + 12 files changed, 60 insertions(+), 38 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 59036a997039c1..5af02b9c382898 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -4834,7 +4834,7 @@ pub(crate) mod tests { genesis_config.ticks_per_slot = 4; let bank0 = Bank::new_for_tests(&genesis_config); for _ in 0..genesis_config.ticks_per_slot { - bank0.register_tick(&Hash::default()); + bank0.register_default_tick_for_test(); } bank0.freeze(); let arc_bank0 = Arc::new(bank0); @@ -4879,7 +4879,7 @@ pub(crate) mod tests { &solana_sdk::pubkey::new_rand(), ); for _ in 0..genesis_config.ticks_per_slot { - bank.register_tick(&Hash::default()); + bank.register_default_tick_for_test(); } bank_forks.write().unwrap().insert(bank); let arc_bank = bank_forks.read().unwrap().get(i).unwrap(); diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index 79c418bcc7892f..50d76bf4caa447 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -1,3 +1,4 @@ +#![cfg(feature = "dev-context-only-utils")] use { crate::{ cluster_info_vote_listener::VoteTracker, @@ -117,7 +118,7 @@ impl VoteSimulator { } } while new_bank.tick_height() < new_bank.max_tick_height() { - new_bank.register_tick(&Hash::new_unique()); + new_bank.register_unique_tick(); } if !visit.node().has_no_child() || is_frozen { new_bank.freeze(); @@ -358,7 +359,7 @@ pub fn initialize_state( } while bank0.tick_height() < bank0.max_tick_height() { - bank0.register_tick(&Hash::new_unique()); + 
bank0.register_unique_tick(); } bank0.freeze(); let mut progress = ProgressMap::default(); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index b61e84a90810c9..3b689a8423e8b7 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -310,7 +310,7 @@ fn goto_end_of_slot(bank: &Bank) { let mut tick_hash = bank.last_blockhash(); loop { tick_hash = hashv(&[tick_hash.as_ref(), &[42]]); - bank.register_tick(&tick_hash); + bank.register_tick_for_test(&tick_hash); if tick_hash == bank.last_blockhash() { bank.freeze(); return; @@ -742,7 +742,7 @@ fn test_bank_forks_incremental_snapshot( assert_eq!(bank.process_transaction(&tx), Ok(())); while !bank.is_complete() { - bank.register_tick(&Hash::new_unique()); + bank.register_unique_tick(); } bank_forks.insert(bank) @@ -1041,7 +1041,7 @@ fn test_snapshots_with_background_services( assert_eq!(bank.process_transaction(&tx), Ok(())); while !bank.is_complete() { - bank.register_tick(&Hash::new_unique()); + bank.register_unique_tick(); } bank_forks.write().unwrap().insert(bank); diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index fb387773c14cfa..c64dfa07e91a91 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -39,7 +39,7 @@ solana-logger = { workspace = true } solana-measure = { workspace = true } solana-program-runtime = { workspace = true } solana-rpc = { workspace = true } -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true } solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } @@ -57,6 +57,9 @@ jemallocator = { workspace = true } assert_cmd = { workspace = true } bytecount = { workspace = true } +[features] +dev-context-only-utils = [] + [target."cfg(unix)".dependencies] signal-hook = { workspace = true } diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 1cce5ad2789371..697199981b26f8 100644 --- 
a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -3139,7 +3139,7 @@ fn main() { if child_bank_required { while !bank.is_complete() { - bank.register_tick(&Hash::new_unique()); + bank.register_unique_tick(); } } diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index df52fb3462eaf1..b3fb1ac5f9b97d 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -78,6 +78,7 @@ features = ["lz4"] bs58 = { workspace = true } solana-account-decoder = { workspace = true } solana-logger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } spl-pod = { workspace = true } test-case = { workspace = true } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index f5a8836087d3d4..219fa4c62ed3d4 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -3373,7 +3373,7 @@ pub mod tests { let blockhash = bank.last_blockhash(); while blockhash == bank.last_blockhash() { - bank.register_tick(&Hash::default()); + bank.register_default_tick_for_test(); } // ensure bank can process 2 entries that do not have a common account and tick is registered diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 61fab556e503a4..76102732c7d2d8 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4208,6 +4208,22 @@ impl Bank { self.tick_height.fetch_add(1, Relaxed); } + #[cfg(feature = "dev-context-only-utils")] + pub fn register_tick_for_test(&self, hash: &Hash) { + // currently meaningless wrapper; upcoming pr will make it an actual helper... 
+ self.register_tick(hash) + } + + #[cfg(feature = "dev-context-only-utils")] + pub fn register_default_tick_for_test(&self) { + self.register_tick(&Hash::default()) + } + + #[cfg(feature = "dev-context-only-utils")] + pub fn register_unique_tick(&self) { + self.register_tick(&Hash::new_unique()) + } + pub fn is_complete(&self) -> bool { self.tick_height() == self.max_tick_height() } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 58ce790d43d0d4..d009f60d5e4073 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -180,7 +180,7 @@ fn test_race_register_tick_freeze() { let register_tick_thread = Builder::new() .name("register_tick".to_string()) .spawn(move || { - bank0_.register_tick(&hash); + bank0_.register_tick_for_test(&hash); }) .unwrap(); @@ -4204,7 +4204,7 @@ fn test_is_delta_true() { assert!(!bank1.is_delta.load(Relaxed)); assert_ne!(hash1, bank.hash()); // ticks don't make a bank into a delta or change its state unless a block boundary is crossed - bank1.register_tick(&Hash::default()); + bank1.register_default_tick_for_test(); assert!(!bank1.is_delta.load(Relaxed)); assert_eq!(bank1.hash_internal_state(), hash1); } @@ -4928,7 +4928,7 @@ fn test_hash_internal_state_unchanged_with_ticks() { // because blockhashes are only recorded at block boundaries for _ in 0..genesis_config.ticks_per_slot { assert_eq!(bank1.hash_internal_state(), hash1); - bank1.register_tick(&Hash::default()); + bank1.register_default_tick_for_test(); } assert_eq!(bank1.hash_internal_state(), hash1); } diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index ec69df9dded953..c1ef6830d1998c 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -696,7 +696,7 @@ mod tests { let bank = Bank::new_for_tests(&genesis_config); let mut bank_forks = BankForks::new(bank); let child_bank = Bank::new_from_parent(bank_forks[0].clone(), &Pubkey::default(), 1); - child_bank.register_tick(&Hash::default()); + 
child_bank.register_default_tick_for_test(); bank_forks.insert(child_bank); assert_eq!(bank_forks[1u64].tick_height(), 1); assert_eq!(bank_forks.working_bank().tick_height(), 1); diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 43b0ef5a364563..e538b07677630f 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -1290,7 +1290,7 @@ mod tests { let original_bank = Bank::new_for_tests(&genesis_config); while !original_bank.is_complete() { - original_bank.register_tick(&Hash::new_unique()); + original_bank.register_unique_tick(); } let (_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests(); @@ -1359,7 +1359,7 @@ mod tests { .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); while !bank0.is_complete() { - bank0.register_tick(&Hash::new_unique()); + bank0.register_unique_tick(); } let slot = 1; @@ -1374,7 +1374,7 @@ mod tests { .transfer(sol_to_lamports(5.), &mint_keypair, &key5.pubkey()) .unwrap(); while !bank1.is_complete() { - bank1.register_tick(&Hash::new_unique()); + bank1.register_unique_tick(); } let slot = slot + 1; @@ -1383,7 +1383,7 @@ mod tests { .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); while !bank2.is_complete() { - bank2.register_tick(&Hash::new_unique()); + bank2.register_unique_tick(); } let slot = slot + 1; @@ -1392,7 +1392,7 @@ mod tests { .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); while !bank3.is_complete() { - bank3.register_tick(&Hash::new_unique()); + bank3.register_unique_tick(); } let slot = slot + 1; @@ -1401,7 +1401,7 @@ mod tests { .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); while !bank4.is_complete() { - bank4.register_tick(&Hash::new_unique()); + bank4.register_unique_tick(); } let (_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests(); @@ -1476,7 +1476,7 @@ mod tests { .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) 
.unwrap(); while !bank0.is_complete() { - bank0.register_tick(&Hash::new_unique()); + bank0.register_unique_tick(); } let slot = 1; @@ -1491,7 +1491,7 @@ mod tests { .transfer(sol_to_lamports(5.), &mint_keypair, &key5.pubkey()) .unwrap(); while !bank1.is_complete() { - bank1.register_tick(&Hash::new_unique()); + bank1.register_unique_tick(); } let (_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests(); @@ -1519,7 +1519,7 @@ mod tests { .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); while !bank2.is_complete() { - bank2.register_tick(&Hash::new_unique()); + bank2.register_unique_tick(); } let slot = slot + 1; @@ -1528,7 +1528,7 @@ mod tests { .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); while !bank3.is_complete() { - bank3.register_tick(&Hash::new_unique()); + bank3.register_unique_tick(); } let slot = slot + 1; @@ -1537,7 +1537,7 @@ mod tests { .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); while !bank4.is_complete() { - bank4.register_tick(&Hash::new_unique()); + bank4.register_unique_tick(); } let incremental_snapshot_archive_info = bank_to_incremental_snapshot_archive( @@ -1597,7 +1597,7 @@ mod tests { .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); while !bank0.is_complete() { - bank0.register_tick(&Hash::new_unique()); + bank0.register_unique_tick(); } let slot = 1; @@ -1612,7 +1612,7 @@ mod tests { .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); while !bank1.is_complete() { - bank1.register_tick(&Hash::new_unique()); + bank1.register_unique_tick(); } let (_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests(); @@ -1640,7 +1640,7 @@ mod tests { .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); while !bank2.is_complete() { - bank2.register_tick(&Hash::new_unique()); + bank2.register_unique_tick(); } let slot = slot + 1; @@ -1649,7 +1649,7 @@ mod tests { .transfer(sol_to_lamports(2.), 
&mint_keypair, &key2.pubkey()) .unwrap(); while !bank3.is_complete() { - bank3.register_tick(&Hash::new_unique()); + bank3.register_unique_tick(); } let slot = slot + 1; @@ -1658,7 +1658,7 @@ mod tests { .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); while !bank4.is_complete() { - bank4.register_tick(&Hash::new_unique()); + bank4.register_unique_tick(); } bank_to_incremental_snapshot_archive( @@ -1746,7 +1746,7 @@ mod tests { .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey()) .unwrap(); while !bank0.is_complete() { - bank0.register_tick(&Hash::new_unique()); + bank0.register_unique_tick(); } let slot = 1; @@ -1755,7 +1755,7 @@ mod tests { .transfer(lamports_to_transfer, &key2, &key1.pubkey()) .unwrap(); while !bank1.is_complete() { - bank1.register_tick(&Hash::new_unique()); + bank1.register_unique_tick(); } let full_snapshot_slot = slot; @@ -1794,7 +1794,7 @@ mod tests { "Ensure Account1's balance is zero" ); while !bank2.is_complete() { - bank2.register_tick(&Hash::new_unique()); + bank2.register_unique_tick(); } // Take an incremental snapshot and then do a roundtrip on the bank and ensure it @@ -1844,13 +1844,13 @@ mod tests { .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey()) .unwrap(); while !bank3.is_complete() { - bank3.register_tick(&Hash::new_unique()); + bank3.register_unique_tick(); } let slot = slot + 1; let bank4 = Arc::new(Bank::new_from_parent(bank3, &collector, slot)); while !bank4.is_complete() { - bank4.register_tick(&Hash::new_unique()); + bank4.register_unique_tick(); } // Ensure account1 has been cleaned/purged from everywhere @@ -1917,13 +1917,13 @@ mod tests { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); while !bank0.is_complete() { - bank0.register_tick(&Hash::new_unique()); + bank0.register_unique_tick(); } let slot = 1; let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, 
slot)); while !bank1.is_complete() { - bank1.register_tick(&Hash::new_unique()); + bank1.register_unique_tick(); } let all_snapshots_dir = tempfile::TempDir::new().unwrap(); @@ -1948,7 +1948,7 @@ mod tests { .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); while !bank2.is_complete() { - bank2.register_tick(&Hash::new_unique()); + bank2.register_unique_tick(); } bank_to_incremental_snapshot_archive( diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index debb323db113c4..fb459f0759729d 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -29,6 +29,7 @@ source ci/rust-version.sh nightly # reason to bend dev-context-only-utils's original intention and that listed # package isn't part of released binaries. declare tainted_packages=( + solana-ledger-tool ) # convert to comma separeted (ref: https://stackoverflow.com/a/53839433) From 288e0b75363403d21a0947840c36ebd39459c122 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Oct 2023 13:24:42 +0000 Subject: [PATCH 294/407] build(deps): bump regex from 1.9.6 to 1.10.0 (#33620) * build(deps): bump regex from 1.9.6 to 1.10.0 Bumps [regex](https://github.com/rust-lang/regex) from 1.9.6 to 1.10.0. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.9.6...1.10.0) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8afcd1f638451b..f7fb5e32d35a43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4451,14 +4451,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.6" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" +checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-automata 0.3.9", - "regex-syntax 0.7.5", + "regex-automata 0.4.1", + "regex-syntax 0.8.0", ] [[package]] @@ -4469,13 +4469,13 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.3.9" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" +checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-syntax 0.7.5", + "regex-syntax 0.8.0", ] [[package]] @@ -4486,9 +4486,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "c3cbb081b9784b07cceb8824c8583f86db4814d172ab043f3c23f7dc600bf83d" [[package]] name = "reqwest" diff --git a/Cargo.toml b/Cargo.toml index 
509ffb6047ac9e..4df3ac478fa11d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,7 +280,7 @@ raptorq = "1.7.0" rayon = "1.8.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" -regex = "1.9.6" +regex = "1.10.0" rolling-file = "0.2.0" reqwest = { version = "0.11.22", default-features = false } rpassword = "7.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 25c3c36583bf4a..16d0f6a12f1ea8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3814,9 +3814,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.6" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" +checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -3826,9 +3826,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.9" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" +checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -3837,9 +3837,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "c3cbb081b9784b07cceb8824c8583f86db4814d172ab043f3c23f7dc600bf83d" [[package]] name = "reqwest" From 42b63a021f650430658f18cd6380778b7528fac8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Oct 2023 13:25:11 +0000 Subject: [PATCH 295/407] build(deps): bump semver from 1.0.19 to 1.0.20 (#33622) * build(deps): bump semver from 1.0.19 to 1.0.20 Bumps [semver](https://github.com/dtolnay/semver) from 1.0.19 to 1.0.20. 
- [Release notes](https://github.com/dtolnay/semver/releases) - [Commits](https://github.com/dtolnay/semver/compare/1.0.19...1.0.20) --- updated-dependencies: - dependency-name: semver dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f7fb5e32d35a43..21d20a3516ef6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -963,7 +963,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_json", "thiserror", @@ -4617,7 +4617,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.19", + "semver 1.0.20", ] [[package]] @@ -4807,9 +4807,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" dependencies = [ "serde", ] @@ -5551,7 +5551,7 @@ dependencies = [ "predicates", "regex", "reqwest", - "semver 1.0.19", + "semver 1.0.20", "serial_test", "solana-download-utils", "solana-logger", @@ -5657,7 +5657,7 @@ dependencies = [ "num-traits", "pretty-hex", "reqwest", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_derive", "serde_json", @@ -5719,7 +5719,7 @@ dependencies = [ "humantime", "indicatif", "pretty-hex", - 
"semver 1.0.19", + "semver 1.0.20", "serde", "serde_json", "solana-account-decoder", @@ -6220,7 +6220,7 @@ dependencies = [ "nix 0.26.4", "reqwest", "scopeguard", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_yaml 0.8.26", "serde_yaml 0.9.25", @@ -6730,7 +6730,7 @@ dependencies = [ "futures-util", "log", "reqwest", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_derive", "serde_json", @@ -6794,7 +6794,7 @@ dependencies = [ "num-traits", "parking_lot 0.12.1", "qstring", - "semver 1.0.19", + "semver 1.0.20", "solana-sdk", "thiserror", "uriparse", @@ -6875,7 +6875,7 @@ dependencies = [ "jsonrpc-http-server", "log", "reqwest", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_derive", "serde_json", @@ -6896,7 +6896,7 @@ dependencies = [ "bs58", "jsonrpc-core", "reqwest", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_derive", "serde_json", @@ -7535,7 +7535,7 @@ version = "1.18.0" dependencies = [ "log", "rustc_version 0.4.0", - "semver 1.0.19", + "semver 1.0.20", "serde", "serde_derive", "solana-frozen-abi", diff --git a/Cargo.toml b/Cargo.toml index 4df3ac478fa11d..993189e1e9d807 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -288,7 +288,7 @@ rustc_version = "0.4" rustls = { version = "0.21.7", default-features = false, features = ["quic"] } rustversion = "1.0.14" scopeguard = "1.2.0" -semver = "1.0.19" +semver = "1.0.20" serde = "1.0.188" serde_bytes = "0.11.12" serde_derive = "1.0.103" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 16d0f6a12f1ea8..0742f193ac8de3 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4128,9 +4128,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" From 6c7d3c8aebb0745a410c2ee04c6c5b6935247555 
Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 10 Oct 2023 07:02:47 -0700 Subject: [PATCH 296/407] remove redundant page_align (#33608) --- accounts-db/src/accounts_db.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index e82a2edf080526..8bfeb3fb289b85 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6597,12 +6597,10 @@ impl AccountsDb { ); if !is_dead_slot { - let aligned_total_size = Self::page_align(total_size); // This ensures that all updates are written to an AppendVec, before any // updates to the index happen, so anybody that sees a real entry in the index, // will be able to find the account in storage - let flushed_store = - self.create_and_insert_store(slot, aligned_total_size, "flush_slot_cache"); + let flushed_store = self.create_and_insert_store(slot, total_size, "flush_slot_cache"); // irrelevant - account will already be hashed since it was used in bank hash previously let include_slot_in_hash = IncludeSlotInHash::IrrelevantAssertOnUse; self.store_accounts_frozen( From b7962a3610b9beec2bfe660622a31fc8e34c1cd3 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 10 Oct 2023 07:03:37 -0700 Subject: [PATCH 297/407] stop padding new append vecs to page size (#33607) * stop padding new append vecs to page size * for creating test accounts, allocate larger like we used to --- accounts-db/src/accounts_db.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 8bfeb3fb289b85..d37fcf655d74f5 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5733,11 +5733,7 @@ impl AccountsDb { .create_store_count .fetch_add(1, Ordering::Relaxed); let path_index = thread_rng().gen_range(0..paths.len()); - let store = Arc::new(self.new_storage_entry( - slot, - Path::new(&paths[path_index]), - 
Self::page_align(size), - )); + let store = Arc::new(self.new_storage_entry(slot, Path::new(&paths[path_index]), size)); debug!( "creating store: {} slot: {} len: {} size: {} from: {} path: {:?}", @@ -9913,7 +9909,7 @@ pub mod test_utils { // allocate an append vec for this slot that can hold all the test accounts. This prevents us from creating more than 1 append vec for this slot. _ = accounts.accounts_db.create_and_insert_store( slot, - bytes_required as u64, + AccountsDb::page_align(bytes_required as u64), "create_test_accounts", ); } From 1262ff7589d8de5adb6e7640b76544eb3c422ce8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Tue, 10 Oct 2023 16:09:12 +0200 Subject: [PATCH 298/407] Fix - CPI interface `bool` masking (#33623) Adds masking of booleans in CPI interface to disable_cpi_setting_executable_and_rent_epoch. --- programs/bpf_loader/src/syscalls/cpi.rs | 153 +++++++++++++++++++----- 1 file changed, 125 insertions(+), 28 deletions(-) diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index 0240ca65b0d54b..1509805b9f9cb0 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -106,6 +106,7 @@ impl<'a, 'b> CallerAccount<'a, 'b> { fn from_account_info( invoke_context: &InvokeContext, memory_mapping: &'b MemoryMapping<'a>, + is_disable_cpi_setting_executable_and_rent_epoch_active: bool, _vm_addr: u64, account_info: &AccountInfo, account_metadata: &SerializedAccountMetadata, @@ -257,8 +258,16 @@ impl<'a, 'b> CallerAccount<'a, 'b> { vm_data_addr, ref_to_len_in_vm, serialized_len_ptr, - executable: account_info.executable, - rent_epoch: account_info.rent_epoch, + executable: if is_disable_cpi_setting_executable_and_rent_epoch_active { + false + } else { + account_info.executable + }, + rent_epoch: if is_disable_cpi_setting_executable_and_rent_epoch_active { + 0 + } else { + account_info.rent_epoch + }, }) } @@ -266,6 +275,7 @@ impl<'a, 'b> 
CallerAccount<'a, 'b> { fn from_sol_account_info( invoke_context: &InvokeContext, memory_mapping: &'b MemoryMapping<'a>, + is_disable_cpi_setting_executable_and_rent_epoch_active: bool, vm_addr: u64, account_info: &SolAccountInfo, account_metadata: &SerializedAccountMetadata, @@ -391,8 +401,16 @@ impl<'a, 'b> CallerAccount<'a, 'b> { vm_data_addr: account_info.data_addr, ref_to_len_in_vm, serialized_len_ptr, - executable: account_info.executable, - rent_epoch: account_info.rent_epoch, + executable: if is_disable_cpi_setting_executable_and_rent_epoch_active { + false + } else { + account_info.executable + }, + rent_epoch: if is_disable_cpi_setting_executable_and_rent_epoch_active { + 0 + } else { + account_info.rent_epoch + }, }) } @@ -475,14 +493,36 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { check_instruction_size(ix.accounts.len(), ix.data.len(), invoke_context)?; - let accounts = translate_slice::( + let account_metas = translate_slice::( memory_mapping, ix.accounts.as_ptr() as u64, ix.accounts.len() as u64, invoke_context.get_check_aligned(), invoke_context.get_check_size(), - )? 
- .to_vec(); + )?; + let accounts = if invoke_context + .feature_set + .is_active(&feature_set::disable_cpi_setting_executable_and_rent_epoch::id()) + { + let mut accounts = Vec::with_capacity(ix.accounts.len()); + #[allow(clippy::needless_range_loop)] + for account_index in 0..ix.accounts.len() { + #[allow(clippy::indexing_slicing)] + let account_meta = &account_metas[account_index]; + if unsafe { + std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 + || std::ptr::read_volatile( + &account_meta.is_writable as *const _ as *const u8, + ) > 1 + } { + return Err(Box::new(InstructionError::InvalidArgument)); + } + accounts.push(account_meta.clone()); + } + accounts + } else { + account_metas.to_vec() + }; let ix_data_len = ix.data.len() as u64; if invoke_context @@ -695,7 +735,7 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { ix_c.program_id_addr, invoke_context.get_check_aligned(), )?; - let meta_cs = translate_slice::( + let account_metas = translate_slice::( memory_mapping, ix_c.accounts_addr, ix_c.accounts_len, @@ -724,21 +764,53 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { invoke_context.get_check_size(), )? 
.to_vec(); - let accounts = meta_cs - .iter() - .map(|meta_c| { + + let accounts = if invoke_context + .feature_set + .is_active(&feature_set::disable_cpi_setting_executable_and_rent_epoch::id()) + { + let mut accounts = Vec::with_capacity(ix_c.accounts_len as usize); + #[allow(clippy::needless_range_loop)] + for account_index in 0..ix_c.accounts_len as usize { + #[allow(clippy::indexing_slicing)] + let account_meta = &account_metas[account_index]; + if unsafe { + std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 + || std::ptr::read_volatile( + &account_meta.is_writable as *const _ as *const u8, + ) > 1 + } { + return Err(Box::new(InstructionError::InvalidArgument)); + } let pubkey = translate_type::( memory_mapping, - meta_c.pubkey_addr, + account_meta.pubkey_addr, invoke_context.get_check_aligned(), )?; - Ok(AccountMeta { + accounts.push(AccountMeta { pubkey: *pubkey, - is_signer: meta_c.is_signer, - is_writable: meta_c.is_writable, + is_signer: account_meta.is_signer, + is_writable: account_meta.is_writable, + }); + } + accounts + } else { + account_metas + .iter() + .map(|account_meta| { + let pubkey = translate_type::( + memory_mapping, + account_meta.pubkey_addr, + invoke_context.get_check_aligned(), + )?; + Ok(AccountMeta { + pubkey: *pubkey, + is_signer: account_meta.is_signer, + is_writable: account_meta.is_writable, + }) }) - }) - .collect::, Error>>()?; + .collect::, Error>>()? 
+ }; Ok(StableInstruction { accounts: accounts.into(), @@ -848,17 +920,34 @@ where invoke_context.get_check_size(), )?; check_account_infos(account_infos.len(), invoke_context)?; - let account_info_keys = account_infos - .iter() - .map(|account_info| { - translate_type::( + let account_info_keys = if invoke_context + .feature_set + .is_active(&feature_set::disable_cpi_setting_executable_and_rent_epoch::id()) + { + let mut account_info_keys = Vec::with_capacity(account_infos_len as usize); + #[allow(clippy::needless_range_loop)] + for account_index in 0..account_infos_len as usize { + #[allow(clippy::indexing_slicing)] + let account_info = &account_infos[account_index]; + account_info_keys.push(translate_type::( memory_mapping, key_addr(account_info), invoke_context.get_check_aligned(), - ) - }) - .collect::, Error>>()?; - + )?); + } + account_info_keys + } else { + account_infos + .iter() + .map(|account_info| { + translate_type::( + memory_mapping, + key_addr(account_info), + invoke_context.get_check_aligned(), + ) + }) + .collect::, Error>>()? + }; Ok((account_infos, account_info_keys)) } @@ -879,6 +968,7 @@ where F: Fn( &InvokeContext, &'b MemoryMapping<'a>, + bool, u64, &T, &SerializedAccountMetadata, @@ -887,6 +977,9 @@ where let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let mut accounts = Vec::with_capacity(instruction_accounts.len().saturating_add(1)); + let is_disable_cpi_setting_executable_and_rent_epoch_active = invoke_context + .feature_set + .is_active(&disable_cpi_setting_executable_and_rent_epoch::id()); let program_account_index = program_indices .last() @@ -943,16 +1036,19 @@ where })?; // build the CallerAccount corresponding to this account. 
+ if caller_account_index >= account_infos.len() { + return Err(Box::new(SyscallError::InvalidLength)); + } + #[allow(clippy::indexing_slicing)] let caller_account = do_translate( invoke_context, memory_mapping, + is_disable_cpi_setting_executable_and_rent_epoch_active, account_infos_addr.saturating_add( caller_account_index.saturating_mul(mem::size_of::()) as u64, ), - account_infos - .get(caller_account_index) - .ok_or(SyscallError::InvalidLength)?, + &account_infos[caller_account_index], serialized_metadata, )?; @@ -1822,6 +1918,7 @@ mod tests { let caller_account = CallerAccount::from_account_info( &invoke_context, &memory_mapping, + false, vm_addr, account_info, &account_metadata, From dc522012c8bae1ff3bda75b8942c8d0699692c24 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:42:04 +0000 Subject: [PATCH 299/407] build(deps): bump proptest from 1.2.0 to 1.3.1 (#33621) Bumps [proptest](https://github.com/proptest-rs/proptest) from 1.2.0 to 1.3.1. - [Release notes](https://github.com/proptest-rs/proptest/releases) - [Changelog](https://github.com/proptest-rs/proptest/blob/master/CHANGELOG.md) - [Commits](https://github.com/proptest-rs/proptest/compare/v1.2.0...v1.3.1) --- updated-dependencies: - dependency-name: proptest dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21d20a3516ef6f..66fb0306840620 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4066,19 +4066,19 @@ dependencies = [ [[package]] name = "proptest" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" +checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" dependencies = [ "bit-set", - "bitflags 1.3.2", - "byteorder", + "bit-vec", + "bitflags 2.3.3", "lazy_static", "num-traits", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.6.29", + "regex-syntax 0.7.5", "rusty-fork", "tempfile", "unarray", @@ -4480,9 +4480,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.29" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regex-syntax" diff --git a/Cargo.toml b/Cargo.toml index 993189e1e9d807..703c61fddaaf32 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -263,7 +263,7 @@ pkcs8 = "0.8.0" predicates = "2.1" pretty-hex = "0.3.0" proc-macro2 = "1.0.69" -proptest = "1.2" +proptest = "1.3" prost = "0.11.9" prost-build = "0.11.9" prost-types = "0.11.9" From cb695c7b32b9064736a1d675df561d7b208259f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Tue, 10 Oct 2023 17:26:17 +0200 Subject: [PATCH 300/407] Cleanup - Feature Gate of `enable_early_verification_of_account_modifications` (#33579) Cleans up the feature gate of enable_early_verification_of_account_modifications: - Removes PreAccount - Removes 
InvokeContext::pre_accounts and InvokeContext::rent - Removes InvokeContext::verify() and InvokeContext::verify_and_update() - Removes TransactionContext::is_early_verification_of_account_modifications_enabled() - Removes TransactionAccounts::is_early_verification_of_account_modifications_enabled - No longer optional: TransactionContext::rent --- accounts-db/src/transaction_results.rs | 7 +- program-runtime/benches/pre_account.rs | 101 --- program-runtime/src/invoke_context.rs | 291 +-------- program-runtime/src/lib.rs | 1 - program-runtime/src/message_processor.rs | 20 +- program-runtime/src/pre_account.rs | 610 ------------------- programs/bpf_loader/benches/serialization.rs | 2 +- programs/bpf_loader/src/syscalls/cpi.rs | 11 +- programs/bpf_loader/src/syscalls/mod.rs | 5 +- programs/vote/src/vote_state/mod.rs | 14 +- runtime/src/bank.rs | 31 +- runtime/src/bank/tests.rs | 6 +- sdk/src/transaction_context.rs | 211 +++---- 13 files changed, 122 insertions(+), 1188 deletions(-) delete mode 100644 program-runtime/benches/pre_account.rs delete mode 100644 program-runtime/src/pre_account.rs diff --git a/accounts-db/src/transaction_results.rs b/accounts-db/src/transaction_results.rs index f7228dfff5db88..7a6401d62d7a04 100644 --- a/accounts-db/src/transaction_results.rs +++ b/accounts-db/src/transaction_results.rs @@ -176,13 +176,16 @@ pub fn inner_instructions_list_from_instruction_trace( #[cfg(test)] mod tests { - use {super::*, solana_sdk::transaction_context::TransactionContext}; + use { + super::*, + solana_sdk::{sysvar::rent::Rent, transaction_context::TransactionContext}, + }; #[test] fn test_inner_instructions_list_from_instruction_trace() { let instruction_trace = [1, 2, 1, 1, 2, 3, 2]; let mut transaction_context = - TransactionContext::new(vec![], None, 3, instruction_trace.len()); + TransactionContext::new(vec![], Rent::default(), 3, instruction_trace.len()); for (index_in_trace, stack_height) in instruction_trace.into_iter().enumerate() { while 
stack_height <= transaction_context.get_instruction_context_stack_height() { transaction_context.pop().unwrap(); diff --git a/program-runtime/benches/pre_account.rs b/program-runtime/benches/pre_account.rs deleted file mode 100644 index b5fdc5cb945e3e..00000000000000 --- a/program-runtime/benches/pre_account.rs +++ /dev/null @@ -1,101 +0,0 @@ -#![feature(test)] - -extern crate test; - -use { - log::*, - solana_program_runtime::{pre_account::PreAccount, timings::ExecuteDetailsTimings}, - solana_sdk::{account::AccountSharedData, pubkey, rent::Rent}, - test::Bencher, -}; - -#[bench] -fn bench_verify_account_changes_data(bencher: &mut Bencher) { - solana_logger::setup(); - - let owner = pubkey::new_rand(); - let non_owner = pubkey::new_rand(); - let pre = PreAccount::new( - &pubkey::new_rand(), - AccountSharedData::new(0, BUFSIZE, &owner), - ); - let post = AccountSharedData::new(0, BUFSIZE, &owner); - assert_eq!( - pre.verify( - &owner, - false, - &Rent::default(), - &post, - &mut ExecuteDetailsTimings::default(), - false, - ), - Ok(()) - ); - - // this one should be faster - bencher.iter(|| { - pre.verify( - &owner, - false, - &Rent::default(), - &post, - &mut ExecuteDetailsTimings::default(), - false, - ) - .unwrap(); - }); - let summary = bencher.bench(|_bencher| Ok(())).unwrap().unwrap(); - info!("data no change by owner: {} ns/iter", summary.median); - - let pre_data = vec![BUFSIZE]; - let post_data = vec![BUFSIZE]; - bencher.iter(|| pre_data == post_data); - let summary = bencher.bench(|_bencher| Ok(())).unwrap().unwrap(); - info!("data compare {} ns/iter", summary.median); - - let pre = PreAccount::new( - &pubkey::new_rand(), - AccountSharedData::new(0, BUFSIZE, &owner), - ); - bencher.iter(|| { - pre.verify( - &non_owner, - false, - &Rent::default(), - &post, - &mut ExecuteDetailsTimings::default(), - false, - ) - .unwrap(); - }); - let summary = bencher.bench(|_bencher| Ok(())).unwrap().unwrap(); - info!("data no change by non owner: {} ns/iter", 
summary.median); -} - -const BUFSIZE: usize = 1024 * 1024 + 127; -static BUF0: [u8; BUFSIZE] = [0; BUFSIZE]; -static BUF1: [u8; BUFSIZE] = [1; BUFSIZE]; - -#[bench] -fn bench_is_zeroed(bencher: &mut Bencher) { - bencher.iter(|| { - PreAccount::is_zeroed(&BUF0); - }); -} - -#[bench] -fn bench_is_zeroed_not(bencher: &mut Bencher) { - bencher.iter(|| { - PreAccount::is_zeroed(&BUF1); - }); -} - -#[bench] -fn bench_is_zeroed_by_iter(bencher: &mut Bencher) { - bencher.iter(|| BUF0.iter().all(|item| *item == 0)); -} - -#[bench] -fn bench_is_zeroed_not_by_iter(bencher: &mut Bencher) { - bencher.iter(|| BUF1.iter().all(|item| *item == 0)); -} diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 12f82300d78521..abe49ccd84b270 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -2,10 +2,9 @@ use { crate::{ accounts_data_meter::AccountsDataMeter, compute_budget::ComputeBudget, - ic_logger_msg, ic_msg, + ic_msg, loaded_programs::{LoadedProgram, LoadedProgramType, LoadedProgramsForTxBatch}, log_collector::LogCollector, - pre_account::PreAccount, stable_log, sysvar_cache::SysvarCache, timings::{ExecuteDetailsTimings, ExecuteTimings}, @@ -18,17 +17,13 @@ use { vm::{BuiltinFunction, Config, ContextObject, ProgramResult}, }, solana_sdk::{ - account::{AccountSharedData, ReadableAccount}, + account::AccountSharedData, bpf_loader_deprecated, - feature_set::{ - check_slice_translation_size, enable_early_verification_of_account_modifications, - native_programs_consume_cu, FeatureSet, - }, + feature_set::{check_slice_translation_size, native_programs_consume_cu, FeatureSet}, hash::Hash, instruction::{AccountMeta, InstructionError}, native_loader, pubkey::Pubkey, - rent::Rent, saturating_add_assign, stable_layout::stable_instruction::StableInstruction, transaction_context::{ @@ -161,8 +156,6 @@ pub struct SerializedAccountMetadata { pub struct InvokeContext<'a> { pub transaction_context: &'a mut 
TransactionContext, - rent: Rent, - pre_accounts: Vec, sysvar_cache: &'a SysvarCache, log_collector: Option>>, compute_budget: ComputeBudget, @@ -184,7 +177,6 @@ impl<'a> InvokeContext<'a> { #[allow(clippy::too_many_arguments)] pub fn new( transaction_context: &'a mut TransactionContext, - rent: Rent, sysvar_cache: &'a SysvarCache, log_collector: Option>>, compute_budget: ComputeBudget, @@ -198,8 +190,6 @@ impl<'a> InvokeContext<'a> { ) -> Self { Self { transaction_context, - rent, - pre_accounts: Vec::new(), sysvar_cache, log_collector, current_compute_budget: compute_budget, @@ -242,42 +232,6 @@ impl<'a> InvokeContext<'a> { == 0 { self.current_compute_budget = self.compute_budget; - - if !self - .feature_set - .is_active(&enable_early_verification_of_account_modifications::id()) - { - self.pre_accounts = Vec::with_capacity( - instruction_context.get_number_of_instruction_accounts() as usize, - ); - for instruction_account_index in - 0..instruction_context.get_number_of_instruction_accounts() - { - if instruction_context - .is_instruction_account_duplicate(instruction_account_index)? - .is_some() - { - continue; // Skip duplicate account - } - let index_in_transaction = instruction_context - .get_index_of_instruction_account_in_transaction( - instruction_account_index, - )?; - if index_in_transaction >= self.transaction_context.get_number_of_accounts() { - return Err(InstructionError::MissingAccount); - } - let account = self - .transaction_context - .get_account_at_index(index_in_transaction)? 
- .borrow() - .clone(); - self.pre_accounts.push(PreAccount::new( - self.transaction_context - .get_key_of_account_at_index(index_in_transaction)?, - account, - )); - } - } } else { let contains = (0..self .transaction_context @@ -325,189 +279,6 @@ impl<'a> InvokeContext<'a> { .get_instruction_context_stack_height() } - /// Verify the results of an instruction - /// - /// Note: `instruction_accounts` must be the same as passed to `InvokeContext::push()`, - /// so that they match the order of `pre_accounts`. - fn verify( - &mut self, - instruction_accounts: &[InstructionAccount], - program_indices: &[IndexOfAccount], - ) -> Result<(), InstructionError> { - let instruction_context = self - .transaction_context - .get_current_instruction_context() - .map_err(|_| InstructionError::CallDepth)?; - let program_id = instruction_context - .get_last_program_key(self.transaction_context) - .map_err(|_| InstructionError::CallDepth)?; - - // Verify all executable accounts have zero outstanding refs - for account_index in program_indices.iter() { - self.transaction_context - .get_account_at_index(*account_index)? - .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowOutstanding)?; - } - - // Verify the per-account instruction results - let (mut pre_sum, mut post_sum) = (0_u128, 0_u128); - let mut pre_account_index = 0; - for (instruction_account_index, instruction_account) in - instruction_accounts.iter().enumerate() - { - if instruction_account_index as IndexOfAccount != instruction_account.index_in_callee { - continue; // Skip duplicate account - } - { - // Verify account has no outstanding references - let _ = self - .transaction_context - .get_account_at_index(instruction_account.index_in_transaction)? 
- .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowOutstanding)?; - } - let pre_account = &self - .pre_accounts - .get(pre_account_index) - .ok_or(InstructionError::NotEnoughAccountKeys)?; - pre_account_index = pre_account_index.saturating_add(1); - let account = self - .transaction_context - .get_account_at_index(instruction_account.index_in_transaction)? - .borrow(); - pre_account - .verify( - program_id, - instruction_account.is_writable, - &self.rent, - &account, - &mut self.timings, - true, - ) - .map_err(|err| { - ic_logger_msg!( - self.log_collector, - "failed to verify account {}: {}", - pre_account.key(), - err - ); - err - })?; - pre_sum = pre_sum - .checked_add(u128::from(pre_account.lamports())) - .ok_or(InstructionError::UnbalancedInstruction)?; - post_sum = post_sum - .checked_add(u128::from(account.lamports())) - .ok_or(InstructionError::UnbalancedInstruction)?; - - let pre_data_len = pre_account.data().len() as i64; - let post_data_len = account.data().len() as i64; - let data_len_delta = post_data_len.saturating_sub(pre_data_len); - self.accounts_data_meter - .adjust_delta_unchecked(data_len_delta); - } - - // Verify that the total sum of all the lamports did not change - if pre_sum != post_sum { - return Err(InstructionError::UnbalancedInstruction); - } - Ok(()) - } - - /// Verify and update PreAccount state based on program execution - /// - /// Note: `instruction_accounts` must be the same as passed to `InvokeContext::push()`, - /// so that they match the order of `pre_accounts`. 
- fn verify_and_update( - &mut self, - instruction_accounts: &[InstructionAccount], - before_instruction_context_push: bool, - ) -> Result<(), InstructionError> { - let transaction_context = &self.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let program_id = instruction_context - .get_last_program_key(transaction_context) - .map_err(|_| InstructionError::CallDepth)?; - - // Verify the per-account instruction results - let (mut pre_sum, mut post_sum) = (0_u128, 0_u128); - for (instruction_account_index, instruction_account) in - instruction_accounts.iter().enumerate() - { - if instruction_account_index as IndexOfAccount != instruction_account.index_in_callee { - continue; // Skip duplicate account - } - if instruction_account.index_in_transaction - < transaction_context.get_number_of_accounts() - { - let key = transaction_context - .get_key_of_account_at_index(instruction_account.index_in_transaction)?; - let account = transaction_context - .get_account_at_index(instruction_account.index_in_transaction)?; - let is_writable = if before_instruction_context_push { - instruction_context - .is_instruction_account_writable(instruction_account.index_in_caller)? 
- } else { - instruction_account.is_writable - }; - // Find the matching PreAccount - for pre_account in self.pre_accounts.iter_mut() { - if key == pre_account.key() { - { - // Verify account has no outstanding references - let _ = account - .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowOutstanding)?; - } - let account = account.borrow(); - pre_account - .verify( - program_id, - is_writable, - &self.rent, - &account, - &mut self.timings, - false, - ) - .map_err(|err| { - ic_logger_msg!( - self.log_collector, - "failed to verify account {}: {}", - key, - err - ); - err - })?; - pre_sum = pre_sum - .checked_add(u128::from(pre_account.lamports())) - .ok_or(InstructionError::UnbalancedInstruction)?; - post_sum = post_sum - .checked_add(u128::from(account.lamports())) - .ok_or(InstructionError::UnbalancedInstruction)?; - if is_writable && !pre_account.executable() { - pre_account.update(account.clone()); - } - - let pre_data_len = pre_account.data().len() as i64; - let post_data_len = account.data().len() as i64; - let data_len_delta = post_data_len.saturating_sub(pre_data_len); - self.accounts_data_meter - .adjust_delta_unchecked(data_len_delta); - - break; - } - } - } - } - - // Verify that the total sum of all the lamports did not change - if pre_sum != post_sum { - return Err(InstructionError::UnbalancedInstruction); - } - Ok(()) - } - /// Entrypoint for a cross-program invocation from a builtin program pub fn native_invoke( &mut self, @@ -660,60 +431,11 @@ impl<'a> InvokeContext<'a> { timings: &mut ExecuteTimings, ) -> Result<(), InstructionError> { *compute_units_consumed = 0; - - let nesting_level = self - .transaction_context - .get_instruction_context_stack_height(); - let is_top_level_instruction = nesting_level == 0; - if !is_top_level_instruction - && !self - .feature_set - .is_active(&enable_early_verification_of_account_modifications::id()) - { - // Verify the calling program hasn't misbehaved - let mut verify_caller_time = 
Measure::start("verify_caller_time"); - let verify_caller_result = self.verify_and_update(instruction_accounts, true); - verify_caller_time.stop(); - saturating_add_assign!( - timings - .execute_accessories - .process_instructions - .verify_caller_us, - verify_caller_time.as_us() - ); - verify_caller_result?; - } - self.transaction_context .get_next_instruction_context()? .configure(program_indices, instruction_accounts, instruction_data); self.push()?; self.process_executable_chain(compute_units_consumed, timings) - .and_then(|_| { - if self - .feature_set - .is_active(&enable_early_verification_of_account_modifications::id()) - { - Ok(()) - } else { - // Verify the called program has not misbehaved - let mut verify_callee_time = Measure::start("verify_callee_time"); - let result = if is_top_level_instruction { - self.verify(instruction_accounts, program_indices) - } else { - self.verify_and_update(instruction_accounts, false) - }; - verify_callee_time.stop(); - saturating_add_assign!( - timings - .execute_accessories - .process_instructions - .verify_callee_us, - verify_callee_time.as_us() - ); - result - } - }) // MUST pop if and only if `push` succeeded, independent of `result`. // Thus, the `.and()` instead of an `.and_then()`. .and(self.pop()) @@ -929,7 +651,7 @@ macro_rules! with_mock_invoke_context { let compute_budget = ComputeBudget::default(); let mut $transaction_context = TransactionContext::new( $transaction_accounts, - Some(Rent::default()), + Rent::default(), compute_budget.max_invoke_stack_height, compute_budget.max_instruction_trace_length, ); @@ -957,7 +679,6 @@ macro_rules! 
with_mock_invoke_context { let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default(); let mut $invoke_context = InvokeContext::new( &mut $transaction_context, - Rent::default(), &sysvar_cache, Some(LogCollector::new_ref()), compute_budget, @@ -1038,7 +759,7 @@ mod tests { super::*, crate::compute_budget, serde::{Deserialize, Serialize}, - solana_sdk::{account::WritableAccount, instruction::Instruction}, + solana_sdk::{account::WritableAccount, instruction::Instruction, rent::Rent}, }; #[derive(Debug, Serialize, Deserialize)] @@ -1223,7 +944,7 @@ mod tests { fn test_max_instruction_trace_length() { const MAX_INSTRUCTIONS: usize = 8; let mut transaction_context = - TransactionContext::new(Vec::new(), Some(Rent::default()), 1, MAX_INSTRUCTIONS); + TransactionContext::new(Vec::new(), Rent::default(), 1, MAX_INSTRUCTIONS); for _ in 0..MAX_INSTRUCTIONS { transaction_context.push().unwrap(); transaction_context.pop().unwrap(); diff --git a/program-runtime/src/lib.rs b/program-runtime/src/lib.rs index d43d9a5b35fafc..c79505495f3e73 100644 --- a/program-runtime/src/lib.rs +++ b/program-runtime/src/lib.rs @@ -16,7 +16,6 @@ pub mod invoke_context; pub mod loaded_programs; pub mod log_collector; pub mod message_processor; -pub mod pre_account; pub mod prioritization_fee; pub mod stable_log; pub mod sysvar_cache; diff --git a/program-runtime/src/message_processor.rs b/program-runtime/src/message_processor.rs index 80bfaf16e974bc..6b3727e6a8ce74 100644 --- a/program-runtime/src/message_processor.rs +++ b/program-runtime/src/message_processor.rs @@ -15,7 +15,6 @@ use { hash::Hash, message::SanitizedMessage, precompiles::is_precompile, - rent::Rent, saturating_add_assign, sysvar::instructions, transaction::TransactionError, @@ -54,7 +53,6 @@ impl MessageProcessor { message: &SanitizedMessage, program_indices: &[Vec], transaction_context: &mut TransactionContext, - rent: Rent, log_collector: Option>>, programs_loaded_for_tx_batch: 
&LoadedProgramsForTxBatch, programs_modified_by_tx: &mut LoadedProgramsForTxBatch, @@ -70,7 +68,6 @@ impl MessageProcessor { ) -> Result { let mut invoke_context = InvokeContext::new( transaction_context, - rent, sysvar_cache, log_collector, compute_budget, @@ -199,6 +196,7 @@ mod tests { message::{AccountKeys, LegacyMessage, Message}, native_loader::{self, create_loadable_account_for_test}, pubkey::Pubkey, + rent::Rent, secp256k1_instruction::new_secp256k1_instruction, secp256k1_program, }, @@ -268,8 +266,7 @@ mod tests { create_loadable_account_for_test("mock_system_program"), ), ]; - let mut transaction_context = - TransactionContext::new(accounts, Some(Rent::default()), 1, 3); + let mut transaction_context = TransactionContext::new(accounts, Rent::default(), 1, 3); let program_indices = vec![vec![2]]; let mut programs_loaded_for_tx_batch = LoadedProgramsForTxBatch::default(); programs_loaded_for_tx_batch.replenish( @@ -310,7 +307,6 @@ mod tests { &message, &program_indices, &mut transaction_context, - Rent::default(), None, &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, @@ -363,7 +359,6 @@ mod tests { &message, &program_indices, &mut transaction_context, - Rent::default(), None, &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, @@ -406,7 +401,6 @@ mod tests { &message, &program_indices, &mut transaction_context, - Rent::default(), None, &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, @@ -501,8 +495,7 @@ mod tests { create_loadable_account_for_test("mock_system_program"), ), ]; - let mut transaction_context = - TransactionContext::new(accounts, Some(Rent::default()), 1, 3); + let mut transaction_context = TransactionContext::new(accounts, Rent::default(), 1, 3); let program_indices = vec![vec![2]]; let mut programs_loaded_for_tx_batch = LoadedProgramsForTxBatch::default(); programs_loaded_for_tx_batch.replenish( @@ -540,7 +533,6 @@ mod tests { &message, &program_indices, &mut transaction_context, - Rent::default(), None, 
&programs_loaded_for_tx_batch, &mut programs_modified_by_tx, @@ -577,7 +569,6 @@ mod tests { &message, &program_indices, &mut transaction_context, - Rent::default(), None, &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, @@ -611,7 +602,6 @@ mod tests { &message, &program_indices, &mut transaction_context, - Rent::default(), None, &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, @@ -667,8 +657,7 @@ mod tests { (secp256k1_program::id(), secp256k1_account), (mock_program_id, mock_program_account), ]; - let mut transaction_context = - TransactionContext::new(accounts, Some(Rent::default()), 1, 2); + let mut transaction_context = TransactionContext::new(accounts, Rent::default(), 1, 2); // Since libsecp256k1 is still using the old version of rand, this test // copies the `random` implementation at: @@ -703,7 +692,6 @@ mod tests { &message, &[vec![0], vec![1]], &mut transaction_context, - Rent::default(), None, &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, diff --git a/program-runtime/src/pre_account.rs b/program-runtime/src/pre_account.rs deleted file mode 100644 index 2ca91ba0904b7d..00000000000000 --- a/program-runtime/src/pre_account.rs +++ /dev/null @@ -1,610 +0,0 @@ -use { - crate::timings::ExecuteDetailsTimings, - solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - instruction::InstructionError, - pubkey::Pubkey, - rent::Rent, - system_instruction::MAX_PERMITTED_DATA_LENGTH, - }, - std::fmt::Debug, -}; - -// The relevant state of an account before an Instruction executes, used -// to verify account integrity after the Instruction completes -#[derive(Clone, Debug, Default)] -pub struct PreAccount { - key: Pubkey, - account: AccountSharedData, - changed: bool, -} -impl PreAccount { - pub fn new(key: &Pubkey, account: AccountSharedData) -> Self { - Self { - key: *key, - account, - changed: false, - } - } - - pub fn verify( - &self, - program_id: &Pubkey, - is_writable: bool, - rent: &Rent, - post: 
&AccountSharedData, - timings: &mut ExecuteDetailsTimings, - outermost_call: bool, - ) -> Result<(), InstructionError> { - let pre = &self.account; - - // Only the owner of the account may change owner and - // only if the account is writable and - // only if the account is not executable and - // only if the data is zero-initialized or empty - let owner_changed = pre.owner() != post.owner(); - if owner_changed - && (!is_writable // line coverage used to get branch coverage - || pre.executable() - || program_id != pre.owner() - || !Self::is_zeroed(post.data())) - { - return Err(InstructionError::ModifiedProgramId); - } - - // An account not assigned to the program cannot have its balance decrease. - if program_id != pre.owner() // line coverage used to get branch coverage - && pre.lamports() > post.lamports() - { - return Err(InstructionError::ExternalAccountLamportSpend); - } - - // The balance of read-only and executable accounts may not change - let lamports_changed = pre.lamports() != post.lamports(); - if lamports_changed { - if !is_writable { - return Err(InstructionError::ReadonlyLamportChange); - } - if pre.executable() { - return Err(InstructionError::ExecutableLamportChange); - } - } - - // Account data size cannot exceed a maxumum length - if post.data().len() > MAX_PERMITTED_DATA_LENGTH as usize { - return Err(InstructionError::InvalidRealloc); - } - - // The owner of the account can change the size of the data - let data_len_changed = pre.data().len() != post.data().len(); - if data_len_changed && program_id != pre.owner() { - return Err(InstructionError::AccountDataSizeChanged); - } - - // Only the owner may change account data - // and if the account is writable - // and if the account is not executable - if !(program_id == pre.owner() - && is_writable // line coverage used to get branch coverage - && !pre.executable()) - && pre.data() != post.data() - { - if pre.executable() { - return Err(InstructionError::ExecutableDataModified); - } else if 
is_writable { - return Err(InstructionError::ExternalAccountDataModified); - } else { - return Err(InstructionError::ReadonlyDataModified); - } - } - - // executable is one-way (false->true) and only the account owner may set it. - let executable_changed = pre.executable() != post.executable(); - if executable_changed { - if !rent.is_exempt(post.lamports(), post.data().len()) { - return Err(InstructionError::ExecutableAccountNotRentExempt); - } - if !is_writable // line coverage used to get branch coverage - || pre.executable() - || program_id != post.owner() - { - return Err(InstructionError::ExecutableModified); - } - } - - // No one modifies rent_epoch (yet). - let rent_epoch_changed = pre.rent_epoch() != post.rent_epoch(); - if rent_epoch_changed { - return Err(InstructionError::RentEpochModified); - } - - if outermost_call { - timings.total_account_count = timings.total_account_count.saturating_add(1); - if owner_changed - || lamports_changed - || data_len_changed - || executable_changed - || rent_epoch_changed - || self.changed - { - timings.changed_account_count = timings.changed_account_count.saturating_add(1); - } - } - - Ok(()) - } - - pub fn update(&mut self, account: AccountSharedData) { - let rent_epoch = self.account.rent_epoch(); - self.account = account; - self.account.set_rent_epoch(rent_epoch); - - self.changed = true; - } - - pub fn key(&self) -> &Pubkey { - &self.key - } - - pub fn data(&self) -> &[u8] { - self.account.data() - } - - pub fn lamports(&self) -> u64 { - self.account.lamports() - } - - pub fn executable(&self) -> bool { - self.account.executable() - } - - pub fn is_zeroed(buf: &[u8]) -> bool { - const ZEROS_LEN: usize = 1024; - static ZEROS: [u8; ZEROS_LEN] = [0; ZEROS_LEN]; - let mut chunks = buf.chunks_exact(ZEROS_LEN); - - #[allow(clippy::indexing_slicing)] - { - chunks.all(|chunk| chunk == &ZEROS[..]) - && chunks.remainder() == &ZEROS[..chunks.remainder().len()] - } - } -} - -#[cfg(test)] -mod tests { - use { - super::*, - 
solana_sdk::{account::Account, instruction::InstructionError, system_program}, - }; - - #[test] - fn test_is_zeroed() { - const ZEROS_LEN: usize = 1024; - let mut buf = [0; ZEROS_LEN]; - assert!(PreAccount::is_zeroed(&buf)); - buf[0] = 1; - assert!(!PreAccount::is_zeroed(&buf)); - - let mut buf = [0; ZEROS_LEN - 1]; - assert!(PreAccount::is_zeroed(&buf)); - buf[0] = 1; - assert!(!PreAccount::is_zeroed(&buf)); - - let mut buf = [0; ZEROS_LEN + 1]; - assert!(PreAccount::is_zeroed(&buf)); - buf[0] = 1; - assert!(!PreAccount::is_zeroed(&buf)); - - let buf = vec![]; - assert!(PreAccount::is_zeroed(&buf)); - } - - struct Change { - program_id: Pubkey, - is_writable: bool, - rent: Rent, - pre: PreAccount, - post: AccountSharedData, - } - impl Change { - pub fn new(owner: &Pubkey, program_id: &Pubkey) -> Self { - Self { - program_id: *program_id, - rent: Rent::default(), - is_writable: true, - pre: PreAccount::new( - &solana_sdk::pubkey::new_rand(), - AccountSharedData::from(Account { - owner: *owner, - lamports: std::u64::MAX, - ..Account::default() - }), - ), - post: AccountSharedData::from(Account { - owner: *owner, - lamports: std::u64::MAX, - ..Account::default() - }), - } - } - pub fn read_only(mut self) -> Self { - self.is_writable = false; - self - } - pub fn executable(mut self, pre: bool, post: bool) -> Self { - self.pre.account.set_executable(pre); - self.post.set_executable(post); - self - } - pub fn lamports(mut self, pre: u64, post: u64) -> Self { - self.pre.account.set_lamports(pre); - self.post.set_lamports(post); - self - } - pub fn owner(mut self, post: &Pubkey) -> Self { - self.post.set_owner(*post); - self - } - pub fn data(mut self, pre: Vec, post: Vec) -> Self { - self.pre.account.set_data(pre); - self.post.set_data(post); - self - } - pub fn rent_epoch(mut self, pre: u64, post: u64) -> Self { - self.pre.account.set_rent_epoch(pre); - self.post.set_rent_epoch(post); - self - } - pub fn verify(&self) -> Result<(), InstructionError> { - self.pre.verify( 
- &self.program_id, - self.is_writable, - &self.rent, - &self.post, - &mut ExecuteDetailsTimings::default(), - false, - ) - } - } - - #[test] - fn test_verify_account_changes_owner() { - let system_program_id = system_program::id(); - let alice_program_id = solana_sdk::pubkey::new_rand(); - let mallory_program_id = solana_sdk::pubkey::new_rand(); - - assert_eq!( - Change::new(&system_program_id, &system_program_id) - .owner(&alice_program_id) - .verify(), - Ok(()), - "system program should be able to change the account owner" - ); - assert_eq!( - Change::new(&system_program_id, &system_program_id) - .owner(&alice_program_id) - .read_only() - .verify(), - Err(InstructionError::ModifiedProgramId), - "system program should not be able to change the account owner of a read-only account" - ); - assert_eq!( - Change::new(&mallory_program_id, &system_program_id) - .owner(&alice_program_id) - .verify(), - Err(InstructionError::ModifiedProgramId), - "system program should not be able to change the account owner of a non-system account" - ); - assert_eq!( - Change::new(&mallory_program_id, &mallory_program_id) - .owner(&alice_program_id) - .verify(), - Ok(()), - "mallory should be able to change the account owner, if she leaves clear data" - ); - assert_eq!( - Change::new(&mallory_program_id, &mallory_program_id) - .owner(&alice_program_id) - .data(vec![42], vec![0]) - .verify(), - Ok(()), - "mallory should be able to change the account owner, if she leaves clear data" - ); - assert_eq!( - Change::new(&mallory_program_id, &mallory_program_id) - .owner(&alice_program_id) - .executable(true, true) - .data(vec![42], vec![0]) - .verify(), - Err(InstructionError::ModifiedProgramId), - "mallory should not be able to change the account owner, if the account executable" - ); - assert_eq!( - Change::new(&mallory_program_id, &mallory_program_id) - .owner(&alice_program_id) - .data(vec![42], vec![42]) - .verify(), - Err(InstructionError::ModifiedProgramId), - "mallory should not be 
able to inject data into the alice program" - ); - } - - #[test] - fn test_verify_account_changes_executable() { - let owner = solana_sdk::pubkey::new_rand(); - let mallory_program_id = solana_sdk::pubkey::new_rand(); - let system_program_id = system_program::id(); - - assert_eq!( - Change::new(&owner, &system_program_id) - .executable(false, true) - .verify(), - Err(InstructionError::ExecutableModified), - "system program can't change executable if system doesn't own the account" - ); - assert_eq!( - Change::new(&owner, &system_program_id) - .executable(true, true) - .data(vec![1], vec![2]) - .verify(), - Err(InstructionError::ExecutableDataModified), - "system program can't change executable data if system doesn't own the account" - ); - assert_eq!( - Change::new(&owner, &owner).executable(false, true).verify(), - Ok(()), - "owner should be able to change executable" - ); - assert_eq!( - Change::new(&owner, &owner) - .executable(false, true) - .read_only() - .verify(), - Err(InstructionError::ExecutableModified), - "owner can't modify executable of read-only accounts" - ); - assert_eq!( - Change::new(&owner, &owner).executable(true, false).verify(), - Err(InstructionError::ExecutableModified), - "owner program can't reverse executable" - ); - assert_eq!( - Change::new(&owner, &mallory_program_id) - .executable(false, true) - .verify(), - Err(InstructionError::ExecutableModified), - "malicious Mallory should not be able to change the account executable" - ); - assert_eq!( - Change::new(&owner, &owner) - .executable(false, true) - .data(vec![1], vec![2]) - .verify(), - Ok(()), - "account data can change in the same instruction that sets the bit" - ); - assert_eq!( - Change::new(&owner, &owner) - .executable(true, true) - .data(vec![1], vec![2]) - .verify(), - Err(InstructionError::ExecutableDataModified), - "owner should not be able to change an account's data once its marked executable" - ); - assert_eq!( - Change::new(&owner, &owner) - .executable(true, true) - 
.lamports(1, 2) - .verify(), - Err(InstructionError::ExecutableLamportChange), - "owner should not be able to add lamports once marked executable" - ); - assert_eq!( - Change::new(&owner, &owner) - .executable(true, true) - .lamports(1, 2) - .verify(), - Err(InstructionError::ExecutableLamportChange), - "owner should not be able to add lamports once marked executable" - ); - assert_eq!( - Change::new(&owner, &owner) - .executable(true, true) - .lamports(2, 1) - .verify(), - Err(InstructionError::ExecutableLamportChange), - "owner should not be able to subtract lamports once marked executable" - ); - let data = vec![1; 100]; - let min_lamports = Rent::default().minimum_balance(data.len()); - assert_eq!( - Change::new(&owner, &owner) - .executable(false, true) - .lamports(0, min_lamports) - .data(data.clone(), data.clone()) - .verify(), - Ok(()), - ); - assert_eq!( - Change::new(&owner, &owner) - .executable(false, true) - .lamports(0, min_lamports - 1) - .data(data.clone(), data) - .verify(), - Err(InstructionError::ExecutableAccountNotRentExempt), - "owner should not be able to change an account's data once its marked executable" - ); - } - - #[test] - fn test_verify_account_changes_data_len() { - let alice_program_id = solana_sdk::pubkey::new_rand(); - - assert_eq!( - Change::new(&system_program::id(), &system_program::id()) - .data(vec![0], vec![0, 0]) - .verify(), - Ok(()), - "system program should be able to change the data len" - ); - assert_eq!( - Change::new(&alice_program_id, &system_program::id()) - .data(vec![0], vec![0,0]) - .verify(), - Err(InstructionError::AccountDataSizeChanged), - "system program should not be able to change the data length of accounts it does not own" - ); - } - - #[test] - fn test_verify_account_changes_data() { - let alice_program_id = solana_sdk::pubkey::new_rand(); - let mallory_program_id = solana_sdk::pubkey::new_rand(); - - assert_eq!( - Change::new(&alice_program_id, &alice_program_id) - .data(vec![0], vec![42]) - 
.verify(), - Ok(()), - "alice program should be able to change the data" - ); - assert_eq!( - Change::new(&mallory_program_id, &alice_program_id) - .data(vec![0], vec![42]) - .verify(), - Err(InstructionError::ExternalAccountDataModified), - "non-owner mallory should not be able to change the account data" - ); - assert_eq!( - Change::new(&alice_program_id, &alice_program_id) - .data(vec![0], vec![42]) - .read_only() - .verify(), - Err(InstructionError::ReadonlyDataModified), - "alice isn't allowed to touch a CO account" - ); - } - - #[test] - fn test_verify_account_changes_rent_epoch() { - let alice_program_id = solana_sdk::pubkey::new_rand(); - - assert_eq!( - Change::new(&alice_program_id, &system_program::id()).verify(), - Ok(()), - "nothing changed!" - ); - assert_eq!( - Change::new(&alice_program_id, &system_program::id()) - .rent_epoch(0, 1) - .verify(), - Err(InstructionError::RentEpochModified), - "no one touches rent_epoch" - ); - } - - #[test] - fn test_verify_account_changes_deduct_lamports_and_reassign_account() { - let alice_program_id = solana_sdk::pubkey::new_rand(); - let bob_program_id = solana_sdk::pubkey::new_rand(); - - // positive test of this capability - assert_eq!( - Change::new(&alice_program_id, &alice_program_id) - .owner(&bob_program_id) - .lamports(42, 1) - .data(vec![42], vec![0]) - .verify(), - Ok(()), - "alice should be able to deduct lamports and give the account to bob if the data is zeroed", - ); - } - - #[test] - fn test_verify_account_changes_lamports() { - let alice_program_id = solana_sdk::pubkey::new_rand(); - - assert_eq!( - Change::new(&alice_program_id, &system_program::id()) - .lamports(42, 0) - .read_only() - .verify(), - Err(InstructionError::ExternalAccountLamportSpend), - "debit should fail, even if system program" - ); - assert_eq!( - Change::new(&alice_program_id, &alice_program_id) - .lamports(42, 0) - .read_only() - .verify(), - Err(InstructionError::ReadonlyLamportChange), - "debit should fail, even if owning 
program" - ); - assert_eq!( - Change::new(&alice_program_id, &system_program::id()) - .lamports(42, 0) - .owner(&system_program::id()) - .verify(), - Err(InstructionError::ModifiedProgramId), - "system program can't debit the account unless it was the pre.owner" - ); - assert_eq!( - Change::new(&system_program::id(), &system_program::id()) - .lamports(42, 0) - .owner(&alice_program_id) - .verify(), - Ok(()), - "system can spend (and change owner)" - ); - } - - #[test] - fn test_verify_account_changes_data_size_changed() { - let alice_program_id = solana_sdk::pubkey::new_rand(); - - assert_eq!( - Change::new(&alice_program_id, &system_program::id()) - .data(vec![0], vec![0, 0]) - .verify(), - Err(InstructionError::AccountDataSizeChanged), - "system program should not be able to change another program's account data size" - ); - assert_eq!( - Change::new(&alice_program_id, &solana_sdk::pubkey::new_rand()) - .data(vec![0], vec![0, 0]) - .verify(), - Err(InstructionError::AccountDataSizeChanged), - "one program should not be able to change another program's account data size" - ); - assert_eq!( - Change::new(&alice_program_id, &alice_program_id) - .data(vec![0], vec![0, 0]) - .verify(), - Ok(()), - "programs can change their own data size" - ); - assert_eq!( - Change::new(&system_program::id(), &system_program::id()) - .data(vec![0], vec![0, 0]) - .verify(), - Ok(()), - "system program should be able to change account data size" - ); - } - - #[test] - fn test_verify_account_changes_owner_executable() { - let alice_program_id = solana_sdk::pubkey::new_rand(); - let bob_program_id = solana_sdk::pubkey::new_rand(); - - assert_eq!( - Change::new(&alice_program_id, &alice_program_id) - .owner(&bob_program_id) - .executable(false, true) - .verify(), - Err(InstructionError::ExecutableModified), - "program should not be able to change owner and executable at the same time" - ); - } -} diff --git a/programs/bpf_loader/benches/serialization.rs 
b/programs/bpf_loader/benches/serialization.rs index 87d1fc4f8a8c21..2acd8d374c1f8c 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -109,7 +109,7 @@ fn create_inputs(owner: Pubkey, num_instruction_accounts: usize) -> TransactionC } let mut transaction_context = - TransactionContext::new(transaction_accounts, Some(Rent::default()), 1, 1); + TransactionContext::new(transaction_accounts, Rent::default(), 1, 1); let instruction_data = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; transaction_context .get_next_instruction_context() diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index 1509805b9f9cb0..79fc52c6ca8ffa 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -1385,16 +1385,7 @@ fn update_callee_account( if !is_disable_cpi_setting_executable_and_rent_epoch_active && callee_account.borrow().rent_epoch() != caller_account.rent_epoch { - if invoke_context - .feature_set - .is_active(&enable_early_verification_of_account_modifications::id()) - { - return Err(Box::new(InstructionError::RentEpochModified)); - } else { - callee_account - .borrow_mut() - .set_rent_epoch(caller_account.rent_epoch); - } + return Err(Box::new(InstructionError::RentEpochModified)); } Ok(()) diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index c4a7fe1e6db50b..64d1d85e5ee964 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -21,7 +21,7 @@ use { vm::{BuiltinFunction, BuiltinProgram, Config, ProgramResult}, }, solana_sdk::{ - account::{ReadableAccount, WritableAccount}, + account::ReadableAccount, account_info::AccountInfo, alt_bn128::prelude::{ alt_bn128_addition, alt_bn128_multiplication, alt_bn128_pairing, AltBn128Error, @@ -37,8 +37,7 @@ use { self, blake3_syscall_enabled, curve25519_syscall_enabled, 
disable_cpi_setting_executable_and_rent_epoch, disable_deploy_of_alloc_free_syscall, disable_fees_sysvar, enable_alt_bn128_compression_syscall, enable_alt_bn128_syscall, - enable_big_mod_exp_syscall, enable_early_verification_of_account_modifications, - enable_partitioned_epoch_reward, enable_poseidon_syscall, + enable_big_mod_exp_syscall, enable_partitioned_epoch_reward, enable_poseidon_syscall, error_on_syscall_bpf_function_hash_collisions, last_restart_slot_sysvar, libsecp256k1_0_5_upgrade_enabled, reject_callx_r10, remaining_compute_units_syscall_enabled, stop_sibling_instruction_search_at_parent, diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 871f4696c1a078..cdc6780d2bc000 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -1225,7 +1225,6 @@ mod tests { VoteState1_14_11::from(vote_state.clone()), ))) .unwrap(); - let version1_14_11_serialized_len = version1_14_11_serialized.len(); let rent = Rent::default(); let lamports = rent.minimum_balance(version1_14_11_serialized_len); @@ -1235,14 +1234,19 @@ mod tests { // Create a fake TransactionContext with a fake InstructionContext with a single account which is the // vote account that was just created - let transaction_context = - TransactionContext::new(vec![(node_pubkey, vote_account)], None, 0, 0); + let processor_account = AccountSharedData::new(0, 0, &solana_sdk::native_loader::id()); + let transaction_context = TransactionContext::new( + vec![(id(), processor_account), (node_pubkey, vote_account)], + rent, + 0, + 0, + ); let mut instruction_context = InstructionContext::default(); instruction_context.configure( &[0], &[InstructionAccount { - index_in_transaction: 0, - index_in_caller: 0, + index_in_transaction: 1, + index_in_caller: 1, index_in_callee: 0, is_signer: false, is_writable: true, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 76102732c7d2d8..28428dabe099f4 100644 --- a/runtime/src/bank.rs +++ 
b/runtime/src/bank.rs @@ -135,7 +135,6 @@ use { feature, feature_set::{ self, add_set_tx_loaded_accounts_data_size_instruction, - enable_early_verification_of_account_modifications, include_loaded_accounts_data_size_in_fee_calculation, remove_congestion_multiplier_from_fee_calculation, remove_deprecated_request_unit_ix, FeatureSet, @@ -4841,14 +4840,7 @@ impl Bank { let mut transaction_context = TransactionContext::new( transaction_accounts, - if self - .feature_set - .is_active(&enable_early_verification_of_account_modifications::id()) - { - Some(self.rent_collector.rent) - } else { - None - }, + self.rent_collector.rent, compute_budget.max_invoke_stack_height, if self .feature_set @@ -4898,7 +4890,6 @@ impl Bank { tx.message(), &loaded_transaction.program_indices, &mut transaction_context, - self.rent_collector.rent, log_collector.clone(), programs_loaded_for_tx_batch, &mut programs_modified_by_tx, @@ -4975,23 +4966,15 @@ impl Bank { { status = Err(TransactionError::UnbalancedTransaction); } - let mut accounts_data_len_delta = status - .as_ref() - .map_or(0, |info| info.accounts_data_len_delta); let status = status.map(|_| ()); loaded_transaction.accounts = accounts; - if self - .feature_set - .is_active(&enable_early_verification_of_account_modifications::id()) - { - saturating_add_assign!( - timings.details.total_account_count, - loaded_transaction.accounts.len() as u64 - ); - saturating_add_assign!(timings.details.changed_account_count, touched_account_count); - accounts_data_len_delta = status.as_ref().map_or(0, |_| accounts_resize_delta); - } + saturating_add_assign!( + timings.details.total_account_count, + loaded_transaction.accounts.len() as u64 + ); + saturating_add_assign!(timings.details.changed_account_count, touched_account_count); + let accounts_data_len_delta = status.as_ref().map_or(0, |_| accounts_resize_delta); let return_data = if enable_return_data_recording { if let Some(end_index) = return_data.data.iter().rposition(|&x| x != 0) { diff --git 
a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index d009f60d5e4073..97c08289fbc0e7 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -6576,8 +6576,9 @@ fn test_bank_hash_consistency() { } } +#[ignore] #[test] -fn test_same_program_id_uses_unqiue_executable_accounts() { +fn test_same_program_id_uses_unique_executable_accounts() { declare_process_instruction!(process_instruction, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; @@ -11475,7 +11476,7 @@ fn test_rent_state_list_len() { }); let transaction_context = TransactionContext::new( loaded_txs[0].0.as_ref().unwrap().accounts.clone(), - Some(Rent::default()), + Rent::default(), compute_budget.max_invoke_stack_height, compute_budget.max_instruction_trace_length, ); @@ -12165,7 +12166,6 @@ fn test_cap_accounts_data_allocations_per_transaction() { let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::enable_early_verification_of_account_modifications::id()); bank.activate_feature(&feature_set::cap_accounts_data_allocations_per_transaction::id()); let mut instructions = Vec::new(); diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index cdfb162fc475a1..266456f219361d 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -60,19 +60,14 @@ pub type TransactionAccount = (Pubkey, AccountSharedData); pub struct TransactionAccounts { accounts: Vec>, touched_flags: RefCell>, - is_early_verification_of_account_modifications_enabled: bool, } impl TransactionAccounts { #[cfg(not(target_os = "solana"))] - fn new( - accounts: Vec>, - is_early_verification_of_account_modifications_enabled: bool, - ) -> TransactionAccounts { + fn new(accounts: Vec>) -> TransactionAccounts { TransactionAccounts { 
touched_flags: RefCell::new(vec![false; accounts.len()].into_boxed_slice()), accounts, - is_early_verification_of_account_modifications_enabled, } } @@ -86,13 +81,11 @@ impl TransactionAccounts { #[cfg(not(target_os = "solana"))] pub fn touch(&self, index: IndexOfAccount) -> Result<(), InstructionError> { - if self.is_early_verification_of_account_modifications_enabled { - *self - .touched_flags - .borrow_mut() - .get_mut(index as usize) - .ok_or(InstructionError::NotEnoughAccountKeys)? = true; - } + *self + .touched_flags + .borrow_mut() + .get_mut(index as usize) + .ok_or(InstructionError::NotEnoughAccountKeys)? = true; Ok(()) } @@ -150,7 +143,7 @@ pub struct TransactionContext { return_data: TransactionReturnData, accounts_resize_delta: RefCell, #[cfg(not(target_os = "solana"))] - rent: Option, + rent: Rent, #[cfg(not(target_os = "solana"))] is_cap_accounts_data_allocations_per_transaction_enabled: bool, /// Useful for debugging to filter by or to look it up on the explorer @@ -163,7 +156,7 @@ impl TransactionContext { #[cfg(not(target_os = "solana"))] pub fn new( transaction_accounts: Vec, - rent: Option, + rent: Rent, instruction_stack_capacity: usize, instruction_trace_capacity: usize, ) -> Self { @@ -173,7 +166,7 @@ impl TransactionContext { .unzip(); Self { account_keys: Pin::new(account_keys.into_boxed_slice()), - accounts: Rc::new(TransactionAccounts::new(accounts, rent.is_some())), + accounts: Rc::new(TransactionAccounts::new(accounts)), instruction_stack_capacity, instruction_trace_capacity, instruction_stack: Vec::with_capacity(instruction_stack_capacity), @@ -204,12 +197,6 @@ impl TransactionContext { &self.accounts } - /// Returns true if `enable_early_verification_of_account_modifications` is active - #[cfg(not(target_os = "solana"))] - pub fn is_early_verification_of_account_modifications_enabled(&self) -> bool { - self.rent.is_some() - } - /// Stores the signature of the current transaction #[cfg(all(not(target_os = "solana"), debug_assertions))] 
pub fn set_signature(&mut self, signature: &Signature) { @@ -342,9 +329,7 @@ impl TransactionContext { .ok_or(InstructionError::CallDepth)?; let callee_instruction_accounts_lamport_sum = self.instruction_accounts_lamport_sum(caller_instruction_context)?; - if !self.instruction_stack.is_empty() - && self.is_early_verification_of_account_modifications_enabled() - { + if !self.instruction_stack.is_empty() { let caller_instruction_context = self.get_current_instruction_context()?; let original_caller_instruction_accounts_lamport_sum = caller_instruction_context.instruction_accounts_lamport_sum; @@ -382,24 +367,20 @@ impl TransactionContext { } // Verify (before we pop) that the total sum of all lamports in this instruction did not change let detected_an_unbalanced_instruction = - if self.is_early_verification_of_account_modifications_enabled() { - self.get_current_instruction_context() - .and_then(|instruction_context| { - // Verify all executable accounts have no outstanding refs - for account_index in instruction_context.program_accounts.iter() { - self.get_account_at_index(*account_index)? - .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowOutstanding)?; - } - self.instruction_accounts_lamport_sum(instruction_context) - .map(|instruction_accounts_lamport_sum| { - instruction_context.instruction_accounts_lamport_sum - != instruction_accounts_lamport_sum - }) - }) - } else { - Ok(false) - }; + self.get_current_instruction_context() + .and_then(|instruction_context| { + // Verify all executable accounts have no outstanding refs + for account_index in instruction_context.program_accounts.iter() { + self.get_account_at_index(*account_index)? 
+ .try_borrow_mut() + .map_err(|_| InstructionError::AccountBorrowOutstanding)?; + } + self.instruction_accounts_lamport_sum(instruction_context) + .map(|instruction_accounts_lamport_sum| { + instruction_context.instruction_accounts_lamport_sum + != instruction_accounts_lamport_sum + }) + }); // Always pop, even if we `detected_an_unbalanced_instruction` self.instruction_stack.pop(); if detected_an_unbalanced_instruction? { @@ -430,9 +411,6 @@ impl TransactionContext { &self, instruction_context: &InstructionContext, ) -> Result { - if !self.is_early_verification_of_account_modifications_enabled() { - return Ok(0); - } let mut instruction_accounts_lamport_sum: u128 = 0; for instruction_account_index in 0..instruction_context.get_number_of_instruction_accounts() { @@ -771,32 +749,27 @@ impl<'a> BorrowedAccount<'a> { /// Assignes the owner of this account (transaction wide) #[cfg(not(target_os = "solana"))] pub fn set_owner(&mut self, pubkey: &[u8]) -> Result<(), InstructionError> { - if self - .transaction_context - .is_early_verification_of_account_modifications_enabled() - { - // Only the owner can assign a new owner - if !self.is_owned_by_current_program() { - return Err(InstructionError::ModifiedProgramId); - } - // and only if the account is writable - if !self.is_writable() { - return Err(InstructionError::ModifiedProgramId); - } - // and only if the account is not executable - if self.is_executable() { - return Err(InstructionError::ModifiedProgramId); - } - // and only if the data is zero-initialized or empty - if !is_zeroed(self.get_data()) { - return Err(InstructionError::ModifiedProgramId); - } - // don't touch the account if the owner does not change - if self.get_owner().to_bytes() == pubkey { - return Ok(()); - } - self.touch()?; + // Only the owner can assign a new owner + if !self.is_owned_by_current_program() { + return Err(InstructionError::ModifiedProgramId); + } + // and only if the account is writable + if !self.is_writable() { + return 
Err(InstructionError::ModifiedProgramId); + } + // and only if the account is not executable + if self.is_executable() { + return Err(InstructionError::ModifiedProgramId); + } + // and only if the data is zero-initialized or empty + if !is_zeroed(self.get_data()) { + return Err(InstructionError::ModifiedProgramId); } + // don't touch the account if the owner does not change + if self.get_owner().to_bytes() == pubkey { + return Ok(()); + } + self.touch()?; self.account.copy_into_owner_from_slice(pubkey); Ok(()) } @@ -810,28 +783,23 @@ impl<'a> BorrowedAccount<'a> { /// Overwrites the number of lamports of this account (transaction wide) #[cfg(not(target_os = "solana"))] pub fn set_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { - if self - .transaction_context - .is_early_verification_of_account_modifications_enabled() - { - // An account not owned by the program cannot have its balance decrease - if !self.is_owned_by_current_program() && lamports < self.get_lamports() { - return Err(InstructionError::ExternalAccountLamportSpend); - } - // The balance of read-only may not change - if !self.is_writable() { - return Err(InstructionError::ReadonlyLamportChange); - } - // The balance of executable accounts may not change - if self.is_executable() { - return Err(InstructionError::ExecutableLamportChange); - } - // don't touch the account if the lamports do not change - if self.get_lamports() == lamports { - return Ok(()); - } - self.touch()?; + // An account not owned by the program cannot have its balance decrease + if !self.is_owned_by_current_program() && lamports < self.get_lamports() { + return Err(InstructionError::ExternalAccountLamportSpend); + } + // The balance of read-only may not change + if !self.is_writable() { + return Err(InstructionError::ReadonlyLamportChange); } + // The balance of executable accounts may not change + if self.is_executable() { + return Err(InstructionError::ExecutableLamportChange); + } + // don't touch the account 
if the lamports do not change + if self.get_lamports() == lamports { + return Ok(()); + } + self.touch()?; self.account.set_lamports(lamports); Ok(()) } @@ -1034,7 +1002,6 @@ impl<'a> BorrowedAccount<'a> { pub fn is_rent_exempt_at_data_length(&self, data_length: usize) -> bool { self.transaction_context .rent - .unwrap_or_default() .is_exempt(self.get_lamports(), data_length) } @@ -1047,29 +1014,31 @@ impl<'a> BorrowedAccount<'a> { /// Configures whether this account is executable (transaction wide) #[cfg(not(target_os = "solana"))] pub fn set_executable(&mut self, is_executable: bool) -> Result<(), InstructionError> { - if let Some(rent) = self.transaction_context.rent { - // To become executable an account must be rent exempt - if !rent.is_exempt(self.get_lamports(), self.get_data().len()) { - return Err(InstructionError::ExecutableAccountNotRentExempt); - } - // Only the owner can set the executable flag - if !self.is_owned_by_current_program() { - return Err(InstructionError::ExecutableModified); - } - // and only if the account is writable - if !self.is_writable() { - return Err(InstructionError::ExecutableModified); - } - // one can not clear the executable flag - if self.is_executable() && !is_executable { - return Err(InstructionError::ExecutableModified); - } - // don't touch the account if the executable flag does not change - if self.is_executable() == is_executable { - return Ok(()); - } - self.touch()?; + // To become executable an account must be rent exempt + if !self + .transaction_context + .rent + .is_exempt(self.get_lamports(), self.get_data().len()) + { + return Err(InstructionError::ExecutableAccountNotRentExempt); + } + // Only the owner can set the executable flag + if !self.is_owned_by_current_program() { + return Err(InstructionError::ExecutableModified); } + // and only if the account is writable + if !self.is_writable() { + return Err(InstructionError::ExecutableModified); + } + // one can not clear the executable flag + if 
self.is_executable() && !is_executable { + return Err(InstructionError::ExecutableModified); + } + // don't touch the account if the executable flag does not change + if self.is_executable() == is_executable { + return Ok(()); + } + self.touch()?; self.account.set_executable(is_executable); Ok(()) } @@ -1118,12 +1087,6 @@ impl<'a> BorrowedAccount<'a> { /// Returns an error if the account data can not be mutated by the current program #[cfg(not(target_os = "solana"))] pub fn can_data_be_changed(&self) -> Result<(), InstructionError> { - if !self - .transaction_context - .is_early_verification_of_account_modifications_enabled() - { - return Ok(()); - } // Only non-executable accounts data can be changed if self.is_executable() { return Err(InstructionError::ExecutableDataModified); @@ -1142,12 +1105,6 @@ impl<'a> BorrowedAccount<'a> { /// Returns an error if the account data can not be resized to the given length #[cfg(not(target_os = "solana"))] pub fn can_data_be_resized(&self, new_length: usize) -> Result<(), InstructionError> { - if !self - .transaction_context - .is_early_verification_of_account_modifications_enabled() - { - return Ok(()); - } let old_length = self.get_data().len(); // Only the owner can change the length of the data if new_length != old_length && !self.is_owned_by_current_program() { From 509d6acd2b545681214950075a9ec8f6ef3b9731 Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 10 Oct 2023 10:40:36 -0600 Subject: [PATCH 301/407] Remove primary index from Blockstore special-column keys (#33419) * Add helper trait for column key deprecation * Add WriteBatch::delete_raw * Add ProtobufColumn::get_raw_protobuf_or_bincode * Add ColumnIndexDeprecation iterator methods * Impl ColumnIndexDeprecation for TransactionStatus (doesn't build) * Update TransactionStatus put * Update TransactionStatus purge_exact * Fix read_transaction_status * Fix get_transaction_status_with_counter * Fix test_all_empty_or_min (builds except tests) * Fix test_get_rooted_block * 
Fix test_persist_transaction_status * Fix test_get_transaction_status * Fix test_get_rooted_transaction * Fix test_get_complete_transaction * Fix test_lowest_cleanup_slot_and_special_cfs * Fix test_map_transactions_to_statuses * Fix test_transaction_status_protobuf_backward_compatability * Fix test_special_columns_empty * Delete test_transaction_status_index * Delete test_purge_transaction_status * Ignore some tests until both special columns are dealt with (all build) * Impl ColumnIndexDeprecation for AddressSignatures (doesn't build) * Add BlockstoreError variant * Update AddressSignatures put * Remove unneeded active_transaction_status_index column lock * Update AddressSignatures purge_exact * Fix find_address_signatures_for_slot methods * Fix get_block_signatures methods * Fix get_confirmed_signatures_for_address2 * Remove unused method * Fix test_all_empty_or_min moar (builds except tests) * Fix tests (all build) * Fix test_get_confirmed_signatures_for_address * Fix test_lowest_cleanup_slot_and_special_cfs moar * Unignore tests (builds except tests) * Fix test_purge_transaction_status_exact * Fix test_purge_front_of_ledger * Fix test_purge_special_columns_compaction_filter (all build) * Move some test-harness stuff around * Add test cases for purge_special_columns_with_old_data * Add test_read_transaction_status_with_old_data * Add test_get_transaction_status_with_old_data * Review comments * Move rev of block-signatures into helper * Improve deprecated_key impls * iter_filtered -> iter_current_index_filtered * Add comment to explain why use the smallest (index, Signature) to seed the iterator * Impl ColumnIndexDeprecation for TransactionMemos (doesn't build) * Update TransactionMemos put * Add LedgerColumn::get_raw * Fix read_transaction_memos * Add TransactionMemos to purge_special_columns_exact * Add TransactionMemos to compaction filter * Take find_address_signatures out of service * Remove faulty delete_new_column_key logic * Simplify comments --- 
ledger/src/blockstore.rs | 1009 +++++++++------------ ledger/src/blockstore/blockstore_purge.rs | 861 +++++------------- ledger/src/blockstore_db.rs | 348 ++++++- rpc/src/transaction_status_service.rs | 3 +- 4 files changed, 955 insertions(+), 1266 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index c23dc240d79f7f..4ea608d3471c26 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -6,8 +6,8 @@ use { crate::{ ancestor_iterator::AncestorIterator, blockstore_db::{ - columns as cf, Column, Database, IteratorDirection, IteratorMode, LedgerColumn, Result, - WriteBatch, + columns as cf, Column, ColumnIndexDeprecation, Database, IteratorDirection, + IteratorMode, LedgerColumn, Result, WriteBatch, }, blockstore_meta::*, blockstore_options::{ @@ -74,7 +74,7 @@ use { rc::Rc, sync::{ atomic::{AtomicBool, Ordering}, - Arc, Mutex, RwLock, RwLockWriteGuard, + Arc, Mutex, RwLock, }, }, tempfile::{Builder, TempDir}, @@ -2210,40 +2210,40 @@ impl Blockstore { } } - fn get_primary_index_to_write( - &self, - slot: Slot, - // take WriteGuard to require critical section semantics at call site - w_active_transaction_status_index: &RwLockWriteGuard, - ) -> Result { - let i = **w_active_transaction_status_index; - let mut index_meta = self.transaction_status_index_cf.get(i)?.unwrap(); - if slot > index_meta.max_slot { - assert!(!index_meta.frozen); - index_meta.max_slot = slot; - self.transaction_status_index_cf.put(i, &index_meta)?; - } - Ok(i) - } - - pub fn read_transaction_status( + fn read_deprecated_transaction_status( &self, index: (Signature, Slot), ) -> Result> { let (signature, slot) = index; let result = self .transaction_status_cf - .get_protobuf_or_bincode::((0, signature, slot))?; + .get_raw_protobuf_or_bincode::( + &cf::TransactionStatus::deprecated_key((0, signature, slot)), + )?; if result.is_none() { Ok(self .transaction_status_cf - .get_protobuf_or_bincode::((1, signature, slot))? 
+ .get_raw_protobuf_or_bincode::( + &cf::TransactionStatus::deprecated_key((1, signature, slot)), + )? .and_then(|meta| meta.try_into().ok())) } else { Ok(result.and_then(|meta| meta.try_into().ok())) } } + pub fn read_transaction_status( + &self, + index: (Signature, Slot), + ) -> Result> { + let result = self.transaction_status_cf.get_protobuf(index)?; + if result.is_none() { + self.read_deprecated_transaction_status(index) + } else { + Ok(result.and_then(|meta| meta.try_into().ok())) + } + } + pub fn write_transaction_status( &self, slot: Slot, @@ -2251,37 +2251,49 @@ impl Blockstore { writable_keys: Vec<&Pubkey>, readonly_keys: Vec<&Pubkey>, status: TransactionStatusMeta, + transaction_index: usize, ) -> Result<()> { let status = status.into(); - // This write lock prevents interleaving issues with the transaction_status_index_cf by gating - // writes to that column - let w_active_transaction_status_index = - self.active_transaction_status_index.write().unwrap(); - let primary_index = - self.get_primary_index_to_write(slot, &w_active_transaction_status_index)?; + let transaction_index = u32::try_from(transaction_index) + .map_err(|_| BlockstoreError::TransactionIndexOverflow)?; self.transaction_status_cf - .put_protobuf((primary_index, signature, slot), &status)?; + .put_protobuf((signature, slot), &status)?; for address in writable_keys { self.address_signatures_cf.put( - (primary_index, *address, slot, signature), + (*address, slot, transaction_index, signature), &AddressSignatureMeta { writeable: true }, )?; } for address in readonly_keys { self.address_signatures_cf.put( - (primary_index, *address, slot, signature), + (*address, slot, transaction_index, signature), &AddressSignatureMeta { writeable: false }, )?; } Ok(()) } - pub fn read_transaction_memos(&self, signature: Signature) -> Result> { - self.transaction_memos_cf.get(signature) + pub fn read_transaction_memos( + &self, + signature: Signature, + slot: Slot, + ) -> Result> { + let memos = 
self.transaction_memos_cf.get((signature, slot))?; + if memos.is_none() { + self.transaction_memos_cf + .get_raw(&cf::TransactionMemos::deprecated_key(signature)) + } else { + Ok(memos) + } } - pub fn write_transaction_memos(&self, signature: &Signature, memos: String) -> Result<()> { - self.transaction_memos_cf.put(*signature, &memos) + pub fn write_transaction_memos( + &self, + signature: &Signature, + slot: Slot, + memos: String, + ) -> Result<()> { + self.transaction_memos_cf.put((*signature, slot), &memos) } /// Acquires the `lowest_cleanup_slot` lock and returns a tuple of the held lock @@ -2328,15 +2340,40 @@ impl Blockstore { let (lock, _) = self.ensure_lowest_cleanup_slot(); let first_available_block = self.get_first_available_block()?; + let iterator = + self.transaction_status_cf + .iter_current_index_filtered(IteratorMode::From( + (signature, first_available_block), + IteratorDirection::Forward, + ))?; + + for ((sig, slot), _data) in iterator { + counter += 1; + if sig != signature { + break; + } + if !self.is_root(slot) && !confirmed_unrooted_slots.contains(&slot) { + continue; + } + let status = self + .transaction_status_cf + .get_protobuf((signature, slot))? 
+ .and_then(|status| status.try_into().ok()) + .map(|status| (slot, status)); + return Ok((status, counter)); + } + for transaction_status_cf_primary_index in 0..=1 { - let index_iterator = self.transaction_status_cf.iter(IteratorMode::From( - ( - transaction_status_cf_primary_index, - signature, - first_available_block, - ), - IteratorDirection::Forward, - ))?; + let index_iterator = + self.transaction_status_cf + .iter_deprecated_index_filtered(IteratorMode::From( + ( + transaction_status_cf_primary_index, + signature, + first_available_block, + ), + IteratorDirection::Forward, + ))?; for ((i, sig, slot), _data) in index_iterator { counter += 1; if i != transaction_status_cf_primary_index || sig != signature { @@ -2347,7 +2384,9 @@ impl Blockstore { } let status = self .transaction_status_cf - .get_protobuf_or_bincode::((i, sig, slot))? + .get_raw_protobuf_or_bincode::( + &cf::TransactionStatus::deprecated_key((i, signature, slot)), + )? .and_then(|status| status.try_into().ok()) .map(|status| (slot, status)); return Ok((status, counter)); @@ -2463,50 +2502,19 @@ impl Blockstore { .find(|transaction| transaction.signatures[0] == signature)) } - // Returns all rooted signatures for an address, ordered by slot that the transaction was - // processed in. 
Within each slot the transactions will be ordered by signature, and NOT by - // the order in which the transactions exist in the block - // - // DEPRECATED + // DEPRECATED and decommissioned + // This method always returns an empty Vec fn find_address_signatures( &self, - pubkey: Pubkey, - start_slot: Slot, - end_slot: Slot, + _pubkey: Pubkey, + _start_slot: Slot, + _end_slot: Slot, ) -> Result> { - let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); - let mut signatures: Vec<(Slot, Signature)> = vec![]; - if end_slot < lowest_available_slot { - return Ok(signatures); - } - for transaction_status_cf_primary_index in 0..=1 { - let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( - ( - transaction_status_cf_primary_index, - pubkey, - start_slot.max(lowest_available_slot), - Signature::default(), - ), - IteratorDirection::Forward, - ))?; - for ((i, address, slot, signature), _) in index_iterator { - if i != transaction_status_cf_primary_index || slot > end_slot || address != pubkey - { - break; - } - if self.is_root(slot) { - signatures.push((slot, signature)); - } - } - } - drop(lock); - signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1))); - Ok(signatures) + Ok(vec![]) } // Returns all signatures for an address in a particular slot, regardless of whether that slot - // has been rooted. The transactions will be ordered by signature, and NOT by the order in - // which the transactions exist in the block + // has been rooted. 
The transactions will be ordered by their occurrence in the block fn find_address_signatures_for_slot( &self, pubkey: Pubkey, @@ -2517,32 +2525,29 @@ impl Blockstore { if slot < lowest_available_slot { return Ok(signatures); } - for transaction_status_cf_primary_index in 0..=1 { - let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( - ( - transaction_status_cf_primary_index, - pubkey, - slot, - Signature::default(), - ), - IteratorDirection::Forward, - ))?; - for ((i, address, transaction_slot, signature), _) in index_iterator { - if i != transaction_status_cf_primary_index - || transaction_slot > slot - || address != pubkey - { - break; - } - signatures.push((slot, signature)); + let index_iterator = + self.address_signatures_cf + .iter_current_index_filtered(IteratorMode::From( + ( + pubkey, + slot.max(lowest_available_slot), + 0, + Signature::default(), + ), + IteratorDirection::Forward, + ))?; + for ((address, transaction_slot, _transaction_index, signature), _) in index_iterator { + if transaction_slot > slot || address != pubkey { + break; } + signatures.push((slot, signature)); } drop(lock); - signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1))); Ok(signatures) } - // DEPRECATED + // DEPRECATED and decommissioned + // This method always returns an empty Vec pub fn get_confirmed_signatures_for_address( &self, pubkey: Pubkey, @@ -2557,7 +2562,7 @@ impl Blockstore { .map(|signatures| signatures.iter().map(|(_, signature)| *signature).collect()) } - fn get_sorted_block_signatures(&self, slot: Slot) -> Result> { + fn get_block_signatures_rev(&self, slot: Slot) -> Result> { let block = self.get_complete_block(slot, false).map_err(|err| { BlockstoreError::Io(IoError::new( ErrorKind::Other, @@ -2565,10 +2570,10 @@ impl Blockstore { )) })?; - // Load all signatures for the block - let mut slot_signatures: Vec<_> = block + Ok(block .transactions .into_iter() + .rev() .filter_map(|transaction_with_meta| { 
transaction_with_meta .transaction @@ -2576,14 +2581,7 @@ impl Blockstore { .into_iter() .next() }) - .collect(); - - // Reverse sort signatures as a way to entire a stable ordering within a slot, as - // the AddressSignatures column is ordered by signatures within a slot, - // not by block ordering - slot_signatures.sort_unstable_by(|a, b| b.cmp(a)); - - Ok(slot_signatures) + .collect()) } pub fn get_confirmed_signatures_for_address2( @@ -2616,7 +2614,7 @@ impl Blockstore { match transaction_status { None => return Ok(SignatureInfosForAddress::default()), Some((slot, _)) => { - let mut slot_signatures = self.get_sorted_block_signatures(slot)?; + let mut slot_signatures = self.get_block_signatures_rev(slot)?; if let Some(pos) = slot_signatures.iter().position(|&x| x == before) { slot_signatures.truncate(pos + 1); } @@ -2643,7 +2641,7 @@ impl Blockstore { match transaction_status { None => (first_available_block, HashSet::new()), Some((slot, _)) => { - let mut slot_signatures = self.get_sorted_block_signatures(slot)?; + let mut slot_signatures = self.get_block_signatures_rev(slot)?; if let Some(pos) = slot_signatures.iter().position(|&x| x == until) { slot_signatures = slot_signatures.split_off(pos); } @@ -2673,65 +2671,26 @@ impl Blockstore { } get_initial_slot_timer.stop(); - // Check the active_transaction_status_index to see if it contains slot. If so, start with - // that index, as it will contain higher slots - let starting_primary_index = *self.active_transaction_status_index.read().unwrap(); - let next_primary_index = u64::from(starting_primary_index == 0); - let next_max_slot = self - .transaction_status_index_cf - .get(next_primary_index)? 
- .unwrap() - .max_slot; - - let mut starting_primary_index_iter_timer = Measure::start("starting_primary_index_iter"); - if slot > next_max_slot { - let mut starting_iterator = self.address_signatures_cf.iter(IteratorMode::From( - (starting_primary_index, address, slot, Signature::default()), - IteratorDirection::Reverse, - ))?; - - // Iterate through starting_iterator until limit is reached - while address_signatures.len() < limit { - if let Some(((i, key_address, slot, signature), _)) = starting_iterator.next() { - if slot == next_max_slot || slot < lowest_slot { - break; - } - if i == starting_primary_index && key_address == address { - if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) { - address_signatures.push((slot, signature)); - } - continue; - } - } - break; - } - - // Handle slots that cross primary indexes - if next_max_slot >= lowest_slot { - let mut signatures = - self.find_address_signatures_for_slot(address, next_max_slot)?; - signatures.reverse(); - address_signatures.append(&mut signatures); - } - } - starting_primary_index_iter_timer.stop(); - - // Iterate through next_iterator until limit is reached - let mut next_primary_index_iter_timer = Measure::start("next_primary_index_iter_timer"); - let mut next_iterator = self.address_signatures_cf.iter(IteratorMode::From( - (next_primary_index, address, slot, Signature::default()), - IteratorDirection::Reverse, - ))?; + let mut address_signatures_iter_timer = Measure::start("iter_timer"); + let mut iterator = + self.address_signatures_cf + .iter_current_index_filtered(IteratorMode::From( + // Regardless of whether a `before` signature is provided, the latest relevant + // `slot` is queried directly with the `find_address_signatures_for_slot()` + // call above. Thus, this iterator starts at the lowest entry of `address, + // slot` and iterates backwards to continue reporting the next earliest + // signatures. 
+ (address, slot, 0, Signature::default()), + IteratorDirection::Reverse, + ))?; + + // Iterate until limit is reached while address_signatures.len() < limit { - if let Some(((i, key_address, slot, signature), _)) = next_iterator.next() { - // Skip next_max_slot, which is already included - if slot == next_max_slot { - continue; - } + if let Some(((key_address, slot, _transaction_index, signature), _)) = iterator.next() { if slot < lowest_slot { break; } - if i == next_primary_index && key_address == address { + if key_address == address { if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) { address_signatures.push((slot, signature)); } @@ -2740,7 +2699,8 @@ impl Blockstore { } break; } - next_primary_index_iter_timer.stop(); + address_signatures_iter_timer.stop(); + let mut address_signatures: Vec<(Slot, Signature)> = address_signatures .into_iter() .filter(|(_, signature)| !until_excluded_signatures.contains(signature)) @@ -2754,7 +2714,7 @@ impl Blockstore { let transaction_status = self.get_transaction_status(signature, &confirmed_unrooted_slots)?; let err = transaction_status.and_then(|(_slot, status)| status.status.err()); - let memo = self.read_transaction_memos(signature)?; + let memo = self.read_transaction_memos(signature, slot)?; let block_time = self.get_block_time(slot)?; infos.push(ConfirmedTransactionStatusWithSignature { signature, @@ -2779,13 +2739,8 @@ impl Blockstore { i64 ), ( - "starting_primary_index_iter_us", - starting_primary_index_iter_timer.as_us() as i64, - i64 - ), - ( - "next_primary_index_iter_us", - next_primary_index_iter_timer.as_us() as i64, + "address_signatures_iter_us", + address_signatures_iter_timer.as_us() as i64, i64 ), ( @@ -4479,18 +4434,14 @@ pub fn test_all_empty_or_min(blockstore: &Blockstore, min_slot: Slot) { .iter::(IteratorMode::Start) .unwrap() .next() - .map(|((primary_index, _, slot), _)| { - slot >= min_slot || (primary_index == 2 && slot == 0) - }) + .map(|((_, slot), _)| slot >= min_slot || 
slot == 0) .unwrap_or(true) & blockstore .db .iter::(IteratorMode::Start) .unwrap() .next() - .map(|((primary_index, _, slot, _), _)| { - slot >= min_slot || (primary_index == 2 && slot == 0) - }) + .map(|((_, slot, _, _), _)| slot >= min_slot || slot == 0) .unwrap_or(true) & blockstore .db @@ -7204,7 +7155,7 @@ pub mod tests { .into(); blockstore .transaction_status_cf - .put_protobuf((0, signature, slot), &status) + .put_protobuf((signature, slot), &status) .unwrap(); let status = TransactionStatusMeta { status: Ok(()), @@ -7223,7 +7174,7 @@ pub mod tests { .into(); blockstore .transaction_status_cf - .put_protobuf((0, signature, slot + 1), &status) + .put_protobuf((signature, slot + 1), &status) .unwrap(); let status = TransactionStatusMeta { status: Ok(()), @@ -7242,7 +7193,7 @@ pub mod tests { .into(); blockstore .transaction_status_cf - .put_protobuf((0, signature, slot + 2), &status) + .put_protobuf((signature, slot + 2), &status) .unwrap(); VersionedTransactionWithStatusMeta { transaction, @@ -7383,7 +7334,7 @@ pub mod tests { // result not found assert!(transaction_status_cf - .get_protobuf_or_bincode::((0, Signature::default(), 0)) + .get_protobuf((Signature::default(), 0)) .unwrap() .is_none()); @@ -7404,7 +7355,7 @@ pub mod tests { } .into(); assert!(transaction_status_cf - .put_protobuf((0, Signature::default(), 0), &status,) + .put_protobuf((Signature::default(), 0), &status) .is_ok()); // result found @@ -7422,7 +7373,7 @@ pub mod tests { return_data, compute_units_consumed, } = transaction_status_cf - .get_protobuf_or_bincode::((0, Signature::default(), 0)) + .get_protobuf((Signature::default(), 0)) .unwrap() .unwrap() .try_into() @@ -7457,7 +7408,7 @@ pub mod tests { } .into(); assert!(transaction_status_cf - .put_protobuf((0, Signature::from([2u8; 64]), 9), &status,) + .put_protobuf((Signature::from([2u8; 64]), 9), &status,) .is_ok()); // result found @@ -7475,11 +7426,7 @@ pub mod tests { return_data, compute_units_consumed, } = 
transaction_status_cf - .get_protobuf_or_bincode::(( - 0, - Signature::from([2u8; 64]), - 9, - )) + .get_protobuf((Signature::from([2u8; 64]), 9)) .unwrap() .unwrap() .try_into() @@ -7501,208 +7448,73 @@ pub mod tests { } #[test] - #[allow(clippy::cognitive_complexity)] - fn test_transaction_status_index() { + fn test_read_transaction_status_with_old_data() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + let signature = Signature::from([1; 64]); - let transaction_status_index_cf = &blockstore.transaction_status_index_cf; - let slot0 = 10; - - // Primary index column is initialized on Blockstore::open - assert!(transaction_status_index_cf.get(0).unwrap().is_some()); - assert!(transaction_status_index_cf.get(1).unwrap().is_some()); + let index0_slot = 2; + blockstore + .write_deprecated_transaction_status( + 0, + index0_slot, + signature, + vec![&Pubkey::new_unique()], + vec![&Pubkey::new_unique()], + TransactionStatusMeta { + fee: index0_slot * 1_000, + ..TransactionStatusMeta::default() + }, + ) + .unwrap(); - for _ in 0..5 { - let random_bytes: [u8; 64] = std::array::from_fn(|_| rand::random::()); - blockstore - .write_transaction_status( - slot0, - Signature::from(random_bytes), - vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], - vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], - TransactionStatusMeta::default(), - ) - .unwrap(); - } + let index1_slot = 1; + blockstore + .write_deprecated_transaction_status( + 1, + index1_slot, + signature, + vec![&Pubkey::new_unique()], + vec![&Pubkey::new_unique()], + TransactionStatusMeta { + fee: index1_slot * 1_000, + ..TransactionStatusMeta::default() + }, + ) + .unwrap(); - // New statuses bump index 0 max_slot - assert_eq!( - transaction_status_index_cf.get(0).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: slot0, - frozen: false, - } - ); - assert_eq!( - transaction_status_index_cf.get(1).unwrap().unwrap(), - 
TransactionStatusIndexMeta::default() - ); + let slot = 3; + blockstore + .write_transaction_status( + slot, + signature, + vec![&Pubkey::new_unique()], + vec![&Pubkey::new_unique()], + TransactionStatusMeta { + fee: slot * 1_000, + ..TransactionStatusMeta::default() + }, + 0, + ) + .unwrap(); - let first_status_entry = blockstore - .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap() - .next() - .unwrap() - .0; - assert_eq!(first_status_entry.0, 0); - assert_eq!(first_status_entry.2, slot0); - let first_address_entry = blockstore - .db - .iter::(IteratorMode::From( - cf::AddressSignatures::as_index(0), - IteratorDirection::Forward, - )) - .unwrap() - .next() + let meta = blockstore + .read_transaction_status((signature, slot)) .unwrap() - .0; - assert_eq!(first_address_entry.0, 0); - assert_eq!(first_address_entry.2, slot0); - - blockstore.run_purge(0, 8, PurgeType::PrimaryIndex).unwrap(); - // First successful prune freezes index 0 - assert_eq!( - transaction_status_index_cf.get(0).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: slot0, - frozen: true, - } - ); - assert_eq!( - transaction_status_index_cf.get(1).unwrap().unwrap(), - TransactionStatusIndexMeta::default() - ); - - let slot1 = 20; - for _ in 0..5 { - let random_bytes: [u8; 64] = std::array::from_fn(|_| rand::random::()); - blockstore - .write_transaction_status( - slot1, - Signature::from(random_bytes), - vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], - vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], - TransactionStatusMeta::default(), - ) - .unwrap(); - } - - assert_eq!( - transaction_status_index_cf.get(0).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: slot0, - frozen: true, - } - ); - // Index 0 is frozen, so new statuses bump index 1 max_slot - assert_eq!( - transaction_status_index_cf.get(1).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: slot1, - frozen: false, - } - ); + 
.unwrap(); + assert_eq!(meta.fee, slot * 1000); - // Index 0 statuses and address records still exist - let first_status_entry = blockstore - .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap() - .next() - .unwrap() - .0; - assert_eq!(first_status_entry.0, 0); - assert_eq!(first_status_entry.2, 10); - let first_address_entry = blockstore - .db - .iter::(IteratorMode::From( - cf::AddressSignatures::as_index(0), - IteratorDirection::Forward, - )) - .unwrap() - .next() - .unwrap() - .0; - assert_eq!(first_address_entry.0, 0); - assert_eq!(first_address_entry.2, slot0); - // New statuses and address records are stored in index 1 - let index1_first_status_entry = blockstore - .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(1), - IteratorDirection::Forward, - )) - .unwrap() - .next() - .unwrap() - .0; - assert_eq!(index1_first_status_entry.0, 1); - assert_eq!(index1_first_status_entry.2, slot1); - let index1_first_address_entry = blockstore - .db - .iter::(IteratorMode::From( - cf::AddressSignatures::as_index(1), - IteratorDirection::Forward, - )) - .unwrap() - .next() + let meta = blockstore + .read_transaction_status((signature, index0_slot)) .unwrap() - .0; - assert_eq!(index1_first_address_entry.0, 1); - assert_eq!(index1_first_address_entry.2, slot1); - - blockstore - .run_purge(0, 18, PurgeType::PrimaryIndex) .unwrap(); - // Successful prune toggles TransactionStatusIndex - assert_eq!( - transaction_status_index_cf.get(0).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: 0, - frozen: false, - } - ); - assert_eq!( - transaction_status_index_cf.get(1).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: slot1, - frozen: true, - } - ); + assert_eq!(meta.fee, index0_slot * 1000); - // Index 0 has been pruned, so first status and address entries are now index 1 - let first_status_entry = blockstore - .db - .iter::(IteratorMode::From( - 
cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap() - .next() - .unwrap() - .0; - assert_eq!(first_status_entry.0, 1); - assert_eq!(first_status_entry.2, slot1); - let first_address_entry = blockstore - .db - .iter::(IteratorMode::From( - cf::AddressSignatures::as_index(0), - IteratorDirection::Forward, - )) - .unwrap() - .next() + let meta = blockstore + .read_transaction_status((signature, index1_slot)) .unwrap() - .0; - assert_eq!(first_address_entry.0, 1); - assert_eq!(first_address_entry.2, slot1); + .unwrap(); + assert_eq!(meta.fee, index1_slot * 1000); } #[test] @@ -7755,56 +7567,45 @@ pub mod tests { blockstore.set_roots([0, 2].iter()).unwrap(); - // Initialize index 0, including: - // signature2 in non-root and root, - // signature4 in non-root, + // Initialize statuses: + // signature2 in skipped slot and root, + // signature4 in skipped slot, // signature5 in skipped slot and non-root, // signature6 in skipped slot, + // signature5 extra entries transaction_status_cf - .put_protobuf((0, signature2, 1), &status) - .unwrap(); - - transaction_status_cf - .put_protobuf((0, signature2, 2), &status) - .unwrap(); - - transaction_status_cf - .put_protobuf((0, signature4, 1), &status) + .put_protobuf((signature2, 1), &status) .unwrap(); transaction_status_cf - .put_protobuf((0, signature5, 1), &status) + .put_protobuf((signature2, 2), &status) .unwrap(); transaction_status_cf - .put_protobuf((0, signature5, 3), &status) + .put_protobuf((signature4, 1), &status) .unwrap(); transaction_status_cf - .put_protobuf((0, signature6, 1), &status) + .put_protobuf((signature5, 1), &status) .unwrap(); - // Initialize index 1, including: - // signature4 in root, - // signature6 in non-root, - // signature5 extra entries transaction_status_cf - .put_protobuf((1, signature4, 2), &status) + .put_protobuf((signature5, 3), &status) .unwrap(); transaction_status_cf - .put_protobuf((1, signature5, 4), &status) + .put_protobuf((signature6, 1), &status) 
.unwrap(); transaction_status_cf - .put_protobuf((1, signature5, 5), &status) + .put_protobuf((signature5, 5), &status) .unwrap(); transaction_status_cf - .put_protobuf((1, signature6, 3), &status) + .put_protobuf((signature6, 3), &status) .unwrap(); - // Signature exists, root found in index 0 + // Signature exists, root found if let (Some((slot, _status)), counter) = blockstore .get_transaction_status_with_counter(signature2, &[].into()) .unwrap() @@ -7822,30 +7623,26 @@ pub mod tests { assert_eq!(counter, 2); } - // Signature exists, root found in index 1 - if let (Some((slot, _status)), counter) = blockstore + // Signature exists in skipped slot, no root found + let (status, counter) = blockstore .get_transaction_status_with_counter(signature4, &[].into()) - .unwrap() - { - assert_eq!(slot, 2); - assert_eq!(counter, 3); - } + .unwrap(); + assert_eq!(status, None); + assert_eq!(counter, 2); - // Signature exists, root found although not required, in index 1 - if let (Some((slot, _status)), counter) = blockstore + // Signature exists in skipped slot, no non-root found + let (status, counter) = blockstore .get_transaction_status_with_counter(signature4, &[3].into()) - .unwrap() - { - assert_eq!(slot, 2); - assert_eq!(counter, 3); - } + .unwrap(); + assert_eq!(status, None); + assert_eq!(counter, 2); // Signature exists, no root found let (status, counter) = blockstore .get_transaction_status_with_counter(signature5, &[].into()) .unwrap(); assert_eq!(status, None); - assert_eq!(counter, 6); + assert_eq!(counter, 4); // Signature exists, root not required if let (Some((slot, _status)), counter) = blockstore @@ -7861,38 +7658,163 @@ pub mod tests { .get_transaction_status_with_counter(signature1, &[].into()) .unwrap(); assert_eq!(status, None); - assert_eq!(counter, 2); + assert_eq!(counter, 1); let (status, counter) = blockstore .get_transaction_status_with_counter(signature1, &[3].into()) .unwrap(); assert_eq!(status, None); - assert_eq!(counter, 2); + 
assert_eq!(counter, 1); // Signature does not exist, between existing entries let (status, counter) = blockstore .get_transaction_status_with_counter(signature3, &[].into()) .unwrap(); assert_eq!(status, None); - assert_eq!(counter, 2); + assert_eq!(counter, 1); let (status, counter) = blockstore .get_transaction_status_with_counter(signature3, &[3].into()) .unwrap(); assert_eq!(status, None); - assert_eq!(counter, 2); + assert_eq!(counter, 1); // Signature does not exist, larger than existing entries let (status, counter) = blockstore .get_transaction_status_with_counter(signature7, &[].into()) .unwrap(); assert_eq!(status, None); - assert_eq!(counter, 1); + assert_eq!(counter, 0); let (status, counter) = blockstore .get_transaction_status_with_counter(signature7, &[3].into()) .unwrap(); assert_eq!(status, None); + assert_eq!(counter, 0); + } + + #[test] + fn test_get_transaction_status_with_old_data() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + let transaction_status_cf = &blockstore.transaction_status_cf; + + let pre_balances_vec = vec![1, 2, 3]; + let post_balances_vec = vec![3, 2, 1]; + let status = TransactionStatusMeta { + status: solana_sdk::transaction::Result::<()>::Ok(()), + fee: 42u64, + pre_balances: pre_balances_vec, + post_balances: post_balances_vec, + inner_instructions: Some(vec![]), + log_messages: Some(vec![]), + pre_token_balances: Some(vec![]), + post_token_balances: Some(vec![]), + rewards: Some(vec![]), + loaded_addresses: LoadedAddresses::default(), + return_data: Some(TransactionReturnData::default()), + compute_units_consumed: Some(42u64), + } + .into(); + + let signature1 = Signature::from([1u8; 64]); + let signature2 = Signature::from([2u8; 64]); + let signature3 = Signature::from([3u8; 64]); + let signature4 = Signature::from([4u8; 64]); + let signature5 = Signature::from([5u8; 64]); + let signature6 = Signature::from([6u8; 64]); + + // Insert slots with 
fork + // 0 (root) + // / \ + // 1 | + // 2 (root) + // / | + // 3 | + // 4 (root) + // | + // 5 + let meta0 = SlotMeta::new(0, Some(0)); + blockstore.meta_cf.put(0, &meta0).unwrap(); + let meta1 = SlotMeta::new(1, Some(0)); + blockstore.meta_cf.put(1, &meta1).unwrap(); + let meta2 = SlotMeta::new(2, Some(0)); + blockstore.meta_cf.put(2, &meta2).unwrap(); + let meta3 = SlotMeta::new(3, Some(2)); + blockstore.meta_cf.put(3, &meta3).unwrap(); + let meta4 = SlotMeta::new(4, Some(2)); + blockstore.meta_cf.put(4, &meta4).unwrap(); + let meta5 = SlotMeta::new(5, Some(4)); + blockstore.meta_cf.put(5, &meta5).unwrap(); + + blockstore.set_roots([0, 2, 4].iter()).unwrap(); + + // Initialize statuses: + // signature1 in skipped slot and root (2), both index 1 + // signature2 in skipped slot and root (4), both index 0 + // signature3 in root + // signature4 in non-root, + // signature5 extra entries + transaction_status_cf + .put_deprecated_protobuf((1, signature1, 1), &status) + .unwrap(); + + transaction_status_cf + .put_deprecated_protobuf((1, signature1, 2), &status) + .unwrap(); + + transaction_status_cf + .put_deprecated_protobuf((0, signature2, 3), &status) + .unwrap(); + + transaction_status_cf + .put_deprecated_protobuf((0, signature2, 4), &status) + .unwrap(); + + transaction_status_cf + .put_protobuf((signature3, 4), &status) + .unwrap(); + + transaction_status_cf + .put_protobuf((signature4, 5), &status) + .unwrap(); + + transaction_status_cf + .put_protobuf((signature5, 5), &status) + .unwrap(); + + // Signature exists, root found in index 1 + if let (Some((slot, _status)), counter) = blockstore + .get_transaction_status_with_counter(signature1, &[].into()) + .unwrap() + { + assert_eq!(slot, 2); + assert_eq!(counter, 4); + } + + // Signature exists, root found in index 0 + if let (Some((slot, _status)), counter) = blockstore + .get_transaction_status_with_counter(signature2, &[].into()) + .unwrap() + { + assert_eq!(slot, 4); + assert_eq!(counter, 3); + } + + // 
Signature exists + if let (Some((slot, _status)), counter) = blockstore + .get_transaction_status_with_counter(signature3, &[].into()) + .unwrap() + { + assert_eq!(slot, 4); + assert_eq!(counter, 1); + } + + // Signature does not exist + let (status, counter) = blockstore + .get_transaction_status_with_counter(signature6, &[].into()) + .unwrap(); + assert_eq!(status, None); assert_eq!(counter, 1); } @@ -7940,11 +7862,11 @@ pub mod tests { let lowest_available_slot = lowest_cleanup_slot + 1; transaction_status_cf - .put_protobuf((0, signature1, lowest_cleanup_slot), &status) + .put_protobuf((signature1, lowest_cleanup_slot), &status) .unwrap(); transaction_status_cf - .put_protobuf((0, signature2, lowest_available_slot), &status) + .put_protobuf((signature2, lowest_available_slot), &status) .unwrap(); let address0 = solana_sdk::pubkey::new_rand(); @@ -7956,6 +7878,7 @@ pub mod tests { vec![&address0], vec![], TransactionStatusMeta::default(), + 0, ) .unwrap(); blockstore @@ -7965,6 +7888,7 @@ pub mod tests { vec![&address1], vec![], TransactionStatusMeta::default(), + 0, ) .unwrap(); @@ -7979,10 +7903,6 @@ pub mod tests { .find_address_signatures_for_slot(address0, lowest_cleanup_slot) .unwrap() .is_empty(), - blockstore - .find_address_signatures(address0, lowest_cleanup_slot, lowest_cleanup_slot) - .unwrap() - .is_empty(), ) }; @@ -7997,17 +7917,13 @@ pub mod tests { .find_address_signatures_for_slot(address1, lowest_available_slot) .unwrap() .is_empty(), - !blockstore - .find_address_signatures(address1, lowest_available_slot, lowest_available_slot) - .unwrap() - .is_empty(), ); - assert_eq!(are_existing_always, (true, true, true)); + assert_eq!(are_existing_always, (true, true)); }; let are_missing = check_for_missing(); // should never be missing before the conditional compaction & simulation... 
- assert_eq!(are_missing, (false, false, false)); + assert_eq!(are_missing, (false, false)); assert_existing_always(); if simulate_ledger_cleanup_service { @@ -8019,10 +7935,10 @@ pub mod tests { if simulate_ledger_cleanup_service { // ... when either simulation (or both) is effective, we should observe to be missing // consistently - assert_eq!(are_missing, (true, true, true)); + assert_eq!(are_missing, (true, true)); } else { // ... otherwise, we should observe to be existing... - assert_eq!(are_missing, (false, false, false)); + assert_eq!(are_missing, (false, false)); } assert_existing_always(); } @@ -8099,7 +8015,7 @@ pub mod tests { .into(); blockstore .transaction_status_cf - .put_protobuf((0, signature, slot), &status) + .put_protobuf((signature, slot), &status) .unwrap(); VersionedTransactionWithStatusMeta { transaction, @@ -8143,11 +8059,13 @@ pub mod tests { ); } - blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap(); + blockstore + .run_purge(0, slot, PurgeType::CompactionFilter) + .unwrap(); *blockstore.lowest_cleanup_slot.write().unwrap() = slot; for VersionedTransactionWithStatusMeta { transaction, .. 
} in expected_transactions { let signature = transaction.signatures[0]; - assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None,); + assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None); assert_eq!( blockstore .get_complete_transaction(signature, slot + 1) @@ -8219,7 +8137,7 @@ pub mod tests { .into(); blockstore .transaction_status_cf - .put_protobuf((0, signature, slot), &status) + .put_protobuf((signature, slot), &status) .unwrap(); VersionedTransactionWithStatusMeta { transaction, @@ -8256,7 +8174,9 @@ pub mod tests { assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None); } - blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap(); + blockstore + .run_purge(0, slot, PurgeType::CompactionFilter) + .unwrap(); *blockstore.lowest_cleanup_slot.write().unwrap() = slot; for VersionedTransactionWithStatusMeta { transaction, .. } in expected_transactions { let signature = transaction.signatures[0]; @@ -8284,138 +8204,32 @@ pub mod tests { ); } - #[test] - fn test_get_confirmed_signatures_for_address() { - let ledger_path = get_tmp_ledger_path_auto_delete!(); - let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - - let address0 = solana_sdk::pubkey::new_rand(); - let address1 = solana_sdk::pubkey::new_rand(); - - let slot0 = 10; - for x in 1..5 { - let signature = Signature::from([x; 64]); - blockstore - .write_transaction_status( - slot0, - signature, - vec![&address0], - vec![&address1], - TransactionStatusMeta::default(), - ) - .unwrap(); - } - let slot1 = 20; - for x in 5..9 { - let signature = Signature::from([x; 64]); - blockstore - .write_transaction_status( - slot1, - signature, - vec![&address0], - vec![&address1], - TransactionStatusMeta::default(), - ) - .unwrap(); - } - blockstore.set_roots([slot0, slot1].iter()).unwrap(); - - let all0 = blockstore - .get_confirmed_signatures_for_address(address0, 0, 50) - .unwrap(); - assert_eq!(all0.len(), 8); - for x in 1..9 { - let expected_signature 
= Signature::from([x; 64]); - assert_eq!(all0[x as usize - 1], expected_signature); - } - assert_eq!( - blockstore - .get_confirmed_signatures_for_address(address0, 20, 50) - .unwrap() - .len(), - 4 - ); - assert_eq!( - blockstore - .get_confirmed_signatures_for_address(address0, 0, 10) - .unwrap() - .len(), - 4 - ); - assert!(blockstore - .get_confirmed_signatures_for_address(address0, 1, 5) - .unwrap() - .is_empty()); - assert_eq!( - blockstore - .get_confirmed_signatures_for_address(address0, 1, 15) - .unwrap() - .len(), - 4 - ); - - let all1 = blockstore - .get_confirmed_signatures_for_address(address1, 0, 50) - .unwrap(); - assert_eq!(all1.len(), 8); - for x in 1..9 { - let expected_signature = Signature::from([x; 64]); - assert_eq!(all1[x as usize - 1], expected_signature); - } - - // Purge index 0 - blockstore - .run_purge(0, 10, PurgeType::PrimaryIndex) - .unwrap(); - assert_eq!( - blockstore - .get_confirmed_signatures_for_address(address0, 0, 50) - .unwrap() - .len(), - 4 - ); - assert_eq!( - blockstore - .get_confirmed_signatures_for_address(address0, 20, 50) - .unwrap() - .len(), - 4 - ); - assert!(blockstore - .get_confirmed_signatures_for_address(address0, 0, 10) - .unwrap() - .is_empty()); - assert!(blockstore - .get_confirmed_signatures_for_address(address0, 1, 5) - .unwrap() - .is_empty()); - assert_eq!( - blockstore - .get_confirmed_signatures_for_address(address0, 1, 25) - .unwrap() - .len(), - 4 - ); - - // Test sort, regardless of entry order or signature value - for slot in (21..25).rev() { - let random_bytes: [u8; 64] = std::array::from_fn(|_| rand::random::()); - let signature = Signature::from(random_bytes); - blockstore - .write_transaction_status( - slot, - signature, - vec![&address0], - vec![&address1], - TransactionStatusMeta::default(), - ) - .unwrap(); - } - blockstore.set_roots([21, 22, 23, 24].iter()).unwrap(); - let mut past_slot = 0; - for (slot, _) in blockstore.find_address_signatures(address0, 1, 25).unwrap() { - assert!(slot 
>= past_slot); - past_slot = slot; + impl Blockstore { + pub(crate) fn write_deprecated_transaction_status( + &self, + primary_index: u64, + slot: Slot, + signature: Signature, + writable_keys: Vec<&Pubkey>, + readonly_keys: Vec<&Pubkey>, + status: TransactionStatusMeta, + ) -> Result<()> { + let status = status.into(); + self.transaction_status_cf + .put_deprecated_protobuf((primary_index, signature, slot), &status)?; + for address in writable_keys { + self.address_signatures_cf.put_deprecated( + (primary_index, *address, slot, signature), + &AddressSignatureMeta { writeable: true }, + )?; + } + for address in readonly_keys { + self.address_signatures_cf.put_deprecated( + (primary_index, *address, slot, signature), + &AddressSignatureMeta { writeable: false }, + )?; + } + Ok(()) } } @@ -8437,6 +8251,7 @@ pub mod tests { vec![&address0], vec![&address1], TransactionStatusMeta::default(), + x as usize, ) .unwrap(); } @@ -8450,6 +8265,7 @@ pub mod tests { vec![&address0], vec![&address1], TransactionStatusMeta::default(), + x as usize, ) .unwrap(); } @@ -8462,6 +8278,7 @@ pub mod tests { vec![&address0], vec![&address1], TransactionStatusMeta::default(), + x as usize, ) .unwrap(); } @@ -8475,6 +8292,7 @@ pub mod tests { vec![&address0], vec![&address1], TransactionStatusMeta::default(), + x as usize, ) .unwrap(); } @@ -8547,6 +8365,7 @@ pub mod tests { ); blockstore.insert_shreds(shreds, None, false).unwrap(); + let mut counter = 0; for entry in entries.into_iter() { for transaction in entry.transactions { assert_eq!(transaction.signatures.len(), 1); @@ -8557,8 +8376,10 @@ pub mod tests { transaction.message.static_account_keys().iter().collect(), vec![], TransactionStatusMeta::default(), + counter, ) .unwrap(); + counter += 1; } } } @@ -8572,6 +8393,7 @@ pub mod tests { entries_to_test_shreds(&entries, slot, 8, true, 0, /*merkle_variant:*/ true); blockstore.insert_shreds(shreds, None, false).unwrap(); + let mut counter = 0; for entry in entries.into_iter() { for 
transaction in entry.transactions { assert_eq!(transaction.signatures.len(), 1); @@ -8582,8 +8404,10 @@ pub mod tests { transaction.message.static_account_keys().iter().collect(), vec![], TransactionStatusMeta::default(), + counter, ) .unwrap(); + counter += 1; } } } @@ -8710,8 +8534,7 @@ pub mod tests { assert_eq!(results[2], all0[i + 2]); } - // Ensure that the signatures within a slot are reverse ordered by signature - // (current limitation of the .get_confirmed_signatures_for_address2()) + // Ensure that the signatures within a slot are reverse ordered by occurrence in block for i in (0..all1.len()).step_by(2) { let results = blockstore .get_confirmed_signatures_for_address2( @@ -8729,7 +8552,6 @@ pub mod tests { .infos; assert_eq!(results.len(), 2); assert_eq!(results[0].slot, results[1].slot); - assert!(results[0].signature >= results[1].signature); assert_eq!(results[0], all1[i]); assert_eq!(results[1], all1[i + 1]); } @@ -8885,8 +8707,7 @@ pub mod tests { assert_eq!(results[1], all0[i + 1]); } - // Ensure that the signatures within a slot are reverse ordered by signature - // (current limitation of the .get_confirmed_signatures_for_address2()) + // Ensure that the signatures within a slot are reverse ordered by occurrence in block for i in (0..all1.len()).step_by(2) { let results = blockstore .get_confirmed_signatures_for_address2( @@ -8904,7 +8725,6 @@ pub mod tests { .infos; assert_eq!(results.len(), 2); assert_eq!(results[0].slot, results[1].slot); - assert!(results[0].signature >= results[1].signature); assert_eq!(results[0], all1[i]); assert_eq!(results[1], all1[i + 1]); } @@ -8939,7 +8759,7 @@ pub mod tests { // Remove signature blockstore .address_signatures_cf - .delete((0, address0, 2, all0[0].signature)) + .delete((address0, 2, 0, all0[0].signature)) .unwrap(); let sig_infos = blockstore .get_confirmed_signatures_for_address2( @@ -9005,7 +8825,7 @@ pub mod tests { } .into(); transaction_status_cf - .put_protobuf((0, transaction.signatures[0], 
slot), &status) + .put_protobuf((transaction.signatures[0], slot), &status) .unwrap(); transactions.push(transaction.into()); } @@ -9698,6 +9518,10 @@ pub mod tests { } } + // This test is probably superfluous, since it is highly unlikely that bincode-format + // TransactionStatus entries exist in any current ledger. They certainly exist in historical + // ledger archives, but typically those require contemporaraneous software for other reasons. + // However, we are persisting the test since the apis still exist in `blockstore_db`. #[test] fn test_transaction_status_protobuf_backward_compatability() { let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -9755,13 +9579,13 @@ pub mod tests { let data = serialize(&deprecated_status).unwrap(); blockstore .transaction_status_cf - .put_bytes((0, Signature::default(), slot), &data) + .put_bytes((Signature::default(), slot), &data) .unwrap(); } for slot in 2..4 { blockstore .transaction_status_cf - .put_protobuf((0, Signature::default(), slot), &protobuf_status) + .put_protobuf((Signature::default(), slot), &protobuf_status) .unwrap(); } for slot in 0..4 { @@ -9769,7 +9593,6 @@ pub mod tests { blockstore .transaction_status_cf .get_protobuf_or_bincode::(( - 0, Signature::default(), slot )) diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index f7e8aab3db3ad7..5c1644aaa032a0 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -1,4 +1,7 @@ -use {super::*, solana_sdk::message::AccountKeys, std::time::Instant}; +use { + super::*, crate::blockstore_db::ColumnIndexDeprecation, solana_sdk::message::AccountKeys, + std::time::Instant, +}; #[derive(Default)] pub struct PurgeStats { @@ -391,18 +394,28 @@ impl Blockstore { for slot in from_slot..=to_slot { let primary_indexes = slot_indexes(slot); - if primary_indexes.is_empty() { - continue; - } let slot_entries = self.get_any_valid_slot_entries(slot, 0); let transactions = 
slot_entries .into_iter() .flat_map(|entry| entry.transactions); - for transaction in transactions { + for (i, transaction) in transactions.enumerate() { if let Some(&signature) = transaction.signatures.get(0) { + batch.delete::((signature, slot))?; + batch.delete::((signature, slot))?; + if !primary_indexes.is_empty() { + batch.delete_raw::( + &cf::TransactionMemos::deprecated_key(signature), + )?; + } for primary_index in &primary_indexes { - batch.delete::((*primary_index, signature, slot))?; + batch.delete_raw::( + &cf::TransactionStatus::deprecated_key(( + *primary_index, + signature, + slot, + )), + )?; } let meta = self.read_transaction_status((signature, slot))?; @@ -412,14 +425,24 @@ impl Blockstore { loaded_addresses.as_ref(), ); + let transaction_index = + u32::try_from(i).map_err(|_| BlockstoreError::TransactionIndexOverflow)?; for pubkey in account_keys.iter() { + batch.delete::(( + *pubkey, + slot, + transaction_index, + signature, + ))?; for primary_index in &primary_indexes { - batch.delete::(( - *primary_index, - *pubkey, - slot, - signature, - ))?; + batch.delete_raw::( + &cf::AddressSignatures::deprecated_key(( + *primary_index, + *pubkey, + slot, + signature, + )), + )?; } } } @@ -482,6 +505,7 @@ pub mod tests { message::Message, transaction::Transaction, }, + test_case::test_case, }; #[test] @@ -525,226 +549,39 @@ pub mod tests { vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], TransactionStatusMeta::default(), - ) - .unwrap(); - } - // Purge to freeze index 0 - blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap(); - - for x in max_slot..2 * max_slot { - let random_bytes: [u8; 64] = std::array::from_fn(|_| rand::random::()); - blockstore - .write_transaction_status( - x, - Signature::from(random_bytes), - vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], - vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], - TransactionStatusMeta::default(), + 0, ) .unwrap(); } // 
Purging range outside of TransactionStatus max slots should not affect TransactionStatus data - blockstore.run_purge(20, 30, PurgeType::Exact).unwrap(); + blockstore.run_purge(10, 20, PurgeType::Exact).unwrap(); - let mut status_entry_iterator = blockstore + let status_entries: Vec<_> = blockstore .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap(); - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); + .iter::(IteratorMode::Start) + .unwrap() + .collect(); + assert_eq!(status_entries.len(), 10); } - #[test] - #[allow(clippy::cognitive_complexity)] - fn test_purge_transaction_status() { - let ledger_path = get_tmp_ledger_path_auto_delete!(); - let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - - let transaction_status_index_cf = &blockstore.transaction_status_index_cf; - let slot = 10; - for _ in 0..5 { - let random_bytes: [u8; 64] = std::array::from_fn(|_| rand::random::()); - blockstore - .write_transaction_status( - slot, - Signature::from(random_bytes), - vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], - vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], - TransactionStatusMeta::default(), - ) - .unwrap(); - } - // Purge to freeze index 0 - blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap(); - let mut status_entry_iterator = blockstore - .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap(); - for _ in 0..5 { - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert_eq!(entry.2, slot); - } - let mut address_transactions_iterator = blockstore - .db - .iter::(IteratorMode::From( - (0, Pubkey::default(), 0, Signature::default()), - IteratorDirection::Forward, - )) - .unwrap(); - for _ in 0..10 { - let entry = address_transactions_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert_eq!(entry.2, slot); - } - assert_eq!( - 
transaction_status_index_cf.get(0).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: 10, - frozen: true, - } - ); - drop(status_entry_iterator); - drop(address_transactions_iterator); - - // Low purge should not affect state - blockstore.run_purge(0, 5, PurgeType::PrimaryIndex).unwrap(); - let mut status_entry_iterator = blockstore - .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap(); - for _ in 0..5 { - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert_eq!(entry.2, slot); - } - let mut address_transactions_iterator = blockstore - .db - .iter::(IteratorMode::From( - cf::AddressSignatures::as_index(0), - IteratorDirection::Forward, - )) - .unwrap(); - for _ in 0..10 { - let entry = address_transactions_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert_eq!(entry.2, slot); - } - assert_eq!( - transaction_status_index_cf.get(0).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: 10, - frozen: true, - } - ); - drop(status_entry_iterator); - drop(address_transactions_iterator); - - // Test boundary conditions: < slot should not purge statuses; <= slot should - blockstore.run_purge(0, 9, PurgeType::PrimaryIndex).unwrap(); - let mut status_entry_iterator = blockstore - .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap(); - for _ in 0..5 { - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert_eq!(entry.2, slot); - } - let mut address_transactions_iterator = blockstore - .db - .iter::(IteratorMode::From( - cf::AddressSignatures::as_index(0), - IteratorDirection::Forward, - )) - .unwrap(); - for _ in 0..10 { - let entry = address_transactions_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert_eq!(entry.2, slot); - } - assert_eq!( - transaction_status_index_cf.get(0).unwrap().unwrap(), - TransactionStatusIndexMeta { - 
max_slot: 10, - frozen: true, - } - ); - drop(status_entry_iterator); - drop(address_transactions_iterator); - - blockstore - .run_purge(0, 10, PurgeType::PrimaryIndex) - .unwrap(); - let mut status_entry_iterator = blockstore - .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap(); - assert!(status_entry_iterator.next().is_none()); - let mut address_transactions_iterator = blockstore + fn clear_and_repopulate_transaction_statuses_for_test(blockstore: &Blockstore, max_slot: u64) { + blockstore.run_purge(0, max_slot, PurgeType::Exact).unwrap(); + let mut iter = blockstore .db - .iter::(IteratorMode::From( - cf::AddressSignatures::as_index(0), - IteratorDirection::Forward, - )) + .iter::(IteratorMode::Start) .unwrap(); - assert!(address_transactions_iterator.next().is_none()); + assert_eq!(iter.next(), None); - assert_eq!( - transaction_status_index_cf.get(0).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: 0, - frozen: false, - } - ); - assert_eq!( - transaction_status_index_cf.get(1).unwrap().unwrap(), - TransactionStatusIndexMeta { - max_slot: 0, - frozen: true, - } - ); + populate_transaction_statuses_for_test(blockstore, 0, max_slot); } - fn clear_and_repopulate_transaction_statuses_for_test( + fn populate_transaction_statuses_for_test( blockstore: &Blockstore, - index0_max_slot: u64, - index1_max_slot: u64, + min_slot: u64, + max_slot: u64, ) { - assert!(index1_max_slot > index0_max_slot); - let mut write_batch = blockstore.db.batch().unwrap(); - blockstore - .run_purge(0, index1_max_slot, PurgeType::PrimaryIndex) - .unwrap(); - blockstore - .db - .delete_range_cf::(&mut write_batch, 0, 2) - .unwrap(); - blockstore - .db - .delete_range_cf::(&mut write_batch, 0, 2) - .unwrap(); - blockstore.db.write(write_batch).unwrap(); - blockstore.initialize_transaction_status_index().unwrap(); - *blockstore.active_transaction_status_index.write().unwrap() = 0; - - for x in 0..index0_max_slot { + 
for x in min_slot..=max_slot { let entries = make_slot_entries_with_transactions(1); let shreds = entries_to_test_shreds( &entries, @@ -770,67 +607,19 @@ pub mod tests { vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], TransactionStatusMeta::default(), + 0, ) .unwrap(); } + } - // Add slot that crosses primary indexes - let entries = make_slot_entries_with_transactions(2); - let shreds = entries_to_test_shreds( - &entries, - index0_max_slot, // slot - index0_max_slot.saturating_sub(1), // parent_slot - true, // is_full_slot - 0, // version - true, // merkle_variant - ); - blockstore.insert_shreds(shreds, None, false).unwrap(); - let signatures = entries - .iter() - .filter(|entry| !entry.is_tick()) - .cloned() - .flat_map(|entry| entry.transactions) - .map(|transaction| transaction.signatures[0]) - .collect::>(); - let random_bytes: Vec = (0..64).map(|_| rand::random::()).collect(); - blockstore - .write_transaction_status( - index0_max_slot, - signatures[0], - vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], - vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], - TransactionStatusMeta::default(), - ) - .unwrap(); - - // Freeze index 0 - let mut write_batch = blockstore.db.batch().unwrap(); - let mut w_active_transaction_status_index = - blockstore.active_transaction_status_index.write().unwrap(); - blockstore - .toggle_transaction_status_index( - &mut write_batch, - &mut w_active_transaction_status_index, - index0_max_slot + 1, - ) - .unwrap(); - drop(w_active_transaction_status_index); - blockstore.db.write(write_batch).unwrap(); - - let random_bytes: Vec = (0..64).map(|_| rand::random::()).collect(); - blockstore - .write_transaction_status( - index0_max_slot, - signatures[1], - vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], - vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], - TransactionStatusMeta::default(), - ) - .unwrap(); - - // Note: index0_max_slot exists in both indexes - 
- for x in index0_max_slot + 1..index1_max_slot + 1 { + fn populate_deprecated_transaction_statuses_for_test( + blockstore: &Blockstore, + primary_index: u64, + min_slot: u64, + max_slot: u64, + ) { + for x in min_slot..=max_slot { let entries = make_slot_entries_with_transactions(1); let shreds = entries_to_test_shreds( &entries, @@ -841,7 +630,7 @@ pub mod tests { true, // merkle_variant ); blockstore.insert_shreds(shreds, None, false).unwrap(); - let signature: Signature = entries + let signature = entries .iter() .filter(|entry| !entry.is_tick()) .cloned() @@ -850,7 +639,8 @@ pub mod tests { .collect::>()[0]; let random_bytes: Vec = (0..64).map(|_| rand::random::()).collect(); blockstore - .write_transaction_status( + .write_deprecated_transaction_status( + primary_index, x, signature, vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], @@ -859,28 +649,6 @@ pub mod tests { ) .unwrap(); } - assert_eq!( - blockstore - .transaction_status_index_cf - .get(0) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index0_max_slot, - frozen: true, - } - ); - assert_eq!( - blockstore - .transaction_status_index_cf - .get(1) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index1_max_slot, - frozen: false, - } - ); } #[test] @@ -914,6 +682,7 @@ pub mod tests { transaction.message.static_account_keys().iter().collect(), vec![], TransactionStatusMeta::default(), + 0, ) .unwrap(); } @@ -937,359 +706,216 @@ pub mod tests { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let index0_max_slot = 9; - let index1_max_slot = 19; + let max_slot = 9; // Test purge outside bounds - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); - blockstore.run_purge(20, 22, PurgeType::Exact).unwrap(); + clear_and_repopulate_transaction_statuses_for_test(&blockstore, max_slot); + blockstore.run_purge(10, 12, 
PurgeType::Exact).unwrap(); let mut status_entry_iterator = blockstore .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) + .iter::(IteratorMode::Start) .unwrap(); - assert_eq!( - blockstore - .transaction_status_index_cf - .get(0) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index0_max_slot, - frozen: true, - } - ); - for _ in 0..index0_max_slot + 1 { + for _ in 0..max_slot + 1 { let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - } - assert_eq!( - blockstore - .transaction_status_index_cf - .get(1) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index1_max_slot, - frozen: false, - } - ); - for _ in index0_max_slot + 1..index1_max_slot + 1 { - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 1); + assert!(entry.1 <= max_slot || entry.1 > 0); } + assert_eq!(status_entry_iterator.next(), None); drop(status_entry_iterator); - // Test purge inside index 0 - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); + // Test purge inside written range + clear_and_repopulate_transaction_statuses_for_test(&blockstore, max_slot); blockstore.run_purge(2, 4, PurgeType::Exact).unwrap(); let mut status_entry_iterator = blockstore .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) + .iter::(IteratorMode::Start) .unwrap(); - assert_eq!( - blockstore - .transaction_status_index_cf - .get(0) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index0_max_slot, - frozen: true, - } - ); for _ in 0..7 { // 7 entries remaining let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert!(entry.2 < 2 || entry.2 > 4); - } - assert_eq!( - blockstore - .transaction_status_index_cf - .get(1) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index1_max_slot, - frozen: 
false, - } - ); - for _ in index0_max_slot + 1..index1_max_slot + 1 { - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 1); + assert!(entry.1 < 2 || entry.1 > 4); } + assert_eq!(status_entry_iterator.next(), None); drop(status_entry_iterator); - // Test purge inside index 0 at upper boundary - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); + // Purge up to but not including max_slot + clear_and_repopulate_transaction_statuses_for_test(&blockstore, max_slot); blockstore - .run_purge(7, index0_max_slot, PurgeType::Exact) + .run_purge(0, max_slot - 1, PurgeType::Exact) .unwrap(); let mut status_entry_iterator = blockstore .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) + .iter::(IteratorMode::Start) .unwrap(); - assert_eq!( - blockstore - .transaction_status_index_cf - .get(0) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: 6, - frozen: true, - } - ); - for _ in 0..7 { - // 7 entries remaining - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert!(entry.2 < 7); - } - assert_eq!( - blockstore - .transaction_status_index_cf - .get(1) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index1_max_slot, - frozen: false, - } - ); - for _ in index0_max_slot + 1..index1_max_slot + 1 { - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 1); - } + let entry = status_entry_iterator.next().unwrap().0; + assert_eq!(entry.1, 9); + assert_eq!(status_entry_iterator.next(), None); drop(status_entry_iterator); - // Test purge inside index 1 at lower boundary - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); - blockstore.run_purge(10, 12, PurgeType::Exact).unwrap(); + // Test purge all + clear_and_repopulate_transaction_statuses_for_test(&blockstore, max_slot); + 
blockstore.run_purge(0, 22, PurgeType::Exact).unwrap(); let mut status_entry_iterator = blockstore .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) + .iter::(IteratorMode::Start) .unwrap(); - assert_eq!( - blockstore - .transaction_status_index_cf - .get(0) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index0_max_slot, - frozen: true, - } - ); - for _ in 0..index0_max_slot + 1 { - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - } - assert_eq!( - blockstore - .transaction_status_index_cf - .get(1) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index1_max_slot, - frozen: false, - } - ); - for _ in 13..index1_max_slot + 1 { - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 1); - assert!(entry.2 == index0_max_slot || entry.2 > 12); - } - drop(status_entry_iterator); + assert_eq!(status_entry_iterator.next(), None); + } - // Test purge across index boundaries - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); - blockstore.run_purge(7, 12, PurgeType::Exact).unwrap(); + fn get_index_bounds(blockstore: &Blockstore) -> (Box<[u8]>, Box<[u8]>) { + let first_index = { + let mut status_entry_iterator = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::Start); + status_entry_iterator.next().unwrap().unwrap().0 + }; + let last_index = { + let mut status_entry_iterator = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::End); + status_entry_iterator.next().unwrap().unwrap().0 + }; + (first_index, last_index) + } - let mut status_entry_iterator = blockstore + fn purge_exact(blockstore: &Blockstore, oldest_slot: Slot) { + blockstore + .run_purge(0, oldest_slot - 1, PurgeType::Exact) + .unwrap(); + } + + fn purge_compaction_filter(blockstore: &Blockstore, oldest_slot: Slot) { + let (first_index, last_index) = 
get_index_bounds(blockstore); + blockstore.db.set_oldest_slot(oldest_slot); + blockstore .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) + .compact_range_cf::(&first_index, &last_index); + } + + #[test_case(purge_exact; "exact")] + #[test_case(purge_compaction_filter; "compaction_filter")] + fn test_purge_special_columns_with_old_data(purge: impl Fn(&Blockstore, Slot)) { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + populate_deprecated_transaction_statuses_for_test(&blockstore, 0, 0, 4); + populate_deprecated_transaction_statuses_for_test(&blockstore, 1, 5, 9); + populate_transaction_statuses_for_test(&blockstore, 10, 14); + + let mut index0 = blockstore + .transaction_status_index_cf + .get(0) + .unwrap() .unwrap(); - assert_eq!( - blockstore - .transaction_status_index_cf - .get(0) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: 6, - frozen: true, - } - ); - for _ in 0..7 { - // 7 entries remaining - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert!(entry.2 < 7); - } - assert_eq!( - blockstore - .transaction_status_index_cf - .get(1) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index1_max_slot, - frozen: false, - } - ); - for _ in 13..index1_max_slot + 1 { - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 1); - assert!(entry.2 > 12); + index0.frozen = true; + index0.max_slot = 4; + blockstore + .transaction_status_index_cf + .put(0, &index0) + .unwrap(); + let mut index1 = blockstore + .transaction_status_index_cf + .get(1) + .unwrap() + .unwrap(); + index1.frozen = false; + index1.max_slot = 9; + blockstore + .transaction_status_index_cf + .put(1, &index1) + .unwrap(); + + let statuses: Vec<_> = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::Start) + .collect(); + assert_eq!(statuses.len(), 
15); + + // Delete some of primary-index 0 + let oldest_slot = 3; + purge(&blockstore, oldest_slot); + let status_entry_iterator = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::Start); + let mut count = 0; + for entry in status_entry_iterator { + let (key, _value) = entry.unwrap(); + let (_signature, slot) = ::index(&key); + assert!(slot >= oldest_slot); + count += 1; } - drop(status_entry_iterator); + assert_eq!(count, 12); - // Test purge include complete index 1 - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); - blockstore.run_purge(7, 22, PurgeType::Exact).unwrap(); + // Delete the rest of primary-index 0 + let oldest_slot = 5; + purge(&blockstore, oldest_slot); + let status_entry_iterator = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::Start); + let mut count = 0; + for entry in status_entry_iterator { + let (key, _value) = entry.unwrap(); + let (_signature, slot) = ::index(&key); + assert!(slot >= oldest_slot); + count += 1; + } + assert_eq!(count, 10); - let mut status_entry_iterator = blockstore - .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap(); - assert_eq!( - blockstore - .transaction_status_index_cf - .get(0) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: 6, - frozen: true, - } - ); - for _ in 0..7 { - // 7 entries remaining - let entry = status_entry_iterator.next().unwrap().0; - assert_eq!(entry.0, 0); - assert!(entry.2 < 7); + // Delete some of primary-index 1 + let oldest_slot = 8; + purge(&blockstore, oldest_slot); + let status_entry_iterator = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::Start); + let mut count = 0; + for entry in status_entry_iterator { + let (key, _value) = entry.unwrap(); + let (_signature, slot) = ::index(&key); + assert!(slot >= oldest_slot); + count += 1; } - assert_eq!( - blockstore - 
.transaction_status_index_cf - .get(1) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: 6, - frozen: false, - } - ); - drop(status_entry_iterator); + assert_eq!(count, 7); - // Purge up to but not including index0_max_slot - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); - blockstore - .run_purge(0, index0_max_slot - 1, PurgeType::Exact) - .unwrap(); - assert_eq!( - blockstore - .transaction_status_index_cf - .get(0) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: index0_max_slot, - frozen: true, - } - ); + // Delete the rest of primary-index 1 + let oldest_slot = 10; + purge(&blockstore, oldest_slot); + let status_entry_iterator = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::Start); + let mut count = 0; + for entry in status_entry_iterator { + let (key, _value) = entry.unwrap(); + let (_signature, slot) = ::index(&key); + assert!(slot >= oldest_slot); + count += 1; + } + assert_eq!(count, 5); - // Test purge all - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); - blockstore.run_purge(0, 22, PurgeType::Exact).unwrap(); + // Delete some of new-style entries + let oldest_slot = 13; + purge(&blockstore, oldest_slot); + let status_entry_iterator = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::Start); + let mut count = 0; + for entry in status_entry_iterator { + let (key, _value) = entry.unwrap(); + let (_signature, slot) = ::index(&key); + assert!(slot >= oldest_slot); + count += 1; + } + assert_eq!(count, 2); + // Delete the rest of the new-style entries + let oldest_slot = 20; + purge(&blockstore, oldest_slot); let mut status_entry_iterator = blockstore - .db - .iter::(IteratorMode::From( - cf::TransactionStatus::as_index(0), - IteratorDirection::Forward, - )) - .unwrap(); + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::Start); 
assert!(status_entry_iterator.next().is_none()); - - assert_eq!( - blockstore - .transaction_status_index_cf - .get(0) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: 0, - frozen: true, - } - ); - assert_eq!( - blockstore - .transaction_status_index_cf - .get(1) - .unwrap() - .unwrap(), - TransactionStatusIndexMeta { - max_slot: 0, - frozen: false, - } - ); } #[test] @@ -1326,16 +952,9 @@ pub mod tests { fn test_purge_special_columns_compaction_filter() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let index0_max_slot = 9; - let index1_max_slot = 19; - // includes slot 0, and slot 9 has 2 transactions - let num_total_transactions = index1_max_slot + 2; - - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); + let max_slot = 19; + + clear_and_repopulate_transaction_statuses_for_test(&blockstore, max_slot); let first_index = { let mut status_entry_iterator = blockstore .db @@ -1363,17 +982,13 @@ pub mod tests { .iter::(IteratorMode::Start) .unwrap(); let mut count = 0; - for ((_primary_index, _signature, slot), _value) in status_entry_iterator { + for ((_signature, slot), _value) in status_entry_iterator { assert!(slot >= oldest_slot); count += 1; } - assert_eq!(count, num_total_transactions - oldest_slot); + assert_eq!(count, max_slot - (oldest_slot - 1)); - clear_and_repopulate_transaction_statuses_for_test( - &blockstore, - index0_max_slot, - index1_max_slot, - ); + clear_and_repopulate_transaction_statuses_for_test(&blockstore, max_slot); let first_index = { let mut status_entry_iterator = blockstore .db @@ -1401,10 +1016,10 @@ pub mod tests { .iter::(IteratorMode::Start) .unwrap(); let mut count = 0; - for ((_primary_index, _signature, slot), _value) in status_entry_iterator { + for ((_signature, slot), _value) in status_entry_iterator { assert!(slot >= oldest_slot); count += 1; } - assert_eq!(count, 
num_total_transactions - oldest_slot - 1); // Extra transaction in slot 9 + assert_eq!(count, max_slot - (oldest_slot - 1)); } } diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 3fd33fa12acea3..f9c87ce397d434 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -146,6 +146,8 @@ pub enum BlockstoreError { UnsupportedTransactionVersion, #[error("missing transaction metadata")] MissingTransactionMetadata, + #[error("transaction-index overflow")] + TransactionIndexOverflow, } pub type Result = std::result::Result; @@ -270,14 +272,14 @@ pub mod columns { #[derive(Debug)] /// The transaction status column /// - /// * index type: `(u64, `[`Signature`]`, `[`Slot`])` + /// * index type: `(`[`Signature`]`, `[`Slot`])` /// * value type: [`generated::TransactionStatusMeta`] pub struct TransactionStatus; #[derive(Debug)] /// The address signatures column /// - /// * index type: `(u64, `[`Pubkey`]`, `[`Slot`]`, `[`Signature`]`)` + /// * index type: `(`[`Pubkey`]`, `[`Slot`]`, u32, `[`Signature`]`)` /// * value type: [`blockstore_meta::AddressSignatureMeta`] pub struct AddressSignatures; @@ -614,6 +616,23 @@ impl Rocks { self.db.iterator_cf(cf, iterator_mode) } + fn iterator_cf_raw_key( + &self, + cf: &ColumnFamily, + iterator_mode: IteratorMode>, + ) -> DBIterator { + let start_key; + let iterator_mode = match iterator_mode { + IteratorMode::From(start_from, direction) => { + start_key = start_from; + RocksIteratorMode::From(&start_key, direction) + } + IteratorMode::Start => RocksIteratorMode::Start, + IteratorMode::End => RocksIteratorMode::End, + }; + self.db.iterator_cf(cf, iterator_mode) + } + fn raw_iterator_cf(&self, cf: &ColumnFamily) -> DBRawIterator { self.db.raw_iterator_cf(cf) } @@ -677,6 +696,9 @@ pub trait Column { fn key(index: Self::Index) -> Vec; fn index(key: &[u8]) -> Self::Index; + // This trait method is primarily used by `Database::delete_range_cf()`, and is therefore only + // relevant for columns keyed 
by Slot: ie. SlotColumns and columns that feature a Slot as the + // first item in the key. fn as_index(slot: Slot) -> Self::Index; fn slot(index: Self::Index) -> Slot; } @@ -739,34 +761,58 @@ impl Column for T { } } +pub enum IndexError { + UnpackError, +} + +/// Helper trait to transition primary indexes out from the columns that are using them. +pub trait ColumnIndexDeprecation: Column { + const DEPRECATED_INDEX_LEN: usize; + const CURRENT_INDEX_LEN: usize; + type DeprecatedIndex; + + fn deprecated_key(index: Self::DeprecatedIndex) -> Vec; + fn try_deprecated_index(key: &[u8]) -> std::result::Result; + + fn try_current_index(key: &[u8]) -> std::result::Result; + fn convert_index(deprecated_index: Self::DeprecatedIndex) -> Self::Index; + + fn index(key: &[u8]) -> Self::Index { + if let Ok(index) = Self::try_current_index(key) { + index + } else if let Ok(index) = Self::try_deprecated_index(key) { + Self::convert_index(index) + } else { + // Way back in the day, we broke the TransactionStatus column key. This fallback + // preserves the existing logic for ancient keys, but realistically should never be + // executed. 
+ Self::as_index(0) + } + } +} + impl Column for columns::TransactionStatus { - type Index = (u64, Signature, Slot); + type Index = (Signature, Slot); - fn key((index, signature, slot): (u64, Signature, Slot)) -> Vec { - let mut key = vec![0; 8 + 64 + 8]; // size_of u64 + size_of Signature + size_of Slot - BigEndian::write_u64(&mut key[0..8], index); - key[8..72].copy_from_slice(&signature.as_ref()[0..64]); - BigEndian::write_u64(&mut key[72..80], slot); + fn key((signature, slot): Self::Index) -> Vec { + let mut key = vec![0; Self::CURRENT_INDEX_LEN]; + key[0..64].copy_from_slice(&signature.as_ref()[0..64]); + BigEndian::write_u64(&mut key[64..72], slot); key } - fn index(key: &[u8]) -> (u64, Signature, Slot) { - if key.len() != 80 { - Self::as_index(0) - } else { - let index = BigEndian::read_u64(&key[0..8]); - let signature = Signature::try_from(&key[8..72]).unwrap(); - let slot = BigEndian::read_u64(&key[72..80]); - (index, signature, slot) - } + fn index(key: &[u8]) -> (Signature, Slot) { + ::index(key) } fn slot(index: Self::Index) -> Slot { - index.2 + index.1 } - fn as_index(index: u64) -> Self::Index { - (index, Signature::default(), 0) + // The TransactionStatus column is not keyed by slot so this method is meaningless + // See Column::as_index() declaration for more details + fn as_index(_index: u64) -> Self::Index { + (Signature::default(), 0) } } impl ColumnName for columns::TransactionStatus { @@ -776,63 +822,171 @@ impl ProtobufColumn for columns::TransactionStatus { type Type = generated::TransactionStatusMeta; } -impl Column for columns::AddressSignatures { - type Index = (u64, Pubkey, Slot, Signature); +impl ColumnIndexDeprecation for columns::TransactionStatus { + const DEPRECATED_INDEX_LEN: usize = 80; + const CURRENT_INDEX_LEN: usize = 72; + type DeprecatedIndex = (u64, Signature, Slot); - fn key((index, pubkey, slot, signature): (u64, Pubkey, Slot, Signature)) -> Vec { - let mut key = vec![0; 8 + 32 + 8 + 64]; // size_of u64 + size_of Pubkey + 
size_of Slot + size_of Signature + fn deprecated_key((index, signature, slot): Self::DeprecatedIndex) -> Vec { + let mut key = vec![0; Self::DEPRECATED_INDEX_LEN]; BigEndian::write_u64(&mut key[0..8], index); - key[8..40].copy_from_slice(&pubkey.as_ref()[0..32]); - BigEndian::write_u64(&mut key[40..48], slot); - key[48..112].copy_from_slice(&signature.as_ref()[0..64]); + key[8..72].copy_from_slice(&signature.as_ref()[0..64]); + BigEndian::write_u64(&mut key[72..80], slot); key } - fn index(key: &[u8]) -> (u64, Pubkey, Slot, Signature) { - let index = BigEndian::read_u64(&key[0..8]); - let pubkey = Pubkey::try_from(&key[8..40]).unwrap(); - let slot = BigEndian::read_u64(&key[40..48]); - let signature = Signature::try_from(&key[48..112]).unwrap(); - (index, pubkey, slot, signature) + fn try_deprecated_index(key: &[u8]) -> std::result::Result { + if key.len() != Self::DEPRECATED_INDEX_LEN { + return Err(IndexError::UnpackError); + } + let primary_index = BigEndian::read_u64(&key[0..8]); + let signature = Signature::try_from(&key[8..72]).unwrap(); + let slot = BigEndian::read_u64(&key[72..80]); + Ok((primary_index, signature, slot)) + } + + fn try_current_index(key: &[u8]) -> std::result::Result { + if key.len() != Self::CURRENT_INDEX_LEN { + return Err(IndexError::UnpackError); + } + let signature = Signature::try_from(&key[0..64]).unwrap(); + let slot = BigEndian::read_u64(&key[64..72]); + Ok((signature, slot)) + } + + fn convert_index(deprecated_index: Self::DeprecatedIndex) -> Self::Index { + let (_primary_index, signature, slot) = deprecated_index; + (signature, slot) + } +} + +impl Column for columns::AddressSignatures { + type Index = (Pubkey, Slot, u32, Signature); + + fn key((pubkey, slot, transaction_index, signature): Self::Index) -> Vec { + let mut key = vec![0; Self::CURRENT_INDEX_LEN]; + key[0..32].copy_from_slice(&pubkey.as_ref()[0..32]); + BigEndian::write_u64(&mut key[32..40], slot); + BigEndian::write_u32(&mut key[40..44], transaction_index); + 
key[44..108].copy_from_slice(&signature.as_ref()[0..64]); + key + } + + fn index(key: &[u8]) -> Self::Index { + ::index(key) } fn slot(index: Self::Index) -> Slot { - index.2 + index.1 } - fn as_index(index: u64) -> Self::Index { - (index, Pubkey::default(), 0, Signature::default()) + // The AddressSignatures column is not keyed by slot so this method is meaningless + // See Column::as_index() declaration for more details + fn as_index(_index: u64) -> Self::Index { + (Pubkey::default(), 0, 0, Signature::default()) } } impl ColumnName for columns::AddressSignatures { const NAME: &'static str = ADDRESS_SIGNATURES_CF; } +impl ColumnIndexDeprecation for columns::AddressSignatures { + const DEPRECATED_INDEX_LEN: usize = 112; + const CURRENT_INDEX_LEN: usize = 108; + type DeprecatedIndex = (u64, Pubkey, Slot, Signature); + + fn deprecated_key((primary_index, pubkey, slot, signature): Self::DeprecatedIndex) -> Vec { + let mut key = vec![0; Self::DEPRECATED_INDEX_LEN]; + BigEndian::write_u64(&mut key[0..8], primary_index); + key[8..40].clone_from_slice(&pubkey.as_ref()[0..32]); + BigEndian::write_u64(&mut key[40..48], slot); + key[48..112].clone_from_slice(&signature.as_ref()[0..64]); + key + } + + fn try_deprecated_index(key: &[u8]) -> std::result::Result { + if key.len() != Self::DEPRECATED_INDEX_LEN { + return Err(IndexError::UnpackError); + } + let primary_index = BigEndian::read_u64(&key[0..8]); + let pubkey = Pubkey::try_from(&key[8..40]).unwrap(); + let slot = BigEndian::read_u64(&key[40..48]); + let signature = Signature::try_from(&key[48..112]).unwrap(); + Ok((primary_index, pubkey, slot, signature)) + } + + fn try_current_index(key: &[u8]) -> std::result::Result { + if key.len() != Self::CURRENT_INDEX_LEN { + return Err(IndexError::UnpackError); + } + let pubkey = Pubkey::try_from(&key[0..32]).unwrap(); + let slot = BigEndian::read_u64(&key[32..40]); + let transaction_index = BigEndian::read_u32(&key[40..44]); + let signature = 
Signature::try_from(&key[44..108]).unwrap(); + Ok((pubkey, slot, transaction_index, signature)) + } + + fn convert_index(deprecated_index: Self::DeprecatedIndex) -> Self::Index { + let (_primary_index, pubkey, slot, signature) = deprecated_index; + (pubkey, slot, 0, signature) + } +} + impl Column for columns::TransactionMemos { - type Index = Signature; + type Index = (Signature, Slot); - fn key(signature: Signature) -> Vec { - let mut key = vec![0; 64]; // size_of Signature + fn key((signature, slot): Self::Index) -> Vec { + let mut key = vec![0; Self::CURRENT_INDEX_LEN]; key[0..64].copy_from_slice(&signature.as_ref()[0..64]); + BigEndian::write_u64(&mut key[64..72], slot); key } - fn index(key: &[u8]) -> Signature { - Signature::try_from(&key[..64]).unwrap() + fn index(key: &[u8]) -> Self::Index { + ::index(key) } - fn slot(_index: Self::Index) -> Slot { - unimplemented!() + fn slot(index: Self::Index) -> Slot { + index.1 } - fn as_index(_index: u64) -> Self::Index { - Signature::default() + fn as_index(index: u64) -> Self::Index { + (Signature::default(), index) } } impl ColumnName for columns::TransactionMemos { const NAME: &'static str = TRANSACTION_MEMOS_CF; } +impl ColumnIndexDeprecation for columns::TransactionMemos { + const DEPRECATED_INDEX_LEN: usize = 64; + const CURRENT_INDEX_LEN: usize = 72; + type DeprecatedIndex = Signature; + + fn deprecated_key(signature: Self::DeprecatedIndex) -> Vec { + let mut key = vec![0; Self::DEPRECATED_INDEX_LEN]; + key[0..64].copy_from_slice(&signature.as_ref()[0..64]); + key + } + + fn try_deprecated_index(key: &[u8]) -> std::result::Result { + Signature::try_from(&key[..64]).map_err(|_| IndexError::UnpackError) + } + + fn try_current_index(key: &[u8]) -> std::result::Result { + if key.len() != Self::CURRENT_INDEX_LEN { + return Err(IndexError::UnpackError); + } + let signature = Signature::try_from(&key[0..64]).unwrap(); + let slot = BigEndian::read_u64(&key[64..72]); + Ok((signature, slot)) + } + + fn 
convert_index(deprecated_index: Self::DeprecatedIndex) -> Self::Index { + (deprecated_index, 0) + } +} + impl Column for columns::TransactionStatusIndex { type Index = u64; @@ -1456,12 +1610,16 @@ where } pub fn get(&self, key: C::Index) -> Result> { + self.get_raw(&C::key(key)) + } + + pub fn get_raw(&self, key: &[u8]) -> Result> { let mut result = Ok(None); let is_perf_enabled = maybe_enable_rocksdb_perf( self.column_options.rocks_perf_sample_interval, &self.read_perf_status, ); - if let Some(pinnable_slice) = self.backend.get_pinned_cf(self.handle(), &C::key(key))? { + if let Some(pinnable_slice) = self.backend.get_pinned_cf(self.handle(), key)? { let value = deserialize(pinnable_slice.as_ref())?; result = Ok(Some(value)) } @@ -1507,12 +1665,19 @@ where pub fn get_protobuf_or_bincode>( &self, key: C::Index, + ) -> Result> { + self.get_raw_protobuf_or_bincode::(&C::key(key)) + } + + pub(crate) fn get_raw_protobuf_or_bincode>( + &self, + key: &[u8], ) -> Result> { let is_perf_enabled = maybe_enable_rocksdb_perf( self.column_options.rocks_perf_sample_interval, &self.read_perf_status, ); - let result = self.backend.get_pinned_cf(self.handle(), &C::key(key)); + let result = self.backend.get_pinned_cf(self.handle(), key); if let Some(op_start_instant) = is_perf_enabled { report_rocksdb_read_perf( C::NAME, @@ -1577,6 +1742,45 @@ where } } +impl LedgerColumn +where + C: ColumnIndexDeprecation + ColumnName, +{ + pub(crate) fn iter_current_index_filtered( + &self, + iterator_mode: IteratorMode, + ) -> Result)> + '_> { + let cf = self.handle(); + let iter = self.backend.iterator_cf::(cf, iterator_mode); + Ok(iter.filter_map(|pair| { + let (key, value) = pair.unwrap(); + C::try_current_index(&key).ok().map(|index| (index, value)) + })) + } + + pub(crate) fn iter_deprecated_index_filtered( + &self, + iterator_mode: IteratorMode, + ) -> Result)> + '_> { + let cf = self.handle(); + let iterator_mode_raw_key = match iterator_mode { + IteratorMode::Start => IteratorMode::Start, 
+ IteratorMode::End => IteratorMode::End, + IteratorMode::From(start_from, direction) => { + let raw_key = C::deprecated_key(start_from); + IteratorMode::From(raw_key, direction) + } + }; + let iter = self.backend.iterator_cf_raw_key(cf, iterator_mode_raw_key); + Ok(iter.filter_map(|pair| { + let (key, value) = pair.unwrap(); + C::try_deprecated_index(&key) + .ok() + .map(|index| (index, value)) + })) + } +} + impl<'a> WriteBatch<'a> { pub fn put_bytes(&mut self, key: C::Index, bytes: &[u8]) -> Result<()> { self.write_batch @@ -1585,7 +1789,11 @@ impl<'a> WriteBatch<'a> { } pub fn delete(&mut self, key: C::Index) -> Result<()> { - self.write_batch.delete_cf(self.get_cf::(), C::key(key)); + self.delete_raw::(&C::key(key)) + } + + pub(crate) fn delete_raw(&mut self, key: &[u8]) -> Result<()> { + self.write_batch.delete_cf(self.get_cf::(), key); Ok(()) } @@ -1882,7 +2090,9 @@ fn should_enable_cf_compaction(cf_name: &str) -> bool { // completed on a given range or file. matches!( cf_name, - columns::TransactionStatus::NAME | columns::AddressSignatures::NAME + columns::TransactionStatus::NAME + | columns::TransactionMemos::NAME + | columns::AddressSignatures::NAME ) } @@ -1976,4 +2186,44 @@ pub mod tests { }); assert!(!should_enable_cf_compaction("something else")); } + + impl LedgerColumn + where + C: ColumnIndexDeprecation + ProtobufColumn + ColumnName, + { + pub fn put_deprecated_protobuf( + &self, + key: C::DeprecatedIndex, + value: &C::Type, + ) -> Result<()> { + let mut buf = Vec::with_capacity(value.encoded_len()); + value.encode(&mut buf)?; + self.backend + .put_cf(self.handle(), &C::deprecated_key(key), &buf) + } + } + + impl LedgerColumn + where + C: ColumnIndexDeprecation + TypedColumn + ColumnName, + { + pub fn put_deprecated(&self, key: C::DeprecatedIndex, value: &C::Type) -> Result<()> { + let serialized_value = serialize(value)?; + self.backend + .put_cf(self.handle(), &C::deprecated_key(key), &serialized_value) + } + } + + impl LedgerColumn + where + C: 
ColumnIndexDeprecation + ColumnName, + { + pub(crate) fn iterator_cf_raw_key( + &self, + iterator_mode: IteratorMode>, + ) -> DBIterator { + let cf = self.handle(); + self.backend.iterator_cf_raw_key(cf, iterator_mode) + } + } } diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index c4c619a1a32568..ccc4364891dd75 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -188,7 +188,7 @@ impl TransactionStatusService { if enable_rpc_transaction_history { if let Some(memos) = extract_and_fmt_memos(transaction.message()) { blockstore - .write_transaction_memos(transaction.signature(), memos) + .write_transaction_memos(transaction.signature(), slot, memos) .expect("Expect database write to succeed: TransactionMemos"); } @@ -199,6 +199,7 @@ impl TransactionStatusService { tx_account_locks.writable, tx_account_locks.readonly, transaction_status_meta, + transaction_index, ) .expect("Expect database write to succeed: TransactionStatus"); } From ad949b21b71ab2b626a3da5e625eff2c13f1f280 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 10 Oct 2023 11:48:52 -0700 Subject: [PATCH 302/407] Fix failure in updating GIT index of cargo registry server (#33628) --- cargo-registry/src/dummy_git_index.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cargo-registry/src/dummy_git_index.rs b/cargo-registry/src/dummy_git_index.rs index 1b36f485ebff3e..a04792e9122698 100644 --- a/cargo-registry/src/dummy_git_index.rs +++ b/cargo-registry/src/dummy_git_index.rs @@ -70,7 +70,11 @@ impl DummyGitIndex { if empty || config_written || new_symlink || new_git_symlink { let mut index = repository.index().expect("cannot get the Index file"); index - .add_all(["*"].iter(), IndexAddOption::DEFAULT, None) + .add_all( + ["config.json", "index"].iter(), + IndexAddOption::DEFAULT, + None, + ) .expect("Failed to add modified files to git index"); index.write().expect("Failed to update the git 
index"); From 8c27d8bbb3f8c38bf101b4291c542be444e6f044 Mon Sep 17 00:00:00 2001 From: Jeff Biseda Date: Tue, 10 Oct 2023 12:08:14 -0700 Subject: [PATCH 303/407] indicate that test-checks.sh requires cargo-hack (#33519) --- ci/test-checks.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 0f037cb3478ded..85375d6bbeec4b 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -13,6 +13,14 @@ source ci/rust-version.sh nightly eval "$(ci/channel-info.sh)" cargoNightly="$(readlink -f "./cargo") nightly" +# check that cargo-hack has been installed +if ! $cargoNightly hack --version >/dev/null 2>&1; then + cat >&2 < Date: Tue, 10 Oct 2023 14:43:52 -0500 Subject: [PATCH 304/407] Revert "stop padding new append vecs to page size (#33607)" (#33634) This reverts commit b7962a3610b9beec2bfe660622a31fc8e34c1cd3. --- accounts-db/src/accounts_db.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index d37fcf655d74f5..8bfeb3fb289b85 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5733,7 +5733,11 @@ impl AccountsDb { .create_store_count .fetch_add(1, Ordering::Relaxed); let path_index = thread_rng().gen_range(0..paths.len()); - let store = Arc::new(self.new_storage_entry(slot, Path::new(&paths[path_index]), size)); + let store = Arc::new(self.new_storage_entry( + slot, + Path::new(&paths[path_index]), + Self::page_align(size), + )); debug!( "creating store: {} slot: {} len: {} size: {} from: {} path: {:?}", @@ -9909,7 +9913,7 @@ pub mod test_utils { // allocate an append vec for this slot that can hold all the test accounts. This prevents us from creating more than 1 append vec for this slot. 
_ = accounts.accounts_db.create_and_insert_store( slot, - AccountsDb::page_align(bytes_required as u64), + bytes_required as u64, "create_test_accounts", ); } From a22678312e878d1a39c81cdfc96552371b70d742 Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Wed, 11 Oct 2023 00:40:35 +0400 Subject: [PATCH 305/407] Allow to create HTTP Sender with custom Client (#33580) * Allow to create HTTP Sender with custom Client * Update rpc-client/src/http_sender.rs Co-authored-by: Tyera --------- Co-authored-by: Tyera --- rpc-client/src/http_sender.rs | 36 ++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/rpc-client/src/http_sender.rs b/rpc-client/src/http_sender.rs index 902f86ce631a48..6ef22cc42c842a 100644 --- a/rpc-client/src/http_sender.rs +++ b/rpc-client/src/http_sender.rs @@ -47,31 +47,41 @@ impl HttpSender { /// /// The URL is an HTTP URL, usually for port 8899. pub fn new_with_timeout(url: U, timeout: Duration) -> Self { - let mut default_headers = header::HeaderMap::new(); - default_headers.append( - header::HeaderName::from_static("solana-client"), - header::HeaderValue::from_str( - format!("rust/{}", solana_version::Version::default()).as_str(), - ) - .unwrap(), - ); - - let client = Arc::new( + Self::new_with_client( + url, reqwest::Client::builder() - .default_headers(default_headers) + .default_headers(Self::default_headers()) .timeout(timeout) .pool_idle_timeout(timeout) .build() .expect("build rpc client"), - ); + ) + } + /// Create an HTTP RPC sender. + /// + /// Most flexible way to create a sender. Pass a created `reqwest::Client`. + pub fn new_with_client(url: U, client: reqwest::Client) -> Self { Self { - client, + client: Arc::new(client), url: url.to_string(), request_id: AtomicU64::new(0), stats: RwLock::new(RpcTransportStats::default()), } } + + /// Create default headers used by HTTP Sender. 
+ pub fn default_headers() -> header::HeaderMap { + let mut default_headers = header::HeaderMap::new(); + default_headers.append( + header::HeaderName::from_static("solana-client"), + header::HeaderValue::from_str( + format!("rust/{}", solana_version::Version::default()).as_str(), + ) + .unwrap(), + ); + default_headers + } } struct StatsUpdater<'a> { From 73a9a14731582d94104fa17ce906c8cbcfe5f403 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 10 Oct 2023 16:35:16 -0500 Subject: [PATCH 306/407] Fold noisy metric into struct of metrics that is reported every 10s (#33635) --- rpc/src/rpc_pubsub_service.rs | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index 99eab0a2353670..7d12feac726059 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -18,7 +18,7 @@ use { net::SocketAddr, str, sync::{ - atomic::{AtomicUsize, Ordering}, + atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, }, thread::{self, Builder, JoinHandle}, @@ -132,6 +132,7 @@ struct SentNotificationStats { num_root: AtomicUsize, num_vote: AtomicUsize, num_block: AtomicUsize, + total_creation_to_queue_time_us: AtomicU64, last_report: AtomicInterval, } @@ -185,6 +186,12 @@ impl SentNotificationStats { self.num_block.swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "total_creation_to_queue_time_us", + self.total_creation_to_queue_time_us + .swap(0, Ordering::Relaxed) as i64, + i64 + ) ); } } @@ -197,6 +204,7 @@ struct BroadcastHandler { fn increment_sent_notification_stats( params: &SubscriptionParams, + notification: &RpcNotification, stats: &Arc, ) { match params { @@ -228,6 +236,11 @@ fn increment_sent_notification_stats( stats.num_block.fetch_add(1, Ordering::Relaxed); } } + stats.total_creation_to_queue_time_us.fetch_add( + notification.created_at.elapsed().as_micros() as u64, + Ordering::Relaxed, + ); + stats.maybe_report(); } @@ -245,17 +258,10 @@ impl 
BroadcastHandler { .current_subscriptions .entry(notification.subscription_id) { - increment_sent_notification_stats(entry.get().params(), &self.sent_stats); - - let time_since_created = notification.created_at.elapsed(); - - datapoint_info!( - "pubsub_notifications", - ( - "created_to_queue_time_us", - time_since_created.as_micros() as i64, - i64 - ), + increment_sent_notification_stats( + entry.get().params(), + ¬ification, + &self.sent_stats, ); if notification.is_final { From 33e1dd71f3725ac128522f5e07acb1621b14068d Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 10 Oct 2023 16:35:42 -0500 Subject: [PATCH 307/407] Remove dated Blockstore PurgeType::PrimaryIndex code (#33631) * Update instances of PurgeType::PrimaryIndex to PurgeType::Exact * Remove now unused functions * Remove unused active_transaction_status_index field --- ledger/src/blockstore.rs | 89 +++-------------------- ledger/src/blockstore/blockstore_purge.rs | 50 +------------ 2 files changed, 11 insertions(+), 128 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 4ea608d3471c26..eca79093ddeb0d 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -206,7 +206,6 @@ pub struct Blockstore { address_signatures_cf: LedgerColumn, transaction_memos_cf: LedgerColumn, transaction_status_index_cf: LedgerColumn, - active_transaction_status_index: RwLock, rewards_cf: LedgerColumn, blocktime_cf: LedgerColumn, perf_samples_cf: LedgerColumn, @@ -326,21 +325,11 @@ impl Blockstore { .unwrap_or(0); let last_root = RwLock::new(max_root); - // Get active transaction-status index or 0 - let active_transaction_status_index = db + // Initialize transaction status index if entries are not present + let initialize_transaction_status_index = db .iter::(IteratorMode::Start)? 
- .next(); - let initialize_transaction_status_index = active_transaction_status_index.is_none(); - let active_transaction_status_index = active_transaction_status_index - .and_then(|(_, data)| { - let index0: TransactionStatusIndexMeta = deserialize(&data).unwrap(); - if index0.frozen { - Some(1) - } else { - None - } - }) - .unwrap_or(0); + .next() + .is_none(); measure.stop(); info!("{:?} {}", blockstore_path, measure); @@ -360,7 +349,6 @@ impl Blockstore { address_signatures_cf, transaction_memos_cf, transaction_status_index_cf, - active_transaction_status_index: RwLock::new(active_transaction_status_index), rewards_cf, blocktime_cf, perf_samples_cf, @@ -1125,7 +1113,7 @@ impl Blockstore { .expect("Couldn't fetch from SlotMeta column family") { // Clear all slot related information - self.run_purge(slot, slot, PurgeType::PrimaryIndex) + self.run_purge(slot, slot, PurgeType::Exact) .expect("Purge database operations failed"); // Clear this slot as a next slot from parent @@ -2155,61 +2143,6 @@ impl Blockstore { Ok(()) } - /// Toggles the active primary index between `0` and `1`, and clears the - /// stored max-slot of the frozen index in preparation for pruning. 
- fn toggle_transaction_status_index( - &self, - batch: &mut WriteBatch, - w_active_transaction_status_index: &mut u64, - to_slot: Slot, - ) -> Result> { - let index0 = self.transaction_status_index_cf.get(0)?; - if index0.is_none() { - return Ok(None); - } - let mut index0 = index0.unwrap(); - let mut index1 = self.transaction_status_index_cf.get(1)?.unwrap(); - - if !index0.frozen && !index1.frozen { - index0.frozen = true; - *w_active_transaction_status_index = 1; - batch.put::(0, &index0)?; - Ok(None) - } else { - let purge_target_primary_index = if index0.frozen && to_slot > index0.max_slot { - info!( - "Pruning expired primary index 0 up to slot {} (max requested: {})", - index0.max_slot, to_slot - ); - Some(0) - } else if index1.frozen && to_slot > index1.max_slot { - info!( - "Pruning expired primary index 1 up to slot {} (max requested: {})", - index1.max_slot, to_slot - ); - Some(1) - } else { - None - }; - - if let Some(purge_target_primary_index) = purge_target_primary_index { - *w_active_transaction_status_index = purge_target_primary_index; - if index0.frozen { - index0.max_slot = 0 - }; - index0.frozen = !index0.frozen; - batch.put::(0, &index0)?; - if index1.frozen { - index1.max_slot = 0 - }; - index1.frozen = !index1.frozen; - batch.put::(1, &index1)?; - } - - Ok(purge_target_primary_index) - } - } - fn read_deprecated_transaction_status( &self, index: (Signature, Slot), @@ -4938,7 +4871,7 @@ pub mod tests { let max_purge_slot = 1; blockstore - .run_purge(0, max_purge_slot, PurgeType::PrimaryIndex) + .run_purge(0, max_purge_slot, PurgeType::Exact) .unwrap(); *blockstore.lowest_cleanup_slot.write().unwrap() = max_purge_slot; @@ -9012,7 +8945,7 @@ pub mod tests { blockstore.insert_shreds(shreds, None, false).unwrap(); } assert_eq!(blockstore.lowest_slot(), 1); - blockstore.run_purge(0, 5, PurgeType::PrimaryIndex).unwrap(); + blockstore.run_purge(0, 5, PurgeType::Exact).unwrap(); assert_eq!(blockstore.lowest_slot(), 6); } @@ -9028,12 +8961,10 @@ pub 
mod tests { blockstore.insert_shreds(shreds, None, false).unwrap(); assert_eq!(blockstore.highest_slot().unwrap(), Some(slot)); } - blockstore - .run_purge(5, 10, PurgeType::PrimaryIndex) - .unwrap(); + blockstore.run_purge(5, 10, PurgeType::Exact).unwrap(); assert_eq!(blockstore.highest_slot().unwrap(), Some(4)); - blockstore.run_purge(0, 4, PurgeType::PrimaryIndex).unwrap(); + blockstore.run_purge(0, 4, PurgeType::Exact).unwrap(); assert_eq!(blockstore.highest_slot().unwrap(), None); } @@ -9768,7 +9699,7 @@ pub mod tests { // Cleanup the slot blockstore - .run_purge(slot, slot, PurgeType::PrimaryIndex) + .run_purge(slot, slot, PurgeType::Exact) .expect("Purge database operations failed"); assert!(blockstore.meta(slot).unwrap().is_none()); diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 5c1644aaa032a0..b4e50234106bf0 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -16,9 +16,6 @@ pub enum PurgeType { /// A slower but more accurate way to purge slots by also ensuring higher /// level of consistency between data during the clean up process. Exact, - /// A faster approximation of `Exact` where the purge process only takes - /// care of the primary index and does not update the associated entries. - PrimaryIndex, /// The fastest purge mode that relies on the slot-id based TTL /// compaction filter to do the cleanup. 
CompactionFilter, @@ -158,7 +155,7 @@ impl Blockstore { .batch() .expect("Database Error: Failed to get write batch"); let mut delete_range_timer = Measure::start("delete_range"); - let mut columns_purged = self + let columns_purged = self .db .delete_range_cf::(&mut write_batch, from_slot, to_slot) .is_ok() @@ -218,20 +215,10 @@ impl Blockstore { .db .delete_range_cf::(&mut write_batch, from_slot, to_slot) .is_ok(); - let mut w_active_transaction_status_index = - self.active_transaction_status_index.write().unwrap(); match purge_type { PurgeType::Exact => { self.purge_special_columns_exact(&mut write_batch, from_slot, to_slot)?; } - PurgeType::PrimaryIndex => { - self.purge_special_columns_with_primary_index( - &mut write_batch, - &mut columns_purged, - &mut w_active_transaction_status_index, - to_slot, - )?; - } PurgeType::CompactionFilter => { // No explicit action is required here because this purge type completely and // indefinitely relies on the proper working of compaction filter for those @@ -273,10 +260,6 @@ impl Blockstore { purge_stats.write_batch += write_timer.as_us(); purge_stats.delete_files_in_range += purge_files_in_range_timer.as_us(); - // only drop w_active_transaction_status_index after we do db.write(write_batch); - // otherwise, readers might be confused with inconsistent state between - // self.active_transaction_status_index and RockDb's TransactionStatusIndex contents - drop(w_active_transaction_status_index); Ok(columns_purged) } @@ -458,37 +441,6 @@ impl Blockstore { } Ok(()) } - - /// Purges special columns (using a non-Slot primary-index) by range. Purge - /// occurs if frozen primary index has a max-slot less than the highest slot - /// being purged. 
- fn purge_special_columns_with_primary_index( - &self, - write_batch: &mut WriteBatch, - columns_purged: &mut bool, - w_active_transaction_status_index: &mut u64, - to_slot: Slot, - ) -> Result<()> { - if let Some(purged_index) = self.toggle_transaction_status_index( - write_batch, - w_active_transaction_status_index, - to_slot + 1, - )? { - *columns_purged &= self - .db - .delete_range_cf::(write_batch, purged_index, purged_index) - .is_ok() - & self - .db - .delete_range_cf::( - write_batch, - purged_index, - purged_index, - ) - .is_ok(); - } - Ok(()) - } } #[cfg(test)] From 7c80fa1f67eb0d24775b27fd45d1f68c0670ff7e Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Tue, 10 Oct 2023 15:05:55 -0700 Subject: [PATCH 308/407] Hide wen_restart and do not take default value for now. (#33637) --- validator/src/cli.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index aba402b4257b3b..d9b974426bb2a8 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1385,10 +1385,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .arg( Arg::with_name("wen_restart") .long("wen-restart") + .hidden(hidden_unless_forced()) .value_name("DIR") .takes_value(true) .required(false) - .default_value(&default_args.wen_restart_path) .conflicts_with("wait_for_supermajority") .help( "When specified, the validator will enter Wen Restart mode which From c92977510668a54c8309e6d2c21d1f8e668610f1 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Tue, 10 Oct 2023 15:54:34 -0700 Subject: [PATCH 309/407] Enable running remote cargo registry server (#33629) * Enable running remote cargo registry server * use server URL to configure index --- cargo-registry/src/client.rs | 19 ++++++++++++++++++- cargo-registry/src/dummy_git_index.rs | 9 ++++----- cargo-registry/src/main.rs | 11 +++++------ 3 files changed, 27 insertions(+), 12 deletions(-) diff --git 
a/cargo-registry/src/client.rs b/cargo-registry/src/client.rs index 17432f0ebe27cd..7edad1e0aa6599 100644 --- a/cargo-registry/src/client.rs +++ b/cargo-registry/src/client.rs @@ -1,5 +1,5 @@ use { - clap::{crate_description, crate_name, value_t_or_exit, App, Arg, ArgMatches}, + clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg, ArgMatches}, solana_clap_utils::{ hidden_unless_forced, input_validators::is_url_or_moniker, @@ -37,6 +37,7 @@ impl<'a> ClientConfig<'a> { pub struct Client { pub rpc_client: Arc, pub port: u16, + pub server_url: String, websocket_url: String, commitment: commitment_config::CommitmentConfig, cli_signers: Vec, @@ -112,6 +113,18 @@ impl Client { .takes_value(true) .help("Cargo registry's local TCP port. The server will bind to this port and wait for requests."), ) + .arg( + Arg::with_name("server_url") + .short("s") + .long("server-url") + .value_name("URL_OR_MONIKER") + .takes_value(true) + .global(true) + .validator(is_url_or_moniker) + .help( + "URL where the registry service will be hosted. 
Default: http://0.0.0.0:", + ), + ) .arg( Arg::with_name("commitment") .long("commitment") @@ -192,6 +205,9 @@ impl Client { let port = value_t_or_exit!(matches, "port", u16); + let server_url = + value_t!(matches, "server_url", String).unwrap_or(format!("http://0.0.0.0:{}", port)); + Ok(Client { rpc_client: Arc::new(RpcClient::new_with_timeouts_and_commitment( json_rpc_url.to_string(), @@ -200,6 +216,7 @@ impl Client { confirm_transaction_initial_timeout, )), port, + server_url, websocket_url, commitment, cli_signers: vec![payer_keypair, authority_keypair], diff --git a/cargo-registry/src/dummy_git_index.rs b/cargo-registry/src/dummy_git_index.rs index a04792e9122698..ae5def46b082bb 100644 --- a/cargo-registry/src/dummy_git_index.rs +++ b/cargo-registry/src/dummy_git_index.rs @@ -4,7 +4,6 @@ use { std::{ fs::{self, create_dir_all}, io::ErrorKind, - net::SocketAddr, path::PathBuf, process::Command, }, @@ -19,15 +18,15 @@ struct RegistryConfig { pub struct DummyGitIndex {} impl DummyGitIndex { - pub fn create_or_update_git_repo(root_dir: PathBuf, server_addr: &SocketAddr) { + pub fn create_or_update_git_repo(root_dir: PathBuf, server_url: &str) { create_dir_all(&root_dir).expect("Failed to create root directory"); let expected_config = serde_json::to_string(&RegistryConfig { dl: format!( - "http://{}/api/v1/crates/{{crate}}/{{version}}/download", - server_addr + "{}/api/v1/crates/{{crate}}/{{version}}/download", + server_url ), - api: Some(format!("http://{}", server_addr)), + api: Some(server_url.to_string()), }) .expect("Failed to create expected config"); diff --git a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs index 0749875824c072..d225ca8b112f3e 100644 --- a/cargo-registry/src/main.rs +++ b/cargo-registry/src/main.rs @@ -306,7 +306,9 @@ impl CargoRegistryService { async fn main() { solana_logger::setup_with_default("solana=info"); let client = Arc::new(Client::new().expect("Failed to get RPC Client instance")); - let port = client.port; + + let 
bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), client.port); + DummyGitIndex::create_or_update_git_repo(PathBuf::from("/tmp/dummy-git"), &client.server_url); let registry_service = make_service_fn(move |_| { let client_inner = client.clone(); @@ -317,11 +319,8 @@ async fn main() { } }); - let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port); - DummyGitIndex::create_or_update_git_repo(PathBuf::from("/tmp/dummy-git"), &addr); - - let server = Server::bind(&addr).serve(registry_service); - info!("Server running on on http://{}", addr); + let server = Server::bind(&bind_addr).serve(registry_service); + info!("Server running on on http://{}", bind_addr); let _ = server.await; } From 7006a6f94f58fdfa9169121ab19e010d9b263d9f Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Tue, 10 Oct 2023 16:34:03 -0700 Subject: [PATCH 310/407] Reduce ConnectionPool size used by send-transaction-service (#33548) Reduce pool size for ConnectionCache used by send-transaction-service to 2 from 4. No significant slow down of performance from bench-tps testing using rpc-client which is used by send-transaction-service. This will reduce active connections maintained both on the server and client. This will enable us to cache connections for more nodes. 
--- connection-cache/src/connection_cache.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/connection-cache/src/connection_cache.rs b/connection-cache/src/connection_cache.rs index 4962f815c33129..a674dccd7020fb 100644 --- a/connection-cache/src/connection_cache.rs +++ b/connection-cache/src/connection_cache.rs @@ -22,7 +22,7 @@ use { const MAX_CONNECTIONS: usize = 1024; /// Default connection pool size per remote address -pub const DEFAULT_CONNECTION_POOL_SIZE: usize = 4; +pub const DEFAULT_CONNECTION_POOL_SIZE: usize = 2; #[derive(Clone, Copy, Eq, Hash, PartialEq)] pub enum Protocol { @@ -81,6 +81,7 @@ where connection_config: C, connection_manager: M, ) -> Self { + info!("Creating ConnectionCache {name}, pool size: {connection_pool_size}"); let (sender, receiver) = crossbeam_channel::unbounded(); let map = Arc::new(RwLock::new(IndexMap::with_capacity(MAX_CONNECTIONS))); From 2f090a5882aaa1a0337ae1a584cf396531bbd09a Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Wed, 11 Oct 2023 10:34:39 +0900 Subject: [PATCH 311/407] Define PohRecorder set_bank related test helper methods (#33626) Define PohRecorder set_bank related test methods --- Cargo.lock | 1 + banking-bench/Cargo.toml | 5 ++- banking-bench/src/main.rs | 5 ++- core/Cargo.toml | 1 + core/src/banking_stage.rs | 5 ++- core/src/banking_stage/consume_worker.rs | 15 ++++++-- core/src/banking_stage/consumer.rs | 46 ++++++++++++++++++------ core/src/banking_stage/decision_maker.rs | 5 ++- poh/Cargo.toml | 4 +++ poh/src/poh_recorder.rs | 38 ++++++++++++-------- poh/src/poh_service.rs | 2 +- scripts/check-dev-context-only-utils.sh | 1 + 12 files changed, 95 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66fb0306840620..f240f204be8c0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6587,6 +6587,7 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-perf", + "solana-poh", "solana-runtime", "solana-sdk", "thiserror", diff --git a/banking-bench/Cargo.toml 
b/banking-bench/Cargo.toml index 258f1ba13f5f7c..44453a5e35d2e3 100644 --- a/banking-bench/Cargo.toml +++ b/banking-bench/Cargo.toml @@ -21,12 +21,15 @@ solana-ledger = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } solana-perf = { workspace = true } -solana-poh = { workspace = true } +solana-poh = { workspace = true, features = ["dev-context-only-utils"] } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } solana-version = { workspace = true } +[features] +dev-context-only-utils = [] + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index bb5149f47c85b9..8b8ee2b2723c72 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -549,7 +549,10 @@ fn main() { ); assert!(poh_recorder.read().unwrap().bank().is_none()); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); assert!(poh_recorder.read().unwrap().bank().is_some()); debug!( "new_bank_time: {}us insert_time: {}us poh_time: {}us", diff --git a/core/Cargo.toml b/core/Cargo.toml index c3923613b768a2..2b44685c13eb25 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -89,6 +89,7 @@ serial_test = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-core = { path = ".", features = ["dev-context-only-utils"] } solana-logger = { workspace = true } +solana-poh = { workspace = true, features = ["dev-context-only-utils"] } solana-program-runtime = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index e8b61de94dce2d..a1758616d14f9f 100644 --- 
a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1071,7 +1071,10 @@ mod tests { let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let pubkey = solana_sdk::pubkey::new_rand(); let keypair2 = Keypair::new(); let pubkey2 = solana_sdk::pubkey::new_rand(); diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 1795db97439a50..eb58d45c8ea34f 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -283,7 +283,10 @@ mod tests { .. } = &test_frame; let worker_thread = std::thread::spawn(move || worker.run()); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let pubkey1 = Pubkey::new_unique(); @@ -325,7 +328,10 @@ mod tests { .. } = &test_frame; let worker_thread = std::thread::spawn(move || worker.run()); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let pubkey1 = Pubkey::new_unique(); let pubkey2 = Pubkey::new_unique(); @@ -370,7 +376,10 @@ mod tests { .. 
} = &test_frame; let worker_thread = std::thread::spawn(move || worker.run()); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let pubkey1 = Pubkey::new_unique(); let pubkey2 = Pubkey::new_unique(); diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index af7b5b93e40501..9f9edcf89fd6bb 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -805,7 +805,10 @@ mod tests { let recorder = poh_recorder.new_recorder(); let poh_recorder = Arc::new(RwLock::new(poh_recorder)); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); @@ -966,7 +969,10 @@ mod tests { let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let committer = Committer::new( None, @@ -1093,7 +1099,10 @@ mod tests { let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let committer = Committer::new( None, @@ -1179,7 +1188,10 @@ mod tests { let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let committer = Committer::new( None, @@ -1328,7 +1340,10 @@ mod tests { let recorder = poh_recorder.new_recorder(); let poh_recorder = Arc::new(RwLock::new(poh_recorder)); - 
poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let poh_simulator = simulate_poh(record_receiver, &poh_recorder); @@ -1628,7 +1643,10 @@ mod tests { let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let shreds = entries_to_test_shreds( &entries, @@ -1765,7 +1783,10 @@ mod tests { let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let shreds = entries_to_test_shreds( &entries, @@ -1864,7 +1885,7 @@ mod tests { assert_eq!(buffered_packet_batches.len(), num_conflicting_transactions); // When the working bank in poh_recorder is Some, all packets should be processed. // Multi-Iterator will process them 1-by-1 if all txs are conflicting. - poh_recorder.write().unwrap().set_bank(bank, false); + poh_recorder.write().unwrap().set_bank_for_test(bank); let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); let banking_stage_stats = BankingStageStats::default(); consumer.consume_buffered_packets( @@ -1942,7 +1963,7 @@ mod tests { assert_eq!(buffered_packet_batches.len(), num_conflicting_transactions); // When the working bank in poh_recorder is Some, all packets should be processed. // Multi-Iterator will process them 1-by-1 if all txs are conflicting. - poh_recorder.write().unwrap().set_bank(bank, false); + poh_recorder.write().unwrap().set_bank_for_test(bank); let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); consumer.consume_buffered_packets( &bank_start, @@ -1995,7 +2016,10 @@ mod tests { // When the working bank in poh_recorder is Some, all packets should be processed // except except for retryable errors. 
Manually take the lock of a transaction to // simulate another thread processing a transaction with that lock. - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); let lock_account = transactions[0].message.account_keys[1]; @@ -2116,7 +2140,7 @@ mod tests { assert_eq!(buffered_packet_batches.len(), num_conflicting_transactions); // When the working bank in poh_recorder is Some, all packets should be processed. // Multi-Iterator will process them 1-by-1 if all txs are conflicting. - poh_recorder.write().unwrap().set_bank(bank, false); + poh_recorder.write().unwrap().set_bank_for_test(bank); let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); let banking_stage_stats = BankingStageStats::default(); consumer.consume_buffered_packets( diff --git a/core/src/banking_stage/decision_maker.rs b/core/src/banking_stage/decision_maker.rs index 6d26a9d0fcc02a..a2d19937ad614c 100644 --- a/core/src/banking_stage/decision_maker.rs +++ b/core/src/banking_stage/decision_maker.rs @@ -164,7 +164,10 @@ mod tests { // Currently Leader - Consume { - poh_recorder.write().unwrap().set_bank(bank.clone(), false); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); let decision = decision_maker.make_consume_or_forward_decision(); assert_matches!(decision, BufferedPacketsDecision::Consume(_)); } diff --git a/poh/Cargo.toml b/poh/Cargo.toml index 4df76178d61841..683d668ddfbd7a 100644 --- a/poh/Cargo.toml +++ b/poh/Cargo.toml @@ -27,6 +27,10 @@ bincode = { workspace = true } rand = { workspace = true } solana-logger = { workspace = true } solana-perf = { workspace = true } +solana-poh = { path = ".", features = ["dev-context-only-utils"] } + +[features] +dev-context-only-utils = [] [lib] crate-type = ["lib"] diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 8fb10807af0928..bb14042cb584e9 
100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -642,6 +642,16 @@ impl PohRecorder { let _ = self.flush_cache(false); } + #[cfg(feature = "dev-context-only-utils")] + pub fn set_bank_for_test(&mut self, bank: Arc) { + self.set_bank(bank, false) + } + + #[cfg(test)] + pub fn set_bank_with_transaction_index_for_test(&mut self, bank: Arc) { + self.set_bank(bank, true) + } + // Flush cache will delay flushing the cache for a bank until it past the WorkingBank::min_tick_height // On a record flush will flush the cache at the WorkingBank::min_tick_height, since a record // occurs after the min_tick_height was generated @@ -1219,7 +1229,7 @@ mod tests { Arc::new(AtomicBool::default()), ); - poh_recorder.set_bank(bank, false); + poh_recorder.set_bank_for_test(bank); assert!(poh_recorder.working_bank.is_some()); poh_recorder.clear_bank(); assert!(poh_recorder.working_bank.is_none()); @@ -1253,7 +1263,7 @@ mod tests { let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); // Set a working bank - poh_recorder.set_bank(bank1.clone(), false); + poh_recorder.set_bank_for_test(bank1.clone()); // Tick until poh_recorder.tick_height == working bank's min_tick_height let num_new_ticks = bank1.tick_height() - poh_recorder.tick_height(); @@ -1322,7 +1332,7 @@ mod tests { ); assert_eq!(poh_recorder.tick_height, bank.max_tick_height() + 1); - poh_recorder.set_bank(bank.clone(), false); + poh_recorder.set_bank_for_test(bank.clone()); poh_recorder.tick(); assert_eq!(poh_recorder.tick_height, bank.max_tick_height() + 2); @@ -1363,7 +1373,7 @@ mod tests { bank0.fill_bank_with_ticks_for_tests(); let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); - poh_recorder.set_bank(bank1.clone(), false); + poh_recorder.set_bank_for_test(bank1.clone()); // Let poh_recorder tick up to bank1.tick_height() - 1 for _ in 0..bank1.tick_height() - 1 { poh_recorder.tick() @@ -1404,7 +1414,7 @@ mod tests { Arc::new(AtomicBool::default()), ); - 
poh_recorder.set_bank(bank.clone(), false); + poh_recorder.set_bank_for_test(bank.clone()); let tx = test_tx(); let h1 = hash(b"hello world!"); @@ -1448,7 +1458,7 @@ mod tests { bank0.fill_bank_with_ticks_for_tests(); let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); - poh_recorder.set_bank(bank1.clone(), false); + poh_recorder.set_bank_for_test(bank1.clone()); // Record up to exactly min tick height let min_tick_height = poh_recorder.working_bank.as_ref().unwrap().min_tick_height; @@ -1502,7 +1512,7 @@ mod tests { Arc::new(AtomicBool::default()), ); - poh_recorder.set_bank(bank.clone(), false); + poh_recorder.set_bank_for_test(bank.clone()); let num_ticks_to_max = bank.max_tick_height() - poh_recorder.tick_height; for _ in 0..num_ticks_to_max { poh_recorder.tick(); @@ -1542,7 +1552,7 @@ mod tests { Arc::new(AtomicBool::default()), ); - poh_recorder.set_bank(bank.clone(), true); + poh_recorder.set_bank_with_transaction_index_for_test(bank.clone()); poh_recorder.tick(); assert_eq!( poh_recorder @@ -1616,7 +1626,7 @@ mod tests { bank0.fill_bank_with_ticks_for_tests(); let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); - poh_recorder.set_bank(bank1, false); + poh_recorder.set_bank_for_test(bank1); // Check we can make two ticks without hitting min_tick_height let remaining_ticks_to_min = @@ -1764,7 +1774,7 @@ mod tests { Arc::new(AtomicBool::default()), ); - poh_recorder.set_bank(bank.clone(), false); + poh_recorder.set_bank_for_test(bank.clone()); assert_eq!(bank.slot(), 0); poh_recorder.reset(bank, Some((4, 4))); assert!(poh_recorder.working_bank.is_none()); @@ -1796,7 +1806,7 @@ mod tests { None, Arc::new(AtomicBool::default()), ); - poh_recorder.set_bank(bank, false); + poh_recorder.set_bank_for_test(bank); poh_recorder.clear_bank(); assert!(receiver.try_recv().is_ok()); } @@ -1831,7 +1841,7 @@ mod tests { Arc::new(AtomicBool::default()), ); - poh_recorder.set_bank(bank.clone(), false); + 
poh_recorder.set_bank_for_test(bank.clone()); // Simulate ticking much further than working_bank.max_tick_height let max_tick_height = poh_recorder.working_bank.as_ref().unwrap().max_tick_height; @@ -2126,7 +2136,7 @@ mod tests { // Move the bank up a slot (so that max_tick_height > slot 0's tick_height) let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), 1)); // If we set the working bank, the node should be leader within next 2 slots - poh_recorder.set_bank(bank.clone(), false); + poh_recorder.set_bank_for_test(bank.clone()); assert!(poh_recorder.would_be_leader(2 * bank.ticks_per_slot())); } } @@ -2160,7 +2170,7 @@ mod tests { for _ in 0..(bank.ticks_per_slot() * 3) { poh_recorder.tick(); } - poh_recorder.set_bank(bank.clone(), false); + poh_recorder.set_bank_for_test(bank.clone()); assert!(!bank.is_hash_valid_for_age(&genesis_hash, 0)); assert!(bank.is_hash_valid_for_age(&genesis_hash, 1)); } diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index caa2c2a7c8770a..65806b54532744 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -498,7 +498,7 @@ mod tests { hashes_per_batch, record_receiver, ); - poh_recorder.write().unwrap().set_bank(bank, false); + poh_recorder.write().unwrap().set_bank_for_test(bank); // get some events let mut hashes = 0; diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index fb459f0759729d..33bfbd00d8e4a5 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -29,6 +29,7 @@ source ci/rust-version.sh nightly # reason to bend dev-context-only-utils's original intention and that listed # package isn't part of released binaries. 
declare tainted_packages=( + solana-banking-bench solana-ledger-tool ) From fa21a3d78ee4dccfc050503a8cc7a60db6d6b2c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Oct 2023 13:36:51 +0000 Subject: [PATCH 312/407] build(deps): bump libc from 0.2.148 to 0.2.149 (#33653) * build(deps): bump libc from 0.2.148 to 0.2.149 Bumps [libc](https://github.com/rust-lang/libc) from 0.2.148 to 0.2.149. - [Release notes](https://github.com/rust-lang/libc/releases) - [Commits](https://github.com/rust-lang/libc/compare/0.2.148...0.2.149) --- updated-dependencies: - dependency-name: libc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f240f204be8c0d..e6fe9deca1c36f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2908,9 +2908,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.148" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libgit2-sys" diff --git a/Cargo.toml b/Cargo.toml index 703c61fddaaf32..19a9d3b70898e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -234,7 +234,7 @@ jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" jsonrpc-server-utils = "18.0.0" lazy_static = "1.4.0" -libc = "0.2.148" +libc = "0.2.149" libloading = "0.7.4" libsecp256k1 = "0.6.0" light-poseidon = "0.1.1" diff --git 
a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 0742f193ac8de3..bfb72770567bfe 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2463,9 +2463,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.148" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libloading" From 99542d9b59ef9c6d8ace283e5d893e7329f4c193 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 11 Oct 2023 10:39:09 -0400 Subject: [PATCH 313/407] docs: Removes accounts-on-ramdisk section (#33655) --- docs/src/running-validator/validator-start.md | 34 ------------------- 1 file changed, 34 deletions(-) diff --git a/docs/src/running-validator/validator-start.md b/docs/src/running-validator/validator-start.md index d30533abd54b87..16940dc7030d45 100644 --- a/docs/src/running-validator/validator-start.md +++ b/docs/src/running-validator/validator-start.md @@ -431,40 +431,6 @@ solana-validator ..."); otherwise, when logrotate sends its signal to the validator, the enclosing script will die and take the validator process with it. -### Using a ramdisk with spill-over into swap for the accounts database to reduce SSD wear - -If your machine has plenty of RAM, a tmpfs ramdisk -([tmpfs](https://man7.org/linux/man-pages/man5/tmpfs.5.html)) may be used to hold -the accounts database - -When using tmpfs it's essential to also configure swap on your machine as well to -avoid running out of tmpfs space periodically. - -A 300GB tmpfs partition is recommended, with an accompanying 250GB swap -partition. - -Example configuration: - -1. `sudo mkdir /mnt/solana-accounts` -2. 
Add a 300GB tmpfs partition by adding a new line containing `tmpfs /mnt/solana-accounts tmpfs rw,size=300G,user=sol 0 0` to `/etc/fstab` - (assuming your validator is running under the user "sol"). **CAREFUL: If you - incorrectly edit /etc/fstab your machine may no longer boot** -3. Create at least 250GB of swap space - -- Choose a device to use in place of `SWAPDEV` for the remainder of these instructions. - Ideally select a free disk partition of 250GB or greater on a fast disk. If one is not - available, create a swap file with `sudo dd if=/dev/zero of=/swapfile bs=1MiB count=250KiB`, - set its permissions with `sudo chmod 0600 /swapfile` and use `/swapfile` as `SWAPDEV` for - the remainder of these instructions -- Format the device for usage as swap with `sudo mkswap SWAPDEV` - -4. Add the swap file to `/etc/fstab` with a new line containing `SWAPDEV swap swap defaults 0 0` -5. Enable swap with `sudo swapon -a` and mount the tmpfs with `sudo mount /mnt/solana-accounts/` -6. Confirm swap is active with `free -g` and the tmpfs is mounted with `mount` - -Now add the `--accounts /mnt/solana-accounts` argument to your `solana-validator` -command-line arguments and restart the validator. 
- ### Account indexing As the number of populated accounts on the cluster grows, account-data RPC From cd743dc496d2006c8b6fde8d2ccb1f7d7b06bff4 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Wed, 11 Oct 2023 10:06:38 -0500 Subject: [PATCH 314/407] Update the changelog for v1.18 (#33636) * Update the changelog for v1.18 * Add sub-headings under v1.18.0 --- CHANGELOG.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index df54c187651d98..f329001e138635 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,12 +7,16 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm and follows a [Backwards Compatability Policy](https://docs.solana.com/developing/backwards-compatibility) Release channels have their own copy of this changelog: -* [edge - v1.17](#edge-channel) -* [beta - v1.16](https://github.com/solana-labs/solana/blob/v1.16/CHANGELOG.md) -* [stable - v1.14](https://github.com/solana-labs/solana/blob/v1.14/CHANGELOG.md) +* [edge - v1.18](#edge-channel) +* [beta - v1.17](https://github.com/solana-labs/solana/blob/v1.17/CHANGELOG.md) +* [stable - v1.16](https://github.com/solana-labs/solana/blob/v1.16/CHANGELOG.md) -## [1.17.0] - Unreleased +## [1.18.0] - Unreleased +* Changes +* Upgrade Notes + +## [1.17.0] * Changes * Added a changelog. * Upgrade Notes @@ -49,8 +53,9 @@ This simplifies the process of diffing between versions of the log. ## Maintaining This Changelog ### When creating a new release branch: * Commit to master updating the changelog: - * Remove `Unreleased` annotation from vx.y.0 section. + * Update the edge, beta, and stable links * Create new section: `vx.y+1.0 - Unreleased` + * Remove `Unreleased` annotation from vx.y.0 section. 
* Create vx.y branch starting at that commit * Tag that commit as vx.y.0 From 15debcd6a19f2f4be62fce060b02c46c69e137ab Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 12 Oct 2023 00:54:13 +0800 Subject: [PATCH 315/407] chore: remove unused deps (#33652) --- Cargo.toml | 3 --- 1 file changed, 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 19a9d3b70898e2..6b180f0b373a63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -275,7 +275,6 @@ quinn-proto = "0.10.5" quote = "1.0" rand = "0.8.5" rand_chacha = "0.3.1" -rand_core = "0.6.4" raptorq = "1.7.0" rayon = "1.8.0" rcgen = "0.10.0" @@ -414,10 +413,8 @@ tonic = "0.9.2" tonic-build = "0.9.2" trees = "0.4.2" tungstenite = "0.20.1" -unix_socket2 = "0.5.4" uriparse = "0.6.4" url = "2.4.1" -users = "0.10.0" wasm-bindgen = "0.2" winapi = "0.3.8" winreg = "0.50" From b36d051b510bf2083034263b7321272f174b4a83 Mon Sep 17 00:00:00 2001 From: Brennan Date: Wed, 11 Oct 2023 09:58:06 -0700 Subject: [PATCH 316/407] Update hashes per tick with feature gates (#33600) * Update hashes per tick with feature gates --- runtime/src/bank.rs | 23 +++++++++++- runtime/src/bank/tests.rs | 76 +++++++++++++++++++++++++++++++++++++++ sdk/program/src/clock.rs | 33 +++++++++++++++++ sdk/src/feature_set.rs | 25 +++++++++++++ 4 files changed, 156 insertions(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 28428dabe099f4..b31a9cb2a46f44 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -128,7 +128,8 @@ use { BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK, DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU, - SECONDS_PER_DAY, + SECONDS_PER_DAY, UPDATED_HASHES_PER_TICK2, UPDATED_HASHES_PER_TICK3, + UPDATED_HASHES_PER_TICK4, UPDATED_HASHES_PER_TICK5, UPDATED_HASHES_PER_TICK6, }, epoch_info::EpochInfo, epoch_schedule::EpochSchedule, @@ -8053,6 +8054,26 @@ impl Bank { 
self.apply_updated_hashes_per_tick(DEFAULT_HASHES_PER_TICK); } + if new_feature_activations.contains(&feature_set::update_hashes_per_tick2::id()) { + self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK2); + } + + if new_feature_activations.contains(&feature_set::update_hashes_per_tick3::id()) { + self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK3); + } + + if new_feature_activations.contains(&feature_set::update_hashes_per_tick4::id()) { + self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK4); + } + + if new_feature_activations.contains(&feature_set::update_hashes_per_tick5::id()) { + self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK5); + } + + if new_feature_activations.contains(&feature_set::update_hashes_per_tick6::id()) { + self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK6); + } + if new_feature_activations.contains(&feature_set::programify_feature_gate_program::id()) { let datapoint_name = "bank-progamify_feature_gate_program"; if let Err(e) = replace_account::replace_empty_account_with_upgradeable_program( diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 97c08289fbc0e7..9dd27bfd3254bf 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -62,6 +62,8 @@ use { clock::{ BankId, Epoch, Slot, UnixTimestamp, DEFAULT_HASHES_PER_TICK, DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, MAX_RECENT_BLOCKHASHES, + UPDATED_HASHES_PER_TICK2, UPDATED_HASHES_PER_TICK3, UPDATED_HASHES_PER_TICK4, + UPDATED_HASHES_PER_TICK5, UPDATED_HASHES_PER_TICK6, }, compute_budget::ComputeBudgetInstruction, entrypoint::MAX_PERMITTED_DATA_INCREASE, @@ -12230,6 +12232,80 @@ fn test_feature_activation_idempotent() { assert_eq!(bank.hashes_per_tick, Some(DEFAULT_HASHES_PER_TICK)); } +#[test] +fn test_feature_hashes_per_tick() { + let mut genesis_config = GenesisConfig::default(); + const HASHES_PER_TICK_START: u64 = 3; + genesis_config.poh_config.hashes_per_tick = 
Some(HASHES_PER_TICK_START); + + let mut bank = Bank::new_for_tests(&genesis_config); + assert_eq!(bank.hashes_per_tick, Some(HASHES_PER_TICK_START)); + + // Don't activate feature + bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); + assert_eq!(bank.hashes_per_tick, Some(HASHES_PER_TICK_START)); + + // Activate feature + let feature_account_balance = + std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); + bank.store_account( + &feature_set::update_hashes_per_tick::id(), + &feature::create_account(&Feature { activated_at: None }, feature_account_balance), + ); + bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); + assert_eq!(bank.hashes_per_tick, Some(DEFAULT_HASHES_PER_TICK)); + + // Activate feature + let feature_account_balance = + std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); + bank.store_account( + &feature_set::update_hashes_per_tick2::id(), + &feature::create_account(&Feature { activated_at: None }, feature_account_balance), + ); + bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); + assert_eq!(bank.hashes_per_tick, Some(UPDATED_HASHES_PER_TICK2)); + + // Activate feature + let feature_account_balance = + std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); + bank.store_account( + &feature_set::update_hashes_per_tick3::id(), + &feature::create_account(&Feature { activated_at: None }, feature_account_balance), + ); + bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); + assert_eq!(bank.hashes_per_tick, Some(UPDATED_HASHES_PER_TICK3)); + + // Activate feature + let feature_account_balance = + std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); + bank.store_account( + &feature_set::update_hashes_per_tick4::id(), + &feature::create_account(&Feature { activated_at: None }, feature_account_balance), + ); + 
bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); + assert_eq!(bank.hashes_per_tick, Some(UPDATED_HASHES_PER_TICK4)); + + // Activate feature + let feature_account_balance = + std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); + bank.store_account( + &feature_set::update_hashes_per_tick5::id(), + &feature::create_account(&Feature { activated_at: None }, feature_account_balance), + ); + bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); + assert_eq!(bank.hashes_per_tick, Some(UPDATED_HASHES_PER_TICK5)); + + // Activate feature + let feature_account_balance = + std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); + bank.store_account( + &feature_set::update_hashes_per_tick6::id(), + &feature::create_account(&Feature { activated_at: None }, feature_account_balance), + ); + bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); + assert_eq!(bank.hashes_per_tick, Some(UPDATED_HASHES_PER_TICK6)); +} + #[test_case(true)] #[test_case(false)] fn test_stake_account_consistency_with_rent_epoch_max_feature( diff --git a/sdk/program/src/clock.rs b/sdk/program/src/clock.rs index 45f49f218b15c7..e988bafb21d354 100644 --- a/sdk/program/src/clock.rs +++ b/sdk/program/src/clock.rs @@ -44,10 +44,43 @@ pub const DEFAULT_TICKS_PER_SLOT: u64 = 64; // GCP n1-standard hardware and also a xeon e5-2520 v4 are about this rate of hashes/s pub const DEFAULT_HASHES_PER_SECOND: u64 = 2_000_000; +// Empirical sampling of mainnet validator hash rate showed the following stake +// percentages can exceed the designated hash rates as of July 2023: +// 97.6% +pub const UPDATED_HASHES_PER_SECOND_2: u64 = 2_800_000; +// 96.2% +pub const UPDATED_HASHES_PER_SECOND_3: u64 = 4_400_000; +// 96.2% +pub const UPDATED_HASHES_PER_SECOND_4: u64 = 7_600_000; +// 96.2% +pub const UPDATED_HASHES_PER_SECOND_5: u64 = 9_200_000; +// 96.2% +pub const 
UPDATED_HASHES_PER_SECOND_6: u64 = 10_000_000; + #[cfg(test)] static_assertions::const_assert_eq!(DEFAULT_HASHES_PER_TICK, 12_500); pub const DEFAULT_HASHES_PER_TICK: u64 = DEFAULT_HASHES_PER_SECOND / DEFAULT_TICKS_PER_SECOND; +#[cfg(test)] +static_assertions::const_assert_eq!(UPDATED_HASHES_PER_TICK2, 17_500); +pub const UPDATED_HASHES_PER_TICK2: u64 = UPDATED_HASHES_PER_SECOND_2 / DEFAULT_TICKS_PER_SECOND; + +#[cfg(test)] +static_assertions::const_assert_eq!(UPDATED_HASHES_PER_TICK3, 27_500); +pub const UPDATED_HASHES_PER_TICK3: u64 = UPDATED_HASHES_PER_SECOND_3 / DEFAULT_TICKS_PER_SECOND; + +#[cfg(test)] +static_assertions::const_assert_eq!(UPDATED_HASHES_PER_TICK4, 47_500); +pub const UPDATED_HASHES_PER_TICK4: u64 = UPDATED_HASHES_PER_SECOND_4 / DEFAULT_TICKS_PER_SECOND; + +#[cfg(test)] +static_assertions::const_assert_eq!(UPDATED_HASHES_PER_TICK5, 57_500); +pub const UPDATED_HASHES_PER_TICK5: u64 = UPDATED_HASHES_PER_SECOND_5 / DEFAULT_TICKS_PER_SECOND; + +#[cfg(test)] +static_assertions::const_assert_eq!(UPDATED_HASHES_PER_TICK6, 62_500); +pub const UPDATED_HASHES_PER_TICK6: u64 = UPDATED_HASHES_PER_SECOND_6 / DEFAULT_TICKS_PER_SECOND; + // 1 Dev Epoch = 400 ms * 8192 ~= 55 minutes pub const DEFAULT_DEV_SLOTS_PER_EPOCH: u64 = 8192; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 9ec56b03e0e3bf..bc81d781a7c176 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -704,6 +704,26 @@ pub mod programify_feature_gate_program { solana_sdk::declare_id!("8GdovDzVwWU5edz2G697bbB7GZjrUc6aQZLWyNNAtHdg"); } +pub mod update_hashes_per_tick2 { + solana_sdk::declare_id!("EWme9uFqfy1ikK1jhJs8fM5hxWnK336QJpbscNtizkTU"); +} + +pub mod update_hashes_per_tick3 { + solana_sdk::declare_id!("8C8MCtsab5SsfammbzvYz65HHauuUYdbY2DZ4sznH6h5"); +} + +pub mod update_hashes_per_tick4 { + solana_sdk::declare_id!("8We4E7DPwF2WfAN8tRTtWQNhi98B99Qpuj7JoZ3Aikgg"); +} + +pub mod update_hashes_per_tick5 { + 
solana_sdk::declare_id!("BsKLKAn1WM4HVhPRDsjosmqSg2J8Tq5xP2s2daDS6Ni4"); +} + +pub mod update_hashes_per_tick6 { + solana_sdk::declare_id!("FKu1qYwLQSiehz644H6Si65U5ZQ2cp9GxsyFUfYcuADv"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -875,6 +895,11 @@ lazy_static! { (better_error_codes_for_tx_lamport_check::id(), "better error codes for tx lamport check #33353"), (enable_alt_bn128_compression_syscall::id(), "add alt_bn128 compression syscalls"), (programify_feature_gate_program::id(), "move feature gate activation logic to an on-chain program #32783"), + (update_hashes_per_tick2::id(), "Update desired hashes per tick to 2.8M"), + (update_hashes_per_tick3::id(), "Update desired hashes per tick to 4.4M"), + (update_hashes_per_tick4::id(), "Update desired hashes per tick to 7.6M"), + (update_hashes_per_tick5::id(), "Update desired hashes per tick to 9.2M"), + (update_hashes_per_tick6::id(), "Update desired hashes per tick to 10M"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 0f82662a7f86c9595c15bb10d60c193b06c0cc80 Mon Sep 17 00:00:00 2001 From: Jeff Biseda Date: Wed, 11 Oct 2023 09:58:39 -0700 Subject: [PATCH 317/407] allow empty string for SOLANA_METRICS_CONFIG sanity checking (#33515) --- core/src/validator.rs | 9 +++++---- metrics/src/metrics.rs | 7 +++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index e5eb3544ab468f..b206cf87b30d8c 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -561,6 +561,10 @@ impl Validator { )); } + let genesis_config = + open_genesis_config(ledger_path, config.max_genesis_archive_unpacked_size); + metrics_config_sanity_check(genesis_config.cluster_type)?; + if let Some(expected_shred_version) = config.expected_shred_version { if let Some(wait_for_supermajority_slot) = config.wait_for_supermajority { *start_progress.write().unwrap() = 
ValidatorStartProgress::CleaningBlockStore; @@ -1334,14 +1338,11 @@ impl Validator { config.generator_config.clone(), ); - let cluster_type = bank_forks.read().unwrap().root_bank().cluster_type(); - metrics_config_sanity_check(cluster_type)?; - datapoint_info!( "validator-new", ("id", id.to_string(), String), ("version", solana_version::version!(), String), - ("cluster_type", cluster_type as u32, i64), + ("cluster_type", genesis_config.cluster_type as u32, i64), ); *start_progress.write().unwrap() = ValidatorStartProgress::Running; diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs index df761a6ac2b9b0..b989ada6861fd1 100644 --- a/metrics/src/metrics.rs +++ b/metrics/src/metrics.rs @@ -25,7 +25,7 @@ type CounterMap = HashMap<(&'static str, u64), CounterPoint>; #[derive(Debug, Error)] pub enum MetricsError { #[error(transparent)] - VarError(#[from] std::env::VarError), + VarError(#[from] env::VarError), #[error(transparent)] ReqwestError(#[from] reqwest::Error), #[error("SOLANA_METRICS_CONFIG is invalid: '{0}'")] @@ -405,6 +405,9 @@ impl MetricsConfig { fn get_metrics_config() -> Result { let mut config = MetricsConfig::default(); let config_var = env::var("SOLANA_METRICS_CONFIG")?; + if config_var.is_empty() { + Err(env::VarError::NotPresent)?; + } for pair in config_var.split(',') { let nv: Vec<_> = pair.split('=').collect(); @@ -431,7 +434,7 @@ fn get_metrics_config() -> Result { pub fn metrics_config_sanity_check(cluster_type: ClusterType) -> Result<(), MetricsError> { let config = match get_metrics_config() { Ok(config) => config, - Err(MetricsError::VarError(std::env::VarError::NotPresent)) => return Ok(()), + Err(MetricsError::VarError(env::VarError::NotPresent)) => return Ok(()), Err(e) => return Err(e), }; match &config.db[..] 
{ From 6009d49cc35e726d4022018129c11dfb1a62aab2 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 11 Oct 2023 13:13:10 -0500 Subject: [PATCH 318/407] Remove dummy entries in Blockstore special columns (part 2) (#33649) * Always call initialize_transaction_status_index() at startup, doing so will ensure dummy entries are actually cleaned * Rename initialize_transaction_status_index() * Stop initializing TransactionStatusIndex column entries, these are no longer needed and old software will initialize if needed --- ledger/src/blockstore.rs | 25 ++++++----------------- ledger/src/blockstore/blockstore_purge.rs | 4 ++-- 2 files changed, 8 insertions(+), 21 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index eca79093ddeb0d..2894733e66cc8d 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -325,12 +325,6 @@ impl Blockstore { .unwrap_or(0); let last_root = RwLock::new(max_root); - // Initialize transaction status index if entries are not present - let initialize_transaction_status_index = db - .iter::(IteratorMode::Start)? - .next() - .is_none(); - measure.stop(); info!("{:?} {}", blockstore_path, measure); let blockstore = Blockstore { @@ -364,9 +358,8 @@ impl Blockstore { lowest_cleanup_slot: RwLock::::default(), slots_stats: SlotsStats::default(), }; - if initialize_transaction_status_index { - blockstore.initialize_transaction_status_index()?; - } + blockstore.cleanup_old_entries()?; + Ok(blockstore) } @@ -2109,16 +2102,10 @@ impl Blockstore { .collect() } - /// Initializes the TransactionStatusIndex column family with two records, `0` and `1`, - /// which are used as the primary index for entries in the TransactionStatus and - /// AddressSignatures columns. At any given time, one primary index is active (ie. new records - /// are stored under this index), the other is frozen. 
- fn initialize_transaction_status_index(&self) -> Result<()> { - self.transaction_status_index_cf - .put(0, &TransactionStatusIndexMeta::default())?; - self.transaction_status_index_cf - .put(1, &TransactionStatusIndexMeta::default())?; - + fn cleanup_old_entries(&self) -> Result<()> { + if !self.is_primary_access() { + return Ok(()); + } // If present, delete dummy entries inserted by old software // https://github.com/solana-labs/solana/blob/bc2b372/ledger/src/blockstore.rs#L2130-L2137 let transaction_status_dummy_key = cf::TransactionStatus::as_index(2); diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index b4e50234106bf0..edd35ba899fb58 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -761,7 +761,7 @@ pub mod tests { .transaction_status_index_cf .get(0) .unwrap() - .unwrap(); + .unwrap_or_default(); index0.frozen = true; index0.max_slot = 4; blockstore @@ -772,7 +772,7 @@ pub mod tests { .transaction_status_index_cf .get(1) .unwrap() - .unwrap(); + .unwrap_or_default(); index1.frozen = false; index1.max_slot = 9; blockstore From 295d610f4309062e69cac1f71e25ee8f5cefdeaa Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Wed, 11 Oct 2023 11:27:09 -0700 Subject: [PATCH 319/407] We need to publish solana-wen-restart so we can publish 1.18.0 later. 
(#33662) --- wen-restart/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wen-restart/Cargo.toml b/wen-restart/Cargo.toml index b74871801872af..a2e6e5c1ac885d 100644 --- a/wen-restart/Cargo.toml +++ b/wen-restart/Cargo.toml @@ -8,7 +8,7 @@ repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } -publish = false +publish = true [dependencies] log = { workspace = true } From 21b2fce6f47899eb3f8d6be1f18431c808349811 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:00:30 -0700 Subject: [PATCH 320/407] Correct num_packets stats (#33630) Do not count the empty packets from cache warmer in num_packets stats as they are not sent. --- quic-client/src/nonblocking/quic_client.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index 66a55f8f3fd78f..e2c861df48fa57 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -582,10 +582,13 @@ impl ClientConnection for QuicClientConnection { async fn send_data(&self, data: &[u8]) -> TransportResult<()> { let stats = Arc::new(ClientStats::default()); + // When data is empty which is from cache warmer, we are not sending packets actually, do not count it in + let num_packets = if data.is_empty() { 0 } else { 1 }; self.client .send_buffer(data, &stats, self.connection_stats.clone()) .map_ok(|v| { - self.connection_stats.add_client_stats(&stats, 1, true); + self.connection_stats + .add_client_stats(&stats, num_packets, true); v }) .map_err(|e| { @@ -595,7 +598,8 @@ impl ClientConnection for QuicClientConnection { e ); datapoint_warn!("send-wire-async", ("failure", 1, i64),); - self.connection_stats.add_client_stats(&stats, 1, false); + self.connection_stats + .add_client_stats(&stats, num_packets, false); 
e.into() }) .await From 0ad519961b7314d583dab2215b3fdc8912de6d00 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Oct 2023 12:19:21 +0800 Subject: [PATCH 321/407] build(deps): bump num-derive from 0.3.3 to 0.4.0 (#33654) * build(deps): bump num-derive from 0.3.3 to 0.4.0 Bumps [num-derive](https://github.com/rust-num/num-derive) from 0.3.3 to 0.4.0. - [Changelog](https://github.com/rust-num/num-derive/blob/master/RELEASES.md) - [Commits](https://github.com/rust-num/num-derive/compare/num-derive-0.3.3...num-derive-0.4.0) --- updated-dependencies: - dependency-name: num-derive dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * updates programs/sbf/Cargo.lock --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: behzad nouri --- Cargo.lock | 20 ++++++++++---------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 20 ++++++++++---------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e6fe9deca1c36f..5324efde6068e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5276,7 +5276,7 @@ dependencies = [ "memmap2", "memoffset 0.9.0", "modular-bitfield", - "num-derive 0.3.3", + "num-derive 0.4.0", "num-traits", "num_cpus", "num_enum 0.7.0", @@ -5320,7 +5320,7 @@ dependencies = [ "bincode", "bytemuck", "log", - "num-derive 0.3.3", + "num-derive 0.4.0", "num-traits", "rustc_version 0.4.0", "serde", @@ -6642,7 +6642,7 @@ dependencies = [ "log", "memoffset 0.9.0", "num-bigint 0.4.4", - "num-derive 0.3.3", + "num-derive 0.4.0", "num-traits", "parking_lot 0.12.1", "rand 0.8.5", @@ -6678,7 +6678,7 @@ dependencies = [ "libc", "libsecp256k1", "log", - "num-derive 0.3.3", + "num-derive 0.4.0", "num-traits", "percentage", "rand 0.8.5", @@ -6791,7 +6791,7 @@ dependencies = [ "dialoguer", "hidapi", "log", - "num-derive 0.3.3", + "num-derive 
0.4.0", "num-traits", "parking_lot 0.12.1", "qstring", @@ -6984,7 +6984,7 @@ dependencies = [ "memmap2", "memoffset 0.9.0", "modular-bitfield", - "num-derive 0.3.3", + "num-derive 0.4.0", "num-traits", "num_cpus", "num_enum 0.7.0", @@ -7064,7 +7064,7 @@ dependencies = [ "libsecp256k1", "log", "memmap2", - "num-derive 0.3.3", + "num-derive 0.4.0", "num-traits", "num_enum 0.7.0", "pbkdf2 0.11.0", @@ -7570,7 +7570,7 @@ dependencies = [ "assert_matches", "bincode", "log", - "num-derive 0.3.3", + "num-derive 0.4.0", "num-traits", "rustc_version 0.4.0", "serde", @@ -7653,7 +7653,7 @@ dependencies = [ "bytemuck", "criterion", "curve25519-dalek", - "num-derive 0.3.3", + "num-derive 0.4.0", "num-traits", "solana-program-runtime", "solana-sdk", @@ -7686,7 +7686,7 @@ dependencies = [ "itertools", "lazy_static", "merlin", - "num-derive 0.3.3", + "num-derive 0.4.0", "num-traits", "rand 0.7.3", "serde", diff --git a/Cargo.toml b/Cargo.toml index 6b180f0b373a63..c63b62a47c9782 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -250,7 +250,7 @@ nix = "0.26.4" num-bigint = "0.4.4" num_cpus = "1.16.0" num_enum = "0.7.0" -num-derive = "0.3" +num-derive = "0.4" num-traits = "0.2" openssl = "0.10" ouroboros = "0.15.6" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index bfb72770567bfe..3c8bfa22636b32 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4489,7 +4489,7 @@ dependencies = [ "lz4", "memmap2", "modular-bitfield", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", "num_cpus", "num_enum 0.7.0", @@ -4530,7 +4530,7 @@ dependencies = [ "bincode", "bytemuck", "log", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", "rustc_version", "serde", @@ -5258,7 +5258,7 @@ dependencies = [ "log", "memoffset 0.9.0", "num-bigint 0.4.4", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", "parking_lot 0.12.1", "rand 0.8.5", @@ -5290,7 +5290,7 @@ dependencies = [ "itertools", "libc", "log", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", 
"percentage", "rand 0.8.5", @@ -5395,7 +5395,7 @@ dependencies = [ "console", "dialoguer", "log", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", "parking_lot 0.12.1", "qstring", @@ -5542,7 +5542,7 @@ dependencies = [ "lz4", "memmap2", "modular-bitfield", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", "num_cpus", "num_enum 0.7.0", @@ -6037,7 +6037,7 @@ dependencies = [ "libsecp256k1 0.6.0", "log", "memmap2", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", "num_enum 0.7.0", "pbkdf2 0.11.0", @@ -6426,7 +6426,7 @@ version = "1.18.0" dependencies = [ "bincode", "log", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", "rustc_version", "serde", @@ -6464,7 +6464,7 @@ name = "solana-zk-token-proof-program" version = "1.18.0" dependencies = [ "bytemuck", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", "solana-program-runtime", "solana-sdk", @@ -6485,7 +6485,7 @@ dependencies = [ "itertools", "lazy_static", "merlin", - "num-derive 0.3.0", + "num-derive 0.4.0", "num-traits", "rand 0.7.3", "serde", From d286c00a30ec326b11732c137cc930a9737e04c2 Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 12 Oct 2023 01:12:33 -0600 Subject: [PATCH 322/407] Blockstore: track when all primary-index data has been purged (#33668) * Fix typo * Add Blockstore::highest_primary_index_slot * Add getter * Populate highest_primary_index_slot on boot * Wipe highest_primary_index_slot when surpassed by oldest_slot * Update highest_primary_index_slot in exact purge * Return indexes early if highest_primary_index_slot has been cleared * Limit read_transaction_status based on highest_primary_index_slot * Limit read_transaction_memos based on highest_primary_index_slot * Use highest_primary_index_slot to add early return to get_transaction_status_with_counter * Fixup tests * Use existing getter for highest_primary_index_slot Co-authored-by: steviez --------- Co-authored-by: steviez --- ledger/src/blockstore.rs | 64 +++++++++++++++++++++-- 
ledger/src/blockstore/blockstore_purge.rs | 20 ++++++- 2 files changed, 79 insertions(+), 5 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 2894733e66cc8d..080be2fafd5f44 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -206,6 +206,7 @@ pub struct Blockstore { address_signatures_cf: LedgerColumn, transaction_memos_cf: LedgerColumn, transaction_status_index_cf: LedgerColumn, + highest_primary_index_slot: RwLock>, rewards_cf: LedgerColumn, blocktime_cf: LedgerColumn, perf_samples_cf: LedgerColumn, @@ -343,6 +344,7 @@ impl Blockstore { address_signatures_cf, transaction_memos_cf, transaction_status_index_cf, + highest_primary_index_slot: RwLock::>::default(), rewards_cf, blocktime_cf, perf_samples_cf, @@ -359,6 +361,7 @@ impl Blockstore { slots_stats: SlotsStats::default(), }; blockstore.cleanup_old_entries()?; + blockstore.update_highest_primary_index_slot()?; Ok(blockstore) } @@ -2130,6 +2133,43 @@ impl Blockstore { Ok(()) } + fn get_highest_primary_index_slot(&self) -> Option { + *self.highest_primary_index_slot.read().unwrap() + } + + fn set_highest_primary_index_slot(&self, slot: Option) { + *self.highest_primary_index_slot.write().unwrap() = slot; + } + + fn update_highest_primary_index_slot(&self) -> Result<()> { + let iterator = self.transaction_status_index_cf.iter(IteratorMode::Start)?; + let mut highest_primary_index_slot = None; + for (_, data) in iterator { + let meta: TransactionStatusIndexMeta = deserialize(&data).unwrap(); + if highest_primary_index_slot.is_none() + || highest_primary_index_slot.is_some_and(|slot| slot < meta.max_slot) + { + highest_primary_index_slot = Some(meta.max_slot); + } + } + if highest_primary_index_slot.is_some() { + self.set_highest_primary_index_slot(highest_primary_index_slot); + } + Ok(()) + } + + fn maybe_cleanup_highest_primary_index_slot(&self, oldest_slot: Slot) -> Result<()> { + let mut w_highest_primary_index_slot = 
self.highest_primary_index_slot.write().unwrap(); + if let Some(highest_primary_index_slot) = *w_highest_primary_index_slot { + if oldest_slot > highest_primary_index_slot { + *w_highest_primary_index_slot = None; + self.transaction_status_index_cf.delete(0)?; + self.transaction_status_index_cf.delete(1)?; + } + } + Ok(()) + } + fn read_deprecated_transaction_status( &self, index: (Signature, Slot), @@ -2157,7 +2197,11 @@ impl Blockstore { index: (Signature, Slot), ) -> Result> { let result = self.transaction_status_cf.get_protobuf(index)?; - if result.is_none() { + if result.is_none() + && self + .get_highest_primary_index_slot() + .is_some_and(|highest_slot| highest_slot >= index.1) + { self.read_deprecated_transaction_status(index) } else { Ok(result.and_then(|meta| meta.try_into().ok())) @@ -2199,7 +2243,11 @@ impl Blockstore { slot: Slot, ) -> Result> { let memos = self.transaction_memos_cf.get((signature, slot))?; - if memos.is_none() { + if memos.is_none() + && self + .get_highest_primary_index_slot() + .is_some_and(|highest_slot| highest_slot >= slot) + { self.transaction_memos_cf .get_raw(&cf::TransactionMemos::deprecated_key(signature)) } else { @@ -2283,6 +2331,9 @@ impl Blockstore { return Ok((status, counter)); } + if self.get_highest_primary_index_slot().is_none() { + return Ok((None, counter)); + } for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.transaction_status_cf @@ -2595,7 +2646,7 @@ impl Blockstore { let mut iterator = self.address_signatures_cf .iter_current_index_filtered(IteratorMode::From( - // Ragardless of whether a `before` signature is provided, the latest relevant + // Regardless of whether a `before` signature is provided, the latest relevant // `slot` is queried directly with the `find_address_signatures_for_slot()` // call above. 
Thus, this iterator starts at the lowest entry of `address, // slot` and iterates backwards to continue reporting the next earliest @@ -7690,6 +7741,7 @@ pub mod tests { transaction_status_cf .put_deprecated_protobuf((0, signature2, 4), &status) .unwrap(); + blockstore.set_highest_primary_index_slot(Some(4)); transaction_status_cf .put_protobuf((signature3, 4), &status) @@ -8149,6 +8201,12 @@ pub mod tests { &AddressSignatureMeta { writeable: false }, )?; } + let mut w_highest_primary_index_slot = self.highest_primary_index_slot.write().unwrap(); + if w_highest_primary_index_slot.is_none() + || w_highest_primary_index_slot.is_some_and(|highest_slot| highest_slot < slot) + { + *w_highest_primary_index_slot = Some(slot); + } Ok(()) } } diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index edd35ba899fb58..d643dd2c7ef075 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -1,6 +1,8 @@ use { - super::*, crate::blockstore_db::ColumnIndexDeprecation, solana_sdk::message::AccountKeys, - std::time::Instant, + super::*, + crate::blockstore_db::ColumnIndexDeprecation, + solana_sdk::message::AccountKeys, + std::{cmp::max, time::Instant}, }; #[derive(Default)] @@ -73,6 +75,10 @@ impl Blockstore { // with Slot::default() for initial compaction filter behavior consistency let to_slot = to_slot.checked_add(1).unwrap(); self.db.set_oldest_slot(to_slot); + + if let Err(err) = self.maybe_cleanup_highest_primary_index_slot(to_slot) { + warn!("Could not clean up TransactionStatusIndex: {err:?}"); + } } pub fn purge_and_compact_slots(&self, from_slot: Slot, to_slot: Slot) { @@ -364,8 +370,12 @@ impl Blockstore { let mut index0 = self.transaction_status_index_cf.get(0)?.unwrap_or_default(); let mut index1 = self.transaction_status_index_cf.get(1)?.unwrap_or_default(); + let highest_primary_index_slot = self.get_highest_primary_index_slot(); let slot_indexes = |slot: Slot| -> Vec { let 
mut indexes = vec![]; + if highest_primary_index_slot.is_none() { + return indexes; + } if slot <= index0.max_slot && (index0.frozen || slot >= index1.max_slot) { indexes.push(0); } @@ -431,13 +441,19 @@ impl Blockstore { } } } + let mut update_highest_primary_index_slot = false; if index0.max_slot >= from_slot && index0.max_slot <= to_slot { index0.max_slot = from_slot.saturating_sub(1); batch.put::(0, &index0)?; + update_highest_primary_index_slot = true; } if index1.max_slot >= from_slot && index1.max_slot <= to_slot { index1.max_slot = from_slot.saturating_sub(1); batch.put::(1, &index1)?; + update_highest_primary_index_slot = true + } + if update_highest_primary_index_slot { + self.set_highest_primary_index_slot(Some(max(index0.max_slot, index1.max_slot))) } Ok(()) } From 1a2c7f106ee6e636516131202289a59ce45d730f Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Thu, 12 Oct 2023 09:05:10 -0500 Subject: [PATCH 323/407] fix typo in comments (#33665) Co-authored-by: HaoranYi --- accounts-db/src/ancient_append_vecs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 2bfe2a094edf84..770eb0be73a741 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -240,7 +240,7 @@ struct WriteAncientAccounts<'a> { impl AccountsDb { /// Combine account data from storages in 'sorted_slots' into packed storages. /// This keeps us from accumulating storages for each slot older than an epoch. - /// Ater this function the number of alive roots is <= # alive roots when it was called. + /// After this function the number of alive roots is <= # alive roots when it was called. /// In practice, the # of alive roots after will be significantly less than # alive roots when called. 
/// Trying to reduce # roots and storages (one per root) required to store all the data in ancient slots pub(crate) fn combine_ancient_slots_packed( From c354879a3f61362d5f7bf79919e2f2feae54a421 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 12 Oct 2023 07:11:20 -0700 Subject: [PATCH 324/407] stop padding new append vecs to page size (#33658) * stop padding new append vecs to page size * fix test * fix another test * for creating test accounts, allocate larger like we used to --- accounts-db/src/accounts_db.rs | 40 +++++++++++++--------------------- 1 file changed, 15 insertions(+), 25 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 8bfeb3fb289b85..b4162eeecabaa3 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5733,11 +5733,7 @@ impl AccountsDb { .create_store_count .fetch_add(1, Ordering::Relaxed); let path_index = thread_rng().gen_range(0..paths.len()); - let store = Arc::new(self.new_storage_entry( - slot, - Path::new(&paths[path_index]), - Self::page_align(size), - )); + let store = Arc::new(self.new_storage_entry(slot, Path::new(&paths[path_index]), size)); debug!( "creating store: {} slot: {} len: {} size: {} from: {} path: {:?}", @@ -9913,7 +9909,7 @@ pub mod test_utils { // allocate an append vec for this slot that can hold all the test accounts. This prevents us from creating more than 1 append vec for this slot. 
_ = accounts.accounts_db.create_and_insert_store( slot, - bytes_required as u64, + AccountsDb::page_align(bytes_required as u64), "create_test_accounts", ); } @@ -15054,16 +15050,13 @@ pub mod tests { let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); // Store into slot 0 - db.store_cached((0, &[(&account_key1, &account1)][..]), None); - db.store_cached((0, &[(&account_key2, &account1)][..]), None); + // This has to be done uncached since we are trying to add another account to the append vec AFTER it has been flushed. + // This doesn't work if the flush creates an append vec of exactly the right size. + // Normal operations NEVER write the same account to the same append vec twice during a write cache flush. + db.store_uncached(0, &[(&account_key1, &account1)][..]); + db.store_uncached(0, &[(&account_key2, &account1)][..]); db.add_root(0); if !do_intra_cache_clean { - // If we don't want the cache doing purges before flush, - // then we cannot flush multiple roots at once, otherwise the later - // roots will clean the earlier roots before they are stored. - // Thus flush the roots individually - db.flush_accounts_cache(true, None); - // Add an additional ref within the same slot to pubkey 1 db.store_uncached(0, &[(&account_key1, &account1)]); } @@ -17391,17 +17384,14 @@ pub mod tests { assert_eq!(shrink_collect.aligned_total_bytes, 0); assert_eq!(shrink_collect.alive_total_bytes, 0); } - // these constants are multiples of page size (4096). - // They are determined by what size append vec gets created when the write cache is flushed to an append vec. - // Thus, they are dependent on the # of accounts that are written. They were identified by hitting the asserts and noting the value - // for shrink_collect.original_bytes at each account_count and then encoding it here. 
- let expected_capacity = if account_count >= 100 { - 16384 - } else if account_count >= 50 { - 8192 - } else { - 4096 - }; + // expected_capacity is determined by what size append vec gets created when the write cache is flushed to an append vec. + let mut expected_capacity = + (account_count * aligned_stored_size(space)) as u64; + if append_opposite_zero_lamport_account && space != 0 { + // zero lamport accounts always write space = 0 + expected_capacity -= space as u64; + } + assert_eq!(shrink_collect.capacity, expected_capacity); assert_eq!(shrink_collect.total_starting_accounts, account_count); let mut expected_all_are_zero_lamports = lamports == 0; From ac788ab4557c65d46aa93ac1e5a6d43b947d7748 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Oct 2023 15:04:18 +0000 Subject: [PATCH 325/407] build(deps): bump num-derive from 0.4.0 to 0.4.1 (#33671) * build(deps): bump num-derive from 0.4.0 to 0.4.1 Bumps [num-derive](https://github.com/rust-num/num-derive) from 0.4.0 to 0.4.1. - [Changelog](https://github.com/rust-num/num-derive/blob/master/RELEASES.md) - [Commits](https://github.com/rust-num/num-derive/compare/num-derive-0.4.0...num-derive-0.4.1) --- updated-dependencies: - dependency-name: num-derive dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 30 +++++++++++++++--------------- programs/sbf/Cargo.lock | 30 +++++++++++++++--------------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5324efde6068e8..c0261886a6e459 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3380,9 +3380,9 @@ dependencies = [ [[package]] name = "num-derive" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" +checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", @@ -5276,7 +5276,7 @@ dependencies = [ "memmap2", "memoffset 0.9.0", "modular-bitfield", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "num_cpus", "num_enum 0.7.0", @@ -5320,7 +5320,7 @@ dependencies = [ "bincode", "bytemuck", "log", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "rustc_version 0.4.0", "serde", @@ -6642,7 +6642,7 @@ dependencies = [ "log", "memoffset 0.9.0", "num-bigint 0.4.4", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "parking_lot 0.12.1", "rand 0.8.5", @@ -6678,7 +6678,7 @@ dependencies = [ "libc", "libsecp256k1", "log", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "percentage", "rand 0.8.5", @@ -6791,7 +6791,7 @@ dependencies = [ "dialoguer", "hidapi", "log", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "parking_lot 0.12.1", "qstring", @@ -6984,7 +6984,7 @@ dependencies = [ "memmap2", "memoffset 0.9.0", "modular-bitfield", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "num_cpus", "num_enum 0.7.0", @@ -7064,7 +7064,7 @@ dependencies = [ "libsecp256k1", "log", "memmap2", - "num-derive 0.4.0", + 
"num-derive 0.4.1", "num-traits", "num_enum 0.7.0", "pbkdf2 0.11.0", @@ -7570,7 +7570,7 @@ dependencies = [ "assert_matches", "bincode", "log", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "rustc_version 0.4.0", "serde", @@ -7653,7 +7653,7 @@ dependencies = [ "bytemuck", "criterion", "curve25519-dalek", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "solana-program-runtime", "solana-sdk", @@ -7686,7 +7686,7 @@ dependencies = [ "itertools", "lazy_static", "merlin", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "rand 0.7.3", "serde", @@ -7750,7 +7750,7 @@ checksum = "385e31c29981488f2820b2022d8e731aae3b02e6e18e2fd854e4c9a94dc44fc3" dependencies = [ "assert_matches", "borsh 0.10.3", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "solana-program", "spl-token", @@ -7831,7 +7831,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "249e0318493b6bcf27ae9902600566c689b7dfba9f1bdff5893e92253374e78c" dependencies = [ - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "solana-program", "spl-program-error-derive", @@ -7887,7 +7887,7 @@ checksum = "e4abf34a65ba420584a0c35f3903f8d727d1f13ababbdc3f714c6b065a686e86" dependencies = [ "arrayref", "bytemuck", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "num_enum 0.7.0", "solana-program", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3c8bfa22636b32..66b4d27d9abe7a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2920,9 +2920,9 @@ dependencies = [ [[package]] name = "num-derive" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" +checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", @@ -4489,7 +4489,7 @@ dependencies = [ "lz4", "memmap2", "modular-bitfield", - "num-derive 0.4.0", + 
"num-derive 0.4.1", "num-traits", "num_cpus", "num_enum 0.7.0", @@ -4530,7 +4530,7 @@ dependencies = [ "bincode", "bytemuck", "log", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "rustc_version", "serde", @@ -5258,7 +5258,7 @@ dependencies = [ "log", "memoffset 0.9.0", "num-bigint 0.4.4", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "parking_lot 0.12.1", "rand 0.8.5", @@ -5290,7 +5290,7 @@ dependencies = [ "itertools", "libc", "log", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "percentage", "rand 0.8.5", @@ -5395,7 +5395,7 @@ dependencies = [ "console", "dialoguer", "log", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "parking_lot 0.12.1", "qstring", @@ -5542,7 +5542,7 @@ dependencies = [ "lz4", "memmap2", "modular-bitfield", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "num_cpus", "num_enum 0.7.0", @@ -6037,7 +6037,7 @@ dependencies = [ "libsecp256k1 0.6.0", "log", "memmap2", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "num_enum 0.7.0", "pbkdf2 0.11.0", @@ -6426,7 +6426,7 @@ version = "1.18.0" dependencies = [ "bincode", "log", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "rustc_version", "serde", @@ -6464,7 +6464,7 @@ name = "solana-zk-token-proof-program" version = "1.18.0" dependencies = [ "bytemuck", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "solana-program-runtime", "solana-sdk", @@ -6485,7 +6485,7 @@ dependencies = [ "itertools", "lazy_static", "merlin", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "rand 0.7.3", "serde", @@ -6547,7 +6547,7 @@ checksum = "385e31c29981488f2820b2022d8e731aae3b02e6e18e2fd854e4c9a94dc44fc3" dependencies = [ "assert_matches", "borsh 0.10.3", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "solana-program", "spl-token", @@ -6618,7 +6618,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "249e0318493b6bcf27ae9902600566c689b7dfba9f1bdff5893e92253374e78c" 
dependencies = [ - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "solana-program", "spl-program-error-derive", @@ -6674,7 +6674,7 @@ checksum = "e4abf34a65ba420584a0c35f3903f8d727d1f13ababbdc3f714c6b065a686e86" dependencies = [ "arrayref", "bytemuck", - "num-derive 0.4.0", + "num-derive 0.4.1", "num-traits", "num_enum 0.7.0", "solana-program", From 452fd5d384e344377c4d14ab4373f9ad129f36d2 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 12 Oct 2023 13:32:40 -0400 Subject: [PATCH 326/407] Adds `--no-skip-initial-accounts-db-clean` *hidden* CLI flag (#33664) --- core/src/validator.rs | 3 +++ core/tests/epoch_accounts_hash.rs | 1 + core/tests/snapshots.rs | 3 +++ ledger/src/bank_forks_utils.rs | 1 + ledger/src/blockstore_processor.rs | 1 + local-cluster/src/validator_configs.rs | 1 + runtime/src/bank.rs | 7 ++++--- runtime/src/bank/serde_snapshot.rs | 1 + runtime/src/bank/tests.rs | 4 ++-- runtime/src/snapshot_bank_utils.rs | 11 +++++++++++ validator/src/cli.rs | 7 +++++++ validator/src/main.rs | 1 + 12 files changed, 36 insertions(+), 5 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index b206cf87b30d8c..011d63924328c0 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -246,6 +246,7 @@ pub struct ValidatorConfig { pub warp_slot: Option, pub accounts_db_test_hash_calculation: bool, pub accounts_db_skip_shrink: bool, + pub accounts_db_force_initial_clean: bool, pub tpu_coalesce: Duration, pub staked_nodes_overrides: Arc>>, pub validator_exit: Arc>, @@ -313,6 +314,7 @@ impl Default for ValidatorConfig { warp_slot: None, accounts_db_test_hash_calculation: false, accounts_db_skip_shrink: false, + accounts_db_force_initial_clean: false, tpu_coalesce: DEFAULT_TPU_COALESCE, staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())), validator_exit: Arc::new(RwLock::new(Exit::default())), @@ -1759,6 +1761,7 @@ fn load_blockstore( shrink_ratio: config.accounts_shrink_ratio, accounts_db_test_hash_calculation: 
config.accounts_db_test_hash_calculation, accounts_db_skip_shrink: config.accounts_db_skip_shrink, + accounts_db_force_initial_clean: config.accounts_db_force_initial_clean, runtime_config: config.runtime_config.clone(), use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup, ..blockstore_processor::ProcessOptions::default() diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 718e62688b8c4c..8fa6919e99db1f 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -461,6 +461,7 @@ fn test_snapshots_have_expected_epoch_accounts_hash() { AccountShrinkThreshold::default(), true, true, + false, true, None, None, diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 3b689a8423e8b7..1520a410c0268c 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -171,6 +171,7 @@ fn restore_from_snapshot( check_hash_calculation, false, false, + false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), @@ -893,6 +894,7 @@ fn restore_from_snapshots_and_check_banks_are_equal( false, false, false, + false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), @@ -1113,6 +1115,7 @@ fn test_snapshots_with_background_services( false, false, false, + false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, exit.clone(), diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index b46d950adba28a..0be01e9bde975b 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -264,6 +264,7 @@ fn bank_forks_from_snapshot( process_options.shrink_ratio, process_options.accounts_db_test_hash_calculation, process_options.accounts_db_skip_shrink, + process_options.accounts_db_force_initial_clean, process_options.verify_index, process_options.accounts_db_config.clone(), accounts_update_notifier, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 219fa4c62ed3d4..618fe4a2c4a2c3 100644 --- 
a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -610,6 +610,7 @@ pub struct ProcessOptions { pub allow_dead_slots: bool, pub accounts_db_test_hash_calculation: bool, pub accounts_db_skip_shrink: bool, + pub accounts_db_force_initial_clean: bool, pub accounts_db_config: Option, pub verify_index: bool, pub shrink_ratio: AccountShrinkThreshold, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index d480dc2653567e..3479422c2f5147 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -51,6 +51,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { warp_slot: config.warp_slot, accounts_db_test_hash_calculation: config.accounts_db_test_hash_calculation, accounts_db_skip_shrink: config.accounts_db_skip_shrink, + accounts_db_force_initial_clean: config.accounts_db_force_initial_clean, tpu_coalesce: config.tpu_coalesce, staked_nodes_overrides: config.staked_nodes_overrides.clone(), validator_exit: Arc::new(RwLock::new(Exit::default())), diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b31a9cb2a46f44..1a46feabfd5945 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7609,12 +7609,13 @@ impl Bank { pub fn verify_snapshot_bank( &self, test_hash_calculation: bool, - accounts_db_skip_shrink: bool, + skip_shrink: bool, + force_clean: bool, last_full_snapshot_slot: Slot, base: Option<(Slot, /*capitalization*/ u64)>, ) -> bool { let (_, clean_time_us) = measure_us!({ - let should_clean = !accounts_db_skip_shrink && self.slot() > 0; + let should_clean = force_clean || (!skip_shrink && self.slot() > 0); if should_clean { info!("Cleaning..."); // We cannot clean past the last full snapshot's slot because we are about to @@ -7634,7 +7635,7 @@ impl Bank { }); let (_, shrink_time_us) = measure_us!({ - let should_shrink = !accounts_db_skip_shrink && self.slot() > 0; + let should_shrink = !skip_shrink && self.slot() > 0; if 
should_shrink { info!("Shrinking..."); self.rc.accounts.accounts_db.shrink_all_slots( diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 671a6dc6d738e5..17bba5638f2d47 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -496,6 +496,7 @@ mod tests { false, false, false, + false, Some(solana_accounts_db::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 9dd27bfd3254bf..190cd15a1278c8 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -3608,11 +3608,11 @@ fn test_verify_snapshot_bank() { bank.freeze(); add_root_and_flush_write_cache(&bank); bank.update_accounts_hash_for_tests(); - assert!(bank.verify_snapshot_bank(true, false, bank.slot(), None)); + assert!(bank.verify_snapshot_bank(true, false, false, bank.slot(), None)); // tamper the bank after freeze! bank.increment_signature_count(1); - assert!(!bank.verify_snapshot_bank(true, false, bank.slot(), None)); + assert!(!bank.verify_snapshot_bank(true, false, false, bank.slot(), None)); } // Test that two bank forks with the same accounts should not hash to the same value. 
diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index e538b07677630f..1757c00a9aadf5 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -271,6 +271,7 @@ pub fn bank_from_snapshot_archives( shrink_ratio: AccountShrinkThreshold, test_hash_calculation: bool, accounts_db_skip_shrink: bool, + accounts_db_force_initial_clean: bool, verify_index: bool, accounts_db_config: Option, accounts_update_notifier: Option, @@ -365,6 +366,7 @@ pub fn bank_from_snapshot_archives( if !bank.verify_snapshot_bank( test_hash_calculation, accounts_db_skip_shrink || !full_snapshot_archive_info.is_remote(), + accounts_db_force_initial_clean, full_snapshot_archive_info.slot(), base, ) && limit_load_slot_count_from_snapshot.is_none() @@ -427,6 +429,7 @@ pub fn bank_from_latest_snapshot_archives( shrink_ratio: AccountShrinkThreshold, test_hash_calculation: bool, accounts_db_skip_shrink: bool, + accounts_db_force_initial_clean: bool, verify_index: bool, accounts_db_config: Option, accounts_update_notifier: Option, @@ -459,6 +462,7 @@ pub fn bank_from_latest_snapshot_archives( shrink_ratio, test_hash_calculation, accounts_db_skip_shrink, + accounts_db_force_initial_clean, verify_index, accounts_db_config, accounts_update_notifier, @@ -1326,6 +1330,7 @@ mod tests { false, false, false, + false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), @@ -1437,6 +1442,7 @@ mod tests { false, false, false, + false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), @@ -1568,6 +1574,7 @@ mod tests { false, false, false, + false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), @@ -1689,6 +1696,7 @@ mod tests { false, false, false, + false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), @@ -1826,6 +1834,7 @@ mod tests { false, false, false, + false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), @@ -1891,6 +1900,7 @@ mod tests { false, false, false, + false, 
Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), @@ -2255,6 +2265,7 @@ mod tests { false, false, false, + false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, Arc::default(), diff --git a/validator/src/cli.rs b/validator/src/cli.rs index d9b974426bb2a8..0dcafe309eb5d9 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1196,6 +1196,13 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .help("Debug option to scan all append vecs and verify account index refcounts prior to clean") .hidden(hidden_unless_forced()) ) + .arg( + Arg::with_name("no_skip_initial_accounts_db_clean") + .long("no-skip-initial-accounts-db-clean") + .help("Do not skip the initial cleaning of accounts when verifying snapshot bank") + .hidden(hidden_unless_forced()) + .conflicts_with("accounts_db_skip_shrink") + ) .arg( Arg::with_name("accounts_db_create_ancient_storage_packed") .long("accounts-db-create-ancient-storage-packed") diff --git a/validator/src/main.rs b/validator/src/main.rs index 0c998b91c30309..abfe6e2009bc45 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1390,6 +1390,7 @@ pub fn main() { accounts_db_test_hash_calculation: matches.is_present("accounts_db_test_hash_calculation"), accounts_db_config, accounts_db_skip_shrink: true, + accounts_db_force_initial_clean: matches.is_present("no_skip_initial_accounts_db_clean"), tpu_coalesce, no_wait_for_vote_to_start_leader: matches.is_present("no_wait_for_vote_to_start_leader"), accounts_shrink_ratio, From 47540af978b45efcf373cbdb8b62803f3d072d2d Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Thu, 12 Oct 2023 15:58:01 -0700 Subject: [PATCH 327/407] Replace cargo registry server's GIT index with sparse index (#33666) * Replace cargo registry server's GIT index with sparse index * Remove GIT index support * handler for crate download request processing * restructure the index code --- cargo-registry/src/dummy_git_index.rs | 122 ------------------- 
cargo-registry/src/main.rs | 120 +++++++++---------- cargo-registry/src/response_builder.rs | 27 +++++ cargo-registry/src/sparse_index.rs | 158 +++++++++++++++++++++++++ 4 files changed, 240 insertions(+), 187 deletions(-) delete mode 100644 cargo-registry/src/dummy_git_index.rs create mode 100644 cargo-registry/src/response_builder.rs create mode 100644 cargo-registry/src/sparse_index.rs diff --git a/cargo-registry/src/dummy_git_index.rs b/cargo-registry/src/dummy_git_index.rs deleted file mode 100644 index ae5def46b082bb..00000000000000 --- a/cargo-registry/src/dummy_git_index.rs +++ /dev/null @@ -1,122 +0,0 @@ -use { - git2::{IndexAddOption, Repository}, - serde::{Deserialize, Serialize}, - std::{ - fs::{self, create_dir_all}, - io::ErrorKind, - path::PathBuf, - process::Command, - }, -}; - -#[derive(Debug, Default, Deserialize, Serialize)] -struct RegistryConfig { - dl: String, - api: Option, -} - -pub struct DummyGitIndex {} - -impl DummyGitIndex { - pub fn create_or_update_git_repo(root_dir: PathBuf, server_url: &str) { - create_dir_all(&root_dir).expect("Failed to create root directory"); - - let expected_config = serde_json::to_string(&RegistryConfig { - dl: format!( - "{}/api/v1/crates/{{crate}}/{{version}}/download", - server_url - ), - api: Some(server_url.to_string()), - }) - .expect("Failed to create expected config"); - - let config_path = root_dir.join("config.json"); - let config_written = if let Ok(config) = fs::read_to_string(&config_path) { - if config != expected_config { - fs::write(config_path, expected_config).expect("Failed to update config"); - true - } else { - false - } - } else { - fs::write(config_path, expected_config).expect("Failed to write config"); - true - }; - - #[cfg(unix)] - use std::os::unix::fs::symlink; - #[cfg(windows)] - use std::os::windows::fs::symlink_dir as symlink; - - let new_symlink = match symlink(".", root_dir.join("index")) { - Ok(()) => true, - Err(ref err) if err.kind() == ErrorKind::AlreadyExists => false, - 
Err(err) => panic!("Failed to create a symlink: {}", err), - }; - - let new_git_symlink = match symlink(".git", root_dir.join("git")) { - Ok(()) => true, - Err(ref err) if err.kind() == ErrorKind::AlreadyExists => false, - Err(err) => panic!("Failed to create git symlink: {}", err), - }; - - let repository = Repository::init(&root_dir).expect("Failed to GIT init"); - - let empty = repository - .is_empty() - .expect("Failed to check if GIT repo is empty"); - - if empty || config_written || new_symlink || new_git_symlink { - let mut index = repository.index().expect("cannot get the Index file"); - index - .add_all( - ["config.json", "index"].iter(), - IndexAddOption::DEFAULT, - None, - ) - .expect("Failed to add modified files to git index"); - index.write().expect("Failed to update the git index"); - - let tree = index - .write_tree() - .and_then(|tree_id| repository.find_tree(tree_id)) - .expect("Failed to get tree"); - - let signature = repository.signature().expect("Failed to get signature"); - - if empty { - repository.commit( - Some("HEAD"), - &signature, - &signature, - "Created new repo", - &tree, - &[], - ) - } else { - let oid = repository - .refname_to_id("HEAD") - .expect("Failed to get HEAD ref"); - let parent = repository - .find_commit(oid) - .expect("Failed to find parent commit"); - - repository.commit( - Some("HEAD"), - &signature, - &signature, - "Updated GIT repo", - &tree, - &[&parent], - ) - } - .expect("Failed to commit the changes"); - } - - Command::new("git") - .current_dir(&root_dir) - .arg("update-server-info") - .status() - .expect("git update-server-info failed"); - } -} diff --git a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs index d225ca8b112f3e..60227fa32a9962 100644 --- a/cargo-registry/src/main.rs +++ b/cargo-registry/src/main.rs @@ -2,7 +2,6 @@ use { crate::{ client::Client, - dummy_git_index::DummyGitIndex, publisher::{Error, Publisher}, }, hyper::{ @@ -10,46 +9,24 @@ use { service::{make_service_fn, service_fn}, 
Method, Server, }, - hyper_staticfile::Static, log::*, std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, - path::PathBuf, sync::Arc, }, }; mod client; -mod dummy_git_index; mod publisher; +mod response_builder; +mod sparse_index; + const PATH_PREFIX: &str = "/api/v1/crates"; pub struct CargoRegistryService {} impl CargoRegistryService { - fn error_response(status: hyper::StatusCode, msg: &str) -> hyper::Response { - error!("{}", msg); - hyper::Response::builder() - .status(status) - .body(hyper::Body::from( - serde_json::json!({ - "errors" : [ - {"details": msg} - ] - }) - .to_string(), - )) - .unwrap() - } - - fn success_response() -> hyper::Response { - hyper::Response::builder() - .status(hyper::StatusCode::OK) - .body(hyper::Body::from("")) - .unwrap() - } - async fn handle_publish_request( request: hyper::Request, client: Arc, @@ -63,7 +40,7 @@ impl CargoRegistryService { tokio::task::spawn_blocking(move || Publisher::publish_crate(data, client)) .await else { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::INTERNAL_SERVER_ERROR, "Internal error. Failed to wait for program deployment", ); @@ -71,15 +48,15 @@ impl CargoRegistryService { if result.is_ok() { info!("Published the crate successfully. {:?}", result); - Self::success_response() + response_builder::success_response() } else { - Self::error_response( + response_builder::error_response( hyper::StatusCode::BAD_REQUEST, format!("Failed to publish the crate. 
{:?}", result).as_str(), ) } } - Err(_) => Self::error_response( + Err(_) => response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Failed to receive the crate data from the client.", ), @@ -99,20 +76,20 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name, _version)) = Self::get_crate_name_and_version(path) else { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Failed to parse the request.", ); }; if path.len() != PATH_PREFIX.len() { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Request length is incorrect", ); } - Self::error_response( + response_builder::error_response( hyper::StatusCode::NOT_IMPLEMENTED, "This command is not implemented yet", ) @@ -123,20 +100,20 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name, _version)) = Self::get_crate_name_and_version(path) else { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Failed to parse the request.", ); }; if path.len() != PATH_PREFIX.len() { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Request length is incorrect", ); } - Self::error_response( + response_builder::error_response( hyper::StatusCode::NOT_IMPLEMENTED, "This command is not implemented yet", ) @@ -151,20 +128,20 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name)) = Self::get_crate_name(path) else { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Failed to parse the request.", ); }; if path.len() != PATH_PREFIX.len() { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Request length is incorrect", ); } - 
Self::error_response( + response_builder::error_response( hyper::StatusCode::NOT_IMPLEMENTED, "This command is not implemented yet", ) @@ -175,20 +152,20 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name)) = Self::get_crate_name(path) else { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Failed to parse the request.", ); }; if path.len() != PATH_PREFIX.len() { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Request length is incorrect", ); } - Self::error_response( + response_builder::error_response( hyper::StatusCode::NOT_IMPLEMENTED, "This command is not implemented yet", ) @@ -199,20 +176,20 @@ impl CargoRegistryService { _request: &hyper::Request, ) -> hyper::Response { let Some((path, _crate_name)) = Self::get_crate_name(path) else { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Failed to parse the request.", ); }; if path.len() != PATH_PREFIX.len() { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Request length is incorrect", ); } - Self::error_response( + response_builder::error_response( hyper::StatusCode::NOT_IMPLEMENTED, "This command is not implemented yet", ) @@ -228,44 +205,44 @@ impl CargoRegistryService { // full path started with PATH_PREFIX. So it's sufficient to check that provided // path is smaller than PATH_PREFIX. 
if path.len() >= PATH_PREFIX.len() { - return Self::error_response( + return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Request length is incorrect", ); } - Self::error_response( + response_builder::error_response( hyper::StatusCode::NOT_IMPLEMENTED, "This command is not implemented yet", ) } async fn handler( + index: sparse_index::RegistryIndex, request: hyper::Request, client: Arc, ) -> Result, Error> { let path = request.uri().path(); if path.starts_with("/git") { - return Static::new("/tmp/dummy-git") - .serve(request) - .await - .or_else(|_| { - Ok(Self::error_response( - hyper::StatusCode::BAD_REQUEST, - "Failed to serve git index", - )) - }); + return Ok(response_builder::error_response( + hyper::StatusCode::BAD_REQUEST, + "This registry server does not support GIT index. Please use sparse index.", + )); + } + + if path.starts_with(index.index_root.as_str()) { + return Ok(index.handler(request)); } if !path.starts_with(PATH_PREFIX) { - return Ok(Self::error_response( + return Ok(response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Invalid path for the request", )); } let Some((path, endpoint)) = path.rsplit_once('/') else { - return Ok(Self::error_response( + return Ok(response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Invalid endpoint in the path", )); @@ -275,7 +252,7 @@ impl CargoRegistryService { Method::PUT => match endpoint { "new" => { if path.len() != PATH_PREFIX.len() { - Self::error_response( + response_builder::error_response( hyper::StatusCode::BAD_REQUEST, "Invalid length of the request.", ) @@ -285,19 +262,31 @@ impl CargoRegistryService { } "unyank" => Self::handle_unyank_request(path, &request), "owners" => Self::handle_add_owners_request(path, &request), - _ => Self::error_response(hyper::StatusCode::METHOD_NOT_ALLOWED, "Unknown request"), + _ => response_builder::error_response( + hyper::StatusCode::METHOD_NOT_ALLOWED, + "Unknown request", + ), }, Method::GET => match endpoint { 
"crates" => Self::handle_get_crates_request(path, &request), "owners" => Self::handle_get_owners_request(path, &request), - _ => Self::error_response(hyper::StatusCode::METHOD_NOT_ALLOWED, "Unknown request"), + _ => response_builder::error_response( + hyper::StatusCode::METHOD_NOT_ALLOWED, + "Unknown request", + ), }, Method::DELETE => match endpoint { "yank" => Self::handle_yank_request(path, &request), "owners" => Self::handle_delete_owners_request(path, &request), - _ => Self::error_response(hyper::StatusCode::METHOD_NOT_ALLOWED, "Unknown request"), + _ => response_builder::error_response( + hyper::StatusCode::METHOD_NOT_ALLOWED, + "Unknown request", + ), }, - _ => Self::error_response(hyper::StatusCode::METHOD_NOT_ALLOWED, "Unknown request"), + _ => response_builder::error_response( + hyper::StatusCode::METHOD_NOT_ALLOWED, + "Unknown request", + ), }) } } @@ -308,13 +297,14 @@ async fn main() { let client = Arc::new(Client::new().expect("Failed to get RPC Client instance")); let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), client.port); - DummyGitIndex::create_or_update_git_repo(PathBuf::from("/tmp/dummy-git"), &client.server_url); + let index = sparse_index::RegistryIndex::new("/index", &client.server_url); let registry_service = make_service_fn(move |_| { let client_inner = client.clone(); + let index = index.clone(); async move { Ok::<_, Error>(service_fn(move |request| { - CargoRegistryService::handler(request, client_inner.clone()) + CargoRegistryService::handler(index.clone(), request, client_inner.clone()) })) } }); diff --git a/cargo-registry/src/response_builder.rs b/cargo-registry/src/response_builder.rs new file mode 100644 index 00000000000000..8a56e298f713ae --- /dev/null +++ b/cargo-registry/src/response_builder.rs @@ -0,0 +1,27 @@ +use {crate::response_builder, log::error}; + +pub(crate) fn error_response(status: hyper::StatusCode, msg: &str) -> hyper::Response { + error!("{}", msg); + hyper::Response::builder() + 
.status(status) + .body(hyper::Body::from( + serde_json::json!({ + "errors" : [ + {"details": msg} + ] + }) + .to_string(), + )) + .unwrap() +} + +pub(crate) fn success_response_str(value: &str) -> hyper::Response { + hyper::Response::builder() + .status(hyper::StatusCode::OK) + .body(hyper::Body::from(value.to_string())) + .unwrap() +} + +pub(crate) fn success_response() -> hyper::Response { + response_builder::success_response_str("") +} diff --git a/cargo-registry/src/sparse_index.rs b/cargo-registry/src/sparse_index.rs new file mode 100644 index 00000000000000..59b9b88985c445 --- /dev/null +++ b/cargo-registry/src/sparse_index.rs @@ -0,0 +1,158 @@ +use { + crate::response_builder, + log::info, + serde::{Deserialize, Serialize}, +}; + +#[derive(Debug, Default, Deserialize, Serialize)] +struct RegistryConfig { + dl: String, + api: Option, +} + +#[derive(Clone)] +pub struct RegistryIndex { + pub(crate) index_root: String, + config: String, +} + +impl RegistryIndex { + pub fn new(root: &str, server_url: &str) -> Self { + let registry_config = RegistryConfig { + dl: format!("{}/api/v1/crates", server_url), + api: Some(server_url.to_string()), + }; + let config = + serde_json::to_string(®istry_config).expect("Failed to create registry config"); + + info!("Registry index is available at {}{}/", server_url, root); + Self { + index_root: root.to_string(), + config, + } + } + + pub fn handler(&self, request: hyper::Request) -> hyper::Response { + let path = request.uri().path(); + let expected_root = self.index_root.as_str(); + if !path.starts_with(expected_root) { + return response_builder::error_response( + hyper::StatusCode::BAD_REQUEST, + "Invalid path for index", + ); + } + + let Some((_, path)) = path.split_once(expected_root) else { + return response_builder::error_response( + hyper::StatusCode::BAD_REQUEST, + "Invalid path for index", + ); + }; + + if path == "/config.json" { + return response_builder::success_response_str(&self.config); + } + + 
Self::handle_crate_lookup_request(path) + } + + fn get_crate_name_from_path(path: &str) -> Option<&str> { + let (path, crate_name) = path.rsplit_once('/')?; + + // The index for deployed crates follow the path naming described here + // https://doc.rust-lang.org/cargo/reference/registry-index.html#index-files + match crate_name.len() { + 0 => false, + 1 => path == "/1", + 2 => path == "/2", + 3 => { + let first_char = crate_name.chars().next()?; + path == format!("/3/{}", first_char) + } + _ => { + let (first_two_char, rest) = crate_name.split_at(2); + let (next_two_char, _) = rest.split_at(2); + path == format!("/{}/{}", first_two_char, next_two_char) + } + } + .then_some(crate_name) + } + + fn handle_crate_lookup_request(path: &str) -> hyper::Response { + let Some(crate_name) = Self::get_crate_name_from_path(path) else { + return response_builder::error_response( + hyper::StatusCode::BAD_REQUEST, + "Invalid path for the request", + ); + }; + + // Fetch the index information for the crate + info!("Received a request to fetch {:?}", crate_name); + + response_builder::success_response() + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_get_crate_name_from_path() { + assert_eq!(RegistryIndex::get_crate_name_from_path(""), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/"), None); + + // Single character crate name + assert_eq!(RegistryIndex::get_crate_name_from_path("/a"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/1/a"), Some("a")); + assert_eq!(RegistryIndex::get_crate_name_from_path("/2/a"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/a/a"), None); + + // Two character crate name + assert_eq!(RegistryIndex::get_crate_name_from_path("/ab"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/1/ab"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/2/ab"), Some("ab")); + assert_eq!(RegistryIndex::get_crate_name_from_path("/3/ab"), None); + 
assert_eq!(RegistryIndex::get_crate_name_from_path("/ab/ab"), None); + + // Three character crate name + assert_eq!(RegistryIndex::get_crate_name_from_path("/abc"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/1/abc"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/2/abc"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/3/abc"), None); + assert_eq!( + RegistryIndex::get_crate_name_from_path("/3/a/abc"), + Some("abc") + ); + assert_eq!(RegistryIndex::get_crate_name_from_path("/ab/abc"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/ab/c/abc"), None); + + // Four character crate name + assert_eq!(RegistryIndex::get_crate_name_from_path("/abcd"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/1/abcd"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/2/abcd"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/3/abcd"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/3/a/abcd"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/4/abcd"), None); + assert_eq!( + RegistryIndex::get_crate_name_from_path("/ab/cd/abcd"), + Some("abcd") + ); + assert_eq!(RegistryIndex::get_crate_name_from_path("/ab/cd/abc"), None); + + // More character crate name + assert_eq!(RegistryIndex::get_crate_name_from_path("/abcdefgh"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/1/abcdefgh"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/2/abcdefgh"), None); + assert_eq!(RegistryIndex::get_crate_name_from_path("/3/abcdefgh"), None); + assert_eq!( + RegistryIndex::get_crate_name_from_path("/3/a/abcdefgh"), + None + ); + assert_eq!(RegistryIndex::get_crate_name_from_path("/4/abcdefgh"), None); + assert_eq!( + RegistryIndex::get_crate_name_from_path("/ab/cd/abcdefgh"), + Some("abcdefgh") + ); + } +} From 53925b618201677fb4077eb0a60cb342c1664555 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 13 Oct 2023 13:08:38 +0900 Subject: [PATCH 
328/407] Make goto_end_of_slot() take Arc (#33650) --- runtime/benches/bank.rs | 4 +- runtime/src/bank.rs | 3 +- runtime/src/bank/tests.rs | 106 +++++++++++++++++++------------------- 3 files changed, 57 insertions(+), 56 deletions(-) diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs index fc8dfbd4a4e564..b853789ddbc21b 100644 --- a/runtime/benches/bank.rs +++ b/runtime/benches/bank.rs @@ -184,7 +184,7 @@ fn bench_bank_async_process_native_loader_transactions(bencher: &mut Bencher) { fn bench_bank_update_recent_blockhashes(bencher: &mut Bencher) { let (genesis_config, _mint_keypair) = create_genesis_config(100); let mut bank = Arc::new(Bank::new_for_benches(&genesis_config)); - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); let genesis_hash = bank.last_blockhash(); // Prime blockhash_queue for i in 0..(MAX_RECENT_BLOCKHASHES + 1) { @@ -193,7 +193,7 @@ fn bench_bank_update_recent_blockhashes(bencher: &mut Bencher) { &Pubkey::default(), (i + 1) as u64, )); - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); } // Verify blockhash_queue is full (genesis hash has been kicked out) assert!(!bank.is_hash_valid_for_age(&genesis_hash, MAX_RECENT_BLOCKHASHES)); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 1a46feabfd5945..6f220a810e3f59 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -8508,8 +8508,9 @@ pub mod test_utils { super::Bank, solana_sdk::{hash::hashv, pubkey::Pubkey}, solana_vote_program::vote_state::{self, BlockTimestamp, VoteStateVersions}, + std::sync::Arc, }; - pub fn goto_end_of_slot(bank: &Bank) { + pub fn goto_end_of_slot(bank: Arc) { let mut tick_hash = bank.last_blockhash(); loop { tick_hash = hashv(&[tick_hash.as_ref(), &[42]]); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 190cd15a1278c8..69f9d18e5a9b8f 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -2727,7 +2727,7 @@ fn 
test_bank_tx_fee() { let (expected_fee_collected, expected_fee_burned) = genesis_config.fee_rate_governor.burn(expected_fee_paid); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let capitalization = bank.capitalization(); @@ -2748,7 +2748,7 @@ fn test_bank_tx_fee() { ); assert_eq!(bank.get_balance(&leader), initial_balance); - goto_end_of_slot(&bank); + goto_end_of_slot(bank.clone()); assert_eq!(bank.signature_count(), 1); assert_eq!( bank.get_balance(&leader), @@ -2776,7 +2776,7 @@ fn test_bank_tx_fee() { ); // Verify that an InstructionError collects fees, too - let bank = Bank::new_from_parent(Arc::new(bank), &leader, 1); + let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1)); let mut tx = system_transaction::transfer(&mint_keypair, &key, 1, bank.last_blockhash()); // Create a bogus instruction to system_program to cause an instruction error tx.message.instructions[0].data[0] = 40; @@ -2788,7 +2788,7 @@ fn test_bank_tx_fee() { bank.get_balance(&mint_keypair.pubkey()), mint - arbitrary_transfer_amount - 2 * expected_fee_paid ); // mint_keypair still pays a fee - goto_end_of_slot(&bank); + goto_end_of_slot(bank.clone()); assert_eq!(bank.signature_count(), 1); // Profit! 
2 transaction signatures processed at 3 lamports each @@ -2840,7 +2840,7 @@ fn test_bank_tx_compute_unit_fee() { let (expected_fee_collected, expected_fee_burned) = genesis_config.fee_rate_governor.burn(expected_fee_paid); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let capitalization = bank.capitalization(); @@ -2860,7 +2860,7 @@ fn test_bank_tx_compute_unit_fee() { ); assert_eq!(bank.get_balance(&leader), initial_balance); - goto_end_of_slot(&bank); + goto_end_of_slot(bank.clone()); assert_eq!(bank.signature_count(), 1); assert_eq!( bank.get_balance(&leader), @@ -2888,7 +2888,7 @@ fn test_bank_tx_compute_unit_fee() { ); // Verify that an InstructionError collects fees, too - let bank = Bank::new_from_parent(Arc::new(bank), &leader, 1); + let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1)); let mut tx = system_transaction::transfer(&mint_keypair, &key, 1, bank.last_blockhash()); // Create a bogus instruction to system_program to cause an instruction error tx.message.instructions[0].data[0] = 40; @@ -2900,7 +2900,7 @@ fn test_bank_tx_compute_unit_fee() { bank.get_balance(&mint_keypair.pubkey()), mint - arbitrary_transfer_amount - 2 * expected_fee_paid ); // mint_keypair still pays a fee - goto_end_of_slot(&bank); + goto_end_of_slot(bank.clone()); assert_eq!(bank.signature_count(), 1); // Profit! 
2 transaction signatures processed at 3 lamports each @@ -2938,19 +2938,19 @@ fn test_bank_blockhash_fee_structure() { .target_lamports_per_signature = 5000; genesis_config.fee_rate_governor.target_signatures_per_slot = 0; - let bank = Bank::new_for_tests(&genesis_config); - goto_end_of_slot(&bank); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + goto_end_of_slot(bank.clone()); let cheap_blockhash = bank.last_blockhash(); let cheap_lamports_per_signature = bank.get_lamports_per_signature(); assert_eq!(cheap_lamports_per_signature, 0); - let bank = Bank::new_from_parent(Arc::new(bank), &leader, 1); - goto_end_of_slot(&bank); + let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1)); + goto_end_of_slot(bank.clone()); let expensive_blockhash = bank.last_blockhash(); let expensive_lamports_per_signature = bank.get_lamports_per_signature(); assert!(cheap_lamports_per_signature < expensive_lamports_per_signature); - let bank = Bank::new_from_parent(Arc::new(bank), &leader, 2); + let bank = Bank::new_from_parent(bank, &leader, 2); // Send a transfer using cheap_blockhash let key = solana_sdk::pubkey::new_rand(); @@ -2990,19 +2990,19 @@ fn test_bank_blockhash_compute_unit_fee_structure() { .target_lamports_per_signature = 1000; genesis_config.fee_rate_governor.target_signatures_per_slot = 1; - let bank = Bank::new_for_tests(&genesis_config); - goto_end_of_slot(&bank); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + goto_end_of_slot(bank.clone()); let cheap_blockhash = bank.last_blockhash(); let cheap_lamports_per_signature = bank.get_lamports_per_signature(); assert_eq!(cheap_lamports_per_signature, 0); - let bank = Bank::new_from_parent(Arc::new(bank), &leader, 1); - goto_end_of_slot(&bank); + let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1)); + goto_end_of_slot(bank.clone()); let expensive_blockhash = bank.last_blockhash(); let expensive_lamports_per_signature = bank.get_lamports_per_signature(); 
assert!(cheap_lamports_per_signature < expensive_lamports_per_signature); - let bank = Bank::new_from_parent(Arc::new(bank), &leader, 2); + let bank = Bank::new_from_parent(bank, &leader, 2); // Send a transfer using cheap_blockhash let key = solana_sdk::pubkey::new_rand(); @@ -4887,7 +4887,7 @@ fn test_recent_blockhashes_sysvar() { let most_recent_hash = recent_blockhashes.iter().next().unwrap().blockhash; // Check order assert!(bank.is_hash_valid_for_age(&most_recent_hash, 0)); - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } } @@ -4895,8 +4895,8 @@ fn test_recent_blockhashes_sysvar() { #[allow(deprecated)] #[test] fn test_blockhash_queue_sysvar_consistency() { - let mut bank = create_simple_test_arc_bank(100_000); - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + let bank = create_simple_test_arc_bank(100_000); + goto_end_of_slot(bank.clone()); let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap(); let recent_blockhashes = @@ -5055,7 +5055,7 @@ where // Banks 0 and 1 have no fees, wait two blocks before // initializing our nonce accounts for _ in 0..2 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5069,7 +5069,7 @@ where // The setup nonce is not valid to be used until the next bank // so wait one more block - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); Ok((bank, mint_keypair, custodian_keypair, nonce_keypair)) @@ -5324,7 +5324,7 @@ fn test_nonce_transaction() { /* Kick nonce hash off the blockhash_queue */ for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5393,7 +5393,7 @@ fn test_nonce_transaction() { /* Kick nonce hash off the blockhash_queue */ for _ in 
0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5451,7 +5451,7 @@ fn test_nonce_transaction_with_tx_wide_caps() { /* Kick nonce hash off the blockhash_queue */ for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5520,7 +5520,7 @@ fn test_nonce_transaction_with_tx_wide_caps() { /* Kick nonce hash off the blockhash_queue */ for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5588,7 +5588,7 @@ fn test_nonce_authority() { let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5646,7 +5646,7 @@ fn test_nonce_payer() { let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5711,7 +5711,7 @@ fn test_nonce_payer_tx_wide_cap() { let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5779,7 +5779,7 @@ fn test_nonce_fee_calculator_updates() { // Kick nonce hash off the blockhash_queue for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5847,7 +5847,7 @@ fn test_nonce_fee_calculator_updates_tx_wide_cap() { // Kick nonce hash off the 
blockhash_queue for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } @@ -5927,7 +5927,7 @@ fn test_check_ro_durable_nonce_fails() { ); // Kick nonce hash off the blockhash_queue for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); bank = Arc::new(new_from_parent(bank)); } // Caught by the runtime because it is a nonce transaction @@ -6548,7 +6548,7 @@ fn test_bank_hash_consistency() { // Check a few slots, cross an epoch boundary assert_eq!(bank.get_slots_in_epoch(0), 32); loop { - goto_end_of_slot(Arc::get_mut(&mut bank).unwrap()); + goto_end_of_slot(bank.clone()); if bank.slot == 0 { assert_eq!( bank.hash().to_string(), @@ -6619,13 +6619,13 @@ fn get_shrink_account_size() -> usize { // Set root for bank 0, with caching disabled so we can get the size // of the storage for this slot - let mut bank0 = Arc::new(Bank::new_with_config_for_tests( + let bank0 = Arc::new(Bank::new_with_config_for_tests( &genesis_config, AccountSecondaryIndexes::default(), AccountShrinkThreshold::default(), )); bank0.restore_old_behavior_for_fragile_tests(); - goto_end_of_slot(Arc::::get_mut(&mut bank0).unwrap()); + goto_end_of_slot(bank0.clone()); bank0.freeze(); bank0.squash(); add_root_and_flush_write_cache(&bank0); @@ -6658,7 +6658,7 @@ fn test_clean_nonrooted() { info!("pubkey1: {}", pubkey1); // Set root for bank 0, with caching enabled - let mut bank0 = Arc::new(Bank::new_with_config_for_tests( + let bank0 = Arc::new(Bank::new_with_config_for_tests( &genesis_config, AccountSecondaryIndexes::default(), AccountShrinkThreshold::default(), @@ -6666,7 +6666,7 @@ fn test_clean_nonrooted() { let account_zero = AccountSharedData::new(0, 0, &Pubkey::new_unique()); - goto_end_of_slot(Arc::::get_mut(&mut bank0).unwrap()); + goto_end_of_slot(bank0.clone()); bank0.freeze(); bank0.squash(); // Flush now 
so that accounts cache cleaning doesn't clean up bank 0 when later @@ -6675,9 +6675,9 @@ fn test_clean_nonrooted() { // Store some lamports in bank 1 let some_lamports = 123; - let mut bank1 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1)); + let bank1 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1)); bank1.deposit(&pubkey0, some_lamports).unwrap(); - goto_end_of_slot(Arc::::get_mut(&mut bank1).unwrap()); + goto_end_of_slot(bank1.clone()); bank1.freeze(); bank1.flush_accounts_cache_slot_for_tests(); @@ -6685,10 +6685,10 @@ fn test_clean_nonrooted() { // Store some lamports for pubkey1 in bank 2, root bank 2 // bank2's parent is bank0 - let mut bank2 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 2)); + let bank2 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 2)); bank2.deposit(&pubkey1, some_lamports).unwrap(); bank2.store_account(&pubkey0, &account_zero); - goto_end_of_slot(Arc::::get_mut(&mut bank2).unwrap()); + goto_end_of_slot(bank2.clone()); bank2.freeze(); bank2.squash(); bank2.force_flush_accounts_cache(); @@ -6700,9 +6700,9 @@ fn test_clean_nonrooted() { // candidate set bank2.clean_accounts_for_tests(); - let mut bank3 = Arc::new(Bank::new_from_parent(bank2, &Pubkey::default(), 3)); + let bank3 = Arc::new(Bank::new_from_parent(bank2, &Pubkey::default(), 3)); bank3.deposit(&pubkey1, some_lamports + 1).unwrap(); - goto_end_of_slot(Arc::::get_mut(&mut bank3).unwrap()); + goto_end_of_slot(bank3.clone()); bank3.freeze(); bank3.squash(); bank3.force_flush_accounts_cache(); @@ -6733,7 +6733,7 @@ fn test_shrink_candidate_slots_cached() { let pubkey2 = solana_sdk::pubkey::new_rand(); // Set root for bank 0, with caching enabled - let mut bank0 = Arc::new(Bank::new_with_config_for_tests( + let bank0 = Arc::new(Bank::new_with_config_for_tests( &genesis_config, AccountSecondaryIndexes::default(), AccountShrinkThreshold::default(), @@ -6745,7 +6745,7 @@ fn test_shrink_candidate_slots_cached() { let 
account0 = AccountSharedData::new(1000, pubkey0_size, &Pubkey::new_unique()); bank0.store_account(&pubkey0, &account0); - goto_end_of_slot(Arc::::get_mut(&mut bank0).unwrap()); + goto_end_of_slot(bank0.clone()); bank0.freeze(); bank0.squash(); // Flush now so that accounts cache cleaning doesn't clean up bank 0 when later @@ -6754,10 +6754,10 @@ fn test_shrink_candidate_slots_cached() { // Store some lamports in bank 1 let some_lamports = 123; - let mut bank1 = Arc::new(new_from_parent(bank0)); + let bank1 = Arc::new(new_from_parent(bank0)); bank1.deposit(&pubkey1, some_lamports).unwrap(); bank1.deposit(&pubkey2, some_lamports).unwrap(); - goto_end_of_slot(Arc::::get_mut(&mut bank1).unwrap()); + goto_end_of_slot(bank1.clone()); bank1.freeze(); bank1.squash(); // Flush now so that accounts cache cleaning doesn't clean up bank 0 when later @@ -6765,10 +6765,10 @@ fn test_shrink_candidate_slots_cached() { bank1.force_flush_accounts_cache(); // Store some lamports for pubkey1 in bank 2, root bank 2 - let mut bank2 = Arc::new(new_from_parent(bank1)); + let bank2 = Arc::new(new_from_parent(bank1)); bank2.deposit(&pubkey1, some_lamports).unwrap(); bank2.store_account(&pubkey0, &account0); - goto_end_of_slot(Arc::::get_mut(&mut bank2).unwrap()); + goto_end_of_slot(bank2.clone()); bank2.freeze(); bank2.squash(); bank2.force_flush_accounts_cache(); @@ -12501,7 +12501,7 @@ fn test_runtime_feature_enable_with_program_cache() { genesis_config .accounts .remove(&feature_set::reject_callx_r10::id()); - let root_bank = Bank::new_for_tests(&genesis_config); + let root_bank = Arc::new(Bank::new_for_tests(&genesis_config)); // Test a basic transfer let amount = genesis_config.rent.minimum_balance(0); @@ -12530,8 +12530,8 @@ fn test_runtime_feature_enable_with_program_cache() { let transaction1 = Transaction::new(&signers1, message1, root_bank.last_blockhash()); // Advance the bank so the next transaction can be submitted. 
- goto_end_of_slot(&root_bank); - let mut bank = new_from_parent(Arc::new(root_bank)); + goto_end_of_slot(root_bank.clone()); + let mut bank = new_from_parent(root_bank); // Compose second instruction using the same program with a different block hash let instruction2 = Instruction::new_with_bytes(program_keypair.pubkey(), &[], Vec::new()); From 01a3b1b52f8f09b4e972aa31277ef6cc6dac9cef Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 12 Oct 2023 22:43:27 -0600 Subject: [PATCH 329/407] Blockstore: clean/save old TransactionMemos sensibly (#33678) * Convert OldestSlot to named struct * Add clean_slot_0 to OldestSlot * Set AtomicBool to true when all primary-index keys returning slot 0 should be purged * Add PurgedFilter::clean_slot_0 * Use clean_slot_0 to preserve deprecated TransactionMemos * Also set AtomicBool to true immediately on boot, if highest_primary_index_slot.is_none * Add test * Fixup test --- ledger/src/blockstore.rs | 5 ++ ledger/src/blockstore/blockstore_purge.rs | 105 ++++++++++++++++++++++ ledger/src/blockstore_db.rs | 32 +++++-- 3 files changed, 137 insertions(+), 5 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 080be2fafd5f44..74440cdd0c0a8f 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2154,6 +2154,8 @@ impl Blockstore { } if highest_primary_index_slot.is_some() { self.set_highest_primary_index_slot(highest_primary_index_slot); + } else { + self.db.set_clean_slot_0(true); } Ok(()) } @@ -2167,6 +2169,9 @@ impl Blockstore { self.transaction_status_index_cf.delete(1)?; } } + if w_highest_primary_index_slot.is_none() { + self.db.set_clean_slot_0(true); + } Ok(()) } diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index d643dd2c7ef075..9669f8bd305a00 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -990,4 +990,109 @@ pub mod tests { } assert_eq!(count, max_slot - (oldest_slot - 1)); } 
+ + #[test] + fn test_purge_transaction_memos_compaction_filter() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + let oldest_slot = 5; + + fn random_signature() -> Signature { + use rand::Rng; + + let mut key = [0u8; 64]; + rand::thread_rng().fill(&mut key[..]); + Signature::from(key) + } + + // Insert some deprecated TransactionMemos + blockstore + .transaction_memos_cf + .put_deprecated(random_signature(), &"this is a memo".to_string()) + .unwrap(); + blockstore + .transaction_memos_cf + .put_deprecated(random_signature(), &"another memo".to_string()) + .unwrap(); + // Set clean_slot_0 to false, since we have deprecated memos + blockstore.db.set_clean_slot_0(false); + + // Insert some current TransactionMemos + blockstore + .transaction_memos_cf + .put( + (random_signature(), oldest_slot - 1), + &"this is a new memo in slot 4".to_string(), + ) + .unwrap(); + blockstore + .transaction_memos_cf + .put( + (random_signature(), oldest_slot), + &"this is a memo in slot 5 ".to_string(), + ) + .unwrap(); + + let first_index = { + let mut memos_iterator = blockstore + .transaction_memos_cf + .iterator_cf_raw_key(IteratorMode::Start); + memos_iterator.next().unwrap().unwrap().0 + }; + let last_index = { + let mut memos_iterator = blockstore + .transaction_memos_cf + .iterator_cf_raw_key(IteratorMode::End); + memos_iterator.next().unwrap().unwrap().0 + }; + + // Purge at slot 0 should not affect any memos + blockstore.db.set_oldest_slot(0); + blockstore + .db + .compact_range_cf::(&first_index, &last_index); + let memos_iterator = blockstore + .transaction_memos_cf + .iterator_cf_raw_key(IteratorMode::Start); + let mut count = 0; + for item in memos_iterator { + let _item = item.unwrap(); + count += 1; + } + assert_eq!(count, 4); + + // Purge at oldest_slot without clean_slot_0 only purges the current memo at slot 4 + blockstore.db.set_oldest_slot(oldest_slot); + blockstore + .db + 
.compact_range_cf::(&first_index, &last_index); + let memos_iterator = blockstore + .transaction_memos_cf + .iterator_cf_raw_key(IteratorMode::Start); + let mut count = 0; + for item in memos_iterator { + let (key, _value) = item.unwrap(); + let slot = ::index(&key).1; + assert!(slot == 0 || slot >= oldest_slot); + count += 1; + } + assert_eq!(count, 3); + + // Purge at oldest_slot with clean_slot_0 purges deprecated memos + blockstore.db.set_clean_slot_0(true); + blockstore + .db + .compact_range_cf::(&first_index, &last_index); + let memos_iterator = blockstore + .transaction_memos_cf + .iterator_cf_raw_key(IteratorMode::Start); + let mut count = 0; + for item in memos_iterator { + let (key, _value) = item.unwrap(); + let slot = ::index(&key).1; + assert!(slot >= oldest_slot); + count += 1; + } + assert_eq!(count, 1); + } } diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index f9c87ce397d434..b65df82ee00c9e 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -40,7 +40,7 @@ use { marker::PhantomData, path::Path, sync::{ - atomic::{AtomicU64, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, }, }, @@ -348,7 +348,10 @@ pub mod columns { } #[derive(Default, Clone, Debug)] -struct OldestSlot(Arc); +struct OldestSlot { + slot: Arc, + clean_slot_0: Arc, +} impl OldestSlot { pub fn set(&self, oldest_slot: Slot) { @@ -356,7 +359,7 @@ impl OldestSlot { // also, compaction_filters are created via its factories, creating short-lived copies of // this atomic value for the single job of compaction. 
So, Relaxed store can be justified // in total - self.0.store(oldest_slot, Ordering::Relaxed); + self.slot.store(oldest_slot, Ordering::Relaxed); } pub fn get(&self) -> Slot { @@ -365,7 +368,15 @@ impl OldestSlot { // requirement at the moment // also eventual propagation (very Relaxed) load is Ok, because compaction by nature doesn't // require strictly synchronized semantics in this regard - self.0.load(Ordering::Relaxed) + self.slot.load(Ordering::Relaxed) + } + + pub(crate) fn set_clean_slot_0(&self, clean_slot_0: bool) { + self.clean_slot_0.store(clean_slot_0, Ordering::Relaxed); + } + + pub(crate) fn get_clean_slot_0(&self) -> bool { + self.clean_slot_0.load(Ordering::Relaxed) } } @@ -1427,6 +1438,10 @@ impl Database { self.backend.oldest_slot.set(oldest_slot); } + pub(crate) fn set_clean_slot_0(&self, clean_slot_0: bool) { + self.backend.oldest_slot.set_clean_slot_0(clean_slot_0); + } + pub fn live_files_metadata(&self) -> Result> { self.backend.live_files_metadata() } @@ -1835,6 +1850,10 @@ impl<'a> WriteBatch<'a> { struct PurgedSlotFilter { /// The oldest slot to keep; any slot < oldest_slot will be removed oldest_slot: Slot, + /// Whether to preserve keys that return slot 0, even when oldest_slot > 0. 
+ // This is used to delete old column data that wasn't keyed with a Slot, and so always returns + // `C::slot() == 0` + clean_slot_0: bool, name: CString, _phantom: PhantomData, } @@ -1844,7 +1863,7 @@ impl CompactionFilter for PurgedSlotFilter { use rocksdb::CompactionDecision::*; let slot_in_key = C::slot(C::index(key)); - if slot_in_key >= self.oldest_slot { + if slot_in_key >= self.oldest_slot || (slot_in_key == 0 && !self.clean_slot_0) { Keep } else { Remove @@ -1867,8 +1886,10 @@ impl CompactionFilterFactory for PurgedSlotFilterFactory fn create(&mut self, _context: CompactionFilterContext) -> Self::Filter { let copied_oldest_slot = self.oldest_slot.get(); + let copied_clean_slot_0 = self.oldest_slot.get_clean_slot_0(); PurgedSlotFilter:: { oldest_slot: copied_oldest_slot, + clean_slot_0: copied_clean_slot_0, name: CString::new(format!( "purged_slot_filter({}, {:?})", C::NAME, @@ -2113,6 +2134,7 @@ pub mod tests { is_manual_compaction: true, }; let oldest_slot = OldestSlot::default(); + oldest_slot.set_clean_slot_0(true); let mut factory = PurgedSlotFilterFactory:: { oldest_slot: oldest_slot.clone(), From abfecad785cef2ff0b3e57589507b076c3b8d4bb Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 13 Oct 2023 12:46:45 +0800 Subject: [PATCH 330/407] remove redis setup from net.sh (#33680) remove redis setup for perf tests --- net/gce.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/net/gce.sh b/net/gce.sh index 1a4eb775e220c1..058dcdcf0e7590 100755 --- a/net/gce.sh +++ b/net/gce.sh @@ -806,7 +806,6 @@ $( install-earlyoom.sh \ install-iftop.sh \ install-libssl-compatability.sh \ - install-redis.sh \ install-rsync.sh \ install-perf.sh \ localtime.sh \ From 923d5b5324075cd0c1ab69e2fd7cd582421d91e5 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 13 Oct 2023 12:47:06 +0800 Subject: [PATCH 331/407] chore: remove install-nodejs.sh and install-redis.sh (#33684) chore: remove unused scripts --- .../setup-new-machine.sh | 2 - net/scripts/install-nodejs.sh | 21 
---------- net/scripts/install-redis.sh | 39 ------------------- 3 files changed, 62 deletions(-) delete mode 100755 net/scripts/install-nodejs.sh delete mode 100755 net/scripts/install-redis.sh diff --git a/ci/setup-new-buildkite-agent/setup-new-machine.sh b/ci/setup-new-buildkite-agent/setup-new-machine.sh index cc17e89188b7eb..cbaedd2d0a9dcc 100755 --- a/ci/setup-new-buildkite-agent/setup-new-machine.sh +++ b/ci/setup-new-buildkite-agent/setup-new-machine.sh @@ -36,9 +36,7 @@ usermod -aG docker "$SETUP_USER" "$HERE"/disable-networkd-wait.sh "$SOLANA_ROOT"/net/scripts/install-earlyoom.sh -"$SOLANA_ROOT"/net/scripts/install-nodejs.sh "$SOLANA_ROOT"/net/scripts/localtime.sh -"$SOLANA_ROOT"/net/scripts/install-redis.sh "$SOLANA_ROOT"/net/scripts/install-rsync.sh "$SOLANA_ROOT"/net/scripts/install-libssl-compatability.sh diff --git a/net/scripts/install-nodejs.sh b/net/scripts/install-nodejs.sh deleted file mode 100755 index c936f1821d4b2f..00000000000000 --- a/net/scripts/install-nodejs.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# -# Reference: https://github.com/nodesource/distributions/blob/master/README.md#deb -# -set -ex - -[[ $(uname) = Linux ]] || exit 1 -[[ $USER = root ]] || exit 1 - -# Install node/npm -curl -sL https://deb.nodesource.com/setup_10.x | bash - -apt-get install -y nodejs -node --version -npm --version - -# Install yarn -curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - -echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list -apt-get update -qq -apt-get install -y yarn -yarn --version diff --git a/net/scripts/install-redis.sh b/net/scripts/install-redis.sh deleted file mode 100755 index fcc9169129e0a1..00000000000000 --- a/net/scripts/install-redis.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -set -ex - -[[ $(uname) = Linux ]] || exit 1 -[[ $USER = root ]] || exit 1 - -add-apt-repository -y ppa:chris-lea/redis-server -apt-get --assume-yes install redis - -systemctl 
enable redis-server.service - -REDIS_CONF=/etc/redis/redis.conf - -if grep -q "^maxmemory " $REDIS_CONF; then - echo "setting maxmemory" - sed -i '/^maxmemory .*/ s//maxmemory 8gb/' $REDIS_CONF -else - echo "maxmemory not present: appending setting" - cat << EOF >> $REDIS_CONF - -# limit set by solana/net/scripts/install-redis.sh -maxmemory 8gb -EOF - -fi - -if grep -q "^maxmemory-policy " $REDIS_CONF; then - echo "setting maxmemory-policy" - sed -i '/^maxmemory-policy .*/ s//maxmemory-policy allkeys-lru/' $REDIS_CONF -else - echo "maxmemory-policy not present: appending setting" - cat << EOF >> $REDIS_CONF -# limit set by solana/net/scripts/install-redis.sh -maxmemory-policy allkeys-lru - -EOF -fi - -service redis-server restart From 47511999bbda8daac1c4b1f6a9ad0e8e8eb687f0 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Fri, 13 Oct 2023 08:00:41 +0100 Subject: [PATCH 332/407] bank: do not remove trailing 0 bytes from return data (#33639) This is creating havoc for Solang, as the return data is borsh encoded and therefore `u64` values like 0x100 get truncated. 
--- runtime/src/bank.rs | 12 +++--------- runtime/src/bank/tests.rs | 9 +++++---- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6f220a810e3f59..8d6c15c10afb62 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4955,7 +4955,7 @@ impl Bank { let ExecutionRecord { accounts, - mut return_data, + return_data, touched_account_count, accounts_resize_delta, } = transaction_context.into(); @@ -4977,14 +4977,8 @@ impl Bank { saturating_add_assign!(timings.details.changed_account_count, touched_account_count); let accounts_data_len_delta = status.as_ref().map_or(0, |_| accounts_resize_delta); - let return_data = if enable_return_data_recording { - if let Some(end_index) = return_data.data.iter().rposition(|&x| x != 0) { - let end_index = end_index.saturating_add(1); - return_data.data.truncate(end_index); - Some(return_data) - } else { - None - } + let return_data = if enable_return_data_recording && !return_data.data.is_empty() { + Some(return_data) } else { None }; diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 69f9d18e5a9b8f..ef1553d9addf31 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -9713,9 +9713,9 @@ fn test_tx_return_data() { let mut return_data = [0u8; MAX_RETURN_DATA]; if !instruction_data.is_empty() { let index = usize::from_le_bytes(instruction_data.try_into().unwrap()); - return_data[index] = 1; + return_data[index / 2] = 1; transaction_context - .set_return_data(mock_program_id, return_data.to_vec()) + .set_return_data(mock_program_id, return_data[..index + 1].to_vec()) .unwrap(); } Ok(()) @@ -9767,8 +9767,9 @@ fn test_tx_return_data() { if let Some(index) = index { let return_data = return_data.unwrap(); assert_eq!(return_data.program_id, mock_program_id); - let mut expected_data = vec![0u8; index]; - expected_data.push(1u8); + let mut expected_data = vec![0u8; index + 1]; + // include some trailing zeros + expected_data[index / 2] 
= 1; assert_eq!(return_data.data, expected_data); } else { assert!(return_data.is_none()); From fd9297775a4b2e0e3f4a6f8bf4ed757e81ea1ad0 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 13 Oct 2023 07:21:18 -0700 Subject: [PATCH 333/407] Prevent delay_visibility_of_program_deployment feature from cache pruning (#33689) --- runtime/src/bank.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 8d6c15c10afb62..4bbd825579d0e7 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -8170,7 +8170,6 @@ impl Bank { feature_set::enable_partitioned_epoch_reward::id(), feature_set::disable_deploy_of_alloc_free_syscall::id(), feature_set::last_restart_slot_sysvar::id(), - feature_set::delay_visibility_of_program_deployment::id(), feature_set::remaining_compute_units_syscall_enabled::id(), ]; if !only_apply_transitions_for_new_features From 09e858d93971ccc768080fd5b658ecb7c8570908 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 13 Oct 2023 07:54:37 -0700 Subject: [PATCH 334/407] Handle cargo registry index lookup requests (#33681) --- Cargo.lock | 82 +-------------------- Cargo.toml | 2 - cargo-registry/Cargo.toml | 3 +- cargo-registry/src/main.rs | 20 +++-- cargo-registry/src/publisher.rs | 81 ++++++++++++--------- cargo-registry/src/sparse_index.rs | 113 ++++++++++++++++++++++++++--- 6 files changed, 165 insertions(+), 136 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c0261886a6e459..ffec7fbc909432 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2209,21 +2209,6 @@ version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" -[[package]] -name = "git2" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf97ba92db08df386e10c8ede66a2a0369bd277090afd8710e19e38de9ec0cd" -dependencies = [ - "bitflags 2.3.3", - "libc", - "libgit2-sys", - "log", - "openssl-probe", - 
"openssl-sys", - "url 2.4.1", -] - [[package]] name = "glob" version = "0.3.0" @@ -2462,12 +2447,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "http-range" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21dec9db110f5f872ed9699c3ecf50cf16f423502706ba5c72462e28d3157573" - [[package]] name = "httparse" version = "1.8.0" @@ -2542,25 +2521,6 @@ dependencies = [ "tokio-rustls", ] -[[package]] -name = "hyper-staticfile" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "318ca89e4827e7fe4ddd2824f52337239796ae8ecc761a663324407dc3d8d7e7" -dependencies = [ - "futures-util", - "http", - "http-range", - "httpdate", - "hyper", - "mime_guess", - "percent-encoding 2.3.0", - "rand 0.8.5", - "tokio", - "url 2.4.1", - "winapi 0.3.9", -] - [[package]] name = "hyper-timeout" version = "0.4.1" @@ -2912,20 +2872,6 @@ version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" -[[package]] -name = "libgit2-sys" -version = "0.16.1+1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2a2bb3680b094add03bb3732ec520ece34da31a8cd2d633d1389d0f0fb60d0c" -dependencies = [ - "cc", - "libc", - "libssh2-sys", - "libz-sys", - "openssl-sys", - "pkg-config", -] - [[package]] name = "libloading" version = "0.7.4" @@ -3005,20 +2951,6 @@ dependencies = [ "libsecp256k1-core", ] -[[package]] -name = "libssh2-sys" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc8a030b787e2119a731f1951d6a773e2280c660f8ec4b0f5e1505a386e71ee" -dependencies = [ - "cc", - "libc", - "libz-sys", - "openssl-sys", - "pkg-config", - "vcpkg", -] - [[package]] name = "libz-sys" version = "1.1.3" @@ -3026,7 +2958,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"de5435b8549c16d423ed0c03dbaafe57cf6c3344744f1242520d59c9d8ecec66" dependencies = [ "cc", - "libc", "pkg-config", "vcpkg", ] @@ -3182,16 +3113,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "min-max-heap" version = "1.3.0" @@ -5565,13 +5486,12 @@ version = "1.18.0" dependencies = [ "clap 2.33.3", "flate2", - "git2", "hyper", - "hyper-staticfile", "log", "rustc_version 0.4.0", "serde", "serde_json", + "sha2 0.10.8", "solana-clap-utils", "solana-cli", "solana-cli-config", diff --git a/Cargo.toml b/Cargo.toml index c63b62a47c9782..f26209e5104929 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -206,7 +206,6 @@ gag = "1.0.0" generic-array = { version = "0.14.7", default-features = false } gethostname = "0.2.3" getrandom = "0.2.10" -git2 = "0.18.1" goauth = "0.13.1" hex = "0.4.3" hidapi = { version = "2.4.1", default-features = false } @@ -216,7 +215,6 @@ http = "0.2.9" humantime = "2.0.1" hyper = "0.14.27" hyper-proxy = "0.9.1" -hyper-staticfile = "0.9.5" im = "15.1.0" index_list = "0.2.7" indexmap = "2.0.2" diff --git a/cargo-registry/Cargo.toml b/cargo-registry/Cargo.toml index 43aed1f4fa2097..afc5bf363b0bab 100644 --- a/cargo-registry/Cargo.toml +++ b/cargo-registry/Cargo.toml @@ -12,12 +12,11 @@ edition = { workspace = true } [dependencies] clap = { workspace = true } flate2 = { workspace = true } -git2 = { workspace = true } hyper = { workspace = true, features = ["full"] } -hyper-staticfile = { workspace = true } log = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } +sha2 = { workspace = true } solana-clap-utils = { workspace = true } 
solana-cli = { workspace = true } solana-cli-config = { workspace = true } diff --git a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs index 60227fa32a9962..4ba61c917969b7 100644 --- a/cargo-registry/src/main.rs +++ b/cargo-registry/src/main.rs @@ -3,6 +3,7 @@ use { crate::{ client::Client, publisher::{Error, Publisher}, + sparse_index::RegistryIndex, }, hyper::{ body, @@ -30,15 +31,17 @@ impl CargoRegistryService { async fn handle_publish_request( request: hyper::Request, client: Arc, + index: Arc, ) -> hyper::Response { info!("Handling request to publish the crate"); let bytes = body::to_bytes(request.into_body()).await; match bytes { Ok(data) => { - let Ok(result) = - tokio::task::spawn_blocking(move || Publisher::publish_crate(data, client)) - .await + let Ok(result) = tokio::task::spawn_blocking(move || { + Publisher::publish_crate(data, client, index) + }) + .await else { return response_builder::error_response( hyper::StatusCode::INTERNAL_SERVER_ERROR, @@ -218,7 +221,7 @@ impl CargoRegistryService { } async fn handler( - index: sparse_index::RegistryIndex, + index: Arc, request: hyper::Request, client: Arc, ) -> Result, Error> { @@ -257,7 +260,7 @@ impl CargoRegistryService { "Invalid length of the request.", ) } else { - Self::handle_publish_request(request, client.clone()).await + Self::handle_publish_request(request, client.clone(), index.clone()).await } } "unyank" => Self::handle_unyank_request(path, &request), @@ -297,7 +300,10 @@ async fn main() { let client = Arc::new(Client::new().expect("Failed to get RPC Client instance")); let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), client.port); - let index = sparse_index::RegistryIndex::new("/index", &client.server_url); + let index = Arc::new(sparse_index::RegistryIndex::new( + "/index", + &client.server_url, + )); let registry_service = make_service_fn(move |_| { let client_inner = client.clone(); @@ -310,7 +316,7 @@ async fn main() { }); let server = 
Server::bind(&bind_addr).serve(registry_service); - info!("Server running on on http://{}", bind_addr); + info!("Server running on http://{}", bind_addr); let _ = server.await; } diff --git a/cargo-registry/src/publisher.rs b/cargo-registry/src/publisher.rs index a712da35895e1c..5940191b46dcaf 100644 --- a/cargo-registry/src/publisher.rs +++ b/cargo-registry/src/publisher.rs @@ -1,10 +1,14 @@ use { - crate::client::{Client, ClientConfig}, + crate::{ + client::{Client, ClientConfig}, + sparse_index::{IndexEntry, RegistryIndex}, + }, flate2::read::GzDecoder, hyper::body::Bytes, log::*, serde::{Deserialize, Serialize}, serde_json::from_slice, + sha2::{Digest, Sha256}, solana_cli::program_v4::{process_deploy_program, read_and_verify_elf}, solana_sdk::{ signature::{Keypair, Signer}, @@ -22,11 +26,11 @@ use { tempfile::{tempdir, TempDir}, }; -pub type Error = Box; +pub(crate) type Error = Box; #[derive(Debug, Deserialize, Serialize)] #[serde(rename_all = "lowercase")] -enum DependencyType { +pub(crate) enum DependencyType { Dev, Build, Normal, @@ -34,39 +38,39 @@ enum DependencyType { #[allow(dead_code)] #[derive(Debug, Deserialize)] -struct Dependency { - name: String, - version_req: String, - features: Vec, - optional: bool, - default_features: bool, - target: Option, - kind: DependencyType, - registry: Option, - explicit_name_in_toml: Option, +pub(crate) struct Dependency { + pub name: String, + pub version_req: String, + pub features: Vec, + pub optional: bool, + pub default_features: bool, + pub target: Option, + pub kind: DependencyType, + pub registry: Option, + pub explicit_name_in_toml: Option, } #[derive(Debug, Deserialize)] #[allow(unused)] -struct PackageMetaData { - name: String, - vers: String, - deps: Vec, - features: BTreeMap>, - authors: Vec, - description: Option, - documentation: Option, - homepage: Option, - readme: Option, - readme_file: Option, - keywords: Vec, - categories: Vec, - license: Option, - license_file: Option, - repository: Option, - 
badges: BTreeMap>, - links: Option, - rust_version: Option, +pub(crate) struct PackageMetaData { + pub name: String, + pub vers: String, + pub deps: Vec, + pub features: BTreeMap>, + pub authors: Vec, + pub description: Option, + pub documentation: Option, + pub homepage: Option, + pub readme: Option, + pub readme_file: Option, + pub keywords: Vec, + pub categories: Vec, + pub license: Option, + pub license_file: Option, + pub repository: Option, + pub badges: BTreeMap>, + pub links: Option, + pub rust_version: Option, } impl PackageMetaData { @@ -86,7 +90,7 @@ impl PackageMetaData { } } -pub struct Publisher {} +pub(crate) struct Publisher {} impl Publisher { fn make_path>(tempdir: &TempDir, meta: &PackageMetaData, append: P) -> PathBuf { @@ -107,12 +111,17 @@ impl Publisher { Ok(library_name.to_string()) } - pub(crate) fn publish_crate(bytes: Bytes, client: Arc) -> Result<(), Error> { + pub(crate) fn publish_crate( + bytes: Bytes, + client: Arc, + index: Arc, + ) -> Result<(), Error> { let (meta_data, offset) = PackageMetaData::new(&bytes)?; let (_crate_file_length, length_size) = PackageMetaData::read_u32_length(&bytes.slice(offset..))?; let crate_bytes = bytes.slice(offset.saturating_add(length_size)..); + let crate_cksum = format!("{:x}", Sha256::digest(&crate_bytes)); let decoder = GzDecoder::new(crate_bytes.as_ref()); let mut archive = Archive::new(decoder); @@ -154,6 +163,10 @@ impl Publisher { format!("Failed to deploy the program: {}", e) })?; + let mut entry: IndexEntry = meta_data.into(); + entry.cksum = crate_cksum; + index.insert_entry(entry)?; + info!("Successfully deployed the program"); Ok(()) } diff --git a/cargo-registry/src/sparse_index.rs b/cargo-registry/src/sparse_index.rs index 59b9b88985c445..e29a581c1c7819 100644 --- a/cargo-registry/src/sparse_index.rs +++ b/cargo-registry/src/sparse_index.rs @@ -1,7 +1,11 @@ use { - crate::response_builder, + crate::{ + publisher::{Dependency, Error, PackageMetaData}, + response_builder, + }, log::info, 
serde::{Deserialize, Serialize}, + std::{collections::BTreeMap, sync::RwLock}, }; #[derive(Debug, Default, Deserialize, Serialize)] @@ -10,14 +14,68 @@ struct RegistryConfig { api: Option, } -#[derive(Clone)] -pub struct RegistryIndex { +pub(crate) struct RegistryIndex { pub(crate) index_root: String, config: String, + index: RwLock>, +} + +#[derive(Serialize)] +pub(crate) struct IndexEntryDep { + pub name: String, + pub req: String, + pub features: Vec, + pub optional: bool, + pub default_features: bool, + pub target: Option, + pub kind: String, + pub registry: Option, + pub package: Option, +} + +impl From for IndexEntryDep { + fn from(v: Dependency) -> Self { + IndexEntryDep { + name: v.name, + req: v.version_req, + features: v.features, + optional: v.optional, + default_features: v.default_features, + target: v.target, + kind: serde_json::to_string(&v.kind).expect("Failed to stringify dep kind"), + registry: v.registry, + package: None, + } + } +} + +#[derive(Serialize)] +pub(crate) struct IndexEntry { + pub name: String, + pub vers: String, + pub deps: Vec, + pub cksum: String, + pub features: BTreeMap>, + pub yanked: bool, + pub links: Option, +} + +impl From for IndexEntry { + fn from(v: PackageMetaData) -> Self { + IndexEntry { + name: v.name, + vers: v.vers, + deps: v.deps.into_iter().map(|v| v.into()).collect(), + cksum: String::new(), + features: v.features, + yanked: false, + links: v.links, + } + } } impl RegistryIndex { - pub fn new(root: &str, server_url: &str) -> Self { + pub(crate) fn new(root: &str, server_url: &str) -> Self { let registry_config = RegistryConfig { dl: format!("{}/api/v1/crates", server_url), api: Some(server_url.to_string()), @@ -29,10 +87,14 @@ impl RegistryIndex { Self { index_root: root.to_string(), config, + index: RwLock::new(BTreeMap::new()), } } - pub fn handler(&self, request: hyper::Request) -> hyper::Response { + pub(crate) fn handler( + &self, + request: hyper::Request, + ) -> hyper::Response { let path = 
request.uri().path(); let expected_root = self.index_root.as_str(); if !path.starts_with(expected_root) { @@ -53,7 +115,17 @@ impl RegistryIndex { return response_builder::success_response_str(&self.config); } - Self::handle_crate_lookup_request(path) + self.handle_crate_lookup_request(path) + } + + pub(crate) fn insert_entry(&self, entry: IndexEntry) -> Result<(), Error> { + let mut write_index = self + .index + .write() + .map_err(|e| format!("Failed to lock the index for writing: {}", e))?; + info!("Inserting {}-{} in registry index", entry.name, entry.vers); + write_index.insert(entry.name.clone(), entry); + Ok(()) } fn get_crate_name_from_path(path: &str) -> Option<&str> { @@ -78,7 +150,7 @@ impl RegistryIndex { .then_some(crate_name) } - fn handle_crate_lookup_request(path: &str) -> hyper::Response { + fn handle_crate_lookup_request(&self, path: &str) -> hyper::Response { let Some(crate_name) = Self::get_crate_name_from_path(path) else { return response_builder::error_response( hyper::StatusCode::BAD_REQUEST, @@ -86,10 +158,31 @@ impl RegistryIndex { ); }; - // Fetch the index information for the crate - info!("Received a request to fetch {:?}", crate_name); + info!("Looking up index for {:?}", crate_name); + + let Ok(read_index) = self.index.read() else { + return response_builder::error_response( + hyper::StatusCode::INTERNAL_SERVER_ERROR, + "Internal error. Failed to lock the index for reading", + ); + }; + + let Some(entry) = read_index.get(crate_name) else { + // The index currently doesn't contain the program entry. + // Fetch the program information from the network using RPC client. + // In the meanwhile, return empty success response, so that the registry + // client continues to poll us for the index information. + return response_builder::success_response(); + }; + + let Ok(response) = serde_json::to_string(entry) else { + return response_builder::error_response( + hyper::StatusCode::INTERNAL_SERVER_ERROR, + "Internal error. 
index entry is corrupted", + ); + }; - response_builder::success_response() + response_builder::success_response_str(response.as_str()) } } From 1155d462664a74a7c127170664936b2ebf05d7b7 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 13 Oct 2023 12:13:45 -0700 Subject: [PATCH 335/407] Add CLI command to show/dump v4 programs (#33693) --- cli-output/src/cli_output.rs | 66 ++++++++++ cli/src/program_v4.rs | 249 ++++++++++++++++++++++++++++++++++- 2 files changed, 311 insertions(+), 4 deletions(-) diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index daf522c60055f4..8fe188ad7c970e 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -2148,6 +2148,72 @@ impl fmt::Display for CliProgram { } } +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliProgramV4 { + pub program_id: String, + pub owner: String, + pub authority: String, + pub last_deploy_slot: u64, + pub status: String, + pub data_len: usize, +} +impl QuietDisplay for CliProgramV4 {} +impl VerboseDisplay for CliProgramV4 {} +impl fmt::Display for CliProgramV4 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f)?; + writeln_name_value(f, "Program Id:", &self.program_id)?; + writeln_name_value(f, "Owner:", &self.owner)?; + writeln_name_value(f, "Authority:", &self.authority)?; + writeln_name_value( + f, + "Last Deployed In Slot:", + &self.last_deploy_slot.to_string(), + )?; + writeln_name_value(f, "Status:", &self.status)?; + writeln_name_value( + f, + "Data Length:", + &format!("{:?} ({:#x?}) bytes", self.data_len, self.data_len), + )?; + Ok(()) + } +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliProgramsV4 { + pub programs: Vec, +} +impl QuietDisplay for CliProgramsV4 {} +impl VerboseDisplay for CliProgramsV4 {} +impl fmt::Display for CliProgramsV4 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f)?; + writeln!( + f, + "{}", + 
style(format!( + "{:<44} | {:<9} | {:<44} | {:<10}", + "Program Id", "Slot", "Authority", "Status" + )) + .bold() + )?; + for program in self.programs.iter() { + writeln!( + f, + "{}", + &format!( + "{:<44} | {:<9} | {:<44} | {:<10}", + program.program_id, program.last_deploy_slot, program.authority, program.status, + ) + )?; + } + Ok(()) + } +} + #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct CliUpgradeableProgram { diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index b676656fedfa11..41a8fa9de32b61 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -9,12 +9,13 @@ use { }, clap::{App, AppSettings, Arg, ArgMatches, SubCommand}, log::*, + solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig}, solana_clap_utils::{ input_parsers::{pubkey_of, pubkey_of_signer, signer_of}, - input_validators::is_valid_signer, + input_validators::{is_valid_pubkey, is_valid_signer}, keypair::{DefaultSigner, SignerIndex}, }, - solana_cli_output::{CliProgramId, OutputFormat}, + solana_cli_output::{CliProgramId, CliProgramV4, CliProgramsV4, OutputFormat}, solana_client::{ connection_cache::ConnectionCache, send_and_confirm_transactions_in_parallel::{ @@ -26,7 +27,10 @@ use { solana_rbpf::{elf::Executable, verifier::RequisiteVerifier}, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_rpc_client::rpc_client::RpcClient, - solana_rpc_client_api::config::RpcSendTransactionConfig, + solana_rpc_client_api::{ + config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSendTransactionConfig}, + filter::{Memcmp, RpcFilterType}, + }, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, @@ -42,7 +46,14 @@ use { system_instruction::{self, SystemError}, transaction::Transaction, }, - std::{cmp::Ordering, fs::File, io::Read, rc::Rc, sync::Arc}, + std::{ + cmp::Ordering, + fs::File, + io::{Read, Write}, + mem::size_of, + rc::Rc, + sync::Arc, + }, }; #[derive(Debug, PartialEq, Eq)] @@ -66,6 +77,15 @@ pub 
enum ProgramV4CliCommand { program_address: Pubkey, authority_signer_index: SignerIndex, }, + Show { + account_pubkey: Option, + authority: Pubkey, + all: bool, + }, + Dump { + account_pubkey: Option, + output_location: String, + }, } pub trait ProgramV4SubCommands { @@ -177,6 +197,51 @@ impl ProgramV4SubCommands for App<'_, '_> { .help("Program authority [default: the default configured keypair]") ), ) + .subcommand( + SubCommand::with_name("show") + .about("Display information about a buffer or program") + .arg( + Arg::with_name("account") + .index(1) + .value_name("ACCOUNT_ADDRESS") + .takes_value(true) + .help("Address of the program to show") + ) + .arg( + Arg::with_name("all") + .long("all") + .conflicts_with("account") + .conflicts_with("buffer_authority") + .help("Show accounts for all authorities") + ) + .arg( + pubkey!(Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY") + .conflicts_with("all"), + "Authority [default: the default configured keypair]"), + ), + ) + .subcommand( + SubCommand::with_name("dump") + .about("Write the program data to a file") + .arg( + Arg::with_name("account") + .index(1) + .value_name("ACCOUNT_ADDRESS") + .takes_value(true) + .required(true) + .help("Address of the buffer or program") + ) + .arg( + Arg::with_name("output_location") + .index(2) + .value_name("OUTPUT_FILEPATH") + .takes_value(true) + .required(true) + .help("/path/to/program.so"), + ), + ) ) } } @@ -306,6 +371,32 @@ pub fn parse_program_v4_subcommand( signers: signer_info.signers, } } + ("show", Some(matches)) => { + let authority = + if let Some(authority) = pubkey_of_signer(matches, "authority", wallet_manager)? { + authority + } else { + default_signer + .signer_from_path(matches, wallet_manager)? 
+ .pubkey() + }; + + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Show { + account_pubkey: pubkey_of(matches, "account"), + authority, + all: matches.is_present("all"), + }), + signers: vec![], + } + } + ("dump", Some(matches)) => CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Dump { + account_pubkey: pubkey_of(matches, "account"), + output_location: matches.value_of("output_location").unwrap().to_string(), + }), + signers: vec![], + }, _ => unreachable!(), }; Ok(response) @@ -415,6 +506,20 @@ pub fn process_program_v4_subcommand( &ProgramV4CommandConfig::new_from_cli_config(config, authority_signer_index), program_address, ), + ProgramV4CliCommand::Show { + account_pubkey, + authority, + all, + } => process_show(rpc_client, config, *account_pubkey, *authority, *all), + ProgramV4CliCommand::Dump { + account_pubkey, + output_location, + } => process_dump( + rpc_client, + config.commitment, + *account_pubkey, + output_location, + ), } } @@ -601,6 +706,78 @@ fn process_finalize_program( Ok(config.output_format.formatted_string(&program_id)) } +fn process_show( + rpc_client: Arc, + config: &CliConfig, + account_pubkey: Option, + authority: Pubkey, + all: bool, +) -> ProcessResult { + if let Some(account_pubkey) = account_pubkey { + if let Some(account) = rpc_client + .get_account_with_commitment(&account_pubkey, config.commitment)? 
+ .value + { + if loader_v4::check_id(&account.owner) { + if let Ok(state) = solana_loader_v4_program::get_state(&account.data) { + let status = match state.status { + LoaderV4Status::Retracted => "retracted", + LoaderV4Status::Deployed => "deployed", + LoaderV4Status::Finalized => "finalized", + }; + Ok(config.output_format.formatted_string(&CliProgramV4 { + program_id: account_pubkey.to_string(), + owner: account.owner.to_string(), + authority: state.authority_address.to_string(), + last_deploy_slot: state.slot, + data_len: account + .data + .len() + .saturating_sub(LoaderV4State::program_data_offset()), + status: status.to_string(), + })) + } else { + Err(format!("{account_pubkey} SBF program state is invalid").into()) + } + } else { + Err(format!("{account_pubkey} is not an SBF program").into()) + } + } else { + Err(format!("Unable to find the account {account_pubkey}").into()) + } + } else { + let authority_pubkey = if all { None } else { Some(authority) }; + let programs = get_programs(rpc_client, authority_pubkey)?; + Ok(config.output_format.formatted_string(&programs)) + } +} + +fn process_dump( + rpc_client: Arc, + commitment: CommitmentConfig, + account_pubkey: Option, + output_location: &str, +) -> ProcessResult { + if let Some(account_pubkey) = account_pubkey { + if let Some(account) = rpc_client + .get_account_with_commitment(&account_pubkey, commitment)? 
+ .value + { + if loader_v4::check_id(&account.owner) { + let mut f = File::create(output_location)?; + f.write_all(&account.data[LoaderV4State::program_data_offset()..])?; + Ok(format!("Wrote program to {output_location}")) + } else { + Err(format!("{account_pubkey} is not an SBF program").into()) + } + } else { + Err(format!("Unable to find the account {account_pubkey}").into()) + } + } else { + Err("No account specified".into()) + } +} + fn check_payer( rpc_client: &RpcClient, config: &ProgramV4CommandConfig, @@ -1025,6 +1202,70 @@ fn build_truncate_instructions( } } +fn get_accounts_with_filter( + rpc_client: Arc, + filters: Vec, + length: usize, +) -> Result, Box> { + let results = rpc_client.get_program_accounts_with_config( + &loader_v4::id(), + RpcProgramAccountsConfig { + filters: Some(filters), + account_config: RpcAccountInfoConfig { + encoding: Some(UiAccountEncoding::Base64), + data_slice: Some(UiDataSliceConfig { offset: 0, length }), + ..RpcAccountInfoConfig::default() + }, + ..RpcProgramAccountsConfig::default() + }, + )?; + Ok(results) +} + +fn get_programs( + rpc_client: Arc, + authority_pubkey: Option, +) -> Result> { + let filters = if let Some(authority_pubkey) = authority_pubkey { + vec![ + (RpcFilterType::Memcmp(Memcmp::new_base58_encoded( + size_of::(), + authority_pubkey.as_ref(), + ))), + ] + } else { + vec![] + }; + + let results = + get_accounts_with_filter(rpc_client, filters, LoaderV4State::program_data_offset())?; + + let mut programs = vec![]; + for (program, account) in results.iter() { + if let Ok(state) = solana_loader_v4_program::get_state(&account.data) { + let status = match state.status { + LoaderV4Status::Retracted => "retracted", + LoaderV4Status::Deployed => "deployed", + LoaderV4Status::Finalized => "finalized", + }; + programs.push(CliProgramV4 { + program_id: program.to_string(), + owner: account.owner.to_string(), + authority: state.authority_address.to_string(), + last_deploy_slot: state.slot, + status: 
status.to_string(), + data_len: account + .data + .len() + .saturating_sub(LoaderV4State::program_data_offset()), + }); + } else { + return Err(format!("Error parsing Program account {program}").into()); + } + } + Ok(CliProgramsV4 { programs }) +} + #[cfg(test)] mod tests { use { From a3f85aba21f6c43608a64ddd22a2ac0312fdc5ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 13 Oct 2023 21:59:48 +0200 Subject: [PATCH 336/407] Refactor - LoadedPrograms part 2 (#33694) --- program-runtime/src/loaded_programs.rs | 242 +++++++++++++------------ runtime/src/bank.rs | 20 +- runtime/src/bank/tests.rs | 22 ++- 3 files changed, 154 insertions(+), 130 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 43061f19a0758e..0ded17ee7877de 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -56,9 +56,12 @@ pub trait ForkGraph { /// Provides information about current working slot, and its ancestors pub trait WorkingSlot { - /// Returns the current slot value + /// Returns the current slot fn current_slot(&self) -> Slot; + /// Returns the epoch of the current slot + fn current_epoch(&self) -> Epoch; + /// Returns true if the `other` slot is an ancestor of self, false otherwise fn is_ancestor(&self, other: Slot) -> bool; } @@ -99,6 +102,20 @@ impl Debug for LoadedProgramType { } } +impl LoadedProgramType { + /// Returns a reference to its environment if it has one + pub fn get_environment(&self) -> Option<&ProgramRuntimeEnvironment> { + match self { + LoadedProgramType::LegacyV0(program) + | LoadedProgramType::LegacyV1(program) + | LoadedProgramType::Typed(program) => Some(program.get_loader()), + #[cfg(test)] + LoadedProgramType::TestLoaded(environment) => Some(environment), + _ => None, + } + } +} + #[derive(Debug, Default)] pub struct LoadedProgram { /// The program of this entry @@ -338,16 +355,8 @@ impl LoadedProgram { } pub fn to_unloaded(&self) 
-> Option { - let env = match &self.program { - LoadedProgramType::LegacyV0(program) - | LoadedProgramType::LegacyV1(program) - | LoadedProgramType::Typed(program) => program.get_loader().clone(), - #[cfg(test)] - LoadedProgramType::TestLoaded(env) => env.clone(), - _ => return None, - }; Some(Self { - program: LoadedProgramType::Unloaded(env), + program: LoadedProgramType::Unloaded(self.program.get_environment()?.clone()), account_size: self.account_size, deployment_slot: self.deployment_slot, effective_slot: self.effective_slot, @@ -523,6 +532,11 @@ pub enum LoadedProgramMatchCriteria { } impl LoadedPrograms { + /// Returns the current environments depending on the given epoch + pub fn get_environments_for_epoch(&self, _epoch: Epoch) -> &ProgramRuntimeEnvironments { + &self.environments + } + /// Refill the cache with a single entry. It's typically called during transaction loading, /// when the cache doesn't contain the entry corresponding to program `key`. /// The function dedupes the cache, in case some other thread replenished the entry in parallel. 
@@ -586,41 +600,13 @@ impl LoadedPrograms { pub fn prune_feature_set_transition(&mut self) { for second_level in self.entries.values_mut() { second_level.retain(|entry| { - let retain = match &entry.program { - LoadedProgramType::Builtin(_) - | LoadedProgramType::DelayVisibility - | LoadedProgramType::Closed => true, - LoadedProgramType::LegacyV0(program) | LoadedProgramType::LegacyV1(program) - if Arc::ptr_eq( - program.get_loader(), - &self.environments.program_runtime_v1, - ) => - { - true - } - LoadedProgramType::Unloaded(environment) - | LoadedProgramType::FailedVerification(environment) - if Arc::ptr_eq(environment, &self.environments.program_runtime_v1) - || Arc::ptr_eq(environment, &self.environments.program_runtime_v2) => - { - true - } - LoadedProgramType::Typed(program) - if Arc::ptr_eq( - program.get_loader(), - &self.environments.program_runtime_v2, - ) => - { - true - } - _ => false, - }; - if !retain { - self.stats - .prunes_environment - .fetch_add(1, Ordering::Relaxed); + if Self::matches_environment(entry, &self.environments) { + return true; } - retain + self.stats + .prunes_environment + .fetch_add(1, Ordering::Relaxed); + false }); } self.remove_programs_with_no_entries(); @@ -688,6 +674,17 @@ impl LoadedPrograms { } } + fn matches_environment( + entry: &Arc, + environments: &ProgramRuntimeEnvironments, + ) -> bool { + let Some(environment) = entry.program.get_environment() else { + return true; + }; + Arc::ptr_eq(environment, &environments.program_runtime_v1) + || Arc::ptr_eq(environment, &environments.program_runtime_v2) + } + fn matches_loaded_program_criteria( program: &Arc, criteria: &LoadedProgramMatchCriteria, @@ -727,6 +724,7 @@ impl LoadedPrograms { working_slot: &S, keys: impl Iterator, ) -> ExtractedPrograms { + let environments = self.get_environments_for_epoch(working_slot.current_epoch()); let mut missing = Vec::new(); let mut unloaded = Vec::new(); let found = keys @@ -738,27 +736,22 @@ impl LoadedPrograms { || 
entry.deployment_slot == current_slot || working_slot.is_ancestor(entry.deployment_slot) { - if !Self::is_entry_usable(entry, current_slot, &match_criteria) { - missing.push((key, count)); - return None; - } + if current_slot >= entry.effective_slot { + if !Self::is_entry_usable(entry, current_slot, &match_criteria) { + missing.push((key, count)); + return None; + } - if let LoadedProgramType::Unloaded(environment) = &entry.program { - if Arc::ptr_eq(environment, &self.environments.program_runtime_v1) - || Arc::ptr_eq( - environment, - &self.environments.program_runtime_v2, - ) - { - // if the environment hasn't changed since the entry was unloaded. - unloaded.push((key, count)); - } else { + if !Self::matches_environment(entry, environments) { missing.push((key, count)); + return None; + } + + if let LoadedProgramType::Unloaded(_environment) = &entry.program { + unloaded.push((key, count)); + return None; } - return None; - } - if current_slot >= entry.effective_slot { let mut usage_count = entry.tx_usage_counter.load(Ordering::Relaxed); saturating_add_assign!(usage_count, count); @@ -794,7 +787,7 @@ impl LoadedPrograms { loaded: LoadedProgramsForTxBatch { entries: found, slot: working_slot.current_slot(), - environments: self.environments.clone(), + environments: environments.clone(), }, missing, unloaded, @@ -930,13 +923,16 @@ mod tests { use { crate::loaded_programs::{ BlockRelation, ExtractedPrograms, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, - LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, WorkingSlot, - DELAY_VISIBILITY_SLOT_OFFSET, + LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + WorkingSlot, DELAY_VISIBILITY_SLOT_OFFSET, }, assert_matches::assert_matches, percentage::Percentage, solana_rbpf::vm::BuiltinProgram, - solana_sdk::{clock::Slot, pubkey::Pubkey}, + solana_sdk::{ + clock::{Epoch, Slot}, + pubkey::Pubkey, + }, std::{ ops::ControlFlow, sync::{ @@ -946,6 +942,51 @@ mod tests { }, }; 
+ static MOCK_ENVIRONMENT: std::sync::OnceLock = + std::sync::OnceLock::::new(); + + fn new_mock_cache() -> LoadedPrograms { + let mut cache = LoadedPrograms::default(); + cache.environments.program_runtime_v1 = MOCK_ENVIRONMENT + .get_or_init(|| Arc::new(BuiltinProgram::new_mock())) + .clone(); + cache + } + + fn new_test_loaded_program(deployment_slot: Slot, effective_slot: Slot) -> Arc { + new_test_loaded_program_with_usage(deployment_slot, effective_slot, AtomicU64::default()) + } + + fn new_test_loaded_program_with_usage( + deployment_slot: Slot, + effective_slot: Slot, + usage_counter: AtomicU64, + ) -> Arc { + new_test_loaded_program_with_usage_and_expiry( + deployment_slot, + effective_slot, + usage_counter, + None, + ) + } + + fn new_test_loaded_program_with_usage_and_expiry( + deployment_slot: Slot, + effective_slot: Slot, + usage_counter: AtomicU64, + expiry: Option, + ) -> Arc { + Arc::new(LoadedProgram { + program: LoadedProgramType::TestLoaded(MOCK_ENVIRONMENT.get().unwrap().clone()), + account_size: 0, + deployment_slot, + effective_slot, + maybe_expiration_slot: expiry, + tx_usage_counter: usage_counter, + ix_usage_counter: AtomicU64::default(), + }) + } + fn new_test_builtin_program(deployment_slot: Slot, effective_slot: Slot) -> Arc { Arc::new(LoadedProgram { program: LoadedProgramType::Builtin(BuiltinProgram::new_mock()), @@ -1011,7 +1052,7 @@ mod tests { let mut programs = vec![]; let mut num_total_programs: usize = 0; - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); let program1 = Pubkey::new_unique(); let program1_deployment_slots = [0, 10, 20]; @@ -1177,7 +1218,7 @@ mod tests { #[test] fn test_usage_count_of_unloaded_program() { - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); let program = Pubkey::new_unique(); let num_total_programs = 6; @@ -1229,7 +1270,7 @@ mod tests { #[test] fn test_replace_tombstones() { - let mut cache = LoadedPrograms::default(); + let mut cache = 
new_mock_cache(); let program1 = Pubkey::new_unique(); let env = Arc::new(BuiltinProgram::new_mock()); set_tombstone( @@ -1261,7 +1302,7 @@ mod tests { assert_eq!(tombstone.deployment_slot, 100); assert_eq!(tombstone.effective_slot, 100); - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); let program1 = Pubkey::new_unique(); let tombstone = set_tombstone( &mut cache, @@ -1321,7 +1362,7 @@ mod tests { #[test] fn test_prune_empty() { - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); let fork_graph = TestForkGraph { relation: BlockRelation::Unrelated, }; @@ -1332,7 +1373,7 @@ mod tests { cache.prune(&fork_graph, 10, 0); assert!(cache.entries.is_empty()); - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); let fork_graph = TestForkGraph { relation: BlockRelation::Ancestor, }; @@ -1343,7 +1384,7 @@ mod tests { cache.prune(&fork_graph, 10, 0); assert!(cache.entries.is_empty()); - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); let fork_graph = TestForkGraph { relation: BlockRelation::Descendant, }; @@ -1354,7 +1395,7 @@ mod tests { cache.prune(&fork_graph, 10, 0); assert!(cache.entries.is_empty()); - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); let fork_graph = TestForkGraph { relation: BlockRelation::Unknown, }; @@ -1443,6 +1484,10 @@ mod tests { self.slot } + fn current_epoch(&self) -> Epoch { + 0 + } + fn is_ancestor(&self, other: Slot) -> bool { self.fork .iter() @@ -1452,41 +1497,6 @@ mod tests { } } - fn new_test_loaded_program(deployment_slot: Slot, effective_slot: Slot) -> Arc { - new_test_loaded_program_with_usage(deployment_slot, effective_slot, AtomicU64::default()) - } - - fn new_test_loaded_program_with_usage( - deployment_slot: Slot, - effective_slot: Slot, - usage_counter: AtomicU64, - ) -> Arc { - new_test_loaded_program_with_usage_and_expiry( - deployment_slot, - effective_slot, - usage_counter, - 
None, - ) - } - - fn new_test_loaded_program_with_usage_and_expiry( - deployment_slot: Slot, - effective_slot: Slot, - usage_counter: AtomicU64, - expiry: Option, - ) -> Arc { - let env = Arc::new(BuiltinProgram::new_mock()); - Arc::new(LoadedProgram { - program: LoadedProgramType::TestLoaded(env), - account_size: 0, - deployment_slot, - effective_slot, - maybe_expiration_slot: expiry, - tx_usage_counter: usage_counter, - ix_usage_counter: AtomicU64::default(), - }) - } - fn match_slot( table: &LoadedProgramsForTxBatch, program: &Pubkey, @@ -1502,7 +1512,7 @@ mod tests { #[test] fn test_fork_extract_and_prune() { - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); // Fork graph created for the test // 0 @@ -1883,7 +1893,7 @@ mod tests { #[test] fn test_extract_using_deployment_slot() { - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); // Fork graph created for the test // 0 @@ -1968,7 +1978,7 @@ mod tests { #[test] fn test_extract_unloaded() { - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); // Fork graph created for the test // 0 @@ -2081,13 +2091,12 @@ mod tests { assert!(match_slot(&found, &program1, 20, 22)); assert!(missing.contains(&(program2, 1))); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(unloaded.contains(&(program3, 1))); } #[test] fn test_prune_expired() { - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); // Fork graph created for the test // 0 @@ -2206,7 +2215,7 @@ mod tests { #[test] fn test_fork_prune_find_first_ancestor() { - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); // Fork graph created for the test // 0 @@ -2252,7 +2261,7 @@ mod tests { #[test] fn test_prune_by_deployment_slot() { - let mut cache = LoadedPrograms::default(); + let mut cache = new_mock_cache(); // Fork graph created for the test // 0 @@ -2371,6 +2380,7 @@ mod tests { #[test] fn 
test_usable_entries_for_slot() { + new_mock_cache(); let tombstone = Arc::new(LoadedProgram::new_tombstone(0, LoadedProgramType::Closed)); assert!(LoadedPrograms::is_entry_usable( diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4bbd825579d0e7..5227db287c9f10 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -933,6 +933,10 @@ impl WorkingSlot for Bank { self.slot } + fn current_epoch(&self) -> Epoch { + self.epoch + } + fn is_ancestor(&self, other: Slot) -> bool { self.ancestors.contains_key(&other) } @@ -4675,12 +4679,8 @@ impl Bank { } pub fn load_program(&self, pubkey: &Pubkey, reload: bool) -> Arc { - let environments = self - .loaded_programs_cache - .read() - .unwrap() - .environments - .clone(); + let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); + let environments = loaded_programs_cache.get_environments_for_epoch(self.epoch); let mut load_program_metrics = LoadProgramMetrics { program_id: pubkey.to_string(), @@ -4773,20 +4773,22 @@ impl Bank { }) .unwrap_or(LoadedProgram::new_tombstone( self.slot, - LoadedProgramType::FailedVerification(environments.program_runtime_v2), + LoadedProgramType::FailedVerification( + environments.program_runtime_v2.clone(), + ), )); Ok(loaded_program) } ProgramAccountLoadResult::InvalidV4Program => Ok(LoadedProgram::new_tombstone( self.slot, - LoadedProgramType::FailedVerification(environments.program_runtime_v2), + LoadedProgramType::FailedVerification(environments.program_runtime_v2.clone()), )), } .unwrap_or_else(|_| { LoadedProgram::new_tombstone( self.slot, - LoadedProgramType::FailedVerification(environments.program_runtime_v1), + LoadedProgramType::FailedVerification(environments.program_runtime_v1.clone()), ) }); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index ef1553d9addf31..58e44366f1d876 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -7,12 +7,15 @@ use { *, }, crate::{ - 
accounts_background_service::{PrunedBanksRequestHandler, SendDroppedBankCallback}, + accounts_background_service::{ + AbsRequestSender, PrunedBanksRequestHandler, SendDroppedBankCallback, + }, bank::replace_account::{ replace_empty_account_with_upgradeable_program, replace_non_upgradeable_program_account, ReplaceAccountError, }, bank_client::BankClient, + bank_forks::BankForks, epoch_rewards_hasher::hash_rewards_into_partitions, genesis_utils::{ self, activate_all_features, activate_feature, bootstrap_validator_stake_lamports, @@ -12502,7 +12505,8 @@ fn test_runtime_feature_enable_with_program_cache() { genesis_config .accounts .remove(&feature_set::reject_callx_r10::id()); - let root_bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let mut bank_forks = BankForks::new(Bank::new_for_tests(&genesis_config)); + let root_bank = bank_forks.root_bank(); // Test a basic transfer let amount = genesis_config.rent.minimum_balance(0); @@ -12532,7 +12536,7 @@ fn test_runtime_feature_enable_with_program_cache() { // Advance the bank so the next transaction can be submitted. goto_end_of_slot(root_bank.clone()); - let mut bank = new_from_parent(root_bank); + let bank = Arc::new(new_from_parent(root_bank)); // Compose second instruction using the same program with a different block hash let instruction2 = Instruction::new_with_bytes(program_keypair.pubkey(), &[], Vec::new()); @@ -12558,9 +12562,17 @@ fn test_runtime_feature_enable_with_program_cache() { &feature_set::reject_callx_r10::id(), &feature::create_account(&Feature { activated_at: None }, feature_account_balance), ); - bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); - // Execute after feature is enabled to check it was pruned and reverified. 
+ // Reroot to call LoadedPrograms::prune() and end the current recompilation phase + goto_end_of_slot(bank.clone()); + bank_forks.insert(Arc::into_inner(bank).unwrap()); + let bank = bank_forks.working_bank(); + bank_forks.set_root(bank.slot, &AbsRequestSender::default(), None); + + // Advance to next epoch, which starts the next recompilation phase + let bank = new_from_parent_next_epoch(bank, 1); + + // Execute after feature is enabled to check it was filtered out and reverified. let result_with_feature_enabled = bank.process_transaction(&transaction2); assert_eq!( result_with_feature_enabled, From a60d1857bf805edd6e9b5e0e039fd7e2f6f8b518 Mon Sep 17 00:00:00 2001 From: Sanjay Singh Date: Sat, 14 Oct 2023 02:23:19 +0530 Subject: [PATCH 337/407] updated inner_call for hashing fn to follow generic approach (#33128) * updated inner_call for hashing fn to follow generic approach * different hash compute budget values for all digests * fixed conflicts * reverted changes to compute_budget.rs and added 3method to trait to get compute budget values * updated type for result fn for HasherImpl * using Hash directly in result fn, got rid of HASH_BYTES and removed comment form compute_budget * updated import statement * cargo fmt -all * removed unused import and reference related warning * oops forgot semicolon * removed trailing white space --- programs/bpf_loader/src/syscalls/mod.rs | 407 ++++++++++++------------ 1 file changed, 202 insertions(+), 205 deletions(-) diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 64d1d85e5ee964..4193b9fcfc97a8 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -43,7 +43,7 @@ use { remaining_compute_units_syscall_enabled, stop_sibling_instruction_search_at_parent, stop_truncating_strings_in_syscalls, switch_to_new_elf_parser, }, - hash::{Hasher, HASH_BYTES}, + hash::{Hash, Hasher}, instruction::{ AccountMeta, InstructionError, 
ProcessedSiblingInstruction, TRANSACTION_LEVEL_STACK_HEIGHT, @@ -132,6 +132,103 @@ pub enum SyscallError { type Error = Box; +pub trait HasherImpl { + const NAME: &'static str; + type Output: AsRef<[u8]>; + + fn create_hasher() -> Self; + fn hash(&mut self, val: &[u8]); + fn result(self) -> Self::Output; + fn get_base_cost(compute_budget: &ComputeBudget) -> u64; + fn get_byte_cost(compute_budget: &ComputeBudget) -> u64; + fn get_max_slices(compute_budget: &ComputeBudget) -> u64; +} + +pub struct Sha256Hasher(Hasher); +pub struct Blake3Hasher(blake3::Hasher); +pub struct Keccak256Hasher(keccak::Hasher); + +impl HasherImpl for Sha256Hasher { + const NAME: &'static str = "Sha256"; + type Output = Hash; + + fn create_hasher() -> Self { + Sha256Hasher(Hasher::default()) + } + + fn hash(&mut self, val: &[u8]) { + self.0.hash(val); + } + + fn result(self) -> Self::Output { + self.0.result() + } + + fn get_base_cost(compute_budget: &ComputeBudget) -> u64 { + compute_budget.sha256_base_cost + } + fn get_byte_cost(compute_budget: &ComputeBudget) -> u64 { + compute_budget.sha256_byte_cost + } + fn get_max_slices(compute_budget: &ComputeBudget) -> u64 { + compute_budget.sha256_max_slices + } +} + +impl HasherImpl for Blake3Hasher { + const NAME: &'static str = "Blake3"; + type Output = blake3::Hash; + + fn create_hasher() -> Self { + Blake3Hasher(blake3::Hasher::default()) + } + + fn hash(&mut self, val: &[u8]) { + self.0.hash(val); + } + + fn result(self) -> Self::Output { + self.0.result() + } + + fn get_base_cost(compute_budget: &ComputeBudget) -> u64 { + compute_budget.sha256_base_cost + } + fn get_byte_cost(compute_budget: &ComputeBudget) -> u64 { + compute_budget.sha256_byte_cost + } + fn get_max_slices(compute_budget: &ComputeBudget) -> u64 { + compute_budget.sha256_max_slices + } +} + +impl HasherImpl for Keccak256Hasher { + const NAME: &'static str = "Keccak256"; + type Output = keccak::Hash; + + fn create_hasher() -> Self { + 
Keccak256Hasher(keccak::Hasher::default()) + } + + fn hash(&mut self, val: &[u8]) { + self.0.hash(val); + } + + fn result(self) -> Self::Output { + self.0.result() + } + + fn get_base_cost(compute_budget: &ComputeBudget) -> u64 { + compute_budget.sha256_base_cost + } + fn get_byte_cost(compute_budget: &ComputeBudget) -> u64 { + compute_budget.sha256_byte_cost + } + fn get_max_slices(compute_budget: &ComputeBudget) -> u64 { + compute_budget.sha256_max_slices + } +} + fn consume_compute_meter(invoke_context: &InvokeContext, amount: u64) -> Result<(), Error> { invoke_context.consume_checked(amount)?; Ok(()) @@ -220,10 +317,10 @@ pub fn create_program_runtime_environment_v1<'a>( )?; // Sha256 - result.register_function_hashed(*b"sol_sha256", SyscallSha256::call)?; + result.register_function_hashed(*b"sol_sha256", SyscallHash::call::)?; // Keccak256 - result.register_function_hashed(*b"sol_keccak256", SyscallKeccak256::call)?; + result.register_function_hashed(*b"sol_keccak256", SyscallHash::call::)?; // Secp256k1 Recover result.register_function_hashed(*b"sol_secp256k1_recover", SyscallSecp256k1Recover::call)?; @@ -233,7 +330,7 @@ pub fn create_program_runtime_environment_v1<'a>( result, blake3_syscall_enabled, *b"sol_blake3", - SyscallBlake3::call, + SyscallHash::call::, )?; // Elliptic Curve Operations @@ -519,6 +616,32 @@ macro_rules! declare_syscall { }; } +#[macro_export] +macro_rules! 
declare_syscallhash { + ($(#[$attr:meta])* $name:ident, $inner_call:item) => { + $(#[$attr])* + pub struct $name {} + impl $name { + $inner_call + pub fn call( + invoke_context: &mut InvokeContext, + arg_a: u64, + arg_b: u64, + arg_c: u64, + arg_d: u64, + arg_e: u64, + memory_mapping: &mut MemoryMapping, + result: &mut ProgramResult, + ) { + let converted_result: ProgramResult = Self::inner_call::( + invoke_context, arg_a, arg_b, arg_c, arg_d, arg_e, memory_mapping, + ).into(); + *result = converted_result; + } + } + }; +} + declare_syscall!( /// Abort syscall functions, called when the SBF program calls `abort()` /// LLVM will insert calls to `abort()` if it detects an untenable situation, @@ -750,136 +873,6 @@ declare_syscall!( } ); -declare_syscall!( - /// SHA256 - SyscallSha256, - fn inner_call( - invoke_context: &mut InvokeContext, - vals_addr: u64, - vals_len: u64, - result_addr: u64, - _arg4: u64, - _arg5: u64, - memory_mapping: &mut MemoryMapping, - ) -> Result { - let compute_budget = invoke_context.get_compute_budget(); - if compute_budget.sha256_max_slices < vals_len { - ic_msg!( - invoke_context, - "Sha256 hashing {} sequences in one syscall is over the limit {}", - vals_len, - compute_budget.sha256_max_slices, - ); - return Err(SyscallError::TooManySlices.into()); - } - - consume_compute_meter(invoke_context, compute_budget.sha256_base_cost)?; - - let hash_result = translate_slice_mut::( - memory_mapping, - result_addr, - HASH_BYTES as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )?; - let mut hasher = Hasher::default(); - if vals_len > 0 { - let vals = translate_slice::<&[u8]>( - memory_mapping, - vals_addr, - vals_len, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )?; - for val in vals.iter() { - let bytes = translate_slice::( - memory_mapping, - val.as_ptr() as u64, - val.len() as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )?; - let cost = 
compute_budget.mem_op_base_cost.max( - compute_budget.sha256_byte_cost.saturating_mul( - (val.len() as u64) - .checked_div(2) - .expect("div by non-zero literal"), - ), - ); - consume_compute_meter(invoke_context, cost)?; - hasher.hash(bytes); - } - } - hash_result.copy_from_slice(&hasher.result().to_bytes()); - Ok(0) - } -); - -declare_syscall!( - // Keccak256 - SyscallKeccak256, - fn inner_call( - invoke_context: &mut InvokeContext, - vals_addr: u64, - vals_len: u64, - result_addr: u64, - _arg4: u64, - _arg5: u64, - memory_mapping: &mut MemoryMapping, - ) -> Result { - let compute_budget = invoke_context.get_compute_budget(); - if compute_budget.sha256_max_slices < vals_len { - ic_msg!( - invoke_context, - "Keccak256 hashing {} sequences in one syscall is over the limit {}", - vals_len, - compute_budget.sha256_max_slices, - ); - return Err(SyscallError::TooManySlices.into()); - } - - consume_compute_meter(invoke_context, compute_budget.sha256_base_cost)?; - - let hash_result = translate_slice_mut::( - memory_mapping, - result_addr, - keccak::HASH_BYTES as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )?; - let mut hasher = keccak::Hasher::default(); - if vals_len > 0 { - let vals = translate_slice::<&[u8]>( - memory_mapping, - vals_addr, - vals_len, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )?; - for val in vals.iter() { - let bytes = translate_slice::( - memory_mapping, - val.as_ptr() as u64, - val.len() as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )?; - let cost = compute_budget.mem_op_base_cost.max( - compute_budget.sha256_byte_cost.saturating_mul( - (val.len() as u64) - .checked_div(2) - .expect("div by non-zero literal"), - ), - ); - consume_compute_meter(invoke_context, cost)?; - hasher.hash(bytes); - } - } - hash_result.copy_from_slice(&hasher.result().to_bytes()); - Ok(0) - } -); - declare_syscall!( /// secp256k1_recover SyscallSecp256k1Recover, @@ 
-1314,71 +1307,6 @@ declare_syscall!( } ); -declare_syscall!( - // Blake3 - SyscallBlake3, - fn inner_call( - invoke_context: &mut InvokeContext, - vals_addr: u64, - vals_len: u64, - result_addr: u64, - _arg4: u64, - _arg5: u64, - memory_mapping: &mut MemoryMapping, - ) -> Result { - let compute_budget = invoke_context.get_compute_budget(); - if compute_budget.sha256_max_slices < vals_len { - ic_msg!( - invoke_context, - "Blake3 hashing {} sequences in one syscall is over the limit {}", - vals_len, - compute_budget.sha256_max_slices, - ); - return Err(SyscallError::TooManySlices.into()); - } - - consume_compute_meter(invoke_context, compute_budget.sha256_base_cost)?; - - let hash_result = translate_slice_mut::( - memory_mapping, - result_addr, - blake3::HASH_BYTES as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )?; - let mut hasher = blake3::Hasher::default(); - if vals_len > 0 { - let vals = translate_slice::<&[u8]>( - memory_mapping, - vals_addr, - vals_len, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )?; - for val in vals.iter() { - let bytes = translate_slice::( - memory_mapping, - val.as_ptr() as u64, - val.len() as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )?; - let cost = compute_budget.mem_op_base_cost.max( - compute_budget.sha256_byte_cost.saturating_mul( - (val.len() as u64) - .checked_div(2) - .expect("div by non-zero literal"), - ), - ); - consume_compute_meter(invoke_context, cost)?; - hasher.hash(bytes); - } - } - hash_result.copy_from_slice(&hasher.result().to_bytes()); - Ok(0) - } -); - declare_syscall!( /// Set return data SyscallSetReturnData, @@ -2020,6 +1948,75 @@ declare_syscall!( } ); +declare_syscallhash!( + // Generic Hashing Syscall + SyscallHash, + fn inner_call( + invoke_context: &mut InvokeContext, + vals_addr: u64, + vals_len: u64, + result_addr: u64, + _arg4: u64, + _arg5: u64, + memory_mapping: &mut MemoryMapping, + ) -> Result { + 
let compute_budget = invoke_context.get_compute_budget(); + let hash_base_cost = H::get_base_cost(compute_budget); + let hash_byte_cost = H::get_byte_cost(compute_budget); + let hash_max_slices = H::get_max_slices(compute_budget); + if hash_max_slices < vals_len { + ic_msg!( + invoke_context, + "{} Hashing {} sequences in one syscall is over the limit {}", + H::NAME, + vals_len, + hash_max_slices, + ); + return Err(SyscallError::TooManySlices.into()); + } + + consume_compute_meter(invoke_context, hash_base_cost)?; + + let hash_result = translate_slice_mut::( + memory_mapping, + result_addr, + std::mem::size_of::() as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let mut hasher = H::create_hasher(); + if vals_len > 0 { + let vals = translate_slice::<&[u8]>( + memory_mapping, + vals_addr, + vals_len, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + for val in vals.iter() { + let bytes = translate_slice::( + memory_mapping, + val.as_ptr() as u64, + val.len() as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let cost = compute_budget.mem_op_base_cost.max( + hash_byte_cost.saturating_mul( + (val.len() as u64) + .checked_div(2) + .expect("div by non-zero literal"), + ), + ); + consume_compute_meter(invoke_context, cost)?; + hasher.hash(bytes); + } + } + hash_result.copy_from_slice(hasher.result().as_ref()); + Ok(0) + } +); + #[cfg(test)] #[allow(clippy::arithmetic_side_effects)] #[allow(clippy::indexing_slicing)] @@ -2042,7 +2039,7 @@ mod tests { account::{create_account_shared_data_for_test, AccountSharedData}, bpf_loader, fee_calculator::FeeCalculator, - hash::hashv, + hash::{hashv, HASH_BYTES}, instruction::Instruction, program::check_type_assumptions, stable_layout::stable_instruction::StableInstruction, @@ -2705,7 +2702,7 @@ mod tests { ); let mut result = ProgramResult::Ok(0); - SyscallSha256::call( + SyscallHash::call::( &mut invoke_context, ro_va, 
ro_len, @@ -2720,7 +2717,7 @@ mod tests { let hash_local = hashv(&[bytes1.as_ref(), bytes2.as_ref()]).to_bytes(); assert_eq!(hash_result, hash_local); let mut result = ProgramResult::Ok(0); - SyscallSha256::call( + SyscallHash::call::( &mut invoke_context, ro_va - 1, // AccessViolation ro_len, @@ -2732,7 +2729,7 @@ mod tests { ); assert_access_violation!(result, ro_va - 1, 32); let mut result = ProgramResult::Ok(0); - SyscallSha256::call( + SyscallHash::call::( &mut invoke_context, ro_va, ro_len + 1, // AccessViolation @@ -2744,7 +2741,7 @@ mod tests { ); assert_access_violation!(result, ro_va, 48); let mut result = ProgramResult::Ok(0); - SyscallSha256::call( + SyscallHash::call::( &mut invoke_context, ro_va, ro_len, @@ -2756,7 +2753,7 @@ mod tests { ); assert_access_violation!(result, rw_va - 1, HASH_BYTES as u64); let mut result = ProgramResult::Ok(0); - SyscallSha256::call( + SyscallHash::call::( &mut invoke_context, ro_va, ro_len, From c0fbfc6422fa5b739049c01bfda48a0da1bf6a46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Oct 2023 17:58:36 +0000 Subject: [PATCH 338/407] build(deps): bump serde from 1.0.188 to 1.0.189 (#33686) * build(deps): bump serde from 1.0.188 to 1.0.189 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.188 to 1.0.189. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.188...v1.0.189) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ffec7fbc909432..033095542093de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4746,9 +4746,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] @@ -4764,9 +4764,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index f26209e5104929..c5c55a09cc58af 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -286,7 +286,7 @@ rustls = { version = "0.21.7", default-features = false, features = ["quic"] } rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.20" -serde = "1.0.188" +serde = "1.0.189" serde_bytes = "0.11.12" serde_derive = "1.0.103" serde_json = "1.0.107" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 66b4d27d9abe7a..f34ae041826e77 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4134,9 +4134,9 @@ checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.189" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] @@ -4152,9 +4152,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", From 8a483a83026c874f2aecd6dd38639684241e4f6e Mon Sep 17 00:00:00 2001 From: Joe C Date: Mon, 16 Oct 2023 09:44:11 +0200 Subject: [PATCH 339/407] RPC: update websocket docs (#33460) * [rpc]: update websocket docs * rename rewards to showRewards * add remaining optional fields for slotsUpdates * update block subscription showRewards --- docs/sidebars/api.js | 10 +++ docs/src/api/websocket/_blockSubscribe.mdx | 66 ++++++++++++++----- .../api/websocket/_slotsUpdatesSubscribe.mdx | 8 ++- docs/src/api/websocket/_voteSubscribe.mdx | 1 + 4 files changed, 69 insertions(+), 16 deletions(-) diff --git a/docs/sidebars/api.js b/docs/sidebars/api.js index 53e15baf099f85..15a386eefb6339 100644 --- a/docs/sidebars/api.js +++ b/docs/sidebars/api.js @@ -417,6 +417,16 @@ module.exports = { href: "#programunsubscribe", label: "programUnsubscribe", }, + { + type: "link", + href: "#rootsubscribe", + label: "rootSubscribe", + }, + { + type: "link", + href: "#rootunsubscribe", + label: "rootUnsubscribe", + }, { type: "link", href: "#signaturesubscribe", diff --git a/docs/src/api/websocket/_blockSubscribe.mdx b/docs/src/api/websocket/_blockSubscribe.mdx index 078f7585846e7f..b86543798e5bc3 100644 --- a/docs/src/api/websocket/_blockSubscribe.mdx +++ b/docs/src/api/websocket/_blockSubscribe.mdx @@ -26,7 +26,7 @@ with the `--rpc-pubsub-enable-block-subscription` flag. 
### Parameters: - + filter criteria for the logs to receive results by account type; currently supported: @@ -52,35 +52,71 @@ Configuration object containing the following fields: name="commitment" type="string" optional={true} + defaultValue={"finalized"} href="/api/http#configuring-state-commitment" -> - - - level of transaction detail to return, either "full", "signatures", or "none". +> +