Enable payloads for non-coinbase transactions (#591)
* Enable payloads for non-coinbase transactions

* Add payload hash to sighash

* test reflects enabling payload

* Enhance benchmarking: add payload size variations

Refactored `mock_tx` to `mock_tx_with_payload` to support custom payload sizes. Introduced a new benchmark function, `benchmark_check_scripts_with_payload`, to test performance with varying payload sizes. Commented out the old benchmark function to focus on payload-based tests.

* Enhance script checking benchmarks

Added benchmarks to evaluate script-checking performance with varying payload sizes and input counts. This helps in understanding the impact of transaction payload size on validation, and the relationship between input count and payload-processing overhead.

* Add new test case for transaction hashing and refactor code

This commit introduces a new test case to verify that transaction IDs and hashes change with payload modifications. Additionally, code readability and consistency are improved by refactoring multi-line expressions into single lines where appropriate.

* Add payload activation test for transactions

This commit introduces a new integration test to validate the enforcement of payload activation rules at a specified DAA score. The test ensures that transactions with large payloads are rejected before activation and accepted afterward, maintaining consensus integrity.
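A self-contained model of the rule this test exercises (illustrative only; the helper name and the activation score are assumptions, not the repo's test harness):

// Minimal model of payload activation: a non-coinbase transaction may
// carry a payload only once the DAA score reaches the activation score.
fn payload_allowed(daa_score: u64, activation_daa_score: u64, is_coinbase: bool, payload_len: usize) -> bool {
    is_coinbase || payload_len == 0 || daa_score >= activation_daa_score
}

fn main() {
    assert!(!payload_allowed(99, 100, false, 1_000)); // rejected just before activation
    assert!(payload_allowed(100, 100, false, 1_000)); // accepted at and after activation
    assert!(payload_allowed(0, 100, true, 1_000)); // coinbase payloads were always allowed
}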

* style: fmt

* test: add test that checks that a payload change is reflected in the sighash

* rename test

* Don't ever skip utxo_free_tx_validation (run it for every transaction, not only those with a non-zero lock time)

* lints

---------

Co-authored-by: max143672 <[email protected]>
someone235 and biryukovmaxim authored Nov 24, 2024
1 parent a0aeec3 commit 64eeb89
Showing 14 changed files with 297 additions and 58 deletions.
56 changes: 46 additions & 10 deletions consensus/benches/check_scripts.rs
@@ -13,8 +13,10 @@ use kaspa_utils::iter::parallelism_in_power_steps;
use rand::{thread_rng, Rng};
use secp256k1::Keypair;

-// You may need to add more detailed mocks depending on your actual code.
-fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec<UtxoEntry>) {
+fn mock_tx_with_payload(inputs_count: usize, non_uniq_signatures: usize, payload_size: usize) -> (Transaction, Vec<UtxoEntry>) {
+let mut payload = vec![0u8; payload_size];
+thread_rng().fill(&mut payload[..]);

let reused_values = SigHashReusedValuesUnsync::new();
let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1);
let mut tx = Transaction::new(
@@ -24,10 +26,11 @@ fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec
0,
SubnetworkId::from_bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
0,
-vec![],
+payload,
);
let mut utxos = vec![];
let mut kps = vec![];

for _ in 0..inputs_count - non_uniq_signatures {
let kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng());
tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 });
@@ -40,6 +43,7 @@ });
});
kps.push(kp);
}

for _ in 0..non_uniq_signatures {
let kp = kps.last().unwrap();
tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 });
@@ -51,31 +55,32 @@ fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec
is_coinbase: false,
});
}

for (i, kp) in kps.iter().enumerate().take(inputs_count - non_uniq_signatures) {
let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone());
let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values);
let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap();
let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref();
// This represents OP_DATA_65 <SIGNATURE+SIGHASH_TYPE> (since signature length is 64 bytes and SIGHASH_TYPE is one byte)
tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect();
}

let length = tx.inputs.len();
for i in (inputs_count - non_uniq_signatures)..length {
let kp = kps.last().unwrap();
let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone());
let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values);
let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap();
let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref();
// This represents OP_DATA_65 <SIGNATURE+SIGHASH_TYPE> (since signature length is 64 bytes and SIGHASH_TYPE is one byte)
tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect();
}

(tx, utxos)
}

fn benchmark_check_scripts(c: &mut Criterion) {
for inputs_count in [100, 50, 25, 10, 5, 2] {
for non_uniq_signatures in [0, inputs_count / 2] {
-let (tx, utxos) = mock_tx(inputs_count, non_uniq_signatures);
+let (tx, utxos) = mock_tx_with_payload(inputs_count, non_uniq_signatures, 0);
let mut group = c.benchmark_group(format!("inputs: {inputs_count}, non uniq: {non_uniq_signatures}"));
group.sampling_mode(SamplingMode::Flat);

@@ -97,12 +102,10 @@ fn benchmark_check_scripts(c: &mut Criterion) {
})
});

-// Iterate powers of two up to available parallelism
for i in parallelism_in_power_steps() {
if inputs_count >= i {
group.bench_function(format!("rayon, custom thread pool, thread count {i}"), |b| {
let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone());
-// Create a custom thread pool with the specified number of threads
let pool = rayon::ThreadPoolBuilder::new().num_threads(i).build().unwrap();
let cache = Cache::new(inputs_count as u64);
b.iter(|| {
@@ -117,11 +120,44 @@ fn benchmark_check_scripts(c: &mut Criterion) {
}
}

/// Benchmarks script checking performance with different payload sizes and input counts.
///
/// This benchmark evaluates the performance impact of transaction payload size
/// on script validation, testing multiple scenarios:
///
/// * Payload sizes: 0KB, 16KB, 32KB, 64KB, 128KB
/// * Input counts: 1, 2, 10, 50 inputs
///
/// The benchmark helps understand:
/// 1. How payload size affects validation performance
/// 2. The relationship between input count and payload processing overhead
fn benchmark_check_scripts_with_payload(c: &mut Criterion) {
let payload_sizes = [0, 16_384, 32_768, 65_536, 131_072]; // 0, 16KB, 32KB, 64KB, 128KB
let input_counts = [1, 2, 10, 50];
let non_uniq_signatures = 0;

for inputs_count in input_counts {
for &payload_size in &payload_sizes {
let (tx, utxos) = mock_tx_with_payload(inputs_count, non_uniq_signatures, payload_size);
let mut group = c.benchmark_group(format!("script_check/inputs_{}/payload_{}_kb", inputs_count, payload_size / 1024));
group.sampling_mode(SamplingMode::Flat);

group.bench_function("parallel_validation", |b| {
let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone());
let cache = Cache::new(inputs_count as u64);
b.iter(|| {
cache.clear();
check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable()), false).unwrap();
})
});
}
}
}

criterion_group! {
name = benches;
// This can be any expression that returns a `Criterion` object.
config = Criterion::default().with_output_color(true).measurement_time(std::time::Duration::new(20, 0));
-targets = benchmark_check_scripts
+targets = benchmark_check_scripts, benchmark_check_scripts_with_payload
}

criterion_main!(benches);
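With the Criterion setup above, the suite can be run with something like `cargo bench -p kaspa-consensus --bench check_scripts` (the package name is an assumption), optionally filtered to one group, e.g. appending `-- script_check` for the payload benchmarks.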
13 changes: 13 additions & 0 deletions consensus/core/src/config/params.rs
@@ -130,6 +130,9 @@ pub struct Params {
pub skip_proof_of_work: bool,
pub max_block_level: BlockLevel,
pub pruning_proof_m: u64,

/// Activation rules for when to enable using the payload field in transactions
pub payload_activation: ForkActivation,
}

fn unix_now() -> u64 {
@@ -406,6 +409,8 @@ pub const MAINNET_PARAMS: Params = Params {
skip_proof_of_work: false,
max_block_level: 225,
pruning_proof_m: 1000,

payload_activation: ForkActivation::never(),
};

pub const TESTNET_PARAMS: Params = Params {
@@ -469,6 +474,8 @@ pub const TESTNET_PARAMS: Params = Params {
skip_proof_of_work: false,
max_block_level: 250,
pruning_proof_m: 1000,

payload_activation: ForkActivation::never(),
};

pub const TESTNET11_PARAMS: Params = Params {
Expand Down Expand Up @@ -530,6 +537,8 @@ pub const TESTNET11_PARAMS: Params = Params {

skip_proof_of_work: false,
max_block_level: 250,

payload_activation: ForkActivation::never(),
};

pub const SIMNET_PARAMS: Params = Params {
Expand Down Expand Up @@ -584,6 +593,8 @@ pub const SIMNET_PARAMS: Params = Params {

skip_proof_of_work: true, // For simnet only, PoW can be simulated by default
max_block_level: 250,

payload_activation: ForkActivation::never(),
};

pub const DEVNET_PARAMS: Params = Params {
Expand Down Expand Up @@ -641,4 +652,6 @@ pub const DEVNET_PARAMS: Params = Params {
skip_proof_of_work: false,
max_block_level: 250,
pruning_proof_m: 1000,

payload_activation: ForkActivation::never(),
};
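The validator-side check that consumes `payload_activation` lives in one of the changed files not shown above. A minimal sketch of how such a `ForkActivation` gate is typically applied (the function and error-variant names here are assumptions; `is_active`, `is_coinbase`, and `payload` mirror the types used in these diffs):

// Sketch: before the fork activates, only coinbase transactions may carry
// a non-empty payload; from activation onwards the field is unrestricted.
fn check_payload_activation(tx: &Transaction, daa_score: u64, payload_activation: ForkActivation) -> Result<(), TxRuleError> {
    if !payload_activation.is_active(daa_score) && !tx.is_coinbase() && !tx.payload.is_empty() {
        return Err(TxRuleError::NonCoinbaseTxHasPayload); // illustrative variant name
    }
    Ok(())
}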
61 changes: 45 additions & 16 deletions consensus/core/src/hashing/sighash.rs
@@ -3,10 +3,7 @@ use kaspa_hashes::{Hash, Hasher, HasherBase, TransactionSigningHash, Transaction
use std::cell::Cell;
use std::sync::Arc;

-use crate::{
-subnets::SUBNETWORK_ID_NATIVE,
-tx::{ScriptPublicKey, Transaction, TransactionOutpoint, TransactionOutput, VerifiableTransaction},
-};
+use crate::tx::{ScriptPublicKey, Transaction, TransactionOutpoint, TransactionOutput, VerifiableTransaction};

use super::{sighash_type::SigHashType, HasherExtensions};

@@ -19,6 +16,7 @@ pub struct SigHashReusedValuesUnsync {
sequences_hash: Cell<Option<Hash>>,
sig_op_counts_hash: Cell<Option<Hash>>,
outputs_hash: Cell<Option<Hash>>,
payload_hash: Cell<Option<Hash>>,
}

impl SigHashReusedValuesUnsync {
@@ -33,6 +31,7 @@ pub struct SigHashReusedValuesSync {
sequences_hash: ArcSwapOption<Hash>,
sig_op_counts_hash: ArcSwapOption<Hash>,
outputs_hash: ArcSwapOption<Hash>,
payload_hash: ArcSwapOption<Hash>,
}

impl SigHashReusedValuesSync {
@@ -46,6 +45,7 @@ pub trait SigHashReusedValues {
fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash;
fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash;
fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash;
fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash;
}

impl<T: SigHashReusedValues> SigHashReusedValues for Arc<T> {
@@ -64,6 +64,10 @@ impl<T: SigHashReusedValues> SigHashReusedValues for Arc<T> {
fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash {
self.as_ref().outputs_hash(set)
}

fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash {
self.as_ref().payload_hash(set)
}
}

impl SigHashReusedValues for SigHashReusedValuesUnsync {
@@ -98,6 +102,14 @@ impl SigHashReusedValues for SigHashReusedValuesUnsync {
hash
})
}

fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash {
self.payload_hash.get().unwrap_or_else(|| {
let hash = set();
self.payload_hash.set(Some(hash));
hash
})
}
}

impl SigHashReusedValues for SigHashReusedValuesSync {
@@ -136,6 +148,15 @@ impl SigHashReusedValues for SigHashReusedValuesSync {
self.outputs_hash.rcu(|_| Arc::new(hash));
hash
}

fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash {
if let Some(value) = self.payload_hash.load().as_ref() {
return **value;
}
let hash = set();
self.payload_hash.rcu(|_| Arc::new(hash));
hash
}
}

pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash {
@@ -182,17 +203,17 @@ pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_value
reused_values.sig_op_counts_hash(hash)
}

-pub fn payload_hash(tx: &Transaction) -> Hash {
-if tx.subnetwork_id == SUBNETWORK_ID_NATIVE {
-return ZERO_HASH;
-}
-
-// TODO: Right now this branch will never be executed, since payload is disabled
-// for all non coinbase transactions. Once payload is enabled, the payload hash
-// should be cached to make it cost O(1) instead of O(tx.inputs.len()).
-let mut hasher = TransactionSigningHash::new();
-hasher.write_var_bytes(&tx.payload);
-hasher.finalize()
-}
+pub fn payload_hash(tx: &Transaction, reused_values: &impl SigHashReusedValues) -> Hash {
+let hash = || {
+if tx.subnetwork_id.is_native() && tx.payload.is_empty() {
+return ZERO_HASH;
+}
+
+let mut hasher = TransactionSigningHash::new();
+hasher.write_var_bytes(&tx.payload);
+hasher.finalize()
+};
+reused_values.payload_hash(hash)
+}

pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues, input_index: usize) -> Hash {
@@ -260,7 +281,7 @@ pub fn calc_schnorr_signature_hash(
.write_u64(tx.lock_time)
.update(&tx.subnetwork_id)
.write_u64(tx.gas)
-.update(payload_hash(tx))
+.update(payload_hash(tx, reused_values))
.write_u8(hash_type.to_u8());
hasher.finalize()
}
@@ -285,7 +306,7 @@ mod tests {

use crate::{
hashing::sighash_type::{SIG_HASH_ALL, SIG_HASH_ANY_ONE_CAN_PAY, SIG_HASH_NONE, SIG_HASH_SINGLE},
-subnets::SubnetworkId,
+subnets::{SubnetworkId, SUBNETWORK_ID_NATIVE},
tx::{PopulatedTransaction, Transaction, TransactionId, TransactionInput, UtxoEntry},
};

@@ -608,6 +629,14 @@ mod tests {
action: ModifyAction::NoAction,
expected_hash: "846689131fb08b77f83af1d3901076732ef09d3f8fdff945be89aa4300562e5f", // should change the hash
},
TestVector {
name: "native-all-0-modify-payload",
populated_tx: &native_populated_tx,
hash_type: SIG_HASH_ALL,
input_index: 0,
action: ModifyAction::Payload,
expected_hash: "72ea6c2871e0f44499f1c2b556f265d9424bfea67cca9cb343b4b040ead65525", // should change the hash
},
// subnetwork transaction
TestVector {
name: "subnetwork-all-0",
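Since `calc_schnorr_signature_hash` is invoked once per input, caching the payload hash in the reused values is what keeps payload hashing O(1) per transaction rather than O(inputs). A usage sketch, reusing the names from the benchmark file above (`tx` and `utxos` as produced by `mock_tx_with_payload`):

let reused_values = SigHashReusedValuesUnsync::new();
let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone());
for input_index in 0..mut_tx.tx.inputs.len() {
    // The payload is hashed on the first call only; every subsequent input
    // reuses the cached value via SigHashReusedValues::payload_hash.
    let _sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), input_index, SIG_HASH_ALL, &reused_values);
}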
7 changes: 7 additions & 0 deletions consensus/core/src/hashing/tx.rs
@@ -157,6 +157,13 @@ mod tests {
expected_hash: "31da267d5c34f0740c77b8c9ebde0845a01179ec68074578227b804bac306361",
});

// Test #8, same as #7 but with a non-zero payload. The test checks that both the id and the hash are affected by the payload change
tests.push(Test {
tx: Transaction::new(2, inputs.clone(), outputs.clone(), 54, subnets::SUBNETWORK_ID_REGISTRY, 3, vec![1, 2, 3]),
expected_id: "1f18b18ab004ff1b44dd915554b486d64d7ebc02c054e867cc44e3d746e80b3b",
expected_hash: "a2029ebd66d29d41aa7b0c40230c1bfa7fe8e026fb44b7815dda4e991b9a5fad",
});

for (i, test) in tests.iter().enumerate() {
assert_eq!(test.tx.id(), Hash::from_str(test.expected_id).unwrap(), "transaction id failed for test {}", i + 1);
assert_eq!(
1 change: 1 addition & 0 deletions consensus/src/consensus/services.rs
@@ -147,6 +147,7 @@ impl ConsensusServices {
mass_calculator.clone(),
params.storage_mass_activation,
params.kip10_activation,
params.payload_activation,
);

let pruning_point_manager = PruningPointManager::new(
@@ -8,7 +8,6 @@ use kaspa_consensus_core::block::Block;
use kaspa_database::prelude::StoreResultExtensions;
use kaspa_hashes::Hash;
use kaspa_utils::option::OptionExtensions;
-use once_cell::unsync::Lazy;
use std::sync::Arc;

impl BlockBodyProcessor {
@@ -21,27 +20,17 @@
fn check_block_transactions_in_context(self: &Arc<Self>, block: &Block) -> BlockProcessResult<()> {
// Note: This is somewhat expensive during ibd, as it incurs cache misses.

-// Use lazy evaluation to avoid unnecessary work, as most of the time we expect the txs not to have lock time.
-let lazy_pmt_res =
-Lazy::new(|| match self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap()) {
-Ok((pmt, pmt_window)) => {
-if !self.block_window_cache_for_past_median_time.contains_key(&block.hash()) {
-self.block_window_cache_for_past_median_time.insert(block.hash(), pmt_window);
-};
-Ok(pmt)
-}
-Err(e) => Err(e),
-});
+let pmt = {
+let (pmt, pmt_window) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?;
+if !self.block_window_cache_for_past_median_time.contains_key(&block.hash()) {
+self.block_window_cache_for_past_median_time.insert(block.hash(), pmt_window);
+};
+pmt
+};

for tx in block.transactions.iter() {
-// Quick check to avoid the expensive Lazy eval during ibd (in most cases).
-// TODO: refactor this and avoid classifying the tx lock outside of the transaction validator.
-if tx.lock_time != 0 {
-if let Err(e) =
-self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, (*lazy_pmt_res).clone()?)
-{
-return Err(RuleError::TxInContextFailed(tx.id(), e));
-};
-}
+if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) {
+return Err(RuleError::TxInContextFailed(tx.id(), e));
+};
}
Ok(())