Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

test: Improve test robustness #6252

Merged
merged 3 commits into the base branch from the source branch on
Sep 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/masternode/sync.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ void CMasternodeSync::ProcessTick(const PeerManager& peerman)

// reset the sync process if the last call to this function was more than 60 minutes ago (client was in sleep mode)
static int64_t nTimeLastProcess = GetTime();
if(GetTime() - nTimeLastProcess > 60*60 && !fMasternodeMode) {
if (!Params().IsMockableChain() && GetTime() - nTimeLastProcess > 60 * 60 && !fMasternodeMode) {
LogPrintf("CMasternodeSync::ProcessTick -- WARNING: no actions for too long, restarting sync...\n");
Reset(true);
nTimeLastProcess = GetTime();
Expand Down
10 changes: 6 additions & 4 deletions test/functional/feature_governance.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,8 +88,10 @@ def run_test(self):
assert_equal(len(self.nodes[0].gobject("list-prepared")), 0)

# TODO: drop these extra 80 blocks - doesn't work without them
self.nodes[0].generate(80)
self.bump_mocktime(80)
for _ in range(8):
self.bump_mocktime(10)
self.nodes[0].generate(10)
self.sync_blocks()

self.nodes[0].generate(3)
self.bump_mocktime(3)
Expand Down Expand Up @@ -280,7 +282,7 @@ def sync_gov(node):
before = self.nodes[1].gobject("count")["votes"]

# Bump mocktime to let MNs vote again
self.bump_mocktime(GOVERNANCE_UPDATE_MIN + 1)
self.bump_mocktime(GOVERNANCE_UPDATE_MIN + 1, update_schedulers=False)

# Move another block inside the Superblock maturity window
with self.nodes[1].assert_debug_log(["CGovernanceManager::VoteGovernanceTriggers"]):
Expand All @@ -291,7 +293,7 @@ def sync_gov(node):
# Vote count should not change even though MNs are allowed to vote again
assert_equal(before, self.nodes[1].gobject("count")["votes"])
# Revert mocktime back to avoid issues in tests below
self.bump_mocktime(GOVERNANCE_UPDATE_MIN * -1)
self.bump_mocktime(GOVERNANCE_UPDATE_MIN * -1, update_schedulers=False)

block_count = self.nodes[0].getblockcount()
n = sb_cycle - block_count % sb_cycle
Expand Down
69 changes: 56 additions & 13 deletions test/functional/feature_governance_cl.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,12 @@
"""Tests governance checks can be skipped for blocks covered by the best chainlock."""

import json
import time

from test_framework.governance import have_trigger_for_height
from test_framework.messages import uint256_to_string
from test_framework.test_framework import DashTestFramework
from test_framework.util import assert_equal, force_finish_mnsync, satoshi_round
from test_framework.util import assert_equal, satoshi_round

class DashGovernanceTest (DashTestFramework):
def set_test_params(self):
Expand All @@ -22,7 +23,7 @@ def prepare_object(self, object_type, parent_hash, creation_time, revision, name
"type": object_type,
"name": name,
"start_epoch": proposal_time,
"end_epoch": proposal_time + 24 * 60 * 60,
"end_epoch": proposal_time + 20 * 156,
"payment_amount": float(amount),
"payment_address": payment_address,
"url": "https://dash.org"
Expand All @@ -40,6 +41,8 @@ def prepare_object(self, object_type, parent_hash, creation_time, revision, name

def run_test(self):
sb_cycle = 20
sb_maturity_window = 10
sb_immaturity_window = sb_cycle - sb_maturity_window

self.log.info("Make sure ChainLocks are active")

Expand All @@ -62,7 +65,14 @@ def run_test(self):
self.nodes[0].sporkupdate("SPORK_9_SUPERBLOCKS_ENABLED", 0)
self.wait_for_sporks_same()

self.log.info("Prepare and submit proposals")
# Move to the superblock cycle start block
n = sb_cycle - self.nodes[0].getblockcount() % sb_cycle
for _ in range(n):
self.bump_mocktime(156)
self.nodes[0].generate(1)
self.sync_blocks()

self.log.info("Prepare proposals")

proposal_time = self.mocktime
self.p0_payout_address = self.nodes[0].getnewaddress()
Expand All @@ -81,6 +91,8 @@ def run_test(self):
assert_equal(len(self.nodes[0].gobject("list-prepared")), 2)
assert_equal(len(self.nodes[0].gobject("list")), 0)

self.log.info("Submit proposals")

self.p0_hash = self.nodes[0].gobject("submit", "0", 1, proposal_time, p0_collateral_prepare["hex"], p0_collateral_prepare["collateralHash"])
self.p1_hash = self.nodes[0].gobject("submit", "0", 1, proposal_time, p1_collateral_prepare["hex"], p1_collateral_prepare["collateralHash"])

Expand All @@ -99,31 +111,62 @@ def run_test(self):

assert_equal(len(self.nodes[0].gobject("list", "valid", "triggers")), 0)

self.log.info("Move 1 block into sb maturity window")
n = sb_immaturity_window - self.nodes[0].getblockcount() % sb_cycle
assert n >= 0
for _ in range(n + 1):
self.bump_mocktime(156)
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:5])

self.log.info("Wait for new trigger and votes on non-isolated nodes")
sb_block_height = self.nodes[0].getblockcount() // sb_cycle * sb_cycle + sb_cycle
assert_equal(sb_block_height % sb_cycle, 0)
self.wait_until(lambda: have_trigger_for_height(self.nodes[0:5], sb_block_height), timeout=5)

n = sb_cycle - self.nodes[0].getblockcount() % sb_cycle
assert n > 1

# Move remaining n blocks until the next Superblock
self.log.info("Move remaining n blocks until the next Superblock")
for _ in range(n - 1):
self.nodes[0].generate(1)
self.bump_mocktime(156)
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:5])

self.log.info("Wait for new trigger and votes on non-isolated nodes")
sb_block_height = self.nodes[0].getblockcount() + 1
self.wait_until(lambda: have_trigger_for_height(self.nodes[0:5], sb_block_height))
# Mine superblock
self.nodes[0].generate(1)
# Confirm all is good
self.wait_until(lambda: have_trigger_for_height(self.nodes[0:5], sb_block_height), timeout=5)

self.log.info("Mine superblock")
self.bump_mocktime(156)
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:5])
self.wait_for_chainlocked_block(self.nodes[0], self.nodes[0].getbestblockhash())

self.log.info("Mine (superblock cycle + 1) blocks on non-isolated nodes to forget about this trigger")
for _ in range(sb_cycle):
self.bump_mocktime(156)
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:5])
# Should still have at least 1 trigger for the old sb cycle and 0 for the current one
assert len(self.nodes[0].gobject("list", "valid", "triggers")) >= 1
assert not have_trigger_for_height(self.nodes[0:5], sb_block_height + sb_cycle)
self.bump_mocktime(156)
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:5])
# Trigger scheduler to mark old triggers for deletion
self.bump_mocktime(5 * 60)
# Let it do the job
time.sleep(1)
# Move forward to satisfy GOVERNANCE_DELETION_DELAY, should actually remove old triggers now
self.bump_mocktime(10 * 60)
self.wait_until(lambda: len(self.nodes[0].gobject("list", "valid", "triggers")) == 0, timeout=5)

self.log.info("Reconnect isolated node and confirm the next ChainLock will let it sync")
self.reconnect_isolated_node(5, 0)
# Force isolated node to be fully synced so that it would not request gov objects when reconnected
assert_equal(self.nodes[5].mnsync("status")["IsSynced"], False)
force_finish_mnsync(self.nodes[5])
self.nodes[0].generate(1)
self.bump_mocktime(156)
# NOTE: bumping mocktime too much after recent reconnect can result in "timeout downloading block"
self.bump_mocktime(1)
self.sync_blocks()


Expand Down
4 changes: 2 additions & 2 deletions test/functional/feature_llmq_signing.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,11 +157,11 @@ def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout):
assert_sigs_nochange(True, False, True, 3)

# fast forward until 0.5 days before cleanup is expected, recovered sig should still be valid
self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) - self.mocktime)
self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) - self.mocktime, update_schedulers=False)
# Cleanup starts every 5 seconds
wait_for_sigs(True, False, True, 15)
# fast forward 1 day, recovered sig should not be valid anymore
self.bump_mocktime(int(60 * 60 * 24 * 1))
self.bump_mocktime(int(60 * 60 * 24 * 1), update_schedulers=False)
# Cleanup starts every 5 seconds
wait_for_sigs(False, False, False, 15)

Expand Down
2 changes: 1 addition & 1 deletion test/functional/feature_mnehf.py
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,7 @@ def run_test(self):
assert ehf_tx_duplicate in node.getrawmempool() and ehf_tx_duplicate not in block['tx']

self.log.info("Testing EHF signal with same bit but with newer start time")
self.bump_mocktime(int(60 * 60 * 24 * 14))
self.bump_mocktime(int(60 * 60 * 24 * 14), update_schedulers=False)
node.generate(1)
self.sync_blocks()
self.restart_all_nodes(params=[self.mocktime, self.mocktime + 1000000])
Expand Down
2 changes: 2 additions & 0 deletions test/functional/p2p_addr_relay.py
Original file line number Diff line number Diff line change
Expand Up @@ -326,6 +326,8 @@ def rate_limit_tests(self):
self.restart_node(0, [])

for conn_type, no_relay in [("outbound-full-relay", False), ("block-relay-only", True), ("inbound", False)]:
# Advance the time by 5 * 60 seconds, permitting syncing from the same peer.
self.bump_mocktime(5 * 60)
self.log.info(f'Test rate limiting of addr processing for {conn_type} peers')
if conn_type == "inbound":
peer = self.nodes[0].add_p2p_connection(AddrReceiver())
Expand Down
25 changes: 18 additions & 7 deletions test/functional/test_framework/test_framework.py
Original file line number Diff line number Diff line change
Expand Up @@ -455,7 +455,7 @@ def setup_nodes(self):
# must have a timestamp not too old (see IsInitialBlockDownload()).
if not self.disable_mocktime:
self.log.debug('Generate a block with current mocktime')
self.bump_mocktime(156 * 200)
self.bump_mocktime(156 * 200, update_schedulers=False)
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
Expand Down Expand Up @@ -813,13 +813,24 @@ def sync_all(self, nodes=None):
self.sync_blocks(nodes)
self.sync_mempools(nodes)

def bump_mocktime(self, t, update_nodes=True, nodes=None):
def bump_mocktime(self, t, update_nodes=True, nodes=None, update_schedulers=True):
if self.mocktime == 0:
return

self.mocktime += t
if update_nodes:
set_node_times(nodes or self.nodes, self.mocktime)

if not update_nodes:
return

nodes_to_update = nodes or self.nodes
set_node_times(nodes_to_update, self.mocktime)

if not update_schedulers:
return

for node in nodes_to_update:
if node.version_is_at_least(180100):
node.mockscheduler(t)

def _initialize_mocktime(self, is_genesis):
if is_genesis:
Expand Down Expand Up @@ -913,7 +924,7 @@ def _initialize_chain(self):
gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [ADDRESS_BCRT1_P2SH_OP_TRUE]
assert_equal(len(gen_addresses), 4)
for i in range(8):
self.bump_mocktime((25 if i != 7 else 24) * 156)
self.bump_mocktime((25 if i != 7 else 24) * 156, update_schedulers=False)
cache_node.generatetoaddress(
nblocks=25 if i != 7 else 24,
address=gen_addresses[i % len(gen_addresses)],
Expand Down Expand Up @@ -1132,13 +1143,13 @@ def activate_by_name(self, name, expected_activation_height=None):
# NOTE: getblockchaininfo shows softforks active at block (window * 3 - 1)
# since it's returning whether a softwork is active for the _next_ block.
# Hence the last block prior to the activation is (expected_activation_height - 2).
while expected_activation_height - height - 2 >= batch_size:
while expected_activation_height - height - 2 > batch_size:
self.bump_mocktime(batch_size)
self.nodes[0].generate(batch_size)
height += batch_size
self.sync_blocks()
blocks_left = expected_activation_height - height - 2
assert blocks_left < batch_size
assert blocks_left <= batch_size
self.bump_mocktime(blocks_left)
self.nodes[0].generate(blocks_left)
self.sync_blocks()
Expand Down
2 changes: 1 addition & 1 deletion test/functional/wallet_create_tx.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def run_test(self):

def test_anti_fee_sniping(self):
self.log.info('Check that we have some (old) blocks and that anti-fee-sniping is disabled')
self.bump_mocktime(8 * 60 * 60 + 1)
self.bump_mocktime(8 * 60 * 60 + 1, update_schedulers=False)
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
Expand Down
Loading