From ad2c5a53eecc04d8891ad89e44f7d32bd0d86b0c Mon Sep 17 00:00:00 2001
From: Konstantin Akimov
Date: Tue, 22 Oct 2024 13:59:43 +0700
Subject: [PATCH 1/3] refactor: unify feature_notifications.py after #5522 with bitcoin's codebase

---
 test/functional/feature_notifications.py | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
index d8f09b5e2ebb8..8ef3720ff9bdc 100755
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -43,14 +43,17 @@ def setup_network(self):
         os.mkdir(self.instantsendnotify_dir)
 
         # -alertnotify and -blocknotify on node0, walletnotify on node1
-        self.extra_args[0].append("-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')))
-        self.extra_args[0].append("-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s')))
-        self.extra_args[1].append("-rescan")
-        self.extra_args[1].append("-walletnotify=echo %h_%b > {}".format(os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))))
-
         # -chainlocknotify on node0, -instantsendnotify on node1
-        self.extra_args[0].append("-chainlocknotify=echo > {}".format(os.path.join(self.chainlocknotify_dir, '%s')))
-        self.extra_args[1].append("-instantsendnotify=echo > {}".format(os.path.join(self.instantsendnotify_dir, notify_outputname('%w', '%s'))))
+        self.extra_args = [[
+            "-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')),
+            "-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s')),
+            "-chainlocknotify=echo > {}".format(os.path.join(self.chainlocknotify_dir, '%s')),
+        ], [
+            "-rescan",
+            "-walletnotify=echo %h_%b > {}".format(os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))),
+            "-instantsendnotify=echo > {}".format(os.path.join(self.instantsendnotify_dir, notify_outputname('%w', '%s'))),
+        ],
+        [], [], [], []]
 
         self.wallet_names = [self.default_wallet_name, self.wallet]
         super().setup_network()

From 65226da84983b742e9326d443a0e7fc85435b2f6 Mon Sep 17 00:00:00 2001
From: MarcoFalke
Date: Wed, 18 Aug 2021 21:11:23 +0200
Subject: [PATCH 2/3] Merge bitcoin/bitcoin#22229: test: consolidate to f-strings (part 1)

68faa87881f5334b2528db4adc72ec19d94316a3 test: use f-strings in mining_*.py tests (fanquake)
c2a5d560df2824df5731100c2584e8ad7a3d7bc2 test: use f-strings in interface_*.py tests (fanquake)
86d958262dff43002820d58ccb8958e2dbfb9d5b test: use f-strings in feature_proxy.py (fanquake)
31bdb33dcb8345df1bb94b28e811252a918d7dcb test: use f-strings in feature_segwit.py (fanquake)
b166d54c3cbb0c028210cee977b3dcde5ac5474f test: use f-strings in feature_versionbits_warning.py (fanquake)
cf6d66bf941d946600047d712c7cd15d7605322e test: use f-strings in feature_settings.py (fanquake)
6651d77f22862716f5bd7d0b31cfbd3937ab7b1d test: use f-strings in feature_pruning.py (fanquake)
961f5813ba65b6a601081912c4ece96c2679794d test: use f-strings in feature_notifications.py (fanquake)
1a546e6f6ca95772f0d7dbc2792477becbb8ea63 test: use f-strings in feature_minchainwork.py (fanquake)
6679eceacc915a8ea7cd7063f103ffc5eb9da884 test: use f-strings in feature_logging.py (fanquake)
fb633933ab570e945d2a366f37eeff39f516c613 test: use f-strings in feature_loadblock.py (fanquake)
e9ca8b254d4b9567831c0e113ce1c0a2b4795a95 test: use f-strings in feature_help.py (fanquake)
ff7e3309995a8960ac371741b2b00c6da40f7490 test: use f-strings in feature_filelock.py (fanquake)
d5a6adc5e478fa5c6e562377eea873dc38e66578 test: use f-strings in feature_fee_estimation.py (fanquake)
a2de33cbdc79202bccddb4beadfde88266ac979f test: use f-strings in feature_dersig.py (fanquake)
a2502cc63fd308be8af840962da9c53339433fa6 test: use f-strings in feature_dbcrash.py (fanquake)
3e2f84e7a96cb4b97b609ac853f78edd0ed43f82 test: use f-strings in feature_csv_activation.py (fanquake)
e2f1fd8ee92fa421b6d293169044d6ddd5a9b8df test: use f-strings in feature_config_args.py (fanquake)
36d33d32b1b498b61f56d552f6e2c1d064f978c3 test: use f-strings in feature_cltv.py (fanquake)
dca173cc044270b30782b1e3355e9dcb8c534295 test: use f-strings in feature_blocksdir.py (fanquake)
5453e8706278918ac51a725e81599cfa18c8cdbc test: use f-strings in feature_backwards_compatibility.py (fanquake)
6f3d5ad67ac8e7b50abae1a2949898d858e38106 test: use f-strings in feature_asmap.py (fanquake)

Pull request description:

  Rather than using 3 different ways to build/format strings (sometimes all in the same test, i.e [`feature_config_args.py`](https://github.com/bitcoin/bitcoin/blob/master/test/functional/feature_config_args.py)), consolidate to using [f-strings (3.6+)](https://docs.python.org/3/reference/lexical_analysis.html#f-strings), which are generally more concise / readable, as well as more performant than existing methods.

  This deals with the `feature_*.py`, `interface_*.py` and `mining_*.py` tests.

  See also: [PEP 498](https://www.python.org/dev/peps/pep-0498/)

ACKs for top commit:
  mjdietzx:
    reACK 68faa87881f5334b2528db4adc72ec19d94316a3
  Zero-1729:
    crACK 68faa87881f5334b2528db4adc72ec19d94316a3

Tree-SHA512: d4e1a42e07d96d2c552387a46da1534223c4ce408703d7568ad2ef580797dd68d9695b8d19666b567af37f44de6e430e8be5db5d5404ba8fcecf9f5b026a6efb
---
 test/functional/feature_asmap.py              | 14 ++---
 .../feature_backwards_compatibility.py        |  2 +-
 test/functional/feature_blocksdir.py          |  5 +-
 test/functional/feature_cltv.py               |  5 +-
 test/functional/feature_config_args.py        | 26 ++++----
 test/functional/feature_csv_activation.py     |  2 +-
 test/functional/feature_dbcrash.py            | 32 +++++-----
 test/functional/feature_dersig.py             |  4 +-
 test/functional/feature_fee_estimation.py     | 11 ++--
 test/functional/feature_filelock.py           |  8 +--
 test/functional/feature_help.py               |  6 +-
 test/functional/feature_loadblock.py          | 24 +++----
 test/functional/feature_logging.py            | 12 ++--
 test/functional/feature_minchainwork.py       | 10 +--
 test/functional/feature_notifications.py      | 12 ++--
 test/functional/feature_pruning.py            | 24 +++----
 test/functional/feature_settings.py           |  2 +-
 .../functional/feature_versionbits_warning.py |  4 +-
 test/functional/interface_bitcoin_cli.py      | 16 ++---
 test/functional/interface_http.py             | 20 +++---
 test/functional/interface_rest.py             | 62 +++++++++----------
 test/functional/interface_rpc.py              |  2 +-
 test/functional/interface_zmq.py              |  6 +-
 test/functional/mining_basic.py               |  4 +-
 24 files changed, 154 insertions(+), 159 deletions(-)

diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py
index e6c4a50e3a306..a357ee8cbbcda 100755
--- a/test/functional/feature_asmap.py
+++ b/test/functional/feature_asmap.py
@@ -33,8 +33,8 @@
 VERSION = 'fec61fa21a9f46f3b17bdcd660d7f4cd90b966aad3aec593c99b35f0aca15853'
 
 def expected_messages(filename):
-    return ['Opened asmap file "{}" (59 bytes) from disk'.format(filename),
-            'Using asmap version {} for IP bucketing'.format(VERSION)]
+    return [f'Opened asmap file "{filename}" (59 bytes) from disk',
+            f'Using asmap version {VERSION} for IP bucketing']
 
 class AsmapTest(BitcoinTestFramework):
     def set_test_params(self):
@@ -58,7 +58,7 @@ def test_asmap_with_absolute_path(self):
         filename = os.path.join(self.datadir, 'my-map-file.map')
         shutil.copyfile(self.asmap_raw, filename)
         with self.node.assert_debug_log(expected_messages(filename)):
-            self.start_node(0, ['-asmap={}'.format(filename)])
+            self.start_node(0, [f'-asmap={filename}'])
         os.remove(filename)
 
     def test_asmap_with_relative_path(self):
@@ -68,13 +68,13 @@ def test_asmap_with_relative_path(self):
         filename = os.path.join(self.datadir, name)
         shutil.copyfile(self.asmap_raw, filename)
         with self.node.assert_debug_log(expected_messages(filename)):
-            self.start_node(0, ['-asmap={}'.format(name)])
+            self.start_node(0, [f'-asmap={name}'])
         os.remove(filename)
 
     def test_default_asmap(self):
         shutil.copyfile(self.asmap_raw, self.default_asmap)
         for arg in ['-asmap', '-asmap=']:
-            self.log.info('Test dashd {} (using default map file)'.format(arg))
+            self.log.info(f'Test dashd {arg} (using default map file)')
             self.stop_node(0)
             with self.node.assert_debug_log(expected_messages(self.default_asmap)):
                 self.start_node(0, [arg])
@@ -99,7 +99,7 @@ def test_asmap_interaction_with_addrman_containing_entries(self):
     def test_default_asmap_with_missing_file(self):
         self.log.info('Test dashd -asmap with missing default map file')
         self.stop_node(0)
-        msg = "Error: Could not find asmap file \"{}\"".format(self.default_asmap)
+        msg = f"Error: Could not find asmap file \"{self.default_asmap}\""
         self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
 
     def test_empty_asmap(self):
@@ -107,7 +107,7 @@ def test_empty_asmap(self):
         self.stop_node(0)
         with open(self.default_asmap, "w", encoding="utf-8") as f:
             f.write("")
-        msg = "Error: Could not parse asmap file \"{}\"".format(self.default_asmap)
+        msg = f"Error: Could not parse asmap file \"{self.default_asmap}\""
         self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
         os.remove(self.default_asmap)

diff --git a/test/functional/feature_backwards_compatibility.py b/test/functional/feature_backwards_compatibility.py
index f9dee4e19e539..56caca9598391 100755
--- a/test/functional/feature_backwards_compatibility.py
+++ b/test/functional/feature_backwards_compatibility.py
@@ -362,7 +362,7 @@ def run_test(self):
         wallet = node_master.get_wallet_rpc("u1_v17")
         info = wallet.getaddressinfo(address)
         # TODO enable back when HD wallets are created by default
-        #descriptor = "pkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v17_pubkey + ")"
+        #descriptor = f"pkh([{info["hdmasterfingerprint"]}{hdkeypath[1:]}]{v17_pubkey})"
         #assert_equal(info["desc"], descsum_create(descriptor))
         assert_equal(info["pubkey"], v17_pubkey)

diff --git a/test/functional/feature_blocksdir.py b/test/functional/feature_blocksdir.py
index 6ceb723a85adf..28e6d6cdf92af 100755
--- a/test/functional/feature_blocksdir.py
+++ b/test/functional/feature_blocksdir.py
@@ -24,11 +24,10 @@ def run_test(self):
         initialize_datadir(self.options.tmpdir, 0, self.chain)
         self.log.info("Starting with nonexistent blocksdir ...")
         blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
-        self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], 'Error: Specified blocks directory "' +
-                                                     blocksdir_path + '" does not exist.')
+        self.nodes[0].assert_start_raises_init_error([f"-blocksdir={blocksdir_path}"], f'Error: Specified blocks directory "{blocksdir_path}" does not exist.')
         os.mkdir(blocksdir_path)
         self.log.info("Starting with existing blocksdir ...")
-        self.start_node(0, ["-blocksdir=" + blocksdir_path])
+        self.start_node(0, [f"-blocksdir={blocksdir_path}"])
         self.log.info("mining blocks..")
         self.generatetoaddress(self.nodes[0], 10, self.nodes[0].get_deterministic_priv_key().address)
         assert os.path.isfile(os.path.join(blocksdir_path, self.chain, "blocks", "blk00000.dat"))

diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index 7e1a69f8ac5c5..3c2994d4644ff 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -143,7 +143,7 @@ def run_test(self):
         block.nVersion = 3
         block.solve()
 
-        with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]):
+        with self.nodes[0].assert_debug_log(expected_msgs=[f'{block.hash}, bad-version(0x00000003)']):
             peer.send_and_ping(msg_block(block))
             assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
             peer.sync_with_ping()
@@ -173,8 +173,7 @@ def run_test(self):
         block.hashMerkleRoot = block.calc_merkle_root()
         block.solve()
 
-        with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with {}'.format(
-                block.vtx[-1].hash, expected_cltv_reject_reason)]):
+        with self.nodes[0].assert_debug_log(expected_msgs=[f'CheckInputScripts on {block.vtx[-1].hash} failed with {expected_cltv_reject_reason}']):
             peer.send_and_ping(msg_block(block))
             assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
             peer.sync_with_ping()

diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
index 4b8aeaba6a4a4..2462b20383d51 100755
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -23,7 +23,7 @@ def test_config_file_parser(self):
         inc_conf_file_path = os.path.join(self.nodes[0].datadir, 'include.conf')
         with open(os.path.join(self.nodes[0].datadir, 'dash.conf'), 'a', encoding='utf-8') as conf:
-            conf.write('includeconf={}\n'.format(inc_conf_file_path))
+            conf.write(f'includeconf={inc_conf_file_path}\n')
 
         self.nodes[0].assert_start_raises_init_error(
             expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli=1',
@@ -42,13 +42,13 @@ def test_config_file_parser(self):
         if self.is_wallet_compiled():
             with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
                 conf.write("wallet=foo\n")
-            self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on %s network when in [%s] section.' % (self.chain, self.chain))
+            self.nodes[0].assert_start_raises_init_error(expected_msg=f'Error: Config setting for -wallet only applied on {self.chain} network when in [{self.chain}] section.')
 
         main_conf_file_path = os.path.join(self.options.tmpdir, 'node0', 'dash_main.conf')
-        util.write_config(main_conf_file_path, n=0, chain='', extra_config='includeconf={}\n'.format(inc_conf_file_path))
+        util.write_config(main_conf_file_path, n=0, chain='', extra_config=f'includeconf={inc_conf_file_path}\n')
         with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
             conf.write('acceptnonstdtxn=1\n')
-        self.nodes[0].assert_start_raises_init_error(extra_args=["-conf={}".format(main_conf_file_path)], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
+        self.nodes[0].assert_start_raises_init_error(extra_args=[f"-conf={main_conf_file_path}"], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
 
         with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
             conf.write('nono\n')
@@ -68,14 +68,14 @@ def test_config_file_parser(self):
 
         inc_conf_file2_path = os.path.join(self.nodes[0].datadir, 'include2.conf')
         with open(os.path.join(self.nodes[0].datadir, 'dash.conf'), 'a', encoding='utf-8') as conf:
-            conf.write('includeconf={}\n'.format(inc_conf_file2_path))
+            conf.write(f'includeconf={inc_conf_file2_path}\n')
 
         with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
             conf.write('testnot.datadir=1\n')
         with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
             conf.write('[testnet]\n')
         self.restart_node(0)
-        self.nodes[0].stop_node(expected_stderr='Warning: ' + inc_conf_file_path + ':1 Section [testnot] is not recognized.' + os.linesep + inc_conf_file2_path + ':1 Section [testnet] is not recognized.')
+        self.nodes[0].stop_node(expected_stderr=f'Warning: {inc_conf_file_path}:1 Section [testnot] is not recognized.{os.linesep}{inc_conf_file2_path}:1 Section [testnet] is not recognized.')
 
         with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
             conf.write('')  # clear
@@ -104,8 +104,8 @@ def test_args_log(self):
                 'Command-line arg: rpcpassword=****',
                 'Command-line arg: rpcuser=****',
                 'Command-line arg: torpassword=****',
-                'Config file arg: %s="1"' % self.chain,
-                'Config file arg: [%s] server="1"' % self.chain,
+                f'Config file arg: {self.chain}="1"',
+                f'Config file arg: [{self.chain}] server="1"',
             ],
             unexpected_msgs=[
                 'alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
@@ -267,7 +267,7 @@ def run_test(self):
 
         # Check that using -datadir argument on non-existent directory fails
         self.nodes[0].datadir = new_data_dir
-        self.nodes[0].assert_start_raises_init_error(['-datadir=' + new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
+        self.nodes[0].assert_start_raises_init_error([f'-datadir={new_data_dir}'], f'Error: Specified data directory "{new_data_dir}" does not exist.')
 
         # Check that using non-existent datadir in conf file fails
         conf_file = os.path.join(default_data_dir, "dash.conf")
@@ -275,10 +275,10 @@ def run_test(self):
         # datadir needs to be set before [chain] section
         conf_file_contents = open(conf_file, encoding='utf8').read()
         with open(conf_file, 'w', encoding='utf8') as f:
-            f.write("datadir=" + new_data_dir + "\n")
+            f.write(f"datadir={new_data_dir}\n")
             f.write(conf_file_contents)
 
-        self.nodes[0].assert_start_raises_init_error(['-conf=' + conf_file], 'Error: Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
+        self.nodes[0].assert_start_raises_init_error([f'-conf={conf_file}'], f'Error: Error reading configuration file: specified data directory "{new_data_dir}" does not exist.')
 
         # Check that an explicitly specified config file that cannot be opened fails
         none_existent_conf_file = os.path.join(default_data_dir, "none_existent_dash.conf")
@@ -287,14 +287,14 @@ def run_test(self):
         # Create the directory and ensure the config file now works
         os.mkdir(new_data_dir)
         # Temporarily disabled, because this test would access the user's home dir (~/.bitcoin)
-        self.start_node(0, ['-conf='+conf_file])
+        self.start_node(0, [f'-conf={conf_file}'])
         self.stop_node(0)
         assert os.path.exists(os.path.join(new_data_dir, self.chain, 'blocks'))
 
         # Ensure command line argument overrides datadir in conf
         os.mkdir(new_data_dir_2)
         self.nodes[0].datadir = new_data_dir_2
-        self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file])
+        self.start_node(0, [f'-datadir={new_data_dir_2}', f'-conf={conf_file}'])
         assert os.path.exists(os.path.join(new_data_dir_2, self.chain, 'blocks'))

diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
index 092bb707e8de6..68ff895308f33 100755
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -258,7 +258,7 @@ def run_test(self):
         self.send_blocks(test_blocks)
         assert_equal(self.tipheight, CSV_ACTIVATION_HEIGHT - 2)
-        self.log.info("Height = {}, CSV not yet active (will activate for block {}, not {})".format(self.tipheight, CSV_ACTIVATION_HEIGHT, CSV_ACTIVATION_HEIGHT - 1))
+        self.log.info(f"Height = {self.tipheight}, CSV not yet active (will activate for block {CSV_ACTIVATION_HEIGHT}, not {CSV_ACTIVATION_HEIGHT - 1})")
         assert not softfork_active(self.nodes[0], 'csv')
 
         # Test both version 1 and version 2 transactions for all tests

diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py
index 01e0ffccc57ab..96e97c4da8307 100755
--- a/test/functional/feature_dbcrash.py
+++ b/test/functional/feature_dbcrash.py
@@ -102,7 +102,7 @@ def restart_node(self, node_index, expected_tip):
         # perhaps we generated a test case that blew up our cache?
         # TODO: If this happens a lot, we should try to restart without -dbcrashratio
         # and make sure that recovery happens.
-        raise AssertionError("Unable to successfully restart node %d in allotted time", node_index)
+        raise AssertionError(f"Unable to successfully restart node {node_index} in allotted time")
 
     def submit_block_catch_error(self, node_index, block):
         """Try submitting a block to the given node.
@@ -114,10 +114,10 @@ def submit_block_catch_error(self, node_index, block):
             self.nodes[node_index].submitblock(block)
             return True
         except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e:
-            self.log.debug("node %d submitblock raised exception: %s", node_index, e)
+            self.log.debug(f"node {node_index} submitblock raised exception: {e}")
             return False
         except OSError as e:
-            self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
+            self.log.debug(f"node {node_index} submitblock raised OSError exception: errno={e.errno}")
             if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
                 # The node has likely crashed
                 return False
@@ -142,15 +142,15 @@ def sync_node3blocks(self, block_hashes):
         # Deliver each block to each other node
         for i in range(3):
             nodei_utxo_hash = None
-            self.log.debug("Syncing blocks to node %d", i)
+            self.log.debug(f"Syncing blocks to node {i}")
             for (block_hash, block) in blocks:
                 # Get the block from node3, and submit to node_i
-                self.log.debug("submitting block %s", block_hash)
+                self.log.debug(f"submitting block {block_hash}")
                 if not self.submit_block_catch_error(i, block):
                     # TODO: more carefully check that the crash is due to -dbcrashratio
                     # (change the exit code perhaps, and check that here?)
                     self.wait_for_node_exit(i, timeout=30)
-                    self.log.debug("Restarting node %d after block hash %s", i, block_hash)
+                    self.log.debug(f"Restarting node {i} after block hash {block_hash}")
                     nodei_utxo_hash = self.restart_node(i, block_hash)
                     assert nodei_utxo_hash is not None
                     self.restart_counts[i] += 1
@@ -167,7 +167,7 @@ def sync_node3blocks(self, block_hashes):
             # - we only update the utxo cache after a node restart, since flushing
             #   the cache is a no-op at that point
             if nodei_utxo_hash is not None:
-                self.log.debug("Checking txoutsetinfo matches for node %d", i)
+                self.log.debug(f"Checking txoutsetinfo matches for node {i}")
                 assert_equal(nodei_utxo_hash, node3_utxo_hash)
 
     def verify_utxo_hash(self):
@@ -218,14 +218,14 @@ def run_test(self):
         # Start by creating a lot of utxos on node3
         initial_height = self.nodes[3].getblockcount()
         utxo_list = create_confirmed_utxos(self, self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000, sync_fun=self.no_op)
-        self.log.info("Prepped %d utxo entries", len(utxo_list))
+        self.log.info(f"Prepped {len(utxo_list)} utxo entries")
 
         # Sync these blocks with the other nodes
         block_hashes_to_sync = []
         for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
             block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
 
-        self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
+        self.log.debug(f"Syncing {len(block_hashes_to_sync)} blocks with other nodes")
         # Syncing the blocks could cause nodes to crash, so the test begins here.
         self.sync_node3blocks(block_hashes_to_sync)
@@ -235,18 +235,18 @@ def run_test(self):
         # each time through the loop, generate a bunch of transactions,
         # and then either mine a single new block on the tip, or some-sized reorg.
         for i in range(40):
-            self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
+            self.log.info(f"Iteration {i}, generating 2500 transactions {self.restart_counts}")
             # Generate a bunch of small-ish transactions
             self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
             # Pick a random block between current tip, and starting tip
             current_height = self.nodes[3].getblockcount()
             random_height = random.randint(starting_tip_height, current_height)
-            self.log.debug("At height %d, considering height %d", current_height, random_height)
+            self.log.debug(f"At height {current_height}, considering height {random_height}")
             if random_height > starting_tip_height:
                 # Randomly reorg from this point with some probability (1/4 for
                 # tip, 1/5 for tip-1, ...)
                 if random.random() < 1.0 / (current_height + 4 - random_height):
-                    self.log.debug("Invalidating block at height %d", random_height)
+                    self.log.debug(f"Invalidating block at height {random_height}")
                     self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
 
             # Now generate new blocks until we pass the old tip height
@@ -260,10 +260,10 @@ def run_test(self):
                 address=self.nodes[3].getnewaddress(),
                 sync_fun=self.no_op,
             ))
-            self.log.debug("Syncing %d new blocks...", len(block_hashes))
+            self.log.debug(f"Syncing {len(block_hashes)} new blocks...")
             self.sync_node3blocks(block_hashes)
             utxo_list = self.nodes[3].listunspent()
-            self.log.debug("Node3 utxo count: %d", len(utxo_list))
+            self.log.debug(f"Node3 utxo count: {len(utxo_list)}")
 
         # Check that the utxo hashes agree with node3
         # Useful side effect: each utxo cache gets flushed here, so that we
@@ -271,7 +271,7 @@ def run_test(self):
         self.verify_utxo_hash()
 
         # Check the test coverage
-        self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
+        self.log.info(f"Restarted nodes: {self.restart_counts}; crashes on restart: {self.crashed_on_restart}")
 
         # If no nodes were restarted, we didn't test anything.
         assert self.restart_counts != [0, 0, 0]
@@ -282,7 +282,7 @@ def run_test(self):
         # Warn if any of the nodes escaped restart.
         for i in range(3):
             if self.restart_counts[i] == 0:
-                self.log.warning("Node %d never crashed during utxo flush!", i)
+                self.log.warning(f"Node {i} never crashed during utxo flush!")
 
 
 if __name__ == "__main__":

diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
index 0a6d1b5a71dc5..a0f55ca7d2d13 100755
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -109,7 +109,7 @@ def run_test(self):
         block.rehash()
         block.solve()
 
-        with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000002)'.format(block.hash)]):
+        with self.nodes[0].assert_debug_log(expected_msgs=[f'{block.hash}, bad-version(0x00000002)']):
             peer.send_and_ping(msg_block(block))
             assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
             peer.sync_with_ping()
@@ -131,7 +131,7 @@ def run_test(self):
         block.rehash()
         block.solve()
 
-        with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'.format(block.vtx[-1].hash)]):
+        with self.nodes[0].assert_debug_log(expected_msgs=[f'CheckInputScripts on {block.vtx[-1].hash} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)']):
             peer.send_and_ping(msg_block(block))
             assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
             peer.sync_with_ping()

diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 12efe8dc395d0..b50484e88dd54 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -72,7 +72,7 @@ def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee
         total_in += t["amount"]
         tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
     if total_in <= amount + fee:
-        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount + fee, total_in))
+        raise RuntimeError(f"Insufficient funds: need {amount + fee}, have {total_in}")
     tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
     tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
     # These transactions don't need to be signed, but we still have to insert
@@ -125,8 +125,7 @@ def check_raw_estimates(node, fees_seen):
         assert_greater_than(feerate, 0)
 
         if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
-            raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
-                                 % (feerate, min(fees_seen), max(fees_seen)))
+            raise AssertionError(f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})")
 
 
 def check_smart_estimates(node, fees_seen):
@@ -144,11 +143,9 @@ def check_smart_estimates(node, fees_seen):
         assert_greater_than_or_equal(feerate, float(minRelaytxFee))
 
         if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
-            raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
-                                 % (feerate, min(fees_seen), max(fees_seen)))
+            raise AssertionError(f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})")
         if feerate - delta > last_feerate:
-            raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
-                                 % (feerate, last_feerate))
+            raise AssertionError(f"Estimated fee ({feerate}) larger than last fee ({last_feerate}) for lower number of confirms")
         last_feerate = feerate
 
         if i == 0:

diff --git a/test/functional/feature_filelock.py b/test/functional/feature_filelock.py
index 0ec69b8d8457a..4b47b15b6d5e7 100755
--- a/test/functional/feature_filelock.py
+++ b/test/functional/feature_filelock.py
@@ -22,11 +22,11 @@ def setup_network(self):
 
     def run_test(self):
         datadir = os.path.join(self.nodes[0].datadir, self.chain)
-        self.log.info("Using datadir {}".format(datadir))
+        self.log.info(f"Using datadir {datadir}")
 
         self.log.info("Check that we can't start a second dashd instance using the same datadir")
-        expected_msg = "Error: Cannot obtain a lock on data directory {0}. {1} is probably already running.".format(datadir, self.config['environment']['PACKAGE_NAME'])
-        self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)
+        expected_msg = f"Error: Cannot obtain a lock on data directory {datadir}. {self.config['environment']['PACKAGE_NAME']} is probably already running."
+        self.nodes[1].assert_start_raises_init_error(extra_args=[f'-datadir={self.nodes[0].datadir}', '-noserver'], expected_msg=expected_msg)
 
         if self.is_wallet_compiled():
             def check_wallet_filelock(descriptors):
@@ -38,7 +38,7 @@ def check_wallet_filelock(descriptors):
                 expected_msg = "Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another dashd?"
             else:
                 expected_msg = "Error: Error initializing wallet database environment"
-            self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-wallet=' + wallet_name, '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
+            self.nodes[1].assert_start_raises_init_error(extra_args=[f'-walletdir={wallet_dir}', f'-wallet={wallet_name}', '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
 
         if self.is_bdb_compiled():
             check_wallet_filelock(False)

diff --git a/test/functional/feature_help.py b/test/functional/feature_help.py
index b8cef0e3d3ef8..88bf534819378 100755
--- a/test/functional/feature_help.py
+++ b/test/functional/feature_help.py
@@ -40,14 +40,14 @@ def run_test(self):
         # Node should exit immediately and output help to stdout.
         output, _ = self.get_node_output(ret_code_expected=0)
         assert b'Options' in output
-        self.log.info("Help text received: {} (...)".format(output[0:60]))
+        self.log.info(f"Help text received: {output[0:60]} (...)")
 
         self.log.info("Start dashd with -version for version information")
         self.nodes[0].start(extra_args=['-version'])
         # Node should exit immediately and output version to stdout.
         output, _ = self.get_node_output(ret_code_expected=0)
         assert b'version' in output
-        self.log.info("Version text received: {} (...)".format(output[0:60]))
+        self.log.info(f"Version text received: {output[0:60]} (...)")
 
         # Test that arguments not in the help results in an error
         self.log.info("Start dashdd with -fakearg to make sure it does not start")
@@ -55,7 +55,7 @@ def run_test(self):
         # Node should exit immediately and output an error to stderr
         _, output = self.get_node_output(ret_code_expected=1)
         assert b'Error parsing command line arguments' in output
-        self.log.info("Error message received: {} (...)".format(output[0:60]))
+        self.log.info(f"Error message received: {output[0:60]} (...)")
 
 
 if __name__ == '__main__':

diff --git a/test/functional/feature_loadblock.py b/test/functional/feature_loadblock.py
index c4f3003d63611..562641fc44a5b 100755
--- a/test/functional/feature_loadblock.py
+++ b/test/functional/feature_loadblock.py
@@ -45,17 +45,17 @@ def run_test(self):
         self.log.info("Create linearization config file")
         with open(cfg_file, "a", encoding="utf-8") as cfg:
-            cfg.write("datadir={}\n".format(data_dir))
-            cfg.write("rpcuser={}\n".format(node_url.username))
-            cfg.write("rpcpassword={}\n".format(node_url.password))
-            cfg.write("port={}\n".format(node_url.port))
-            cfg.write("host={}\n".format(node_url.hostname))
-            cfg.write("output_file={}\n".format(bootstrap_file))
-            cfg.write("max_height=100\n")
-            cfg.write("netmagic=fcc1b7dc\n")
-            cfg.write("input={}\n".format(blocks_dir))
-            cfg.write("genesis={}\n".format(genesis_block))
-            cfg.write("hashlist={}\n".format(hash_list.name))
+            cfg.write(f"datadir={data_dir}\n")
+            cfg.write(f"rpcuser={node_url.username}\n")
+            cfg.write(f"rpcpassword={node_url.password}\n")
+            cfg.write(f"port={node_url.port}\n")
+            cfg.write(f"host={node_url.hostname}\n")
+            cfg.write(f"output_file={bootstrap_file}\n")
+            cfg.write(f"max_height=100\n")
+            cfg.write(f"netmagic=fcc1b7dc\n")
+            cfg.write(f"input={blocks_dir}\n")
+            cfg.write(f"genesis={genesis_block}\n")
+            cfg.write(f"hashlist={hash_list.name}\n")
 
         base_dir = self.config["environment"]["SRCDIR"]
         linearize_dir = os.path.join(base_dir, "contrib", "linearize")
@@ -72,7 +72,7 @@ def run_test(self):
                        check=True)
 
         self.log.info("Restart second, unsynced node with bootstrap file")
-        self.restart_node(1, extra_args=["-loadblock=" + bootstrap_file])
+        self.restart_node(1, extra_args=[f"-loadblock={bootstrap_file}"])
         assert_equal(self.nodes[1].getblockcount(), 100)  # start_node is blocking on all block files being imported
         assert_equal(self.nodes[1].getblockchaininfo()['blocks'], 100)

diff --git a/test/functional/feature_logging.py b/test/functional/feature_logging.py
index afcbcf099ad33..722219518a0ad 100755
--- a/test/functional/feature_logging.py
+++ b/test/functional/feature_logging.py
@@ -29,7 +29,7 @@ def run_test(self):
 
         # test alternative log file name outside datadir
         tempname = os.path.join(self.options.tmpdir, "foo.log")
-        self.restart_node(0, ["-debuglogfile=%s" % tempname])
+        self.restart_node(0, [f"-debuglogfile={tempname}"])
         assert os.path.isfile(tempname)
 
         # check that invalid log (relative) will cause error
@@ -37,26 +37,26 @@ def run_test(self):
         invalidname = os.path.join("foo", "foo.log")
         self.stop_node(0)
         exp_stderr = r"Error: Could not open debug log file \S+$"
-        self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % (invalidname)], exp_stderr, match=ErrorMatch.FULL_REGEX)
+        self.nodes[0].assert_start_raises_init_error([f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX)
         assert not os.path.isfile(os.path.join(invdir, "foo.log"))
 
         # check that invalid log (relative) works after path exists
         self.stop_node(0)
         os.mkdir(invdir)
-        self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
+        self.start_node(0, [f"-debuglogfile={invalidname}"])
         assert os.path.isfile(os.path.join(invdir, "foo.log"))
 
         # check that invalid log (absolute) will cause error
         self.stop_node(0)
         invdir = os.path.join(self.options.tmpdir, "foo")
         invalidname = os.path.join(invdir, "foo.log")
-        self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % invalidname], exp_stderr, match=ErrorMatch.FULL_REGEX)
+        self.nodes[0].assert_start_raises_init_error([f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX)
         assert not os.path.isfile(os.path.join(invdir, "foo.log"))
 
         # check that invalid log (absolute) works after path exists
         self.stop_node(0)
         os.mkdir(invdir)
-        self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
+        self.start_node(0, [f"-debuglogfile={invalidname}"])
         assert os.path.isfile(os.path.join(invdir, "foo.log"))
 
         # check that -nodebuglogfile disables logging
@@ -67,7 +67,7 @@ def run_test(self):
         assert not os.path.isfile(default_log_path)
 
         # just sanity check no crash here
-        self.restart_node(0, ["-debuglogfile=%s" % os.devnull])
+        self.restart_node(0, [f"-debuglogfile={os.devnull}"])
 
 
 if __name__ == '__main__':

diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py
index 72edc238af524..cb8aee912baf9 100755
--- a/test/functional/feature_minchainwork.py
+++ b/test/functional/feature_minchainwork.py
@@ -51,16 +51,16 @@ def run_test(self):
         # Start building a chain on node0. node2 shouldn't be able to sync until node1's
         # minchainwork is exceeded
         starting_chain_work = REGTEST_WORK_PER_BLOCK  # Genesis block's work
-        self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
+        self.log.info(f"Testing relay across node 1 (minChainWork = {self.node_min_work[1]})")
 
         starting_blockcount = self.nodes[2].getblockcount()
 
         num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
-        self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
+        self.log.info(f"Generating {num_blocks_to_generate} blocks on node0")
         hashes = self.generatetoaddress(self.nodes[0], num_blocks_to_generate, self.nodes[0].get_deterministic_priv_key().address, sync_fun=self.no_op)
 
-        self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
+        self.log.info(f"Node0 current chain work: {self.nodes[0].getblockheader(hashes[-1])['chainwork']}")
 
         # Sleep a few seconds and verify that node2 didn't get any new blocks
         # or headers. We sleep, rather than sync_blocks(node0, node1) because
@@ -69,7 +69,7 @@ def run_test(self):
         time.sleep(3)
 
         self.log.info("Verifying node 2 has no more blocks than before")
-        self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
+        self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
         # Node2 shouldn't have any new headers yet, because node1 should not
         # have relayed anything.
         assert_equal(len(self.nodes[2].getchaintips()), 1)
@@ -98,7 +98,7 @@ def run_test(self):
         # insufficient work chain, in which case we'd need to reconnect them to
         # continue the test.
 
-        self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
+        self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
 
         self.log.info("Test that getheaders requests to node2 are not ignored")
         peer.send_and_ping(msg)

diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
index 8ef3720ff9bdc..32a983df65fef 100755
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -22,7 +22,7 @@
 UNCONFIRMED_HASH_STRING = 'unconfirmed'
 
 def notify_outputname(walletname, txid):
-    return txid if os.name == 'nt' else '{}_{}'.format(walletname, txid)
+    return txid if os.name == 'nt' else f'{walletname}_{txid}'
 
 class NotificationsTest(DashTestFramework):
@@ -45,13 +45,13 @@ def setup_network(self):
         # -alertnotify and -blocknotify on node0, walletnotify on node1
         # -chainlocknotify on node0, -instantsendnotify on node1
         self.extra_args = [[
-            "-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')),
-            "-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s')),
-            "-chainlocknotify=echo > {}".format(os.path.join(self.chainlocknotify_dir, '%s')),
+            f"-alertnotify=echo > {os.path.join(self.alertnotify_dir, '%s')}",
+            f"-blocknotify=echo > {os.path.join(self.blocknotify_dir, '%s')}",
+            f"-chainlocknotify=echo > {os.path.join(self.chainlocknotify_dir, '%s')}",
         ], [
             "-rescan",
-            "-walletnotify=echo %h_%b > {}".format(os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))),
-            "-instantsendnotify=echo > {}".format(os.path.join(self.instantsendnotify_dir, notify_outputname('%w', '%s'))),
+            f"-walletnotify=echo %h_%b > {os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))}",
+            f"-instantsendnotify=echo > {os.path.join(self.instantsendnotify_dir, notify_outputname('%w', '%s'))}",
         ],
         [], [], [], []]

diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index 6f67fcbcf193b..d6b68107e3d4f 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -161,7 +161,7 @@ def test_invalid_command_line_options(self):
     def test_height_min(self):
         assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
         self.log.info("Success")
-        self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
+        self.log.info(f"Though we're already using more than 550MiB, current usage: {calc_usage(self.prunedir)}")
         self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
         # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
         mine_large_blocks(self.nodes[0], 25)
@@ -171,7 +171,7 @@ def test_height_min(self):
         self.log.info("Success")
 
         usage = calc_usage(self.prunedir)
-        self.log.info("Usage should be below target: %d" % usage)
+        self.log.info(f"Usage should be below target: {usage}")
         assert_greater_than(550, usage)
 
     def create_chain_with_staleblocks(self):
@@ -194,18 +194,18 @@ def create_chain_with_staleblocks(self):
             self.connect_nodes(0, 2)
             self.sync_blocks(self.nodes[0:3])
 
-        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
+        self.log.info(f"Usage can be over target because of high stale rate: {calc_usage(self.prunedir)}")
 
     def reorg_test(self):
         # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
         # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
         height = self.nodes[1].getblockcount()
-        self.log.info("Current block height: %d" % height)
+        self.log.info(f"Current block height: {height}")
 
         self.forkheight = height - 287
         self.forkhash = self.nodes[1].getblockhash(self.forkheight)
-        self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
+        self.log.info(f"Invalidating block {self.forkhash} at height {self.forkheight}")
         self.nodes[1].invalidateblock(self.forkhash)
 
         # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
@@ -217,7 +217,7 @@ def reorg_test(self):
         curhash = self.nodes[1].getblockhash(self.forkheight - 1)
 
         assert self.nodes[1].getblockcount() == self.forkheight - 1
-        self.log.info("New best height: %d" % self.nodes[1].getblockcount())
+        self.log.info(f"New best height: {self.nodes[1].getblockcount()}")
 
         # Mine one block to avoid automatic recovery from forks on restart
         self.generate(self.nodes[1], 1, sync_fun=self.no_op)
@@ -233,8 +233,8 @@ def reorg_test(self):
         self.connect_nodes(1, 2)
         self.sync_blocks(self.nodes[0:3], timeout=120)
 
-        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
-        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
+        self.log.info(f"Verify height on node 2: {self.nodes[2].getblockcount()}")
+        self.log.info(f"Usage possibly still high because of stale blocks in block files: {calc_usage(self.prunedir)}")
 
         self.log.info("Mine 220 more large blocks so we have requisite history")
@@ -242,7 +242,7 @@ def reorg_test(self):
         self.sync_blocks(self.nodes[0:3], timeout=120)
 
         usage = calc_usage(self.prunedir)
-        self.log.info("Usage should be below target: %d" % usage)
+        self.log.info(f"Usage should be below target: {usage}")
         assert_greater_than(550, usage)
 
     def reorg_back(self):
@@ -250,7 +250,7 @@ def reorg_back(self):
         assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
         with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
             self.nodes[2].verifychain(checklevel=4, nblocks=0)
-        self.log.info("Will need to redownload block %d" % self.forkheight)
+        self.log.info(f"Will need to redownload block {self.forkheight}")
 
         # Verify that we have enough history to reorg back to the fork point
         # Although this is more than 288 blocks, because this chain was written more recently
@@ -274,7 +274,7 @@ def reorg_back(self):
         # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
         if self.nodes[2].getblockcount() < self.mainchainheight:
             blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
-            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
+            self.log.info(f"Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {blocks_to_mine}")
             self.nodes[0].invalidateblock(curchainhash)
             assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
             assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
@@ -311,7 +311,7 @@ def prune(index):
             assert_equal(ret, node.getblockchaininfo()['pruneheight'])
 
         def has_block(index):
-            return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", "blk{:05}.dat".format(index)))
+            return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", f"blk{index:05}.dat"))
 
         # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
         assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))

diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py
index f107acc6998f5..7b295e854d6ae 100755
--- a/test/functional/feature_settings.py
+++ b/test/functional/feature_settings.py
@@ -83,7 +83,7 @@ def run_test(self):
         with altsettings.open("w") as fp:
             fp.write('{"key": "value"}')
         with node.assert_debug_log(expected_msgs=['Setting file arg: key = "value"']):
-            self.start_node(0, extra_args=["-settings={}".format(altsettings)])
+            self.start_node(0, extra_args=[f"-settings={altsettings}"])
         self.stop_node(0)

diff --git a/test/functional/feature_versionbits_warning.py b/test/functional/feature_versionbits_warning.py
index cf02a08bf9688..aedac443c6df2 100755
--- a/test/functional/feature_versionbits_warning.py
+++ b/test/functional/feature_versionbits_warning.py
@@ -21,7 +21,7 @@
 VB_UNKNOWN_BIT = 27  # Choose a bit unassigned to any deployment
 VB_UNKNOWN_VERSION = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)
 
-WARN_UNKNOWN_RULES_ACTIVE = "Unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
+WARN_UNKNOWN_RULES_ACTIVE = f"Unknown new rules activated (versionbit {VB_UNKNOWN_BIT})"
 VB_PATTERN = re.compile("Unknown new rules activated.*versionbit")
 
 class VersionBitsWarningTest(BitcoinTestFramework):
@@ -34,7 +34,7 @@ def setup_network(self):
         # Open and close to create zero-length file
         with open(self.alert_filename, 'w', encoding='utf8'):
             pass
-        self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
+        self.extra_args = [[f"-alertnotify=echo %s >> \"{self.alert_filename}\""]]
         self.setup_nodes()
 
     def send_blocks_with_version(self, peer, numblocks, version):

diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py
index d72cd31f577e4..671ba832dd7ec 100755
--- a/test/functional/interface_bitcoin_cli.py
+++ b/test/functional/interface_bitcoin_cli.py
@@ -96,12 +96,12 @@ def run_test(self):
         user, password = get_auth_cookie(self.nodes[0].datadir, self.chain)
 
         self.log.info("Test -stdinrpcpass option")
-        assert_equal(BLOCKS, self.nodes[0].cli('-rpcuser={}'.format(user), '-stdinrpcpass', input=password).getblockcount())
-        assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli('-rpcuser={}'.format(user), '-stdinrpcpass', input='foo').echo)
+        assert_equal(BLOCKS, self.nodes[0].cli(f'-rpcuser={user}', '-stdinrpcpass', input=password).getblockcount())
+        assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli(f'-rpcuser={user}', '-stdinrpcpass', input='foo').echo)
 
         self.log.info("Test -stdin and -stdinrpcpass")
-        assert_equal(['foo', 'bar'], self.nodes[0].cli('-rpcuser={}'.format(user), '-stdin', '-stdinrpcpass', input=password + '\nfoo\nbar').echo())
-        assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli('-rpcuser={}'.format(user), '-stdin', '-stdinrpcpass', input='foo').echo)
+        assert_equal(['foo', 'bar'], self.nodes[0].cli(f'-rpcuser={user}', '-stdin', '-stdinrpcpass', input=f'{password}\nfoo\nbar').echo())
+        assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli(f'-rpcuser={user}', '-stdin', '-stdinrpcpass', input='foo').echo)
 
         self.log.info("Test connecting to a non-existing server")
         assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
@@ -167,8 +167,8 @@ def run_test(self):
             w1 = self.nodes[0].get_wallet_rpc(wallets[0])
             w2 = self.nodes[0].get_wallet_rpc(wallets[1])
             w3 = self.nodes[0].get_wallet_rpc(wallets[2])
-            rpcwallet2 = '-rpcwallet={}'.format(wallets[1])
-            rpcwallet3 = '-rpcwallet={}'.format(wallets[2])
+            rpcwallet2 = f'-rpcwallet={wallets[1]}'
+            rpcwallet3 = f'-rpcwallet={wallets[2]}'
             w1.walletpassphrase(password, self.rpc_timeout)
             w2.encryptwallet(password)
             w1.sendtoaddress(w2.getnewaddress(), amounts[1])
@@ -179,7 +179,7 @@ def run_test(self):
 
             self.log.info("Test -getinfo with multiple wallets and -rpcwallet returns specified wallet balance")
             for i in range(len(wallets)):
-                cli_get_info_string = self.nodes[0].cli('-getinfo', '-rpcwallet={}'.format(wallets[i])).send_cli()
+                cli_get_info_string = self.nodes[0].cli('-getinfo', f'-rpcwallet={wallets[i]}').send_cli()
                 cli_get_info = cli_get_info_string_to_dict(cli_get_info_string)
                 assert 'Balances' not in cli_get_info_string
                 assert_equal(cli_get_info["Wallet"], wallets[i])
@@ -313,7 +313,7 @@ def run_test(self):
             self.log.info("Test -version with node stopped")
             self.stop_node(0)
             cli_response = self.nodes[0].cli('-version').send_cli()
-            assert "{} RPC client version".format(self.config['environment']['PACKAGE_NAME']) in cli_response
+            assert f"{self.config['environment']['PACKAGE_NAME']} RPC client version" in cli_response
 
             self.log.info("Test -rpcwait option successfully waits for RPC connection")
             self.nodes[0].start()  # start node without RPC connection

diff --git a/test/functional/interface_http.py b/test/functional/interface_http.py
index 259135f24e4c6..6f5be5df1577d 100755
--- a/test/functional/interface_http.py
+++ b/test/functional/interface_http.py
@@ -24,8 +24,8 @@ def run_test(self):
         # lowlevel check for http persistent connection #
         #################################################
         url = urllib.parse.urlparse(self.nodes[0].url)
-        authpair = url.username + ':' + url.password
-        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+        authpair = f'{url.username}:{url.password}'
+        headers = {"Authorization": f"Basic {str_to_b64str(authpair)}"}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
@@ -42,7 +42,7 @@ def run_test(self):
         conn.close()
 
         #same should be if we add keep-alive because this should be the std. behaviour
-        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
+        headers = {"Authorization": f"Basic {str_to_b64str(authpair)}", "Connection": "keep-alive"}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
@@ -59,7 +59,7 @@ def run_test(self):
         conn.close()
 
         #now do the same with "Connection: close"
-        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
+        headers = {"Authorization": f"Basic {str_to_b64str(authpair)}", "Connection":"close"}
 
         conn = http.client.HTTPConnection(url.hostname, url.port)
         conn.connect()
@@ -70,8 +70,8 @@ def run_test(self):
 
         #node1 (2nd node) is running with disabled keep-alive option
         urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
-        authpair = urlNode1.username + ':' + urlNode1.password
-        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+        authpair = f'{urlNode1.username}:{urlNode1.password}'
+        headers = {"Authorization": f"Basic {str_to_b64str(authpair)}"}
 
         conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
         conn.connect()
@@ -81,8 +81,8 @@ def run_test(self):
 
         #node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
         urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
-        authpair = urlNode2.username + ':' + urlNode2.password
-        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+        authpair = f'{urlNode2.username}:{urlNode2.password}'
+        headers = {"Authorization": f"Basic {str_to_b64str(authpair)}"}
 
         conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
         conn.connect()
@@ -94,13 +94,13 @@ def run_test(self):
         # Check excessive request size
         conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
         conn.connect()
-        conn.request('GET', '/' + ('x'*1000), '', headers)
+        conn.request('GET', f'/{"x"*1000}', '', headers)
         out1 = conn.getresponse()
         assert_equal(out1.status, http.client.NOT_FOUND)
 
         conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
         conn.connect()
-        conn.request('GET', '/' + ('x'*10000), '', headers)
+        conn.request('GET', f'/{"x"*10000}', '', headers)
         out1 = conn.getresponse()
         assert_equal(out1.status, http.client.BAD_REQUEST)

diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py
index 46b1457c1dbe9..b079a9393fa6d 100755
--- a/test/functional/interface_rest.py
+++ b/test/functional/interface_rest.py
@@ -61,7 +61,7 @@ def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body=
             rest_uri += '.hex'
 
         conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
-        self.log.debug('%s %s %s', http_method, rest_uri, body)
+        self.log.debug(f'{http_method} {rest_uri} {body}')
         if http_method == 'GET':
             conn.request('GET', rest_uri)
         elif http_method == 'POST':
@@ -94,11 +94,11 @@ def run_test(self):
         self.log.info("Test the /tx URI")
 
-        json_obj = self.test_rest_request("/tx/{}".format(txid))
+        json_obj = self.test_rest_request(f"/tx/{txid}")
         assert_equal(json_obj['txid'], txid)
 
         # Check hex format response
-        hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
+        hex_response = self.test_rest_request(f"/tx/{txid}", req_type=ReqType.HEX, ret_type=RetType.OBJ)
         assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
                                      json_obj['size']*2)
@@ -121,7 +121,7 @@ def run_test(self):
         assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
 
         # Check chainTip response
-        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
+        json_obj = self.test_rest_request(f"/getutxos/{spending[0]}-{spending[1]}")
         assert_equal(json_obj['chaintipHash'], bb_hash)
 
         # Make sure there is one utxo
@@ -130,7 +130,7 @@ def run_test(self):
 
         self.log.info("Query a spent TXO using the /getutxos URI")
 
-        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
+        json_obj = self.test_rest_request(f"/getutxos/{spent[0]}-{spent[1]}")
 
         # Check chainTip response
         assert_equal(json_obj['chaintipHash'], bb_hash)
@@ -143,7 +143,7 @@ def run_test(self):
 
         self.log.info("Query two TXOs using the /getutxos URI")
 
-        json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
+        json_obj = self.test_rest_request(f"/getutxos/{spending[0]}-{spending[1]}/{spent[0]}-{spent[1]}")
 
         assert_equal(len(json_obj['utxos']), 1)
         assert_equal(json_obj['bitmap'], "10")
@@ -170,31 +170,31 @@ def run_test(self):
         # do a tx and don't sync
         txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
-        json_obj = self.test_rest_request("/tx/{}".format(txid))
+        json_obj = self.test_rest_request(f"/tx/{txid}")
         # get the spent output to later check for utxo (should be spent by then)
         spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
         # get n of 0.1 outpoint
         n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
         spending = (txid, n)
 
-        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
+        json_obj = self.test_rest_request(f"/getutxos/{spending[0]}-{spending[1]}")
         assert_equal(len(json_obj['utxos']), 0)
 
-        json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
+        json_obj = self.test_rest_request(f"/getutxos/checkmempool/{spending[0]}-{spending[1]}")
         assert_equal(len(json_obj['utxos']), 1)
 
-        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
+        json_obj = self.test_rest_request(f"/getutxos/{spent[0]}-{spent[1]}")
         assert_equal(len(json_obj['utxos']), 1)
 
-        json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
+        json_obj = self.test_rest_request(f"/getutxos/checkmempool/{spent[0]}-{spent[1]}")
         assert_equal(len(json_obj['utxos']), 0)
 
         self.generate(self.nodes[0], 1)
 
-        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
+        json_obj = self.test_rest_request(f"/getutxos/{spending[0]}-{spending[1]}")
         assert_equal(len(json_obj['utxos']), 1)
 
-        json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
+        json_obj = self.test_rest_request(f"/getutxos/checkmempool/{spending[0]}-{spending[1]}")
         assert_equal(len(json_obj['utxos']), 1)
 
         # Do some invalid requests
@@ -203,11 +203,11 @@ def run_test(self):
         self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
 
         # Test limits
-        long_uri = '/'.join(["{}-{}".format(txid, n) for n in range(20)])
-        self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
+        long_uri = '/'.join([f"{txid}-{n_}" for n_ in range(20)])
+        self.test_rest_request(f"/getutxos/checkmempool/{long_uri}", http_method='POST', status=400, ret_type=RetType.OBJ)
 
-        long_uri = '/'.join(['{}-{}'.format(txid, n) for n in range(15)])
-        self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
+        long_uri = '/'.join([f'{txid}-{n_}' for n_ in range(15)])
+        self.test_rest_request(f"/getutxos/checkmempool/{long_uri}", http_method='POST', status=200)
 
         self.generate(self.nodes[0], 1)  # generate block to not affect upcoming tests
@@ -220,42 +220,42 @@ def run_test(self):
 
         # Check result if block is not in the active chain
         self.nodes[0].invalidateblock(bb_hash)
-        assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
-        self.test_rest_request('/block/{}'.format(bb_hash))
+        assert_equal(self.test_rest_request(f'/headers/1/{bb_hash}'), [])
+        self.test_rest_request(f'/block/{bb_hash}')
         self.nodes[0].reconsiderblock(bb_hash)
 
         # Check binary format
-        response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
+        response = self.test_rest_request(f"/block/{bb_hash}", req_type=ReqType.BIN, ret_type=RetType.OBJ)
         assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
         response_bytes = response.read()
 
         # Compare with block header
-        response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
+        response_header = self.test_rest_request(f"/headers/1/{bb_hash}", req_type=ReqType.BIN, ret_type=RetType.OBJ)
         assert_equal(int(response_header.getheader('content-length')), BLOCK_HEADER_SIZE)
         response_header_bytes = response_header.read()
         assert_equal(response_bytes[:BLOCK_HEADER_SIZE], response_header_bytes)
 
         # Check block hex format
-        response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
+        response_hex = self.test_rest_request(f"/block/{bb_hash}", req_type=ReqType.HEX, ret_type=RetType.OBJ)
         assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
         response_hex_bytes = response_hex.read().strip(b'\n')
         assert_equal(response_bytes.hex().encode(), response_hex_bytes)
 
         # Compare with hex block header
-        response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
+        response_header_hex = self.test_rest_request(f"/headers/1/{bb_hash}", req_type=ReqType.HEX, ret_type=RetType.OBJ)
         assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
         response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
         assert_equal(response_bytes[:BLOCK_HEADER_SIZE].hex().encode(), response_header_hex_bytes)
 
         # Check json format
-        block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
+        block_json_obj = self.test_rest_request(f"/block/{bb_hash}")
         assert_equal(block_json_obj['hash'], bb_hash)
-        assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
+        assert_equal(self.test_rest_request(f"/blockhashbyheight/{block_json_obj['height']}")['blockhash'], bb_hash)
 
         # Check hex/bin format
-        resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
+        resp_hex = self.test_rest_request(f"/blockhashbyheight/{block_json_obj['height']}", req_type=ReqType.HEX, ret_type=RetType.OBJ)
         assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
-        resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
+        resp_bytes = self.test_rest_request(f"/blockhashbyheight/{block_json_obj['height']}", req_type=ReqType.BIN, ret_type=RetType.BYTES)
         blockhash = resp_bytes[::-1].hex()
         assert_equal(blockhash, bb_hash)
@@ -269,7 +269,7 @@ def run_test(self):
         self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
 
         # Compare with json block header
-        json_obj
= self.test_rest_request(f"/headers/1/{bb_hash}") assert_equal(len(json_obj), 1) # ensure that there is one header in the json response assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same @@ -280,7 +280,7 @@ def run_test(self): # See if we can get 5 headers in one response self.generate(self.nodes[1], 5) - json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash)) + json_obj = self.test_rest_request(f"/headers/5/{bb_hash}") assert_equal(len(json_obj), 5) # now we should have 5 header objects json_obj = self.test_rest_request(f"/blockfilterheaders/basic/5/{bb_hash}") assert_equal(len(json_obj), 5) # now we should have 5 filter header objects @@ -326,13 +326,13 @@ def run_test(self): newblockhash = self.generate(self.nodes[1], 1) # Check if the 3 tx show up in the new block - json_obj = self.test_rest_request("/block/{}".format(newblockhash[0])) + json_obj = self.test_rest_request(f"/block/{newblockhash[0]}") non_coinbase_txs = {tx['txid'] for tx in json_obj['tx'] if 'coinbase' not in tx['vin'][0]} assert_equal(non_coinbase_txs, set(txs)) # Check the same but without tx details - json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0])) + json_obj = self.test_rest_request(f"/block/notxdetails/{newblockhash[0]}") for tx in txs: assert tx in json_obj['tx'] diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py index 4d5666f414efc..89a7d29b244f4 100755 --- a/test/functional/interface_rpc.py +++ b/test/functional/interface_rpc.py @@ -16,7 +16,7 @@ def expect_http_status(expected_http_status, expected_rpc_code, fcn, *args): try: fcn(*args) - raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code) + raise AssertionError(f"Expected RPC error {expected_rpc_code}, got none") except JSONRPCException as exc: assert_equal(exc.error["code"], expected_rpc_code) assert_equal(exc.http_status, expected_http_status) diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py index d8565814c2bcb..48c00db6c5f08 100755 --- a/test/functional/interface_zmq.py +++ b/test/functional/interface_zmq.py @@ -133,7 +133,7 @@ def setup_zmq_test(self, services, *, recv_timeout=60, sync_blocks=True, ipv6=Fa socket.setsockopt(zmq.IPV6, 1) subscribers.append(ZMQSubscriber(socket, topic.encode())) - self.restart_node(0, ["-zmqpub%s=%s" % (topic, address) for topic, address in services] + + self.restart_node(0, [f"-zmqpub{topic}={address}" for topic, address in services] + self.extra_args[0]) for i, sub in enumerate(subscribers): @@ -186,7 +186,7 @@ def test_basic(self): rawtx = subs[3] num_blocks = 5 - self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks}) + self.log.info(f"Generate {num_blocks} blocks (and {num_blocks} coinbase txes)") genhashes = self.generatetoaddress(self.nodes[0], num_blocks, ADDRESS_BCRT1_UNSPENDABLE) for x in range(num_blocks): @@ -484,7 +484,7 @@ def test_mempool_sync(self): if mempool_sequence is not None: zmq_mem_seq = mempool_sequence if zmq_mem_seq > get_raw_seq: - raise Exception("We somehow jumped mempool sequence numbers! zmq_mem_seq: {} > get_raw_seq: {}".format(zmq_mem_seq, get_raw_seq)) + raise Exception(f"We somehow jumped mempool sequence numbers! 
 
             # 4) Moving forward, we apply the delta to our local view
             #    remaining txs(5) + 1 block connect + 1 final tx
diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py
index 03097805c3d2f..99c562339174e 100755
--- a/test/functional/mining_basic.py
+++ b/test/functional/mining_basic.py
@@ -59,10 +59,10 @@ def mine_chain(self):
         assert_equal(mining_info['currentblocksize'], 1000)
 
         self.log.info('test blockversion')
-        self.restart_node(0, extra_args=['-mocktime={}'.format(t), '-blockversion=1337'])
+        self.restart_node(0, extra_args=[f'-mocktime={t}', '-blockversion=1337'])
         self.connect_nodes(0, 1)
         assert_equal(1337, self.nodes[0].getblocktemplate()['version'])
-        self.restart_node(0, extra_args=['-mocktime={}'.format(t)])
+        self.restart_node(0, extra_args=[f'-mocktime={t}'])
         self.connect_nodes(0, 1)
         assert_equal(VERSIONBITS_TOP_BITS + (1 << VERSIONBITS_DEPLOYMENT_TESTDUMMY_BIT), self.nodes[0].getblocktemplate()['version'])
         self.restart_node(0)

From 7c6c93d201b7338257ebc413f58b22bea490b8a2 Mon Sep 17 00:00:00 2001
From: Konstantin Akimov
Date: Tue, 22 Oct 2024 13:47:57 +0700
Subject: [PATCH 3/3] fix: remove stale comment as follow-up to bitcoin#15864

---
 test/functional/feature_config_args.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
index 2462b20383d51..622f1fd39f2cf 100755
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -286,7 +286,6 @@ def run_test(self):
 
         # Create the directory and ensure the config file now works
         os.mkdir(new_data_dir)
-        # Temporarily disabled, because this test would access the user's home dir (~/.bitcoin)
         self.start_node(0, [f'-conf={conf_file}'])
         self.stop_node(0)
         assert os.path.exists(os.path.join(new_data_dir, self.chain, 'blocks'))