diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 616f4e650f42c..6e46148fb5419 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -919,6 +919,7 @@ class CRegTestParams : public CChainParams { AddLLMQ(Consensus::LLMQType::LLMQ_TEST_V17); AddLLMQ(Consensus::LLMQType::LLMQ_TEST_DIP0024); AddLLMQ(Consensus::LLMQType::LLMQ_TEST_PLATFORM); + AddLLMQ(Consensus::LLMQType::LLMQ_SINGLE_NODE); consensus.llmqTypeChainLocks = Consensus::LLMQType::LLMQ_TEST; consensus.llmqTypeDIP0024InstantSend = Consensus::LLMQType::LLMQ_TEST_DIP0024; consensus.llmqTypePlatform = Consensus::LLMQType::LLMQ_TEST_PLATFORM; @@ -927,6 +928,7 @@ class CRegTestParams : public CChainParams { UpdateLLMQTestParametersFromArgs(args, Consensus::LLMQType::LLMQ_TEST); UpdateLLMQTestParametersFromArgs(args, Consensus::LLMQType::LLMQ_TEST_INSTANTSEND); UpdateLLMQInstantSendDIP0024FromArgs(args); + UpdateLLMQTestPlatformFromArgs(args); } /** @@ -1004,16 +1006,9 @@ class CRegTestParams : public CChainParams { params->dkgBadVotesThreshold = threshold; } - /** - * Allows modifying the LLMQ type for InstantSend (DIP0024). 
- */ - void UpdateLLMQDIP0024InstantSend(Consensus::LLMQType llmqType) - { - consensus.llmqTypeDIP0024InstantSend = llmqType; - } - void UpdateLLMQTestParametersFromArgs(const ArgsManager& args, const Consensus::LLMQType llmqType); void UpdateLLMQInstantSendDIP0024FromArgs(const ArgsManager& args); + void UpdateLLMQTestPlatformFromArgs(const ArgsManager& args); }; static void MaybeUpdateHeights(const ArgsManager& args, Consensus::Params& consensus) @@ -1235,7 +1230,30 @@ void CRegTestParams::UpdateLLMQInstantSendDIP0024FromArgs(const ArgsManager& arg throw std::runtime_error("Invalid LLMQ type specified for -llmqtestinstantsenddip0024."); } LogPrintf("Setting llmqtestinstantsenddip0024 to %ld\n", ToUnderlying(llmqType)); - UpdateLLMQDIP0024InstantSend(llmqType); + + consensus.llmqTypeDIP0024InstantSend = llmqType; +} + +void CRegTestParams::UpdateLLMQTestPlatformFromArgs(const ArgsManager& args) +{ + if (!args.IsArgSet("-llmqtestplatform")) return; + + const auto& llmq_params_opt = GetLLMQ(consensus.llmqTypePlatform); + assert(llmq_params_opt.has_value()); + + std::string strLLMQType = gArgs.GetArg("-llmqtestplatform", std::string(llmq_params_opt->name)); + + Consensus::LLMQType llmqType = Consensus::LLMQType::LLMQ_NONE; + for (const auto& params : consensus.llmqs) { + if (params.name == strLLMQType) { + llmqType = params.type; + } + } + if (llmqType == Consensus::LLMQType::LLMQ_NONE) { + throw std::runtime_error("Invalid LLMQ type specified for -llmqtestplatform."); + } + LogPrintf("Setting llmqtestplatform to size=%ld\n", static_cast<int64_t>(llmqType)); + consensus.llmqTypePlatform = llmqType; } void CDevNetParams::UpdateDevnetSubsidyAndDiffParametersFromArgs(const ArgsManager& args) diff --git a/src/chainparamsbase.cpp b/src/chainparamsbase.cpp index d7ef237c0eee8..516cad994b95f 100644 --- a/src/chainparamsbase.cpp +++ b/src/chainparamsbase.cpp @@ -32,6 +32,7 @@ void SetupChainParamsBaseOptions(ArgsManager& argsman) argsman.AddArg("-llmqmnhf=", "Override the default
LLMQ type used for EHF. (default: llmq_devnet, devnet-only)", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS); argsman.AddArg("-llmqtestinstantsenddip0024=", "Override the default LLMQ type used for InstantSendDIP0024. Used mainly to test Platform. (default: llmq_test_dip0024, regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS); argsman.AddArg("-llmqtestinstantsendparams=:", "Override the default LLMQ size for the LLMQ_TEST_INSTANTSEND quorums (default: 3:2, regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS); + argsman.AddArg("-llmqtestplatform=", "Override the default LLMQ type used for Platform. (default: llmq_test_platform, regtest-only)", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS); argsman.AddArg("-llmqtestparams=:", "Override the default LLMQ size for the LLMQ_TEST quorum (default: 3:2, regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS); argsman.AddArg("-powtargetspacing=", "Override the default PowTargetSpacing value in seconds (default: 2.5 minutes, devnet-only)", ArgsManager::ALLOW_INT, OptionsCategory::CHAINPARAMS); argsman.AddArg("-minimumdifficultyblocks=", "The number of blocks that can be mined with the minimum difficulty at the start of a chain (default: 0, devnet-only)", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS); diff --git a/src/llmq/commitment.cpp b/src/llmq/commitment.cpp index ea131b113c271..cb5098458cb50 100644 --- a/src/llmq/commitment.cpp +++ b/src/llmq/commitment.cpp @@ -69,7 +69,7 @@ bool CFinalCommitment::Verify(CDeterministicMNManager& dmnman, gsl::not_null memberPubKeys; - for (const auto i : irange::range(members.size())) { - if (!signers[i]) { - continue; + if (llmq_params.size == 1) { + LogPrintf("pubkey operator: %s\n", members[0]->pdmnState->pubKeyOperator.Get().ToString()); + if (!membersSig.VerifyInsecure(members[0]->pdmnState->pubKeyOperator.Get(), commitmentHash)) { +// 
memberPubKeys.emplace_back(members[i]->pdmnState->pubKeyOperator.Get()); + LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid member signature\n", quorumHash.ToString()); + return false; } - memberPubKeys.emplace_back(members[i]->pdmnState->pubKeyOperator.Get()); - } - if (!membersSig.VerifySecureAggregated(memberPubKeys, commitmentHash)) { - LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid aggregated members signature\n", quorumHash.ToString()); - return false; - } + /* + if (!membersSig.VerifySecureAggregated(memberPubKeys, commitmentHash)) { + } + */ + } else { + std::vector memberPubKeys; + for (const auto i : irange::range(members.size())) { + if (!signers[i]) { + continue; + } + memberPubKeys.emplace_back(members[i]->pdmnState->pubKeyOperator.Get()); + } + if (!membersSig.VerifySecureAggregated(memberPubKeys, commitmentHash)) { + LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid aggregated members signature\n", quorumHash.ToString()); + return false; + } + } if (!quorumSig.VerifyInsecure(quorumPublicKey, commitmentHash)) { LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid quorum signature\n", quorumHash.ToString()); return false; @@ -160,7 +173,7 @@ bool CFinalCommitment::VerifySizes(const Consensus::LLMQParams& params) const return false; } if (validMembers.size() != size_t(params.size)) { - LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid signers.size=%d\n", quorumHash.ToString(), signers.size()); + LogPrint(BCLog::LLMQ, "CFinalCommitment -- q[%s] invalid validMembers.size=%d\n", quorumHash.ToString(), validMembers.size()); return false; } return true; diff --git a/src/llmq/dkgsession.cpp b/src/llmq/dkgsession.cpp index d59e7defee188..f2a0ddc499676 100644 --- a/src/llmq/dkgsession.cpp +++ b/src/llmq/dkgsession.cpp @@ -164,6 +164,8 @@ void CDKGSession::Contribute(CDKGPendingMessages& pendingMessages) return; } + assert(params.threshold > 1); // we should not get there with single-node-quorums + cxxtimer::Timer t1(true); 
logger.Batch("generating contributions"); if (!blsWorker.GenerateContributions(params.threshold, memberIds, vvecContribution, m_sk_contributions)) { @@ -276,6 +278,7 @@ bool CDKGSession::PreVerifyMessage(const CDKGContribution& qc, bool& retBan) con return true; } +// TODO: remove duplicated code between all ReceiveMessage: CDKGContribution, CDKGComplaint, CDKGJustification, CDKGPrematureCommitment std::optional CDKGSession::ReceiveMessage(const CDKGContribution& qc) { CDKGLogger logger(*this, __func__, __LINE__); @@ -1234,6 +1237,7 @@ std::vector CDKGSession::FinalizeCommitments() fqc.quorumVvecHash = first.quorumVvecHash; const bool isQuorumRotationEnabled{IsQuorumRotationEnabled(params, m_quorum_base_block_index)}; + // TODO: always put `true` here: so far as v19 is activated, we always write BASIC now fqc.nVersion = CFinalCommitment::GetVersion(isQuorumRotationEnabled, DeploymentActiveAfter(m_quorum_base_block_index, Params().GetConsensus(), Consensus::DEPLOYMENT_V19)); fqc.quorumIndex = isQuorumRotationEnabled ? quorumIndex : 0; @@ -1291,6 +1295,61 @@ std::vector CDKGSession::FinalizeCommitments() return finalCommitments; } +CFinalCommitment CDKGSession::FinalizeSingleCommitment() +{ + if (!AreWeMember()) { + return {}; + } + + CDKGLogger logger(*this, __func__, __LINE__); + + std::vector signerIds; + std::vector thresholdSigs; + + CFinalCommitment fqc(params, m_quorum_base_block_index->GetBlockHash()); + + + fqc.signers = {true}; + fqc.validMembers = {true}; + + CBLSSecretKey sk1; + sk1.MakeNewKey(); + + fqc.quorumPublicKey = sk1.GetPublicKey(); + fqc.quorumVvecHash = {}; + + // use just MN's operator public key as quorum pubkey. 
+ // TODO: use sk1 here instead and use recovery mechanism from shares, but that's not trivial to do + const bool workaround_qpublic_key = true; + if (workaround_qpublic_key) { + fqc.quorumPublicKey = m_mn_activeman->GetPubKey(); + } + const bool isQuorumRotationEnabled{false}; + fqc.nVersion = CFinalCommitment::GetVersion(isQuorumRotationEnabled, DeploymentActiveAfter(m_quorum_base_block_index, Params().GetConsensus(), Consensus::DEPLOYMENT_V19)); + fqc.quorumIndex = 0; + + uint256 commitmentHash = BuildCommitmentHash(fqc.llmqType, fqc.quorumHash, fqc.validMembers, fqc.quorumPublicKey, fqc.quorumVvecHash); + fqc.quorumSig = sk1.Sign(commitmentHash); + + fqc.membersSig = m_mn_activeman->Sign(commitmentHash); + + if (workaround_qpublic_key) { + fqc.quorumSig = fqc.membersSig; + } + + if (!fqc.Verify(m_dmnman, m_quorum_base_block_index, true)) { + logger.Batch("failed to verify final commitment"); + assert(false); + } + + logger.Batch("final commitment: validMembers=%d, signers=%d, quorumPublicKey=%s", + fqc.CountValidMembers(), fqc.CountSigners(), fqc.quorumPublicKey.ToString()); + + logger.Flush(); + + return fqc; +} + CDKGMember* CDKGSession::GetMember(const uint256& proTxHash) const { auto it = membersMap.find(proTxHash); diff --git a/src/llmq/dkgsession.h b/src/llmq/dkgsession.h index b92db5646a54b..3faf213088a98 100644 --- a/src/llmq/dkgsession.h +++ b/src/llmq/dkgsession.h @@ -377,6 +377,9 @@ class CDKGSession // Phase 5: aggregate/finalize std::vector FinalizeCommitments(); + // All Phases 5-in-1 for single-node-quorum + CFinalCommitment FinalizeSingleCommitment(); + [[nodiscard]] bool AreWeMember() const { return !myProTxHash.IsNull(); } void MarkBadMember(size_t idx); diff --git a/src/llmq/dkgsessionhandler.cpp b/src/llmq/dkgsessionhandler.cpp index 09e24bf429c17..35843cad5fb93 100644 --- a/src/llmq/dkgsessionhandler.cpp +++ b/src/llmq/dkgsessionhandler.cpp @@ -563,6 +563,20 @@ void CDKGSessionHandler::HandleDKGRound() const auto tip_mn_list = 
m_dmnman.GetListAtChainTip(); utils::EnsureQuorumConnections(params, connman, m_dmnman, m_sporkman, tip_mn_list, pQuorumBaseBlockIndex, curSession->myProTxHash, /* is_masternode = */ m_mn_activeman != nullptr); + if (params.size == 1) // TODO: add here check AreWeMember instead checking is-null for final-commitment + { + auto finalCommitment = curSession->FinalizeSingleCommitment(); + if (finalCommitment.IsNull()) { + LogPrintf("final commitment is null here -- is-member=%d\n", curSession->AreWeMember()); + return; + } + + if (auto inv_opt = quorumBlockProcessor.AddMineableCommitment(finalCommitment); inv_opt.has_value()) { + Assert(m_peerman.get())->RelayInv(inv_opt.value()); + } + return; + } + if (curSession->AreWeMember()) { utils::AddQuorumProbeConnections(params, connman, m_dmnman, m_mn_metaman, m_sporkman, tip_mn_list, pQuorumBaseBlockIndex, curSession->myProTxHash); } diff --git a/src/llmq/options.cpp b/src/llmq/options.cpp index c1956e8a67a1b..1915987a1cf59 100644 --- a/src/llmq/options.cpp +++ b/src/llmq/options.cpp @@ -128,8 +128,6 @@ bool IsQuorumTypeEnabledInternal(Consensus::LLMQType llmqType, gsl::not_nullnHeight >= consensusParams.DIP0024QuorumsHeight)}; switch (llmqType) { - case Consensus::LLMQType::LLMQ_DEVNET: - return true; case Consensus::LLMQType::LLMQ_50_60: return !fDIP0024IsActive || !fHaveDIP0024Quorums || Params().NetworkIDString() == CBaseChainParams::TESTNET || Params().NetworkIDString() == CBaseChainParams::DEVNET; @@ -141,6 +139,8 @@ bool IsQuorumTypeEnabledInternal(Consensus::LLMQType llmqType, gsl::not_nullnHeight >= TESTNET_LLMQ_25_67_ACTIVATION_HEIGHT; diff --git a/src/llmq/params.h b/src/llmq/params.h index b9d44818f8480..8c5f49502c5b4 100644 --- a/src/llmq/params.h +++ b/src/llmq/params.h @@ -37,7 +37,9 @@ enum class LLMQType : uint8_t { LLMQ_TEST_PLATFORM = 106, // 3 members, 2 (66%) threshold, one per hour. // for devnets only. 
rotated version (v2) for devnets - LLMQ_DEVNET_DIP0024 = 105 // 8 members, 4 (50%) threshold, one per hour. Params might differ when -llmqdevnetparams is used + LLMQ_DEVNET_DIP0024 = 105, // 8 members, 4 (50%) threshold, one per hour. Params might differ when -llmqdevnetparams is used + + LLMQ_SINGLE_NODE = 111, // 1 member, 1 threshold, one per hour. }; // Configures a LLMQ and its DKG @@ -129,7 +131,7 @@ static_assert(std::is_trivially_copyable_v, "LLMQParams i static_assert(std::is_trivially_assignable_v, "LLMQParams is not trivially assignable"); -static constexpr std::array available_llmqs = { +static constexpr std::array available_llmqs = { /** * llmq_test @@ -502,6 +504,34 @@ static constexpr std::array available_llmqs = { + /** + * llmq_1_100 + * This quorum is used explicitly on Regtest and requires + * just 1 participant + * + * Used for Platform for easy setup testing environment + */ + LLMQParams{ + .type = LLMQType::LLMQ_SINGLE_NODE, + .name = "llmq_1_100", + .useRotation = false, + .size = 1, + .minSize = 1, + .threshold = 1, + + .dkgInterval = 24, // one DKG per hour + .dkgPhaseBlocks = 2, + .dkgMiningWindowStart = 10, // dkgPhaseBlocks * 5 = after finalization + .dkgMiningWindowEnd = 18, + .dkgBadVotesThreshold = 2, + + .signingActiveQuorumCount = 2, // just a few ones to allow easier testing + + .keepOldConnections = 3, + .keepOldKeys = 4, + .recoveryMembers = 1, + }, + }; // available_llmqs } // namespace Consensus diff --git a/src/llmq/signing_shares.cpp b/src/llmq/signing_shares.cpp index 5f53acce396c6..18593a9e02f93 100644 --- a/src/llmq/signing_shares.cpp +++ b/src/llmq/signing_shares.cpp @@ -775,6 +775,22 @@ void CSigSharesManager::TryRecoverSig(const CQuorumCPtr& quorum, const uint256& return; } + if (quorum->params.size == 1) { + if (sigSharesForSignHash->empty()) { + LogPrint(BCLog::LLMQ_SIGS, "CSigSharesManager::%s -- impossible to recover single-node signature - no shares yet. 
id=%s, msgHash=%s\n", __func__, + id.ToString(), msgHash.ToString()); + return; + } + const auto& sigShare = sigSharesForSignHash->begin()->second; + CBLSSignature recoveredSig = sigShare.sigShare.Get(); + LogPrint(BCLog::LLMQ_SIGS, "CSigSharesManager::%s -- recover single-node signature. id=%s, msgHash=%s\n", __func__, + id.ToString(), msgHash.ToString()); + + auto rs = std::make_shared(quorum->params.type, quorum->qc->quorumHash, id, msgHash, recoveredSig); + sigman.ProcessRecoveredSig(rs); + return; // end of single-quorum processing + } + sigSharesForRecovery.reserve((size_t) quorum->params.threshold); idsForRecovery.reserve((size_t) quorum->params.threshold); for (auto it = sigSharesForSignHash->begin(); it != sigSharesForSignHash->end() && sigSharesForRecovery.size() < size_t(quorum->params.threshold); ++it) { @@ -1521,6 +1537,35 @@ std::optional CSigSharesManager::CreateSigShare(const CQuorumCPtr& qu return std::nullopt; } + if (quorum->params.size == 1) { + int memberIdx = quorum->GetMemberIndex(activeMasterNodeProTxHash); + if (memberIdx == -1) { + // this should really not happen (IsValidMember gave true) + return std::nullopt; + } + + CSigShare sigShare(quorum->params.type, quorum->qc->quorumHash, id, msgHash, uint16_t(memberIdx), {}); + uint256 signHash = sigShare.buildSignHash(); + + // TODO: This one should be SIGN by QUORUM key, not by OPERATOR key + // see TODO in CDKGSession::FinalizeSingleCommitment for details + sigShare.sigShare.Set(m_mn_activeman->Sign(signHash), bls::bls_legacy_scheme.load()); + + if (!sigShare.sigShare.Get().IsValid()) { + LogPrintf("CSigSharesManager::%s -- failed to sign sigShare. signHash=%s, id=%s, msgHash=%s, time=%s\n", __func__, + signHash.ToString(), sigShare.getId().ToString(), sigShare.getMsgHash().ToString(), t.count()); + return std::nullopt; + } + + sigShare.UpdateKey(); + + LogPrint(BCLog::LLMQ_SIGS, "CSigSharesManager::%s -- created sigShare. 
signHash=%s, id=%s, msgHash=%s, llmqType=%d, quorum=%s, time=%s\n", __func__, + signHash.ToString(), sigShare.getId().ToString(), sigShare.getMsgHash().ToString(), ToUnderlying(quorum->params.type), quorum->qc->quorumHash.ToString(), t.count()); + + return sigShare; + + + } const CBLSSecretKey& skShare = quorum->GetSkShare(); if (!skShare.IsValid()) { LogPrint(BCLog::LLMQ_SIGS, "CSigSharesManager::%s -- we don't have our skShare for quorum %s\n", __func__, quorum->qc->quorumHash.ToString()); diff --git a/src/llmq/utils.cpp b/src/llmq/utils.cpp index e3cc8ac240ad6..157d0d25d4372 100644 --- a/src/llmq/utils.cpp +++ b/src/llmq/utils.cpp @@ -174,13 +174,15 @@ std::vector GetAllQuorumMembers(Consensus::LLMQType llmqTy std::vector ComputeQuorumMembers(Consensus::LLMQType llmqType, CDeterministicMNManager& dmnman, const CBlockIndex* pQuorumBaseBlockIndex) { - bool EvoOnly = (Params().GetConsensus().llmqTypePlatform == llmqType) && IsV19Active(pQuorumBaseBlockIndex); + // it will just works for evo, when you set it as PlatformQuorum const auto& llmq_params_opt = Params().GetLLMQ(llmqType); assert(llmq_params_opt.has_value()); if (llmq_params_opt->useRotation || pQuorumBaseBlockIndex->nHeight % llmq_params_opt->dkgInterval != 0) { ASSERT_IF_DEBUG(false); return {}; } + bool EvoOnly = (Params().GetConsensus().llmqTypePlatform == llmqType) && IsV19Active(pQuorumBaseBlockIndex); + if (llmq_params_opt->size == 1) EvoOnly = true; const CBlockIndex* pWorkBlockIndex = IsV20Active(pQuorumBaseBlockIndex) ? 
pQuorumBaseBlockIndex->GetAncestor(pQuorumBaseBlockIndex->nHeight - 8) : diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 487bb1bb565aa..8094932265214 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -353,15 +353,18 @@ TestChainSetup::TestChainSetup(int num_blocks, const std::vector& e /* TestChainDIP3BeforeActivationSetup */ { 430, uint256S("0x0bcefaa33fec56cd84d05d0e76cd6a78badcc20f627d91903646de6a07930a14") }, /* TestChainBRRBeforeActivationSetup */ - { 497, uint256S("0x23c31820ec5160b7181bfdf328e2b76cd12c9fa4544d892b7f01e74dd6220849") }, + { 497, uint256S("0x2500bc0f0c7880b1dd4ea4423b58bf244302baa4b0dc148c87db9425ea80b252") }, /* TestChainV19BeforeActivationSetup */ - { 894, uint256S("0x2885cf0fe8fdf29803b6c65002ba2570ff011531d8ea92be312a85d655e00c51") }, + { 894, uint256S("0x6d20bbcd184866ae9223ca007c09dcdd5cb3f01f386c9edcb0cde395f2220eba") }, } }; { LOCK(::cs_main); auto hash = checkpoints.mapCheckpoints.find(num_blocks); + if (hash != checkpoints.mapCheckpoints.end() && m_node.chainman->ActiveChain().Tip()->GetBlockHash() != hash->second) { + std::cerr << "heeeeshes: " << m_node.chainman->ActiveChain().Tip()->GetBlockHash().ToString() << ' ' << hash->second.ToString() << std::endl; + } assert( hash != checkpoints.mapCheckpoints.end() && m_node.chainman->ActiveChain().Tip()->GetBlockHash() == hash->second); diff --git a/test/functional/feature_asset_locks.py b/test/functional/feature_asset_locks.py index a6ecb4debff1c..a69c217d57eed 100755 --- a/test/functional/feature_asset_locks.py +++ b/test/functional/feature_asset_locks.py @@ -49,11 +49,11 @@ class AssetLocksTest(DashTestFramework): def set_test_params(self): - self.set_dash_test_params(4, 2, [[ + self.set_dash_test_params(2, 0, [[ "-whitelist=127.0.0.1", "-llmqtestinstantsenddip0024=llmq_test_instantsend", "-testactivationheight=mn_rr@1400", - ]] * 4, evo_count=2) + ]] * 2, evo_count=2) def skip_test_if_missing_module(self): 
self.skip_if_no_wallet() @@ -231,15 +231,14 @@ def generate_batch(self, count): self.log.info(f"Generating batch of blocks {count} left") batch = min(50, count) count -= batch - self.bump_mocktime(batch) + self.bump_mocktime(10 * 60 + 1) self.generate(self.nodes[1], batch) # This functional test intentionally setup only 2 MN and only 2 Evo nodes # to ensure that corner case of quorum with minimum amount of nodes as possible # does not cause any issues in Dash Core - def mine_quorum_2_nodes(self, llmq_type_name, llmq_type): - self.mine_quorum(llmq_type_name=llmq_type_name, expected_members=2, expected_connections=1, expected_contributions=2, expected_commitments=2, llmq_type=llmq_type) - + def mine_quorum_2_nodes(self): + self.mine_quorum(llmq_type_name='llmq_test_platform', expected_members=2, expected_connections=1, expected_contributions=2, expected_commitments=2, llmq_type=106) def run_test(self): node_wallet = self.nodes[0] @@ -250,17 +249,9 @@ def run_test(self): self.activate_v20(expected_activation_height=900) self.log.info("Activated v20 at height:" + str(node.getblockcount())) - self.nodes[0].sporkupdate("SPORK_2_INSTANTSEND_ENABLED", 0) - self.wait_for_sporks_same() - - self.mine_quorum_2_nodes(llmq_type_name='llmq_test_instantsend', llmq_type=104) - for _ in range(2): self.dynamically_add_masternode(evo=True) - self.generate(node, 8, sync_fun=lambda: self.sync_blocks()) - self.set_sporks() - self.generate(node, 1) self.mempool_size = 0 key = ECKey() @@ -328,7 +319,7 @@ def test_asset_locks(self, node_wallet, node, pubkey): self.create_and_check_block([extra_lock_tx], expected_error = 'bad-cbtx-assetlocked-amount') self.log.info("Mine a quorum...") - self.mine_quorum_2_nodes(llmq_type_name='llmq_test_platform', llmq_type=106) + self.mine_quorum_2_nodes() self.validate_credit_pool_balance(locked_1) @@ -350,6 +341,10 @@ def test_asset_unlocks(self, node_wallet, node, pubkey): asset_unlock_tx_duplicate_index.vout[0].nValue += COIN too_late_height = 
node.getblockcount() + HEIGHT_DIFF_EXPIRING + self.log.info("Mine block to empty mempool") + self.bump_mocktime(10 * 60 + 1) + self.generate(self.nodes[0], 1) + self.check_mempool_result(tx=asset_unlock_tx, result_expected={'allowed': True, 'fees': {'base': Decimal(str(tiny_amount / COIN))}}) self.check_mempool_result(tx=asset_unlock_tx_too_big_fee, result_expected={'allowed': False, 'reject-reason' : 'max-fee-exceeded'}) @@ -417,7 +412,7 @@ def test_asset_unlocks(self, node_wallet, node, pubkey): reason = "double copy") self.log.info("Mining next quorum to check tx 'asset_unlock_tx_late' is still valid...") - self.mine_quorum_2_nodes(llmq_type_name='llmq_test_platform', llmq_type=106) + self.mine_quorum_2_nodes() self.log.info("Checking credit pool amount is same...") self.validate_credit_pool_balance(locked - 1 * COIN) self.check_mempool_result(tx=asset_unlock_tx_late, result_expected={'allowed': True, 'fees': {'base': Decimal(str(tiny_amount / COIN))}}) @@ -435,7 +430,7 @@ def test_asset_unlocks(self, node_wallet, node, pubkey): result_expected={'allowed': False, 'reject-reason' : 'bad-assetunlock-too-late'}) self.log.info("Checking that two quorums later it is too late because quorum is not active...") - self.mine_quorum_2_nodes(llmq_type_name='llmq_test_platform', llmq_type=106) + self.mine_quorum_2_nodes() self.log.info("Expecting new reject-reason...") assert not softfork_active(self.nodes[0], 'withdrawals') self.check_mempool_result(tx=asset_unlock_tx_too_late, @@ -513,7 +508,7 @@ def test_withdrawal_limits(self, node_wallet, node, pubkey): self.log.info("Fast forward to the next day to reset all current unlock limits...") self.generate_batch(blocks_in_one_day) - self.mine_quorum_2_nodes(llmq_type_name='llmq_test_platform', llmq_type=106) + self.mine_quorum_2_nodes() total = self.get_credit_pool_balance() coins = node_wallet.listunspent() @@ -669,12 +664,12 @@ def test_withdrawal_fork(self, node_wallet, node, pubkey): while quorumHash_str != 
node_wallet.quorum('list')['llmq_test_platform'][-1]: self.log.info("Generate one more quorum until signing quorum becomes the last one in the list") - self.mine_quorum_2_nodes(llmq_type_name="llmq_test_platform", llmq_type=106) + self.mine_quorum_2_nodes() self.check_mempool_result(tx=asset_unlock_tx, result_expected={'allowed': True, 'fees': {'base': Decimal(str(tiny_amount / COIN))}}) self.log.info("Generate one more quorum after which signing quorum is gone but Asset Unlock tx is still valid") assert quorumHash_str in node_wallet.quorum('list')['llmq_test_platform'] - self.mine_quorum_2_nodes(llmq_type_name="llmq_test_platform", llmq_type=106) + self.mine_quorum_2_nodes() assert quorumHash_str not in node_wallet.quorum('list')['llmq_test_platform'] if asset_unlock_tx_payload.requestedHeight + HEIGHT_DIFF_EXPIRING > node_wallet.getblockcount(): @@ -686,7 +681,7 @@ def test_withdrawal_fork(self, node_wallet, node, pubkey): index += 1 self.log.info("Generate one more quorum after which signing quorum becomes too old") - self.mine_quorum_2_nodes(llmq_type_name="llmq_test_platform", llmq_type=106) + self.mine_quorum_2_nodes() self.check_mempool_result(tx=asset_unlock_tx, result_expected={'allowed': False, 'reject-reason': 'bad-assetunlock-too-old-quorum'}) asset_unlock_tx = self.create_assetunlock(520, 2000 * COIN + 1, pubkey) diff --git a/test/functional/feature_dip3_v19.py b/test/functional/feature_dip3_v19.py index adeec5c818e84..cdf4cdef367cd 100755 --- a/test/functional/feature_dip3_v19.py +++ b/test/functional/feature_dip3_v19.py @@ -75,18 +75,8 @@ def run_test(self): self.log.info("pubkeyoperator should still be shown using legacy scheme") assert_equal(pubkeyoperator_list_before, pubkeyoperator_list_after) - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C 
height:" + str(self.nodes[0].getblockcount())) - - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) - evo_info_0 = self.dynamically_add_masternode(evo=True, rnd=7) assert evo_info_0 is not None - self.generate(self.nodes[0], 8, sync_fun=lambda: self.sync_blocks()) self.log.info("Checking that protxs with duplicate EvoNodes fields are rejected") evo_info_1 = self.dynamically_add_masternode(evo=True, rnd=7, should_be_rejected=True) @@ -96,7 +86,6 @@ def run_test(self): assert evo_info_2 is None evo_info_3 = self.dynamically_add_masternode(evo=True, rnd=9) assert evo_info_3 is not None - self.generate(self.nodes[0], 8, sync_fun=lambda: self.sync_blocks()) self.dynamically_evo_update_service(evo_info_0, 9, should_be_rejected=True) revoke_protx = self.mninfo[-1].proTxHash @@ -123,12 +112,12 @@ def run_test(self): def test_revoke_protx(self, node_idx, revoke_protx, revoke_keyoperator): funds_address = self.nodes[0].getnewaddress() fund_txid = self.nodes[0].sendtoaddress(funds_address, 1) - self.wait_for_instantlock(fund_txid, self.nodes[0]) + self.bump_mocktime(10 * 60 + 1) # to make tx safe to include in block tip = self.generate(self.nodes[0], 1)[0] assert_equal(self.nodes[0].getrawtransaction(fund_txid, 1, tip)['confirmations'], 1) protx_result = self.nodes[0].protx('revoke', revoke_protx, revoke_keyoperator, 1, funds_address) - self.wait_for_instantlock(protx_result, self.nodes[0]) + self.bump_mocktime(10 * 60 + 1) # to make tx safe to include in block tip = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0] assert_equal(self.nodes[0].getrawtransaction(protx_result, 1, tip)['confirmations'], 1) # Revoking a MN results in disconnects. 
Wait for disconnects to actually happen diff --git a/test/functional/feature_dip4_coinbasemerkleroots.py b/test/functional/feature_dip4_coinbasemerkleroots.py index d8e05cb2e0b43..9f62c43f74f4b 100755 --- a/test/functional/feature_dip4_coinbasemerkleroots.py +++ b/test/functional/feature_dip4_coinbasemerkleroots.py @@ -252,17 +252,6 @@ def test_getmnlistdiff_base(self, baseBlockHash, blockHash): return d - def activate_dip8(self, slow_mode=False): - # NOTE: set slow_mode=True if you are activating dip8 after a huge reorg - # or nodes might fail to catch up otherwise due to a large - # (MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16 blocks) reorg error. - self.log.info("Wait for dip0008 activation") - while self.nodes[0].getblockcount() < DIP0008_HEIGHT: - self.bump_mocktime(10) - self.generate(self.nodes[0], 10, sync_fun=self.no_op) - if slow_mode: - self.sync_blocks() - self.sync_blocks() def test_dip8_quorum_merkle_root_activation(self, with_initial_quorum, slow_mode=False): if with_initial_quorum: @@ -278,7 +267,9 @@ def test_dip8_quorum_merkle_root_activation(self, with_initial_quorum, slow_mode cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0] assert cbtx["cbTx"]["version"] == 1 - self.activate_dip8(slow_mode) + self.activate_by_name('dip0008', expected_activation_height=DIP0008_HEIGHT) + self.log.info("Mine one more block with new rules of dip0008") + self.generate(self.nodes[0], 1) # Assert that merkleRootQuorums is present and 0 (we have no quorums yet) cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0] diff --git a/test/functional/feature_governance_cl.py b/test/functional/feature_governance_cl.py index 6b1dab92d0210..4c6d1a1ba975a 100755 --- a/test/functional/feature_governance_cl.py +++ b/test/functional/feature_governance_cl.py @@ -50,14 +50,7 @@ def run_test(self): self.wait_for_sporks_same() self.activate_v19(expected_activation_height=900) self.log.info("Activated v19 at height:" + 
str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum() self.sync_blocks() self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) diff --git a/test/functional/feature_llmq_chainlocks.py b/test/functional/feature_llmq_chainlocks.py index 599d350c7508b..585bc77b79921 100755 --- a/test/functional/feature_llmq_chainlocks.py +++ b/test/functional/feature_llmq_chainlocks.py @@ -49,13 +49,7 @@ def run_test(self): self.nodes[0].sporkupdate("SPORK_17_QUORUM_DKG_ENABLED", 0) self.wait_for_sporks_same() - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - self.mine_cycle_quorum(llmq_type_name="llmq_test_dip0024", llmq_type=103) + self.mine_cycle_quorum() self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) self.log.info("Mine single block, ensure it includes latest chainlock") diff --git a/test/functional/feature_llmq_connections.py b/test/functional/feature_llmq_connections.py index 626bcf4d5b5d2..b46d756ba0e5d 100755 --- a/test/functional/feature_llmq_connections.py +++ b/test/functional/feature_llmq_connections.py @@ -76,13 +76,7 @@ def run_test(self): self.activate_v19(expected_activation_height=900) self.log.info("Activated v19 at height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - 
self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum() # Since we IS quorums are mined only using dip24 (rotation) we need to enable rotation, and continue tests on llmq_test_dip0024 for connections. @@ -93,7 +87,7 @@ def run_test(self): try: with mn.node.assert_debug_log(['removing masternodes quorum connections']): with mn.node.assert_debug_log(['keeping mn quorum connections']): - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum(is_first=False) mn.node.mockscheduler(60) # we check for old connections via the scheduler every 60 seconds removed = True except: @@ -108,7 +102,7 @@ def run_test(self): if len(mn.node.quorum("memberof", mn.proTxHash)) > 0: try: with mn.node.assert_debug_log(['adding mn inter-quorum connections']): - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum(is_first=False) added = True except: pass # it's ok to not add connections sometimes diff --git a/test/functional/feature_llmq_evo.py b/test/functional/feature_llmq_evo.py index bc2d63707afb7..613e0ea58eccc 100755 --- a/test/functional/feature_llmq_evo.py +++ b/test/functional/feature_llmq_evo.py @@ -76,15 +76,6 @@ def run_test(self): self.nodes[0].sporkupdate("SPORK_2_INSTANTSEND_ENABLED", 0) self.wait_for_sporks_same() - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) - evo_protxhash_list = list() for i in 
range(self.evo_count): evo_info = self.dynamically_add_masternode(evo=True) diff --git a/test/functional/feature_llmq_is_cl_conflicts.py b/test/functional/feature_llmq_is_cl_conflicts.py index 1f7c835d49cdf..b19572ef7771a 100755 --- a/test/functional/feature_llmq_is_cl_conflicts.py +++ b/test/functional/feature_llmq_is_cl_conflicts.py @@ -61,14 +61,7 @@ def run_test(self): self.activate_v19(expected_activation_height=900) self.log.info("Activated v19 at height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum() # mine single block, wait for chainlock self.generate(self.nodes[0], 1, sync_fun=self.no_op) diff --git a/test/functional/feature_llmq_is_retroactive.py b/test/functional/feature_llmq_is_retroactive.py index ea603e90dd437..94269ea423dd0 100755 --- a/test/functional/feature_llmq_is_retroactive.py +++ b/test/functional/feature_llmq_is_retroactive.py @@ -31,15 +31,7 @@ def run_test(self): self.wait_for_sporks_same() self.activate_v19(expected_activation_height=900) - self.log.info("Activated v19 at height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum() # Make sure that all nodes are chainlocked at the same height before starting actual tests 
self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash(), timeout=30) diff --git a/test/functional/feature_llmq_rotation.py b/test/functional/feature_llmq_rotation.py index 3d933eb8e4076..233f39d495c38 100755 --- a/test/functional/feature_llmq_rotation.py +++ b/test/functional/feature_llmq_rotation.py @@ -154,7 +154,7 @@ def run_test(self): self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) - (quorum_info_0_0, quorum_info_0_1) = self.mine_cycle_quorum(llmq_type_name=llmq_type_name, llmq_type=llmq_type) + (quorum_info_0_0, quorum_info_0_1) = self.mine_cycle_quorum(is_first=False) assert(self.test_quorum_listextended(quorum_info_0_0, llmq_type_name)) assert(self.test_quorum_listextended(quorum_info_0_1, llmq_type_name)) quorum_members_0_0 = extract_quorum_members(quorum_info_0_0) @@ -176,7 +176,7 @@ def run_test(self): self.log.info("Wait for chainlock") self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) - (quorum_info_1_0, quorum_info_1_1) = self.mine_cycle_quorum(llmq_type_name=llmq_type_name, llmq_type=llmq_type) + (quorum_info_1_0, quorum_info_1_1) = self.mine_cycle_quorum(is_first=False) assert(self.test_quorum_listextended(quorum_info_1_0, llmq_type_name)) assert(self.test_quorum_listextended(quorum_info_1_1, llmq_type_name)) quorum_members_1_0 = extract_quorum_members(quorum_info_1_0) @@ -210,7 +210,7 @@ def run_test(self): self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) self.log.info("Mine a quorum to invalidate") - (quorum_info_3_0, quorum_info_3_1) = self.mine_cycle_quorum(llmq_type_name=llmq_type_name, llmq_type=llmq_type) + (quorum_info_3_0, quorum_info_3_1) = self.mine_cycle_quorum(is_first=False) new_quorum_list = self.nodes[0].quorum("list", llmq_type) assert_equal(len(new_quorum_list[llmq_type_name]), len(quorum_list[llmq_type_name]) + 2) @@ -383,6 +383,20 @@ def test_quorum_listextended(self, quorum_info, llmq_type_name): return True return False + def 
move_to_next_cycle(self): + cycle_length = 24 + mninfos_online = self.mninfo.copy() + nodes = [self.nodes[0]] + [mn.node for mn in mninfos_online] + cur_block = self.nodes[0].getblockcount() + + # move forward to next DKG + skip_count = cycle_length - (cur_block % cycle_length) + if skip_count != 0: + self.bump_mocktime(1, nodes=nodes) + self.generate(self.nodes[0], skip_count, sync_fun=self.no_op) + self.sync_blocks(nodes) + self.log.info('Moved from block %d to %d' % (cur_block, self.nodes[0].getblockcount())) + if __name__ == '__main__': LLMQQuorumRotationTest().main() diff --git a/test/functional/feature_llmq_singlenode.py b/test/functional/feature_llmq_singlenode.py new file mode 100755 index 0000000000000..5a93b2622e5bc --- /dev/null +++ b/test/functional/feature_llmq_singlenode.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2024 The Dash Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +''' +feature_llmq_singlenode.py + +Checks single-node LLMQ quorum creation and signing +This functional test is similar to feature_llmq_signing.py but the differences are big enough to make a common implementation impractical. 
+ +''' + +from test_framework.test_framework import DashTestFramework +from test_framework.util import assert_raises_rpc_error, wait_until_helper + + +class LLMQSigningTest(DashTestFramework): + def set_test_params(self): + self.set_dash_test_params(1, 0, [["-llmqtestplatform=llmq_1_100"]] * 1, evo_count=2) + + + def mine_single_node_quorum(self): + node = self.nodes[0] + quorums = node.quorum('list')['llmq_1_100'] + while node.quorum('list')['llmq_1_100'] == quorums: + self.bump_mocktime(1) + self.generate(node, 2) + + def run_test(self): + + self.nodes[0].sporkupdate("SPORK_17_QUORUM_DKG_ENABLED", 0) + self.wait_for_sporks_same() + + self.activate_v19(expected_activation_height=900) + self.dynamically_add_masternode(evo=True) + self.dynamically_add_masternode(evo=True) + + self.connect_nodes(1, 2) + + # move forward to next DKG + skip_count = 24 - (self.nodes[0].getblockcount() % 24) + if skip_count != 0: + self.bump_mocktime(1) + self.generate(self.nodes[0], skip_count) + self.generate(self.nodes[0], 30) + + id = "0000000000000000000000000000000000000000000000000000000000000001" + msgHash = "0000000000000000000000000000000000000000000000000000000000000002" + msgHashConflict = "0000000000000000000000000000000000000000000000000000000000000003" + + def check_sigs(hasrecsigs, isconflicting1, isconflicting2): + has_sig = False + conflicting_1 = False + conflicting_2 = False + + for mn in self.mninfo: + if mn.node.quorum("hasrecsig", 111, id, msgHash): + has_sig = True + if mn.node.quorum("isconflicting", 111, id, msgHash): + conflicting_1 = True + if mn.node.quorum("isconflicting", 111, id, msgHashConflict): + conflicting_2 = True + if has_sig != hasrecsigs: + return False + if conflicting_1 != isconflicting1: + return False + if conflicting_2 != isconflicting2: + return False + + return True + + def wait_for_sigs(hasrecsigs, isconflicting1, isconflicting2, timeout): + self.wait_until(lambda: check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout = timeout) 
+ + def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout): + assert not wait_until_helper(lambda: not check_sigs(hasrecsigs, isconflicting1, isconflicting2), timeout = timeout, do_assert = False) + + # Initial state + wait_for_sigs(False, False, False, 1) + + # Sign first share without any optional parameter, should not result in recovered sig + # Sign second share and test optional quorumHash parameter, should not result in recovered sig + # 1. Providing an invalid quorum hash should fail and cause no changes for sigs + assert not self.mninfo[1].node.quorum("sign", 111, id, msgHash, msgHash) + assert_sigs_nochange(False, False, False, 3) + # 2. Providing a valid quorum hash should succeed and cause no changes for sigs + quorumHash = self.mninfo[1].node.quorum("selectquorum", 111, id)["quorumHash"] + + self.mninfo[0].node.quorum("sign", 111, id, msgHash, quorumHash, False) + sign1 = self.mninfo[0].node.quorum("sign", 111, id, msgHash) + sign2 = self.mninfo[1].node.quorum("sign", 111, id, msgHash) + + wait_for_sigs(True, False, True, 15) + has0 = self.nodes[0].quorum("hasrecsig", 111, id, msgHash) + has1 = self.nodes[1].quorum("hasrecsig", 111, id, msgHash) + has2 = self.nodes[2].quorum("hasrecsig", 111, id, msgHash) + assert (has0 or has1 or has2) + + # Test `quorum verify` rpc + node = self.mninfo[0].node + recsig = node.quorum("getrecsig", 111, id, msgHash) + # Find quorum automatically + height = node.getblockcount() + height_bad = node.getblockheader(recsig["quorumHash"])["height"] + hash_bad = node.getblockhash(0) + assert node.quorum("verify", 111, id, msgHash, recsig["sig"]) + assert node.quorum("verify", 111, id, msgHash, recsig["sig"], "", height) + assert not node.quorum("verify", 111, id, msgHashConflict, recsig["sig"]) + assert not node.quorum("verify", 111, id, msgHash, recsig["sig"], "", height_bad) + # Use specific quorum + assert node.quorum("verify", 111, id, msgHash, recsig["sig"], recsig["quorumHash"]) + assert not 
node.quorum("verify", 111, id, msgHashConflict, recsig["sig"], recsig["quorumHash"]) + assert_raises_rpc_error(-8, "quorum not found", node.quorum, "verify", 111, id, msgHash, recsig["sig"], hash_bad) + + # Mine one more quorum, so that we have 2 active ones, nothing should change + self.mine_single_node_quorum() + assert_sigs_nochange(True, False, True, 3) + + recsig_time = self.mocktime + + # Mine 2 more quorums, so that the one used for the recovered sig should become inactive, nothing should change + self.mine_single_node_quorum() + self.mine_single_node_quorum() + assert_sigs_nochange(True, False, True, 3) + + # fast forward until 0.5 days before cleanup is expected, recovered sig should still be valid + self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) - self.mocktime, update_schedulers=False) + # Cleanup starts every 5 seconds + wait_for_sigs(True, False, True, 15) + # fast forward 1 day, recovered sig should not be valid anymore + self.bump_mocktime(int(60 * 60 * 24 * 1), update_schedulers=False) + # Cleanup starts every 5 seconds + wait_for_sigs(False, False, False, 15) + + +if __name__ == '__main__': + LLMQSigningTest().main() diff --git a/test/functional/feature_new_quorum_type_activation.py b/test/functional/feature_new_quorum_type_activation.py index 678d3bab66aba..a552d4c404bbd 100755 --- a/test/functional/feature_new_quorum_type_activation.py +++ b/test/functional/feature_new_quorum_type_activation.py @@ -25,17 +25,17 @@ def run_test(self): self.generate(self.nodes[0], 9, sync_fun=self.no_op) assert_equal(get_bip9_details(self.nodes[0], 'testdummy')['status'], 'started') ql = self.nodes[0].quorum("list") - assert_equal(len(ql), 3) + assert_equal(len(ql), 4) assert "llmq_test_v17" not in ql self.generate(self.nodes[0], 10, sync_fun=self.no_op) assert_equal(get_bip9_details(self.nodes[0], 'testdummy')['status'], 'locked_in') ql = self.nodes[0].quorum("list") - assert_equal(len(ql), 3) + assert_equal(len(ql), 4) assert "llmq_test_v17" not in 
ql self.generate(self.nodes[0], 10, sync_fun=self.no_op) assert_equal(get_bip9_details(self.nodes[0], 'testdummy')['status'], 'active') ql = self.nodes[0].quorum("list") - assert_equal(len(ql), 4) + assert_equal(len(ql), 5) assert "llmq_test_v17" in ql diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py index 32a983df65fef..c21cee9311658 100755 --- a/test/functional/feature_notifications.py +++ b/test/functional/feature_notifications.py @@ -106,14 +106,7 @@ def run_test(self): self.nodes[0].sporkupdate("SPORK_17_QUORUM_DKG_ENABLED", 0) self.nodes[0].sporkupdate("SPORK_19_CHAINLOCKS_ENABLED", 4070908800) self.wait_for_sporks_same() - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - - (quorum_info_i_0, quorum_info_i_1) = self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + (quorum_info_i_0, quorum_info_i_1) = self.mine_cycle_quorum() self.nodes[0].sporkupdate("SPORK_19_CHAINLOCKS_ENABLED", 0) self.wait_for_sporks_same() diff --git a/test/functional/interface_zmq_dash.py b/test/functional/interface_zmq_dash.py index 86b7f8ef7d331..b1f64e445da49 100755 --- a/test/functional/interface_zmq_dash.py +++ b/test/functional/interface_zmq_dash.py @@ -139,14 +139,8 @@ def run_test(self): self.wait_for_sporks_same() self.activate_v19(expected_activation_height=900) self.log.info("Activated v19 at height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - 
self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum() self.sync_blocks() self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) @@ -162,12 +156,7 @@ def run_test(self): self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) self.test_instantsend_publishers() # At this point, we need to move forward 3 cycles (3 x 24 blocks) so the first 3 quarters can be created (without DKG sessions) - self.move_to_next_cycle() - self.test_instantsend_publishers() - self.move_to_next_cycle() - self.test_instantsend_publishers() - self.move_to_next_cycle() - self.test_instantsend_publishers() + self.generate(self.nodes[0], 24) self.mine_cycle_quorum() self.test_instantsend_publishers() finally: diff --git a/test/functional/p2p_instantsend.py b/test/functional/p2p_instantsend.py index 8fe878ebd5428..ee1bc0cd4899b 100755 --- a/test/functional/p2p_instantsend.py +++ b/test/functional/p2p_instantsend.py @@ -25,13 +25,7 @@ def run_test(self): self.wait_for_sporks_same() self.activate_v19(expected_activation_height=900) self.log.info("Activated v19 at height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - (quorum_info_i_0, quorum_info_i_1) = self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + (quorum_info_i_0, quorum_info_i_1) = self.mine_cycle_quorum() self.test_mempool_doublespend() self.test_block_doublespend() diff --git a/test/functional/rpc_verifyislock.py b/test/functional/rpc_verifyislock.py index 93e9228dcfa35..94fe291e76e25 100755 --- a/test/functional/rpc_verifyislock.py +++ b/test/functional/rpc_verifyislock.py @@ -35,14 +35,7 @@ def run_test(self): 
self.activate_v19(expected_activation_height=900) self.log.info("Activated v19 at height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount())) - self.move_to_next_cycle() - self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount())) - - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum() self.bump_mocktime(1) self.generate(self.nodes[0], 8, sync_fun=self.sync_blocks()) @@ -64,7 +57,7 @@ def run_test(self): assert node.verifyislock(request_id, txid, rec_sig, node.getblockcount() + 100) # Mine one more cycle of rotated quorums - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum(is_first=False) # Create an ISLOCK using an active quorum which will be replaced when a new cycle happens request_id = None utxos = node.listunspent() @@ -87,7 +80,7 @@ def run_test(self): # Create the ISDLOCK, then mine a cycle quorum to move renew active set isdlock = self.create_isdlock(rawtx) # Mine one block to trigger the "signHeight + dkgInterval" verification for the ISDLOCK - self.mine_cycle_quorum(llmq_type_name='llmq_test_dip0024', llmq_type=103) + self.mine_cycle_quorum(is_first=False) # Verify the ISLOCK for a transaction that is not yet known by the node rawtx_txid = node.decoderawtransaction(rawtx)["txid"] assert_raises_rpc_error(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx_txid) diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index ae2c405e6ed75..2d8bf0118095b 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -640,6 +640,8 @@ def serialize(self): # Calculate the merkle root given a vector of transaction hashes @staticmethod def 
get_merkle_root(hashes): + if len(hashes) == 0: + return 0 while len(hashes) > 1: newhashes = [] for i in range(0, len(hashes), 2): diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index eb077611c60fe..1421c12a7b38d 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -1194,7 +1194,7 @@ def set_dash_test_params(self, num_nodes, masterodes_count, extra_args=None, evo self.quorum_data_request_expiration_timeout = 360 - def activate_by_name(self, name, expected_activation_height=None): + def activate_by_name(self, name, expected_activation_height=None, slow_mode=True): assert not softfork_active(self.nodes[0], name) self.log.info("Wait for " + name + " activation") @@ -1205,7 +1205,7 @@ def activate_by_name(self, name, expected_activation_height=None): self.wait_for_sporks_same() # mine blocks in batches - batch_size = 50 + batch_size = 50 if not slow_mode else 10 if expected_activation_height is not None: height = self.nodes[0].getblockcount() assert height < expected_activation_height @@ -1258,7 +1258,6 @@ def create_simple_node(self, extra_args=None): for i in range(0, idx): self.connect_nodes(i, idx) - # TODO: to let creating Evo Nodes without instant-send available def dynamically_add_masternode(self, evo=False, rnd=None, should_be_rejected=False): mn_idx = len(self.nodes) @@ -1279,7 +1278,7 @@ def dynamically_add_masternode(self, evo=False, rnd=None, should_be_rejected=Fal return self.dynamically_initialize_datadir(node_p2p_port, node_rpc_port) - node_info = self.add_dynamically_node(self.extra_args[1]) + node_info = self.add_dynamically_node(self.extra_args[1] if len(self.extra_args) > 1 else self.extra_args[0]) args = ['-masternodeblsprivkey=%s' % created_mn_info.keyOperator] + node_info.extra_args self.start_node(mn_idx, args) @@ -1313,7 +1312,7 @@ def dynamically_prepare_masternode(self, idx, node_p2p_port, evo=False, rnd=None 
collateral_amount = EVONODE_COLLATERAL if evo else MASTERNODE_COLLATERAL outputs = {collateral_address: collateral_amount, funds_address: 1} collateral_txid = self.nodes[0].sendmany("", outputs) - self.wait_for_instantlock(collateral_txid, self.nodes[0]) + self.bump_mocktime(10 * 60 + 1) # to make tx safe to include in block tip = self.generate(self.nodes[0], 1)[0] rawtx = self.nodes[0].getrawtransaction(collateral_txid, 1, tip) @@ -1334,7 +1333,7 @@ def dynamically_prepare_masternode(self, idx, node_p2p_port, evo=False, rnd=None else: protx_result = self.nodes[0].protx("register", collateral_txid, collateral_vout, ipAndPort, owner_address, bls['public'], voting_address, operatorReward, reward_address, funds_address, True) - self.wait_for_instantlock(protx_result, self.nodes[0]) + self.bump_mocktime(10 * 60 + 1) # to make tx safe to include in block tip = self.generate(self.nodes[0], 1)[0] assert_equal(self.nodes[0].getrawtransaction(protx_result, 1, tip)['confirmations'], 1) @@ -1356,14 +1355,14 @@ def dynamically_evo_update_service(self, evo_info, rnd=None, should_be_rejected= platform_http_port = '%d' % (r + 2) fund_txid = self.nodes[0].sendtoaddress(funds_address, 1) - self.wait_for_instantlock(fund_txid, self.nodes[0]) + self.bump_mocktime(10 * 60 + 1) # to make tx safe to include in block tip = self.generate(self.nodes[0], 1)[0] assert_equal(self.nodes[0].getrawtransaction(fund_txid, 1, tip)['confirmations'], 1) protx_success = False try: protx_result = self.nodes[0].protx('update_service_evo', evo_info.proTxHash, evo_info.addr, evo_info.keyOperator, platform_node_id, platform_p2p_port, platform_http_port, operator_reward_address, funds_address) - self.wait_for_instantlock(protx_result, self.nodes[0]) + self.bump_mocktime(10 * 60 + 1) # to make tx safe to include in block tip = self.generate(self.nodes[0], 1)[0] assert_equal(self.nodes[0].getrawtransaction(protx_result, 1, tip)['confirmations'], 1) self.log.info("Updated EvoNode %s: platformNodeID=%s, 
platformP2PPort=%s, platformHTTPPort=%s" % (evo_info.proTxHash, platform_node_id, platform_p2p_port, platform_http_port)) @@ -1895,31 +1894,32 @@ def mine_quorum(self, llmq_type_name="llmq_test", llmq_type=100, expected_connec return new_quorum - def mine_cycle_quorum(self, llmq_type_name="llmq_test_dip0024", llmq_type=103, expected_connections=None, expected_members=None, expected_contributions=None, expected_complaints=0, expected_justifications=0, expected_commitments=None, mninfos_online=None): + def mine_cycle_quorum(self, is_first=True): spork21_active = self.nodes[0].spork('show')['SPORK_21_QUORUM_ALL_CONNECTED'] <= 1 spork23_active = self.nodes[0].spork('show')['SPORK_23_QUORUM_POSE'] <= 1 - if expected_connections is None: - expected_connections = (self.llmq_size_dip0024 - 1) if spork21_active else 2 - if expected_members is None: - expected_members = self.llmq_size_dip0024 - if expected_contributions is None: - expected_contributions = self.llmq_size_dip0024 - if expected_commitments is None: - expected_commitments = self.llmq_size_dip0024 - if mninfos_online is None: - mninfos_online = self.mninfo.copy() + llmq_type_name="llmq_test_dip0024" + llmq_type=103 + expected_connections = (self.llmq_size_dip0024 - 1) if spork21_active else 2 + expected_members = self.llmq_size_dip0024 + expected_contributions = self.llmq_size_dip0024 + expected_commitments = self.llmq_size_dip0024 + mninfos_online = self.mninfo.copy() + expected_complaints=0 + expected_justifications=0 - self.log.info("Mining quorum: expected_members=%d, expected_connections=%d, expected_contributions=%d, expected_complaints=%d, expected_justifications=%d, " - "expected_commitments=%d" % (expected_members, expected_connections, expected_contributions, expected_complaints, - expected_justifications, expected_commitments)) + self.log.info(f"Mining quorum: expected_members={expected_members}, expected_connections={expected_connections}, expected_contributions={expected_contributions}, 
expected_commitments={expected_commitments}, no complains and justfications expected") nodes = [self.nodes[0]] + [mn.node for mn in mninfos_online] - # move forward to next DKG - skip_count = 24 - (self.nodes[0].getblockcount() % 24) + cycle_length = 24 + cur_block = self.nodes[0].getblockcount() - self.move_blocks(nodes, skip_count) + skip_count = cycle_length - (cur_block % cycle_length) + # move forward to next 3 DKG rounds for the first quorum + extra_blocks = 24 * 3 if is_first else 0 + self.move_blocks(nodes, extra_blocks + skip_count) + self.log.info('Moved from block %d to %d' % (cur_block, self.nodes[0].getblockcount())) q_0 = self.nodes[0].getbestblockhash() self.log.info("Expected quorum_0 at:" + str(self.nodes[0].getblockcount())) @@ -2018,20 +2018,6 @@ def mine_cycle_quorum(self, llmq_type_name="llmq_test_dip0024", llmq_type=103, return (quorum_info_0, quorum_info_1) - def move_to_next_cycle(self): - cycle_length = 24 - mninfos_online = self.mninfo.copy() - nodes = [self.nodes[0]] + [mn.node for mn in mninfos_online] - cur_block = self.nodes[0].getblockcount() - - # move forward to next DKG - skip_count = cycle_length - (cur_block % cycle_length) - if skip_count != 0: - self.bump_mocktime(1, nodes=nodes) - self.generate(self.nodes[0], skip_count, sync_fun=self.no_op) - self.sync_blocks(nodes) - self.log.info('Moved from block %d to %d' % (cur_block, self.nodes[0].getblockcount())) - def wait_for_recovered_sig(self, rec_sig_id, rec_sig_msg_hash, llmq_type=100, timeout=10): def check_recovered_sig(): self.bump_mocktime(1) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 8da7df9f1e57c..cb6a8f17146e9 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -133,6 +133,7 @@ 'feature_llmq_evo.py', # NOTE: needs dash_hash to pass 'feature_llmq_is_cl_conflicts.py', # NOTE: needs dash_hash to pass 'feature_llmq_dkgerrors.py', # NOTE: needs dash_hash to pass + 'feature_llmq_singlenode.py', # NOTE: 
needs dash_hash to pass 'feature_dip4_coinbasemerkleroots.py', # NOTE: needs dash_hash to pass 'feature_mnehf.py', # NOTE: needs dash_hash to pass 'feature_governance.py --legacy-wallet',