Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

backport: Merge bitcoin#22530,(partial)21562,22322,22517 #5677

Merged
merged 4 commits into from
Dec 4, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 17 additions & 12 deletions src/logging.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@
#include <util/threadnames.h>
#include <util/time.h>

#include <algorithm>
#include <array>

const char * const DEFAULT_DEBUGLOGFILE = "debug.log";

BCLog::Logger& LogInstance()
Expand Down Expand Up @@ -130,8 +133,7 @@ bool BCLog::Logger::DefaultShrinkDebugFile() const
return m_categories == BCLog::NONE;
}

struct CLogCategoryDesc
{
struct CLogCategoryDesc {
BCLog::LogFlags flag;
std::string category;
};
Expand Down Expand Up @@ -201,16 +203,19 @@ bool GetLogCategory(BCLog::LogFlags& flag, const std::string& str)

std::vector<LogCategory> BCLog::Logger::LogCategoriesList(bool enabled_only) const
{
// Sort log categories by alphabetical order.
std::array<CLogCategoryDesc, std::size(LogCategories)> categories;
std::copy(std::begin(LogCategories), std::end(LogCategories), categories.begin());
std::sort(categories.begin(), categories.end(), [](auto a, auto b) { return a.category < b.category; });

std::vector<LogCategory> ret;
for (const CLogCategoryDesc& category_desc : LogCategories) {
// Omit the special cases.
if (category_desc.flag != BCLog::NONE && category_desc.flag != BCLog::ALL && category_desc.flag != BCLog::DASH) {
LogCategory catActive;
catActive.category = category_desc.category;
catActive.active = WillLogCategory(category_desc.flag);
if (!enabled_only || catActive.active) {
ret.push_back(catActive);
}
for (const CLogCategoryDesc& category_desc : categories) {
if (category_desc.flag == BCLog::NONE || category_desc.flag == BCLog::ALL || category_desc.flag == BCLog::DASH) continue;
LogCategory catActive;
catActive.category = category_desc.category;
catActive.active = WillLogCategory(category_desc.flag);
if (!enabled_only || catActive.active) {
ret.push_back(catActive);
}
}
return ret;
Expand Down Expand Up @@ -261,7 +266,7 @@ namespace BCLog {
}
return ret;
}
}
} // namespace BCLog

void BCLog::Logger::LogPrintStr(const std::string& str)
{
Expand Down
4 changes: 2 additions & 2 deletions src/logging.h
Original file line number Diff line number Diff line change
Expand Up @@ -160,9 +160,9 @@ namespace BCLog {
bool DisableCategory(const std::string& str);

bool WillLogCategory(LogFlags category) const;
/** Returns a vector of the log categories */
/** Returns a vector of the log categories in alphabetical order. */
std::vector<LogCategory> LogCategoriesList(bool enabled_only = false) const;
/** Returns a string with the log categories */
/** Returns a string with the log categories in alphabetical order. */
std::string LogCategoriesString(bool enabled_only = false) const
{
return Join(LogCategoriesList(enabled_only), ", ", [&](const LogCategory& i) { return i.category; });
Expand Down
64 changes: 29 additions & 35 deletions src/net_processing.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -318,7 +318,8 @@ class PeerManagerImpl final : public PeerManager
/** The height of the best chain */
std::atomic<int> m_best_height{-1};

int64_t m_stale_tip_check_time; //!< Next time to check for stale tip
/** Next time to check for stale tip */
int64_t m_stale_tip_check_time{0};

/** Whether this node is running in blocks only mode */
const bool m_ignore_incoming_txs;
Expand Down Expand Up @@ -443,16 +444,26 @@ class PeerManagerImpl final : public PeerManager
*
* Memory used: 1.3MB
*/
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

/*
* Filter for transactions that have been recently confirmed.
* We use this to avoid requesting transactions that have already been
confirmed.
*
* Blocks don't typically have more than 4000 transactions, so this should
* be at least six blocks (~1 hr) worth of transactions that we can store,
* inserting both a txid and wtxid for every observed transaction.
* If the number of transactions appearing in a block goes up, or if we are
* seeing getdata requests more than an hour after initial announcement, we
* can increase this number.
* The false positive rate of 1/1M should come out to less than 1
* transaction per day that would be inadvertently ignored (which is the
* same probability that we have in the reject filter).
*/
Mutex m_recent_confirmed_transactions_mutex;
std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex);
CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001};

/* Returns a bool indicating whether we requested this block.
* Also used if a block was /not/ received and timed out or started with another peer
Expand Down Expand Up @@ -1567,23 +1578,9 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn
m_cj_ctx(cj_ctx),
m_llmq_ctx(llmq_ctx),
m_govman(govman),
m_stale_tip_check_time(0),
m_ignore_incoming_txs(ignore_incoming_txs)
{
assert(std::addressof(g_chainman) == std::addressof(m_chainman));
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));

// Blocks don't typically have more than 4000 transactions, so this should
// be at least six blocks (~1 hr) worth of transactions that we can store.
// If the number of transactions appearing in a block goes up, or if we are
// seeing getdata requests more than an hour after initial announcement, we
// can increase this number.
// The false positive rate of 1/1M should come out to less than 1
// transaction per day that would be inadvertently ignored (which is the
// same probability that we have in the reject filter).
m_recent_confirmed_transactions.reset(new CRollingBloomFilter(24000, 0.000001));

const Consensus::Params& consensusParams = Params().GetConsensus();
// Stale tip checking and peer eviction are on two different timers, but we
// don't want them to get out of sync due to drift in the scheduler, so we
Expand Down Expand Up @@ -1652,7 +1649,7 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock
{
LOCK(m_recent_confirmed_transactions_mutex);
for (const auto& ptx : pblock->vtx) {
m_recent_confirmed_transactions->insert(ptx->GetHash());
m_recent_confirmed_transactions.insert(ptx->GetHash());
}
}
}
Expand All @@ -1668,7 +1665,7 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo
// presumably the most common case of relaying a confirmed transaction
// should be just after a new block containing it is found.
LOCK(m_recent_confirmed_transactions_mutex);
m_recent_confirmed_transactions->reset();
m_recent_confirmed_transactions.reset();
}

// All of the following cache a recent block, and are protected by cs_most_recent_block
Expand Down Expand Up @@ -1805,15 +1802,14 @@ bool PeerManagerImpl::AlreadyHave(const CInv& inv)
case MSG_TX:
case MSG_DSTX:
{
assert(recentRejects);
if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip)
{
// If the chain tip has changed previously rejected transactions
// might be now valid, e.g. due to a nLockTime'd tx becoming valid,
// or a double-spend. Reset the rejects filter and give those
// txs a second chance.
hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
recentRejects->reset();
m_recent_rejects.reset();
}

{
Expand All @@ -1823,15 +1819,15 @@ bool PeerManagerImpl::AlreadyHave(const CInv& inv)

{
LOCK(m_recent_confirmed_transactions_mutex);
if (m_recent_confirmed_transactions->contains(inv.hash)) return true;
if (m_recent_confirmed_transactions.contains(inv.hash)) return true;
}

// When we receive an islock for a previously rejected transaction, we have to
// drop the first-seen tx (which such a locked transaction was conflicting with)
// and re-request the locked transaction (which did not make it into the mempool
// previously due to txn-mempool-conflict rule). This means that we must ignore
// recentRejects filter for such locked txes here.
// We also ignore recentRejects filter for DSTX-es because a malicious peer might
// m_recent_rejects filter for such locked txes here.
// We also ignore m_recent_rejects filter for DSTX-es because a malicious peer might
// relay a valid DSTX as a regular TX first which would skip all the specific checks
// but would cause such tx to be rejected by ATMP due to 0 fee. Ignoring it here
// should let DSTX to be propagated by honest peer later. Note, that a malicious
Expand All @@ -1842,7 +1838,7 @@ bool PeerManagerImpl::AlreadyHave(const CInv& inv)
m_llmq_ctx->isman->IsWaitingForTx(inv.hash) ||
m_llmq_ctx->isman->IsLocked(inv.hash);

return (!fIgnoreRecentRejects && recentRejects->contains(inv.hash)) ||
return (!fIgnoreRecentRejects && m_recent_rejects.contains(inv.hash)) ||
(inv.type == MSG_DSTX && static_cast<bool>(CCoinJoin::GetDSTX(inv.hash))) ||
m_mempool.exists(inv.hash) ||
(g_txindex != nullptr && g_txindex->HasTx(inv.hash));
Expand Down Expand Up @@ -2553,8 +2549,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee
LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
assert(recentRejects);
recentRejects->insert(orphanHash);
m_recent_rejects.insert(orphanHash);
EraseOrphanTx(orphanHash);
done = true;
}
Expand Down Expand Up @@ -3608,7 +3603,7 @@ void PeerManagerImpl::ProcessMessage(
{
bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
for (const CTxIn& txin : tx.vin) {
if (recentRejects->contains(txin.prevout.hash)) {
if (m_recent_rejects.contains(txin.prevout.hash)) {
fRejectedParents = true;
break;
}
Expand Down Expand Up @@ -3637,12 +3632,11 @@ void PeerManagerImpl::ProcessMessage(
LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
// We will continue to reject this tx since it has rejected
// parents so avoid re-requesting it from other peers.
recentRejects->insert(tx.GetHash());
m_recent_rejects.insert(tx.GetHash());
m_llmq_ctx->isman->TransactionRemovedFromMempool(ptx);
}
} else {
assert(recentRejects);
recentRejects->insert(tx.GetHash());
m_recent_rejects.insert(tx.GetHash());
if (RecursiveDynamicUsage(*ptx) < 100000) {
AddToCompactExtraTransactions(ptx);
}
Expand All @@ -3661,21 +3655,21 @@ void PeerManagerImpl::ProcessMessage(
}
}

// If a tx has been detected by recentRejects, we will have reached
// If a tx has been detected by m_recent_rejects, we will have reached
// this point and the tx will have been ignored. Because we haven't run
// the tx through AcceptToMemoryPool, we won't have computed a DoS
// score for it or determined exactly why we consider it invalid.
//
// This means we won't penalize any peer subsequently relaying a DoSy
// tx (even if we penalized the first peer who gave it to us) because
// we have to account for recentRejects showing false positives. In
// we have to account for m_recent_rejects showing false positives. In
// other words, we shouldn't penalize a peer if we aren't *sure* they
// submitted a DoSy tx.
//
// Note that recentRejects doesn't just record DoSy or invalid
// Note that m_recent_rejects doesn't just record DoSy or invalid
// transactions, but any tx not accepted by the m_mempool, which may be
// due to node policy (vs. consensus). So we can't blanket penalize a
// peer simply for relaying a tx that our recentRejects has caught,
// peer simply for relaying a tx that our m_recent_rejects has caught,
// regardless of false positives.

if (state.IsInvalid())
Expand Down
23 changes: 21 additions & 2 deletions src/test/fuzz/banman.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,13 @@ void initialize_banman()
static const auto testing_setup = MakeNoLogFileContext<>();
}

// Field-wise equality for CBanEntry (nVersion, nCreateTime, nBanUntil).
// File-local helper for the banman fuzz target; presumably intended to back
// the (currently disabled) banmap round-trip assertion — confirm when that
// check is re-enabled.
static bool operator==(const CBanEntry& lhs, const CBanEntry& rhs)
{
    return lhs.nVersion == rhs.nVersion &&
        lhs.nCreateTime == rhs.nCreateTime &&
        lhs.nBanUntil == rhs.nBanUntil;
}

FUZZ_TARGET_INIT(banman, initialize_banman)
{
// The complexity is O(N^2), where N is the input size, because each call
Expand All @@ -42,18 +49,19 @@ FUZZ_TARGET_INIT(banman, initialize_banman)
fs::path banlist_file = GetDataDir() / "fuzzed_banlist";

const bool start_with_corrupted_banlist{fuzzed_data_provider.ConsumeBool()};
bool force_read_and_write_to_err{false};
if (start_with_corrupted_banlist) {
assert(WriteBinaryFile(banlist_file.string() + ".json",
fuzzed_data_provider.ConsumeRandomLengthString()));
} else {
const bool force_read_and_write_to_err{fuzzed_data_provider.ConsumeBool()};
force_read_and_write_to_err = fuzzed_data_provider.ConsumeBool();
if (force_read_and_write_to_err) {
banlist_file = fs::path{"path"} / "to" / "inaccessible" / "fuzzed_banlist";
}
}

{
BanMan ban_man{banlist_file, nullptr, ConsumeBanTimeOffset(fuzzed_data_provider)};
BanMan ban_man{banlist_file, /* client_interface */ nullptr, /* default_ban_time */ ConsumeBanTimeOffset(fuzzed_data_provider)};
while (--limit_max_ops >= 0 && fuzzed_data_provider.ConsumeBool()) {
CallOneOf(
fuzzed_data_provider,
Expand Down Expand Up @@ -91,6 +99,17 @@ FUZZ_TARGET_INIT(banman, initialize_banman)
ban_man.Discourage(ConsumeNetAddr(fuzzed_data_provider));
});
}
if (!force_read_and_write_to_err) {
ban_man.DumpBanlist();
SetMockTime(ConsumeTime(fuzzed_data_provider));
banmap_t banmap;
ban_man.GetBanned(banmap);
BanMan ban_man_read{banlist_file, /* client_interface */ nullptr, /* default_ban_time */ 0};
banmap_t banmap_read;
ban_man_read.GetBanned(banmap_read);
// Temporarily disabled to allow the remainder of the fuzz test to run while a fix is being worked on:
// assert(banmap == banmap_read);
}
}
fs::remove(banlist_file.string() + ".json");
}
2 changes: 1 addition & 1 deletion test/functional/mempool_reorg.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ def run_test(self):
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
# Sync blocks, so that peer 1 gets the block before timelock_tx
# Otherwise, peer 1 would put the timelock_tx in recentRejects
# Otherwise, peer 1 would put the timelock_tx in m_recent_rejects
self.sync_all()

# Time-locked transaction can now be spent
Expand Down
16 changes: 15 additions & 1 deletion test/functional/rpc_misc.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,13 +55,27 @@ def run_test(self):

assert_raises_rpc_error(-8, "unknown mode foobar", node.getmemoryinfo, mode="foobar")

self.log.info("test logging")
self.log.info("test logging rpc and help")

# Test logging RPC returns the expected number of logging categories.
assert_equal(len(node.logging()), 36)

# Test toggling a logging category on/off/on with the logging RPC.
UdjinM6 marked this conversation as resolved.
Show resolved Hide resolved
assert_equal(node.logging()['qt'], True)
node.logging(exclude=['qt'])
assert_equal(node.logging()['qt'], False)
node.logging(include=['qt'])
assert_equal(node.logging()['qt'], True)

# Test logging RPC returns the logging categories in alphabetical order.
sorted_logging_categories = sorted(node.logging())
assert_equal(list(node.logging()), sorted_logging_categories)

# Test logging help returns the logging categories string in alphabetical order.
categories = ', '.join(sorted_logging_categories)
logging_help = self.nodes[0].help('logging')
assert f"valid logging categories are: {categories}" in logging_help

self.log.info("test getindexinfo")
self.restart_node(0, ["-txindex=0"])
# Without any indices running the RPC returns an empty object
Expand Down
Loading