From 31835f2a863316367cbbd7a6525db5974cf7ae4b Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Thu, 29 Jun 2023 12:10:09 -0700 Subject: [PATCH 01/19] Prepare access to untrim headers --- src/chain.cpp | 5 +++++ src/chain.h | 6 ++++++ src/init.cpp | 1 + src/node/blockstorage.h | 1 + 4 files changed, 13 insertions(+) diff --git a/src/chain.cpp b/src/chain.cpp index dd5510bfec6..6c37bda0bb6 100644 --- a/src/chain.cpp +++ b/src/chain.cpp @@ -5,6 +5,11 @@ #include #include +#include +#include + + +node::NodeContext *CBlockIndex::m_pcontext; std::string CBlockFileInfo::ToString() const { diff --git a/src/chain.h b/src/chain.h index 76ba3b18802..0daa00edb79 100644 --- a/src/chain.h +++ b/src/chain.h @@ -16,6 +16,9 @@ #include +namespace node { +struct NodeContext; +} /** * Maximum amount of time that a block timestamp is allowed to exceed the * current network-adjusted time before the block will be accepted. @@ -218,7 +221,10 @@ class CBlockIndex friend class CBlockTreeDB; + static node::NodeContext *m_pcontext; + public: + static void SetNodeContext(node::NodeContext *context) {m_pcontext = context;}; // Irrevocably remove blocksigning and dynafed-related stuff from this // in-memory copy of the block header. diff --git a/src/init.cpp b/src/init.cpp index 2459964d951..ee19aed4f1b 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1243,6 +1243,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) const ArgsManager& args = *Assert(node.args); const CChainParams& chainparams = Params(); + CBlockIndex::SetNodeContext(&node); auto opt_max_upload = ParseByteUnits(args.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET), ByteUnit::M); if (!opt_max_upload) { return InitError(strprintf(_("Unable to parse -maxuploadtarget: '%s'"), args.GetArg("-maxuploadtarget", ""))); diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index c6621cde785..1276a3f0034 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -76,6 +76,7 @@ class BlockManager { friend CChainState; friend ChainstateManager; + friend CBlockIndex; private: void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false); From 283f47b38d725d0e9bb0de3b38d10a9312ece40f Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Thu, 29 Jun 2023 10:43:49 -0700 Subject: [PATCH 02/19] Move initialization of setTrimmableBlockIndex --- src/validation.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/validation.cpp b/src/validation.cpp index 2a2873aa83b..0b18345215d 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2568,6 +2568,7 @@ bool CChainState::FlushStateToDisk( m_blockman.FlushBlockFile(); } + std::set setTrimmableBlockIndex(m_blockman.m_dirty_blockindex); // Then update all block file information (which may refer to block and undo files). 
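Patch 01 above is just plumbing: CBlockIndex gains a static back-pointer to the node context, wired up once in AppInitMain, so that later patches can reach the block-tree database from inside the index class. A minimal self-contained sketch of that pattern follows; NodeContext, BlockIndexSketch and the datadir field are illustrative stand-ins rather than the real Elements types.

    #include <cassert>
    #include <string>

    // Illustrative stand-in for node::NodeContext; the real struct carries
    // pointers to the chainstate manager, mempool, connection manager, etc.
    struct NodeContext {
        std::string datadir;
    };

    class BlockIndexSketch {
        // One process-wide back-pointer, analogous to CBlockIndex::m_pcontext.
        static NodeContext* m_pcontext;
    public:
        static void SetNodeContext(NodeContext* context) { m_pcontext = context; }

        // Later patches use the context to reach the block tree DB; here we
        // only demonstrate that the pointer is available after init.
        static bool HasContext() { return m_pcontext != nullptr; }
    };

    NodeContext* BlockIndexSketch::m_pcontext = nullptr;

    int main()
    {
        NodeContext node{"/tmp/elementsd"};      // hypothetical data directory
        BlockIndexSketch::SetNodeContext(&node); // mirrors the AppInitMain hookup
        assert(BlockIndexSketch::HasContext());
        return 0;
    }

The cost of the shortcut is a process-wide global: any CBlockIndex method that later dereferences m_pcontext assumes SetNodeContext has already been called, which is why patch 14 has to repeat the hookup in the bench and QT test setups.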
{ LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH); @@ -2577,7 +2578,6 @@ bool CChainState::FlushStateToDisk( } if (node::fTrimHeaders) { - std::set<CBlockIndex*> setTrimmableBlockIndex(m_blockman.m_dirty_blockindex); LogPrintf("Flushing block index, trimming headers, setTrimmableBlockIndex.size(): %d\n", setTrimmableBlockIndex.size()); int trim_height = m_chain.Height() - node::nMustKeepFullHeaders; int min_height = std::numeric_limits<int>::max(); From 423715b0480b549e669df4f9d14f5e5efc9dc656 Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Thu, 29 Jun 2023 10:43:49 -0700 Subject: [PATCH 03/19] Avoid pruning headers that have not been saved to disk yet --- src/chain.h | 15 ++++++++++++++- src/txdb.cpp | 1 + src/validation.cpp | 5 +++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/chain.h b/src/chain.h index 0daa00edb79..a73d0d9215a 100644 --- a/src/chain.h +++ b/src/chain.h @@ -218,6 +218,7 @@ class CBlockIndex bool m_trimmed{false}; bool m_trimmed_dynafed_block{false}; + bool m_stored_lvl{false}; friend class CBlockTreeDB; @@ -228,19 +229,28 @@ class CBlockIndex // Irrevocably remove blocksigning and dynafed-related stuff from this // in-memory copy of the block header. - void trim() { + bool trim() { assert_untrimmed(); + if (!m_stored_lvl) { + // We can't trim in-memory data if it's not on disk yet, but we can if it's already been recovered once + return false; + } m_trimmed = true; m_trimmed_dynafed_block = !m_dynafed_params.value().IsNull(); proof = std::nullopt; m_dynafed_params = std::nullopt; m_signblock_witness = std::nullopt; + return true; } + void untrim(); inline bool trimmed() const { return m_trimmed; } + inline void set_stored() { + m_stored_lvl = true; + } inline void assert_untrimmed() const { assert(!m_trimmed); } @@ -507,6 +517,9 @@ class CDiskBlockIndex : public CBlockIndex // For compatibility with elements 0.14 based chains if (g_signed_blocks) { + if (!ser_action.ForRead()) { + obj.assert_untrimmed(); + } if (is_dyna) { READWRITE(obj.m_dynafed_params.value()); READWRITE(obj.m_signblock_witness.value().stack); diff --git a/src/txdb.cpp b/src/txdb.cpp index 4ebd1c4f08f..16e8b23d2dd 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -396,6 +396,7 @@ bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, pindexNew->nStatus = diskindex.nStatus; pindexNew->nTx = diskindex.nTx; + pindexNew->set_stored(); n_total++; if (diskindex.nHeight >= trimBelowHeight) { n_untrimmed++; diff --git a/src/validation.cpp b/src/validation.cpp index 0b18345215d..c74363a218b 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2577,6 +2577,11 @@ bool CChainState::FlushStateToDisk( return AbortNode(state, "Failed to write to block index database"); } + // This should be done inside WriteBatchSync, but CBlockIndex is const there + for (std::set<CBlockIndex*>::iterator it = setTrimmableBlockIndex.begin(); it != setTrimmableBlockIndex.end(); it++) { + (*it)->set_stored(); + } + if (node::fTrimHeaders) { LogPrintf("Flushing block index, trimming headers, setTrimmableBlockIndex.size(): %d\n", setTrimmableBlockIndex.size()); int trim_height = m_chain.Height() - node::nMustKeepFullHeaders; From d8e1fe7fac0ea9179f1116b55d5a1b8e266c4dfe Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Thu, 29 Jun 2023 10:47:16 -0700 Subject: [PATCH 04/19] Avoid trimming on shutdown --- src/validation.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/validation.cpp b/src/validation.cpp index c74363a218b..8b9f9fb2469 100644 --- a/src/validation.cpp +++
b/src/validation.cpp @@ -2582,7 +2582,7 @@ bool CChainState::FlushStateToDisk( (*it)->set_stored(); } - if (node::fTrimHeaders) { + if (node::fTrimHeaders && !ShutdownRequested()) { LogPrintf("Flushing block index, trimming headers, setTrimmableBlockIndex.size(): %d\n", setTrimmableBlockIndex.size()); int trim_height = m_chain.Height() - node::nMustKeepFullHeaders; int min_height = std::numeric_limits::max(); From f89728c514c25edf3157c85c15691bf6cd6d45a6 Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Thu, 29 Jun 2023 10:52:08 -0700 Subject: [PATCH 05/19] Validate trimmed headers too while loading --- src/txdb.cpp | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/txdb.cpp b/src/txdb.cpp index 16e8b23d2dd..6fbd37585fa 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -396,24 +396,26 @@ bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, pindexNew->nStatus = diskindex.nStatus; pindexNew->nTx = diskindex.nTx; + pindexNew->proof = diskindex.proof; + pindexNew->m_dynafed_params = diskindex.m_dynafed_params; + pindexNew->m_signblock_witness = diskindex.m_signblock_witness; + + assert(!(g_signed_blocks && diskindex.m_dynafed_params.value().IsNull() && diskindex.proof.value().IsNull())); + pindexNew->set_stored(); n_total++; + + const uint256 block_hash = pindexNew->GetBlockHash(); + // Only validate one of every 1000 block header for sanity check + if (pindexNew->nHeight % 1000 == 0 && + block_hash != consensusParams.hashGenesisBlock && + !CheckProof(pindexNew->GetBlockHeader(), consensusParams)) { + return error("%s: CheckProof: %s, %s", __func__, block_hash.ToString(), pindexNew->ToString()); + } if (diskindex.nHeight >= trimBelowHeight) { n_untrimmed++; - pindexNew->proof = diskindex.proof; - pindexNew->m_dynafed_params = diskindex.m_dynafed_params; - pindexNew->m_signblock_witness = diskindex.m_signblock_witness; - - const uint256 block_hash = pindexNew->GetBlockHash(); - // Only validate one of every 1000 block header for sanity check - if (pindexNew->nHeight % 1000 == 0 && - block_hash != consensusParams.hashGenesisBlock && - !CheckProof(pindexNew->GetBlockHeader(), consensusParams)) { - return error("%s: CheckProof: %s, %s", __func__, block_hash.ToString(), pindexNew->ToString()); - } } else { - pindexNew->m_trimmed = true; - pindexNew->m_trimmed_dynafed_block = !diskindex.m_dynafed_params.value().IsNull(); + pindexNew->trim(); } pcursor->Next(); From afc16e8f8426304effdedf9f9c54d7d17021bd7c Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Thu, 29 Jun 2023 11:51:24 -0700 Subject: [PATCH 06/19] Allow gaps while trimming headers --- src/validation.cpp | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 8b9f9fb2469..1cb4abf75b4 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2582,29 +2582,31 @@ bool CChainState::FlushStateToDisk( (*it)->set_stored(); } - if (node::fTrimHeaders && !ShutdownRequested()) { + int trim_height = pindexBestHeader ? 
pindexBestHeader->nHeight - node::nMustKeepFullHeaders : 0; + if (node::fTrimHeaders && trim_height > 0 && !ShutdownRequested()) { + static int nMinTrimHeight{0}; LogPrintf("Flushing block index, trimming headers, setTrimmableBlockIndex.size(): %d\n", setTrimmableBlockIndex.size()); - int trim_height = m_chain.Height() - node::nMustKeepFullHeaders; - int min_height = std::numeric_limits::max(); - CBlockIndex* min_index = nullptr; for (std::set::iterator it = setTrimmableBlockIndex.begin(); it != setTrimmableBlockIndex.end(); it++) { (*it)->assert_untrimmed(); if ((*it)->nHeight < trim_height) { (*it)->trim(); - if ((*it)->nHeight < min_height) { - min_height = (*it)->nHeight; - min_index = *it; - } } } - + CBlockIndex* min_index = pindexBestHeader->GetAncestor(trim_height-1); // Handle any remaining untrimmed blocks that were too recent for trimming last time we flushed. if (min_index) { - min_index = min_index->pprev; - while (min_index && !min_index->trimmed()) { - min_index->trim(); + int nMaxTrimHeightRound = std::max(nMinTrimHeight, min_index->nHeight + 1); + while (min_index && min_index->nHeight >= nMinTrimHeight) { + if (!min_index->trimmed()) { + // there may be gaps due to untrimmed blocks, we need to check them all + if (!min_index->trim()) { + // Header could not be trimmed, we'll need to try again next round + nMaxTrimHeightRound = min_index->nHeight; + } + } min_index = min_index->pprev; } + nMinTrimHeight = nMaxTrimHeightRound; } } } From 4259278ff5982f057ea8cd06022fc235fae4459a Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Thu, 29 Jun 2023 12:10:09 -0700 Subject: [PATCH 07/19] Reload trimmed header from index instead of block, block may have been pruned --- src/chain.cpp | 5 +++++ src/chain.h | 2 ++ src/node/blockstorage.cpp | 17 --------------- src/node/blockstorage.h | 1 - src/rest.cpp | 21 +++++------------- src/rpc/blockchain.cpp | 21 +++++++++--------- src/txdb.cpp | 46 +++++++++++++++++++++++++++++++++++++++ src/txdb.h | 1 + 8 files changed, 70 insertions(+), 44 deletions(-) diff --git a/src/chain.cpp b/src/chain.cpp index 6c37bda0bb6..fc36659025c 100644 --- a/src/chain.cpp +++ b/src/chain.cpp @@ -56,6 +56,11 @@ CBlockLocator CChain::GetLocator(const CBlockIndex *pindex) const { return CBlockLocator(vHave); } +const CBlockIndex *CBlockIndex::untrim_to(CBlockIndex *pindexNew) const +{ + return m_pcontext->chainman->m_blockman.m_block_tree_db->RegenerateFullIndex(this, pindexNew); +} + const CBlockIndex *CChain::FindFork(const CBlockIndex *pindex) const { if (pindex == nullptr) { return nullptr; diff --git a/src/chain.h b/src/chain.h index a73d0d9215a..119b3683a3b 100644 --- a/src/chain.h +++ b/src/chain.h @@ -244,6 +244,8 @@ class CBlockIndex } void untrim(); + const CBlockIndex * untrim_to(CBlockIndex *pindexNew) const; + inline bool trimmed() const { return m_trimmed; } diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 63ed09fc408..05d62e5b3bb 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -803,23 +803,6 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus return true; } -bool ReadBlockHeaderFromDisk(CBlockHeader& header, const CBlockIndex* pindex, const Consensus::Params& consensusParams) -{ - // Not very efficient: read a block and throw away all but the header. 
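The trimming loop introduced in patch 06 is easier to reason about with a toy model. The sketch below uses a simplified HeaderSketch record instead of CBlockIndex and shows the core idea: walk back from just below the cut-off height, trim whatever has already been stored, and remember in a static nMinTrimHeight the lowest height that could not be trimmed yet, so the next flush resumes from there instead of assuming a gap-free range.

    #include <cassert>
    #include <vector>
    #include <algorithm>

    struct HeaderSketch {
        int height{0};
        bool stored{false};   // counterpart of m_stored_lvl from patch 03
        bool trimmed{false};
        bool trim() {         // mirrors the bool-returning trim()
            if (!stored) return false;  // not on disk yet: keep the full header
            trimmed = true;
            return true;
        }
    };

    int main()
    {
        // Chain of 20 headers; pretend heights 12..14 were not flushed to disk yet.
        std::vector<HeaderSketch> chain(20);
        for (int h = 0; h < 20; ++h) {
            chain[h].height = h;
            chain[h].stored = (h < 12 || h > 14);
        }

        const int best_height = 19;
        const int must_keep = 5;                 // stand-in for nMustKeepFullHeaders
        const int trim_height = best_height - must_keep;
        static int nMinTrimHeight = 0;           // persists across flushes in the real code

        // Walk back from just below the cut-off, trimming what we can and
        // recording the lowest height that still needs another pass.
        int next_min = std::max(nMinTrimHeight, trim_height);
        for (int h = trim_height - 1; h >= nMinTrimHeight; --h) {
            if (!chain[h].trimmed && !chain[h].trim()) {
                next_min = h;                    // gap: retry from here next time
            }
        }
        nMinTrimHeight = next_min;

        assert(chain[5].trimmed);     // old, stored header: trimmed
        assert(!chain[13].trimmed);   // unstored header: left alone
        assert(nMinTrimHeight == 12); // next flush resumes at the lowest gap
        return 0;
    }

In the real code the walk follows pprev pointers from pindexBestHeader->GetAncestor(trim_height - 1); the flat vector here only keeps the example short.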
- CBlock tmp; - if (!ReadBlockFromDisk(tmp, pindex, consensusParams)) { - return false; - } - const FlatFilePos block_pos{WITH_LOCK(cs_main, return pindex->GetBlockPos())}; - - header = tmp.GetBlockHeader(); - if (tmp.GetHash() != pindex->GetBlockHash()) { - return error("ReadBlockheaderFromDisk(CBlockHeader&, CBlockIndex*): GetHash() doesn't match index for %s at %s", - pindex->ToString(), block_pos.ToString()); - } - return true; -} - bool ReadRawBlockFromDisk(std::vector& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start) { FlatFilePos hpos = pos; diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 1276a3f0034..aceb775848b 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -194,7 +194,6 @@ bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::P bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams); bool ReadRawBlockFromDisk(std::vector& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start); // ELEMENTS: -bool ReadBlockHeaderFromDisk(class CBlockHeader& header, const CBlockIndex* pindex, const Consensus::Params& consensusParams); bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex); diff --git a/src/rest.cpp b/src/rest.cpp index 754b55c2f80..6f79834dcdf 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -232,13 +232,9 @@ static bool rest_headers(const std::any& context, case RetFormat::BINARY: { CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { - if (pindex->trimmed()) { - CBlockHeader tmp; - node::ReadBlockHeaderFromDisk(tmp, pindex, Params().GetConsensus()); - ssHeader << tmp; - } else { - ssHeader << pindex->GetBlockHeader(); - } + CBlockIndex tmpBlockIndexFull; + const CBlockIndex* pindexfull=pindex->untrim_to(&tmpBlockIndexFull); + ssHeader << pindexfull->GetBlockHeader(); } std::string binaryHeader = ssHeader.str(); @@ -250,14 +246,9 @@ static bool rest_headers(const std::any& context, case RetFormat::HEX: { CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { - if (pindex->trimmed()) { - CBlockHeader tmp; - node::ReadBlockHeaderFromDisk(tmp, pindex, Params().GetConsensus()); - ssHeader << tmp; - - } else { - ssHeader << pindex->GetBlockHeader(); - } + CBlockIndex tmpBlockIndexFull; + const CBlockIndex* pindexfull=pindex->untrim_to(&tmpBlockIndexFull); + ssHeader << pindexfull->GetBlockHeader(); } std::string strHex = HexStr(ssHeader) + "\n"; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 4f702b6bd55..37691ec51da 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -193,15 +193,18 @@ CBlockIndex* ParseHashOrHeight(const UniValue& param, ChainstateManager& chainma } } -UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex) +UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex_) { // Serialize passed information without accessing chain state of the active chain! 
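Patch 07 replaces the old read-the-whole-block fallback with untrim_to(), which either returns the original entry untouched or fills a caller-provided temporary from the block-tree database. A hedged sketch of that calling convention, with IndexSketch standing in for CBlockIndex and the database lookup stubbed out:

    #include <cassert>
    #include <string>

    struct IndexSketch {
        int height{0};
        bool trimmed{false};
        std::string dynafed_params;  // stand-in for the trimmed-away fields

        // Mirrors CBlockIndex::untrim_to(): cheap no-op for full entries,
        // otherwise rebuild the missing fields into the caller's temporary.
        const IndexSketch* untrim_to(IndexSketch* tmp) const {
            if (!trimmed) return this;
            *tmp = *this;
            tmp->trimmed = false;
            tmp->dynafed_params = "reloaded-from-index-db";  // stub for RegenerateFullIndex
            return tmp;
        }
    };

    int main()
    {
        IndexSketch trimmed_entry{100, true, ""};

        // Caller pattern used in rest.cpp / rpc/blockchain.cpp: a stack temporary
        // that only gets populated when the entry was actually trimmed.
        IndexSketch tmp;
        const IndexSketch* full = trimmed_entry.untrim_to(&tmp);
        assert(full == &tmp && !full->trimmed);

        IndexSketch full_entry{101, false, "already-present"};
        assert(full_entry.untrim_to(&tmp) == &full_entry);  // untouched fast path
        return 0;
    }

The in-memory entry itself stays trimmed; only the stack temporary is expanded, so serving headers over REST or RPC does not quietly grow the memory footprint again.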
AssertLockNotHeld(cs_main); // For performance reasons + CBlockIndex tmpBlockIndexFull; + const CBlockIndex* blockindex=blockindex_->untrim_to(&tmpBlockIndexFull); + UniValue result(UniValue::VOBJ); result.pushKV("hash", blockindex->GetBlockHash().GetHex()); const CBlockIndex* pnext; - int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext); + int confirmations = ComputeNextBlockAndDepth(tip, blockindex_, pnext); result.pushKV("confirmations", confirmations); result.pushKV("height", blockindex->nHeight); result.pushKV("version", blockindex->nVersion); @@ -238,7 +241,7 @@ UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex } } result.pushKV("nTx", (uint64_t)blockindex->nTx); - if (blockindex->pprev) + if (blockindex_->pprev) result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex()); if (pnext) result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex()); @@ -1002,7 +1005,7 @@ static RPCHelpMan getblockheader() if (!request.params[1].isNull()) fVerbose = request.params[1].get_bool(); - const CBlockIndex* pblockindex; + CBlockIndex* pblockindex; const CBlockIndex* tip; { ChainstateManager& chainman = EnsureAnyChainman(request.context); @@ -1018,13 +1021,9 @@ static RPCHelpMan getblockheader() if (!fVerbose) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION); - if (pblockindex->trimmed()) { - CBlockHeader tmp; - node::ReadBlockHeaderFromDisk(tmp, pblockindex, Params().GetConsensus()); - ssBlock << tmp; - } else { - ssBlock << pblockindex->GetBlockHeader(); - } + CBlockIndex tmpBlockIndexFull; + const CBlockIndex* pblockindexfull=pblockindex->untrim_to(&tmpBlockIndexFull); + ssBlock << pblockindexfull->GetBlockHeader(); std::string strHex = HexStr(ssBlock); return strHex; } diff --git a/src/txdb.cpp b/src/txdb.cpp index 6fbd37585fa..e05211ed97a 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -19,6 +19,7 @@ // ELEMENTS #include // CheckProof +#include // Params() static constexpr uint8_t DB_COIN{'C'}; static constexpr uint8_t DB_COINS{'c'}; @@ -365,6 +366,51 @@ bool CBlockTreeDB::WalkBlockIndexGutsForMaxHeight(int* nHeight) { return true; } +const CBlockIndex *CBlockTreeDB::RegenerateFullIndex(const CBlockIndex *pindexTrimmed, CBlockIndex *pindexNew) const +{ + if(!pindexTrimmed->trimmed()) { + return pindexTrimmed; + } + CBlockHeader tmp; + bool BlockRead = false; + { + // At this point we can either be locked or unlocked depending on where we're being called + // but cs_main is a RecursiveMutex, so it doesn't matter + LOCK(cs_main); + // In unpruned nodes, same data could be read from blocks using ReadBlockFromDisk, but that turned out to + // be about 6x slower than reading from the index + std::pair key(DB_BLOCK_INDEX, pindexTrimmed->GetBlockHash()); + CDiskBlockIndex diskindex; + BlockRead = this->Read(key, diskindex); + tmp = diskindex.GetBlockHeader(); + } + assert(BlockRead); + // Clone the needed data from the original trimmed block + pindexNew->pprev = pindexTrimmed->pprev; + pindexNew->phashBlock = pindexTrimmed->phashBlock; + // Construct block index object + pindexNew->nHeight = pindexTrimmed->nHeight; + pindexNew->nFile = pindexTrimmed->nFile; + pindexNew->nDataPos = pindexTrimmed->nDataPos; + pindexNew->nUndoPos = pindexTrimmed->nUndoPos; + pindexNew->nVersion = pindexTrimmed->nVersion; + pindexNew->hashMerkleRoot = pindexTrimmed->hashMerkleRoot; + pindexNew->nTime = pindexTrimmed->nTime; + pindexNew->nBits = pindexTrimmed->nBits; + pindexNew->nNonce = pindexTrimmed->nNonce; + pindexNew->nStatus = 
pindexTrimmed->nStatus; + pindexNew->nTx = pindexTrimmed->nTx; + + pindexNew->proof = tmp.proof; + pindexNew->m_dynafed_params = tmp.m_dynafed_params; + pindexNew->m_signblock_witness = tmp.m_signblock_witness; + + if (pindexTrimmed->nHeight && pindexTrimmed->nHeight % 1000 == 0) { + assert(CheckProof(pindexNew->GetBlockHeader(), Params().GetConsensus())); + } + return pindexNew; +} + bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function insertBlockIndex, int trimBelowHeight) { AssertLockHeld(::cs_main); diff --git a/src/txdb.h b/src/txdb.h index aa28a0061eb..b7cef7f2b7d 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -91,6 +91,7 @@ class CBlockTreeDB : public CDBWrapper bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function insertBlockIndex, int trimBelowHeight) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); // ELEMENTS: + const CBlockIndex *RegenerateFullIndex(const CBlockIndex *pindexTrimmed, CBlockIndex *pindexNew) const; bool WalkBlockIndexGutsForMaxHeight(int* nHeight); bool ReadPAKList(std::vector >& offline_list, std::vector >& online_list, bool& reject); bool WritePAKList(const std::vector >& offline_list, const std::vector >& online_list, bool reject); From c28c5ce619451c75ccbeebf855e699f5d8e4a3f5 Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Fri, 29 Sep 2023 12:04:59 -0700 Subject: [PATCH 08/19] Allow untrimming headers when needed --- src/chain.cpp | 14 ++++++++++++++ src/dynafed.cpp | 4 ++++ src/pegins.cpp | 3 +++ src/validation.cpp | 18 ++++++++++++++++++ src/validation.h | 1 + 5 files changed, 40 insertions(+) diff --git a/src/chain.cpp b/src/chain.cpp index fc36659025c..e66d1edbe02 100644 --- a/src/chain.cpp +++ b/src/chain.cpp @@ -56,6 +56,20 @@ CBlockLocator CChain::GetLocator(const CBlockIndex *pindex) const { return CBlockLocator(vHave); } +void CBlockIndex::untrim() { + if (!trimmed()) + return; + CBlockIndex tmp; + const CBlockIndex *pindexfull = untrim_to(&tmp); + assert(pindexfull!=this); + m_trimmed = false; + set_stored(); + proof = pindexfull->proof; + m_dynafed_params = pindexfull->m_dynafed_params; + m_signblock_witness = pindexfull->m_signblock_witness; + m_pcontext->chainman->m_blockman.m_dirty_blockindex.insert(this); +} + const CBlockIndex *CBlockIndex::untrim_to(CBlockIndex *pindexNew) const { return m_pcontext->chainman->m_blockman.m_block_tree_db->RegenerateFullIndex(this, pindexNew); diff --git a/src/dynafed.cpp b/src/dynafed.cpp index 8ef680e2962..29a20294837 100644 --- a/src/dynafed.cpp +++ b/src/dynafed.cpp @@ -1,6 +1,7 @@ #include #include +#include bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consensus::Params& consensus, DynaFedParamEntry& winning_entry) { @@ -15,6 +16,7 @@ bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consens for (int32_t height = next_height - 1; height >= (int32_t)(next_height - consensus.dynamic_epoch_length); --height) { const CBlockIndex* p_epoch_walk = pindexPrev->GetAncestor(height); assert(p_epoch_walk); + ForceUntrimHeader(p_epoch_walk); const DynaFedParamEntry& proposal = p_epoch_walk->dynafed_params().m_proposed; const uint256 proposal_root = proposal.CalculateRoot(); vote_tally[proposal_root]++; @@ -60,6 +62,7 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde // may be pre-dynafed params const CBlockIndex* p_epoch_start = pindexPrev->GetAncestor(epoch_start_height); assert(p_epoch_start); + ForceUntrimHeader(p_epoch_start); if (p_epoch_start->dynafed_params().IsNull()) { // We 
need to construct the "full" current parameters of pre-dynafed // consensus @@ -93,6 +96,7 @@ DynaFedParamEntry ComputeNextBlockCurrentParameters(const CBlockIndex* pindexPre { assert(pindexPrev); + ForceUntrimHeader(pindexPrev); DynaFedParamEntry entry = ComputeNextBlockFullCurrentParameters(pindexPrev, consensus); uint32_t next_height = pindexPrev->nHeight+1; diff --git a/src/pegins.cpp b/src/pegins.cpp index 370ffcb83b6..60e3b3f4b14 100644 --- a/src/pegins.cpp +++ b/src/pegins.cpp @@ -26,6 +26,8 @@ // ELEMENTS // +#include + namespace { static secp256k1_context* secp256k1_ctx_validation; @@ -487,6 +489,7 @@ std::vector> GetValidFedpegScripts(const CBlockIndex break; } + ForceUntrimHeader(p_epoch_start); if (!p_epoch_start->dynafed_params().IsNull()) { fedpegscripts.push_back(std::make_pair(p_epoch_start->dynafed_params().m_current.m_fedpeg_program, p_epoch_start->dynafed_params().m_current.m_fedpegscript)); } else { diff --git a/src/validation.cpp b/src/validation.cpp index 1cb4abf75b4..a6bcfb19db7 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2573,6 +2573,11 @@ bool CChainState::FlushStateToDisk( { LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH); + if (node::fTrimHeaders) { + for (std::set::iterator it = setTrimmableBlockIndex.begin(); it != setTrimmableBlockIndex.end(); it++) { + (*it)->untrim(); + } + } if (!m_blockman.WriteBlockIndexDB()) { return AbortNode(state, "Failed to write to block index database"); } @@ -2709,6 +2714,17 @@ static void UpdateTipLog( !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : ""); } +void ForceUntrimHeader(const CBlockIndex *pindex_) +{ + assert(pindex_); + if (!pindex_->trimmed()) { + return; + } + AssertLockHeld(cs_main); + CBlockIndex *pindex=const_cast(pindex_); + pindex->untrim(); +} + void CChainState::UpdateTip(const CBlockIndex* pindexNew) { AssertLockHeld(::cs_main); @@ -2754,11 +2770,13 @@ void CChainState::UpdateTip(const CBlockIndex* pindexNew) } UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "", warning_messages.original); + ForceUntrimHeader(pindexNew); // Do some logging if dynafed parameters changed. 
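ForceUntrimHeader from patch 08 is a small helper: given a possibly-const index entry whose blocksigning and dynafed fields may have been trimmed, it restores them in place before the caller reads dynafed_params(). Roughly, and ignoring the cs_main locking that patch 16 adds later, it has this shape; IndexSketch and dynafed_epoch are simplified stand-ins:

    #include <cassert>

    struct IndexSketch {
        bool trimmed{true};
        int dynafed_epoch{0};    // stand-in for the dynafed parameter block

        void untrim() {          // stub for the DB-backed CBlockIndex::untrim()
            if (!trimmed) return;
            trimmed = false;
            dynafed_epoch = 42;  // pretend the fields were reloaded
        }
    };

    // Call sites hold const pointers (GetAncestor and friends return const),
    // so the helper casts away const exactly like the patch does.
    void ForceUntrimSketch(const IndexSketch* pindex_)
    {
        assert(pindex_);
        if (!pindex_->trimmed) return;
        const_cast<IndexSketch*>(pindex_)->untrim();
    }

    int main()
    {
        IndexSketch entry;
        const IndexSketch* p = &entry;
        ForceUntrimSketch(p);    // e.g. before ComputeNextBlockFullCurrentParameters
        assert(!p->trimmed && p->dynafed_epoch == 42);
        return 0;
    }

The const_cast keeps the existing const-correct ancestor-walking APIs intact, and the real untrim() also re-inserts the entry into m_dirty_blockindex so the restored fields are written back on the next flush.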
if (pindexNew->pprev && !pindexNew->dynafed_params().IsNull()) { int height = pindexNew->nHeight; uint256 hash = pindexNew->GetBlockHash(); uint256 root = pindexNew->dynafed_params().m_current.CalculateRoot(); + ForceUntrimHeader(pindexNew->pprev); if (pindexNew->pprev->dynafed_params().IsNull()) { LogPrintf("Dynafed activated in block %d:%s: %s\n", height, hash.GetHex(), root.GetHex()); } else if (root != pindexNew->pprev->dynafed_params().m_current.CalculateRoot()) { diff --git a/src/validation.h b/src/validation.h index a27f927fea1..78fa8bea6ad 100644 --- a/src/validation.h +++ b/src/validation.h @@ -1023,4 +1023,5 @@ bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mocka */ const AssumeutxoData* ExpectedAssumeutxo(const int height, const CChainParams& params); +void ForceUntrimHeader(const CBlockIndex *pindex_); #endif // BITCOIN_VALIDATION_H From 1722d4aac1b168b99c0da9c110bd5bc93086ba27 Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Thu, 29 Jun 2023 12:46:53 -0700 Subject: [PATCH 09/19] Reduce the number of untrimmed headers in memory, since they can be untrimmed on demand now --- src/init.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/init.cpp b/src/init.cpp index ee19aed4f1b..fe772e58997 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1017,7 +1017,7 @@ bool AppInitParameterInteraction(const ArgsManager& args) // back to current epoch start, and then an additional total_valid_epochs on top of that. // We add one epoch here for the current partial epoch, and then another one for good luck. - node::nMustKeepFullHeaders = (chainparams.GetConsensus().total_valid_epochs + 2) * epoch_length; + node::nMustKeepFullHeaders = chainparams.GetConsensus().total_valid_epochs * epoch_length; // This is the number of headers we can have in flight downloading at a time, beyond the // set of blocks we've already validated. Capping this is necessary to keep memory usage // bounded during IBD. From 6367320f70ff4c2de80662693bfea92c130d3935 Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Thu, 29 Jun 2023 12:26:22 -0700 Subject: [PATCH 10/19] Restore NODE_NETWORK functionality with trim_headers --- src/init.cpp | 9 ++------- src/net_processing.cpp | 11 ++++++----- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index fe772e58997..f3098e7f4bc 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1011,7 +1011,7 @@ bool AppInitParameterInteraction(const ArgsManager& args) } if (args.GetBoolArg("-trim_headers", false)) { - LogPrintf("Configured for header-trimming mode. This will reduce memory usage substantially, but we will be unable to serve as a full P2P peer, and certain header fields may be missing from JSON RPC output.\n"); + LogPrintf("Configured for header-trimming mode. This will reduce memory usage substantially, but will increase IO usage when the headers need to be temporarily untrimmed.\n"); node::fTrimHeaders = true; // This calculation is driven by GetValidFedpegScripts in pegins.cpp, which walks the chain // back to current epoch start, and then an additional total_valid_epochs on top of that. @@ -1713,7 +1713,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // if pruning, unset the service bit and perform the initial blockstore prune // after any wallet rescanning has taken place. 
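Patch 09 shrinks the full-header retention window from (total_valid_epochs + 2) * epoch_length to total_valid_epochs * epoch_length, since anything older can now be untrimmed on demand. A worked example of what that saves; the epoch numbers are made up purely for illustration and are not Liquid's actual parameters:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Hypothetical consensus parameters, chosen only to make the arithmetic visible.
        const uint64_t epoch_length       = 20160;
        const uint64_t total_valid_epochs = 2;

        // Before patch 09: keep the valid epochs, plus the current partial epoch,
        // plus one more "for good luck".
        const uint64_t keep_before = (total_valid_epochs + 2) * epoch_length;

        // After patch 09: on-demand untrimming covers the rest.
        const uint64_t keep_after = total_valid_epochs * epoch_length;

        std::printf("full headers kept before: %llu\n", (unsigned long long)keep_before); // 80640
        std::printf("full headers kept after:  %llu\n", (unsigned long long)keep_after);  // 40320
        return 0;
    }

With these made-up numbers the node keeps half as many full headers resident, and the headroom that used to cover the partial epoch is no longer needed.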
- if (fPruneMode || node::fTrimHeaders) { + if (fPruneMode) { LogPrintf("Unsetting NODE_NETWORK on prune mode\n"); nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK); if (!fReindex) { @@ -1725,11 +1725,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } } - if (node::fTrimHeaders) { - LogPrintf("Unsetting NODE_NETWORK_LIMITED on header trim mode\n"); - nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK_LIMITED); - } - // ********************************************************* Step 11: import blocks if (!CheckDiskSpace(gArgs.GetDataDirNet())) { diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 8f62280b6a6..8ab454d3e0b 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3305,12 +3305,13 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) { if (pindex->trimmed()) { - // For simplicity, if any of the headers they're asking for are trimmed, - // just drop the request. - LogPrint(BCLog::NET, "%s: ignoring getheaders from peer=%i which would return at least one trimmed header\n", __func__, pfrom.GetId()); - return; + // Header is trimmed, reload from disk before sending + CBlockIndex tmpBlockIndexFull; + const CBlockIndex* pindexfull = pindex->untrim_to(&tmpBlockIndexFull); + vHeaders.push_back(pindexfull->GetBlockHeader()); + } else { + vHeaders.push_back(pindex->GetBlockHeader()); } - vHeaders.push_back(pindex->GetBlockHeader()); if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) break; } From 5a409265ef12146de890674266156b0283a4b81b Mon Sep 17 00:00:00 2001 From: Pablo Greco Date: Tue, 4 Jul 2023 05:03:06 -0700 Subject: [PATCH 11/19] Extreme trimming on startup --- src/node/blockstorage.cpp | 13 ++++--------- src/txdb.cpp | 35 ----------------------------------- src/txdb.h | 1 - 3 files changed, 4 insertions(+), 45 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 05d62e5b3bb..fe964360126 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -223,15 +223,7 @@ bool BlockManager::LoadBlockIndex( { int trim_below_height = 0; if (fTrimHeaders) { - int max_height = 0; - if (!m_block_tree_db->WalkBlockIndexGutsForMaxHeight(&max_height)) { - LogPrintf("LoadBlockIndex: Failed to WalkBlockIndexGutsForMaxHeight.\n"); - return false; - } - - int must_keep_headers = (consensus_params.total_valid_epochs + 2) * consensus_params.dynamic_epoch_length; - int extra_headers_buffer = consensus_params.dynamic_epoch_length * 2; // XXX arbitrary - trim_below_height = max_height - must_keep_headers - extra_headers_buffer; + trim_below_height = std::numeric_limits::max(); } if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, trim_below_height)) { return false; @@ -337,6 +329,9 @@ bool BlockManager::LoadBlockIndex( } } + if (pindexBestHeader) { + ForceUntrimHeader(pindexBestHeader); + } return true; } diff --git a/src/txdb.cpp b/src/txdb.cpp index e05211ed97a..0b7af3787e3 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -331,41 +331,6 @@ bool CBlockTreeDB::WritePAKList(const std::vector >& return Write(std::make_pair(DB_PAK, uint256S("1")), offline_list) && Write(std::make_pair(DB_PAK, uint256S("2")), online_list) && Write(std::make_pair(DB_PAK, uint256S("3")), reject); } -/** Note that we only get a conservative (lower) estimate of the max header height here, - * 
obtained by sampling the first 10,000 headers on disk (which are in random order) and - * taking the highest block we see. */ -bool CBlockTreeDB::WalkBlockIndexGutsForMaxHeight(int* nHeight) { - std::unique_ptr pcursor(NewIterator()); - *nHeight = 0; - int i = 0; - pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256())); - while (pcursor->Valid()) { - if (ShutdownRequested()) return false; - std::pair key; - if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) { - i++; - if (i > 10'000) { - // Under the (accurate) assumption that the headers on disk are effectively in random height order, - // we have a good-enough (conservative) estimate of the max height very quickly, and don't need to - // waste more time. Shortcutting like this will cause us to keep a few extra headers, which is fine. - break; - } - CDiskBlockIndex diskindex; - if (pcursor->GetValue(diskindex)) { - if (diskindex.nHeight > *nHeight) { - *nHeight = diskindex.nHeight; - } - pcursor->Next(); - } else { - return error("%s: failed to read value", __func__); - } - } else { - break; - } - } - return true; -} - const CBlockIndex *CBlockTreeDB::RegenerateFullIndex(const CBlockIndex *pindexTrimmed, CBlockIndex *pindexNew) const { if(!pindexTrimmed->trimmed()) { diff --git a/src/txdb.h b/src/txdb.h index b7cef7f2b7d..1f3fab44dc1 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -92,7 +92,6 @@ class CBlockTreeDB : public CDBWrapper EXCLUSIVE_LOCKS_REQUIRED(::cs_main); // ELEMENTS: const CBlockIndex *RegenerateFullIndex(const CBlockIndex *pindexTrimmed, CBlockIndex *pindexNew) const; - bool WalkBlockIndexGutsForMaxHeight(int* nHeight); bool ReadPAKList(std::vector >& offline_list, std::vector >& online_list, bool& reject); bool WritePAKList(const std::vector >& offline_list, const std::vector >& online_list, bool reject); }; From 6fb780d8d847c5e425dc293b4fb05f6f95ad29f6 Mon Sep 17 00:00:00 2001 From: James Dorfman Date: Mon, 6 May 2024 16:33:10 +0000 Subject: [PATCH 12/19] CI: fix linting errors --- test/lint/lint-circular-dependencies.sh | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh index e587b1c225d..2753b8cd94c 100755 --- a/test/lint/lint-circular-dependencies.sh +++ b/test/lint/lint-circular-dependencies.sh @@ -22,6 +22,29 @@ EXPECTED_CIRCULAR_DEPENDENCIES=( "wallet/fees -> wallet/wallet -> wallet/fees" "wallet/wallet -> wallet/walletdb -> wallet/wallet" "node/coinstats -> validation -> node/coinstats" + # ELEMENTs: introduced by https://github.com/ElementsProject/elements/pull/1270 + "chain -> validation -> chain" + "chain -> validation -> consensus/tx_verify -> chain" + "dynafed -> validation -> dynafed" + "pegins -> validation -> pegins" + "chain -> node/context -> txmempool -> chain" + "chain -> validation -> deploymentstatus -> chain" + "chain -> validation -> index/blockfilterindex -> chain" + "chain -> validation -> primitives/pak -> chain" + "chain -> validation -> txdb -> chain" + "chain -> validation -> validationinterface -> chain" + "chain -> validation -> txdb -> pow -> chain" + "chain -> validation -> deploymentstatus -> versionbits -> chain" + "confidential_validation -> pegins -> validation -> confidential_validation" + "consensus/tx_verify -> pegins -> validation -> consensus/tx_verify" + "dynafed -> validation -> primitives/pak -> dynafed" + "pegins -> validation -> txmempool -> pegins" + "block_proof -> chain -> validation -> block_proof" + "block_proof -> chain -> validation -> txdb -> block_proof" + 
"chain -> node/context -> net_processing -> node/blockstorage -> chain" + "consensus/tx_verify -> pegins -> validation -> txmempool -> consensus/tx_verify" + "block_proof -> chain -> node/context -> net_processing -> node/blockstorage -> block_proof" + "core_io -> script/sign -> pegins -> validation -> signet -> core_io" # ELEMENTS: will be fixed by blinding cleanup "blindpsbt -> psbt -> blindpsbt" # ELEMENTS: not so easy to fix, caused by us doing asset ID lookups in the From 90e5bab636c48638f63a5b11fe5a4b7210765b62 Mon Sep 17 00:00:00 2001 From: James Dorfman Date: Mon, 6 May 2024 16:33:50 +0000 Subject: [PATCH 13/19] CI: fix msan issue --- src/chain.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/chain.cpp b/src/chain.cpp index e66d1edbe02..157669bc158 100644 --- a/src/chain.cpp +++ b/src/chain.cpp @@ -72,7 +72,10 @@ void CBlockIndex::untrim() { const CBlockIndex *CBlockIndex::untrim_to(CBlockIndex *pindexNew) const { - return m_pcontext->chainman->m_blockman.m_block_tree_db->RegenerateFullIndex(this, pindexNew); + if (m_pcontext) { + return m_pcontext->chainman->m_blockman.m_block_tree_db->RegenerateFullIndex(this, pindexNew); + } + return nullptr; } const CBlockIndex *CChain::FindFork(const CBlockIndex *pindex) const { From 73ed5dd54181826839b431c0e4eafd2625798fdc Mon Sep 17 00:00:00 2001 From: James Dorfman Date: Wed, 8 May 2024 05:13:08 +0000 Subject: [PATCH 14/19] CI: fix benchmark + QT tests by properly initializing CBlockIndex by calling SetNodeContext --- src/bench/rpc_blockchain.cpp | 3 ++- src/qt/test/rpcnestedtests.cpp | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp index 2143bcf9500..4c6a0bf6eaf 100644 --- a/src/bench/rpc_blockchain.cpp +++ b/src/bench/rpc_blockchain.cpp @@ -15,7 +15,7 @@ namespace { struct TestBlockAndIndex { - const std::unique_ptr testing_setup{MakeNoLogFileContext(CBaseChainParams::MAIN)}; + std::unique_ptr testing_setup{MakeNoLogFileContext(CBaseChainParams::MAIN)}; CBlock block{}; uint256 blockHash{}; CBlockIndex blockindex{}; @@ -28,6 +28,7 @@ struct TestBlockAndIndex { stream >> block; + CBlockIndex::SetNodeContext(&(testing_setup->m_node)); blockHash = block.GetHash(); blockindex.phashBlock = &blockHash; blockindex.nBits = 403014710; diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp index 028e52e9644..7c128ba0ced 100644 --- a/src/qt/test/rpcnestedtests.cpp +++ b/src/qt/test/rpcnestedtests.cpp @@ -48,6 +48,7 @@ void RPCNestedTests::rpcNestedTests() TestingSetup test; m_node.setContext(&test.m_node); + CBlockIndex::SetNodeContext(&test.m_node); if (RPCIsInWarmup(nullptr)) SetRPCWarmupFinished(); From c6aba4ac06ce1fac7adaef0bd2493b998d109134 Mon Sep 17 00:00:00 2001 From: James Dorfman Date: Fri, 10 May 2024 05:42:15 +0000 Subject: [PATCH 15/19] CI: fix integer underflow bug --- src/validation.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/validation.cpp b/src/validation.cpp index a6bcfb19db7..9702071a3b2 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2587,7 +2587,10 @@ bool CChainState::FlushStateToDisk( (*it)->set_stored(); } - int trim_height = pindexBestHeader ? 
pindexBestHeader->nHeight - node::nMustKeepFullHeaders : 0; + int trim_height = 0; + if (pindexBestHeader && (uint64_t)pindexBestHeader->nHeight > node::nMustKeepFullHeaders) { // check first, to prevent underflow + trim_height = pindexBestHeader->nHeight - node::nMustKeepFullHeaders; + } if (node::fTrimHeaders && trim_height > 0 && !ShutdownRequested()) { static int nMinTrimHeight{0}; LogPrintf("Flushing block index, trimming headers, setTrimmableBlockIndex.size(): %d\n", setTrimmableBlockIndex.size()); From b4185c7008945aceb634756c1bae1ae04fe29ec3 Mon Sep 17 00:00:00 2001 From: James Dorfman Date: Wed, 8 May 2024 05:53:49 +0000 Subject: [PATCH 16/19] CI: fix asan lock issues --- src/chain.cpp | 11 +++++------ src/dynafed.cpp | 15 ++++++++++++--- src/pegins.cpp | 5 ++++- src/rest.cpp | 2 ++ src/rpc/blockchain.cpp | 7 ++++++- src/txdb.cpp | 5 ++--- src/validation.cpp | 5 +++-- 7 files changed, 34 insertions(+), 16 deletions(-) diff --git a/src/chain.cpp b/src/chain.cpp index 157669bc158..113bc1d3cd8 100644 --- a/src/chain.cpp +++ b/src/chain.cpp @@ -56,7 +56,8 @@ CBlockLocator CChain::GetLocator(const CBlockIndex *pindex) const { return CBlockLocator(vHave); } -void CBlockIndex::untrim() { +void CBlockIndex::untrim() EXCLUSIVE_LOCKS_REQUIRED(::cs_main){ + AssertLockHeld(::cs_main); if (!trimmed()) return; CBlockIndex tmp; @@ -70,12 +71,10 @@ void CBlockIndex::untrim() { m_pcontext->chainman->m_blockman.m_dirty_blockindex.insert(this); } -const CBlockIndex *CBlockIndex::untrim_to(CBlockIndex *pindexNew) const +const CBlockIndex *CBlockIndex::untrim_to(CBlockIndex *pindexNew) const EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { - if (m_pcontext) { - return m_pcontext->chainman->m_blockman.m_block_tree_db->RegenerateFullIndex(this, pindexNew); - } - return nullptr; + AssertLockHeld(::cs_main); + return m_pcontext->chainman->m_blockman.m_block_tree_db->RegenerateFullIndex(this, pindexNew); } const CBlockIndex *CChain::FindFork(const CBlockIndex *pindex) const { diff --git a/src/dynafed.cpp b/src/dynafed.cpp index 29a20294837..60f1cbbc4cd 100644 --- a/src/dynafed.cpp +++ b/src/dynafed.cpp @@ -16,7 +16,10 @@ bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consens for (int32_t height = next_height - 1; height >= (int32_t)(next_height - consensus.dynamic_epoch_length); --height) { const CBlockIndex* p_epoch_walk = pindexPrev->GetAncestor(height); assert(p_epoch_walk); - ForceUntrimHeader(p_epoch_walk); + { + LOCK(cs_main); + ForceUntrimHeader(p_epoch_walk); + } const DynaFedParamEntry& proposal = p_epoch_walk->dynafed_params().m_proposed; const uint256 proposal_root = proposal.CalculateRoot(); vote_tally[proposal_root]++; @@ -62,7 +65,10 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde // may be pre-dynafed params const CBlockIndex* p_epoch_start = pindexPrev->GetAncestor(epoch_start_height); assert(p_epoch_start); - ForceUntrimHeader(p_epoch_start); + { + LOCK(cs_main); + ForceUntrimHeader(p_epoch_start); + } if (p_epoch_start->dynafed_params().IsNull()) { // We need to construct the "full" current parameters of pre-dynafed // consensus @@ -96,7 +102,10 @@ DynaFedParamEntry ComputeNextBlockCurrentParameters(const CBlockIndex* pindexPre { assert(pindexPrev); - ForceUntrimHeader(pindexPrev); + { + LOCK(cs_main); + ForceUntrimHeader(pindexPrev); + } DynaFedParamEntry entry = ComputeNextBlockFullCurrentParameters(pindexPrev, consensus); uint32_t next_height = pindexPrev->nHeight+1; diff --git a/src/pegins.cpp b/src/pegins.cpp index 
60e3b3f4b14..c5e2794dc7d 100644 --- a/src/pegins.cpp +++ b/src/pegins.cpp @@ -489,7 +489,10 @@ std::vector> GetValidFedpegScripts(const CBlockIndex break; } - ForceUntrimHeader(p_epoch_start); + { + LOCK(cs_main); + ForceUntrimHeader(p_epoch_start); + } if (!p_epoch_start->dynafed_params().IsNull()) { fedpegscripts.push_back(std::make_pair(p_epoch_start->dynafed_params().m_current.m_fedpeg_program, p_epoch_start->dynafed_params().m_current.m_fedpegscript)); } else { diff --git a/src/rest.cpp b/src/rest.cpp index 6f79834dcdf..f7376e814d5 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -232,6 +232,7 @@ static bool rest_headers(const std::any& context, case RetFormat::BINARY: { CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { + LOCK(cs_main); CBlockIndex tmpBlockIndexFull; const CBlockIndex* pindexfull=pindex->untrim_to(&tmpBlockIndexFull); ssHeader << pindexfull->GetBlockHeader(); @@ -246,6 +247,7 @@ static bool rest_headers(const std::any& context, case RetFormat::HEX: { CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { + LOCK(cs_main); CBlockIndex tmpBlockIndexFull; const CBlockIndex* pindexfull=pindex->untrim_to(&tmpBlockIndexFull); ssHeader << pindexfull->GetBlockHeader(); diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 37691ec51da..a7a60dbddd2 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -199,7 +199,11 @@ UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex AssertLockNotHeld(cs_main); // For performance reasons CBlockIndex tmpBlockIndexFull; - const CBlockIndex* blockindex=blockindex_->untrim_to(&tmpBlockIndexFull); + const CBlockIndex* blockindex; + { + LOCK(cs_main); + blockindex = blockindex_->untrim_to(&tmpBlockIndexFull); + } UniValue result(UniValue::VOBJ); result.pushKV("hash", blockindex->GetBlockHash().GetHex()); @@ -1020,6 +1024,7 @@ static RPCHelpMan getblockheader() if (!fVerbose) { + LOCK(cs_main); CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION); CBlockIndex tmpBlockIndexFull; const CBlockIndex* pblockindexfull=pblockindex->untrim_to(&tmpBlockIndexFull); diff --git a/src/txdb.cpp b/src/txdb.cpp index 0b7af3787e3..d1fe0394cbc 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -333,15 +333,14 @@ bool CBlockTreeDB::WritePAKList(const std::vector >& const CBlockIndex *CBlockTreeDB::RegenerateFullIndex(const CBlockIndex *pindexTrimmed, CBlockIndex *pindexNew) const { + LOCK(cs_main); + if(!pindexTrimmed->trimmed()) { return pindexTrimmed; } CBlockHeader tmp; bool BlockRead = false; { - // At this point we can either be locked or unlocked depending on where we're being called - // but cs_main is a RecursiveMutex, so it doesn't matter - LOCK(cs_main); // In unpruned nodes, same data could be read from blocks using ReadBlockFromDisk, but that turned out to // be about 6x slower than reading from the index std::pair key(DB_BLOCK_INDEX, pindexTrimmed->GetBlockHash()); diff --git a/src/validation.cpp b/src/validation.cpp index 9702071a3b2..b6fc6d4fd7d 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2717,13 +2717,14 @@ static void UpdateTipLog( !warning_messages.empty() ? 
strprintf(" warning='%s'", warning_messages) : ""); } -void ForceUntrimHeader(const CBlockIndex *pindex_) +void ForceUntrimHeader(const CBlockIndex *pindex_) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + AssertLockHeld(cs_main); + assert(pindex_); if (!pindex_->trimmed()) { return; } - AssertLockHeld(cs_main); CBlockIndex *pindex=const_cast(pindex_); pindex->untrim(); } From 46a3ee29bc7e04d015369eec0c3408df3d3cd797 Mon Sep 17 00:00:00 2001 From: James Dorfman Date: Mon, 27 May 2024 09:25:31 +0000 Subject: [PATCH 17/19] improve efficiency by gating locks with --- src/dynafed.cpp | 7 ++++--- src/pegins.cpp | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/dynafed.cpp b/src/dynafed.cpp index 60f1cbbc4cd..d7197951491 100644 --- a/src/dynafed.cpp +++ b/src/dynafed.cpp @@ -2,6 +2,7 @@ #include #include #include +#include bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consensus::Params& consensus, DynaFedParamEntry& winning_entry) { @@ -16,7 +17,7 @@ bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consens for (int32_t height = next_height - 1; height >= (int32_t)(next_height - consensus.dynamic_epoch_length); --height) { const CBlockIndex* p_epoch_walk = pindexPrev->GetAncestor(height); assert(p_epoch_walk); - { + if (node::fTrimHeaders) { LOCK(cs_main); ForceUntrimHeader(p_epoch_walk); } @@ -65,7 +66,7 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde // may be pre-dynafed params const CBlockIndex* p_epoch_start = pindexPrev->GetAncestor(epoch_start_height); assert(p_epoch_start); - { + if (node::fTrimHeaders) { LOCK(cs_main); ForceUntrimHeader(p_epoch_start); } @@ -102,7 +103,7 @@ DynaFedParamEntry ComputeNextBlockCurrentParameters(const CBlockIndex* pindexPre { assert(pindexPrev); - { + if (node::fTrimHeaders) { LOCK(cs_main); ForceUntrimHeader(pindexPrev); } diff --git a/src/pegins.cpp b/src/pegins.cpp index c5e2794dc7d..72551d2c3b5 100644 --- a/src/pegins.cpp +++ b/src/pegins.cpp @@ -489,7 +489,7 @@ std::vector> GetValidFedpegScripts(const CBlockIndex break; } - { + if (node::fTrimHeaders) { LOCK(cs_main); ForceUntrimHeader(p_epoch_start); } From 4171939b58cd9773f30385b1e3ba6cca3a4d447a Mon Sep 17 00:00:00 2001 From: Byron Hambly Date: Thu, 30 May 2024 14:59:27 +0200 Subject: [PATCH 18/19] test: add trim_headers functional test (cherry picked from commit 630870f21dfbddd1366c65761f931b85dc8a189b) --- test/functional/feature_trim_headers.py | 261 ++++++++++++++++++++++++ test/functional/test_runner.py | 1 + 2 files changed, 262 insertions(+) create mode 100755 test/functional/feature_trim_headers.py diff --git a/test/functional/feature_trim_headers.py b/test/functional/feature_trim_headers.py new file mode 100755 index 00000000000..9056629dcb9 --- /dev/null +++ b/test/functional/feature_trim_headers.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 + +import codecs + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal +from test_framework import ( + address, + key, +) +from test_framework.messages import ( + CBlock, + from_hex, +) +from test_framework.script import ( + OP_NOP, + OP_RETURN, + CScript +) + +# Generate wallet import format from private key. +def wif(pk): + # Base58Check version for regtest WIF keys is 0xef = 239 + pk_compressed = pk + bytes([0x1]) + return address.byte_to_base58(pk_compressed, 239) + +# The signblockscript is a Bitcoin Script k-of-n multisig script. 
+def make_signblockscript(num_nodes, required_signers, keys): + assert num_nodes >= required_signers + script = "{}".format(50 + required_signers) + for i in range(num_nodes): + k = keys[i] + script += "21" + script += codecs.encode(k.get_pubkey().get_bytes(), 'hex_codec').decode("utf-8") + script += "{}".format(50 + num_nodes) # num keys + script += "ae" # OP_CHECKMULTISIG + return script + +class TrimHeadersTest(BitcoinTestFramework): + def skip_test_if_missing_module(self): + self.skip_if_no_wallet() + + # Dynamically generate N keys to be used for block signing. + def init_keys(self, num_keys): + self.keys = [] + self.wifs = [] + for i in range(num_keys): + k = key.ECKey() + k.generate() + w = wif(k.get_bytes()) + self.keys.append(k) + self.wifs.append(w) + + def set_test_params(self): + self.num_nodes = 3 + self.num_keys = 1 + self.required_signers = 1 + self.setup_clean_chain = True + self.init_keys(self.num_keys) + signblockscript = make_signblockscript(self.num_keys, self.required_signers, self.keys) + self.witnessScript = signblockscript # post-dynafed this becomes witnessScript + args = [ + "-signblockscript={}".format(signblockscript), + "-con_max_block_sig_size={}".format(self.required_signers * 74 + self.num_nodes * 33), + "-anyonecanspendaremine=1", + "-evbparams=dynafed:0:::", + "-con_dyna_deploy_signal=1", + ] + self.trim_args = args + ["-trim_headers=1"] + self.prune_args = self.trim_args + ["-prune=1"] + self.extra_args = [ + args, + self.trim_args, + self.prune_args, + ] + + def setup_network(self): + self.setup_nodes() + self.connect_nodes(0, 1) + self.connect_nodes(0, 2) + + def check_height(self, expected_height, all=False, verbose=True): + if verbose: + self.log.info(f"Check height {expected_height}") + if all: + for n in self.nodes: + assert_equal(n.getblockcount(), expected_height) + else: + assert_equal(self.nodes[0].getblockcount(), expected_height) + + def mine_block(self, make_transactions): + # alternate mining between the signing nodes + mineridx = self.nodes[0].getblockcount() % self.required_signers # assuming in sync + mineridx_next = (self.nodes[0].getblockcount() + 1) % self.required_signers + miner = self.nodes[mineridx] + miner_next = self.nodes[mineridx_next] + + # If dynafed is enabled, this means signblockscript has been WSH-wrapped + blockchain_info = self.nodes[0].getblockchaininfo() + deployment_info = self.nodes[0].getdeploymentinfo() + dynafed_active = deployment_info['deployments']['dynafed']['bip9']['status'] == "active" + if dynafed_active: + wsh_wrap = self.nodes[0].decodescript(self.witnessScript)['segwit']['hex'] + assert_equal(wsh_wrap, blockchain_info['current_signblock_hex']) + + # Make a few transactions to make non-empty blocks for compact transmission + if make_transactions: + for i in range(10): + miner.sendtoaddress(miner_next.getnewaddress(), 1, "", "", True) + # miner makes a block + block = miner.getnewblockhex() + block_struct = from_hex(CBlock(), block) + + # make another block with the commitment field filled out + dummy_block = miner.getnewblockhex(commit_data="deadbeef") + dummy_struct = from_hex(CBlock(), dummy_block) + assert_equal(len(dummy_struct.vtx[0].vout), len(block_struct.vtx[0].vout) + 1) + # OP_RETURN deadbeef + assert_equal(CScript(dummy_struct.vtx[0].vout[0].scriptPubKey).hex(), '6a04deadbeef') + + # All nodes get compact blocks, first node may get complete + # block in 0.5 RTT even with transactions thanks to p2p connection + # with non-signing node being miner + for i in range(self.num_keys): + sketch = 
miner.getcompactsketch(block) + compact_response = self.nodes[i].consumecompactsketch(sketch) + if "block_tx_req" in compact_response: + block_txn = self.nodes[i].consumegetblocktxn(block, compact_response["block_tx_req"]) + final_block = self.nodes[i].finalizecompactblock(sketch, block_txn, compact_response["found_transactions"]) + else: + # If there's only coinbase, it should succeed immediately + final_block = compact_response["blockhex"] + # Block should be complete, sans signatures + self.nodes[i].testproposedblock(final_block) + + # collect num_keys signatures from signers, reduce to required_signers sigs during combine + sigs = [] + for i in range(self.num_keys): + result = miner.combineblocksigs(block, sigs, self.witnessScript) + sigs = sigs + self.nodes[i].signblock(block, self.witnessScript) + assert_equal(result["complete"], i >= self.required_signers) + # submitting should have no effect pre-threshhold + if i < self.required_signers: + miner.submitblock(result["hex"]) + + result = miner.combineblocksigs(block, sigs, self.witnessScript) + assert_equal(result["complete"], True) + + self.nodes[0].submitblock(result["hex"]) + + def mine_blocks(self, num_blocks, transactions): + for _ in range(num_blocks): + self.mine_block(transactions) + + def mine_large_blocks(self, n): + big_script = CScript([OP_RETURN] + [OP_NOP] * 950000) + node = self.nodes[0] + + for _ in range(n): + hex = node.getnewblockhex() + block = from_hex(CBlock(), hex) + tx = block.vtx[0] + tx.vout[0].scriptPubKey = big_script + tx.rehash() + block.vtx[0] = tx + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + h = block.serialize().hex() + + sigs = node.signblock(h, self.witnessScript) + + result = node.combineblocksigs(h, sigs, self.witnessScript) + assert_equal(result["complete"], True) + + node.submitblock(result["hex"]) + + + def run_test(self): + for i in range(self.num_keys): + self.nodes[i].importprivkey(self.wifs[i]) + + expected_height = 0 + self.check_height(expected_height, all=True) + + self.log.info("Mining and signing 101 blocks to unlock funds") + expected_height += 101 + self.mine_blocks(101, False) + self.sync_all() + self.check_height(expected_height, all=True) + + self.log.info("Shut down trimmed nodes") + self.stop_node(1) + self.stop_node(2) + + self.log.info("Mining and signing non-empty blocks") + expected_height += 10 + self.mine_blocks(10, True) + self.check_height(expected_height) + + # signblock rpc field stuff + tip = self.nodes[0].getblockhash(self.nodes[0].getblockcount()) + header = self.nodes[0].getblockheader(tip) + block = self.nodes[0].getblock(tip) + info = self.nodes[0].getblockchaininfo() + + assert 'signblock_witness_asm' in header + assert 'signblock_witness_hex' in header + assert 'signblock_witness_asm' in block + assert 'signblock_witness_hex' in block + + assert_equal(self.nodes[0].getdeploymentinfo()['deployments']['dynafed']['bip9']['status'], "defined") + + # activate dynafed + blocks_til_dynafed = 431 - self.nodes[0].getblockcount() + self.log.info("Activating dynafed") + self.mine_blocks(blocks_til_dynafed, False) + expected_height += blocks_til_dynafed + self.check_height(expected_height) + + assert_equal(self.nodes[0].getdeploymentinfo()['deployments']['dynafed']['bip9']['status'], "locked_in") + + num = 3000 + self.log.info(f"Mine {num} dynamic federation blocks without txns") + self.mine_blocks(num, False) + expected_height += num + self.check_height(expected_height) + + num = 10 + self.log.info(f"Mine {num} dynamic federation blocks with txns") + 
self.mine_blocks(num, True) + expected_height += num + self.check_height(expected_height) + + num = 777 + self.log.info(f"Mine {num} large blocks") + expected_height += num + self.mine_large_blocks(num) + + self.log.info("Restart the trimmed nodes") + self.start_node(1, extra_args=self.trim_args) + self.start_node(2, extra_args=self.prune_args) + self.connect_nodes(0, 1) + self.connect_nodes(0, 2) + + self.sync_all() + self.check_height(expected_height, all=True) + + self.log.info("Prune the pruned node") + self.nodes[2].pruneblockchain(4000) + + hash = self.nodes[0].getblockhash(expected_height) + block = self.nodes[0].getblock(hash) + for i in range(1, self.num_nodes): + assert_equal(hash, self.nodes[i].getblockhash(expected_height)) + assert_equal(block, self.nodes[i].getblock(hash)) + + self.log.info(f"All nodes at height {expected_height} with block hash {hash}") + + +if __name__ == '__main__': + TrimHeadersTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 198a3eaeb1a..fc3c1034cce 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -112,6 +112,7 @@ 'rpc_getnewblockhex.py', 'wallet_elements_regression_1172.py --legacy-wallet', 'wallet_elements_regression_1259.py --legacy-wallet', + 'feature_trim_headers.py', # Longest test should go first, to favor running tests in parallel 'wallet_hd.py --legacy-wallet', 'wallet_hd.py --descriptors', From eed85dc5dfd93b1bc806c4361ba8d594c304a230 Mon Sep 17 00:00:00 2001 From: Byron Hambly Date: Wed, 5 Jun 2024 09:56:11 +0200 Subject: [PATCH 19/19] getblockchaininfo: add trim_headers field --- src/rpc/blockchain.cpp | 1 + test/functional/feature_trim_headers.py | 5 ++++- test/functional/rpc_blockchain.py | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index a7a60dbddd2..03b6e449398 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1741,6 +1741,7 @@ RPCHelpMan getblockchaininfo() } obj.pushKV("size_on_disk", chainman.m_blockman.CalculateCurrentUsage()); obj.pushKV("pruned", node::fPruneMode); + obj.pushKV("trim_headers", node::fTrimHeaders); // ELEMENTS if (g_signed_blocks) { if (!DeploymentActiveAfter(tip, chainparams.GetConsensus(), Consensus::DEPLOYMENT_DYNA_FED)) { CScript sign_block_script = chainparams.GetConsensus().signblockscript; diff --git a/test/functional/feature_trim_headers.py b/test/functional/feature_trim_headers.py index 9056629dcb9..8d44ee92a95 100755 --- a/test/functional/feature_trim_headers.py +++ b/test/functional/feature_trim_headers.py @@ -187,6 +187,10 @@ def run_test(self): self.mine_blocks(101, False) self.sync_all() self.check_height(expected_height, all=True) + # check the new field in getblockchaininfo + assert not self.nodes[0].getblockchaininfo()["trim_headers"] + assert self.nodes[1].getblockchaininfo()["trim_headers"] + assert self.nodes[2].getblockchaininfo()["trim_headers"] self.log.info("Shut down trimmed nodes") self.stop_node(1) @@ -201,7 +205,6 @@ def run_test(self): tip = self.nodes[0].getblockhash(self.nodes[0].getblockcount()) header = self.nodes[0].getblockheader(tip) block = self.nodes[0].getblock(tip) - info = self.nodes[0].getblockchaininfo() assert 'signblock_witness_asm' in header assert 'signblock_witness_hex' in header diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 949d7e6fbff..6e54ddca1fd 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -123,6 +123,7 @@ def 
_test_getblockchaininfo(self): 'pruned', 'size_on_disk', 'time', + 'trim_headers', 'verificationprogress', 'warnings', ]
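Taken together, the series gives each header entry a small lifecycle: it may only be trimmed once its full form is safely in the block-tree database (patch 03), a trimmed entry can always be rebuilt from that database on demand (patches 07 and 08), and the trim_headers field added in the final patch merely reports that the node is running in this mode. A compact model of that invariant, using a toy map in place of CBlockTreeDB and a simplified HeaderSketch in place of CBlockIndex:

    #include <cassert>
    #include <map>
    #include <string>

    // Toy "block tree DB": hash -> full header payload.
    using IndexDb = std::map<std::string, std::string>;

    struct HeaderSketch {
        std::string hash;
        std::string payload;          // stands in for proof/dynafed/witness fields
        bool stored{false};
        bool trimmed{false};

        void flush(IndexDb& db) { db[hash] = payload; stored = true; }

        bool trim() {                 // only allowed once the DB has the full copy
            if (!stored) return false;
            payload.clear();
            trimmed = true;
            return true;
        }

        void untrim(const IndexDb& db) {
            if (!trimmed) return;
            payload = db.at(hash);    // always succeeds because trim() required stored
            trimmed = false;
        }
    };

    int main()
    {
        IndexDb db;
        HeaderSketch h{"deadbeef", "signblock-witness+dynafed-params"};

        assert(!h.trim());            // not flushed yet: trimming must be refused
        h.flush(db);
        assert(h.trim());             // now safe to drop the in-memory copy
        h.untrim(db);                 // and it can always be recovered on demand
        assert(h.payload == "signblock-witness+dynafed-params");
        return 0;
    }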