109 "Max protocol message length must be greater than largest "
110 "possible INV message");
164 std::chrono::seconds(2),
165 std::chrono::seconds(2),
166 std::chrono::seconds(60),
173 std::chrono::seconds(2),
174 std::chrono::seconds(2),
175 std::chrono::seconds(60),
267 std::chrono::seconds{1},
268 "INVENTORY_RELAY_MAX too low");
319 std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
357 std::atomic<ServiceFlags> m_their_services{NODE_NONE};
360 Mutex m_misbehavior_mutex;
362 int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
365 bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
368 Mutex m_block_inv_mutex;
374 std::vector<BlockHash> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
380 std::vector<BlockHash>
381 m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
392 std::atomic<int> m_starting_height{-1};
395 std::atomic<uint64_t> m_ping_nonce_sent{0};
397 std::atomic<std::chrono::microseconds> m_ping_start{0us};
399 std::atomic<bool> m_ping_queued{false};
409 std::chrono::microseconds m_next_send_feefilter
422 bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
427 std::unique_ptr<CBloomFilter>
443 GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
449 std::set<TxId> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
455 bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
457 std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
462 std::chrono::microseconds
463 m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
469 std::atomic<Amount> m_fee_filter_received{Amount::zero()};
477 LOCK(m_tx_relay_mutex);
479 m_tx_relay = std::make_unique<Peer::TxRelay>();
480 return m_tx_relay.get();
484 return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
486 const TxRelay *GetTxRelay() const
488 return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
493 std::set<avalanche::ProofId>
494 m_proof_inventory_to_send GUARDED_BY(m_proof_inventory_mutex);
497 GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
504 std::chrono::microseconds m_next_inv_send_time{0};
508 std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
509 std::atomic<bool> compactproofs_requested{false};
516 const std::unique_ptr<ProofRelay> m_proof_relay;
521 std::vector<CAddress>
533 std::unique_ptr<CRollingBloomFilter>
551 std::atomic_bool m_addr_relay_enabled{false};
555 mutable Mutex m_addr_send_times_mutex;
557 std::chrono::microseconds
558 m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
560 std::chrono::microseconds
561 m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
566 std::atomic_bool m_wants_addrv2{false};
570 mutable Mutex m_addr_token_bucket_mutex;
575 double m_addr_token_bucket GUARDED_BY(m_addr_token_bucket_mutex){1.0};
577 std::chrono::microseconds
579 GetTime<std::chrono::microseconds>()};
581 std::atomic<uint64_t> m_addr_rate_limited{0};
586 std::atomic<uint64_t> m_addr_processed{0};
592 bool m_inv_triggered_getheaders_before_sync
596 Mutex m_getdata_requests_mutex;
598 std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
605 Mutex m_headers_sync_mutex;
610 std::unique_ptr<HeadersSyncState>
615 std::atomic<bool> m_sent_sendheaders{false};
618 int m_num_unconnecting_headers_msgs
622 std::chrono::microseconds m_headers_sync_timeout
633 : m_id(id), m_our_services{our_services},
634 m_proof_relay(fRelayProofs ? std::make_unique<ProofRelay>()
638 mutable Mutex m_tx_relay_mutex;
641 std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
644 using PeerRef = std::shared_ptr<Peer>;
662 bool fSyncStarted{false};
665 std::chrono::microseconds m_stalling_since{0us};
666 std::list<QueuedBlock> vBlocksInFlight;
669 std::chrono::microseconds m_downloading_since{0us};
671 bool fPreferredDownload{false};
676 bool m_requested_hb_cmpctblocks{false};
678 bool m_provides_cmpctblocks{false};
706 struct ChainSyncTimeoutState {
709 std::chrono::seconds m_timeout{0s};
713 bool m_sent_getheaders{false};
716 bool m_protect{false};
719 ChainSyncTimeoutState m_chain_sync;
722 int64_t m_last_block_announcement{0};
725 const bool m_is_inbound;
727 CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
745 bool fInitialDownload) override
751 const std::shared_ptr<const CBlock> &pblock) override
760 !m_headers_presync_mutex);
762 std::atomic<bool> &interrupt) override
764 !m_recent_confirmed_transactions_mutex,
765 !m_most_recent_block_mutex, !cs_proofrequest,
766 !m_headers_presync_mutex, g_msgproc_mutex);
769 !m_recent_confirmed_transactions_mutex,
770 !m_most_recent_block_mutex, !cs_proofrequest,
776 std::optional<std::string>
783 void RelayTransaction(const TxId &txid) override
785 void RelayProof(const avalanche::ProofId &proofid) override
787 void SetBestHeight(int height) override { m_best_height = height; };
790 Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, "");
794 const std::chrono::microseconds time_received,
795 const std::atomic<bool> &interruptMsgProc) override
797 !m_recent_confirmed_transactions_mutex,
798 !m_most_recent_block_mutex, !cs_proofrequest,
799 !m_headers_presync_mutex, g_msgproc_mutex);
801 int64_t time_in_seconds) override;
808 void ConsiderEviction(CNode &pto, Peer &peer,
809 std::chrono::seconds time_in_seconds)
816 void EvictExtraOutboundPeers(std::chrono::seconds now)
823 void ReattemptInitialBroadcast(CScheduler &scheduler)
829 void UpdateAvalancheStatistics() const;
834 void AvalanchePeriodicNetworking(CScheduler &scheduler) const;
853 void Misbehaving(Peer &peer, int howmuch, const std::string &message);
867 bool MaybePunishNodeForBlock(NodeId nodeid,
869 bool via_compact_block,
870 const std::string &message = "")
880 const std::string &message = "")
892 bool MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer);
910 bool maybe_add_extra_compact_tx)
913 struct PackageToValidate {
915 const std::vector<NodeId> m_senders;
920 : m_txns{parent, child}, m_senders{parent_sender, child_sender} {}
923 Assume(m_txns.size() == 2);
925 "parent %s (sender=%d) + child %s (sender=%d)",
926 m_txns.front()->GetId().ToString(), m_senders.front(),
927 m_txns.back()->GetId().ToString(), m_senders.back());
936 void ProcessPackageResult(const PackageToValidate &package_to_validate,
946 std::optional<PackageToValidate> Find1P1CPackage(const CTransactionRef &ptx,
973 bool ProcessOrphanTx(const Config &config, Peer &peer)
986 void ProcessHeadersMessage(const Config &config, CNode &pfrom, Peer &peer,
987 std::vector<CBlockHeader> &&headers,
988 bool via_compact_block)
998 bool CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
1008 void HandleFewUnconnectingHeaders(CNode &pfrom, Peer &peer,
1009 const std::vector<CBlockHeader> &headers)
1013 CheckHeadersAreContinuous(const std::vector<CBlockHeader> &headers) const;
1033 bool IsContinuationOfLowWorkHeadersSync(Peer &peer, CNode &pfrom,
1034 std::vector<CBlockHeader> &headers)
1036 !m_headers_presync_mutex, g_msgproc_mutex);
1050 bool TryLowWorkHeadersSync(Peer &peer, CNode &pfrom,
1052 std::vector<CBlockHeader> &headers)
1054 !m_headers_presync_mutex, g_msgproc_mutex);
1060 bool IsAncestorOfBestHeaderOrTip(const CBlockIndex *header)
1074 void HeadersDirectFetchBlocks(const Config &config, CNode &pfrom,
1077 void UpdatePeerStateForReceivedHeaders(CNode &pfrom, Peer &peer,
1079 bool received_new_header,
1080 bool may_have_more_headers)
1083 void SendBlockTransactions(CNode &pfrom, Peer &peer, const CBlock &block,
1092 std::chrono::microseconds current_time)
1102 std::chrono::microseconds current_time, bool preferred)
1106 void PushNodeVersion(const Config &config, CNode &pnode, const Peer &peer);
1114 void MaybeSendPing(CNode &node_to, Peer &peer,
1115 std::chrono::microseconds now);
1118 void MaybeSendAddr(CNode &node, Peer &peer,
1119 std::chrono::microseconds current_time)
1126 void MaybeSendSendHeaders(CNode &node, Peer &peer)
1130 void MaybeSendFeefilter(CNode &node, Peer &peer,
1131 std::chrono::microseconds current_time)
1143 void RelayAddress(NodeId originator, const CAddress &addr, bool fReachable)
1164 Mutex cs_proofrequest;
1169 std::atomic<int> m_best_height{-1};
1172 std::chrono::seconds m_stale_tip_check_time{0s};
1174 const Options m_opts;
1176 bool RejectIncomingTxs(const CNode &peer) const;
1182 bool m_initial_sync_finished{false};
1188 mutable Mutex m_peer_mutex;
1195 std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
1204 const CNodeState *State(NodeId pnode) const
1209 std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
1216 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};
1224 std::map<BlockHash, std::pair<NodeId, bool>>
1234 std::atomic<std::chrono::seconds> m_block_stalling_timeout{
1248 bool AlreadyHaveTx(const TxId &txid, bool include_reconsiderable)
1250 !m_recent_confirmed_transactions_mutex);
1314 mutable Mutex m_recent_confirmed_transactions_mutex;
1316 GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};
1325 std::chrono::microseconds
1326 NextInvToInbounds(std::chrono::microseconds now,
1327 std::chrono::seconds average_interval);
1331 mutable Mutex m_most_recent_block_mutex;
1332 std::shared_ptr<const CBlock>
1333 m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
1334 std::shared_ptr<const CBlockHeaderAndShortTxIDs>
1335 m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
1341 Mutex m_headers_presync_mutex;
1352 using HeadersPresyncStats =
1353 std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
1355 std::map<NodeId, HeadersPresyncStats>
1356 m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex){};
1360 std::atomic_bool m_headers_presync_should_signal{false};
1368 bool IsBlockRequested(const BlockHash &hash)
1372 bool IsBlockRequestedFromOutbound(const BlockHash &hash)
1383 void RemoveBlockRequest(const BlockHash &hash,
1384 std::optional<NodeId> from_peer)
1393 bool BlockRequested(const Config &config, NodeId nodeid,
1395 std::list<QueuedBlock>::iterator **pit = nullptr)
1404 void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
1416 std::atomic<std::chrono::seconds> m_last_tip_update{0s};
1423 const std::chrono::seconds mempool_req,
1424 const std::chrono::seconds now)
1428 void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
1429 const std::atomic<bool> &interruptMsgProc)
1431 peer.m_getdata_requests_mutex,
1437 const std::shared_ptr<const CBlock> &block,
1438 bool force_processing, bool min_pow_checked);
1441 typedef std::map<TxId, CTransactionRef> MapRelay;
1448 std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>>
1457 void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
1476 std::vector<std::pair<TxHash, CTransactionRef>>
1477 vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
1479 size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;
1484 void ProcessBlockAvailability(NodeId nodeid)
1499 bool BlockRequestAllowed(const CBlockIndex *pindex)
1501 bool AlreadyHaveBlock(const BlockHash &block_hash)
1503 bool AlreadyHaveProof(const avalanche::ProofId &proofid);
1504 void ProcessGetBlockData(const Config &config, CNode &pfrom, Peer &peer,
1527 bool PrepareBlockFilterRequest(CNode &node, Peer &peer,
1529 uint32_t start_height,
1531 uint32_t max_height_diff,
1572 uint32_t GetAvalancheVoteForBlock(const BlockHash &hash) const
1581 uint32_t GetAvalancheVoteForTx(const TxId &id) const
1583 !m_recent_confirmed_transactions_mutex);
1592 bool SetupAddressRelay(const CNode &node, Peer &peer)
1595 void AddAddressKnown(Peer &peer, const CAddress &addr)
1597 void PushAddress(Peer &peer, const CAddress &addr)
1605 bool ReceivedAvalancheProof(CNode &node, Peer &peer,
1611 const std::chrono::seconds now)
1614 bool isPreferredDownloadPeer(const CNode &pfrom);
1617 const CNodeState *PeerManagerImpl::State(NodeId pnode) const
1619 std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1620 if (it == m_node_states.end()) {
1627 CNodeState *PeerManagerImpl::State(NodeId pnode)
1629 return const_cast<CNodeState *>(std::as_const(*this).State(pnode));
1637 static bool IsAddrCompatible(const Peer &peer, const CAddress &addr) {
1641 void PeerManagerImpl::AddAddressKnown(Peer &peer, const CAddress &addr) {
1642 assert(peer.m_addr_known);
1643 peer.m_addr_known->insert(addr.GetKey());
1646 void PeerManagerImpl::PushAddress(Peer &peer, const CAddress &addr) {
1650 assert(peer.m_addr_known);
1651 if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) &&
1652 IsAddrCompatible(peer, addr)) {
1653 if (peer.m_addrs_to_send.size() >= m_opts.max_addr_to_send) {
1654 peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] =
1657 peer.m_addrs_to_send.push_back(addr);
1662 static void AddKnownTx(Peer &peer, const TxId &txid) {
1663 auto tx_relay = peer.GetTxRelay();
1668 LOCK(tx_relay->m_tx_inventory_mutex);
1669 tx_relay->m_tx_inventory_known_filter.insert(txid);
1673 if (peer.m_proof_relay != nullptr) {
1674 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
1675 peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);
1679 bool PeerManagerImpl::isPreferredDownloadPeer(const CNode &pfrom) {
1681 const CNodeState *state = State(pfrom.GetId());
1682 return state && state->fPreferredDownload;
1685 static bool CanServeBlocks(const Peer &peer) {
1693 static bool IsLimitedPeer(const Peer &peer) {
1698 std::chrono::microseconds
1699 PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
1700 std::chrono::seconds average_interval) {
1701 if (m_next_inv_to_inbounds.load() < now) {
1708 return m_next_inv_to_inbounds;
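// --- Illustrative sketch (not part of net_processing.cpp) ---------------
// NextInvToInbounds above keeps a single randomized "next send" timestamp
// that is shared by every inbound peer, so no individual inbound connection
// learns a more precise transaction arrival time than any other. A minimal
// self-contained model of that idea, assuming only <chrono> and <random>;
// the exponential draw stands in for the Poisson-style timer used upstream:
#include <chrono>
#include <random>

class SharedInvTimer {
    std::chrono::microseconds m_next{0};
    std::mt19937_64 m_rng{std::random_device{}()};

public:
    // Returns the shared next-send time, advancing it only once per window.
    std::chrono::microseconds Next(std::chrono::microseconds now,
                                   std::chrono::seconds average_interval) {
        if (m_next < now) {
            // Exponential delay with the requested mean: send times are
            // unpredictable but average out to average_interval.
            std::exponential_distribution<double> delay_s(
                1.0 / std::chrono::duration<double>(average_interval).count());
            m_next = now + std::chrono::duration_cast<std::chrono::microseconds>(
                               std::chrono::duration<double>(delay_s(m_rng)));
        }
        return m_next; // the same value is handed to every inbound peer
    }
};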
1711 bool PeerManagerImpl::IsBlockRequested(const BlockHash &hash) {
1712 return mapBlocksInFlight.count(hash);
1715 bool PeerManagerImpl::IsBlockRequestedFromOutbound(const BlockHash &hash) {
1716 for (auto range = mapBlocksInFlight.equal_range(hash);
1717 range.first != range.second; range.first++) {
1718 auto [nodeid, block_it] = range.first->second;
1719 CNodeState &nodestate = *Assert(State(nodeid));
1720 if (!nodestate.m_is_inbound) {
1728 void PeerManagerImpl::RemoveBlockRequest(const BlockHash &hash,
1729 std::optional<NodeId> from_peer) {
1730 auto range = mapBlocksInFlight.equal_range(hash);
1731 if (range.first == range.second) {
1739 while (range.first != range.second) {
1740 auto [node_id, list_it] = range.first->second;
1742 if (from_peer && *from_peer != node_id) {
1747 CNodeState &state = *Assert(State(node_id));
1749 if (state.vBlocksInFlight.begin() == list_it) {
1752 state.m_downloading_since =
1753 std::max(state.m_downloading_since,
1754 GetTime<std::chrono::microseconds>());
1756 state.vBlocksInFlight.erase(list_it);
1758 if (state.vBlocksInFlight.empty()) {
1760 m_peers_downloading_from--;
1762 state.m_stalling_since = 0us;
1764 range.first = mapBlocksInFlight.erase(range.first);
1768 bool PeerManagerImpl::BlockRequested(const Config &config, NodeId nodeid,
1770 std::list<QueuedBlock>::iterator **pit) {
1773 CNodeState *state = State(nodeid);
1774 assert(state != nullptr);
1779 for (auto range = mapBlocksInFlight.equal_range(hash);
1780 range.first != range.second; range.first++) {
1781 if (range.first->second.first == nodeid) {
1783 *pit = &range.first->second.second;
1790 RemoveBlockRequest(hash, nodeid);
1792 std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
1793 state->vBlocksInFlight.end(),
1794 {&block, std::unique_ptr<PartiallyDownloadedBlock>(
1795 pit ? new PartiallyDownloadedBlock(config, &m_mempool)
1797 if (state->vBlocksInFlight.size() == 1) {
1799 state->m_downloading_since = GetTime<std::chrono::microseconds>();
1800 m_peers_downloading_from++;
1803 auto itInFlight = mapBlocksInFlight.insert(
1804 std::make_pair(hash, std::make_pair(nodeid, it)));
1807 *pit = &itInFlight->second.second;
1813 void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) {
1819 if (m_opts.ignore_incoming_txs) {
1823 CNodeState *nodestate = State(nodeid);
1828 if (!nodestate->m_provides_cmpctblocks) {
1831 int num_outbound_hb_peers = 0;
1832 for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
1833 it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
1834 if (*it == nodeid) {
1835 lNodesAnnouncingHeaderAndIDs.erase(it);
1836 lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
1839 CNodeState *state = State(*it);
1840 if (state != nullptr && !state->m_is_inbound) {
1841 ++num_outbound_hb_peers;
1844 if (nodestate->m_is_inbound) {
1847 if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
1848 num_outbound_hb_peers == 1) {
1849 CNodeState *remove_node =
1850 State(lNodesAnnouncingHeaderAndIDs.front());
1851 if (remove_node != nullptr && !remove_node->m_is_inbound) {
1854 std::swap(lNodesAnnouncingHeaderAndIDs.front(),
1855 *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
1862 if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
1866 lNodesAnnouncingHeaderAndIDs.front(), [this](CNode *pnodeStop) {
1867 m_connman.PushMessage(
1868 pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
1869 .Make(NetMsgType::SENDCMPCT,
1871 CMPCTBLOCKS_VERSION));
1874 pnodeStop->m_bip152_highbandwidth_to = false;
1877 lNodesAnnouncingHeaderAndIDs.pop_front();
1886 lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
1891 bool PeerManagerImpl::TipMayBeStale() {
1894 if (m_last_tip_update.load() == 0s) {
1895 m_last_tip_update = GetTime<std::chrono::seconds>();
1897 return m_last_tip_update.load() <
1898 GetTime<std::chrono::seconds>() -
1901 mapBlocksInFlight.empty();
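// --- Illustrative sketch (not part of net_processing.cpp) ---------------
// TipMayBeStale above treats the tip as potentially stale when no tip
// update has been recorded for several block intervals and nothing is
// currently in flight. A standalone version of that predicate, with the
// spacing passed in; the "three missed intervals" multiplier is an assumed
// value mirroring the shape of the check, not a quoted constant:
#include <chrono>

bool TipLooksStale(std::chrono::seconds last_tip_update,
                   std::chrono::seconds now,
                   std::chrono::seconds block_spacing,
                   bool downloads_in_flight) {
    return last_tip_update < now - 3 * block_spacing && !downloads_in_flight;
}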
1904 bool PeerManagerImpl::CanDirectFetch() {
1910 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
1912 if (state->pindexBestKnownBlock &&
1913 pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
1916 if (state->pindexBestHeaderSent &&
1917 pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
1923 void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
1924 CNodeState *state = State(nodeid);
1925 assert(state != nullptr);
1927 if (!state->hashLastUnknownBlock.IsNull()) {
1931 if (state->pindexBestKnownBlock == nullptr ||
1932 pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1933 state->pindexBestKnownBlock = pindex;
1935 state->hashLastUnknownBlock.SetNull();
1940 void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid,
1942 CNodeState *state = State(nodeid);
1943 assert(state != nullptr);
1945 ProcessBlockAvailability(nodeid);
1950 if (state->pindexBestKnownBlock == nullptr ||
1951 pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1952 state->pindexBestKnownBlock = pindex;
1957 state->hashLastUnknownBlock = hash;
1961 void PeerManagerImpl::FindNextBlocksToDownload(
1963 std::vector<const CBlockIndex *> &vBlocks, NodeId &nodeStaller) {
1968 vBlocks.reserve(vBlocks.size() + count);
1969 CNodeState *state = State(nodeid);
1970 assert(state != nullptr);
1973 ProcessBlockAvailability(nodeid);
1975 if (state->pindexBestKnownBlock == nullptr ||
1976 state->pindexBestKnownBlock->nChainWork <
1978 state->pindexBestKnownBlock->nChainWork <
1984 if (state->pindexLastCommonBlock == nullptr) {
1987 state->pindexLastCommonBlock =
1989 .ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
1996 state->pindexLastCommonBlock, state->pindexBestKnownBlock);
1997 if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
2001 std::vector<const CBlockIndex *> vToFetch;
2002 const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
2010 std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
2012 while (pindexWalk->nHeight < nMaxHeight) {
2017 int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
2018 std::max<int>(count - vBlocks.size(), 128));
2019 vToFetch.resize(nToFetch);
2020 pindexWalk = state->pindexBestKnownBlock->GetAncestor(
2021 pindexWalk->nHeight + nToFetch);
2022 vToFetch[nToFetch - 1] = pindexWalk;
2023 for (unsigned int i = nToFetch - 1; i > 0; i--) {
2024 vToFetch[i - 1] = vToFetch[i]->pprev;
2037 if (pindex->nStatus.hasData() ||
2040 state->pindexLastCommonBlock = pindex;
2042 } else if (!IsBlockRequested(pindex->GetBlockHash())) {
2044 if (pindex->nHeight > nWindowEnd) {
2046 if (vBlocks.size() == 0 && waitingfor != nodeid) {
2049 nodeStaller = waitingfor;
2053 vBlocks.push_back(pindex);
2054 if (vBlocks.size() == count) {
2057 } else if (waitingfor == -1) {
2069 template <class InvId>
2073 return !node.HasPermission(
2086 template <class InvId>
2087 static std::chrono::microseconds
2091 std::chrono::microseconds current_time, bool preferred) {
2092 auto delay = std::chrono::microseconds{0};
2104 return current_time + delay;
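// --- Illustrative sketch (not part of net_processing.cpp) ---------------
// The helper above returns current_time plus a delay that depends on the
// announcing peer: preferred (e.g. outbound) peers get their GETDATA
// scheduled sooner than non-preferred ones. A hypothetical standalone
// version of that scheduling rule; the two-second penalty is an assumed
// constant for illustration only:
#include <chrono>

std::chrono::microseconds
ComputeAnnouncementRequestTime(std::chrono::microseconds current_time,
                               bool preferred) {
    using namespace std::chrono_literals;
    constexpr auto NONPREF_PEER_DELAY = 2s; // assumed value
    auto delay = std::chrono::microseconds{0};
    if (!preferred) {
        delay += NONPREF_PEER_DELAY; // let preferred peers answer first
    }
    return current_time + delay;
}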
2107 void PeerManagerImpl::PushNodeVersion(const Config &config, CNode &pnode,
2109 uint64_t my_services{peer.m_our_services};
2110 const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
2112 const int nNodeStartingHeight{m_best_height};
2123 const bool tx_relay{!RejectIncomingTxs(pnode)};
2131 nTime, your_services, addr_you, my_services,
2133 nNodeStartingHeight, tx_relay, extraEntropy));
2137 "send version message: version %d, blocks=%d, them=%s, "
2138 "txrelay=%d, peer=%d\n",
2143 "send version message: version %d, blocks=%d, "
2144 "txrelay=%d, peer=%d\n",
2149 void PeerManagerImpl::AddTxAnnouncement(
2151 std::chrono::microseconds current_time) {
2159 const bool preferred = isPreferredDownloadPeer(node);
2161 current_time, preferred);
2163 m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);
2166 void PeerManagerImpl::AddProofAnnouncement(
2168 std::chrono::microseconds current_time, bool preferred) {
2179 m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);
2182 void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node,
2183 int64_t time_in_seconds) {
2185 CNodeState *state = State(node);
2187 state->m_last_block_announcement = time_in_seconds;
2191 void PeerManagerImpl::InitializeNode(const Config &config, CNode &node,
2196 m_node_states.emplace_hint(m_node_states.end(),
2197 std::piecewise_construct,
2198 std::forward_as_tuple(nodeid),
2199 std::forward_as_tuple(node.IsInboundConn()));
2200 assert(m_txrequest.Count(nodeid) == 0);
2208 PeerRef peer = std::make_shared<Peer>(nodeid, our_services, !!m_avalanche);
2211 m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
2213 if (!node.IsInboundConn()) {
2214 PushNodeVersion(config, node, *peer);
2218 void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler &scheduler) {
2221 for (const TxId &txid : unbroadcast_txids) {
2223 if (m_mempool.exists(txid)) {
2224 RelayTransaction(txid);
2235 auto unbroadcasted_proofids =
2239 auto it = unbroadcasted_proofids.begin();
2240 while (it != unbroadcasted_proofids.end()) {
2243 if (!pm.isBoundToPeer(*it)) {
2244 pm.removeUnbroadcastProof(*it);
2245 it = unbroadcasted_proofids.erase(it);
2252 return unbroadcasted_proofids;
2256 for (const auto &proofid : unbroadcasted_proofids) {
2257 RelayProof(proofid);
2264 const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
2265 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2266 reattemptBroadcastInterval);
2269 void PeerManagerImpl::UpdateAvalancheStatistics() const {
2296 void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
2297 const auto now = GetTime<std::chrono::seconds>();
2298 std::vector<NodeId> avanode_ids;
2299 bool fQuorumEstablished;
2300 bool fShouldRequestMoreNodes;
2310 fShouldRequestMoreNodes =
2318 avanode_ids.push_back(pnode->GetId());
2321 PeerRef peer = GetPeerRef(pnode->GetId());
2322 if (peer == nullptr) {
2326 if (peer->m_proof_relay &&
2327 now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
2329 peer->m_proof_relay->sharedProofs = {};
2333 if (avanode_ids.empty()) {
2341 for (NodeId avanodeId : avanode_ids) {
2342 const bool sentGetavaaddr =
2345 m_connman.PushMessage(
2346 pavanode, CNetMsgMaker(pavanode->GetCommonVersion())
2347 .Make(NetMsgType::GETAVAADDR));
2348 PeerRef peer = GetPeerRef(avanodeId);
2349 WITH_LOCK(peer->m_addr_token_bucket_mutex,
2350 peer->m_addr_token_bucket +=
2351 m_opts.max_addr_to_send);
2359 if (sentGetavaaddr && fQuorumEstablished && !fShouldRequestMoreNodes) {
2374 avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
2377 for (NodeId nodeid : avanode_ids) {
2380 PeerRef peer = GetPeerRef(nodeid);
2381 if (peer->m_proof_relay) {
2386 peer->m_proof_relay->compactproofs_requested = true;
2396 const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
2397 scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2398 avalanchePeriodicNetworkingInterval);
2401 void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node) {
2412 PeerRef peer = RemovePeer(nodeid);
2414 misbehavior = WITH_LOCK(peer->m_misbehavior_mutex,
2415 return peer->m_misbehavior_score);
2417 m_peer_map.erase(nodeid);
2419 CNodeState *state = State(nodeid);
2420 assert(state != nullptr);
2422 if (state->fSyncStarted) {
2426 for (const QueuedBlock &entry : state->vBlocksInFlight) {
2428 mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
2429 while (range.first != range.second) {
2430 auto [node_id, list_it] = range.first->second;
2431 if (node_id != nodeid) {
2434 range.first = mapBlocksInFlight.erase(range.first);
2441 m_txrequest.DisconnectedPeer(nodeid);
2442 m_num_preferred_download_peers -= state->fPreferredDownload;
2443 m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
2444 assert(m_peers_downloading_from >= 0);
2445 m_outbound_peers_with_protect_from_disconnect -=
2446 state->m_chain_sync.m_protect;
2447 assert(m_outbound_peers_with_protect_from_disconnect >= 0);
2449 m_node_states.erase(nodeid);
2451 if (m_node_states.empty()) {
2453 assert(mapBlocksInFlight.empty());
2454 assert(m_num_preferred_download_peers == 0);
2455 assert(m_peers_downloading_from == 0);
2456 assert(m_outbound_peers_with_protect_from_disconnect == 0);
2457 assert(m_txrequest.Size() == 0);
2459 return orphanage.Size();
2464 if (node.fSuccessfullyConnected && misbehavior == 0 &&
2465 !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
2472 LOCK(m_headers_presync_mutex);
2473 m_headers_presync_stats.erase(nodeid);
2476 WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
2481 PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const {
2483 auto it = m_peer_map.find(id);
2484 return it != m_peer_map.end() ? it->second : nullptr;
2487 PeerRef PeerManagerImpl::RemovePeer(NodeId id) {
2490 auto it = m_peer_map.find(id);
2491 if (it != m_peer_map.end()) {
2492 ret = std::move(it->second);
2493 m_peer_map.erase(it);
2498 bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid,
2502 const CNodeState *state = State(nodeid);
2503 if (state == nullptr) {
2507 ? state->pindexBestKnownBlock->nHeight
2510 ? state->pindexLastCommonBlock->nHeight
2512 for (const QueuedBlock &queue : state->vBlocksInFlight) {
2519 PeerRef peer = GetPeerRef(nodeid);
2520 if (peer == nullptr) {
2532 auto ping_wait{0us};
2533 if ((0 != peer->m_ping_nonce_sent) &&
2534 (0 != peer->m_ping_start.load().count())) {
2536 GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
2539 if (auto tx_relay = peer->GetTxRelay()) {
2541 return tx_relay->m_relay_txs);
2553 LOCK(peer->m_headers_sync_mutex);
2554 if (peer->m_headers_sync) {
2562 void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef &tx) {
2563 if (m_opts.max_extra_txs <= 0) {
2567 if (!vExtraTxnForCompact.size()) {
2568 vExtraTxnForCompact.resize(m_opts.max_extra_txs);
2571 vExtraTxnForCompact[vExtraTxnForCompactIt] =
2572 std::make_pair(tx->GetHash(), tx);
2573 vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
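// --- Illustrative sketch (not part of net_processing.cpp) ---------------
// AddToCompactExtraTransactions above keeps a fixed-size ring buffer of
// recently seen transactions that compact block reconstruction may need:
// the write index wraps around and old entries are overwritten. The same
// pattern in isolation, using a plain string payload for simplicity:
#include <cstddef>
#include <string>
#include <vector>

class ExtraTxRing {
    std::vector<std::string> m_buf;
    size_t m_next{0};

public:
    explicit ExtraTxRing(size_t capacity) : m_buf(capacity) {}

    void Add(std::string tx) {
        if (m_buf.empty()) {
            return; // capacity of zero disables the cache
        }
        m_buf[m_next] = std::move(tx);        // overwrite the oldest slot
        m_next = (m_next + 1) % m_buf.size(); // wrap the write cursor
    }
};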
2576 void PeerManagerImpl::Misbehaving(Peer &peer, int howmuch,
2577 const std::string &message) {
2580 LOCK(peer.m_misbehavior_mutex);
2581 const int score_before{peer.m_misbehavior_score};
2582 peer.m_misbehavior_score += howmuch;
2583 const int score_now{peer.m_misbehavior_score};
2585 const std::string message_prefixed =
2586 message.empty() ? "" : (": " + message);
2587 std::string warning;
2591 warning = " DISCOURAGE THRESHOLD EXCEEDED";
2592 peer.m_should_discourage = true;
2596 score_before, score_now, warning, message_prefixed);
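// --- Illustrative sketch (not part of net_processing.cpp) ---------------
// Misbehaving() above accumulates a per-peer score under the peer's own
// mutex and flags the peer for discouragement once the score crosses the
// discouragement threshold. The accounting in isolation, with the
// threshold passed in rather than hard-coded:
struct MisbehaviorTally {
    int score{0};
    bool should_discourage{false};

    // Returns true the first time the threshold is crossed.
    bool Add(int howmuch, int threshold) {
        const int before = score;
        score += howmuch;
        if (before < threshold && score >= threshold) {
            should_discourage = true;
            return true;
        }
        return false;
    }
};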
2599 bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid,
2601 bool via_compact_block,
2602 const std::string &message) {
2603 PeerRef peer{GetPeerRef(nodeid)};
2614 if (!via_compact_block) {
2616 Misbehaving(*peer, 100, message);
2623 CNodeState *node_state = State(nodeid);
2624 if (node_state == nullptr) {
2631 if (!via_compact_block && !node_state->m_is_inbound) {
2633 Misbehaving(*peer, 100, message);
2643 Misbehaving(*peer, 100, message);
2651 Misbehaving(*peer, 10, message);
2657 if (message != "") {
2663 bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid,
2665 const std::string &message) {
2666 PeerRef peer{GetPeerRef(nodeid)};
2673 Misbehaving(*peer, 100, message);
2691 if (message != "") {
2697 bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex *pindex) {
2703 (m_chainman.m_best_header != nullptr) &&
2704 (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() <
2707 *m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,
2711 std::optional<std::string>
2712 PeerManagerImpl::FetchBlock(const Config &config, NodeId peer_id,
2715 return "Loading blocks ...";
2721 CNodeState *state = State(peer_id);
2722 if (state == nullptr) {
2723 return "Peer does not exist";
2727 RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
2730 if (!BlockRequested(config, peer_id, block_index)) {
2731 return "Already requested from this peer";
2740 const CNetMsgMaker msgMaker(node->GetCommonVersion());
2741 this->m_connman.PushMessage(
2742 node, msgMaker.Make(NetMsgType::GETDATA, invs));
2745 return "Node not fully connected";
2750 return std::nullopt;
2753 std::unique_ptr<PeerManager>
2757 return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,
2766 : m_rng{opts.deterministic_rng},
2768 m_chainparams(chainman.GetParams()), m_connman(connman),
2769 m_addrman(addrman), m_banman(banman), m_chainman(chainman),
2770 m_mempool(pool), m_avalanche(avalanche), m_opts{opts} {}
2772 void PeerManagerImpl::StartScheduledTasks(CScheduler &scheduler) {
2779 "peer eviction timer should be less than stale tip check timer");
2782 this->CheckForStaleTipAndEvictPeers();
2788 const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
2789 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2790 reattemptBroadcastInterval);
2795 UpdateAvalancheStatistics();
2801 const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
2802 scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2803 avalanchePeriodicNetworkingInterval);
2812 void PeerManagerImpl::BlockConnected(
2813 const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) {
2820 m_last_tip_update = GetTime<std::chrono::seconds>();
2823 LOCK(m_recent_confirmed_transactions_mutex);
2825 m_recent_confirmed_transactions.insert(ptx->GetId());
2830 for (const auto &ptx : pblock->vtx) {
2831 m_txrequest.ForgetInvId(ptx->GetId());
2837 auto stalling_timeout = m_block_stalling_timeout.load();
2840 const auto new_timeout =
2841 std::max(std::chrono::duration_cast<std::chrono::seconds>(
2842 stalling_timeout * 0.85),
2844 if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout,
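// --- Illustrative sketch (not part of net_processing.cpp) ---------------
// The code above eases the block-stalling timeout back down after a block
// connects: the current timeout is scaled by 0.85, clamped to a floor, and
// compare_exchange_strong is used so a concurrent update is not clobbered.
// The same adaptive decay in isolation:
#include <algorithm>
#include <atomic>
#include <chrono>

void RelaxStallingTimeout(std::atomic<std::chrono::seconds> &timeout,
                          std::chrono::seconds floor_timeout) {
    auto current = timeout.load();
    if (current <= floor_timeout) {
        return; // already at the minimum
    }
    const auto relaxed = std::max(
        std::chrono::duration_cast<std::chrono::seconds>(current * 0.85),
        floor_timeout);
    // Only update if nobody else changed the value in the meantime.
    timeout.compare_exchange_strong(current, relaxed);
}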
2852 void PeerManagerImpl::BlockDisconnected(
2853 const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
2862 LOCK(m_recent_confirmed_transactions_mutex);
2863 m_recent_confirmed_transactions.reset();
2870 void PeerManagerImpl::NewPoWValidBlock(
2871 const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
2872 std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
2873 std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
2878 if (pindex->nHeight <= m_highest_fast_announce) {
2881 m_highest_fast_announce = pindex->nHeight;
2884 const std::shared_future<CSerializedNetMsg> lazy_ser{
2885 std::async(std::launch::deferred, [&] {
2890 LOCK(m_most_recent_block_mutex);
2891 m_most_recent_block_hash = hashBlock;
2892 m_most_recent_block = pblock;
2893 m_most_recent_compact_block = pcmpctblock;
2897 [this, pindex, &lazy_ser, &hashBlock](CNode *pnode)
2905 ProcessBlockAvailability(pnode->GetId());
2906 CNodeState &state = *State(pnode->GetId());
2910 if (state.m_requested_hb_cmpctblocks &&
2911 !PeerHasHeader(&state, pindex) &&
2912 PeerHasHeader(&state, pindex->pprev)) {
2914 "%s sending header-and-ids %s to peer=%d\n",
2915 "PeerManager::NewPoWValidBlock",
2916 hashBlock.ToString(), pnode->GetId());
2919 m_connman.PushMessage(pnode, ser_cmpctblock.Copy());
2920 state.pindexBestHeaderSent = pindex;
2929 void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew,
2931 bool fInitialDownload) {
2932 SetBestHeight(pindexNew->nHeight);
2936 if (fInitialDownload) {
2941 std::vector<BlockHash> vHashes;
2943 while (pindexToAnnounce != pindexFork) {
2945 pindexToAnnounce = pindexToAnnounce->pprev;
2955 for (auto &it : m_peer_map) {
2956 Peer &peer = *it.second;
2957 LOCK(peer.m_block_inv_mutex);
2959 peer.m_blocks_for_headers_relay.push_back(hash);
2971 void PeerManagerImpl::BlockChecked(const CBlock &block,
2976 std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
2977 mapBlockSource.find(hash);
2981 if (state.IsInvalid() && it != mapBlockSource.end() &&
2982 State(it->second.first)) {
2983 MaybePunishNodeForBlock(it->second.first, state,
2984 !it->second.second);
2994 mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
2995 if (it != mapBlockSource.end()) {
2996 MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
3000 if (it != mapBlockSource.end()) {
3001 mapBlockSource.erase(it);
3010 bool PeerManagerImpl::AlreadyHaveTx(const TxId &txid,
3011 bool include_reconsiderable) {
3013 hashRecentRejectsChainTip) {
3018 hashRecentRejectsChainTip =
3020 m_recent_rejects.reset();
3021 m_recent_rejects_package_reconsiderable.reset();
3025 return orphanage.HaveTx(txid);
3031 return conflicting.HaveTx(txid);
3036 if (include_reconsiderable &&
3037 m_recent_rejects_package_reconsiderable.contains(txid)) {
3042 LOCK(m_recent_confirmed_transactions_mutex);
3043 if (m_recent_confirmed_transactions.contains(txid)) {
3048 return m_recent_rejects.contains(txid) || m_mempool.exists(txid);
3051 bool PeerManagerImpl::AlreadyHaveBlock(const BlockHash &block_hash) {
3059 if (localProof && localProof->getId() == proofid) {
3068 void PeerManagerImpl::SendPings() {
3070 for (auto &it : m_peer_map) {
3071 it.second->m_ping_queued = true;
3075 void PeerManagerImpl::RelayTransaction(const TxId &txid) {
3077 for (auto &it : m_peer_map) {
3078 Peer &peer = *it.second;
3079 auto tx_relay = peer.GetTxRelay();
3083 LOCK(tx_relay->m_tx_inventory_mutex);
3089 if (tx_relay->m_next_inv_send_time == 0s) {
3093 if (!tx_relay->m_tx_inventory_known_filter.contains(txid)) {
3094 tx_relay->m_tx_inventory_to_send.insert(txid);
3101 for (auto &it : m_peer_map) {
3102 Peer &peer = *it.second;
3104 if (!peer.m_proof_relay) {
3107 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
3108 if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
3110 peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);
3115 void PeerManagerImpl::RelayAddress(NodeId originator, const CAddress &addr,
3131 const auto current_time{GetTime<std::chrono::seconds>()};
3134 const uint64_t time_addr{
3135 (static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) /
3145 unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
3146 std::array<std::pair<uint64_t, Peer *>, 2> best{
3147 {{0, nullptr}, {0, nullptr}}};
3148 assert(nRelayNodes <= best.size());
3152 for (auto &[id, peer] : m_peer_map) {
3153 if (peer->m_addr_relay_enabled && id != originator &&
3154 IsAddrCompatible(*peer, addr)) {
3156 for (unsigned int i = 0; i < nRelayNodes; i++) {
3157 if (hashKey > best[i].first) {
3158 std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
3159 best.begin() + i + 1);
3160 best[i] = std::make_pair(hashKey, peer.get());
3167 for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
3168 PushAddress(*best[i].second, addr);
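// --- Illustrative sketch (not part of net_processing.cpp) ---------------
// RelayAddress above picks its one or two relay targets deterministically:
// each eligible peer gets a keyed hash, and the peers with the highest hash
// values win. Because the key also commits to a coarse time bucket, the
// same address keeps going to the same peers for a while, which limits
// topology probing. A simplified selector using std::hash in place of the
// keyed SipHash from the real code:
#include <array>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

std::vector<int> PickAddrRelayTargets(const std::vector<int> &peer_ids,
                                      const std::string &addr_key,
                                      uint64_t time_bucket,
                                      unsigned int n_targets) {
    // best[i] = {hash value, peer id}, kept sorted from best to worst.
    std::array<std::pair<uint64_t, int>, 2> best{{{0, -1}, {0, -1}}};
    n_targets = std::min<unsigned int>(n_targets, best.size());
    for (int id : peer_ids) {
        const uint64_t h = std::hash<std::string>{}(
            addr_key + '|' + std::to_string(time_bucket) + '|' +
            std::to_string(id));
        for (unsigned int i = 0; i < n_targets; i++) {
            if (h > best[i].first) {
                // Shift weaker candidates down and insert here.
                for (unsigned int j = n_targets - 1; j > i; j--) {
                    best[j] = best[j - 1];
                }
                best[i] = {h, id};
                break;
            }
        }
    }
    std::vector<int> out;
    for (unsigned int i = 0; i < n_targets && best[i].second != -1; i++) {
        out.push_back(best[i].second);
    }
    return out;
}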
3172 void PeerManagerImpl::ProcessGetBlockData(const Config &config, CNode &pfrom,
3173 Peer &peer, const CInv &inv) {
3176 std::shared_ptr<const CBlock> a_recent_block;
3177 std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
3179 LOCK(m_most_recent_block_mutex);
3180 a_recent_block = m_most_recent_block;
3181 a_recent_compact_block = m_most_recent_compact_block;
3184 bool need_activate_chain = false;
3198 need_activate_chain = true;
3202 if (need_activate_chain) {
3205 state, a_recent_block, m_avalanche)) {
3216 if (!BlockRequestAllowed(pindex)) {
3218 "%s: ignoring request from peer=%i for old "
3219 "block that isn't in the main chain\n",
3220 __func__, pfrom.GetId());
3227 (((m_chainman.m_best_header != nullptr) &&
3228 (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() >
3234 "historical block serving limit reached, disconnect peer=%d\n",
3249 "Ignore block request below NODE_NETWORK_LIMITED "
3250 "threshold, disconnect peer=%d\n",
3260 if (!pindex->nStatus.hasData()) {
3263 std::shared_ptr<const CBlock> pblock;
3264 if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
3265 pblock = a_recent_block;
3268 std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
3270 assert(!"cannot load block from disk");
3272 pblock = pblockRead;
3278 bool sendMerkleBlock = false;
3280 if (auto tx_relay = peer.GetTxRelay()) {
3281 LOCK(tx_relay->m_bloom_filter_mutex);
3282 if (tx_relay->m_bloom_filter) {
3283 sendMerkleBlock = true;
3284 merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
3287 if (sendMerkleBlock) {
3299 typedef std::pair<size_t, uint256> PairType;
3314 if (CanDirectFetch() &&
3317 if (a_recent_compact_block &&
3318 a_recent_compact_block->header.GetHash() ==
3322 *a_recent_compact_block));
3336 LOCK(peer.m_block_inv_mutex);
3339 if (hash == peer.m_continuation_block) {
3343 std::vector<CInv> vInv;
3344 vInv.push_back(CInv(
3347 peer.m_continuation_block = BlockHash();
3353 PeerManagerImpl::FindTxForGetData(const Peer &peer, const TxId &txid,
3354 const std::chrono::seconds mempool_req,
3355 const std::chrono::seconds now) {
3356 auto txinfo = m_mempool.info(txid);
3361 if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
3363 return std::move(txinfo.tx);
3371 if (Assume(peer.GetTxRelay())
3372 ->m_recently_announced_invs.contains(txid)) {
3375 return std::move(txinfo.tx);
3378 auto mi = mapRelay.find(txid);
3379 if (mi != mapRelay.end()) {
3391 PeerManagerImpl::FindProofForGetData(const Peer &peer,
3393 const std::chrono::seconds now) {
3396 bool send_unconditionally =
3422 if (send_unconditionally) {
3427 if (peer.m_proof_relay->m_recently_announced_proofs.contains(proofid)) {
3434 void PeerManagerImpl::ProcessGetData(
3436 const std::atomic<bool> &interruptMsgProc) {
3439 auto tx_relay = peer.GetTxRelay();
3441 std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
3442 std::vector<CInv> vNotFound;
3445 const auto now{GetTime<std::chrono::seconds>()};
3447 const auto mempool_req = tx_relay != nullptr
3448 ? tx_relay->m_last_mempool_req.load()
3449 : std::chrono::seconds::min();
3454 while (it != peer.m_getdata_requests.end()) {
3455 if (interruptMsgProc) {
3464 const CInv &inv = *it;
3466 if (it->IsMsgStakeContender()) {
3473 if (it->IsMsgProof()) {
3475 vNotFound.push_back(inv);
3480 auto proof = FindProofForGetData(peer, proofid, now);
3488 vNotFound.push_back(inv);
3495 if (it->IsMsgTx()) {
3496 if (tx_relay == nullptr) {
3512 std::vector<TxId> parent_ids_to_add;
3515 auto txiter = m_mempool.GetIter(tx->GetId());
3517 auto &pentry = *txiter;
3519 (*pentry)->GetMemPoolParentsConst();
3520 parent_ids_to_add.reserve(parents.size());
3521 for (const auto &parent : parents) {
3522 if (parent.get()->GetTime() >
3524 parent_ids_to_add.push_back(
3525 parent.get()->GetTx().GetId());
3530 for (const TxId &parent_txid : parent_ids_to_add) {
3533 if (WITH_LOCK(tx_relay->m_tx_inventory_mutex,
3534 return !tx_relay->m_tx_inventory_known_filter
3535 .contains(parent_txid))) {
3536 tx_relay->m_recently_announced_invs.insert(parent_txid);
3540 vNotFound.push_back(inv);
3553 if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
3554 const CInv &inv = *it++;
3556 ProcessGetBlockData(config, pfrom, peer, inv);
3562 peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
3564 if (!vNotFound.empty()) {
3582 void PeerManagerImpl::SendBlockTransactions(
3586 for (size_t i = 0; i < req.indices.size(); i++) {
3588 Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");
3600 bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
3605 Misbehaving(peer, 100, "header with invalid proof of work");
3610 if (!CheckHeadersAreContinuous(headers)) {
3611 Misbehaving(peer, 20, "non-continuous headers sequence");
3624 near_chaintip_work =
3643 void PeerManagerImpl::HandleFewUnconnectingHeaders(
3644 CNode &pfrom, Peer &peer, const std::vector<CBlockHeader> &headers) {
3647 peer.m_num_unconnecting_headers_msgs++;
3651 if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
3654 "received header %s: missing prev block %s, sending getheaders "
3655 "(%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
3657 headers[0].hashPrevBlock.ToString(), best_header->nHeight,
3658 pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
3665 UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
3669 if (peer.m_num_unconnecting_headers_msgs %
3672 Misbehaving(peer, 20,
3674 peer.m_num_unconnecting_headers_msgs));
3678 bool PeerManagerImpl::CheckHeadersAreContinuous(
3679 const std::vector<CBlockHeader> &headers) const {
3682 if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
3685 hashLastBlock = header.GetHash();
3690 bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(
3691 Peer &peer, CNode &pfrom, std::vector<CBlockHeader> &headers) {
3692 if (peer.m_headers_sync) {
3693 auto result = peer.m_headers_sync->ProcessNextHeaders(
3695 if (result.request_more) {
3696 auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
3699 Assume(!locator.vHave.empty());
3700 if (!locator.vHave.empty()) {
3707 bool sent_getheaders =
3708 MaybeSendGetHeaders(pfrom, locator, peer);
3709 if (sent_getheaders) {
3711 "more getheaders (from %s) to peer=%d\n",
3712 locator.vHave.front().ToString(), pfrom.GetId());
3715 "error sending next getheaders (from %s) to "
3716 "continue sync with peer=%d\n",
3717 locator.vHave.front().ToString(), pfrom.GetId());
3723 peer.m_headers_sync.reset(nullptr);
3728 LOCK(m_headers_presync_mutex);
3729 m_headers_presync_stats.erase(pfrom.GetId());
3732 HeadersPresyncStats stats;
3733 stats.first = peer.m_headers_sync->GetPresyncWork();
3734 if (peer.m_headers_sync->GetState() ==
3736 stats.second = {peer.m_headers_sync->GetPresyncHeight(),
3737 peer.m_headers_sync->GetPresyncTime()};
3741 LOCK(m_headers_presync_mutex);
3742 m_headers_presync_stats[pfrom.GetId()] = stats;
3744 m_headers_presync_stats.find(m_headers_presync_bestpeer);
3745 bool best_updated = false;
3746 if (best_it == m_headers_presync_stats.end()) {
3751 const HeadersPresyncStats *stat_best{nullptr};
3752 for (const auto &[_peer, _stat] : m_headers_presync_stats) {
3753 if (!stat_best || _stat > *stat_best) {
3758 m_headers_presync_bestpeer = peer_best;
3759 best_updated = (peer_best == pfrom.GetId());
3760 } else if (best_it->first == pfrom.GetId() ||
3761 stats > best_it->second) {
3764 m_headers_presync_bestpeer = pfrom.GetId();
3765 best_updated = true;
3767 if (best_updated && stats.second.has_value()) {
3770 m_headers_presync_should_signal = true;
3774 if (result.success) {
3777 headers.swap(result.pow_validated_headers);
3780 return result.success;
3788 bool PeerManagerImpl::TryLowWorkHeadersSync(
3790 std::vector<CBlockHeader> &headers) {
3797 arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
3801 if (total_work < minimum_chain_work) {
3815 LOCK(peer.m_headers_sync_mutex);
3816 peer.m_headers_sync.reset(
3818 chain_start_header, minimum_chain_work));
3823 (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
3826 "Ignoring low-work chain (height=%u) from peer=%d\n",
3827 chain_start_header->nHeight + headers.size(),
3839 bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex *header) {
3840 return header != nullptr &&
3841 ((m_chainman.m_best_header != nullptr &&
3843 m_chainman.m_best_header->GetAncestor(header->nHeight)) ||
3847 bool PeerManagerImpl::MaybeSendGetHeaders(CNode &pfrom,
3856 if (current_time - peer.m_last_getheaders_timestamp >
3860 peer.m_last_getheaders_timestamp = current_time;
3872 void PeerManagerImpl::HeadersDirectFetchBlocks(const Config &config,
3878 CNodeState *nodestate = State(pfrom.GetId());
3882 std::vector<const CBlockIndex *> vToFetch;
3888 if (!pindexWalk->nStatus.hasData() &&
3891 vToFetch.push_back(pindexWalk);
3893 pindexWalk = pindexWalk->pprev;
3904 std::vector<CInv> vGetData;
3907 if (nodestate->vBlocksInFlight.size() >=
3913 BlockRequested(config, pfrom.GetId(), *pindex);
3917 if (vGetData.size() > 1) {
3919 "Downloading blocks toward %s (%d) via headers "
3924 if (vGetData.size() > 0) {
3925 if (!m_opts.ignore_incoming_txs &&
3926 nodestate->m_provides_cmpctblocks && vGetData.size() == 1 &&
3927 mapBlocksInFlight.size() == 1 &&
3945 void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(
3947 bool received_new_header, bool may_have_more_headers) {
3948 if (peer.m_num_unconnecting_headers_msgs > 0) {
3951 "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n",
3952 pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
3954 peer.m_num_unconnecting_headers_msgs = 0;
3958 CNodeState *nodestate = State(pfrom.GetId());
3966 if (received_new_header &&
3968 nodestate->m_last_block_announcement = GetTime();
3974 !may_have_more_headers) {
3977 if (nodestate->pindexBestKnownBlock &&
3978 nodestate->pindexBestKnownBlock->nChainWork <
3989 LogPrintf("Disconnecting outbound peer %d -- headers "
3990 "chain has insufficient work\n",
4004 nodestate->pindexBestKnownBlock != nullptr) {
4005 if (m_outbound_peers_with_protect_from_disconnect <
4007 nodestate->pindexBestKnownBlock->nChainWork >=
4009 !nodestate->m_chain_sync.m_protect) {
4012 nodestate->m_chain_sync.m_protect = true;
4013 ++m_outbound_peers_with_protect_from_disconnect;
4018 void PeerManagerImpl::ProcessHeadersMessage(const Config &config, CNode &pfrom,
4020 std::vector<CBlockHeader> &&headers,
4021 bool via_compact_block) {
4022 size_t nCount = headers.size();
4030 LOCK(peer.m_headers_sync_mutex);
4031 if (peer.m_headers_sync) {
4032 peer.m_headers_sync.reset(nullptr);
4033 LOCK(m_headers_presync_mutex);
4034 m_headers_presync_stats.erase(pfrom.GetId());
4043 if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
4058 bool already_validated_work = false;
4061 bool have_headers_sync = false;
4063 LOCK(peer.m_headers_sync_mutex);
4065 already_validated_work =
4066 IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
4078 if (headers.empty()) {
4082 have_headers_sync = !!peer.m_headers_sync;
4088 headers[0].hashPrevBlock))};
4089 bool headers_connect_blockindex{chain_start_header != nullptr};
4091 if (!headers_connect_blockindex) {
4096 HandleFewUnconnectingHeaders(pfrom, peer, headers);
4098 Misbehaving(peer, 10, "invalid header received");
4110 last_received_header =
4112 if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
4113 already_validated_work = true;
4121 already_validated_work = true;
4127 if (!already_validated_work &&
4128 TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
4140 bool received_new_header{last_received_header == nullptr};
4145 state, &pindexLast)) {
4147 MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
4148 "invalid header received");
4158 if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
4161 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
4162 pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
4166 UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast,
4167 received_new_header,
4171 HeadersDirectFetchBlocks(config, pfrom, *pindexLast);
4174 void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid,
4177 bool maybe_add_extra_compact_tx) {
4182 const TxId &txid = ptx->GetId();
4201 m_recent_rejects_package_reconsiderable.insert(txid);
4203 m_recent_rejects.insert(txid);
4205 m_txrequest.ForgetInvId(txid);
4208 AddToCompactExtraTransactions(ptx);
4211 MaybePunishNodeForTx(nodeid, state);
4217 return orphanage.EraseTx(txid);
4231 m_txrequest.ForgetInvId(tx->GetId());
4237 orphanage.EraseTx(tx->GetId());
4242 "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
4243 nodeid, tx->GetId().ToString(), m_mempool.size(),
4246 RelayTransaction(tx->GetId());
4249 void PeerManagerImpl::ProcessPackageResult(
4250 const PackageToValidate &package_to_validate,
4256 const auto &package = package_to_validate.m_txns;
4257 const auto &senders = package_to_validate.m_senders;
4260 m_recent_rejects_package_reconsiderable.insert(GetPackageHash(package));
4264 if (!Assume(package.size() == 2)) {
4270 auto package_iter = package.rbegin();
4271 auto senders_iter = senders.rbegin();
4272 while (package_iter != package.rend()) {
4273 const auto &tx = *package_iter;
4274 const NodeId nodeid = *senders_iter;
4275 const auto it_result{package_result.m_tx_results.find(tx->GetId())};
4279 const auto &tx_result = it_result->second;
4280 switch (tx_result.m_result_type) {
4282 ProcessValidTx(nodeid, tx);
4292 ProcessInvalidTx(nodeid, tx, tx_result.m_state,
4309 std::optional<PeerManagerImpl::PackageToValidate>
4315 const auto &parent_txid{ptx->GetId()};
4317 Assume(m_recent_rejects_package_reconsiderable.contains(parent_txid));
4323 const auto cpfp_candidates_same_peer{
4329 for (const auto &child : cpfp_candidates_same_peer) {
4330 Package maybe_cpfp_package{ptx, child};
4331 if (!m_recent_rejects_package_reconsiderable.contains(
4333 return PeerManagerImpl::PackageToValidate{ptx, child, nodeid,
4347 const auto cpfp_candidates_different_peer{
4357 std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
4358 std::iota(tx_indices.begin(), tx_indices.end(), 0);
4359 Shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
4361 for (const auto index : tx_indices) {
4364 const auto [child_tx, child_sender] =
4365 cpfp_candidates_different_peer.at(index);
4366 Package maybe_cpfp_package{ptx, child_tx};
4367 if (!m_recent_rejects_package_reconsiderable.contains(
4369 return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid,
4373 return std::nullopt;
4376 bool PeerManagerImpl::ProcessOrphanTx(const Config &config, Peer &peer) {
4382 return orphanage.GetTxToReconsider(peer.m_id);
4387 const TxId &orphanTxId = porphanTx->GetId();
4392 ProcessValidTx(peer.m_id, porphanTx);
4398 " invalid orphan tx %s from peer=%d. %s\n",
4405 ProcessInvalidTx(peer.m_id, porphanTx, state,
4416 bool PeerManagerImpl::PrepareBlockFilterRequest(
4418 const BlockHash &stop_hash, uint32_t max_height_diff,
4420 const bool supported_filter_type =
4423 if (!supported_filter_type) {
4425 "peer %d requested unsupported block filter type: %d\n",
4426 node.GetId(), static_cast<uint8_t>(filter_type));
4427 node.fDisconnect = true;
4437 if (!stop_index || !BlockRequestAllowed(stop_index)) {
4440 node.fDisconnect = true;
4445 uint32_t stop_height = stop_index->nHeight;
4446 if (start_height > stop_height) {
4449 "peer %d sent invalid getcfilters/getcfheaders with "
4451 "start height %d and stop height %d\n",
4452 node.GetId(), start_height, stop_height);
4453 node.fDisconnect = true;
4456 if (stop_height - start_height >= max_height_diff) {
4458 "peer %d requested too many cfilters/cfheaders: %d / %d\n",
4459 node.GetId(), stop_height - start_height + 1, max_height_diff);
4460 node.fDisconnect = true;
4465 if (!filter_index) {
4474 void PeerManagerImpl::ProcessGetCFilters(CNode &node, Peer &peer,
4476 uint8_t filter_type_ser;
4477 uint32_t start_height;
4480 vRecv >> filter_type_ser >> start_height >> stop_hash;
4487 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
4493 std::vector<BlockFilter> filters;
4496 "Failed to find block filter in index: filter_type=%s, "
4497 "start_height=%d, stop_hash=%s\n",
4503 for (const auto &filter : filters) {
4510 void PeerManagerImpl::ProcessGetCFHeaders(CNode &node, Peer &peer,
4512 uint8_t filter_type_ser;
4513 uint32_t start_height;
4516 vRecv >> filter_type_ser >> start_height >> stop_hash;
4523 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
4530 if (start_height > 0) {
4532 stop_index->GetAncestor(static_cast<int>(start_height - 1));
4535 "Failed to find block filter header in index: "
4536 "filter_type=%s, block_hash=%s\n",
4543 std::vector<uint256> filter_hashes;
4547 "Failed to find block filter hashes in index: filter_type=%s, "
4548 "start_height=%d, stop_hash=%s\n",
4557 stop_index->GetBlockHash(), prev_header, filter_hashes);
4561 void PeerManagerImpl::ProcessGetCFCheckPt(CNode &node, Peer &peer,
4563 uint8_t filter_type_ser;
4566 vRecv >> filter_type_ser >> stop_hash;
4573 if (!PrepareBlockFilterRequest(
4574 node, peer, filter_type, 0, stop_hash,
4575 std::numeric_limits<uint32_t>::max(),
4576 stop_index, filter_index)) {
4584 for (int i = headers.size() - 1; i >= 0; i--) {
4590 "Failed to find block filter header in index: "
4591 "filter_type=%s, block_hash=%s\n",
4616 PeerManagerImpl::GetAvalancheVoteForBlock(const BlockHash &hash) const {
4627 if (pindex->nStatus.isInvalid()) {
4632 if (pindex->nStatus.isOnParkedChain()) {
4640 if (pindex == pindexFork) {
4645 if (pindexFork != pindexTip) {
4650 if (!pindex->nStatus.hasData()) {
4659 uint32_t PeerManagerImpl::GetAvalancheVoteForTx(const TxId &id) const {
4661 if (m_mempool.exists(id) ||
4662 WITH_LOCK(m_recent_confirmed_transactions_mutex,
4663 return m_recent_confirmed_transactions.contains(id))) {
4669 return conflicting.HaveTx(id);
4675 if (m_recent_rejects.contains(id)) {
4681 return orphanage.HaveTx(id);
4732 const std::shared_ptr<const CBlock> &block,
4733 bool force_processing,
4734 bool min_pow_checked) {
4735 bool new_block{false};
4737 &new_block, m_avalanche);
4739 node.m_last_block_time = GetTime<std::chrono::seconds>();
4744 RemoveBlockRequest(block->GetHash(), std::nullopt);
4747 mapBlockSource.erase(block->GetHash());
4751 void PeerManagerImpl::ProcessMessage(
4752 const Config &config, CNode &pfrom, const std::string &msg_type,
4753 CDataStream &vRecv, const std::chrono::microseconds time_received,
4754 const std::atomic<bool> &interruptMsgProc) {
4760 PeerRef peer = GetPeerRef(pfrom.GetId());
4761 if (peer == nullptr) {
4768 "Avalanche is not initialized, ignoring %s message\n",
4782 Misbehaving(*peer, 1, "redundant version message");
4788 uint64_t nNonce = 1;
4791 std::string cleanSubVer;
4792 int starting_height = -1;
4794 uint64_t nExtraEntropy = 1;
4796 vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
4809 "peer=%d does not offer the expected services "
4810 "(%08x offered, %08x expected); disconnecting\n",
4811 pfrom.GetId(), nServices,
4821 "peer=%d does not offer the avalanche service; disconnecting\n",
4830 "peer=%d using obsolete version %i; disconnecting\n",
4831 pfrom.GetId(), nVersion);
4836 if (!vRecv.empty()) {
4845 if (!vRecv.empty()) {
4846 std::string strSubVer;
4850 if (!vRecv.empty()) {
4851 vRecv >> starting_height;
4853 if (!vRecv.empty()) {
4856 if (!vRecv.empty()) {
4857 vRecv >> nExtraEntropy;
4861 LogPrintf("connected to self at %s, disconnecting\n",
4874 PushNodeVersion(config, pfrom, *peer);
4878 const int greatest_common_version =
4892 peer->m_their_services = nServices;
4896 pfrom.cleanSubVer = cleanSubVer;
4898 peer->m_starting_height = starting_height;
4906 (fRelay || (peer->m_our_services & NODE_BLOOM))) {
4907 auto *const tx_relay = peer->SetTxRelay();
4909 LOCK(tx_relay->m_bloom_filter_mutex);
4911 tx_relay->m_relay_txs = fRelay;
4924 CNodeState *state = State(pfrom.GetId());
4925 state->fPreferredDownload =
4929 m_num_preferred_download_peers += state->fPreferredDownload;
4935 bool send_getaddr{false};
4937 send_getaddr = SetupAddressRelay(pfrom, *peer);
4949 peer->m_getaddr_sent = true;
4953 WITH_LOCK(peer->m_addr_token_bucket_mutex,
4954 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
4975 std::string remoteAddr;
4981 "receive version message: [%s] %s: version %d, blocks=%d, "
4982 "us=%s, txrelay=%d, peer=%d%s\n",
4984 peer->m_starting_height, addrMe.ToString(), fRelay,
4985                  pfrom.GetId(), remoteAddr);
4987         int64_t currentTime = GetTime();
4988 int64_t nTimeOffset = nTime - currentTime;
4993 Misbehaving(*peer, 20,
4994 "Ignoring invalid timestamp in version message");
5004 "feeler connection completed peer=%d; disconnecting\n",
5013         Misbehaving(*peer, 10, "non-version message before version handshake");
5023 "ignoring redundant verack message from peer=%d\n",
5030 "New outbound peer connected: version: %d, blocks=%d, "
5032                   pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(),
5054 AddKnownProof(*peer, localProof->getId());
5058 peer->m_proof_relay->m_recently_announced_proofs.insert(
5059 localProof->getId());
5064         if (auto tx_relay = peer->GetTxRelay()) {
5073                           return tx_relay->m_tx_inventory_to_send.empty() &&
5074                                  tx_relay->m_next_inv_send_time == 0s));
5083         Misbehaving(*peer, 10, "non-verack message before version handshake");
5097 std::vector<CAddress> vAddr;
5101 if (!SetupAddressRelay(pfrom, *peer)) {
5107 if (vAddr.size() > m_opts.max_addr_to_send) {
5110                         strprintf("%s message size = %u", msg_type, vAddr.size()));
5115 std::vector<CAddress> vAddrOk;
5116 const auto current_a_time{Now<NodeSeconds>()};
5119 const auto current_time = GetTime<std::chrono::microseconds>();
5121 LOCK(peer->m_addr_token_bucket_mutex);
5124 const auto time_diff =
5125 std::max(current_time - peer->m_addr_token_timestamp, 0us);
5126 const double increment =
5128 peer->m_addr_token_bucket =
5129 std::min<double>(peer->m_addr_token_bucket + increment,
5133 peer->m_addr_token_timestamp = current_time;
5135 const bool rate_limited =
5137 uint64_t num_proc = 0;
5138 uint64_t num_rate_limit = 0;
5139 Shuffle(vAddr.begin(), vAddr.end(), m_rng);
5141 if (interruptMsgProc) {
5146 LOCK(peer->m_addr_token_bucket_mutex);
5148 if (peer->m_addr_token_bucket < 1.0) {
5154 peer->m_addr_token_bucket -= 1.0;
5167                 addr.nTime > current_a_time + 10min) {
5168                 addr.nTime = current_a_time - 5 * 24h;
5170             AddAddressKnown(*peer, addr);
5179             if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent &&
5182                 RelayAddress(pfrom.GetId(), addr, fReachable);
5186             vAddrOk.push_back(addr);
5189         peer->m_addr_processed += num_proc;
5190         peer->m_addr_rate_limited += num_rate_limit;
5192                  "Received addr: %u addresses (%u processed, %u rate-limited) "
5194                  vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
5196         m_addrman.Add(vAddrOk, pfrom.addr, 2h);
5197         if (vAddr.size() < 1000) {
5198             peer->m_getaddr_sent = false;
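The per-peer addr accounting above is a token bucket: it refills in proportion to elapsed time, is capped, and each accepted address spends one token. A self-contained sketch, with the refill rate and cap supplied by the caller rather than the constants used in this file:

    #include <algorithm>
    #include <chrono>

    struct AddrTokenBucket {
        double tokens{1.0};                       // one free address on connect
        std::chrono::microseconds last_update{0};

        // Refill in proportion to elapsed time, cap the bucket, then try to
        // spend one token for the address being processed.
        bool Consume(std::chrono::microseconds now, double rate_per_s, double cap) {
            const auto elapsed = std::max(now - last_update, std::chrono::microseconds{0});
            tokens = std::min(tokens + std::chrono::duration<double>(elapsed).count() * rate_per_s, cap);
            last_update = now;
            if (tokens < 1.0) return false;  // rate limited: count and drop
            tokens -= 1.0;
            return true;
        }
    };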
5205 "addrfetch connection completed peer=%d; disconnecting\n",
5213         peer->m_wants_addrv2 = true;
5218         peer->m_prefers_headers = true;
5223         bool sendcmpct_hb{false};
5224         uint64_t sendcmpct_version{0};
5225         vRecv >> sendcmpct_hb >> sendcmpct_version;
5232         CNodeState *nodestate = State(pfrom.GetId());
5233         nodestate->m_provides_cmpctblocks = true;
5234 nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
5243 std::vector<CInv> vInv;
5246             Misbehaving(*peer, 20,
5247                         strprintf("inv message size = %u", vInv.size()));
5251 const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
5253 const auto current_time{GetTime<std::chrono::microseconds>()};
5254 std::optional<BlockHash> best_block;
5256         auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
5258                      fAlreadyHave ? "have" : "new", pfrom.GetId());
5261         for (CInv &inv : vInv) {
5262             if (interruptMsgProc) {
5274                 const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
5275                 logInv(inv, fAlreadyHave);
5278                 UpdateBlockAvailability(pfrom.GetId(), hash);
5280                     !IsBlockRequested(hash)) {
5287 best_block = std::move(hash);
5295 const bool fAlreadyHave = AlreadyHaveProof(proofid);
5296 logInv(inv, fAlreadyHave);
5297 AddKnownProof(*peer, proofid);
5299 if (!fAlreadyHave && m_avalanche &&
5301 const bool preferred = isPreferredDownloadPeer(pfrom);
5303 LOCK(cs_proofrequest);
5304 AddProofAnnouncement(pfrom, proofid, current_time,
5313                 const bool fAlreadyHave =
5314                     AlreadyHaveTx(txid, true);
5315 logInv(inv, fAlreadyHave);
5317 AddKnownTx(*peer, txid);
5318 if (reject_tx_invs) {
5320 "transaction (%s) inv sent in violation of "
5321 "protocol, disconnecting peer=%d\n",
5326 .IsInitialBlockDownload()) {
5327 AddTxAnnouncement(pfrom, txid, current_time);
5334 "Unknown inv type \"%s\" received from peer=%d\n",
5351 if (state.fSyncStarted ||
5352 (!peer->m_inv_triggered_getheaders_before_sync &&
5353 *best_block != m_last_block_inv_triggering_headers_sync)) {
5354             if (MaybeSendGetHeaders(
5355                     pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
5357                          m_chainman.m_best_header->nHeight,
5358                          best_block->ToString(), pfrom.GetId());
5360                 if (!state.fSyncStarted) {
5361                     peer->m_inv_triggered_getheaders_before_sync = true;
5365 m_last_block_inv_triggering_headers_sync = *best_block;
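The pre-sync throttle above allows at most one inv-triggered getheaders before header sync starts, and never twice for the same announced hash. A compact sketch that approximately mirrors the condition, with std::string standing in for BlockHash:

    #include <string>

    using BlockHashStr = std::string;  // stand-in for BlockHash

    struct InvHeadersThrottle {
        bool triggered_before_sync{false};
        BlockHashStr last_triggering_inv;

        // Mirrors the condition above: always fetch once sync has started;
        // before that, fetch at most once and never twice for the same hash.
        bool ShouldFetchHeaders(const BlockHashStr &best_block, bool sync_started) {
            if (!(sync_started ||
                  (!triggered_before_sync && best_block != last_triggering_inv))) {
                return false;
            }
            if (!sync_started) triggered_before_sync = true;
            last_triggering_inv = best_block;
            return true;
        }
    };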
5374 std::vector<CInv> vInv;
5377             Misbehaving(*peer, 20,
5378                         strprintf("getdata message size = %u", vInv.size()));
5383                  vInv.size(), pfrom.GetId());
5385 if (vInv.size() > 0) {
5391 LOCK(peer->m_getdata_requests_mutex);
5392 peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
5393 vInv.begin(), vInv.end());
5394 ProcessGetData(config, pfrom, *peer, interruptMsgProc);
5403 vRecv >> locator >> hashStop;
5407 "getblocks locator size %lld > %d, disconnect peer=%d\n",
5421 std::shared_ptr<const CBlock> a_recent_block;
5423 LOCK(m_most_recent_block_mutex);
5424 a_recent_block = m_most_recent_block;
5428 state, a_recent_block, m_avalanche)) {
5446                  (pindex ? pindex->nHeight : -1),
5449         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5458 const int nPrunedBlocksLikelyToHave =
5462 (!pindex->nStatus.hasData() ||
5464 nPrunedBlocksLikelyToHave)) {
5467 " getblocks stopping, pruned or too old block at %d %s\n",
5472                       peer->m_block_inv_mutex,
5473                       peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
5474 if (--nLimit <= 0) {
5480 peer->m_continuation_block = pindex->GetBlockHash();
5492 std::shared_ptr<const CBlock> recent_block;
5494 LOCK(m_most_recent_block_mutex);
5495             if (m_most_recent_block_hash == req.blockhash) {
5496 recent_block = m_most_recent_block;
5501 SendBlockTransactions(pfrom, *peer, *recent_block, req);
5510 if (!pindex || !pindex->nStatus.hasData()) {
5513 "Peer %d sent us a getblocktxn for a block we don't have\n",
5525 SendBlockTransactions(pfrom, *peer, block, req);
5538 "Peer %d sent us a getblocktxn for a block > %i deep\n",
5543 WITH_LOCK(peer->m_getdata_requests_mutex,
5544 peer->m_getdata_requests.push_back(inv));
5553 vRecv >> locator >> hashStop;
5557 "getheaders locator size %lld > %d, disconnect peer=%d\n",
5566 "Ignoring getheaders from peer=%d while importing/reindexing\n",
5580         if (m_chainman.ActiveTip() == nullptr ||
5585 "Ignoring getheaders from peer=%d because active chain "
5586 "has too little work; sending empty response\n",
5591 std::vector<CBlock>()));
5595         CNodeState *nodestate = State(pfrom.GetId());
5604 if (!BlockRequestAllowed(pindex)) {
5606 "%s: ignoring request from peer=%i for old block "
5607 "header that isn't in the main chain\n",
5608                      __func__, pfrom.GetId());
5622 std::vector<CBlock> vHeaders;
5625                  (pindex ? pindex->nHeight : -1),
5628         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5630 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
5647 nodestate->pindexBestHeaderSent =
5655 if (RejectIncomingTxs(pfrom)) {
5657 "transaction sent in violation of protocol peer=%d\n",
5673 const CTransaction &tx = *ptx;
5674 const TxId &txid = tx.GetId();
5675 AddKnownTx(*peer, txid);
5677         bool shouldReconcileTx{false};
5681         m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
5683         if (AlreadyHaveTx(txid, true)) {
5689                 if (!m_mempool.exists(tx.GetId())) {
5691                              "Not relaying non-mempool transaction %s from "
5692                              "forcerelay peer=%d\n",
5693                              tx.GetId().ToString(), pfrom.GetId());
5695                     LogPrintf("Force relaying tx %s from peer=%d\n",
5696                               tx.GetId().ToString(), pfrom.GetId());
5697 RelayTransaction(tx.GetId());
5701 if (m_recent_rejects_package_reconsiderable.contains(txid)) {
5709 "found tx %s in reconsiderable rejects, looking for "
5710 "child in orphanage\n",
5712             if (auto package_to_validate{
5713                     Find1P1CPackage(ptx, pfrom.GetId())}) {
5716 package_to_validate->m_txns,
5719 "package evaluation for %s: %s (%s)\n",
5720 package_to_validate->ToString(),
5722                              ? "package accepted"
5723                              : "package rejected",
5725                 ProcessPackageResult(package_to_validate.value(),
5754         ProcessValidTx(pfrom.GetId(), ptx);
5760         bool fRejectedParents = false;
5764         std::vector<TxId> unique_parents;
5765         unique_parents.reserve(tx.vin.size());
5766         for (const CTxIn &txin : tx.vin) {
5769             unique_parents.push_back(txin.prevout.GetTxId());
5771 std::sort(unique_parents.begin(), unique_parents.end());
5772 unique_parents.erase(
5773 std::unique(unique_parents.begin(), unique_parents.end()),
5774 unique_parents.end());
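The parent set above is deduplicated with the usual sort/unique/erase idiom; a tiny self-contained example with ints standing in for TxIds:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
        std::vector<int> parents{7, 3, 7, 1, 3};
        std::sort(parents.begin(), parents.end());
        parents.erase(std::unique(parents.begin(), parents.end()), parents.end());
        assert((parents == std::vector<int>{1, 3, 7}));  // sorted, duplicates gone
    }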
5782 std::optional<TxId> rejected_parent_reconsiderable;
5783         for (const TxId &parent_txid : unique_parents) {
5784             if (m_recent_rejects.contains(parent_txid)) {
5785                 fRejectedParents = true;
5789             if (m_recent_rejects_package_reconsiderable.contains(
5791                 !m_mempool.exists(parent_txid)) {
5796                 if (rejected_parent_reconsiderable.has_value()) {
5797                     fRejectedParents = true;
5800                 rejected_parent_reconsiderable = parent_txid;
5803         if (!fRejectedParents) {
5804             const auto current_time{
5805                 GetTime<std::chrono::microseconds>()};
5807             for (const TxId &parent_txid : unique_parents) {
5809                 AddKnownTx(*peer, parent_txid);
5813                 if (!AlreadyHaveTx(parent_txid,
5815                     AddTxAnnouncement(pfrom, parent_txid, current_time);
5821             if (unsigned int nEvicted =
5825             if (orphanage.AddTx(ptx,
5827                 AddToCompactExtraTransactions(ptx);
5830                 m_opts.max_orphan_txs, m_rng);
5833 "orphanage overflow, removed %u tx\n",
5839 m_txrequest.ForgetInvId(tx.GetId());
5843 "not keeping orphan with rejected parents %s\n",
5844 tx.GetId().ToString());
5847 m_recent_rejects.insert(tx.GetId());
5848 m_txrequest.ForgetInvId(tx.GetId());
5852         ProcessInvalidTx(pfrom.GetId(), ptx, state,
5862 "tx %s failed but reconsiderable, looking for child in "
5865             if (auto package_to_validate{
5866                     Find1P1CPackage(ptx, pfrom.GetId())}) {
5869                     package_to_validate->m_txns, false)};
5871 "package evaluation for %s: %s (%s)\n",
5872 package_to_validate->ToString(),
5874                              ? "package accepted"
5875                              : "package rejected",
5877 ProcessPackageResult(package_to_validate.value(),
5886 m_txrequest.ForgetInvId(tx.GetId());
5888 unsigned int nEvicted{0};
5895 m_opts.max_conflicting_txs, m_rng);
5896                 shouldReconcileTx = conflicting.HaveTx(ptx->GetId());
5901                          "conflicting pool overflow, removed %u tx\n",
5907         if (m_avalanche && m_avalanche->m_preConsensus && shouldReconcileTx) {
5918 "Unexpected cmpctblock message received from peer %d\n",
5925 vRecv >> cmpctblock;
5926         } catch (std::ios_base::failure &e) {
5928             Misbehaving(*peer, 100, "cmpctblock-bad-indexes");
5932         bool received_new_header = false;
5944                 MaybeSendGetHeaders(
5945                     pfrom, GetLocator(m_chainman.m_best_header), *peer);
5951 GetAntiDoSWorkThreshold()) {
5955 "Ignoring low-work compact block from peer %d\n",
5962                 received_new_header = true;
5972                 MaybePunishNodeForBlock(pfrom.GetId(), state,
5974                                         "invalid header via cmpctblock");
5983         bool fProcessBLOCKTXN = false;
5989         bool fRevertToHeaderProcessing = false;
5993         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5994         bool fBlockReconstructed = false;
6002             CNodeState *nodestate = State(pfrom.GetId());
6006             if (received_new_header &&
6009                 nodestate->m_last_block_announcement = GetTime();
6012             if (pindex->nStatus.hasData()) {
6019             size_t already_in_flight =
6020                 std::distance(range_flight.first, range_flight.second);
6021             bool requested_block_from_this_peer{false};
6025             bool first_in_flight =
6026                 already_in_flight == 0 ||
6027                 (range_flight.first->second.first == pfrom.GetId());
6029             while (range_flight.first != range_flight.second) {
6030                 if (range_flight.first->second.first == pfrom.GetId()) {
6031                     requested_block_from_this_peer = true;
6034                 range_flight.first++;
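The bookkeeping above asks three questions about a block hash: how many requests are in flight, whether this peer was the earliest requester, and whether this peer requested it at all. A sketch over a plain std::multimap with simplified types (the real map stores iterators into per-node download queues):

    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <map>
    #include <string>

    using NodeIdT = int64_t;
    using BlockHashStr = std::string;  // stand-in

    struct InFlightInfo {
        std::size_t already_in_flight{0};
        bool first_in_flight{false};
        bool requested_from_this_peer{false};
    };

    InFlightInfo QueryInFlight(const std::multimap<BlockHashStr, NodeIdT> &in_flight,
                               const BlockHashStr &hash, NodeIdT peer) {
        InFlightInfo info;
        auto range = in_flight.equal_range(hash);
        info.already_in_flight = std::distance(range.first, range.second);
        // First in flight: nobody asked yet, or this peer is the earliest recorded requester.
        info.first_in_flight =
            info.already_in_flight == 0 || range.first->second == peer;
        for (auto it = range.first; it != range.second; ++it) {
            if (it->second == peer) {
                info.requested_from_this_peer = true;
                break;
            }
        }
        return info;
    }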
6043 if (requested_block_from_this_peer) {
6047 std::vector<CInv> vInv(1);
6057 if (!already_in_flight && !CanDirectFetch()) {
6065 nodestate->vBlocksInFlight.size() <
6067 requested_block_from_this_peer) {
6068                 std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
6069                 if (!BlockRequested(config, pfrom.GetId(), *pindex,
6071                 if (!(*queuedBlockIt)->partialBlock) {
6073                         ->partialBlock.reset(
6080                               "we were already syncing!\n");
6086                     *(*queuedBlockIt)->partialBlock;
6088                 partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
6094                     Misbehaving(*peer, 100, "invalid compact block");
6097                     if (first_in_flight) {
6100                         std::vector<CInv> vInv(1);
6115                 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
6126                     fProcessBLOCKTXN = true;
6127                 } else if (first_in_flight) {
6136 IsBlockRequestedFromOutbound(
6162 tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
6167 std::vector<CTransactionRef> dummy;
6168 status = tempBlock.FillBlock(*pblock, dummy);
6170                     fBlockReconstructed = true;
6174                 if (requested_block_from_this_peer) {
6178                     std::vector<CInv> vInv(1);
6186                     fRevertToHeaderProcessing = true;
6191         if (fProcessBLOCKTXN) {
6193                                   blockTxnMsg, time_received, interruptMsgProc);
6196         if (fRevertToHeaderProcessing) {
6202             return ProcessHeadersMessage(config, pfrom, *peer,
6207         if (fBlockReconstructed) {
6212                 mapBlockSource.emplace(pblock->GetHash(),
6213                                        std::make_pair(pfrom.GetId(), false));
6224             ProcessBlock(config, pfrom, pblock, true,
6233 RemoveBlockRequest(pblock->GetHash(), std::nullopt);
6243 "Unexpected blocktxn message received from peer %d\n",
6251 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6252 bool fBlockRead =
false;
6256         auto range_flight = mapBlocksInFlight.equal_range(resp.blockhash);
6257         size_t already_in_flight =
6258             std::distance(range_flight.first, range_flight.second);
6259         bool requested_block_from_this_peer{false};
6263         bool first_in_flight =
6264             already_in_flight == 0 ||
6265             (range_flight.first->second.first == pfrom.GetId());
6267         while (range_flight.first != range_flight.second) {
6268             auto [node_id, block_it] = range_flight.first->second;
6269             if (node_id == pfrom.GetId() && block_it->partialBlock) {
6270                 requested_block_from_this_peer = true;
6273 range_flight.first++;
6276 if (!requested_block_from_this_peer) {
6278 "Peer %d sent us block transactions for block "
6279 "we weren't expecting\n",
6285 *range_flight.first->second.second->partialBlock;
6293 "invalid compact block/non-matching block transactions");
6296 if (first_in_flight) {
6298 std::vector<CInv> invs;
6306 "Peer %d sent us a compact block but it failed to "
6307 "reconstruct, waiting on first download to complete\n",
6340                                    std::make_pair(pfrom.GetId(), false));
6351             ProcessBlock(config, pfrom, pblock, true,
6361 "Unexpected headers message received from peer %d\n",
6368 peer->m_last_getheaders_timestamp = {};
6370 std::vector<CBlockHeader> headers;
6376             Misbehaving(*peer, 20,
6377                         strprintf("too-many-headers: headers message size = %u",
6381         headers.resize(nCount);
6382         for (unsigned int n = 0; n < nCount; n++) {
6383             vRecv >> headers[n];
6388         ProcessHeadersMessage(config, pfrom, *peer, std::move(headers),
6394         if (m_headers_presync_should_signal.exchange(false)) {
6395 HeadersPresyncStats stats;
6397 LOCK(m_headers_presync_mutex);
6399 m_headers_presync_stats.find(m_headers_presync_bestpeer);
6400 if (it != m_headers_presync_stats.end()) {
6406 stats.first, stats.second->first, stats.second->second);
6417 "Unexpected block message received from peer %d\n",
6422 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6426                  pblock->GetHash().ToString(), pfrom.GetId());
6432         bool forceProcessing =
6435         const BlockHash hash = pblock->GetHash();
6436         bool min_pow_checked = false;
6441             forceProcessing = IsBlockRequested(hash);
6442             RemoveBlockRequest(hash, pfrom.GetId());
6446             mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
6454                 GetAntiDoSWorkThreshold()) {
6455                 min_pow_checked = true;
6458 ProcessBlock(config, pfrom, pblock, forceProcessing, min_pow_checked);
6468 if (pfrom.m_avalanche_pubkey.has_value()) {
6471 "Ignoring avahello from peer %d: already in our node set\n",
6477 vRecv >> delegation;
6484             if (!delegation.verify(state, pubkey)) {
6485                 Misbehaving(*peer, 100, "invalid-delegation");
6488             pfrom.m_avalanche_pubkey = std::move(pubkey);
6491             sighasher << delegation.getId();
6499             if (!(*pfrom.m_avalanche_pubkey)
6500                      .VerifySchnorr(sighasher.GetHash(), sig)) {
6501                 Misbehaving(*peer, 100, "invalid-avahello-signature");
6508 if (!AlreadyHaveProof(proofid)) {
6509 const bool preferred = isPreferredDownloadPeer(pfrom);
6510 LOCK(cs_proofrequest);
6511 AddProofAnnouncement(pfrom, proofid,
6512 GetTime<std::chrono::microseconds>(),
6531 WITH_LOCK(peer->m_addr_token_bucket_mutex,
6532 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
6534         if (peer->m_proof_relay &&
6538             peer->m_proof_relay->compactproofs_requested = true;
6549 const auto now = Now<SteadyMilliseconds>();
6555 last_poll + std::chrono::milliseconds(m_opts.avalanche_cooldown)) {
6557 "Ignoring repeated avapoll from peer %d: cooldown not "
6572 strprintf(
"too-many-ava-poll: poll message size = %u", nCount));
6576 std::vector<avalanche::Vote> votes;
6577 votes.reserve(nCount);
6579         for (unsigned int n = 0; n < nCount; n++) {
6587             if (!quorum_established) {
6588                 votes.emplace_back(vote, inv.hash);
6595                     if (m_opts.avalanche_preconsensus) {
6609                     if (m_opts.avalanche_staking_preconsensus) {
6616                              "poll inv type %d unknown from peer=%d\n",
6621             votes.emplace_back(vote, inv.hash);
6647         if (!pfrom.m_avalanche_pubkey.has_value() ||
6648             !(*pfrom.m_avalanche_pubkey)
6649                  .VerifySchnorr(verifier.GetHash(), sig)) {
6650             Misbehaving(*peer, 100, "invalid-ava-response-signature");
6655         auto now = GetTime<std::chrono::seconds>();
6657         std::vector<avalanche::VoteItemUpdate> updates;
6664             Misbehaving(*peer, banscore, error);
6680             Misbehaving(*peer, 2, error);
6692         auto logVoteUpdate = [](const auto &voteUpdate,
6693                                 const std::string &voteItemTypeStr,
6694                                 const auto &voteItemId) {
6695             std::string voteOutcome;
6696             bool alwaysPrint = false;
6697             switch (voteUpdate.getStatus()) {
6699                     voteOutcome = "invalidated";
6703                     voteOutcome = "rejected";
6706                     voteOutcome = "accepted";
6709                     voteOutcome = "finalized";
6713                     voteOutcome = "stalled";
6722                 LogPrintf("Avalanche %s %s %s\n", voteOutcome, voteItemTypeStr,
6723                           voteItemId.ToString());
6727                          voteItemTypeStr, voteItemId.ToString());
6731         bool shouldActivateBestChain = false;
6733         for (const auto &u : updates) {
6738             if (auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
6742                 logVoteUpdate(u, "proof", proofid);
6744 auto rejectionMode =
6746 auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
6747 switch (u.getStatus()) {
6763 return pm.rejectProof(proofid,
6767 "ERROR: Failed to reject proof: %s\n",
6772 nextCooldownTimePoint += std::chrono::seconds(
6773 m_opts.avalanche_peer_replacement_cooldown);
6779 avalanche::PeerManager::
6780 RegistrationMode::FORCE_ACCEPT);
6783 [&](const avalanche::Peer &peer) {
6784 pm.updateNextPossibleConflictTime(
6786 nextCooldownTimePoint);
6787 if (u.getStatus() ==
6788 avalanche::VoteStatus::
6790 pm.setFinalized(peer.peerid);
6798 "ERROR: Failed to accept proof: %s\n",
6805         auto getBlockFromIndex = [this](const CBlockIndex *pindex) {
6808             std::shared_ptr<const CBlock> pblock = WITH_LOCK(
6809                 m_most_recent_block_mutex, return m_most_recent_block);
6811             if (!pblock || pblock->GetHash() != pindex->GetBlockHash()) {
6812                 std::shared_ptr<CBlock> pblockRead =
6813                     std::make_shared<CBlock>();
6816                     assert(!"cannot load block from disk");
6818                 pblock = pblockRead;
6823             if (auto pitem = std::get_if<const CBlockIndex *>(&item)) {
6826                 shouldActivateBestChain = true;
6830 switch (u.getStatus()) {
6835                             LogPrintf("ERROR: Database error: %s\n",
6844                             LogPrintf("ERROR: Database error: %s\n",
6849 auto pblock = getBlockFromIndex(pindex);
6865 if (m_opts.avalanche_preconsensus) {
6866 auto pblock = getBlockFromIndex(pindex);
6874 pindex, *m_avalanche);
6884 if (!m_opts.avalanche_preconsensus) {
6888             if (auto pitem = std::get_if<const CTransactionRef>(&item)) {
6892                 const TxId &txid = tx->GetId();
6893                 logVoteUpdate(u, "tx", txid);
6895                 switch (u.getStatus()) {
6902                         if (m_mempool.exists(txid)) {
6906 std::vector<CTransactionRef> conflictingTxs =
6912 if (conflictingTxs.size() > 0) {
6923                                 for (const auto &conflictingTx :
6926                                         conflictingTx->GetId());
6954 return conflicting.HaveTx(txid);
6957 std::vector<CTransactionRef>
6958 mempool_conflicting_txs;
6959                             for (const auto &txin : tx->vin) {
6964                                     mempool_conflicting_txs.push_back(
6965                                         std::move(conflict));
6974 [&txid, &mempool_conflicting_txs](
6979 if (mempool_conflicting_txs.size() >
6982 mempool_conflicting_txs[0],
6991                         auto it = m_mempool.GetIter(txid);
6992 if (!it.has_value()) {
6995 "Error: finalized tx (%s) is not in the "
7008 std::vector<CTransactionRef>
7011                         for (const auto &conflictingTx :
7013                             m_recent_rejects.insert(
7014                                 conflictingTx->GetId());
7016                                 conflictingTx->GetId());
7029 if (shouldActivateBestChain) {
7032                     state, nullptr, m_avalanche)) {
7047 ReceivedAvalancheProof(pfrom, *peer, proof);
7056         if (peer->m_proof_relay == nullptr) {
7060 peer->m_proof_relay->lastSharedProofsUpdate =
7061 GetTime<std::chrono::seconds>();
7063 peer->m_proof_relay->sharedProofs =
7069 peer->m_proof_relay->sharedProofs);
7080         if (peer->m_proof_relay == nullptr) {
7085         if (!peer->m_proof_relay->compactproofs_requested) {
7089         peer->m_proof_relay->compactproofs_requested = false;
7093             vRecv >> compactProofs;
7094         } catch (std::ios_base::failure &e) {
7096             Misbehaving(*peer, 100, "avaproofs-bad-indexes");
7101 std::set<uint32_t> prefilledIndexes;
7103 if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
7133         auto shortIdProcessor =
7137         if (shortIdProcessor.hasOutOfBoundIndex()) {
7140             Misbehaving(*peer, 100, "avaproofs-bad-indexes");
7143         if (!shortIdProcessor.isEvenlyDistributed()) {
7148         std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
7155                 shortIdProcessor.matchKnownItem(shortid, peer.proof);
7162                     remoteProofsStatus.emplace_back(peer.getProofId(),
7173         for (size_t i = 0; i < compactProofs.size(); i++) {
7174             if (shortIdProcessor.getItem(i) == nullptr) {
7191 return pfrom.m_avalanche_pubkey.has_value())) {
7194             for (const auto &[proofid, present] : remoteProofsStatus) {
7204         if (peer->m_proof_relay == nullptr) {
7211         auto requestedIndiceIt = proofreq.indices.begin();
7212         uint32_t treeIndice = 0;
7213         peer->m_proof_relay->sharedProofs.forEachLeaf([&](const auto &proof) {
7214             if (requestedIndiceIt == proofreq.indices.end()) {
7219             if (treeIndice++ == *requestedIndiceIt) {
7222                 requestedIndiceIt++;
7228 peer->m_proof_relay->sharedProofs = {};
7241 "Ignoring \"getaddr\" from %s connection. peer=%d\n",
7248 Assume(SetupAddressRelay(pfrom, *peer));
7252 if (peer->m_getaddr_recvd) {
7257         peer->m_getaddr_recvd = true;
7259         peer->m_addrs_to_send.clear();
7260         std::vector<CAddress> vAddr;
7261         const size_t maxAddrToSend = m_opts.max_addr_to_send;
7269         for (const CAddress &addr : vAddr) {
7270             PushAddress(*peer, addr);
7276 auto now = GetTime<std::chrono::seconds>();
7286 if (!SetupAddressRelay(pfrom, *peer)) {
7288 "Ignoring getavaaddr message from %s peer=%d\n",
7293         auto availabilityScoreComparator = [](const CNode *lhs,
7296             double scoreRhs = rhs->getAvailabilityScore();
7298             if (scoreLhs != scoreRhs) {
7299                 return scoreLhs > scoreRhs;
7308         std::set<const CNode *, decltype(availabilityScoreComparator)> avaNodes(
7309             availabilityScoreComparator);
7316             avaNodes.insert(pnode);
7317             if (avaNodes.size() > m_opts.max_addr_to_send) {
7318                 avaNodes.erase(std::prev(avaNodes.end()));
7322         peer->m_addrs_to_send.clear();
7323         for (const CNode *pnode : avaNodes) {
7324             PushAddress(*peer, pnode->addr);
7335 "mempool request with bloom filters disabled, "
7336 "disconnect peer=%d\n",
7347 "mempool request with bandwidth limit reached, "
7348 "disconnect peer=%d\n",
7355         if (auto tx_relay = peer->GetTxRelay()) {
7356             LOCK(tx_relay->m_tx_inventory_mutex);
7357             tx_relay->m_send_mempool = true;
7387 const auto ping_end = time_received;
7390         bool bPingFinished = false;
7391         std::string sProblem;
7393         if (nAvail >= sizeof(nonce)) {
7398             if (peer->m_ping_nonce_sent != 0) {
7399                 if (nonce == peer->m_ping_nonce_sent) {
7402                     bPingFinished = true;
7403                     const auto ping_time = ping_end - peer->m_ping_start.load();
7404                     if (ping_time.count() >= 0) {
7409                         sProblem = "Timing mishap";
7413                     sProblem = "Nonce mismatch";
7417                         bPingFinished = true;
7418                         sProblem = "Nonce zero";
7422                 sProblem = "Unsolicited pong without ping";
7427             bPingFinished = true;
7428             sProblem = "Short payload";
7431         if (!(sProblem.empty())) {
7433                      "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
7434                      pfrom.GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
7437         if (bPingFinished) {
7438             peer->m_ping_nonce_sent = 0;
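The pong handling above is a small state machine over the outstanding nonce; a self-contained sketch that returns the same problem categories logged here, with simplified fields:

    #include <chrono>
    #include <cstdint>
    #include <string>

    struct PingState {
        uint64_t nonce_sent{0};
        std::chrono::microseconds start{0};
    };

    // Empty string on success, otherwise one of the problem categories above.
    std::string HandlePong(PingState &st, uint64_t nonce, std::chrono::microseconds now) {
        if (st.nonce_sent == 0) return "Unsolicited pong without ping";
        if (nonce == 0) {            // canonical zero-nonce pong: give up on this round
            st.nonce_sent = 0;
            return "Nonce zero";
        }
        if (nonce != st.nonce_sent) return "Nonce mismatch";
        st.nonce_sent = 0;           // round trip complete
        return (now - st.start).count() >= 0 ? "" : "Timing mishap";
    }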
7446 "filterload received despite not offering bloom services "
7447 "from peer=%d; disconnecting\n",
7457             Misbehaving(*peer, 100, "too-large bloom filter");
7458         } else if (auto tx_relay = peer->GetTxRelay()) {
7460             LOCK(tx_relay->m_bloom_filter_mutex);
7461             tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
7462             tx_relay->m_relay_txs = true;
7472 "filteradd received despite not offering bloom services "
7473 "from peer=%d; disconnecting\n",
7478 std::vector<uint8_t> vData;
7487         } else if (auto tx_relay = peer->GetTxRelay()) {
7488             LOCK(tx_relay->m_bloom_filter_mutex);
7489             if (tx_relay->m_bloom_filter) {
7490                 tx_relay->m_bloom_filter->insert(vData);
7498             Misbehaving(*peer, 100, "bad filteradd message");
7506                      "filterclear received despite not offering bloom services "
7507                      "from peer=%d; disconnecting\n",
7512         auto tx_relay = peer->GetTxRelay();
7518         LOCK(tx_relay->m_bloom_filter_mutex);
7519         tx_relay->m_bloom_filter = nullptr;
7520         tx_relay->m_relay_txs = true;
7529 vRecv >> newFeeFilter;
7531         if (auto tx_relay = peer->GetTxRelay()) {
7532             tx_relay->m_fee_filter_received = newFeeFilter;
7541 ProcessGetCFilters(pfrom, *peer, vRecv);
7546 ProcessGetCFHeaders(pfrom, *peer, vRecv);
7551 ProcessGetCFCheckPt(pfrom, *peer, vRecv);
7556 std::vector<CInv> vInv;
7562             for (CInv &inv : vInv) {
7568                     m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
7572 LOCK(cs_proofrequest);
7573 m_proofrequest.ReceivedResponse(
7587 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer) {
7589         LOCK(peer.m_misbehavior_mutex);
7592         if (!peer.m_should_discourage) {
7596         peer.m_should_discourage = false;
7602         LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
7608         LogPrintf("Warning: not punishing manually connected peer %d!\n",
7617 "Warning: disconnecting but not discouraging %s peer %d!\n",
7634 bool PeerManagerImpl::ProcessMessages(const Config &config, CNode *pfrom,
7635                                       std::atomic<bool> &interruptMsgProc) {
7646     bool fMoreWork = false;
7648     PeerRef peer = GetPeerRef(pfrom->GetId());
7649     if (peer == nullptr) {
7654 LOCK(peer->m_getdata_requests_mutex);
7655 if (!peer->m_getdata_requests.empty()) {
7656 ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
7660 const bool processed_orphan = ProcessOrphanTx(config, *peer);
7666 if (processed_orphan) {
7673 LOCK(peer->m_getdata_requests_mutex);
7674 if (!peer->m_getdata_requests.empty()) {
7684 std::list<CNetMessage> msgs;
7687 if (pfrom->vProcessMsg.empty()) {
7691 msgs.splice(msgs.begin(), pfrom->vProcessMsg,
7692 pfrom->vProcessMsg.begin());
7696 fMoreWork = !pfrom->vProcessMsg.empty();
7702              msg.m_recv.size(), msg.m_recv.data());
7704 if (m_opts.capture_messages) {
7712 if (!msg.m_valid_netmagic) {
7714 "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
7728 if (!msg.m_valid_header) {
7736 if (!msg.m_valid_checksum) {
7748         ProcessMessage(config, *pfrom, msg.m_type, vRecv, msg.m_time,
7750 if (interruptMsgProc) {
7755 LOCK(peer->m_getdata_requests_mutex);
7756 if (!peer->m_getdata_requests.empty()) {
7765 return orphanage.HaveTxToReconsider(peer->m_id);
7769     } catch (const std::exception &e) {
7772                   e.what(), typeid(e).name());
7781 void PeerManagerImpl::ConsiderEviction(CNode &pto, Peer &peer,
7782                                        std::chrono::seconds time_in_seconds) {
7785     CNodeState &state = *State(pto.GetId());
7789         state.fSyncStarted) {
7796         if (state.pindexBestKnownBlock != nullptr &&
7797             state.pindexBestKnownBlock->nChainWork >=
7799             if (state.m_chain_sync.m_timeout != 0s) {
7800                 state.m_chain_sync.m_timeout = 0s;
7801                 state.m_chain_sync.m_work_header = nullptr;
7802                 state.m_chain_sync.m_sent_getheaders = false;
7804         } else if (state.m_chain_sync.m_timeout == 0s ||
7805                    (state.m_chain_sync.m_work_header != nullptr &&
7806                     state.pindexBestKnownBlock != nullptr &&
7807                     state.pindexBestKnownBlock->nChainWork >=
7808                         state.m_chain_sync.m_work_header->nChainWork)) {
7814             state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
7815             state.m_chain_sync.m_sent_getheaders = false;
7816         } else if (state.m_chain_sync.m_timeout > 0s &&
7817                    time_in_seconds > state.m_chain_sync.m_timeout) {
7822             if (state.m_chain_sync.m_sent_getheaders) {
7825                           "Disconnecting outbound peer %d for old chain, best known "
7828                           state.pindexBestKnownBlock != nullptr
7829                               ? state.pindexBestKnownBlock->GetBlockHash().ToString()
7833                 assert(state.m_chain_sync.m_work_header);
7838                 MaybeSendGetHeaders(
7839                     pto, GetLocator(state.m_chain_sync.m_work_header->pprev),
7843                          "sending getheaders to outbound peer=%d to verify chain "
7844                          "work (current best known block:%s, benchmark blockhash: "
7847                          state.pindexBestKnownBlock != nullptr
7848                              ? state.pindexBestKnownBlock->GetBlockHash().ToString()
7850                          state.m_chain_sync.m_work_header->GetBlockHash()
7852                 state.m_chain_sync.m_sent_getheaders = true;
7859             state.m_chain_sync.m_timeout =
7866 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
7875         std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
7876                                                 next_youngest_peer{-1, 0};
7882             if (pnode->GetId() > youngest_peer.first) {
7883                 next_youngest_peer = youngest_peer;
7884                 youngest_peer.first = pnode->GetId();
7885                 youngest_peer.second = pnode->m_last_block_time;
7889         NodeId to_disconnect = youngest_peer.first;
7890         if (youngest_peer.second > next_youngest_peer.second) {
7893             to_disconnect = next_youngest_peer.first;
7905                 CNodeState *node_state = State(pnode->GetId());
7906                 if (node_state == nullptr ||
7908                      node_state->vBlocksInFlight.empty())) {
7911                              "disconnecting extra block-relay-only peer=%d "
7912                              "(last block received at time %d)\n",
7919                              "keeping block-relay-only peer=%d chosen for eviction "
7920                              "(connect time: %d, blocks_in_flight: %d)\n",
7922                              node_state->vBlocksInFlight.size());
7938 int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
7949             CNodeState *state = State(pnode->GetId());
7950             if (state == nullptr) {
7955             if (state->m_chain_sync.m_protect) {
7958             if (state->m_last_block_announcement < oldest_block_announcement ||
7959                 (state->m_last_block_announcement == oldest_block_announcement &&
7960                  pnode->GetId() > worst_peer)) {
7961                 worst_peer = pnode->GetId();
7962                 oldest_block_announcement = state->m_last_block_announcement;
7966         if (worst_peer == -1) {
7970         bool disconnected = m_connman.ForNode(
7978                 CNodeState &state = *State(pnode->GetId());
7980                     state.vBlocksInFlight.empty()) {
7982                              "disconnecting extra outbound peer=%d (last block "
7983                              "announcement received at time %d)\n",
7984                              pnode->GetId(), oldest_block_announcement);
7989                              "keeping outbound peer=%d chosen for eviction "
7990                              "(connect time: %d, blocks_in_flight: %d)\n",
7992                              state.vBlocksInFlight.size());
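The eviction pass above picks, among unprotected outbound peers, the one with the oldest last block announcement, breaking ties toward the highest node id. A condensed sketch with stand-in fields:

    #include <cstdint>
    #include <limits>
    #include <vector>

    struct OutboundPeer {
        int64_t id;
        int64_t last_block_announcement;  // unix time, 0 if never
        bool protected_from_eviction;     // e.g. the m_chain_sync.m_protect flag
    };

    // Returns -1 when no unprotected candidate exists.
    int64_t PickEvictionCandidate(const std::vector<OutboundPeer> &peers) {
        int64_t worst = -1;
        int64_t oldest = std::numeric_limits<int64_t>::max();
        for (const auto &p : peers) {
            if (p.protected_from_eviction) continue;
            if (p.last_block_announcement < oldest ||
                (p.last_block_announcement == oldest && p.id > worst)) {
                worst = p.id;
                oldest = p.last_block_announcement;
            }
        }
        return worst;
    }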
8007 void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
8010     auto now{GetTime<std::chrono::seconds>()};
8012     EvictExtraOutboundPeers(now);
8014     if (now > m_stale_tip_check_time) {
8020             LogPrintf("Potential stale tip detected, will try using extra "
8021                       "outbound peer (last tip update: %d seconds ago)\n",
8030     if (!m_initial_sync_finished && CanDirectFetch()) {
8032         m_initial_sync_finished = true;
8036 void PeerManagerImpl::MaybeSendPing(CNode &node_to, Peer &peer,
8037                                     std::chrono::microseconds now) {
8039             node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
8040         peer.m_ping_nonce_sent &&
8052     bool pingSend = false;
8054     if (peer.m_ping_queued) {
8059     if (peer.m_ping_nonce_sent == 0 &&
8068             nonce = GetRand<uint64_t>();
8069         } while (nonce == 0);
8070         peer.m_ping_queued = false;
8071         peer.m_ping_start = now;
8073             peer.m_ping_nonce_sent = nonce;
8079             peer.m_ping_nonce_sent = 0;
8085 void PeerManagerImpl::MaybeSendAddr(CNode &node, Peer &peer,
8086                                     std::chrono::microseconds current_time) {
8088     if (!peer.m_addr_relay_enabled) {
8092     LOCK(peer.m_addr_send_times_mutex);
8094         peer.m_next_local_addr_send < current_time) {
8101         if (peer.m_next_local_addr_send != 0us) {
8102             peer.m_addr_known->reset();
8105             CAddress local_addr{*local_service, peer.m_our_services,
8106                                 Now<NodeSeconds>()};
8107             PushAddress(peer, local_addr);
8114     if (current_time <= peer.m_next_addr_send) {
8118     peer.m_next_addr_send =
8121     const size_t max_addr_to_send = m_opts.max_addr_to_send;
8122     if (!Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
8125         peer.m_addrs_to_send.resize(max_addr_to_send);
8130     auto addr_already_known =
8133             bool ret = peer.m_addr_known->contains(addr.GetKey());
8135                 peer.m_addr_known->insert(addr.GetKey());
8139 peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
8140 peer.m_addrs_to_send.end(),
8141 addr_already_known),
8142 peer.m_addrs_to_send.end());
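The filtering above is the erase-remove idiom with a side effect: addresses the peer already knows are dropped, and the rest are recorded as known while they are kept. A sketch using std::unordered_set in place of the rolling bloom filter:

    #include <algorithm>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // Drop addresses the peer already knows; mark the survivors as now-known.
    void FilterAlreadyKnown(std::vector<std::string> &to_send,
                            std::unordered_set<std::string> &known) {
        auto already_known = [&known](const std::string &addr) {
            if (known.count(addr)) return true;  // peer has it: remove from batch
            known.insert(addr);                  // will be sent: remember it
            return false;
        };
        to_send.erase(std::remove_if(to_send.begin(), to_send.end(), already_known),
                      to_send.end());
    }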
8145     if (peer.m_addrs_to_send.empty()) {
8149     const char *msg_type;
8151     if (peer.m_wants_addrv2) {
8160                          .Make(make_flags, msg_type, peer.m_addrs_to_send));
8161     peer.m_addrs_to_send.clear();
8164     if (peer.m_addrs_to_send.capacity() > 40) {
8165         peer.m_addrs_to_send.shrink_to_fit();
8169 void PeerManagerImpl::MaybeSendSendHeaders(CNode &node, Peer &peer) {
8174     if (!peer.m_sent_sendheaders &&
8177         CNodeState &state = *State(node.GetId());
8178         if (state.pindexBestKnownBlock != nullptr &&
8179             state.pindexBestKnownBlock->nChainWork >
8187             peer.m_sent_sendheaders = true;
8192 void PeerManagerImpl::MaybeSendFeefilter(
8193     CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
8194     if (m_opts.ignore_incoming_txs) {
8218     static const Amount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
8219     if (peer.m_fee_filter_sent == MAX_FILTER) {
8222         peer.m_next_send_feefilter = 0us;
8225     if (current_time > peer.m_next_send_feefilter) {
8226         Amount filterToSend = m_fee_filter_rounder.round(currentFilter);
8230         if (filterToSend != peer.m_fee_filter_sent) {
8234             peer.m_fee_filter_sent = filterToSend;
8236         peer.m_next_send_feefilter =
8243                peer.m_next_send_feefilter &&
8244            (currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
8245             currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
8246         peer.m_next_send_feefilter =
8247             current_time + GetRandomDuration<std::chrono::microseconds>(
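The early-resend test above only fires when the current filter drifts outside roughly a one-third band around the last value sent; the same check with plain integers standing in for Amount:

    #include <cstdint>

    // True when the filter moved enough since the last send to justify an
    // off-schedule feefilter update (same 3/4 and 4/3 bounds as above).
    bool NeedsEarlyFeefilter(int64_t current_filter, int64_t last_sent) {
        return current_filter < 3 * last_sent / 4 ||
               current_filter > 4 * last_sent / 3;
    }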
8253 class CompareInvMempoolOrder {
8257     explicit CompareInvMempoolOrder(CTxMemPool *_mempool) : mp(_mempool) {}
8259     bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
8269 bool PeerManagerImpl::RejectIncomingTxs(const CNode &peer) const {
8278     if (m_opts.ignore_incoming_txs &&
8285 bool PeerManagerImpl::SetupAddressRelay(const CNode &node, Peer &peer) {
8289     if (node.IsBlockOnlyConn()) {
8293     if (!peer.m_addr_relay_enabled.exchange(true)) {
8297         peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
8303 bool PeerManagerImpl::SendMessages(const Config &config, CNode *pto) {
8306     PeerRef peer = GetPeerRef(pto->GetId());
8315     if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
8328     const auto current_time{GetTime<std::chrono::microseconds>()};
8333                  "addrfetch connection timeout; disconnecting peer=%d\n",
8339     MaybeSendPing(*pto, *peer, current_time);
8346     bool sync_blocks_and_headers_from_peer = false;
8348     MaybeSendAddr(*pto, *peer, current_time);
8350     MaybeSendSendHeaders(*pto, *peer);
8355         CNodeState &state = *State(pto->GetId());
8358         if (m_chainman.m_best_header == nullptr) {
8365         if (state.fPreferredDownload) {
8366             sync_blocks_and_headers_from_peer = true;
8377             if (m_num_preferred_download_peers == 0 ||
8378                 mapBlocksInFlight.empty()) {
8379                 sync_blocks_and_headers_from_peer = true;
8383         if (!state.fSyncStarted && CanServeBlocks(*peer) &&
8387             if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
8389                 const CBlockIndex *pindexStart = m_chainman.m_best_header;
8398                 if (pindexStart->pprev) {
8399                     pindexStart = pindexStart->pprev;
8401                 if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
8404                              "initial getheaders (%d) to peer=%d (startheight:%d)\n",
8406                              peer->m_starting_height);
8408                     state.fSyncStarted = true;
8409 peer->m_headers_sync_timeout =
8414 std::chrono::microseconds{
8416 Ticks<std::chrono::seconds>(
8418 m_chainman.m_best_header->Time()) /
8435 LOCK(peer->m_block_inv_mutex);
8436 std::vector<CBlock> vHeaders;
8438 ((!peer->m_prefers_headers &&
8439 (!state.m_requested_hb_cmpctblocks ||
8440 peer->m_blocks_for_headers_relay.size() > 1)) ||
8441 peer->m_blocks_for_headers_relay.size() >
8446         ProcessBlockAvailability(pto->GetId());
8448         if (!fRevertToInv) {
8449             bool fFoundStartingHeader = false;
8453             for (const BlockHash &hash : peer->m_blocks_for_headers_relay) {
8459                     fRevertToInv = true;
8462                 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
8473                     fRevertToInv = true;
8476                 pBestIndex = pindex;
8477                 if (fFoundStartingHeader) {
8480                 } else if (PeerHasHeader(&state, pindex)) {
8483                 } else if (pindex->pprev == nullptr ||
8484                            PeerHasHeader(&state, pindex->pprev)) {
8487                     fFoundStartingHeader = true;
8492                     fRevertToInv = true;
8497         if (!fRevertToInv && !vHeaders.empty()) {
8498 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
8503 "%s sending header-and-ids %s to peer=%d\n",
8504 __func__, vHeaders.front().GetHash().ToString(),
8507 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
8509 LOCK(m_most_recent_block_mutex);
8510 if (m_most_recent_block_hash ==
8512 cached_cmpctblock_msg =
8514 *m_most_recent_compact_block);
8517 if (cached_cmpctblock_msg.has_value()) {
8519 pto, std::move(cached_cmpctblock_msg.value()));
8523 block, *pBestIndex)};
8530 state.pindexBestHeaderSent = pBestIndex;
8531 }
else if (peer->m_prefers_headers) {
8532 if (vHeaders.size() > 1) {
8534 "%s: %u headers, range (%s, %s), to peer=%d\n",
8535 __func__, vHeaders.size(),
8536 vHeaders.front().GetHash().ToString(),
8537 vHeaders.back().GetHash().ToString(),
8541 "%s: sending header %s to peer=%d\n", __func__,
8542 vHeaders.front().GetHash().ToString(),
8547 state.pindexBestHeaderSent = pBestIndex;
8549 fRevertToInv =
true;
8556 if (!peer->m_blocks_for_headers_relay.empty()) {
8558 peer->m_blocks_for_headers_relay.back();
8569 "Announcing block %s not on main chain (tip=%s)\n",
8578 if (!PeerHasHeader(&state, pindex)) {
8579 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
8581 "%s: sending inv peer=%d hash=%s\n", __func__,
8586 peer->m_blocks_for_headers_relay.clear();
8593 std::vector<CInv> vInv;
8594         auto addInvAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
8595 vInv.emplace_back(type, hash);
8607 LOCK(peer->m_block_inv_mutex);
8609 vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
8615             for (const BlockHash &hash : peer->m_blocks_for_inv_relay) {
8618 peer->m_blocks_for_inv_relay.clear();
8621         auto computeNextInvSendTime =
8622             [&](std::chrono::microseconds &next) -> bool {
8625             if (next < current_time) {
8626                 fSendTrickle = true;
8628                     next = NextInvToInbounds(
8633                     next = current_time;
8637             return fSendTrickle;
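computeNextInvSendTime above flushes inventory only once its scheduled time has passed and then schedules the next batch a randomized delay later. A sketch of that trickle pattern with an exponentially distributed delay; the 5 second mean is a stand-in, not the interval used here:

    #include <chrono>
    #include <random>

    // Flush only when the scheduled time has passed, then pick the next send
    // time a random (exponential) delay ahead so announcement timing leaks less.
    bool ShouldTrickleNow(std::chrono::microseconds &next_send,
                          std::chrono::microseconds now, std::mt19937_64 &rng) {
        if (now < next_send) return false;
        std::exponential_distribution<double> delay(1.0 / 5.0);  // mean 5s, stand-in
        next_send = now + std::chrono::duration_cast<std::chrono::microseconds>(
                              std::chrono::duration<double>(delay(rng)));
        return true;
    }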
8641         if (peer->m_proof_relay != nullptr) {
8642             LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
8644             if (computeNextInvSendTime(
8645                     peer->m_proof_relay->m_next_inv_send_time)) {
8647                     peer->m_proof_relay->m_proof_inventory_to_send.begin();
8649                     peer->m_proof_relay->m_proof_inventory_to_send.end()) {
8652                     it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
8655                     if (peer->m_proof_relay->m_proof_inventory_known_filter
8656                             .contains(proofid)) {
8660                     peer->m_proof_relay->m_proof_inventory_known_filter.insert(
8663                     peer->m_proof_relay->m_recently_announced_proofs.insert(
8669         if (auto tx_relay = peer->GetTxRelay()) {
8670 LOCK(tx_relay->m_tx_inventory_mutex);
8672 const bool fSendTrickle =
8673 computeNextInvSendTime(tx_relay->m_next_inv_send_time);
8678 LOCK(tx_relay->m_bloom_filter_mutex);
8679 if (!tx_relay->m_relay_txs) {
8680 tx_relay->m_tx_inventory_to_send.clear();
8685             if (fSendTrickle && tx_relay->m_send_mempool) {
8686                 auto vtxinfo = m_mempool.infoAll();
8687                 tx_relay->m_send_mempool = false;
8689                     tx_relay->m_fee_filter_received.load()};
8691                 LOCK(tx_relay->m_bloom_filter_mutex);
8693                 for (const auto &txinfo : vtxinfo) {
8694                     const TxId &txid = txinfo.tx->GetId();
8695                     tx_relay->m_tx_inventory_to_send.erase(txid);
8698                     if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
8701                     if (tx_relay->m_bloom_filter &&
8702                         !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
8706                     tx_relay->m_tx_inventory_known_filter.insert(txid);
8709                     addInvAndMaybeFlush(MSG_TX, txid);
8711                 tx_relay->m_last_mempool_req =
8712 std::chrono::duration_cast<std::chrono::seconds>(
8719 std::vector<std::set<TxId>::iterator> vInvTx;
8720 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
8721 for (std::set<TxId>::iterator it =
8722 tx_relay->m_tx_inventory_to_send.begin();
8723 it != tx_relay->m_tx_inventory_to_send.end(); it++) {
8724 vInvTx.push_back(it);
8727 tx_relay->m_fee_filter_received.load()};
8732 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
8733 std::make_heap(vInvTx.begin(), vInvTx.end(),
8734 compareInvMempoolOrder);
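The heap above lets the loop pop announcements in mempool-priority order without sorting the whole set up front; a self-contained sketch of the same pattern over plain ints:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Pop items in descending priority without fully sorting the input.
    std::vector<int> DrainByPriority(std::vector<int> items, std::size_t max_to_send) {
        auto cmp = [](int a, int b) { return a < b; };  // max-heap on value
        std::make_heap(items.begin(), items.end(), cmp);
        std::vector<int> sent;
        while (!items.empty() && sent.size() < max_to_send) {
            std::pop_heap(items.begin(), items.end(), cmp);
            sent.push_back(items.back());
            items.pop_back();
        }
        return sent;
    }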
8738 unsigned int nRelayedTransactions = 0;
8739 LOCK(tx_relay->m_bloom_filter_mutex);
8740 while (!vInvTx.empty() &&
8745 std::pop_heap(vInvTx.begin(), vInvTx.end(),
8746 compareInvMempoolOrder);
8747 std::set<TxId>::iterator it = vInvTx.back();
8749 const TxId txid = *it;
8751 tx_relay->m_tx_inventory_to_send.erase(it);
8753 if (tx_relay->m_tx_inventory_known_filter.contains(txid)) {
8757                     auto txinfo = m_mempool.info(txid);
8763 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
8766 if (tx_relay->m_bloom_filter &&
8767 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
8772 tx_relay->m_recently_announced_invs.insert(txid);
8773                     addInvAndMaybeFlush(MSG_TX, txid);
8774 nRelayedTransactions++;
8777 while (!g_relay_expiration.empty() &&
8778 g_relay_expiration.front().first <
8780 mapRelay.erase(g_relay_expiration.front().second);
8781 g_relay_expiration.pop_front();
8784 auto ret = mapRelay.insert(
8785 std::make_pair(txid, std::move(txinfo.tx)));
8787 g_relay_expiration.push_back(std::make_pair(
8791 tx_relay->m_tx_inventory_known_filter.insert(txid);
8797 if (!vInv.empty()) {
8804     CNodeState &state = *State(pto->GetId());
8807 auto stalling_timeout = m_block_stalling_timeout.load();
8808 if (state.m_stalling_since.count() &&
8809 state.m_stalling_since < current_time - stalling_timeout) {
8814         LogPrintf("Peer=%d is stalling block download, disconnecting\n",
8819 const auto new_timeout =
8821 if (stalling_timeout != new_timeout &&
8822 m_block_stalling_timeout.compare_exchange_strong(
8823 stalling_timeout, new_timeout)) {
8826 "Increased stalling timeout temporarily to %d seconds\n",
8838 if (state.vBlocksInFlight.size() > 0) {
8839 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
8840 int nOtherPeersWithValidatedDownloads =
8841 m_peers_downloading_from - 1;
8843 state.m_downloading_since +
8844 std::chrono::seconds{consensusParams.nPowTargetSpacing} *
8847 nOtherPeersWithValidatedDownloads)) {
8848             LogPrintf("Timeout downloading block %s from peer=%d, "
8850 queuedBlock.pindex->GetBlockHash().ToString(),
8858 if (state.fSyncStarted &&
8859 peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
8862 if (current_time > peer->m_headers_sync_timeout &&
8863 nSyncStarted == 1 &&
8864 (m_num_preferred_download_peers -
8865 state.fPreferredDownload >=
8874                     LogPrintf("Timeout downloading headers from peer=%d, "
8880                     LogPrintf("Timeout downloading headers from noban "
8881                               "peer=%d, not disconnecting\n",
8887                     state.fSyncStarted = false;
8889                     peer->m_headers_sync_timeout = 0us;
8895             peer->m_headers_sync_timeout = std::chrono::microseconds::max();
8901 ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
8904 std::vector<CInv> vGetData;
8912     CNodeState &state = *State(pto->GetId());
8914     if (CanServeBlocks(*peer) &&
8915         ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
8918         std::vector<const CBlockIndex *> vToDownload;
8920         FindNextBlocksToDownload(pto->GetId(),
8922                                  state.vBlocksInFlight.size(),
8923                                  vToDownload, staller);
8926             BlockRequested(config, pto->GetId(), *pindex);
8931         if (state.vBlocksInFlight.empty() && staller != -1) {
8932             if (State(staller)->m_stalling_since == 0us) {
8933                 State(staller)->m_stalling_since = current_time;
8940     auto addGetDataAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
8941 CInv inv(type, hash);
8944 vGetData.push_back(std::move(inv));
8956 LOCK(cs_proofrequest);
8957 std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
8959             m_proofrequest.GetRequestable(pto->GetId(), current_time, &expired);
8960         for (const auto &entry : expired) {
8962                      "timeout of inflight proof %s from peer=%d\n",
8963                      entry.second.ToString(), entry.first);
8965         for (const auto &proofid : requestable) {
8966             if (!AlreadyHaveProof(proofid)) {
8968                 m_proofrequest.RequestedData(
8969                     pto->GetId(), proofid,
8976                 m_proofrequest.ForgetInvId(proofid);
8986 std::vector<std::pair<NodeId, TxId>> expired;
8988         m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
8989     for (const auto &entry : expired) {
8991                  entry.second.ToString(), entry.first);
8993     for (const TxId &txid : requestable) {
8997         if (!AlreadyHaveTx(txid, false)) {
8998             addGetDataAndMaybeFlush(MSG_TX, txid);
8999             m_txrequest.RequestedData(
9006             m_txrequest.ForgetInvId(txid);
9010 if (!vGetData.empty()) {
9016 MaybeSendFeefilter(*pto, *peer, current_time);
9020 bool PeerManagerImpl::ReceivedAvalancheProof(CNode &node, Peer &peer,
9022     assert(proof != nullptr);
9026     AddKnownProof(peer, proofid);
9038                                  return node.m_avalanche_pubkey.has_value());
9039     auto saveProofIfStaker = [this, isStaker](const CNode &node,
9041                                               const NodeId nodeid) -> bool {
9053         LOCK(cs_proofrequest);
9054         m_proofrequest.ReceivedResponse(nodeid, proofid);
9056         if (AlreadyHaveProof(proofid)) {
9057             m_proofrequest.ForgetInvId(proofid);
9058             saveProofIfStaker(node, proofid, nodeid);
9068             return pm.registerProof(proof, state);
9070         WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
9071         RelayProof(proofid);
9073         node.m_last_proof_time = GetTime<std::chrono::seconds>();
9076                  nodeid, proofid.ToString());
9098                  "Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
9099                  state.IsValid() ? "not-worth-polling"
9101                  nodeid, proofid.ToString());
9104     saveProofIfStaker(node, proofid, nodeid);
9106     if (isStaker && m_opts.avalanche_staking_preconsensus) {
bool MoneyRange(const Amount nValue)
static constexpr Amount MAX_MONEY
No amount larger than this (in satoshi) is valid.
enum ReadStatus_t ReadStatus
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
@ CHAIN
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends,...
@ TRANSACTIONS
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid,...
@ SCRIPTS
Scripts & signatures ok.
@ TREE
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
arith_uint256 GetBlockProof(const CBlockIndex &block)
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params ¶ms)
Return the time it would take to redo the work difference between from and to, assuming the current h...
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
#define Assert(val)
Identity function.
#define Assume(val)
Assume is the identity function.
Stochastic address manager.
void Connected(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
We have successfully connected to this peer.
void Good(const CService &addr, bool test_before_evict=true, NodeSeconds time=Now< NodeSeconds >())
Mark an entry as accessible, possibly moving it from "new" to "tried".
bool Add(const std::vector< CAddress > &vAddr, const CNetAddr &source, std::chrono::seconds time_penalty=0s)
Attempt to add one or more addresses to addrman's new table.
void SetServices(const CService &addr, ServiceFlags nServices)
Update an entry's service bits.
void Discourage(const CNetAddr &net_addr)
bool IsBanned(const CNetAddr &net_addr)
Return whether net_addr is banned.
bool IsDiscouraged(const CNetAddr &net_addr)
Return whether net_addr is discouraged.
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
std::vector< CTransactionRef > txn
std::vector< uint32_t > indices
A CService with information about it as peer.
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
NodeSeconds nTime
Always included in serialization, except in the network format on INIT_PROTO_VERSION.
size_t BlockTxCount() const
std::vector< CTransactionRef > vtx
The block chain is a tree shaped structure starting with the genesis block at the root,...
bool IsValid(enum BlockValidity nUpTo=BlockValidity::TRANSACTIONS) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
CBlockIndex * pprev
pointer to the index of the predecessor of this block
CBlockHeader GetBlockHeader() const
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
bool HaveTxsDownloaded() const
Check whether this block's and all previous blocks' transactions have been downloaded (and stored to ...
int64_t GetBlockTime() const
unsigned int nTx
Number of transactions in this block.
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
BlockHash GetBlockHash() const
int nHeight
height of the entry in the chain. The genesis block has height 0
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
bool IsWithinSizeConstraints() const
True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS (c...
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
int Height() const
Return the maximal height in the chain.
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system.
const CBlock & GenesisBlock() const
const Consensus::Params & GetConsensus() const
void ForEachNode(const NodeFn &func)
bool OutboundTargetReached(bool historicalBlockServingLimit) const
check if the outbound target is reached.
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
bool GetNetworkActive() const
bool GetTryNewOutboundPeer() const
void SetTryNewOutboundPeer(bool flag)
unsigned int GetReceiveFloodSize() const
int GetExtraBlockRelayCount() const
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc)
void StartExtraBlockRelayPeers()
bool DisconnectNode(const std::string &node)
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
int GetExtraFullOutboundCount() const
std::vector< CAddress > GetAddresses(size_t max_addresses, size_t max_pct, std::optional< Network > network) const
Return all or many randomly selected addresses, optionally by network.
bool CheckIncomingNonce(uint64_t nonce)
bool ShouldRunInactivityChecks(const CNode &node, std::chrono::seconds now) const
Return true if we should disconnect the peer for failing an inactivity check.
void PushMessage(CNode *pnode, CSerializedNetMsg &&msg)
bool GetUseAddrmanOutgoing() const
Double ended buffer combining vector and stream-like interfaces.
Fee rate in satoshis per kilobyte: Amount / kB.
Amount GetFeePerK() const
Return the fee in satoshis for a size of 1000 bytes.
Reads data from an underlying stream, while hashing the read data.
Inv(ventory) message data.
bool IsMsgCmpctBlk() const
std::string ToString() const
bool IsMsgStakeContender() const
bool IsMsgFilteredBlk() const
Used to create a Merkle proof (usually from a subset of transactions), which consists of a block head...
std::vector< std::pair< size_t, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Transport protocol agnostic message container.
CSerializedNetMsg Make(int nFlags, std::string msg_type, Args &&...args) const
Information about a peer.
RecursiveMutex cs_vProcessMsg
Mutex cs_avalanche_pubkey
bool IsFeelerConn() const
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
bool ExpectServicesFromConn() const
std::atomic< int > nVersion
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
bool IsInboundConn() const
bool HasPermission(NetPermissionFlags permission) const
std::atomic_bool fPauseRecv
bool IsOutboundOrBlockRelayConn() const
bool IsManualConn() const
std::atomic< int64_t > nTimeOffset
const std::string m_addr_name
std::string ConnectionTypeAsString() const
void SetCommonVersion(int greatest_common_version)
std::atomic< bool > m_bip152_highbandwidth_to
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
std::atomic< bool > m_bip152_highbandwidth_from
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
std::atomic_bool fSuccessfullyConnected
bool IsAddrFetchConn() const
uint64_t GetLocalNonce() const
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
bool IsBlockOnlyConn() const
int GetCommonVersion() const
bool IsFullOutboundConn() const
uint64_t nRemoteHostNonce
Mutex m_subver_mutex
cleanSubVer is a sanitized string of the user agent byte array we read from the wire.
std::atomic_bool fPauseSend
std::chrono::seconds m_nextGetAvaAddr
uint64_t nRemoteExtraEntropy
uint64_t GetLocalExtraEntropy() const
SteadyMilliseconds m_last_poll
double getAvailabilityScore() const
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
void updateAvailabilityScore(double decayFactor)
The availability score is calculated using an exponentially weighted average.
std::atomic< std::chrono::seconds > m_avalanche_last_message_fault
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e.
std::atomic< int > m_avalanche_message_fault_counter
std::atomic< bool > m_avalanche_enabled
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e....
std::atomic_bool fDisconnect
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e....
void invsVoted(uint32_t count)
The node voted for count invs.
bool IsAvalancheOutboundConnection() const
An encapsulated public key.
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
Simple class for background tasks that should be run periodically or once "after a while".
void scheduleEvery(Predicate p, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat p until it returns false.

void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
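A minimal usage sketch of the two scheduling calls above, assuming a background thread runs the scheduler's service loop (headers omitted):

    void SchedulerSketch() {
        CScheduler scheduler;
        std::thread worker([&] { scheduler.serviceQueue(); }); // assumed service loop
        // Run once, roughly five seconds from now.
        scheduler.scheduleFromNow([] { /* one-shot task */ },
                                  std::chrono::milliseconds{5000});
        // Repeat every second until the predicate returns false.
        scheduler.scheduleEvery([] { /* periodic task */ return true; },
                                std::chrono::milliseconds{1000});
        // ... later, on shutdown:
        scheduler.stop(); // assumed shutdown call
        worker.join();
    }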
A combination of a network address (CNetAddr) and a (TCP) port.
std::string ToString() const
std::vector< uint8_t > GetKey() const
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data.
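A short sketch of keyed hashing with the Write/Finalize interface above (the key halves k0/k1 and the two inputs are illustrative):

    // SipHash-2-4 of two 64-bit words under a fixed key; Write() chains and
    // Finalize() produces the 64-bit digest.
    uint64_t HashPair(uint64_t k0, uint64_t k1, uint64_t a, uint64_t b) {
        return CSipHasher(k0, k1).Write(a).Write(b).Finalize();
    }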
std::set< std::reference_wrapper< const CTxMemPoolEntryRef >, CompareIteratorById > Parents
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
void removeConflicts(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(cs)
void RemoveUnbroadcastTx(const TxId &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
CFeeRate GetMinFee() const
The minimum fee to get into the mempool, which may itself not be enough for larger-sized transactions...
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it.
void removeRecursive(const CTransaction &tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs)
bool CompareTopologically(const TxId &txida, const TxId &txidb) const
TxMempoolInfo info(const TxId &txid) const
size_t DynamicMemoryUsage() const
std::vector< TxMempoolInfo > infoAll() const
bool setAvalancheFinalized(const CTxMemPoolEntryRef &tx) EXCLUSIVE_LOCKS_REQUIRED(cs)
CTransactionRef GetConflictTx(const COutPoint &prevout) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Get the transaction in the pool that spends the same prevout.
bool exists(const TxId &txid) const
std::set< TxId > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
auto withOrphanage(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_orphanage)
const CFeeRate m_min_relay_feerate
auto withConflicting(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_conflicting)
void removeForFinalizedBlock(const std::vector< CTransactionRef > &vtx) EXCLUSIVE_LOCKS_REQUIRED(cs)
unsigned long size() const
std::optional< txiter > GetIter(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Returns an iterator to the given txid, if found.
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &block)
Notifies listeners that a block which builds directly on our current tip has been received and connec...
virtual void BlockChecked(const CBlock &, const BlockValidationState &)
Notifies listeners of a block validation result.
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
Notifies listeners when the block chain tip advances.
virtual void BlockConnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being connected.
virtual void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being disconnected.
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
MempoolAcceptResult ProcessTransaction(const CTransactionRef &tx, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Try to add a transaction to the memory pool.
bool ProcessNewBlock(const std::shared_ptr< const CBlock > &block, bool force_processing, bool min_pow_checked, bool *new_block, avalanche::Processor *const avalanche=nullptr) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Chainstate & ActiveChainstate() const
SnapshotCompletionResult MaybeCompleteSnapshotValidation(std::function< void(bilingual_str)> shutdown_fnc=[](bilingual_str msg) { AbortNode(msg.original, msg);}) EXCLUSIVE_LOCKS_REQUIRED(...)
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, bool min_pow_checked, BlockValidationState &state, const CBlockIndex **ppindex=nullptr, const std::optional< CCheckpointData > &test_checkpoints=std::nullopt) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
const arith_uint256 & MinimumChainWork() const
CChain & ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
void ReportHeadersPresync(const arith_uint256 &work, int64_t height, int64_t timestamp)
void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(...)
Check to see if caches are out of balance and if so, call ResizeCoinsCaches() as needed.
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
virtual uint64_t GetMaxBlockSize() const =0
A writer stream (for serialization) that computes a 256-bit hash.
size_t Count(NodeId peer) const
Count how many announcements a peer has (REQUESTED, CANDIDATE, and COMPLETED combined).
size_t CountInFlight(NodeId peer) const
Count how many REQUESTED announcements a peer has.
Interface for message handling.
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic< bool > &interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process protocol messages received from a given node.
virtual bool SendMessages(const Config &config, CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Send queued protocol messages to a given node.
virtual void InitializeNode(const Config &config, CNode &node, ServiceFlags our_services)=0
Initialize a peer (setup state, queue any initial messages)
virtual void FinalizeNode(const Config &config, const CNode &node)=0
Handle removal of a peer (clear state)
static bool HasFlag(NetPermissionFlags flags, NetPermissionFlags f)
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< TxHash, CTransactionRef > > &extra_txn)
bool IsTxAvailable(size_t index) const
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
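A hedged sketch of the compact-block reconstruction flow using the three calls above; the PartiallyDownloadedBlock constructor arguments shown are assumptions about this tree:

    ReadStatus ReconstructSketch(const Config &config, CTxMemPool &pool,
                                 const CBlockHeaderAndShortTxIDs &cmpct,
                                 CBlock &block) {
        PartiallyDownloadedBlock partial(config, &pool); // assumed ctor signature
        const ReadStatus init = partial.InitData(cmpct, /*extra_txn=*/{});
        if (init != READ_STATUS_OK) {
            return init;
        }
        // IsTxAvailable(i) reports which slots were filled from the mempool or
        // the prefilled transactions; the rest must be fetched via getblocktxn
        // and passed, in order, to FillBlock().
        std::vector<CTransactionRef> missing_from_peer; // from a blocktxn reply
        return partial.FillBlock(block, missing_from_peer);
    }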
virtual std::optional< std::string > FetchBlock(const Config &config, NodeId peer_id, const CBlockIndex &block_index)=0
Attempt to manually fetch block from a given peer.
virtual void SendPings()=0
Send ping message to all peers.
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, avalanche::Processor *const avalanche, Options opts)
virtual void ProcessMessage(const Config &config, CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process a single message from a peer.
virtual void StartScheduledTasks(CScheduler &scheduler)=0
Begin running background tasks, should only be called once.
virtual bool IgnoresIncomingTxs()=0
Whether this node ignores txs received over p2p.
virtual void UnitTestMisbehaving(const NodeId peer_id, const int howmuch)=0
Public for unit testing.
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const =0
Get statistics from node state.
virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)=0
This function is used for testing the stale tip eviction logic, see denialofservice_tests....
virtual void CheckForStaleTipAndEvictPeers()=0
Evict extra outbound peers.
static RCUPtr make(Args &&...args)
Construct a new object that is owned by the pointer.
int EraseTx(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase a tx by txid.
void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all txs announced by a peer (eg, after that peer disconnects)
std::vector< CTransactionRef > GetChildrenFromSamePeer(const CTransactionRef &parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get all children that spend from this tx and were received from nodeid.
bool AddTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add a new transaction to the pool.
unsigned int LimitTxs(unsigned int max_txs, FastRandomContext &rng) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Limit the txs to the given maximum.
void EraseForBlock(const CBlock &block) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all txs included in or invalidated by a new block.
std::vector< CTransactionRef > GetConflictTxs(const CTransactionRef &tx) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
void AddChildrenToWorkSet(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add any tx that lists a particular tx as a parent into the work set of the peer that provided it.
bool HaveTx(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Check if we already have the transaction.
std::vector< std::pair< CTransactionRef, NodeId > > GetChildrenFromDifferentPeer(const CTransactionRef &parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get all children that spend from this tx but were not received from nodeid.
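A sketch of the orphan-pool calls above, assuming they are reached through CTxMemPool::withOrphanage() and that the wrapper hands the underlying pool to the callable:

    void OrphanSketch(CTxMemPool &mempool, const CTransactionRef &tx,
                      NodeId nodeid, FastRandomContext &rng) {
        mempool.withOrphanage([&](auto &orphanage) {
            if (!orphanage.HaveTx(tx->GetId())) {
                orphanage.AddTx(tx, nodeid); // remember which peer provided it
            }
            orphanage.LimitTxs(/*max_txs=*/100, rng); // randomly evict past the cap
        });
        // On disconnect, drop everything that peer announced:
        mempool.withOrphanage([&](auto &orphanage) { orphanage.EraseForPeer(nodeid); });
    }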
std::string GetRejectReason() const
std::string ToString() const
256-bit unsigned big integer.
const std::vector< PrefilledProof > & getPrefilledProofs() const
uint64_t getShortID(const ProofId &proofid) const
const std::vector< uint64_t > & getShortIDs() const
ProofId getProofId() const
bool verify(DelegationState &state, CPubKey &auth) const
const DelegationId & getId() const
const LimitedProofId & getLimitedProofId() const
bool shouldRequestMoreNodes()
Returns true if we encountered a lack of nodes since the last call.
bool exists(const ProofId &proofid) const
Return true if the (valid) proof exists, but only for non-dangling proofs.
bool forPeer(const ProofId &proofid, Callable &&func) const
bool addNode(NodeId nodeid, const ProofId &proofid)
Node API.
void removeUnbroadcastProof(const ProofId &proofid)
const ProofRadixTree & getShareableProofsSnapshot() const
bool isBoundToPeer(const ProofId &proofid) const
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid, const bool present)
void forEachPeer(Callable &&func) const
void setInvalid(const ProofId &proofid)
bool isInvalid(const ProofId &proofid) const
bool isImmature(const ProofId &proofid) const
void updateAvailabilityScores(const double decayFactor, Callable &&getNodeAvailabilityScore)
auto getUnbroadcastProofs() const
bool isInConflictingPool(const ProofId &proofid) const
void sendResponse(CNode *pfrom, Response response) const
bool addToReconcile(const AnyVoteItem &item) EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems)
int64_t getAvaproofsNodeCounter() const
bool registerVotes(NodeId nodeid, const Response &response, std::vector< VoteItemUpdate > &updates, int &banscore, std::string &error) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
bool sendHello(CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds)
Send an avahello message.
bool isQuorumEstablished() LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
const bool m_preConsensus
ProofRef getLocalProof() const
void addStakeContender(const ProofRef &proof) EXCLUSIVE_LOCKS_REQUIRED(cs_main
Track votes on stake contenders.
bool reconcileOrFinalize(const ProofRef &proof) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
Wrapper around the addToReconcile for proofs that adds back the finalization flag to the peer if it i...
void sendDelayedAvahello() EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds)
auto withPeerManager(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
int getStakeContenderStatus(const StakeContenderId &contenderId) const EXCLUSIVE_LOCKS_REQUIRED(!cs_stakeContenderCache
void avaproofsSent(NodeId nodeid) LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
std::vector< uint32_t > indices
std::string ToString() const
std::string GetHex() const
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos) const
Functions for disk access for blocks.
CBlockIndex * LookupBlockIndex(const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool LoadingBlocks() const
bool IsPruneMode() const
Whether running in -prune mode.
static const uint256 ZERO
@ BLOCK_CHECKPOINT
the block failed to meet one of our checkpoints
@ BLOCK_HEADER_LOW_WORK
the block header may be on a too-little-work chain
@ BLOCK_INVALID_HEADER
invalid proof of work or time too old
@ BLOCK_CACHED_INVALID
this block was cached as being invalid and we didn't store the reason why
@ BLOCK_CONSENSUS
invalid by consensus rules (excluding any below reasons)
@ BLOCK_MISSING_PREV
We don't have the previous block the checked one is built on.
@ BLOCK_INVALID_PREV
A block this one builds on is invalid.
@ BLOCK_MUTATED
the block's data didn't match the data committed to by the PoW
@ BLOCK_TIME_FUTURE
block timestamp was > 2 hours in the future (or our clock is bad)
@ BLOCK_RESULT_UNSET
initial value. Block has not yet been rejected
@ TX_MISSING_INPUTS
transaction was missing some of its inputs
@ TX_CHILD_BEFORE_PARENT
This tx's outputs are already spent in the mempool.
@ TX_MEMPOOL_POLICY
violated mempool's fee/size/descendant/etc limits
@ TX_PACKAGE_RECONSIDERABLE
fails some policy, but might be acceptable if submitted in a (different) package
@ TX_UNKNOWN
transaction was not validated because package failed
@ TX_PREMATURE_SPEND
transaction spends a coinbase too early, or violates locktime/sequence locks
@ TX_DUPLICATE
Tx already in mempool or in the chain.
@ TX_INPUTS_NOT_STANDARD
inputs failed policy rules
@ TX_CONFLICT
Tx conflicts with a finalized tx, i.e.
@ TX_NOT_STANDARD
otherwise didn't meet our local policy rules
@ TX_AVALANCHE_RECONSIDERABLE
fails some policy, but might be reconsidered by avalanche voting
@ TX_NO_MEMPOOL
this node does not have a mempool so can't validate the transaction
@ TX_RESULT_UNSET
initial value. Tx has not yet been rejected
@ TX_CONSENSUS
invalid by consensus rules
static size_t RecursiveDynamicUsage(const CScript &script)
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
std::array< uint8_t, CPubKey::SCHNORR_SIZE > SchnorrSig
a Schnorr signature
bool error(const char *fmt, const Args &...args)
#define LogPrint(category,...)
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
const char * AVAPROOFSREQ
Request for missing avalanche proofs after an avaproofs message has been processed.
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
const char * BLOCK
The block message transmits a single serialized block.
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter.
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message,...
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
const char * AVAPROOFS
The avaproofs message contains the proof short ids of all the valid proofs that we know.
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
const char * GETAVAPROOFS
The getavaproofs message requests an avaproofs message that provides the proof short ids of all the v...
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
const char * GETAVAADDR
The getavaaddr message requests an addr message from the receiving node, containing IP addresses of t...
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids".
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
const char * TX
The tx message transmits a single transaction.
const char * AVAHELLO
Contains a delegation and a signature.
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
const char * AVARESPONSE
Contains an avalanche::Response.
const char * GETDATA
The getdata message requests one or more data objects from another node.
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
const char * BLOCKTXN
Contains a BlockTransactions.
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks,...
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected.
const char * AVAPOLL
Contains an avalanche::Poll.
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
const char * AVAPROOF
Contains an avalanche::Proof.
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest; the peer should respond with a "blocktxn" message.
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
ShortIdProcessor< PrefilledProof, ShortIdProcessorPrefilledProofAdapter, ProofRefCompare > ProofShortIdProcessor
std::variant< const ProofRef, const CBlockIndex *, const CTransactionRef > AnyVoteItem
RCUPtr< const Proof > ProofRef
Implement std::hash so RCUPtr can be used as a key for maps or sets.
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
std::function< void(const CAddress &addr, const std::string &msg_type, Span< const uint8_t > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
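A sketch of overriding the hook from a test, matching the std::function signature above (the surrounding function and vector are illustrative):

    void CaptureSketch(std::vector<std::string> &captured) {
        CaptureMessage = [&captured](const CAddress &addr, const std::string &msg_type,
                                     Span<const uint8_t> data, bool is_incoming) {
            captured.push_back(msg_type); // record instead of writing to disk
        };
    }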
std::string userAgent(const Config &config)
bool IsReachable(enum Network net)
bool SeenLocal(const CService &addr)
vote for a local address
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
@ BypassProofRequestLimits
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
static constexpr auto GETAVAADDR_INTERVAL
Minimum time between two successive getavaaddr messages from the same peer.
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically relayed before uncondi...
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB
Maximum number of inventory items to send per transmission.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch?...
static uint32_t getAvalancheVoteForProof(const avalanche::Processor &avalanche, const avalanche::ProofId &id)
Decide a response for an Avalanche poll about the given proof.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay.
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS
Maximum number of unconnecting headers announcements before DoS score.
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict.
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
static constexpr auto RELAY_TX_CACHE_TIME
How long to cache transactions in mapRelay for normal relay.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
bool IsAvalancheMessageType(const std::string &msg_type)
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/behind headers chain.
static std::chrono::microseconds ComputeRequestTime(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams, std::chrono::microseconds current_time, bool preferred)
Compute the request time for this announcement, current time plus delays for:
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
static constexpr DataRequestParameters TX_REQUEST_PARAMS
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT
If no proof was requested from a compact proof message after this timeout expired,...
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY
The number of most recently announced transactions a peer can request.
static constexpr auto UNCONDITIONAL_RELAY_DELAY
How long a transaction has to be in the mempool before it can unconditionally be relayed (even when n...
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we're willing to process on average.
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
static constexpr DataRequestParameters PROOF_REQUEST_PARAMS
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
static bool TooManyAnnouncements(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams)
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message.
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
static const int DISCOURAGEMENT_THRESHOLD
Threshold for marking a node to be discouraged, e.g.
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
static constexpr int ADDRV2_FORMAT
A flag that is ORed into the protocol version to designate that addresses should be serialized in (un...
bool IsProxy(const CNetAddr &addr)
static constexpr NodeId NO_NODE
Special NodeId that represents no node.
uint256 GetPackageHash(const Package &package)
std::vector< CTransactionRef > Package
A package is an ordered list of transactions.
static constexpr Amount DEFAULT_MIN_RELAY_TX_FEE_PER_KB(1000 *SATOSHI)
Default for -minrelaytxfee, minimum relay fee for transactions.
std::shared_ptr< const CTransaction > CTransactionRef
static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL
Maximum number of items that can be polled at once.
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH
Maximum length of incoming protocol messages (Currently 2MB).
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services),...
@ MSG_AVA_STAKE_CONTENDER
@ MSG_CMPCT_BLOCK
Defined in BIP152.
ServiceFlags
nServices flags.
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB.
std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval)
Return a timestamp in the future sampled from an exponential distribution (https://en....
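A sketch of how such a timestamp is typically used for the relay timers listed in this reference (the wrapper function is illustrative):

    // Draw the next Poisson-spaced send time around the configured average interval.
    std::chrono::microseconds NextAddrSendSketch() {
        const auto current_time = GetTime<std::chrono::microseconds>();
        return GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
    }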
constexpr auto GetRandMillis
void Shuffle(I first, I last, R &&rng)
More efficient than using std::shuffle on a FastRandomContext.
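A minimal sketch of the (first, last, rng) form above:

    void ShuffleSketch() {
        std::vector<int> order{1, 2, 3, 4, 5};
        FastRandomContext rng;
        Shuffle(order.begin(), order.end(), rng); // in-place shuffle with the node's fast RNG
    }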
reverse_range< T > reverse_iterate(T &x)
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
void Unserialize(Stream &, char)=delete
#define LIMITED_STRING(obj, n)
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
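A round-trip sketch with a CDataStream (the serialization flags shown are the conventional ones and should be treated as assumptions here):

    uint64_t CompactSizeSketch() {
        CDataStream s(SER_NETWORK, PROTOCOL_VERSION);
        WriteCompactSize(s, 253);
        return ReadCompactSize(s); // == 253; throws on non-canonical or oversized encodings
    }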
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(Span{std::forward< V >(v)}))
Like the Span constructor, but for (const) uint8_t member types only.
static const double AVALANCHE_STATISTICS_DECAY_FACTOR
Pre-computed decay factor for the avalanche statistics computation.
static constexpr std::chrono::minutes AVALANCHE_STATISTICS_REFRESH_PERIOD
Refresh period for the avalanche statistics computation.
std::string ToString(const T &t)
Locale-independent version of std::to_string.
static constexpr Amount zero() noexcept
A BlockHash is a unique identifier for a block.
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
std::vector< BlockHash > vHave
std::chrono::microseconds m_ping_wait
Amount m_fee_filter_received
std::vector< int > vHeightInFlight
bool m_addr_relay_enabled
uint64_t m_addr_rate_limited
uint64_t m_addr_processed
ServiceFlags their_services
std::vector< uint8_t > data
Parameters that influence chain consensus.
int64_t nPowTargetSpacing
std::chrono::seconds PowTargetSpacing() const
const std::chrono::seconds overloaded_peer_delay
How long to delay requesting data from overloaded peers (see max_peer_request_in_flight).
const size_t max_peer_announcements
Maximum number of inventories to consider for requesting, per peer.
const std::chrono::seconds nonpref_peer_delay
How long to delay requesting data from non-preferred peers.
const NetPermissionFlags bypass_request_limits_permissions
Permission flags a peer requires to bypass the request limits tracking limits and delay penalty.
const std::chrono::microseconds getdata_interval
How long to wait (in microseconds) before a data request from an additional peer.
const size_t max_peer_request_in_flight
Maximum number of in-flight data requests from a peer.
Validation result for a transaction evaluated by MemPoolAccept (single or package).
const ResultType m_result_type
Result type.
const TxValidationState m_state
Contains information about why the transaction failed.
@ MEMPOOL_ENTRY
Valid, transaction was already in the mempool.
@ VALID
Fully validated, valid.
static time_point now() noexcept
Return current system time or mocked time, if set.
std::chrono::time_point< NodeClock > time_point
Validation result for package mempool acceptance.
PackageValidationState m_state
std::map< TxId, MempoolAcceptResult > m_tx_results
Map from txid to finished MempoolAcceptResults.
This is a radix tree storing values identified by a unique key.
A TxId is the identifier of a transaction.
std::chrono::seconds registration_time
const ProofId & getProofId() const
StakeContenderIds are unique for each block to ensure that the peer polling for their acceptance has ...
#define AssertLockNotHeld(cs)
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
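A small usage sketch of the macro (member and class names are illustrative):

    class Counter {
        mutable Mutex m_count_mutex;
        int m_count GUARDED_BY(m_count_mutex){0};

    public:
        // Lock, evaluate, and return in a single expression.
        int Get() const { return WITH_LOCK(m_count_mutex, return m_count); }
        void Bump() { WITH_LOCK(m_count_mutex, ++m_count); }
    };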
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define LOCKS_EXCLUDED(...)
#define NO_THREAD_SAFETY_ANALYSIS
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
constexpr int64_t count_microseconds(std::chrono::microseconds t)
constexpr int64_t count_seconds(std::chrono::seconds t)
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
double CountSecondsDouble(SecondsDouble t)
Helper to count the seconds in any std::chrono::duration type.
NodeClock::time_point GetAdjustedTime()
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
#define TRACE6(context, event, a, b, c, d, e, f)
@ AVALANCHE
Removed by avalanche vote.
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
arith_uint256 CalculateHeadersWork(const std::vector< CBlockHeader > &headers)
Return the sum of the work on a given set of headers.
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check that the proof of work on each block header matches the value in nBits.
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate, CTxMemPool &pool, const Package &package, bool test_accept)
Validate (and maybe submit) a package to the mempool.
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
CMainSignals & GetMainSignals()
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version
static const int PROTOCOL_VERSION
network protocol versioning
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.