109 "Max protocol message length must be greater than largest "
110 "possible INV message");
164 std::chrono::seconds(2),
165 std::chrono::seconds(2),
166 std::chrono::seconds(60),
173 std::chrono::seconds(2),
174 std::chrono::seconds(2),
175 std::chrono::seconds(60),
267 std::chrono::seconds{1},
268 "INVENTORY_RELAY_MAX too low");
319 std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
357 std::atomic<ServiceFlags> m_their_services{
NODE_NONE};
360 Mutex m_misbehavior_mutex;
362 int m_misbehavior_score
GUARDED_BY(m_misbehavior_mutex){0};
365 bool m_should_discourage
GUARDED_BY(m_misbehavior_mutex){
false};
368 Mutex m_block_inv_mutex;
374 std::vector<BlockHash> m_blocks_for_inv_relay
GUARDED_BY(m_block_inv_mutex);
380 std::vector<BlockHash>
381 m_blocks_for_headers_relay
GUARDED_BY(m_block_inv_mutex);
392 std::atomic<int> m_starting_height{-1};
395 std::atomic<uint64_t> m_ping_nonce_sent{0};
397 std::atomic<std::chrono::microseconds> m_ping_start{0us};
399 std::atomic<bool> m_ping_queued{
false};
409 std::chrono::microseconds m_next_send_feefilter
422 bool m_relay_txs
GUARDED_BY(m_bloom_filter_mutex){
false};
427 std::unique_ptr<CBloomFilter>
443 GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
449 std::set<TxId> m_tx_inventory_to_send
GUARDED_BY(m_tx_inventory_mutex);
455 bool m_send_mempool
GUARDED_BY(m_tx_inventory_mutex){
false};
457 std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
462 std::chrono::microseconds
463 m_next_inv_send_time
GUARDED_BY(m_tx_inventory_mutex){0};
469 std::atomic<Amount> m_fee_filter_received{
Amount::zero()};
477 LOCK(m_tx_relay_mutex);
479 m_tx_relay = std::make_unique<Peer::TxRelay>();
480 return m_tx_relay.get();
484 return WITH_LOCK(m_tx_relay_mutex,
return m_tx_relay.get());
486 const TxRelay *GetTxRelay() const
488 return WITH_LOCK(m_tx_relay_mutex,
return m_tx_relay.get());
493 std::set<avalanche::ProofId>
494 m_proof_inventory_to_send
GUARDED_BY(m_proof_inventory_mutex);
497 GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
504 std::chrono::microseconds m_next_inv_send_time{0};
508 std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
509 std::atomic<bool> compactproofs_requested{
false};
516 const std::unique_ptr<ProofRelay> m_proof_relay;
521 std::vector<CAddress>
533 std::unique_ptr<CRollingBloomFilter>
551 std::atomic_bool m_addr_relay_enabled{
false};
555 mutable Mutex m_addr_send_times_mutex;
557 std::chrono::microseconds
558 m_next_addr_send
GUARDED_BY(m_addr_send_times_mutex){0};
560 std::chrono::microseconds
561 m_next_local_addr_send
GUARDED_BY(m_addr_send_times_mutex){0};
566 std::atomic_bool m_wants_addrv2{
false};
570 mutable Mutex m_addr_token_bucket_mutex;
575 double m_addr_token_bucket
GUARDED_BY(m_addr_token_bucket_mutex){1.0};
577 std::chrono::microseconds
579 GetTime<std::chrono::microseconds>()};
581 std::atomic<uint64_t> m_addr_rate_limited{0};
586 std::atomic<uint64_t> m_addr_processed{0};
592 bool m_inv_triggered_getheaders_before_sync
596 Mutex m_getdata_requests_mutex;
598 std::deque<CInv> m_getdata_requests
GUARDED_BY(m_getdata_requests_mutex);
605 Mutex m_headers_sync_mutex;
610 std::unique_ptr<HeadersSyncState>
615 std::atomic<bool> m_sent_sendheaders{
false};
618 int m_num_unconnecting_headers_msgs
622 std::chrono::microseconds m_headers_sync_timeout
633 : m_id(id), m_our_services{our_services},
634 m_proof_relay(fRelayProofs ?
std::make_unique<ProofRelay>()
638 mutable Mutex m_tx_relay_mutex;
641 std::unique_ptr<TxRelay> m_tx_relay
GUARDED_BY(m_tx_relay_mutex);
644using PeerRef = std::shared_ptr<Peer>;
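// --- Illustrative sketch (not part of the original file) -------------------
// The Peer members above lean on Clang thread-safety annotations: a field
// tagged GUARDED_BY(mutex) may only be touched with that mutex held, and
// WITH_LOCK(mutex, expr) takes the lock just long enough to evaluate expr.
// A minimal stand-alone equivalent using only the standard library
// (ExamplePeer and its members are hypothetical):
#include <mutex>

class ExamplePeer {
    mutable std::mutex m_score_mutex;
    int m_score /* conceptually GUARDED_BY(m_score_mutex) */ {0};

public:
    // Rough equivalent of WITH_LOCK(m_score_mutex, return m_score);
    int GetScore() const {
        std::lock_guard<std::mutex> lock(m_score_mutex);
        return m_score;
    }
    void AddScore(int delta) {
        std::lock_guard<std::mutex> lock(m_score_mutex);
        m_score += delta;
    }
};
// --- end sketch -------------------------------------------------------------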
    bool fSyncStarted{false};
    std::chrono::microseconds m_stalling_since{0us};
    std::list<QueuedBlock> vBlocksInFlight;
    std::chrono::microseconds m_downloading_since{0us};
    bool fPreferredDownload{false};
    bool m_requested_hb_cmpctblocks{false};
    bool m_provides_cmpctblocks{false};

    struct ChainSyncTimeoutState {
        std::chrono::seconds m_timeout{0s};
        bool m_sent_getheaders{false};
        bool m_protect{false};

    ChainSyncTimeoutState m_chain_sync;

    int64_t m_last_block_announcement{0};

    const bool m_is_inbound;

    CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
                          bool fInitialDownload) override
                         const std::shared_ptr<const CBlock> &pblock) override
            !m_headers_presync_mutex);
                        std::atomic<bool> &interrupt) override
            !m_recent_confirmed_transactions_mutex,
            !m_most_recent_block_mutex, !cs_proofrequest,
            !m_headers_presync_mutex, g_msgproc_mutex);
            !m_recent_confirmed_transactions_mutex,
            !m_most_recent_block_mutex, !cs_proofrequest,
    std::optional<std::string>
    void RelayTransaction(const TxId &txid) override
    void RelayProof(const avalanche::ProofId &proofid) override
    void SetBestHeight(int height) override { m_best_height = height; };
        Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, "");
                        const std::chrono::microseconds time_received,
                        const std::atomic<bool> &interruptMsgProc) override
            !m_recent_confirmed_transactions_mutex,
            !m_most_recent_block_mutex, !cs_proofrequest,
            !m_headers_presync_mutex, g_msgproc_mutex);
                                    int64_t time_in_seconds) override;

    void ConsiderEviction(CNode &pto, Peer &peer,
                          std::chrono::seconds time_in_seconds)
    void EvictExtraOutboundPeers(std::chrono::seconds now)
    void ReattemptInitialBroadcast(CScheduler &scheduler)
    void UpdateAvalancheStatistics() const;
    void AvalanchePeriodicNetworking(CScheduler &scheduler) const;

    void Misbehaving(Peer &peer, int howmuch, const std::string &message);

    bool MaybePunishNodeForBlock(NodeId nodeid,
                                 bool via_compact_block,
                                 const std::string &message = "")
                               const std::string &message = "")

    bool MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer);

                          bool maybe_add_extra_compact_tx)

    struct PackageToValidate {
        const std::vector<NodeId> m_senders;
            : m_txns{parent, child}, m_senders{parent_sender, child_sender} {}
            Assume(m_txns.size() == 2);
                "parent %s (sender=%d) + child %s (sender=%d)",
                m_txns.front()->GetId().ToString(), m_senders.front(),
                m_txns.back()->GetId().ToString(), m_senders.back());

    void ProcessPackageResult(const PackageToValidate &package_to_validate,
    std::optional<PackageToValidate> Find1P1CPackage(const CTransactionRef &ptx,
    bool ProcessOrphanTx(const Config &config, Peer &peer)
    void ProcessHeadersMessage(const Config &config, CNode &pfrom, Peer &peer,
                               std::vector<CBlockHeader> &&headers,
                               bool via_compact_block)
    bool CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
    void HandleFewUnconnectingHeaders(CNode &pfrom, Peer &peer,
                                      const std::vector<CBlockHeader> &headers)
    CheckHeadersAreContinuous(const std::vector<CBlockHeader> &headers) const;
    bool IsContinuationOfLowWorkHeadersSync(Peer &peer, CNode &pfrom,
                                            std::vector<CBlockHeader> &headers)
            !m_headers_presync_mutex, g_msgproc_mutex);
    bool TryLowWorkHeadersSync(Peer &peer, CNode &pfrom,
                               std::vector<CBlockHeader> &headers)
            !m_headers_presync_mutex, g_msgproc_mutex);
    bool IsAncestorOfBestHeaderOrTip(const CBlockIndex *header)
    void HeadersDirectFetchBlocks(const Config &config, CNode &pfrom,
    void UpdatePeerStateForReceivedHeaders(CNode &pfrom, Peer &peer,
                                           bool received_new_header,
                                           bool may_have_more_headers)
    void SendBlockTransactions(CNode &pfrom, Peer &peer, const CBlock &block,
                             std::chrono::microseconds current_time)
                               std::chrono::microseconds current_time, bool preferred)
    void PushNodeVersion(const Config &config, CNode &pnode, const Peer &peer);
    void MaybeSendPing(CNode &node_to, Peer &peer,
                       std::chrono::microseconds now);
    void MaybeSendAddr(CNode &node, Peer &peer,
                       std::chrono::microseconds current_time)
    void MaybeSendSendHeaders(CNode &node, Peer &peer)
    void MaybeSendFeefilter(CNode &node, Peer &peer,
                            std::chrono::microseconds current_time)
    void RelayAddress(NodeId originator, const CAddress &addr, bool fReachable)

    Mutex cs_proofrequest;

    std::atomic<int> m_best_height{-1};
    std::chrono::seconds m_stale_tip_check_time{0s};

    const Options m_opts;

    bool RejectIncomingTxs(const CNode &peer) const;

    bool m_initial_sync_finished{false};

    mutable Mutex m_peer_mutex;
    std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
    const CNodeState *State(NodeId pnode) const

    std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};

        m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};

    std::map<BlockHash, std::pair<NodeId, bool>>

    std::atomic<std::chrono::seconds> m_block_stalling_timeout{

    bool AlreadyHaveTx(const TxId &txid, bool include_reconsiderable)
        !m_recent_confirmed_transactions_mutex);

    mutable Mutex m_recent_confirmed_transactions_mutex;
        GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};

    std::chrono::microseconds
    NextInvToInbounds(std::chrono::microseconds now,
                      std::chrono::seconds average_interval);

    mutable Mutex m_most_recent_block_mutex;
    std::shared_ptr<const CBlock> m_most_recent_block
        GUARDED_BY(m_most_recent_block_mutex);
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block
        GUARDED_BY(m_most_recent_block_mutex);

    Mutex m_headers_presync_mutex;
    using HeadersPresyncStats =
        std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
    std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats
        GUARDED_BY(m_headers_presync_mutex){};
    std::atomic_bool m_headers_presync_should_signal{false};

    bool IsBlockRequested(const BlockHash &hash)
    bool IsBlockRequestedFromOutbound(const BlockHash &hash)
    void RemoveBlockRequest(const BlockHash &hash,
                            std::optional<NodeId> from_peer)
    bool BlockRequested(const Config &config, NodeId nodeid,
                        std::list<QueuedBlock>::iterator **pit = nullptr)
    void FindNextBlocksToDownload(const Peer &peer, unsigned int count,
    void TryDownloadingHistoricalBlocks(
        const Peer &peer, unsigned int count,
                        const Peer &peer, CNodeState *state,
                        int nWindowEnd, const CChain *activeChain = nullptr,
                        NodeId *nodeStaller = nullptr)

    std::atomic<std::chrono::seconds> m_last_tip_update{0s};

                     const std::chrono::seconds mempool_req,
                     const std::chrono::seconds now)

    void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
                        const std::atomic<bool> &interruptMsgProc)
        peer.m_getdata_requests_mutex,

                      const std::shared_ptr<const CBlock> &block,
                      bool force_processing, bool min_pow_checked);

    typedef std::map<TxId, CTransactionRef> MapRelay;
    std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>>

    void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)

    std::vector<std::pair<TxHash, CTransactionRef>> vExtraTxnForCompact
        GUARDED_BY(g_msgproc_mutex);
    size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;

    void ProcessBlockAvailability(NodeId nodeid)
    bool BlockRequestAllowed(const CBlockIndex *pindex)
    bool AlreadyHaveBlock(const BlockHash &block_hash)
    bool AlreadyHaveProof(const avalanche::ProofId &proofid);
    void ProcessGetBlockData(const Config &config, CNode &pfrom, Peer &peer,
    bool PrepareBlockFilterRequest(CNode &node, Peer &peer,
                                   uint32_t start_height,
                                   uint32_t max_height_diff,
    uint32_t GetAvalancheVoteForBlock(const BlockHash &hash) const
    uint32_t GetAvalancheVoteForTx(const TxId &id) const
        !m_recent_confirmed_transactions_mutex);
    bool SetupAddressRelay(const CNode &node, Peer &peer)
    void AddAddressKnown(Peer &peer, const CAddress &addr)
    void PushAddress(Peer &peer, const CAddress &addr)
    bool ReceivedAvalancheProof(CNode &node, Peer &peer,
                            const std::chrono::seconds now)
    bool isPreferredDownloadPeer(const CNode &pfrom);
const CNodeState *PeerManagerImpl::State(NodeId pnode) const
    std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
    if (it == m_node_states.end()) {

CNodeState *PeerManagerImpl::State(NodeId pnode)
    return const_cast<CNodeState *>(std::as_const(*this).State(pnode));

static bool IsAddrCompatible(const Peer &peer, const CAddress &addr) {

void PeerManagerImpl::AddAddressKnown(Peer &peer, const CAddress &addr) {
    assert(peer.m_addr_known);
    peer.m_addr_known->insert(addr.GetKey());

void PeerManagerImpl::PushAddress(Peer &peer, const CAddress &addr) {
    assert(peer.m_addr_known);
    if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) &&
        IsAddrCompatible(peer, addr)) {
        if (peer.m_addrs_to_send.size() >= m_opts.max_addr_to_send) {
            peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] =
            peer.m_addrs_to_send.push_back(addr);

static void AddKnownTx(Peer &peer, const TxId &txid) {
    auto tx_relay = peer.GetTxRelay();
    LOCK(tx_relay->m_tx_inventory_mutex);
    tx_relay->m_tx_inventory_known_filter.insert(txid);

    if (peer.m_proof_relay != nullptr) {
        LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
        peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);

bool PeerManagerImpl::isPreferredDownloadPeer(const CNode &pfrom) {
    const CNodeState *state = State(pfrom.GetId());
    return state && state->fPreferredDownload;

static bool CanServeBlocks(const Peer &peer) {

static bool IsLimitedPeer(const Peer &peer) {

std::chrono::microseconds
PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
                                   std::chrono::seconds average_interval) {
    if (m_next_inv_to_inbounds.load() < now) {
    return m_next_inv_to_inbounds;
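// --- Illustrative sketch (not part of the original file) -------------------
// NextInvToInbounds keeps one shared "next send" time for all inbound peers:
// once the stored time is in the past it is advanced by a randomized delay
// around average_interval, so inbound connections cannot individually probe
// transaction arrival times. The distribution below is an assumption for the
// sketch, and the names are hypothetical:
#include <chrono>
#include <cstdint>
#include <random>

std::chrono::microseconds
ExampleNextSendTime(std::chrono::microseconds now,
                    std::chrono::seconds average_interval,
                    std::mt19937_64 &rng) {
    // Exponential delay gives a memoryless, Poisson-like send schedule.
    std::exponential_distribution<double> dist(
        1.0 / std::chrono::duration_cast<std::chrono::microseconds>(
                  average_interval)
                  .count());
    return now +
           std::chrono::microseconds{static_cast<int64_t>(dist(rng)) + 1};
}
// --- end sketch -------------------------------------------------------------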
bool PeerManagerImpl::IsBlockRequested(const BlockHash &hash) {
    return mapBlocksInFlight.count(hash);

bool PeerManagerImpl::IsBlockRequestedFromOutbound(const BlockHash &hash) {
    for (auto range = mapBlocksInFlight.equal_range(hash);
         range.first != range.second; range.first++) {
        auto [nodeid, block_it] = range.first->second;
        CNodeState &nodestate = *Assert(State(nodeid));
        if (!nodestate.m_is_inbound) {

void PeerManagerImpl::RemoveBlockRequest(const BlockHash &hash,
                                         std::optional<NodeId> from_peer) {
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
    while (range.first != range.second) {
        auto [node_id, list_it] = range.first->second;
        if (from_peer && *from_peer != node_id) {
        CNodeState &state = *Assert(State(node_id));
        if (state.vBlocksInFlight.begin() == list_it) {
            state.m_downloading_since =
                std::max(state.m_downloading_since,
                         GetTime<std::chrono::microseconds>());
        state.vBlocksInFlight.erase(list_it);
        if (state.vBlocksInFlight.empty()) {
            m_peers_downloading_from--;
        state.m_stalling_since = 0us;
        range.first = mapBlocksInFlight.erase(range.first);

bool PeerManagerImpl::BlockRequested(const Config &config, NodeId nodeid,
                                     std::list<QueuedBlock>::iterator **pit) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);
    for (auto range = mapBlocksInFlight.equal_range(hash);
         range.first != range.second; range.first++) {
        if (range.first->second.first == nodeid) {
                *pit = &range.first->second.second;
    RemoveBlockRequest(hash, nodeid);
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
        state->vBlocksInFlight.end(),
        {&block, std::unique_ptr<PartiallyDownloadedBlock>(
                     pit ? new PartiallyDownloadedBlock(config, &m_mempool)
    if (state->vBlocksInFlight.size() == 1) {
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
        m_peers_downloading_from++;
    auto itInFlight = mapBlocksInFlight.insert(
        std::make_pair(hash, std::make_pair(nodeid, it)));
        *pit = &itInFlight->second.second;
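// --- Illustrative sketch (not part of the original file) -------------------
// mapBlocksInFlight is a multimap from block hash to (peer id, iterator into
// that peer's per-peer download list), which is why the same block can be
// tracked while it is requested from more than one peer and why removal can
// be scoped to a single peer. A simplified model with hypothetical types:
#include <cstdint>
#include <list>
#include <map>
#include <string>
#include <utility>

struct ExampleDownloadTracker {
    using PeerId = int64_t;
    using Hash = std::string;

    std::map<PeerId, std::list<Hash>> per_peer_queue;
    std::multimap<Hash, std::pair<PeerId, std::list<Hash>::iterator>> in_flight;

    void Request(PeerId peer, const Hash &hash) {
        auto &queue = per_peer_queue[peer];
        auto it = queue.insert(queue.end(), hash);
        in_flight.emplace(hash, std::make_pair(peer, it));
    }

    bool IsRequested(const Hash &hash) const {
        return in_flight.count(hash) > 0;
    }
};
// --- end sketch -------------------------------------------------------------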
void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) {
    if (m_opts.ignore_incoming_txs) {
    CNodeState *nodestate = State(nodeid);
    if (!nodestate->m_provides_cmpctblocks) {
    int num_outbound_hb_peers = 0;
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
         it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
        CNodeState *state = State(*it);
        if (state != nullptr && !state->m_is_inbound) {
            ++num_outbound_hb_peers;
    if (nodestate->m_is_inbound) {
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
            num_outbound_hb_peers == 1) {
            CNodeState *remove_node =
                State(lNodesAnnouncingHeaderAndIDs.front());
            if (remove_node != nullptr && !remove_node->m_is_inbound) {
                std::swap(lNodesAnnouncingHeaderAndIDs.front(),
                          *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
    if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            lNodesAnnouncingHeaderAndIDs.front(), [this](CNode *pnodeStop) {
                m_connman.PushMessage(
                    pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
                                   .Make(NetMsgType::SENDCMPCT,
                                         CMPCTBLOCKS_VERSION));
                pnodeStop->m_bip152_highbandwidth_to = false;
            lNodesAnnouncingHeaderAndIDs.pop_front();
    lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());

bool PeerManagerImpl::TipMayBeStale() {
    if (m_last_tip_update.load() == 0s) {
        m_last_tip_update = GetTime<std::chrono::seconds>();
    return m_last_tip_update.load() <
               GetTime<std::chrono::seconds>() -
           mapBlocksInFlight.empty();

bool PeerManagerImpl::CanDirectFetch() {

static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
    if (state->pindexBestKnownBlock &&
        pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
    if (state->pindexBestHeaderSent &&
        pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {

void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);
    if (!state->hashLastUnknownBlock.IsNull()) {
        if (state->pindexBestKnownBlock == nullptr ||
            pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
            state->pindexBestKnownBlock = pindex;
        state->hashLastUnknownBlock.SetNull();

void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid,
    CNodeState *state = State(nodeid);
    assert(state != nullptr);
    ProcessBlockAvailability(nodeid);
    if (state->pindexBestKnownBlock == nullptr ||
        pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
        state->pindexBestKnownBlock = pindex;
        state->hashLastUnknownBlock = hash;
void PeerManagerImpl::FindNextBlocksToDownload(
    const Peer &peer, unsigned int count,
    std::vector<const CBlockIndex *> &vBlocks, NodeId &nodeStaller) {
    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(peer.m_id);
    assert(state != nullptr);
    ProcessBlockAvailability(peer.m_id);
    if (state->pindexBestKnownBlock == nullptr ||
        state->pindexBestKnownBlock->nChainWork <
        state->pindexBestKnownBlock->nChainWork <
    if (state->pindexLastCommonBlock == nullptr) {
        state->pindexLastCommonBlock =
                .ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
        state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd,

void PeerManagerImpl::TryDownloadingHistoricalBlocks(
    const Peer &peer, unsigned int count,
    std::vector<const CBlockIndex *> &vBlocks, const CBlockIndex *from_tip,
    if (vBlocks.size() >= count) {
    vBlocks.reserve(count);
    CNodeState *state = Assert(State(peer.m_id));
    if (state->pindexBestKnownBlock == nullptr ||
        state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) !=
    FindNextBlocks(vBlocks, peer, state, from_tip, count,

void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex *> &vBlocks,
                                     const Peer &peer, CNodeState *state,
                                     unsigned int count, int nWindowEnd,
                                     const CChain *activeChain,
    std::vector<const CBlockIndex *> vToFetch;
        std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    while (pindexWalk->nHeight < nMaxHeight) {
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(
            pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
            if (pindex->nStatus.hasData() ||
                (activeChain && activeChain->Contains(pindex))) {
                    state->pindexLastCommonBlock = pindex;
            } else if (!IsBlockRequested(pindex->GetBlockHash())) {
                if (pindex->nHeight > nWindowEnd) {
                    if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
                        *nodeStaller = waitingfor;
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
            } else if (waitingfor == -1) {
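// --- Illustrative sketch (not part of the original file) -------------------
// The loop above advances in batches: it jumps forward with GetAncestor()
// and then fills the batch backwards through pprev pointers, so candidate
// blocks come out in height order without walking the chain one entry at a
// time. A hypothetical stand-alone model of that backward fill (assumes
// batch_size >= 1 and that batch_tip has at least batch_size - 1 ancestors):
#include <vector>

struct ExampleIndexEntry {
    int height{0};
    const ExampleIndexEntry *pprev{nullptr};
};

std::vector<const ExampleIndexEntry *>
ExampleBatchFromTip(const ExampleIndexEntry *batch_tip, int batch_size) {
    std::vector<const ExampleIndexEntry *> batch(batch_size);
    batch[batch_size - 1] = batch_tip;
    for (int i = batch_size - 1; i > 0; i--) {
        batch[i - 1] = batch[i]->pprev; // walk back one block per slot
    }
    return batch;
}
// --- end sketch -------------------------------------------------------------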
template <class InvId>
    return !node.HasPermission(

template <class InvId>
static std::chrono::microseconds
                   std::chrono::microseconds current_time, bool preferred) {
    auto delay = std::chrono::microseconds{0};
    return current_time + delay;
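// --- Illustrative sketch (not part of the original file) -------------------
// The helper above starts from a zero delay and returns current_time + delay;
// the elided middle adds penalties for some peers before the announcement is
// turned into a scheduled request. A hypothetical version (the constants and
// the overloaded flag are assumptions for the sketch):
#include <chrono>

std::chrono::microseconds
ExampleComputeRequestTime(std::chrono::microseconds current_time,
                          bool preferred, bool overloaded) {
    using namespace std::chrono_literals;
    auto delay = std::chrono::microseconds{0};
    if (!preferred) {
        delay += 2s; // give preferred peers a head start
    }
    if (overloaded) {
        delay += 2s; // back off from peers with many requests in flight
    }
    return current_time + delay;
}
// --- end sketch -------------------------------------------------------------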
void PeerManagerImpl::PushNodeVersion(const Config &config, CNode &pnode,
    uint64_t my_services{peer.m_our_services};
    const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
    const int nNodeStartingHeight{m_best_height};
    const bool tx_relay{!RejectIncomingTxs(pnode)};
                  nTime, your_services, addr_you, my_services,
                  nNodeStartingHeight, tx_relay, extraEntropy));
                 "send version message: version %d, blocks=%d, them=%s, "
                 "txrelay=%d, peer=%d\n",
                 "send version message: version %d, blocks=%d, "
                 "txrelay=%d, peer=%d\n",

void PeerManagerImpl::AddTxAnnouncement(
    std::chrono::microseconds current_time) {
    const bool preferred = isPreferredDownloadPeer(node);
                                      current_time, preferred);
    m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);

void PeerManagerImpl::AddProofAnnouncement(
    std::chrono::microseconds current_time, bool preferred) {
    m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);

void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node,
                                                  int64_t time_in_seconds) {
    CNodeState *state = State(node);
    state->m_last_block_announcement = time_in_seconds;

void PeerManagerImpl::InitializeNode(const Config &config, CNode &node,
    m_node_states.emplace_hint(m_node_states.end(),
                               std::piecewise_construct,
                               std::forward_as_tuple(nodeid),
                               std::forward_as_tuple(node.IsInboundConn()));
    assert(m_txrequest.Count(nodeid) == 0);
    PeerRef peer = std::make_shared<Peer>(nodeid, our_services, !!m_avalanche);
    m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
    if (!node.IsInboundConn()) {
        PushNodeVersion(config, node, *peer);

void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler &scheduler) {
    for (const TxId &txid : unbroadcast_txids) {
        if (m_mempool.exists(txid)) {
            RelayTransaction(txid);
    auto unbroadcasted_proofids =
            auto it = unbroadcasted_proofids.begin();
            while (it != unbroadcasted_proofids.end()) {
                if (!pm.isBoundToPeer(*it)) {
                    pm.removeUnbroadcastProof(*it);
                    it = unbroadcasted_proofids.erase(it);
            return unbroadcasted_proofids;
    for (const auto &proofid : unbroadcasted_proofids) {
        RelayProof(proofid);
    const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                              reattemptBroadcastInterval);
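// --- Illustrative sketch (not part of the original file) -------------------
// ReattemptInitialBroadcast re-arms itself: each run schedules the next run
// at a randomized interval (10 minutes plus jitter), which avoids a fixed,
// fingerprintable rebroadcast cadence. The same shape with a hypothetical
// scheduler stand-in:
#include <chrono>
#include <cstdint>
#include <functional>
#include <random>
#include <utility>

struct ExampleScheduler {
    // Stand-in for the real scheduler: records the next task and its delay.
    std::function<void()> next_task;
    std::chrono::milliseconds next_delay{0};
    void ScheduleFromNow(std::function<void()> task,
                         std::chrono::milliseconds delay) {
        next_task = std::move(task);
        next_delay = delay;
    }
};

void ExampleRebroadcastLoop(ExampleScheduler &scheduler, std::mt19937_64 &rng) {
    // ... perform one rebroadcast pass here ...
    std::uniform_int_distribution<int64_t> jitter_ms(0, 5 * 60 * 1000);
    const auto next =
        std::chrono::milliseconds{10 * 60 * 1000 + jitter_ms(rng)};
    scheduler.ScheduleFromNow(
        [&scheduler, &rng] { ExampleRebroadcastLoop(scheduler, rng); }, next);
}
// --- end sketch -------------------------------------------------------------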
void PeerManagerImpl::UpdateAvalancheStatistics() const {

void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
    const auto now = GetTime<std::chrono::seconds>();
    std::vector<NodeId> avanode_ids;
    bool fQuorumEstablished;
    bool fShouldRequestMoreNodes;
        fShouldRequestMoreNodes =
            avanode_ids.push_back(pnode->GetId());
        PeerRef peer = GetPeerRef(pnode->GetId());
        if (peer == nullptr) {
        if (peer->m_proof_relay &&
            now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
            peer->m_proof_relay->sharedProofs = {};
    if (avanode_ids.empty()) {
    for (NodeId avanodeId : avanode_ids) {
        const bool sentGetavaaddr =
                m_connman.PushMessage(
                    pavanode, CNetMsgMaker(pavanode->GetCommonVersion())
                                  .Make(NetMsgType::GETAVAADDR));
                PeerRef peer = GetPeerRef(avanodeId);
                WITH_LOCK(peer->m_addr_token_bucket_mutex,
                          peer->m_addr_token_bucket += m_opts.max_addr_to_send);
        if (sentGetavaaddr && fQuorumEstablished && !fShouldRequestMoreNodes) {
    avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
    for (NodeId nodeid : avanode_ids) {
        PeerRef peer = GetPeerRef(nodeid);
        if (peer->m_proof_relay) {
            peer->m_proof_relay->compactproofs_requested = true;
    const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
    scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
                              avalanchePeriodicNetworkingInterval);

void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node) {
    PeerRef peer = RemovePeer(nodeid);
    misbehavior = WITH_LOCK(peer->m_misbehavior_mutex,
                            return peer->m_misbehavior_score);
    m_peer_map.erase(nodeid);
    CNodeState *state = State(nodeid);
    assert(state != nullptr);
    if (state->fSyncStarted) {
    for (const QueuedBlock &entry : state->vBlocksInFlight) {
            mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
        while (range.first != range.second) {
            auto [node_id, list_it] = range.first->second;
            if (node_id != nodeid) {
            range.first = mapBlocksInFlight.erase(range.first);
    m_txrequest.DisconnectedPeer(nodeid);
    m_num_preferred_download_peers -= state->fPreferredDownload;
    m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
    assert(m_peers_downloading_from >= 0);
    m_outbound_peers_with_protect_from_disconnect -=
        state->m_chain_sync.m_protect;
    assert(m_outbound_peers_with_protect_from_disconnect >= 0);
    m_node_states.erase(nodeid);
    if (m_node_states.empty()) {
        assert(mapBlocksInFlight.empty());
        assert(m_num_preferred_download_peers == 0);
        assert(m_peers_downloading_from == 0);
        assert(m_outbound_peers_with_protect_from_disconnect == 0);
        assert(m_txrequest.Size() == 0);
        return orphanage.Size();
    if (node.fSuccessfullyConnected && misbehavior == 0 &&
        !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
        LOCK(m_headers_presync_mutex);
        m_headers_presync_stats.erase(nodeid);
    WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const {
    auto it = m_peer_map.find(id);
    return it != m_peer_map.end() ? it->second : nullptr;

PeerRef PeerManagerImpl::RemovePeer(NodeId id) {
    auto it = m_peer_map.find(id);
    if (it != m_peer_map.end()) {
        ret = std::move(it->second);
        m_peer_map.erase(it);

bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid,
    const CNodeState *state = State(nodeid);
    if (state == nullptr) {
                                ? state->pindexBestKnownBlock->nHeight
                                  ? state->pindexLastCommonBlock->nHeight
    for (const QueuedBlock &queue : state->vBlocksInFlight) {
    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) {
    auto ping_wait{0us};
    if ((0 != peer->m_ping_nonce_sent) &&
        (0 != peer->m_ping_start.load().count())) {
            GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
    if (auto tx_relay = peer->GetTxRelay()) {
                               return tx_relay->m_relay_txs);
        LOCK(peer->m_headers_sync_mutex);
        if (peer->m_headers_sync) {

void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef &tx) {
    if (m_opts.max_extra_txs <= 0) {
    if (!vExtraTxnForCompact.size()) {
        vExtraTxnForCompact.resize(m_opts.max_extra_txs);
    vExtraTxnForCompact[vExtraTxnForCompactIt] =
        std::make_pair(tx->GetHash(), tx);
    vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
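// --- Illustrative sketch (not part of the original file) -------------------
// vExtraTxnForCompact behaves as a fixed-size ring buffer: the write index
// wraps modulo the capacity, so the oldest "extra" transaction kept for
// compact-block reconstruction is evicted first. A minimal stand-alone
// version with hypothetical types:
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

class ExampleRingBuffer {
    std::vector<std::string> m_slots;
    size_t m_next{0};

public:
    explicit ExampleRingBuffer(size_t capacity) : m_slots(capacity) {}

    void Add(std::string value) {
        if (m_slots.empty()) {
            return; // caching disabled
        }
        m_slots[m_next] = std::move(value);
        m_next = (m_next + 1) % m_slots.size(); // oldest entry overwritten next
    }
};
// --- end sketch -------------------------------------------------------------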
void PeerManagerImpl::Misbehaving(Peer &peer, int howmuch,
                                  const std::string &message) {
    LOCK(peer.m_misbehavior_mutex);
    const int score_before{peer.m_misbehavior_score};
    peer.m_misbehavior_score += howmuch;
    const int score_now{peer.m_misbehavior_score};

    const std::string message_prefixed =
        message.empty() ? "" : (": " + message);
    std::string warning;

        warning = " DISCOURAGE THRESHOLD EXCEEDED";
        peer.m_should_discourage = true;

             score_before, score_now, warning, message_prefixed);
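// --- Illustrative sketch (not part of the original file) -------------------
// The scoring above is a simple accumulator guarded by a per-peer mutex:
// crossing the discourage threshold only sets a flag; the actual
// discouragement or disconnect is applied later in the message-handling
// path. A hypothetical stand-alone version (the threshold value is an
// assumption):
#include <mutex>

struct ExampleMisbehaviorTracker {
    static constexpr int kDiscourageThreshold = 100; // assumed value
    std::mutex mutex;
    int score{0};
    bool should_discourage{false};

    void Bump(int howmuch) {
        std::lock_guard<std::mutex> lock(mutex);
        const int before = score;
        score += howmuch;
        if (before < kDiscourageThreshold && score >= kDiscourageThreshold) {
            should_discourage = true; // acted upon later, not here
        }
    }
};
// --- end sketch -------------------------------------------------------------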
bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid,
                                              bool via_compact_block,
                                              const std::string &message) {
    PeerRef peer{GetPeerRef(nodeid)};
            if (!via_compact_block) {
                Misbehaving(*peer, 100, message);
            CNodeState *node_state = State(nodeid);
            if (node_state == nullptr) {
            if (!via_compact_block && !node_state->m_is_inbound) {
                Misbehaving(*peer, 100, message);
            Misbehaving(*peer, 100, message);
            Misbehaving(*peer, 10, message);
    if (message != "") {

bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid,
                                           const std::string &message) {
    PeerRef peer{GetPeerRef(nodeid)};
            Misbehaving(*peer, 100, message);
    if (message != "") {

bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex *pindex) {
           (m_chainman.m_best_header != nullptr) &&
           (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() <
                *m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,

std::optional<std::string>
PeerManagerImpl::FetchBlock(const Config &config, NodeId peer_id,
        return "Loading blocks ...";
    CNodeState *state = State(peer_id);
    if (state == nullptr) {
        return "Peer does not exist";
    RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
    if (!BlockRequested(config, peer_id, block_index)) {
        return "Already requested from this peer";
        const CNetMsgMaker msgMaker(node->GetCommonVersion());
        this->m_connman.PushMessage(
            node, msgMaker.Make(NetMsgType::GETDATA, invs));
        return "Node not fully connected";
    return std::nullopt;

std::unique_ptr<PeerManager>
    return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,

    : m_rng{opts.deterministic_rng},
      m_chainparams(chainman.GetParams()), m_connman(connman),
      m_addrman(addrman), m_banman(banman), m_chainman(chainman),
      m_mempool(pool), m_avalanche(avalanche), m_opts{opts} {}

void PeerManagerImpl::StartScheduledTasks(CScheduler &scheduler) {
                  "peer eviction timer should be less than stale tip check timer");
        this->CheckForStaleTipAndEvictPeers();
    const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                              reattemptBroadcastInterval);
        UpdateAvalancheStatistics();
    const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
    scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
                              avalanchePeriodicNetworkingInterval);
void PeerManagerImpl::BlockConnected(
    const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) {
    m_last_tip_update = GetTime<std::chrono::seconds>();
        LOCK(m_recent_confirmed_transactions_mutex);
            m_recent_confirmed_transactions.insert(ptx->GetId());
    for (const auto &ptx : pblock->vtx) {
        m_txrequest.ForgetInvId(ptx->GetId());
    auto stalling_timeout = m_block_stalling_timeout.load();
        const auto new_timeout =
            std::max(std::chrono::duration_cast<std::chrono::seconds>(
                         stalling_timeout * 0.85),
        if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout,

void PeerManagerImpl::BlockDisconnected(
    const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
    LOCK(m_recent_confirmed_transactions_mutex);
    m_recent_confirmed_transactions.reset();

void PeerManagerImpl::NewPoWValidBlock(
    const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
        std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
    if (pindex->nHeight <= m_highest_fast_announce) {
    m_highest_fast_announce = pindex->nHeight;
    const std::shared_future<CSerializedNetMsg> lazy_ser{
        std::async(std::launch::deferred, [&] {
        LOCK(m_most_recent_block_mutex);
        m_most_recent_block_hash = hashBlock;
        m_most_recent_block = pblock;
        m_most_recent_compact_block = pcmpctblock;
        [this, pindex, &lazy_ser, &hashBlock](CNode *pnode)
            ProcessBlockAvailability(pnode->GetId());
            CNodeState &state = *State(pnode->GetId());
            if (state.m_requested_hb_cmpctblocks &&
                !PeerHasHeader(&state, pindex) &&
                PeerHasHeader(&state, pindex->pprev)) {
                         "%s sending header-and-ids %s to peer=%d\n",
                         "PeerManager::NewPoWValidBlock",
                         hashBlock.ToString(), pnode->GetId());
                m_connman.PushMessage(pnode, ser_cmpctblock.Copy());
                state.pindexBestHeaderSent = pindex;
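// --- Illustrative sketch (not part of the original file) -------------------
// The std::async(std::launch::deferred, ...) + std::shared_future pattern
// above builds the serialized compact-block message only if some peer
// actually needs it, and then reuses the one result for every such peer.
// A hypothetical stand-alone version:
#include <future>
#include <string>
#include <vector>

std::vector<std::string>
ExampleFanOutLazily(const std::vector<bool> &peer_wants_it) {
    const std::shared_future<std::string> lazy{std::async(
        std::launch::deferred, [] { return std::string("serialized-msg"); })};

    std::vector<std::string> sent;
    for (bool wants : peer_wants_it) {
        if (wants) {
            sent.push_back(lazy.get()); // serialization happens at most once
        }
    }
    return sent;
}
// --- end sketch -------------------------------------------------------------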
void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew,
                                      bool fInitialDownload) {
    SetBestHeight(pindexNew->nHeight);
    if (fInitialDownload) {
        std::vector<BlockHash> vHashes;
        while (pindexToAnnounce != pindexFork) {
            pindexToAnnounce = pindexToAnnounce->pprev;
        for (auto &it : m_peer_map) {
            Peer &peer = *it.second;
            LOCK(peer.m_block_inv_mutex);
                peer.m_blocks_for_headers_relay.push_back(hash);

void PeerManagerImpl::BlockChecked(const CBlock &block,
    std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
        mapBlockSource.find(hash);
    if (state.IsInvalid() && it != mapBlockSource.end() &&
        State(it->second.first)) {
        MaybePunishNodeForBlock(it->second.first, state,
                                !it->second.second);
        mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
    if (it != mapBlockSource.end()) {
        mapBlockSource.erase(it);

bool PeerManagerImpl::AlreadyHaveTx(const TxId &txid,
                                    bool include_reconsiderable) {
        hashRecentRejectsChainTip) {
        hashRecentRejectsChainTip =
        m_recent_rejects.reset();
        m_recent_rejects_package_reconsiderable.reset();
        return orphanage.HaveTx(txid);
        return conflicting.HaveTx(txid);
    if (include_reconsiderable &&
        m_recent_rejects_package_reconsiderable.contains(txid)) {
        LOCK(m_recent_confirmed_transactions_mutex);
        if (m_recent_confirmed_transactions.contains(txid)) {
    return m_recent_rejects.contains(txid) || m_mempool.exists(txid);
bool PeerManagerImpl::AlreadyHaveBlock(const BlockHash &block_hash) {

    if (!Assume(m_avalanche)) {
    if (localProof && localProof->getId() == proofid) {

void PeerManagerImpl::SendPings() {
    for (auto &it : m_peer_map) {
        it.second->m_ping_queued = true;

void PeerManagerImpl::RelayTransaction(const TxId &txid) {
    for (auto &it : m_peer_map) {
        Peer &peer = *it.second;
        auto tx_relay = peer.GetTxRelay();
        LOCK(tx_relay->m_tx_inventory_mutex);
        if (tx_relay->m_next_inv_send_time == 0s) {
        if (!tx_relay->m_tx_inventory_known_filter.contains(txid)) {
            tx_relay->m_tx_inventory_to_send.insert(txid);

    for (auto &it : m_peer_map) {
        Peer &peer = *it.second;
        if (!peer.m_proof_relay) {
        LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
        if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
            peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);

void PeerManagerImpl::RelayAddress(NodeId originator, const CAddress &addr,
    const auto current_time{GetTime<std::chrono::seconds>()};
    const uint64_t time_addr{
        (static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) /
    unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
    std::array<std::pair<uint64_t, Peer *>, 2> best{
        {{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());
    for (auto &[id, peer] : m_peer_map) {
        if (peer->m_addr_relay_enabled && id != originator &&
            IsAddrCompatible(*peer, addr)) {
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
                              best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, peer.get());
    for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
        PushAddress(*best[i].second, addr);
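// --- Illustrative sketch (not part of the original file) -------------------
// The selection above keeps the top one or two candidates by a keyed hash,
// shifting lower entries down whenever a better candidate appears, so the
// relay targets are deterministic per (address, time bucket) but rotate over
// time. A simplified top-N pick with a hypothetical hash function:
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>

std::vector<std::string>
ExamplePickRelayTargets(const std::vector<std::string> &peers,
                        uint64_t addr_time_key, unsigned int n_targets) {
    std::array<std::pair<uint64_t, const std::string *>, 2> best{
        {{0, nullptr}, {0, nullptr}}};
    n_targets = std::min<unsigned int>(n_targets, best.size());

    for (const auto &peer : peers) {
        // Hypothetical stand-in for a keyed hash of (peer, address, time).
        const uint64_t hash_key =
            std::hash<std::string>{}(peer) ^ addr_time_key;
        for (unsigned int i = 0; i < n_targets; i++) {
            if (hash_key > best[i].first) {
                std::copy(best.begin() + i, best.begin() + n_targets - 1,
                          best.begin() + i + 1);
                best[i] = {hash_key, &peer};
                break;
            }
        }
    }

    std::vector<std::string> out;
    for (unsigned int i = 0; i < n_targets && best[i].first != 0; i++) {
        out.push_back(*best[i].second);
    }
    return out;
}
// --- end sketch -------------------------------------------------------------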
void PeerManagerImpl::ProcessGetBlockData(const Config &config, CNode &pfrom,
                                          Peer &peer, const CInv &inv) {
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
        LOCK(m_most_recent_block_mutex);
        a_recent_block = m_most_recent_block;
        a_recent_compact_block = m_most_recent_compact_block;
    bool need_activate_chain = false;
                need_activate_chain = true;
    if (need_activate_chain) {
                state, a_recent_block, m_avalanche)) {
    if (!BlockRequestAllowed(pindex)) {
                 "%s: ignoring request from peer=%i for old "
                 "block that isn't in the main chain\n",
                 __func__, pfrom.GetId());
        (((m_chainman.m_best_header != nullptr) &&
          (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() >
                 "historical block serving limit reached, disconnect peer=%d\n",
                 "Ignore block request below NODE_NETWORK_LIMITED "
                 "threshold, disconnect peer=%d\n",
    if (!pindex->nStatus.hasData()) {
    std::shared_ptr<const CBlock> pblock;
    if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
        pblock = a_recent_block;
        std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
            assert(!"cannot load block from disk");
        pblock = pblockRead;
        bool sendMerkleBlock = false;
        if (auto tx_relay = peer.GetTxRelay()) {
            LOCK(tx_relay->m_bloom_filter_mutex);
            if (tx_relay->m_bloom_filter) {
                sendMerkleBlock = true;
                merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
        if (sendMerkleBlock) {
            typedef std::pair<size_t, uint256> PairType;
        if (CanDirectFetch() &&
            if (a_recent_compact_block &&
                a_recent_compact_block->header.GetHash() ==
                                     *a_recent_compact_block));
        LOCK(peer.m_block_inv_mutex);
        if (hash == peer.m_continuation_block) {
            std::vector<CInv> vInv;
            vInv.push_back(CInv(
            peer.m_continuation_block = BlockHash();
PeerManagerImpl::FindTxForGetData(const Peer &peer, const TxId &txid,
                                  const std::chrono::seconds mempool_req,
                                  const std::chrono::seconds now) {
    auto txinfo = m_mempool.info(txid);
        if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
            return std::move(txinfo.tx);
        if (Assume(peer.GetTxRelay())
                ->m_recently_announced_invs.contains(txid)) {
            return std::move(txinfo.tx);
    auto mi = mapRelay.find(txid);
    if (mi != mapRelay.end()) {

PeerManagerImpl::FindProofForGetData(const Peer &peer,
                                     const std::chrono::seconds now) {
    bool send_unconditionally =
    if (send_unconditionally) {
    if (peer.m_proof_relay->m_recently_announced_proofs.contains(proofid)) {

void PeerManagerImpl::ProcessGetData(
    const std::atomic<bool> &interruptMsgProc) {
    auto tx_relay = peer.GetTxRelay();
    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
    std::vector<CInv> vNotFound;
    const auto now{GetTime<std::chrono::seconds>()};
    const auto mempool_req = tx_relay != nullptr
                                 ? tx_relay->m_last_mempool_req.load()
                                 : std::chrono::seconds::min();
    while (it != peer.m_getdata_requests.end()) {
        if (interruptMsgProc) {
        const CInv &inv = *it;
        if (it->IsMsgProof()) {
                vNotFound.push_back(inv);
            auto proof = FindProofForGetData(peer, proofid, now);
                vNotFound.push_back(inv);
        if (it->IsMsgTx()) {
            if (tx_relay == nullptr) {
                std::vector<TxId> parent_ids_to_add;
                    auto txiter = m_mempool.GetIter(tx->GetId());
                        auto &pentry = *txiter;
                            (*pentry)->GetMemPoolParentsConst();
                        parent_ids_to_add.reserve(parents.size());
                        for (const auto &parent : parents) {
                            if (parent.get()->GetTime() >
                                parent_ids_to_add.push_back(
                                    parent.get()->GetTx().GetId());
                for (const TxId &parent_txid : parent_ids_to_add) {
                    if (WITH_LOCK(tx_relay->m_tx_inventory_mutex,
                                  return !tx_relay->m_tx_inventory_known_filter
                                              .contains(parent_txid))) {
                        tx_relay->m_recently_announced_invs.insert(parent_txid);
                vNotFound.push_back(inv);
    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
        const CInv &inv = *it++;
            ProcessGetBlockData(config, pfrom, peer, inv);
    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
    if (!vNotFound.empty()) {
void PeerManagerImpl::SendBlockTransactions(
    for (size_t i = 0; i < req.indices.size(); i++) {
            Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");

bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
        Misbehaving(peer, 100, "header with invalid proof of work");
    if (!CheckHeadersAreContinuous(headers)) {
        Misbehaving(peer, 20, "non-continuous headers sequence");

    near_chaintip_work =

void PeerManagerImpl::HandleFewUnconnectingHeaders(
    CNode &pfrom, Peer &peer, const std::vector<CBlockHeader> &headers) {
    peer.m_num_unconnecting_headers_msgs++;
    if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
            "received header %s: missing prev block %s, sending getheaders "
            "(%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
            headers[0].hashPrevBlock.ToString(), best_header->nHeight,
            pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
        UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
    if (peer.m_num_unconnecting_headers_msgs %
        Misbehaving(peer, 20,
                    peer.m_num_unconnecting_headers_msgs));

bool PeerManagerImpl::CheckHeadersAreContinuous(
    const std::vector<CBlockHeader> &headers) const {
        if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
        hashLastBlock = header.GetHash();
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(
    Peer &peer, CNode &pfrom, std::vector<CBlockHeader> &headers) {
    if (peer.m_headers_sync) {
        auto result = peer.m_headers_sync->ProcessNextHeaders(
        if (result.request_more) {
            auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
            Assume(!locator.vHave.empty());
            if (!locator.vHave.empty()) {
                bool sent_getheaders =
                    MaybeSendGetHeaders(pfrom, locator, peer);
                if (sent_getheaders) {
                             "more getheaders (from %s) to peer=%d\n",
                             locator.vHave.front().ToString(), pfrom.GetId());
                             "error sending next getheaders (from %s) to "
                             "continue sync with peer=%d\n",
                             locator.vHave.front().ToString(), pfrom.GetId());
            peer.m_headers_sync.reset(nullptr);
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
            HeadersPresyncStats stats;
            stats.first = peer.m_headers_sync->GetPresyncWork();
            if (peer.m_headers_sync->GetState() ==
                stats.second = {peer.m_headers_sync->GetPresyncHeight(),
                                peer.m_headers_sync->GetPresyncTime()};
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats[pfrom.GetId()] = stats;
                m_headers_presync_stats.find(m_headers_presync_bestpeer);
            bool best_updated = false;
            if (best_it == m_headers_presync_stats.end()) {
                const HeadersPresyncStats *stat_best{nullptr};
                for (const auto &[_peer, _stat] : m_headers_presync_stats) {
                    if (!stat_best || _stat > *stat_best) {
                m_headers_presync_bestpeer = peer_best;
                best_updated = (peer_best == pfrom.GetId());
            } else if (best_it->first == pfrom.GetId() ||
                       stats > best_it->second) {
                m_headers_presync_bestpeer = pfrom.GetId();
                best_updated = true;
            if (best_updated && stats.second.has_value()) {
                m_headers_presync_should_signal = true;
        if (result.success) {
            headers.swap(result.pow_validated_headers);
        return result.success;
bool PeerManagerImpl::TryLowWorkHeadersSync(
    std::vector<CBlockHeader> &headers) {
    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
    if (total_work < minimum_chain_work) {
            LOCK(peer.m_headers_sync_mutex);
            peer.m_headers_sync.reset(
                chain_start_header, minimum_chain_work));
            (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
                     "Ignoring low-work chain (height=%u) from peer=%d\n",
                     chain_start_header->nHeight + headers.size(),

bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex *header) {
    return header != nullptr &&
           ((m_chainman.m_best_header != nullptr &&
                 m_chainman.m_best_header->GetAncestor(header->nHeight)) ||

bool PeerManagerImpl::MaybeSendGetHeaders(CNode &pfrom,
    if (current_time - peer.m_last_getheaders_timestamp >
        peer.m_last_getheaders_timestamp = current_time;

void PeerManagerImpl::HeadersDirectFetchBlocks(const Config &config,
    CNodeState *nodestate = State(pfrom.GetId());
    std::vector<const CBlockIndex *> vToFetch;
        if (!pindexWalk->nStatus.hasData() &&
            vToFetch.push_back(pindexWalk);
        pindexWalk = pindexWalk->pprev;
        std::vector<CInv> vGetData;
            if (nodestate->vBlocksInFlight.size() >=
            BlockRequested(config, pfrom.GetId(), *pindex);
        if (vGetData.size() > 1) {
                     "Downloading blocks toward %s (%d) via headers "
        if (vGetData.size() > 0) {
            if (!m_opts.ignore_incoming_txs &&
                nodestate->m_provides_cmpctblocks && vGetData.size() == 1 &&
                mapBlocksInFlight.size() == 1 &&
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(
    bool received_new_header, bool may_have_more_headers) {
    if (peer.m_num_unconnecting_headers_msgs > 0) {
            "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n",
            pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
        peer.m_num_unconnecting_headers_msgs = 0;
    CNodeState *nodestate = State(pfrom.GetId());
    if (received_new_header &&
        nodestate->m_last_block_announcement = GetTime();
        if (nodestate->pindexBestKnownBlock &&
            nodestate->pindexBestKnownBlock->nChainWork <
                LogPrintf("Disconnecting outbound peer %d -- headers "
                          "chain has insufficient work\n",
        nodestate->pindexBestKnownBlock != nullptr) {
        if (m_outbound_peers_with_protect_from_disconnect <
                nodestate->pindexBestKnownBlock->nChainWork >=
            !nodestate->m_chain_sync.m_protect) {
            nodestate->m_chain_sync.m_protect = true;
            ++m_outbound_peers_with_protect_from_disconnect;

void PeerManagerImpl::ProcessHeadersMessage(const Config &config, CNode &pfrom,
                                            std::vector<CBlockHeader> &&headers,
                                            bool via_compact_block) {
    size_t nCount = headers.size();
        LOCK(peer.m_headers_sync_mutex);
        if (peer.m_headers_sync) {
            peer.m_headers_sync.reset(nullptr);
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
    if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
    bool already_validated_work = false;
    bool have_headers_sync = false;
        LOCK(peer.m_headers_sync_mutex);
        already_validated_work =
            IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
        if (headers.empty()) {
        have_headers_sync = !!peer.m_headers_sync;
            headers[0].hashPrevBlock))};
    bool headers_connect_blockindex{chain_start_header != nullptr};
    if (!headers_connect_blockindex) {
            HandleFewUnconnectingHeaders(pfrom, peer, headers);
            Misbehaving(peer, 10, "invalid header received");
        last_received_header =
        if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
            already_validated_work = true;
        already_validated_work = true;
    if (!already_validated_work &&
        TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
    bool received_new_header{last_received_header == nullptr};
            state, &pindexLast)) {
        MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
                                "invalid header received");
        if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
                "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
                pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
    UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast,
                                      received_new_header,
    HeadersDirectFetchBlocks(config, pfrom, *pindexLast);
void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid,
                                       bool maybe_add_extra_compact_tx) {
    const TxId &txid = ptx->GetId();
        m_recent_rejects_package_reconsiderable.insert(txid);
        m_recent_rejects.insert(txid);
    m_txrequest.ForgetInvId(txid);
        AddToCompactExtraTransactions(ptx);
    MaybePunishNodeForTx(nodeid, state);
        return orphanage.EraseTx(txid);

    m_txrequest.ForgetInvId(tx->GetId());
        orphanage.EraseTx(tx->GetId());
        "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
        nodeid, tx->GetId().ToString(), m_mempool.size(),
    RelayTransaction(tx->GetId());

void PeerManagerImpl::ProcessPackageResult(
    const PackageToValidate &package_to_validate,
    const auto &package = package_to_validate.m_txns;
    const auto &senders = package_to_validate.m_senders;
        m_recent_rejects_package_reconsiderable.insert(GetPackageHash(package));
    if (!Assume(package.size() == 2)) {
    auto package_iter = package.rbegin();
    auto senders_iter = senders.rbegin();
    while (package_iter != package.rend()) {
        const auto &tx = *package_iter;
        const NodeId nodeid = *senders_iter;
        const auto it_result{package_result.m_tx_results.find(tx->GetId())};
            const auto &tx_result = it_result->second;
            switch (tx_result.m_result_type) {
                    ProcessValidTx(nodeid, tx);
                    ProcessInvalidTx(nodeid, tx, tx_result.m_state,

std::optional<PeerManagerImpl::PackageToValidate>
    const auto &parent_txid{ptx->GetId()};
    Assume(m_recent_rejects_package_reconsiderable.contains(parent_txid));
    const auto cpfp_candidates_same_peer{
    for (const auto &child : cpfp_candidates_same_peer) {
        Package maybe_cpfp_package{ptx, child};
        if (!m_recent_rejects_package_reconsiderable.contains(
            return PeerManagerImpl::PackageToValidate{ptx, child, nodeid,
    const auto cpfp_candidates_different_peer{
    std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
    std::iota(tx_indices.begin(), tx_indices.end(), 0);
    Shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
    for (const auto index : tx_indices) {
        const auto [child_tx, child_sender] =
            cpfp_candidates_different_peer.at(index);
        Package maybe_cpfp_package{ptx, child_tx};
        if (!m_recent_rejects_package_reconsiderable.contains(
            return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid,
    return std::nullopt;
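// --- Illustrative sketch (not part of the original file) -------------------
// The candidate walk above randomizes which child is tried first by shuffling
// a vector of indices (std::iota + shuffle) instead of reordering the
// candidate container itself. A hypothetical stand-alone version:
#include <algorithm>
#include <cstddef>
#include <functional>
#include <numeric>
#include <random>
#include <string>
#include <vector>

const std::string *ExamplePickFirstAcceptable(
    const std::vector<std::string> &candidates,
    const std::function<bool(const std::string &)> &acceptable,
    std::mt19937_64 &rng) {
    std::vector<size_t> indices(candidates.size());
    std::iota(indices.begin(), indices.end(), 0);
    std::shuffle(indices.begin(), indices.end(), rng);
    for (const size_t index : indices) {
        if (acceptable(candidates.at(index))) {
            return &candidates.at(index);
        }
    }
    return nullptr;
}
// --- end sketch -------------------------------------------------------------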
bool PeerManagerImpl::ProcessOrphanTx(const Config &config, Peer &peer) {
        return orphanage.GetTxToReconsider(peer.m_id);
        const TxId &orphanTxId = porphanTx->GetId();
            ProcessValidTx(peer.m_id, porphanTx);
                     " invalid orphan tx %s from peer=%d. %s\n",
            ProcessInvalidTx(peer.m_id, porphanTx, state,

bool PeerManagerImpl::PrepareBlockFilterRequest(
    const BlockHash &stop_hash, uint32_t max_height_diff,
    const bool supported_filter_type =
    if (!supported_filter_type) {
                 "peer %d requested unsupported block filter type: %d\n",
                 node.GetId(), static_cast<uint8_t>(filter_type));
        node.fDisconnect = true;
    if (!stop_index || !BlockRequestAllowed(stop_index)) {
        node.fDisconnect = true;
    uint32_t stop_height = stop_index->nHeight;
    if (start_height > stop_height) {
                 "peer %d sent invalid getcfilters/getcfheaders with "
                 "start height %d and stop height %d\n",
                 node.GetId(), start_height, stop_height);
        node.fDisconnect = true;
    if (stop_height - start_height >= max_height_diff) {
                 "peer %d requested too many cfilters/cfheaders: %d / %d\n",
                 node.GetId(), stop_height - start_height + 1, max_height_diff);
        node.fDisconnect = true;
    if (!filter_index) {
void PeerManagerImpl::ProcessGetCFilters(CNode &node, Peer &peer,
    uint8_t filter_type_ser;
    uint32_t start_height;
    vRecv >> filter_type_ser >> start_height >> stop_hash;
    if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
    std::vector<BlockFilter> filters;
                 "Failed to find block filter in index: filter_type=%s, "
                 "start_height=%d, stop_hash=%s\n",
    for (const auto &filter : filters) {

void PeerManagerImpl::ProcessGetCFHeaders(CNode &node, Peer &peer,
    uint8_t filter_type_ser;
    uint32_t start_height;
    vRecv >> filter_type_ser >> start_height >> stop_hash;
    if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
    if (start_height > 0) {
            stop_index->GetAncestor(static_cast<int>(start_height - 1));
                     "Failed to find block filter header in index: "
                     "filter_type=%s, block_hash=%s\n",
    std::vector<uint256> filter_hashes;
                 "Failed to find block filter hashes in index: filter_type=%s, "
                 "start_height=%d, stop_hash=%s\n",
                       stop_index->GetBlockHash(), prev_header, filter_hashes);

void PeerManagerImpl::ProcessGetCFCheckPt(CNode &node, Peer &peer,
    uint8_t filter_type_ser;
    vRecv >> filter_type_ser >> stop_hash;
    if (!PrepareBlockFilterRequest(
            node, peer, filter_type, 0, stop_hash,
            std::numeric_limits<uint32_t>::max(),
            stop_index, filter_index)) {
    for (int i = headers.size() - 1; i >= 0; i--) {
                     "Failed to find block filter header in index: "
                     "filter_type=%s, block_hash=%s\n",
PeerManagerImpl::GetAvalancheVoteForBlock(const BlockHash &hash) const {
    if (pindex->nStatus.isInvalid()) {
    if (pindex->nStatus.isOnParkedChain()) {
    if (pindex == pindexFork) {
    if (pindexFork != pindexTip) {
    if (!pindex->nStatus.hasData()) {

uint32_t PeerManagerImpl::GetAvalancheVoteForTx(const TxId &id) const {
    if (m_mempool.exists(id) ||
        WITH_LOCK(m_recent_confirmed_transactions_mutex,
                  return m_recent_confirmed_transactions.contains(id))) {
        return conflicting.HaveTx(id);
    if (m_recent_rejects.contains(id)) {
        return orphanage.HaveTx(id);

    const std::shared_ptr<const CBlock> &block,
    bool force_processing, bool min_pow_checked) {
    bool new_block{false};
                                   &new_block, m_avalanche);
        node.m_last_block_time = GetTime<std::chrono::seconds>();
        RemoveBlockRequest(block->GetHash(), std::nullopt);
        mapBlockSource.erase(block->GetHash());
4815void PeerManagerImpl::ProcessMessage(
4816 const Config &config,
CNode &pfrom,
const std::string &msg_type,
4817 CDataStream &vRecv,
const std::chrono::microseconds time_received,
4818 const std::atomic<bool> &interruptMsgProc) {
4824 PeerRef peer = GetPeerRef(pfrom.
GetId());
4825 if (peer ==
nullptr) {
4831 "Avalanche is not initialized, ignoring %s message\n",
4839 Misbehaving(*peer, 1,
"redundant version message");
4845 uint64_t nNonce = 1;
4848 std::string cleanSubVer;
4849 int starting_height = -1;
4851 uint64_t nExtraEntropy = 1;
4853 vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
4866 "peer=%d does not offer the expected services "
4867 "(%08x offered, %08x expected); disconnecting\n",
4868 pfrom.
GetId(), nServices,
4878 "peer=%d does not offer the avalanche service; disconnecting\n",
4887 "peer=%d using obsolete version %i; disconnecting\n",
4888 pfrom.
GetId(), nVersion);
4893 if (!vRecv.empty()) {
4902 if (!vRecv.empty()) {
4903 std::string strSubVer;
4907 if (!vRecv.empty()) {
4908 vRecv >> starting_height;
4910 if (!vRecv.empty()) {
4913 if (!vRecv.empty()) {
4914 vRecv >> nExtraEntropy;
4918 LogPrintf("connected to self at %s, disconnecting\n",
4931 PushNodeVersion(config, pfrom, *peer);
4935 const int greatest_common_version =
4949 peer->m_their_services = nServices;
4953 pfrom.cleanSubVer = cleanSubVer;
4955 peer->m_starting_height = starting_height;
4963 (fRelay || (peer->m_our_services & NODE_BLOOM))) {
4964 auto *const tx_relay = peer->SetTxRelay();
4966 LOCK(tx_relay->m_bloom_filter_mutex);
4968 tx_relay->m_relay_txs = fRelay;
4981 CNodeState *state = State(pfrom.GetId());
4982 state->fPreferredDownload =
4986 m_num_preferred_download_peers += state->fPreferredDownload;
4992 bool send_getaddr{false};
4994 send_getaddr = SetupAddressRelay(pfrom, *peer);
5006 peer->m_getaddr_sent = true;
5010 WITH_LOCK(peer->m_addr_token_bucket_mutex,
5011 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
5032 std::string remoteAddr;
5038 "receive version message: [%s] %s: version %d, blocks=%d, "
5039 "us=%s, txrelay=%d, peer=%d%s\n",
5041 peer->m_starting_height, addrMe.ToString(), fRelay,
5042 pfrom.GetId(), remoteAddr);
5044 int64_t currentTime = GetTime();
5045 int64_t nTimeOffset = nTime - currentTime;
5050 Misbehaving(*peer, 20,
5051 "Ignoring invalid timestamp in version message");
5061 "feeler connection completed peer=%d; disconnecting\n",
5070 Misbehaving(*peer, 10, "non-version message before version handshake");
5080 "ignoring redundant verack message from peer=%d\n",
5087 "New outbound peer connected: version: %d, blocks=%d, "
5089 pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(),
5111 AddKnownProof(*peer, localProof->getId());
5115 peer->m_proof_relay->m_recently_announced_proofs.insert(
5116 localProof->getId());
5121 if (auto tx_relay = peer->GetTxRelay()) {
5130 return tx_relay->m_tx_inventory_to_send.empty() &&
5131 tx_relay->m_next_inv_send_time == 0s));
5140 Misbehaving(*peer, 10, "non-verack message before version handshake");
5154 std::vector<CAddress> vAddr;
5158 if (!SetupAddressRelay(pfrom, *peer)) {
5164 if (vAddr.size() > m_opts.max_addr_to_send) {
5167 strprintf("%s message size = %u", msg_type, vAddr.size()));
5172 std::vector<CAddress> vAddrOk;
5173 const auto current_a_time{Now<NodeSeconds>()};
5176 const auto current_time = GetTime<std::chrono::microseconds>();
5178 LOCK(peer->m_addr_token_bucket_mutex);
5181 const auto time_diff =
5182 std::max(current_time - peer->m_addr_token_timestamp, 0us);
5183 const double increment =
5185 peer->m_addr_token_bucket =
5186 std::min<double>(peer->m_addr_token_bucket + increment,
5190 peer->m_addr_token_timestamp = current_time;
5192 const bool rate_limited =
5194 uint64_t num_proc = 0;
5195 uint64_t num_rate_limit = 0;
5196 Shuffle(vAddr.begin(), vAddr.end(), m_rng);
5198 if (interruptMsgProc) {
5203 LOCK(peer->m_addr_token_bucket_mutex);
5205 if (peer->m_addr_token_bucket < 1.0) {
5211 peer->m_addr_token_bucket -= 1.0;
5224 addr.nTime > current_a_time + 10min) {
5225 addr.nTime = current_a_time - 5 * 24h;
5227 AddAddressKnown(*peer, addr);
5236 if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent &&
5239 RelayAddress(pfrom.GetId(), addr, fReachable);
5243 vAddrOk.push_back(addr);
5246 peer->m_addr_processed += num_proc;
5247 peer->m_addr_rate_limited += num_rate_limit;
5249 "Received addr: %u addresses (%u processed, %u rate-limited) "
5251 vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
5253 m_addrman.Add(vAddrOk, pfrom.addr, 2h);
5254 if (vAddr.size() < 1000) {
5255 peer->m_getaddr_sent = false;
5262 "addrfetch connection completed peer=%d; disconnecting\n",
5270 peer->m_wants_addrv2 = true;
5275 peer->m_prefers_headers = true;
5280 bool sendcmpct_hb{false};
5281 uint64_t sendcmpct_version{0};
5282 vRecv >> sendcmpct_hb >> sendcmpct_version;
5289 CNodeState *nodestate = State(pfrom.GetId());
5290 nodestate->m_provides_cmpctblocks = true;
5291 nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
5300 std::vector<CInv> vInv;
5303 Misbehaving(*peer, 20,
5304 strprintf("inv message size = %u", vInv.size()));
5308 const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
5310 const auto current_time{GetTime<std::chrono::microseconds>()};
5311 std::optional<BlockHash> best_block;
5313 auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
5315 fAlreadyHave ? "have" : "new", pfrom.GetId());
5318 for (CInv &inv : vInv) {
5319 if (interruptMsgProc) {
5331 const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
5332 logInv(inv, fAlreadyHave);
5335 UpdateBlockAvailability(pfrom.GetId(), hash);
5337 !IsBlockRequested(hash)) {
5344 best_block = std::move(hash);
5355 const bool fAlreadyHave = AlreadyHaveProof(proofid);
5356 logInv(inv, fAlreadyHave);
5357 AddKnownProof(*peer, proofid);
5359 if (!fAlreadyHave && m_avalanche &&
5361 const bool preferred = isPreferredDownloadPeer(pfrom);
5363 LOCK(cs_proofrequest);
5364 AddProofAnnouncement(pfrom, proofid, current_time,
5373 const bool fAlreadyHave =
5374 AlreadyHaveTx(txid, true);
5375 logInv(inv, fAlreadyHave);
5377 AddKnownTx(*peer, txid);
5378 if (reject_tx_invs) {
5380 "transaction (%s) inv sent in violation of "
5381 "protocol, disconnecting peer=%d\n",
5385 } else if (!fAlreadyHave &&
5387 AddTxAnnouncement(pfrom, txid, current_time);
5394 "Unknown inv type \"%s\" received from peer=%d\n",
5411 if (state.fSyncStarted ||
5412 (!peer->m_inv_triggered_getheaders_before_sync &&
5413 *best_block != m_last_block_inv_triggering_headers_sync)) {
5414 if (MaybeSendGetHeaders(
5415 pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
5417 m_chainman.m_best_header->nHeight,
5418 best_block->ToString(), pfrom.GetId());
5420 if (!state.fSyncStarted) {
5421 peer->m_inv_triggered_getheaders_before_sync = true;
5425 m_last_block_inv_triggering_headers_sync = *best_block;
5434 std::vector<CInv> vInv;
5437 Misbehaving(*peer, 20,
5438 strprintf("getdata message size = %u", vInv.size()));
5443 vInv.size(), pfrom.GetId());
5445 if (vInv.size() > 0) {
5451 LOCK(peer->m_getdata_requests_mutex);
5452 peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
5453 vInv.begin(), vInv.end());
5454 ProcessGetData(config, pfrom, *peer, interruptMsgProc);
5463 vRecv >> locator >> hashStop;
5467 "getblocks locator size %lld > %d, disconnect peer=%d\n",
5481 std::shared_ptr<const CBlock> a_recent_block;
5483 LOCK(m_most_recent_block_mutex);
5484 a_recent_block = m_most_recent_block;
5488 state, a_recent_block, m_avalanche)) {
5506 (pindex ? pindex->nHeight : -1),
5509 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5518 const int nPrunedBlocksLikelyToHave =
5522 (!pindex->nStatus.hasData() ||
5524 nPrunedBlocksLikelyToHave)) {
5527 " getblocks stopping, pruned or too old block at %d %s\n",
5532 peer->m_block_inv_mutex,
5533 peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
5534 if (--nLimit <= 0) {
5540 peer->m_continuation_block = pindex->GetBlockHash();
5552 std::shared_ptr<const CBlock> recent_block;
5554 LOCK(m_most_recent_block_mutex);
5555 if (m_most_recent_block_hash == req.blockhash) {
5556 recent_block = m_most_recent_block;
5561 SendBlockTransactions(pfrom, *peer, *recent_block, req);
5570 if (!pindex || !pindex->nStatus.hasData()) {
5573 "Peer %d sent us a getblocktxn for a block we don't have\n",
5585 SendBlockTransactions(pfrom, *peer, block, req);
5598 "Peer %d sent us a getblocktxn for a block > %i deep\n",
5603 WITH_LOCK(peer->m_getdata_requests_mutex,
5604 peer->m_getdata_requests.push_back(inv));
5613 vRecv >> locator >> hashStop;
5617 "getheaders locator size %lld > %d, disconnect peer=%d\n",
5626 "Ignoring getheaders from peer=%d while importing/reindexing\n",
5640 if (m_chainman.ActiveTip() == nullptr ||
5645 "Ignoring getheaders from peer=%d because active chain "
5646 "has too little work; sending empty response\n",
5651 std::vector<CBlock>()));
5655 CNodeState *nodestate = State(pfrom.GetId());
5664 if (!BlockRequestAllowed(pindex)) {
5666 "%s: ignoring request from peer=%i for old block "
5667 "header that isn't in the main chain\n",
5668 __func__, pfrom.GetId());
5682 std::vector<CBlock> vHeaders;
5685 (pindex ? pindex->nHeight : -1),
5688 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5690 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
5707 nodestate->pindexBestHeaderSent =
5715 if (RejectIncomingTxs(pfrom)) {
5717 "transaction sent in violation of protocol peer=%d\n",
5733 const CTransaction &tx = *ptx;
5734 const TxId &txid = tx.GetId();
5735 AddKnownTx(*peer, txid);
5737 bool shouldReconcileTx{false};
5741 m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
5743 if (AlreadyHaveTx(txid, true)) {
5749 if (!m_mempool.exists(tx.GetId())) {
5751 "Not relaying non-mempool transaction %s from "
5752 "forcerelay peer=%d\n",
5753 tx.GetId().ToString(), pfrom.GetId());
5755 LogPrintf("Force relaying tx %s from peer=%d\n",
5756 tx.GetId().ToString(), pfrom.GetId());
5757 RelayTransaction(tx.GetId());
5761 if (m_recent_rejects_package_reconsiderable.contains(txid)) {
5769 "found tx %s in reconsiderable rejects, looking for "
5770 "child in orphanage\n",
5772 if (auto package_to_validate{
5773 Find1P1CPackage(ptx, pfrom.GetId())}) {
5776 package_to_validate->m_txns,
5779 "package evaluation for %s: %s (%s)\n",
5780 package_to_validate->ToString(),
5782 ? "package accepted"
5783 : "package rejected",
5785 ProcessPackageResult(package_to_validate.value(),
5814 ProcessValidTx(pfrom.GetId(), ptx);
5820 bool fRejectedParents = false;
5824 std::vector<TxId> unique_parents;
5825 unique_parents.reserve(tx.vin.size());
5826 for (const CTxIn &txin : tx.vin) {
5829 unique_parents.push_back(txin.prevout.GetTxId());
5831 std::sort(unique_parents.begin(), unique_parents.end());
5832 unique_parents.erase(
5833 std::unique(unique_parents.begin(), unique_parents.end()),
5834 unique_parents.end());
5842 std::optional<TxId> rejected_parent_reconsiderable;
5843 for (const TxId &parent_txid : unique_parents) {
5844 if (m_recent_rejects.contains(parent_txid)) {
5845 fRejectedParents = true;
5849 if (m_recent_rejects_package_reconsiderable.contains(
5851 !m_mempool.exists(parent_txid)) {
5856 if (rejected_parent_reconsiderable.has_value()) {
5857 fRejectedParents = true;
5860 rejected_parent_reconsiderable = parent_txid;
5863 if (!fRejectedParents) {
5864 const auto current_time{
5865 GetTime<std::chrono::microseconds>()};
5867 for (const TxId &parent_txid : unique_parents) {
5869 AddKnownTx(*peer, parent_txid);
5873 if (!AlreadyHaveTx(parent_txid,
5875 AddTxAnnouncement(pfrom, parent_txid, current_time);
5881 if (unsigned int nEvicted =
5885 if (orphanage.AddTx(ptx,
5887 AddToCompactExtraTransactions(ptx);
5890 m_opts.max_orphan_txs, m_rng);
5893 "orphanage overflow, removed %u tx\n",
5899 m_txrequest.ForgetInvId(tx.GetId());
5903 "not keeping orphan with rejected parents %s\n",
5904 tx.GetId().ToString());
5907 m_recent_rejects.insert(tx.GetId());
5908 m_txrequest.ForgetInvId(tx.GetId());
5912 ProcessInvalidTx(pfrom.GetId(), ptx, state,
5922 "tx %s failed but reconsiderable, looking for child in "
5925 if (auto package_to_validate{
5926 Find1P1CPackage(ptx, pfrom.GetId())}) {
5929 package_to_validate->m_txns, false)};
5931 "package evaluation for %s: %s (%s)\n",
5932 package_to_validate->ToString(),
5934 ? "package accepted"
5935 : "package rejected",
5937 ProcessPackageResult(package_to_validate.value(),
5946 m_txrequest.ForgetInvId(tx.GetId());
5948 unsigned int nEvicted{0};
5955 m_opts.max_conflicting_txs, m_rng);
5956 shouldReconcileTx = conflicting.HaveTx(ptx->GetId());
5961 "conflicting pool overflow, removed %u tx\n",
5967 if (m_avalanche && m_avalanche->m_preConsensus && shouldReconcileTx) {
5978 "Unexpected cmpctblock message received from peer %d\n",
5985 vRecv >> cmpctblock;
5986 } catch (std::ios_base::failure &e) {
5988 Misbehaving(*peer, 100, "cmpctblock-bad-indexes");
5992 bool received_new_header = false;
6005 MaybeSendGetHeaders(
6006 pfrom, GetLocator(m_chainman.m_best_header), *peer);
6012 GetAntiDoSWorkThreshold()) {
6016 "Ignoring low-work compact block from peer %d\n",
6022 received_new_header = true;
6032 MaybePunishNodeForBlock(pfrom.GetId(), state,
6034 "invalid header via cmpctblock");
6039 if (received_new_header) {
6041 "Saw new cmpctblock header hash=%s peer=%d\n",
6042 blockhash.ToString(), pfrom.GetId());
6049 bool fProcessBLOCKTXN = false;
6055 bool fRevertToHeaderProcessing = false;
6059 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6060 bool fBlockReconstructed = false;
6068 CNodeState *nodestate = State(pfrom.GetId());
6072 if (received_new_header &&
6075 nodestate->m_last_block_announcement = GetTime();
6078 if (pindex->nStatus.hasData()) {
6085 size_t already_in_flight =
6086 std::distance(range_flight.first, range_flight.second);
6087 bool requested_block_from_this_peer{false};
6091 bool first_in_flight =
6092 already_in_flight == 0 ||
6093 (range_flight.first->second.first == pfrom.GetId());
6095 while (range_flight.first != range_flight.second) {
6096 if (range_flight.first->second.first == pfrom.GetId()) {
6097 requested_block_from_this_peer = true;
6100 range_flight.first++;
6109 if (requested_block_from_this_peer) {
6113 std::vector<CInv> vInv(1);
6123 if (!already_in_flight && !CanDirectFetch()) {
6131 nodestate->vBlocksInFlight.size() <
6133 requested_block_from_this_peer) {
6134 std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
6135 if (!BlockRequested(config, pfrom.GetId(), *pindex,
6137 if (!(*queuedBlockIt)->partialBlock) {
6139 ->partialBlock.reset(
6146 "we were already syncing!\n");
6152 *(*queuedBlockIt)->partialBlock;
6154 partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
6160 Misbehaving(*peer, 100, "invalid compact block");
6163 if (first_in_flight) {
6166 std::vector<CInv> vInv(1);
6180 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
6191 fProcessBLOCKTXN = true;
6192 } else if (first_in_flight) {
6201 IsBlockRequestedFromOutbound(blockhash) ||
6226 tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
6231 std::vector<CTransactionRef> dummy;
6232 status = tempBlock.FillBlock(*pblock, dummy);
6234 fBlockReconstructed = true;
6238 if (requested_block_from_this_peer) {
6242 std::vector<CInv> vInv(1);
6250 fRevertToHeaderProcessing = true;
6255 if (fProcessBLOCKTXN) {
6257 blockTxnMsg, time_received, interruptMsgProc);
6260 if (fRevertToHeaderProcessing) {
6266 return ProcessHeadersMessage(config, pfrom, *peer,
6271 if (fBlockReconstructed) {
6276 mapBlockSource.emplace(pblock->GetHash(),
6277 std::make_pair(pfrom.GetId(), false));
6288 ProcessBlock(config, pfrom, pblock, true,
6297 RemoveBlockRequest(pblock->GetHash(), std::nullopt);
6307 "Unexpected blocktxn message received from peer %d\n",
6315 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6316 bool fBlockRead = false;
6320 auto range_flight = mapBlocksInFlight.equal_range(resp.blockhash);
6321 size_t already_in_flight =
6322 std::distance(range_flight.first, range_flight.second);
6323 bool requested_block_from_this_peer{false};
6327 bool first_in_flight =
6328 already_in_flight == 0 ||
6329 (range_flight.first->second.first == pfrom.GetId());
6331 while (range_flight.first != range_flight.second) {
6332 auto [node_id, block_it] = range_flight.first->second;
6333 if (node_id == pfrom.GetId() && block_it->partialBlock) {
6334 requested_block_from_this_peer = true;
6337 range_flight.first++;
6340 if (!requested_block_from_this_peer) {
6342 "Peer %d sent us block transactions for block "
6343 "we weren't expecting\n",
6349 *range_flight.first->second.second->partialBlock;
6357 "invalid compact block/non-matching block transactions");
6360 if (first_in_flight) {
6362 std::vector<CInv> invs;
6370 "Peer %d sent us a compact block but it failed to "
6371 "reconstruct, waiting on first download to complete\n",
6404 std::make_pair(pfrom.GetId(), false));
6415 ProcessBlock(config, pfrom, pblock, true,
6425 "Unexpected headers message received from peer %d\n",
6432 peer->m_last_getheaders_timestamp = {};
6434 std::vector<CBlockHeader> headers;
6440 Misbehaving(*peer, 20,
6441 strprintf("too-many-headers: headers message size = %u",
6445 headers.resize(nCount);
6446 for (unsigned int n = 0; n < nCount; n++) {
6447 vRecv >> headers[n];
6452 ProcessHeadersMessage(config, pfrom, *peer, std::move(headers),
6458 if (m_headers_presync_should_signal.exchange(false)) {
6459 HeadersPresyncStats stats;
6461 LOCK(m_headers_presync_mutex);
6463 m_headers_presync_stats.find(m_headers_presync_bestpeer);
6464 if (it != m_headers_presync_stats.end()) {
6470 stats.first, stats.second->first, stats.second->second);
6481 "Unexpected block message received from peer %d\n",
6486 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6490 pblock->GetHash().ToString(), pfrom.GetId());
6498 const BlockHash hash = pblock->GetHash();
6499 bool min_pow_checked = false;
6504 forceProcessing = IsBlockRequested(hash);
6505 RemoveBlockRequest(hash, pfrom.GetId());
6509 mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
6517 GetAntiDoSWorkThreshold()) {
6518 min_pow_checked = true;
6521 ProcessBlock(config, pfrom, pblock, forceProcessing, min_pow_checked);
6531 if (pfrom.m_avalanche_pubkey.has_value()) {
6534 "Ignoring avahello from peer %d: already in our node set\n",
6540 vRecv >> delegation;
6547 if (!delegation.verify(state, pubkey)) {
6548 Misbehaving(*peer, 100, "invalid-delegation");
6551 pfrom.m_avalanche_pubkey = std::move(pubkey);
6554 sighasher << delegation.getId();
6562 if (!(*pfrom.m_avalanche_pubkey)
6563 .VerifySchnorr(sighasher.GetHash(), sig)) {
6564 Misbehaving(*peer, 100, "invalid-avahello-signature");
6571 if (!AlreadyHaveProof(proofid)) {
6572 const bool preferred = isPreferredDownloadPeer(pfrom);
6573 LOCK(cs_proofrequest);
6574 AddProofAnnouncement(pfrom, proofid,
6575 GetTime<std::chrono::microseconds>(),
6594 WITH_LOCK(peer->m_addr_token_bucket_mutex,
6595 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
6600 peer->m_proof_relay->compactproofs_requested = true;
6611 const auto now = Now<SteadyMilliseconds>();
6617 last_poll + std::chrono::milliseconds(m_opts.avalanche_cooldown)) {
6619 "Ignoring repeated avapoll from peer %d: cooldown not "
6634 strprintf("too-many-ava-poll: poll message size = %u", nCount));
6638 std::vector<avalanche::Vote> votes;
6639 votes.reserve(nCount);
6641 for (unsigned int n = 0; n < nCount; n++) {
6649 if (!quorum_established) {
6650 votes.emplace_back(vote, inv.hash);
6657 if (m_opts.avalanche_preconsensus) {
6671 if (m_opts.avalanche_staking_preconsensus) {
6678 "poll inv type %d unknown from peer=%d\n",
6683 votes.emplace_back(vote, inv.hash);
6709 if (!pfrom.m_avalanche_pubkey.has_value() ||
6710 !(*pfrom.m_avalanche_pubkey)
6711 .VerifySchnorr(verifier.GetHash(), sig)) {
6712 Misbehaving(*peer, 100, "invalid-ava-response-signature");
6717 auto now = GetTime<std::chrono::seconds>();
6719 std::vector<avalanche::VoteItemUpdate> updates;
6726 Misbehaving(*peer, banscore, error);
6742 Misbehaving(*peer, 2, error);
6754 auto logVoteUpdate = [](const auto &voteUpdate,
6755 const std::string &voteItemTypeStr,
6756 const auto &voteItemId) {
6757 std::string voteOutcome;
6758 bool alwaysPrint = false;
6759 switch (voteUpdate.getStatus()) {
6761 voteOutcome = "invalidated";
6765 voteOutcome = "rejected";
6768 voteOutcome = "accepted";
6771 voteOutcome = "finalized";
6775 voteOutcome = "stalled";
6784 LogPrintf("Avalanche %s %s %s\n", voteOutcome, voteItemTypeStr,
6785 voteItemId.ToString());
6789 voteItemTypeStr, voteItemId.ToString());
6793 bool shouldActivateBestChain = false;
6795 for (const auto &u : updates) {
6800 if (auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
6804 logVoteUpdate(u, "proof", proofid);
6806 auto rejectionMode =
6808 auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
6809 switch (u.getStatus()) {
6825 return pm.rejectProof(proofid,
6829 "ERROR: Failed to reject proof: %s\n",
6834 nextCooldownTimePoint += std::chrono::seconds(
6835 m_opts.avalanche_peer_replacement_cooldown);
6841 avalanche::PeerManager::
6842 RegistrationMode::FORCE_ACCEPT);
6845 [&](const avalanche::Peer &peer) {
6846 pm.updateNextPossibleConflictTime(
6848 nextCooldownTimePoint);
6849 if (u.getStatus() ==
6850 avalanche::VoteStatus::
6852 pm.setFinalized(peer.peerid);
6860 "ERROR: Failed to accept proof: %s\n",
6867 auto getBlockFromIndex = [this](const CBlockIndex *pindex) {
6870 std::shared_ptr<const CBlock> pblock = WITH_LOCK(
6871 m_most_recent_block_mutex, return m_most_recent_block);
6873 if (!pblock || pblock->GetHash() != pindex->GetBlockHash()) {
6874 std::shared_ptr<CBlock> pblockRead =
6875 std::make_shared<CBlock>();
6878 assert(!"cannot load block from disk");
6880 pblock = pblockRead;
6885 if (auto pitem = std::get_if<const CBlockIndex *>(&item)) {
6888 shouldActivateBestChain = true;
6892 switch (u.getStatus()) {
6897 LogPrintf("ERROR: Database error: %s\n",
6906 LogPrintf("ERROR: Database error: %s\n",
6911 auto pblock = getBlockFromIndex(pindex);
6927 if (m_opts.avalanche_preconsensus) {
6928 auto pblock = getBlockFromIndex(pindex);
6936 pindex, *m_avalanche);
6946 if (m_opts.avalanche_staking_preconsensus) {
6948 std::get_if<const avalanche::StakeContenderId>(&item)) {
6950 logVoteUpdate(u, "contender", contenderId);
6952 switch (u.getStatus()) {
6973 if (!m_opts.avalanche_preconsensus) {
6977 if (auto pitem = std::get_if<const CTransactionRef>(&item)) {
6981 const TxId &txid = tx->GetId();
6982 logVoteUpdate(u, "tx", txid);
6984 switch (u.getStatus()) {
6991 if (m_mempool.exists(txid)) {
6995 std::vector<CTransactionRef> conflictingTxs =
7001 if (conflictingTxs.size() > 0) {
7012 for (const auto &conflictingTx :
7015 conflictingTx->GetId());
7043 return conflicting.HaveTx(txid);
7046 std::vector<CTransactionRef>
7047 mempool_conflicting_txs;
7048 for (const auto &txin : tx->vin) {
7053 mempool_conflicting_txs.push_back(
7054 std::move(conflict));
7063 [&txid, &mempool_conflicting_txs](
7068 if (mempool_conflicting_txs.size() >
7071 mempool_conflicting_txs[0],
7080 auto it = m_mempool.GetIter(txid);
7081 if (!it.has_value()) {
7084 "Error: finalized tx (%s) is not in the "
7090 std::vector<TxId> finalizedTxIds;
7094 for (const auto &finalized_txid : finalizedTxIds) {
7104 std::vector<CTransactionRef>
7107 for (const auto &conflictingTx :
7109 m_recent_rejects.insert(
7110 conflictingTx->GetId());
7112 conflictingTx->GetId());
7125 if (shouldActivateBestChain) {
7128 state, nullptr, m_avalanche)) {
7143 ReceivedAvalancheProof(pfrom, *peer, proof);
7152 if (peer->m_proof_relay == nullptr) {
7156 peer->m_proof_relay->lastSharedProofsUpdate =
7157 GetTime<std::chrono::seconds>();
7159 peer->m_proof_relay->sharedProofs =
7165 peer->m_proof_relay->sharedProofs);
7176 if (peer->m_proof_relay == nullptr) {
7181 if (!peer->m_proof_relay->compactproofs_requested) {
7185 peer->m_proof_relay->compactproofs_requested = false;
7189 vRecv >> compactProofs;
7190 } catch (std::ios_base::failure &e) {
7192 Misbehaving(*peer, 100, "avaproofs-bad-indexes");
7197 std::set<uint32_t> prefilledIndexes;
7199 if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
7229 auto shortIdProcessor =
7233 if (shortIdProcessor.hasOutOfBoundIndex()) {
7236 Misbehaving(*peer, 100, "avaproofs-bad-indexes");
7239 if (!shortIdProcessor.isEvenlyDistributed()) {
7244 std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
7251 shortIdProcessor.matchKnownItem(shortid, peer.proof);
7258 remoteProofsStatus.emplace_back(peer.getProofId(),
7269 for (size_t i = 0; i < compactProofs.size(); i++) {
7270 if (shortIdProcessor.getItem(i) == nullptr) {
7287 return pfrom.m_avalanche_pubkey.has_value())) {
7290 for (const auto &[proofid, present] : remoteProofsStatus) {
7300 if (peer->m_proof_relay == nullptr) {
7307 auto requestedIndiceIt = proofreq.indices.begin();
7308 uint32_t treeIndice = 0;
7309 peer->m_proof_relay->sharedProofs.forEachLeaf([&](const auto &proof) {
7310 if (requestedIndiceIt == proofreq.indices.end()) {
7315 if (treeIndice++ == *requestedIndiceIt) {
7318 requestedIndiceIt++;
7324 peer->m_proof_relay->sharedProofs = {};
7337 "Ignoring \"getaddr\" from %s connection. peer=%d\n",
7344 Assume(SetupAddressRelay(pfrom, *peer));
7348 if (peer->m_getaddr_recvd) {
7353 peer->m_getaddr_recvd = true;
7355 peer->m_addrs_to_send.clear();
7356 std::vector<CAddress> vAddr;
7357 const size_t maxAddrToSend = m_opts.max_addr_to_send;
7365 for (const CAddress &addr : vAddr) {
7366 PushAddress(*peer, addr);
7372 auto now = GetTime<std::chrono::seconds>();
7382 if (!SetupAddressRelay(pfrom, *peer)) {
7384 "Ignoring getavaaddr message from %s peer=%d\n",
7389 auto availabilityScoreComparator = [](const CNode *lhs,
7392 double scoreRhs = rhs->getAvailabilityScore();
7394 if (scoreLhs != scoreRhs) {
7395 return scoreLhs > scoreRhs;
7404 std::set<const CNode *, decltype(availabilityScoreComparator)> avaNodes(
7405 availabilityScoreComparator);
7412 avaNodes.insert(pnode);
7413 if (avaNodes.size() > m_opts.max_addr_to_send) {
7414 avaNodes.erase(std::prev(avaNodes.end()));
7418 peer->m_addrs_to_send.clear();
7419 for (const CNode *pnode : avaNodes) {
7420 PushAddress(*peer, pnode->addr);
7431 "mempool request with bloom filters disabled, "
7432 "disconnect peer=%d\n",
7443 "mempool request with bandwidth limit reached, "
7444 "disconnect peer=%d\n",
7451 if (auto tx_relay = peer->GetTxRelay()) {
7452 LOCK(tx_relay->m_tx_inventory_mutex);
7453 tx_relay->m_send_mempool = true;
7483 const auto ping_end = time_received;
7486 bool bPingFinished = false;
7487 std::string sProblem;
7489 if (nAvail >= sizeof(nonce)) {
7494 if (peer->m_ping_nonce_sent != 0) {
7495 if (nonce == peer->m_ping_nonce_sent) {
7498 bPingFinished = true;
7499 const auto ping_time = ping_end - peer->m_ping_start.load();
7500 if (ping_time.count() >= 0) {
7505 sProblem = "Timing mishap";
7509 sProblem = "Nonce mismatch";
7513 bPingFinished = true;
7514 sProblem = "Nonce zero";
7518 sProblem = "Unsolicited pong without ping";
7523 bPingFinished = true;
7524 sProblem = "Short payload";
7527 if (!(sProblem.empty())) {
7529 "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
7530 pfrom.GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
7533 if (bPingFinished) {
7534 peer->m_ping_nonce_sent = 0;
7542 "filterload received despite not offering bloom services "
7543 "from peer=%d; disconnecting\n",
7553 Misbehaving(*peer, 100, "too-large bloom filter");
7554 } else if (auto tx_relay = peer->GetTxRelay()) {
7556 LOCK(tx_relay->m_bloom_filter_mutex);
7557 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
7558 tx_relay->m_relay_txs = true;
7568 "filteradd received despite not offering bloom services "
7569 "from peer=%d; disconnecting\n",
7574 std::vector<uint8_t> vData;
7583 } else if (auto tx_relay = peer->GetTxRelay()) {
7584 LOCK(tx_relay->m_bloom_filter_mutex);
7585 if (tx_relay->m_bloom_filter) {
7586 tx_relay->m_bloom_filter->insert(vData);
7594 Misbehaving(*peer, 100, "bad filteradd message");
7602 "filterclear received despite not offering bloom services "
7603 "from peer=%d; disconnecting\n",
7608 auto tx_relay = peer->GetTxRelay();
7614 LOCK(tx_relay->m_bloom_filter_mutex);
7615 tx_relay->m_bloom_filter = nullptr;
7616 tx_relay->m_relay_txs = true;
7625 vRecv >> newFeeFilter;
7627 if (auto tx_relay = peer->GetTxRelay()) {
7628 tx_relay->m_fee_filter_received = newFeeFilter;
7637 ProcessGetCFilters(pfrom, *peer, vRecv);
7642 ProcessGetCFHeaders(pfrom, *peer, vRecv);
7647 ProcessGetCFCheckPt(pfrom, *peer, vRecv);
7652 std::vector<CInv> vInv;
7658 for (CInv &inv : vInv) {
7664 m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
7671 LOCK(cs_proofrequest);
7672 m_proofrequest.ReceivedResponse(
7686 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer) {
7688 LOCK(peer.m_misbehavior_mutex);
7691 if (!peer.m_should_discourage) {
7695 peer.m_should_discourage = false;
7701 LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
7707 LogPrintf("Warning: not punishing manually connected peer %d!\n",
7716 "Warning: disconnecting but not discouraging %s peer %d!\n",
7733 bool PeerManagerImpl::ProcessMessages(const Config &config, CNode *pfrom,
7734 std::atomic<bool> &interruptMsgProc) {
7745 bool fMoreWork = false;
7747 PeerRef peer = GetPeerRef(pfrom->GetId());
7748 if (peer == nullptr) {
7753 LOCK(peer->m_getdata_requests_mutex);
7754 if (!peer->m_getdata_requests.empty()) {
7755 ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
7759 const bool processed_orphan = ProcessOrphanTx(config, *peer);
7765 if (processed_orphan) {
7772 LOCK(peer->m_getdata_requests_mutex);
7773 if (!peer->m_getdata_requests.empty()) {
7783 std::list<CNetMessage> msgs;
7786 if (pfrom->vProcessMsg.empty()) {
7790 msgs.splice(msgs.begin(), pfrom->vProcessMsg,
7791 pfrom->vProcessMsg.begin());
7795 fMoreWork = !pfrom->vProcessMsg.empty();
7801 msg.m_recv.size(), msg.m_recv.data());
7803 if (m_opts.capture_messages) {
7811 if (!msg.m_valid_netmagic) {
7813 "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
7827 if (!msg.m_valid_header) {
7835 if (!msg.m_valid_checksum) {
7847 ProcessMessage(config, *pfrom, msg.m_type, vRecv, msg.m_time,
7849 if (interruptMsgProc) {
7854 LOCK(peer->m_getdata_requests_mutex);
7855 if (!peer->m_getdata_requests.empty()) {
7864 return orphanage.HaveTxToReconsider(peer->m_id);
7868 } catch (const std::exception &e) {
7871 e.what(), typeid(e).name());
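// ProcessMessages (above) first drains queued getdata requests and any
// orphan transactions that became reconsiderable, then splices one message
// off vProcessMsg, dispatches it through ProcessMessage(), and reports via
// fMoreWork whether further queued messages or getdata work remain for this
// peer.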
7880 void PeerManagerImpl::ConsiderEviction(CNode &pto, Peer &peer,
7881 std::chrono::seconds time_in_seconds) {
7884 CNodeState &state = *State(pto.GetId());
7888 state.fSyncStarted) {
7895 if (state.pindexBestKnownBlock != nullptr &&
7896 state.pindexBestKnownBlock->nChainWork >=
7898 if (state.m_chain_sync.m_timeout != 0s) {
7899 state.m_chain_sync.m_timeout = 0s;
7900 state.m_chain_sync.m_work_header = nullptr;
7901 state.m_chain_sync.m_sent_getheaders = false;
7903 } else if (state.m_chain_sync.m_timeout == 0s ||
7904 (state.m_chain_sync.m_work_header != nullptr &&
7905 state.pindexBestKnownBlock != nullptr &&
7906 state.pindexBestKnownBlock->nChainWork >=
7907 state.m_chain_sync.m_work_header->nChainWork)) {
7913 state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
7914 state.m_chain_sync.m_sent_getheaders = false;
7915 } else if (state.m_chain_sync.m_timeout > 0s &&
7916 time_in_seconds > state.m_chain_sync.m_timeout) {
7921 if (state.m_chain_sync.m_sent_getheaders) {
7924 "Disconnecting outbound peer %d for old chain, best known "
7927 state.pindexBestKnownBlock != nullptr
7928 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
7932 assert(state.m_chain_sync.m_work_header);
7937 MaybeSendGetHeaders(
7938 pto, GetLocator(state.m_chain_sync.m_work_header->pprev),
7942 "sending getheaders to outbound peer=%d to verify chain "
7943 "work (current best known block:%s, benchmark blockhash: "
7946 state.pindexBestKnownBlock != nullptr
7947 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
7949 state.m_chain_sync.m_work_header->GetBlockHash()
7951 state.m_chain_sync.m_sent_getheaders = true;
7958 state.m_chain_sync.m_timeout =
7965 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
7974 std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
7975 next_youngest_peer{-1, 0};
7981 if (pnode->GetId() > youngest_peer.first) {
7982 next_youngest_peer = youngest_peer;
7983 youngest_peer.first = pnode->GetId();
7984 youngest_peer.second = pnode->m_last_block_time;
7988 NodeId to_disconnect = youngest_peer.first;
7989 if (youngest_peer.second > next_youngest_peer.second) {
7992 to_disconnect = next_youngest_peer.first;
8004 CNodeState *node_state = State(pnode->GetId());
8005 if (node_state == nullptr ||
8007 node_state->vBlocksInFlight.empty())) {
8010 "disconnecting extra block-relay-only peer=%d "
8011 "(last block received at time %d)\n",
8018 "keeping block-relay-only peer=%d chosen for eviction "
8019 "(connect time: %d, blocks_in_flight: %d)\n",
8021 node_state->vBlocksInFlight.size());
8037 int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
8048 CNodeState *state = State(pnode->GetId());
8049 if (state == nullptr) {
8054 if (state->m_chain_sync.m_protect) {
8057 if (state->m_last_block_announcement < oldest_block_announcement ||
8058 (state->m_last_block_announcement == oldest_block_announcement &&
8059 pnode->GetId() > worst_peer)) {
8060 worst_peer = pnode->GetId();
8061 oldest_block_announcement = state->m_last_block_announcement;
8065 if (worst_peer == -1) {
8069 bool disconnected = m_connman.ForNode(
8077 CNodeState &state = *State(pnode->GetId());
8079 state.vBlocksInFlight.empty()) {
8081 "disconnecting extra outbound peer=%d (last block "
8082 "announcement received at time %d)\n",
8083 pnode->GetId(), oldest_block_announcement);
8088 "keeping outbound peer=%d chosen for eviction "
8089 "(connect time: %d, blocks_in_flight: %d)\n",
8091 state.vBlocksInFlight.size());
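// EvictExtraOutboundPeers: for extra block-relay-only connections the two
// newest peers (by id) are compared and the one that delivered a block less
// recently is chosen; for extra full outbound connections the unprotected
// peer with the oldest last block announcement is chosen. In both cases the
// candidate is only disconnected if it has no blocks in flight, as the log
// messages above indicate.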
8106 void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
8109 auto now{GetTime<std::chrono::seconds>()};
8111 EvictExtraOutboundPeers(now);
8113 if (now > m_stale_tip_check_time) {
8119 LogPrintf("Potential stale tip detected, will try using extra "
8120 "outbound peer (last tip update: %d seconds ago)\n",
8129 if (!m_initial_sync_finished && CanDirectFetch()) {
8131 m_initial_sync_finished = true;
8135 void PeerManagerImpl::MaybeSendPing(CNode &node_to, Peer &peer,
8136 std::chrono::microseconds now) {
8138 node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
8139 peer.m_ping_nonce_sent &&
8151 bool pingSend = false;
8153 if (peer.m_ping_queued) {
8158 if (peer.m_ping_nonce_sent == 0 &&
8167 nonce = GetRand<uint64_t>();
8168 } while (nonce == 0);
8169 peer.m_ping_queued = false;
8170 peer.m_ping_start = now;
8172 peer.m_ping_nonce_sent = nonce;
8178 peer.m_ping_nonce_sent = 0;
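// MaybeSendPing: a non-zero nonce is drawn (retrying while nonce == 0) so the
// later pong can be matched against m_ping_nonce_sent; m_ping_start records
// when the ping left so the pong handler can compute the round-trip time.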
8184 void PeerManagerImpl::MaybeSendAddr(CNode &node, Peer &peer,
8185 std::chrono::microseconds current_time) {
8187 if (!peer.m_addr_relay_enabled) {
8191 LOCK(peer.m_addr_send_times_mutex);
8193 peer.m_next_local_addr_send < current_time) {
8200 if (peer.m_next_local_addr_send != 0us) {
8201 peer.m_addr_known->reset();
8204 CAddress local_addr{*local_service, peer.m_our_services,
8205 Now<NodeSeconds>()};
8206 PushAddress(peer, local_addr);
8213 if (current_time <= peer.m_next_addr_send) {
8217 peer.m_next_addr_send =
8220 const size_t max_addr_to_send = m_opts.max_addr_to_send;
8221 if (!Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
8224 peer.m_addrs_to_send.resize(max_addr_to_send);
8229 auto addr_already_known =
8232 bool ret = peer.m_addr_known->contains(addr.GetKey());
8234 peer.m_addr_known->insert(addr.GetKey());
8238 peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
8239 peer.m_addrs_to_send.end(),
8240 addr_already_known),
8241 peer.m_addrs_to_send.end());
8244 if (peer.m_addrs_to_send.empty()) {
8248 const char *msg_type;
8250 if (peer.m_wants_addrv2) {
8259 .Make(make_flags, msg_type, peer.m_addrs_to_send));
8260 peer.m_addrs_to_send.clear();
8263 if (peer.m_addrs_to_send.capacity() > 40) {
8264 peer.m_addrs_to_send.shrink_to_fit();
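// MaybeSendAddr: addresses queued for this peer are first filtered through
// peer.m_addr_known (a rolling bloom filter) so the same address is not
// announced twice, then sent as "addr" or "addrv2" depending on
// peer.m_wants_addrv2, and the send buffer is shrunk once it grows past 40
// entries.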
8268 void PeerManagerImpl::MaybeSendSendHeaders(CNode &node, Peer &peer) {
8273 if (!peer.m_sent_sendheaders &&
8276 CNodeState &state = *State(node.GetId());
8277 if (state.pindexBestKnownBlock != nullptr &&
8278 state.pindexBestKnownBlock->nChainWork >
8286 peer.m_sent_sendheaders = true;
8291 void PeerManagerImpl::MaybeSendFeefilter(
8292 CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
8293 if (m_opts.ignore_incoming_txs) {
8317 static const Amount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
8318 if (peer.m_fee_filter_sent == MAX_FILTER) {
8321 peer.m_next_send_feefilter = 0us;
8324 if (current_time > peer.m_next_send_feefilter) {
8325 Amount filterToSend = m_fee_filter_rounder.round(currentFilter);
8329 if (filterToSend != peer.m_fee_filter_sent) {
8333 peer.m_fee_filter_sent = filterToSend;
8335 peer.m_next_send_feefilter =
8342 peer.m_next_send_feefilter &&
8343 (currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
8344 currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
8345 peer.m_next_send_feefilter =
8346 current_time + GetRandomDuration<std::chrono::microseconds>(
8352 class CompareInvMempoolOrder {
8356 explicit CompareInvMempoolOrder(CTxMemPool *_mempool) : mp(_mempool) {}
8358 bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
8368 bool PeerManagerImpl::RejectIncomingTxs(const CNode &peer) const {
8377 if (m_opts.ignore_incoming_txs &&
8384 bool PeerManagerImpl::SetupAddressRelay(const CNode &node, Peer &peer) {
8388 if (node.IsBlockOnlyConn()) {
8392 if (!peer.m_addr_relay_enabled.exchange(true)) {
8396 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
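// SetupAddressRelay enables address relay at most once per peer (the atomic
// exchange above) and never for block-relay-only connections; the 5000-entry
// rolling bloom filter created here backs the m_addr_known de-duplication
// used by MaybeSendAddr.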
8402 bool PeerManagerImpl::SendMessages(const Config &config, CNode *pto) {
8405 PeerRef peer = GetPeerRef(pto->GetId());
8414 if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
8427 const auto current_time{GetTime<std::chrono::microseconds>()};
8432 "addrfetch connection timeout; disconnecting peer=%d\n",
8438 MaybeSendPing(*pto, *peer, current_time);
8445 bool sync_blocks_and_headers_from_peer = false;
8447 MaybeSendAddr(*pto, *peer, current_time);
8449 MaybeSendSendHeaders(*pto, *peer);
8454 CNodeState &state = *State(pto->GetId());
8457 if (m_chainman.m_best_header == nullptr) {
8464 if (state.fPreferredDownload) {
8465 sync_blocks_and_headers_from_peer = true;
8476 if (m_num_preferred_download_peers == 0 ||
8477 mapBlocksInFlight.empty()) {
8478 sync_blocks_and_headers_from_peer = true;
8482 if (!state.fSyncStarted && CanServeBlocks(*peer) &&
8486 if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
8488 const CBlockIndex *pindexStart = m_chainman.m_best_header;
8497 if (pindexStart->pprev) {
8498 pindexStart = pindexStart->pprev;
8500 if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
8503 "initial getheaders (%d) to peer=%d (startheight:%d)\n",
8505 peer->m_starting_height);
8507 state.fSyncStarted = true;
8508 peer->m_headers_sync_timeout =
8513 std::chrono::microseconds{
8515 Ticks<std::chrono::seconds>(
8517 m_chainman.m_best_header->Time()) /
8534 LOCK(peer->m_block_inv_mutex);
8535 std::vector<CBlock> vHeaders;
8537 ((!peer->m_prefers_headers &&
8538 (!state.m_requested_hb_cmpctblocks ||
8539 peer->m_blocks_for_headers_relay.size() > 1)) ||
8540 peer->m_blocks_for_headers_relay.size() >
8545 ProcessBlockAvailability(pto->GetId());
8547 if (!fRevertToInv) {
8548 bool fFoundStartingHeader = false;
8552 for (const BlockHash &hash : peer->m_blocks_for_headers_relay) {
8558 fRevertToInv = true;
8561 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
8572 fRevertToInv = true;
8575 pBestIndex = pindex;
8576 if (fFoundStartingHeader) {
8579 } else if (PeerHasHeader(&state, pindex)) {
8582 } else if (pindex->pprev == nullptr ||
8583 PeerHasHeader(&state, pindex->pprev)) {
8586 fFoundStartingHeader = true;
8591 fRevertToInv = true;
8596 if (!fRevertToInv && !vHeaders.empty()) {
8597 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
8602 "%s sending header-and-ids %s to peer=%d\n",
8603 __func__, vHeaders.front().GetHash().ToString(),
8606 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
8608 LOCK(m_most_recent_block_mutex);
8609 if (m_most_recent_block_hash ==
8611 cached_cmpctblock_msg =
8613 *m_most_recent_compact_block);
8616 if (cached_cmpctblock_msg.has_value()) {
8618 pto, std::move(cached_cmpctblock_msg.value()));
8622 block, *pBestIndex)};
8629 state.pindexBestHeaderSent = pBestIndex;
8630 } else if (peer->m_prefers_headers) {
8631 if (vHeaders.size() > 1) {
8633 "%s: %u headers, range (%s, %s), to peer=%d\n",
8634 __func__, vHeaders.size(),
8635 vHeaders.front().GetHash().ToString(),
8636 vHeaders.back().GetHash().ToString(),
8640 "%s: sending header %s to peer=%d\n", __func__,
8641 vHeaders.front().GetHash().ToString(),
8646 state.pindexBestHeaderSent = pBestIndex;
8648 fRevertToInv = true;
8655 if (!peer->m_blocks_for_headers_relay.empty()) {
8657 peer->m_blocks_for_headers_relay.back();
8668 "Announcing block %s not on main chain (tip=%s)\n",
8677 if (!PeerHasHeader(&state, pindex)) {
8678 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
8680 "%s: sending inv peer=%d hash=%s\n", __func__,
8685 peer->m_blocks_for_headers_relay.clear();
8692 std::vector<CInv> vInv;
8693 auto addInvAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
8694 vInv.emplace_back(type, hash);
8706 LOCK(peer->m_block_inv_mutex);
8708 vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
8714 for (const BlockHash &hash : peer->m_blocks_for_inv_relay) {
8717 peer->m_blocks_for_inv_relay.clear();
8720 auto computeNextInvSendTime =
8721 [&](std::chrono::microseconds &next) -> bool {
8724 if (next < current_time) {
8725 fSendTrickle = true;
8727 next = NextInvToInbounds(
8732 next = current_time;
8736 return fSendTrickle;
8740 if (peer->m_proof_relay != nullptr) {
8741 LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
8743 if (computeNextInvSendTime(
8744 peer->m_proof_relay->m_next_inv_send_time)) {
8746 peer->m_proof_relay->m_proof_inventory_to_send.begin();
8748 peer->m_proof_relay->m_proof_inventory_to_send.end()) {
8751 it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
8754 if (peer->m_proof_relay->m_proof_inventory_known_filter
8755 .contains(proofid)) {
8759 peer->m_proof_relay->m_proof_inventory_known_filter.insert(
8762 peer->m_proof_relay->m_recently_announced_proofs.insert(
8768 if (auto tx_relay = peer->GetTxRelay()) {
8769 LOCK(tx_relay->m_tx_inventory_mutex);
8771 const bool fSendTrickle =
8772 computeNextInvSendTime(tx_relay->m_next_inv_send_time);
8777 LOCK(tx_relay->m_bloom_filter_mutex);
8778 if (!tx_relay->m_relay_txs) {
8779 tx_relay->m_tx_inventory_to_send.clear();
8784 if (fSendTrickle && tx_relay->m_send_mempool) {
8785 auto vtxinfo = m_mempool.infoAll();
8786 tx_relay->m_send_mempool = false;
8788 tx_relay->m_fee_filter_received.load()};
8790 LOCK(tx_relay->m_bloom_filter_mutex);
8792 for (const auto &txinfo : vtxinfo) {
8793 const TxId &txid = txinfo.tx->GetId();
8794 tx_relay->m_tx_inventory_to_send.erase(txid);
8797 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
8800 if (tx_relay->m_bloom_filter &&
8801 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
8805 tx_relay->m_tx_inventory_known_filter.insert(txid);
8808 addInvAndMaybeFlush(MSG_TX, txid);
8810 tx_relay->m_last_mempool_req =
8811 std::chrono::duration_cast<std::chrono::seconds>(
8818 std::vector<std::set<TxId>::iterator> vInvTx;
8819 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
8820 for (std::set<TxId>::iterator it =
8821 tx_relay->m_tx_inventory_to_send.begin();
8822 it != tx_relay->m_tx_inventory_to_send.end(); it++) {
8823 vInvTx.push_back(it);
8826 tx_relay->m_fee_filter_received.load()};
8831 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
8832 std::make_heap(vInvTx.begin(), vInvTx.end(),
8833 compareInvMempoolOrder);
8837 unsigned int nRelayedTransactions = 0;
8838 LOCK(tx_relay->m_bloom_filter_mutex);
8839 while (!vInvTx.empty() &&
8844 std::pop_heap(vInvTx.begin(), vInvTx.end(),
8845 compareInvMempoolOrder);
8846 std::set<TxId>::iterator it = vInvTx.back();
8848 const TxId txid = *it;
8850 tx_relay->m_tx_inventory_to_send.erase(it);
8852 if (tx_relay->m_tx_inventory_known_filter.contains(txid)) {
8856 auto txinfo = m_mempool.info(txid);
8862 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
8865 if (tx_relay->m_bloom_filter &&
8866 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
8871 tx_relay->m_recently_announced_invs.insert(txid);
8872 addInvAndMaybeFlush(MSG_TX, txid);
8873 nRelayedTransactions++;
8876 while (!g_relay_expiration.empty() &&
8877 g_relay_expiration.front().first <
8879 mapRelay.erase(g_relay_expiration.front().second);
8880 g_relay_expiration.pop_front();
8883 auto ret = mapRelay.insert(
8884 std::make_pair(txid, std::move(txinfo.tx)));
8886 g_relay_expiration.push_back(std::make_pair(
8890 tx_relay->m_tx_inventory_known_filter.insert(txid);
8896 if (!vInv.empty()) {
8903 CNodeState &state = *State(pto->GetId());
8906 auto stalling_timeout = m_block_stalling_timeout.load();
8907 if (state.m_stalling_since.count() &&
8908 state.m_stalling_since < current_time - stalling_timeout) {
8913 LogPrintf("Peer=%d is stalling block download, disconnecting\n",
8918 const auto new_timeout =
8920 if (stalling_timeout != new_timeout &&
8921 m_block_stalling_timeout.compare_exchange_strong(
8922 stalling_timeout, new_timeout)) {
8925 "Increased stalling timeout temporarily to %d seconds\n",
8937 if (state.vBlocksInFlight.size() > 0) {
8938 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
8939 int nOtherPeersWithValidatedDownloads =
8940 m_peers_downloading_from - 1;
8942 state.m_downloading_since +
8943 std::chrono::seconds{consensusParams.nPowTargetSpacing} *
8946 nOtherPeersWithValidatedDownloads)) {
8947 LogPrintf("Timeout downloading block %s from peer=%d, "
8949 queuedBlock.pindex->GetBlockHash().ToString(),
8957 if (state.fSyncStarted &&
8958 peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
8961 if (current_time > peer->m_headers_sync_timeout &&
8962 nSyncStarted == 1 &&
8963 (m_num_preferred_download_peers -
8964 state.fPreferredDownload >=
8973 LogPrintf("Timeout downloading headers from peer=%d, "
8979 LogPrintf("Timeout downloading headers from noban "
8980 "peer=%d, not disconnecting\n",
8986 state.fSyncStarted = false;
8988 peer->m_headers_sync_timeout = 0us;
8994 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
9000 ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
9003 std::vector<CInv> vGetData;
9011 CNodeState &state = *State(pto->GetId());
9013 if (CanServeBlocks(*peer) &&
9014 ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
9017 std::vector<const CBlockIndex *> vToDownload;
9019 auto get_inflight_budget = [&state]() {
9022 static_cast<int>(state.vBlocksInFlight.size()));
9028 FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload,
9031 !IsLimitedPeer(*peer)) {
9032 TryDownloadingHistoricalBlocks(
9033 *peer, get_inflight_budget(), vToDownload,
9035 Assert(m_chainman.GetSnapshotBaseBlock()));
9039 BlockRequested(config, pto->GetId(), *pindex);
9044 if (state.vBlocksInFlight.empty() && staller != -1) {
9045 if (State(staller)->m_stalling_since == 0us) {
9046 State(staller)->m_stalling_since = current_time;
9053 auto addGetDataAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
9054 CInv inv(type, hash);
9057 vGetData.push_back(std::move(inv));
9069 LOCK(cs_proofrequest);
9070 std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
9072 m_proofrequest.GetRequestable(pto->GetId(), current_time, &expired);
9073 for (const auto &entry : expired) {
9075 "timeout of inflight proof %s from peer=%d\n",
9076 entry.second.ToString(), entry.first);
9078 for (const auto &proofid : requestable) {
9079 if (!AlreadyHaveProof(proofid)) {
9081 m_proofrequest.RequestedData(
9082 pto->GetId(), proofid,
9089 m_proofrequest.ForgetInvId(proofid);
9099 std::vector<std::pair<NodeId, TxId>> expired;
9101 m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
9102 for (const auto &entry : expired) {
9104 entry.second.ToString(), entry.first);
9106 for (const TxId &txid : requestable) {
9110 if (!AlreadyHaveTx(txid, false)) {
9111 addGetDataAndMaybeFlush(MSG_TX, txid);
9112 m_txrequest.RequestedData(
9119 m_txrequest.ForgetInvId(txid);
9123 if (!vGetData.empty()) {
9129 MaybeSendFeefilter(*pto, *peer, current_time);
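// End of SendMessages: block, proof and transaction requests accumulated
// above are batched into getdata requests via the addGetDataAndMaybeFlush
// helper, with m_txrequest and m_proofrequest tracking what is in flight and
// expiring stale requests before the final feefilter update is considered.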
9133 bool PeerManagerImpl::ReceivedAvalancheProof(CNode &node, Peer &peer,
9135 assert(proof != nullptr);
9139 AddKnownProof(peer, proofid);
9151 return node.m_avalanche_pubkey.has_value());
9152 auto saveProofIfStaker = [this, isStaker](const CNode &node,
9154 const NodeId nodeid) -> bool {
9166 LOCK(cs_proofrequest);
9167 m_proofrequest.ReceivedResponse(nodeid, proofid);
9169 if (AlreadyHaveProof(proofid)) {
9170 m_proofrequest.ForgetInvId(proofid);
9171 saveProofIfStaker(node, proofid, nodeid);
9181 return pm.registerProof(proof, state);
9183 WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
9184 RelayProof(proofid);
9186 node.m_last_proof_time = GetTime<std::chrono::seconds>();
9189 nodeid, proofid.ToString());
9211 "Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
9212 state.IsValid() ? "not-worth-polling"
9214 nodeid, proofid.ToString());
9217 saveProofIfStaker(node, proofid, nodeid);
9219 if (isStaker && m_opts.avalanche_staking_preconsensus) {
bool MoneyRange(const Amount nValue)
static constexpr Amount MAX_MONEY
No amount larger than this (in satoshi) is valid.
enum ReadStatus_t ReadStatus
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
@ CHAIN
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends,...
@ TRANSACTIONS
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid,...
@ SCRIPTS
Scripts & signatures ok.
@ TREE
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
arith_uint256 GetBlockProof(const CBlockIndex &block)
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params ¶ms)
Return the time it would take to redo the work difference between from and to, assuming the current h...
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
#define Assert(val)
Identity function.
#define Assume(val)
Assume is the identity function.
Stochastic address manager.
void Connected(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
We have successfully connected to this peer.
void Good(const CService &addr, bool test_before_evict=true, NodeSeconds time=Now< NodeSeconds >())
Mark an entry as accessible, possibly moving it from "new" to "tried".
bool Add(const std::vector< CAddress > &vAddr, const CNetAddr &source, std::chrono::seconds time_penalty=0s)
Attempt to add one or more addresses to addrman's new table.
void SetServices(const CService &addr, ServiceFlags nServices)
Update an entry's service bits.
void Discourage(const CNetAddr &net_addr)
bool IsBanned(const CNetAddr &net_addr)
Return whether net_addr is banned.
bool IsDiscouraged(const CNetAddr &net_addr)
Return whether net_addr is discouraged.
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
std::vector< CTransactionRef > txn
std::vector< uint32_t > indices
A CService with information about it as peer.
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
NodeSeconds nTime
Always included in serialization, except in the network format on INIT_PROTO_VERSION.
size_t BlockTxCount() const
std::vector< CTransactionRef > vtx
The block chain is a tree shaped structure starting with the genesis block at the root,...
bool IsValid(enum BlockValidity nUpTo=BlockValidity::TRANSACTIONS) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
CBlockIndex * pprev
pointer to the index of the predecessor of this block
CBlockHeader GetBlockHeader() const
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
bool HaveTxsDownloaded() const
Check whether this block's and all previous blocks' transactions have been downloaded (and stored to ...
int64_t GetBlockTime() const
unsigned int nTx
Number of transactions in this block.
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
BlockHash GetBlockHash() const
int nHeight
height of the entry in the chain. The genesis block has height 0
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
bool IsWithinSizeConstraints() const
True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS (c...
An in-memory indexed chain of blocks.
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
int Height() const
Return the maximal height in the chain.
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system.
const CBlock & GenesisBlock() const
const Consensus::Params & GetConsensus() const
void ForEachNode(const NodeFn &func)
bool OutboundTargetReached(bool historicalBlockServingLimit) const
check if the outbound target is reached.
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
bool GetNetworkActive() const
bool GetTryNewOutboundPeer() const
void SetTryNewOutboundPeer(bool flag)
unsigned int GetReceiveFloodSize() const
int GetExtraBlockRelayCount() const
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc)
void StartExtraBlockRelayPeers()
bool DisconnectNode(const std::string &node)
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
int GetExtraFullOutboundCount() const
std::vector< CAddress > GetAddresses(size_t max_addresses, size_t max_pct, std::optional< Network > network) const
Return all or many randomly selected addresses, optionally by network.
bool CheckIncomingNonce(uint64_t nonce)
bool ShouldRunInactivityChecks(const CNode &node, std::chrono::seconds now) const
Return true if we should disconnect the peer for failing an inactivity check.
void PushMessage(CNode *pnode, CSerializedNetMsg &&msg)
bool GetUseAddrmanOutgoing() const
Double ended buffer combining vector and stream-like interfaces.
Fee rate in satoshis per kilobyte: Amount / kB.
Amount GetFeePerK() const
Return the fee in satoshis for a size of 1000 bytes.
Reads data from an underlying stream, while hashing the read data.
Inv(ventory) message data.
bool IsMsgCmpctBlk() const
std::string ToString() const
bool IsMsgStakeContender() const
bool IsMsgFilteredBlk() const
Used to create a Merkle proof (usually from a subset of transactions), which consists of a block head...
std::vector< std::pair< size_t, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Transport protocol agnostic message container.
CSerializedNetMsg Make(int nFlags, std::string msg_type, Args &&...args) const
Information about a peer.
RecursiveMutex cs_vProcessMsg
Mutex cs_avalanche_pubkey
bool IsFeelerConn() const
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
bool ExpectServicesFromConn() const
std::atomic< int > nVersion
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
bool IsInboundConn() const
bool HasPermission(NetPermissionFlags permission) const
std::atomic_bool fPauseRecv
bool IsOutboundOrBlockRelayConn() const
bool IsManualConn() const
std::atomic< int64_t > nTimeOffset
const std::string m_addr_name
std::string ConnectionTypeAsString() const
void SetCommonVersion(int greatest_common_version)
std::atomic< bool > m_bip152_highbandwidth_to
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
std::atomic< bool > m_bip152_highbandwidth_from
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
std::atomic_bool fSuccessfullyConnected
bool IsAddrFetchConn() const
uint64_t GetLocalNonce() const
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
bool IsBlockOnlyConn() const
int GetCommonVersion() const
bool IsFullOutboundConn() const
uint64_t nRemoteHostNonce
Mutex m_subver_mutex
cleanSubVer is a sanitized string of the user agent byte array we read from the wire.
std::atomic_bool fPauseSend
std::chrono::seconds m_nextGetAvaAddr
uint64_t nRemoteExtraEntropy
uint64_t GetLocalExtraEntropy() const
SteadyMilliseconds m_last_poll
double getAvailabilityScore() const
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
void updateAvailabilityScore(double decayFactor)
The availability score is calculated using an exponentially weighted average.
std::atomic< std::chrono::seconds > m_avalanche_last_message_fault
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e.
std::atomic< int > m_avalanche_message_fault_counter
std::atomic< bool > m_avalanche_enabled
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e....
std::atomic_bool fDisconnect
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e....
void invsVoted(uint32_t count)
The node voted for count invs.
bool IsAvalancheOutboundConnection() const
An encapsulated public key.
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
Simple class for background tasks that should be run periodically or once "after a while".
void scheduleEvery(Predicate p, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat p until it returns false.
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
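A minimal sketch of the two scheduling calls above; the lambdas and intervals are placeholders:

    void ScheduleExampleTasks(CScheduler &scheduler) {
        // Repeats every minute for as long as the predicate returns true.
        scheduler.scheduleEvery([] { /* periodic work */ return true; },
                                std::chrono::minutes{1});
        // Runs exactly once, ten seconds from now.
        scheduler.scheduleFromNow([] { /* one-shot work */ },
                                  std::chrono::seconds{10});
    }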
A combination of a network address (CNetAddr) and a (TCP) port.
std::string ToString() const
std::vector< uint8_t > GetKey() const
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data.
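A minimal sketch of the deterministic-randomizer pattern suggested by GetDeterministicRandomizer(), Write(), Finalize() and RANDOMIZER_ID_ADDRESS_RELAY above; the values mixed in are illustrative:

    // Deterministic per-node 64-bit hash used when picking relay targets; the
    // same inputs always map to the same output for this node's randomizer key.
    uint64_t AddrRelayHash(const CConnman &connman, uint64_t time_bucket, uint64_t addr_key) {
        return connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
            .Write(time_bucket) // rotation period, so the mapping changes over time
            .Write(addr_key)    // value derived from the address being relayed
            .Finalize();        // SipHash-2-4 of everything written so far
    }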
std::set< std::reference_wrapper< const CTxMemPoolEntryRef >, CompareIteratorById > Parents
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
void removeConflicts(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(cs)
void RemoveUnbroadcastTx(const TxId &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
CFeeRate GetMinFee() const
The minimum fee to get into the mempool, which may itself not be enough for larger-sized transactions...
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it.
void removeRecursive(const CTransaction &tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs)
bool CompareTopologically(const TxId &txida, const TxId &txidb) const
TxMempoolInfo info(const TxId &txid) const
size_t DynamicMemoryUsage() const
std::vector< TxMempoolInfo > infoAll() const
bool setAvalancheFinalized(const CTxMemPoolEntryRef &tx, std::vector< TxId > &finalizedTxIds) EXCLUSIVE_LOCKS_REQUIRED(cs)
CTransactionRef GetConflictTx(const COutPoint &prevout) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Get the transaction in the pool that spends the same prevout.
bool exists(const TxId &txid) const
std::set< TxId > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
auto withOrphanage(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_orphanage)
const CFeeRate m_min_relay_feerate
auto withConflicting(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_conflicting)
void removeForFinalizedBlock(const std::vector< CTransactionRef > &vtx) EXCLUSIVE_LOCKS_REQUIRED(cs)
unsigned long size() const
std::optional< txiter > GetIter(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Returns an iterator to the given txid, if found.
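A minimal sketch combining exists() and info() from the entries above; in the upstream code both take the mempool lock internally, so no explicit LOCK(pool.cs) is shown:

    bool HaveMempoolTx(const CTxMemPool &pool, const TxId &txid) {
        if (!pool.exists(txid)) {
            return false;
        }
        const TxMempoolInfo entry = pool.info(txid); // metadata snapshot for the tx
        (void)entry; // fee, size, entry time, etc. would be read from here
        return true;
    }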
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &block)
Notifies listeners that a block which builds directly on our current tip has been received and connec...
virtual void BlockChecked(const CBlock &, const BlockValidationState &)
Notifies listeners of a block validation result.
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
Notifies listeners when the block chain tip advances.
virtual void BlockConnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being connected.
virtual void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being disconnected.
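A minimal sketch of a CValidationInterface subscriber that only reacts to tip updates; registration with the validation signals (not shown) is assumed to happen elsewhere:

    class TipLogger final : public CValidationInterface {
    protected:
        void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork,
                             bool fInitialDownload) override {
            if (fInitialDownload) {
                return; // skip the noisy notifications during initial block download
            }
            // react to the new tip here, e.g. schedule a block announcement
        }
    };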
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
SnapshotCompletionResult MaybeCompleteSnapshotValidation(std::function< void(bilingual_str)> shutdown_fnc=[](bilingual_str msg) { AbortNode(msg.original, msg);}) EXCLUSIVE_LOCKS_REQUIRED(...)
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
const CBlockIndex * GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(...)
Chainstate & ActiveChainstate() const
const CBlockIndex * GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The tip of the background sync chain.
MempoolAcceptResult ProcessTransaction(const CTransactionRef &tx, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Try to add a transaction to the memory pool.
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network)
bool ProcessNewBlock(const std::shared_ptr< const CBlock > &block, bool force_processing, bool min_pow_checked, bool *new_block, avalanche::Processor *const avalanche=nullptr) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The state of a background sync (for net processing)
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, bool min_pow_checked, BlockValidationState &state, const CBlockIndex **ppindex=nullptr, const std::optional< CCheckpointData > &test_checkpoints=std::nullopt) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
const arith_uint256 & MinimumChainWork() const
CChain & ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(...)
Check to see if caches are out of balance and if so, call ResizeCoinsCaches() as needed.
void ReportHeadersPresync(const arith_uint256 &work, int64_t height, int64_t timestamp)
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
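A minimal sketch reading the active tip through ChainstateManager, using GetMutex() to satisfy the lock annotations shown above:

    int ActiveTipHeight(ChainstateManager &chainman) {
        LOCK(chainman.GetMutex()); // required by ActiveTip()
        const CBlockIndex *tip = chainman.ActiveTip();
        return tip ? tip->nHeight : -1; // -1 when no chain is active yet
    }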
virtual uint64_t GetMaxBlockSize() const =0
A writer stream (for serialization) that computes a 256-bit hash.
size_t Count(NodeId peer) const
Count how many announcements a peer has (REQUESTED, CANDIDATE, and COMPLETED combined).
size_t CountInFlight(NodeId peer) const
Count how many REQUESTED announcements a peer has.
Interface for message handling.
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic< bool > &interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process protocol messages received from a given node.
virtual bool SendMessages(const Config &config, CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Send queued protocol messages to a given node.
virtual void InitializeNode(const Config &config, CNode &node, ServiceFlags our_services)=0
Initialize a peer (setup state, queue any initial messages)
virtual void FinalizeNode(const Config &config, const CNode &node)=0
Handle removal of a peer (clear state)
static bool HasFlag(NetPermissionFlags flags, NetPermissionFlags f)
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< TxHash, CTransactionRef > > &extra_txn)
bool IsTxAvailable(size_t index) const
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
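A minimal sketch of the compact-block reconstruction flow implied by InitData(), IsTxAvailable() and FillBlock(); it assumes a READ_STATUS_OK value in ReadStatus and elides the getblocktxn round trip for missing transactions:

    bool TryReconstruct(PartiallyDownloadedBlock &partialBlock,
                        const CBlockHeaderAndShortTxIDs &cmpctblock, CBlock &block) {
        // Seed the reconstruction from the short IDs; no extra transactions supplied.
        if (partialBlock.InitData(cmpctblock, {}) != READ_STATUS_OK) {
            return false;
        }
        // A real caller would request every index where IsTxAvailable() is false
        // via getblocktxn before attempting FillBlock().
        return partialBlock.FillBlock(block, /*vtx_missing=*/{}) == READ_STATUS_OK;
    }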
virtual std::optional< std::string > FetchBlock(const Config &config, NodeId peer_id, const CBlockIndex &block_index)=0
Attempt to manually fetch block from a given peer.
virtual void SendPings()=0
Send ping message to all peers.
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, avalanche::Processor *const avalanche, Options opts)
virtual void ProcessMessage(const Config &config, CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process a single message from a peer.
virtual void StartScheduledTasks(CScheduler &scheduler)=0
Begin running background tasks, should only be called once.
virtual bool IgnoresIncomingTxs()=0
Whether this node ignores txs received over p2p.
virtual void UnitTestMisbehaving(const NodeId peer_id, const int howmuch)=0
Public for unit testing.
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const =0
Get statistics from node state.
virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)=0
This function is used for testing the stale tip eviction logic, see denialofservice_tests....
virtual void CheckForStaleTipAndEvictPeers()=0
Evict extra outbound peers.
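A minimal sketch of wiring up net processing through the factory above, assuming Options is the nested PeerManager::Options type and that every referenced object outlives the result:

    std::unique_ptr<PeerManager> MakePeerManager(CConnman &connman, AddrMan &addrman,
                                                 BanMan &banman, ChainstateManager &chainman,
                                                 CTxMemPool &mempool) {
        return PeerManager::make(connman, addrman, &banman, chainman, mempool,
                                 /*avalanche=*/nullptr, PeerManager::Options{});
    }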
static RCUPtr make(Args &&...args)
Construct a new object that is owned by the pointer.
int EraseTx(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase a tx by txid.
void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all txs announced by a peer (e.g., after that peer disconnects)
std::vector< CTransactionRef > GetChildrenFromSamePeer(const CTransactionRef &parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get all children that spend from this tx and were received from nodeid.
bool AddTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add a new transaction to the pool.
unsigned int LimitTxs(unsigned int max_txs, FastRandomContext &rng) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Limit the txs to the given maximum.
void EraseForBlock(const CBlock &block) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all txs included in or invalidated by a new block.
std::vector< CTransactionRef > GetConflictTxs(const CTransactionRef &tx) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
void AddChildrenToWorkSet(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add any tx that lists a particular tx as a parent into the originating peer's work set.
bool HaveTx(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Check if we already have the transaction.
std::vector< std::pair< CTransactionRef, NodeId > > GetChildrenFromDifferentPeer(const CTransactionRef &parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get all children that spend from this tx but were not received from nodeid.
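A minimal sketch of the add-then-limit pattern for the orphan pool; the concrete class name is not shown in the entries above, so it is left as a template parameter:

    template <typename OrphanPool>
    void AddOrphanCapped(OrphanPool &orphanage, const CTransactionRef &tx, NodeId peer,
                         FastRandomContext &rng, unsigned int max_orphans) {
        if (!orphanage.HaveTx(tx->GetId())) {
            orphanage.AddTx(tx, peer);            // remember which peer announced it
            orphanage.LimitTxs(max_orphans, rng); // randomly evict if above the cap
        }
    }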
std::string GetRejectReason() const
std::string ToString() const
256-bit unsigned big integer.
const std::vector< PrefilledProof > & getPrefilledProofs() const
uint64_t getShortID(const ProofId &proofid) const
const std::vector< uint64_t > & getShortIDs() const
ProofId getProofId() const
bool verify(DelegationState &state, CPubKey &auth) const
const DelegationId & getId() const
const LimitedProofId & getLimitedProofId() const
bool shouldRequestMoreNodes()
Returns true if we encountered a lack of nodes since the last call.
bool exists(const ProofId &proofid) const
Return true if the (valid) proof exists, but only for non-dangling proofs.
bool forPeer(const ProofId &proofid, Callable &&func) const
bool addNode(NodeId nodeid, const ProofId &proofid)
Node API.
void removeUnbroadcastProof(const ProofId &proofid)
const ProofRadixTree & getShareableProofsSnapshot() const
bool isBoundToPeer(const ProofId &proofid) const
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid, const bool present)
void forEachPeer(Callable &&func) const
void setInvalid(const ProofId &proofid)
bool isInvalid(const ProofId &proofid) const
bool isImmature(const ProofId &proofid) const
auto getUnbroadcastProofs() const
bool isInConflictingPool(const ProofId &proofid) const
void sendResponse(CNode *pfrom, Response response) const
void finalizeStakeContender(const StakeContenderId &contenderId) EXCLUSIVE_LOCKS_REQUIRED(cs_main
bool addToReconcile(const AnyVoteItem &item) EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems)
void acceptStakeContender(const StakeContenderId &contenderId) EXCLUSIVE_LOCKS_REQUIRED(!cs_stakeContenderCache)
int64_t getAvaproofsNodeCounter() const
bool registerVotes(NodeId nodeid, const Response &response, std::vector< VoteItemUpdate > &updates, int &banscore, std::string &error) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
bool sendHello(CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds)
Send an avahello message.
void setRecentlyFinalized(const uint256 &itemId) EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems)
bool isQuorumEstablished() LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
const bool m_preConsensus
ProofRef getLocalProof() const
void addStakeContender(const ProofRef &proof) EXCLUSIVE_LOCKS_REQUIRED(cs_main
Track votes on stake contenders.
bool reconcileOrFinalize(const ProofRef &proof) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
Wrapper around the addToReconcile for proofs that adds back the finalization flag to the peer if it i...
void sendDelayedAvahello() EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds)
void rejectStakeContender(const StakeContenderId &contenderId) EXCLUSIVE_LOCKS_REQUIRED(!cs_stakeContenderCache)
auto withPeerManager(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
int getStakeContenderStatus(const StakeContenderId &contenderId) const EXCLUSIVE_LOCKS_REQUIRED(!cs_stakeContenderCache
void avaproofsSent(NodeId nodeid) LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
std::vector< uint32_t > indices
std::string ToString() const
std::string GetHex() const
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos) const
Functions for disk access for blocks.
CBlockIndex * LookupBlockIndex(const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool LoadingBlocks() const
bool IsPruneMode() const
Whether running in -prune mode.
static const uint256 ZERO
@ BLOCK_CHECKPOINT
the block failed to meet one of our checkpoints
@ BLOCK_HEADER_LOW_WORK
the block header may be on a too-little-work chain
@ BLOCK_INVALID_HEADER
invalid proof of work or time too old
@ BLOCK_CACHED_INVALID
this block was cached as being invalid and we didn't store the reason why
@ BLOCK_CONSENSUS
invalid by consensus rules (excluding any below reasons)
@ BLOCK_MISSING_PREV
We don't have the previous block the checked one is built on.
@ BLOCK_INVALID_PREV
A block this one builds on is invalid.
@ BLOCK_MUTATED
the block's data didn't match the data committed to by the PoW
@ BLOCK_TIME_FUTURE
block timestamp was > 2 hours in the future (or our clock is bad)
@ BLOCK_RESULT_UNSET
initial value. Block has not yet been rejected
@ TX_MISSING_INPUTS
transaction was missing some of its inputs
@ TX_CHILD_BEFORE_PARENT
This tx's outputs are already spent in the mempool.
@ TX_MEMPOOL_POLICY
violated mempool's fee/size/descendant/etc limits
@ TX_PACKAGE_RECONSIDERABLE
fails some policy, but might be acceptable if submitted in a (different) package
@ TX_UNKNOWN
transaction was not validated because package failed
@ TX_PREMATURE_SPEND
transaction spends a coinbase too early, or violates locktime/sequence locks
@ TX_DUPLICATE
Tx already in mempool or in the chain.
@ TX_INPUTS_NOT_STANDARD
inputs failed policy rules
@ TX_CONFLICT
Tx conflicts with a finalized tx, i.e.
@ TX_NOT_STANDARD
otherwise didn't meet our local policy rules
@ TX_AVALANCHE_RECONSIDERABLE
fails some policy, but might be reconsidered by avalanche voting
@ TX_NO_MEMPOOL
this node does not have a mempool so can't validate the transaction
@ TX_RESULT_UNSET
initial value. Tx has not yet been rejected
@ TX_CONSENSUS
invalid by consensus rules
static size_t RecursiveDynamicUsage(const CScript &script)
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
std::array< uint8_t, CPubKey::SCHNORR_SIZE > SchnorrSig
a Schnorr signature
bool error(const char *fmt, const Args &...args)
#define LogPrint(category,...)
#define LogPrintfCategory(category,...)
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
const char * AVAPROOFSREQ
Request for missing avalanche proofs after an avaproofs message has been processed.
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
const char * BLOCK
The block message transmits a single serialized block.
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter.
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message,...
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
const char * AVAPROOFS
The avaproofs message sends the proof short ids of all the valid proofs that we know.
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
const char * GETAVAPROOFS
The getavaproofs message requests an avaproofs message that provides the proof short ids of all the v...
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
const char * GETAVAADDR
The getavaaddr message requests an addr message from the receiving node, containing IP addresses of t...
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids".
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
const char * TX
The tx message transmits a single transaction.
const char * AVAHELLO
Contains a delegation and a signature.
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
const char * AVARESPONSE
Contains an avalanche::Response.
const char * GETDATA
The getdata message requests one or more data objects from another node.
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
const char * BLOCKTXN
Contains a BlockTransactions.
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks,...
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected.
const char * AVAPOLL
Contains an avalanche::Poll.
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
const char * AVAPROOF
Contains an avalanche::Proof.
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest. The peer should respond with a "blocktxn" message.
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
ShortIdProcessor< PrefilledProof, ShortIdProcessorPrefilledProofAdapter, ProofRefCompare > ProofShortIdProcessor
std::variant< const ProofRef, const CBlockIndex *, const StakeContenderId, const CTransactionRef > AnyVoteItem
RCUPtr< const Proof > ProofRef
Implement std::hash so RCUPtr can be used as a key for maps or sets.
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
std::function< void(const CAddress &addr, const std::string &msg_type, Span< const uint8_t > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
std::string userAgent(const Config &config)
bool IsReachable(enum Network net)
bool SeenLocal(const CService &addr)
vote for a local address
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
@ BypassProofRequestLimits
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
static constexpr auto GETAVAADDR_INTERVAL
Minimum time between 2 successive getavaaddr messages from the same peer.
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically relayed before uncondi...
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB
Maximum number of inventory items to send per transmission.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch?...
static uint32_t getAvalancheVoteForProof(const avalanche::Processor &avalanche, const avalanche::ProofId &id)
Decide a response for an Avalanche poll about the given proof.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay.
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS
Maximum number of unconnecting headers announcements before DoS score.
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict.
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
static constexpr auto RELAY_TX_CACHE_TIME
How long to cache transactions in mapRelay for normal relay.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
bool IsAvalancheMessageType(const std::string &msg_type)
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/behind headers chain.
static std::chrono::microseconds ComputeRequestTime(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams, std::chrono::microseconds current_time, bool preferred)
Compute the request time for this announcement, current time plus delays for:
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
static constexpr DataRequestParameters TX_REQUEST_PARAMS
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT
If no proof was requested from a compact proof message after this timeout expired,...
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY
The number of most recently announced transactions a peer can request.
static constexpr auto UNCONDITIONAL_RELAY_DELAY
How long a transaction has to be in the mempool before it can unconditionally be relayed (even when n...
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we're willing to process on average.
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
static constexpr DataRequestParameters PROOF_REQUEST_PARAMS
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
static bool TooManyAnnouncements(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams)
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message.
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
static const int DISCOURAGEMENT_THRESHOLD
Threshold for marking a node to be discouraged, e.g.
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
static constexpr int ADDRV2_FORMAT
A flag that is ORed into the protocol version to designate that addresses should be serialized in (un...
bool IsProxy(const CNetAddr &addr)
static constexpr NodeId NO_NODE
Special NodeId that represent no node.
uint256 GetPackageHash(const Package &package)
std::vector< CTransactionRef > Package
A package is an ordered list of transactions.
static constexpr Amount DEFAULT_MIN_RELAY_TX_FEE_PER_KB(1000 *SATOSHI)
Default for -minrelaytxfee, minimum relay fee for transactions.
std::shared_ptr< const CTransaction > CTransactionRef
static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL
Maximum item that can be polled at once.
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH
Maximum length of incoming protocol messages (Currently 2MB).
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services),...
@ MSG_AVA_STAKE_CONTENDER
@ MSG_CMPCT_BLOCK
Defined in BIP152.
ServiceFlags
nServices flags.
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB.
std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval)
Return a timestamp in the future sampled from an exponential distribution (https://en....
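A minimal sketch of the Poisson-timed broadcast pattern that GetExponentialRand() enables; the state handling is illustrative:

    // When the scheduled time has passed, do the send and draw the next send
    // time; sends then average one per interval but are hard to predict.
    void MaybeTrickleSend(std::chrono::microseconds now,
                          std::chrono::microseconds &next_send,
                          std::chrono::seconds avg_interval) {
        if (now < next_send) {
            return; // not due yet
        }
        // ... flush whatever is queued for this peer here ...
        next_send = GetExponentialRand(now, avg_interval);
    }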
constexpr auto GetRandMillis
void Shuffle(I first, I last, R &&rng)
More efficient than using std::shuffle on a FastRandomContext.
reverse_range< T > reverse_iterate(T &x)
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
void Unserialize(Stream &, char)=delete
#define LIMITED_STRING(obj, n)
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(Span{std::forward< V >(v)}))
Like the Span constructor, but for (const) uint8_t member types only.
static const double AVALANCHE_STATISTICS_DECAY_FACTOR
Pre-computed decay factor for the avalanche statistics computation.
static constexpr std::chrono::minutes AVALANCHE_STATISTICS_REFRESH_PERIOD
Refresh period for the avalanche statistics computation.
std::string ToString(const T &t)
Locale-independent version of std::to_string.
static constexpr Amount zero() noexcept
A BlockHash is a unique identifier for a block.
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
std::vector< BlockHash > vHave
std::chrono::microseconds m_ping_wait
Amount m_fee_filter_received
std::vector< int > vHeightInFlight
bool m_addr_relay_enabled
uint64_t m_addr_rate_limited
uint64_t m_addr_processed
ServiceFlags their_services
std::vector< uint8_t > data
Parameters that influence chain consensus.
int64_t nPowTargetSpacing
std::chrono::seconds PowTargetSpacing() const
const std::chrono::seconds overloaded_peer_delay
How long to delay requesting data from overloaded peers (see max_peer_request_in_flight).
const size_t max_peer_announcements
Maximum number of inventories to consider for requesting, per peer.
const std::chrono::seconds nonpref_peer_delay
How long to delay requesting data from non-preferred peers.
const NetPermissionFlags bypass_request_limits_permissions
Permission flags a peer requires to bypass the request limits tracking and delay penalty.
const std::chrono::microseconds getdata_interval
How long to wait (in microseconds) before a data request from an additional peer.
const size_t max_peer_request_in_flight
Maximum number of in-flight data requests from a peer.
Validation result for a transaction evaluated by MemPoolAccept (single or package).
const ResultType m_result_type
Result type.
const TxValidationState m_state
Contains information about why the transaction failed.
@ MEMPOOL_ENTRY
Valid, transaction was already in the mempool.
@ VALID
Fully validated, valid.
static time_point now() noexcept
Return current system time or mocked time, if set.
std::chrono::time_point< NodeClock > time_point
Validation result for package mempool acceptance.
PackageValidationState m_state
std::map< TxId, MempoolAcceptResult > m_tx_results
Map from txid to finished MempoolAcceptResults.
This is a radix tree storing values identified by a unique key.
A TxId is the identifier of a transaction.
std::chrono::seconds registration_time
const ProofId & getProofId() const
StakeContenderIds are unique for each block to ensure that the peer polling for their acceptance has ...
#define AssertLockNotHeld(cs)
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define LOCKS_EXCLUDED(...)
#define NO_THREAD_SAFETY_ANALYSIS
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
constexpr int64_t count_microseconds(std::chrono::microseconds t)
constexpr int64_t count_seconds(std::chrono::seconds t)
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
double CountSecondsDouble(SecondsDouble t)
Helper to count the seconds in any std::chrono::duration type.
NodeClock::time_point GetAdjustedTime()
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
#define TRACE6(context, event, a, b, c, d, e, f)
@ AVALANCHE
Removed by avalanche vote.
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
arith_uint256 CalculateHeadersWork(const std::vector< CBlockHeader > &headers)
Return the sum of the work on a given set of headers.
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check that the proof of work on each block header matches the value in nBits.
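A minimal sketch of the cheap anti-DoS checks these two helpers make possible before headers are processed further; the threshold parameter is supplied by the caller:

    bool HeadersPassCheapChecks(const std::vector<CBlockHeader> &headers,
                                const Consensus::Params &params,
                                const arith_uint256 &minimum_chain_work) {
        if (!HasValidProofOfWork(headers, params)) {
            return false; // some header's hash does not satisfy its own nBits target
        }
        // Ignore batches whose total claimed work is below our anti-DoS floor.
        return CalculateHeadersWork(headers) >= minimum_chain_work;
    }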
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate, CTxMemPool &pool, const Package &package, bool test_accept)
Validate (and maybe submit) a package to the mempool.
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
CMainSignals & GetMainSignals()
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version
static const int PROTOCOL_VERSION
network protocol versioning
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.