110 "Max protocol message length must be greater than largest "
111 "possible INV message");
165 std::chrono::seconds(2),
166 std::chrono::seconds(2),
167 std::chrono::seconds(60),
174 std::chrono::seconds(2),
175 std::chrono::seconds(2),
176 std::chrono::seconds(60),
275 std::chrono::seconds{1},
276 "INVENTORY_RELAY_MAX too low");
333 std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
371 std::atomic<ServiceFlags> m_their_services{
NODE_NONE};
374 Mutex m_misbehavior_mutex;
376 int m_misbehavior_score
GUARDED_BY(m_misbehavior_mutex){0};
379 bool m_should_discourage
GUARDED_BY(m_misbehavior_mutex){
false};
382 Mutex m_block_inv_mutex;
388 std::vector<BlockHash> m_blocks_for_inv_relay
GUARDED_BY(m_block_inv_mutex);
394 std::vector<BlockHash>
395 m_blocks_for_headers_relay
GUARDED_BY(m_block_inv_mutex);
406 std::atomic<int> m_starting_height{-1};
409 std::atomic<uint64_t> m_ping_nonce_sent{0};
411 std::atomic<std::chrono::microseconds> m_ping_start{0us};
413 std::atomic<bool> m_ping_queued{
false};
423 std::chrono::microseconds m_next_send_feefilter
436 bool m_relay_txs
GUARDED_BY(m_bloom_filter_mutex){
false};
441 std::unique_ptr<CBloomFilter>
457 GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
463 std::set<TxId> m_tx_inventory_to_send
GUARDED_BY(m_tx_inventory_mutex);
469 bool m_send_mempool
GUARDED_BY(m_tx_inventory_mutex){
false};
471 std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
476 std::chrono::microseconds m_next_inv_send_time
483 std::atomic<Amount> m_fee_filter_received{
Amount::zero()};
491 LOCK(m_tx_relay_mutex);
493 m_tx_relay = std::make_unique<Peer::TxRelay>();
494 return m_tx_relay.get();
498 return WITH_LOCK(m_tx_relay_mutex,
return m_tx_relay.get());
500 const TxRelay *GetTxRelay() const
502 return WITH_LOCK(m_tx_relay_mutex,
return m_tx_relay.get());
507 std::set<avalanche::ProofId>
508 m_proof_inventory_to_send
GUARDED_BY(m_proof_inventory_mutex);
511 GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
518 std::chrono::microseconds m_next_inv_send_time{0};
522 std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
523 std::atomic<bool> compactproofs_requested{
false};
530 const std::unique_ptr<ProofRelay> m_proof_relay;
535 std::vector<CAddress>
547 std::unique_ptr<CRollingBloomFilter>
565 std::atomic_bool m_addr_relay_enabled{
false};
569 mutable Mutex m_addr_send_times_mutex;
571 std::chrono::microseconds
572 m_next_addr_send
GUARDED_BY(m_addr_send_times_mutex){0};
574 std::chrono::microseconds
575 m_next_local_addr_send
GUARDED_BY(m_addr_send_times_mutex){0};
580 std::atomic_bool m_wants_addrv2{
false};
584 mutable Mutex m_addr_token_bucket_mutex;
589 double m_addr_token_bucket
GUARDED_BY(m_addr_token_bucket_mutex){1.0};
591 std::chrono::microseconds
593 GetTime<std::chrono::microseconds>()};
595 std::atomic<uint64_t> m_addr_rate_limited{0};
600 std::atomic<uint64_t> m_addr_processed{0};
612 bool m_inv_triggered_getheaders_before_sync
616 Mutex m_getdata_requests_mutex;
618 std::deque<CInv> m_getdata_requests
GUARDED_BY(m_getdata_requests_mutex);
625 Mutex m_headers_sync_mutex;
630 std::unique_ptr<HeadersSyncState>
635 std::atomic<bool> m_sent_sendheaders{
false};
638 int m_num_unconnecting_headers_msgs
642 std::chrono::microseconds m_headers_sync_timeout
653 : m_id(id), m_our_services{our_services},
655 ?
std::make_unique<ProofRelay>()
659 mutable Mutex m_tx_relay_mutex;
666 std::unique_ptr<TxRelay> m_tx_relay
GUARDED_BY(m_tx_relay_mutex);
669 using PeerRef = std::shared_ptr<Peer>;
687 bool fSyncStarted{
false};
690 std::chrono::microseconds m_stalling_since{0us};
691 std::list<QueuedBlock> vBlocksInFlight;
694 std::chrono::microseconds m_downloading_since{0us};
695 int nBlocksInFlight{0};
697 bool fPreferredDownload{
false};
702 bool m_requested_hb_cmpctblocks{
false};
704 bool m_provides_cmpctblocks{
false};
732 struct ChainSyncTimeoutState {
735 std::chrono::seconds m_timeout{0s};
739 bool m_sent_getheaders{
false};
742 bool m_protect{
false};
745 ChainSyncTimeoutState m_chain_sync;
748 int64_t m_last_block_announcement{0};
751 const bool m_is_inbound;
753 CNodeState(
bool is_inbound) : m_is_inbound(is_inbound) {}
760 bool ignore_incoming_txs);
771 bool fInitialDownload)
override
777 const std::shared_ptr<const CBlock> &pblock)
override
786 !m_headers_presync_mutex);
788 std::atomic<bool> &interrupt)
override
790 !m_recent_confirmed_transactions_mutex,
791 !m_most_recent_block_mutex, !cs_proofrequest,
792 !m_headers_presync_mutex, g_msgproc_mutex);
795 !m_recent_confirmed_transactions_mutex,
796 !m_most_recent_block_mutex, !cs_proofrequest,
802 std::optional<std::string>
809 void RelayTransaction(const
TxId &txid) override
811 void RelayProof(const
avalanche::ProofId &proofid) override
813 void SetBestHeight(
int height)
override { m_best_height = height; };
816 Misbehaving(*
Assert(GetPeerRef(peer_id)), howmuch,
"");
820 const std::chrono::microseconds time_received,
821 const std::atomic<bool> &interruptMsgProc)
override
823 !m_recent_confirmed_transactions_mutex,
824 !m_most_recent_block_mutex, !cs_proofrequest,
825 !m_headers_presync_mutex, g_msgproc_mutex);
827 int64_t time_in_seconds)
override;
834 void ConsiderEviction(
CNode &pto, Peer &peer,
835 std::chrono::seconds time_in_seconds)
842 void EvictExtraOutboundPeers(std::chrono::seconds now)
849 void ReattemptInitialBroadcast(
CScheduler &scheduler)
855 void UpdateAvalancheStatistics()
const;
860 void AvalanchePeriodicNetworking(
CScheduler &scheduler)
const;
879 void Misbehaving(Peer &peer,
int howmuch,
const std::string &message);
893 bool MaybePunishNodeForBlock(
NodeId nodeid,
895 bool via_compact_block,
896 const std::string &message =
"")
906 const
std::
string &message = "")
918 bool MaybeDiscourageAndDisconnect(
CNode &pnode, Peer &peer);
920 void ProcessOrphanTx(const
Config &config,
std::set<
TxId> &orphan_work_set)
933 void ProcessHeadersMessage(const
Config &config,
CNode &pfrom, Peer &peer,
935 bool via_compact_block)
954 void HandleFewUnconnectingHeaders(
CNode &pfrom, Peer &peer,
959 CheckHeadersAreContinuous(const
std::vector<
CBlockHeader> &headers) const;
979 bool IsContinuationOfLowWorkHeadersSync(Peer &peer,
CNode &pfrom,
982 !m_headers_presync_mutex, g_msgproc_mutex);
997 bool TryLowWorkHeadersSync(Peer &peer,
CNode &pfrom,
1001 !m_headers_presync_mutex, g_msgproc_mutex);
1007 bool IsAncestorOfBestHeaderOrTip(const
CBlockIndex *header)
1021 void HeadersDirectFetchBlocks(const
Config &config,
CNode &pfrom,
1024 void UpdatePeerStateForReceivedHeaders(
CNode &pfrom, Peer &peer,
1026 bool received_new_header,
1027 bool may_have_more_headers)
1030 void SendBlockTransactions(
CNode &pfrom, Peer &peer, const
CBlock &block,
1039 std::chrono::microseconds current_time)
1049 std::chrono::microseconds current_time,
bool preferred)
1053 void PushNodeVersion(const
Config &config,
CNode &pnode, const Peer &peer);
1061 void MaybeSendPing(
CNode &node_to, Peer &peer,
1062 std::chrono::microseconds now);
1065 void MaybeSendAddr(
CNode &
node, Peer &peer,
1066 std::chrono::microseconds current_time)
1073 void MaybeSendSendHeaders(
CNode &
node, Peer &peer)
1077 void MaybeSendFeefilter(
CNode &
node, Peer &peer,
1078 std::chrono::microseconds current_time)
1090 void RelayAddress(
NodeId originator, const
CAddress &addr,
bool fReachable)
1105 Mutex cs_proofrequest;
1110 std::atomic<
int> m_best_height{-1};
1113 std::chrono::seconds m_stale_tip_check_time{0s};
1116 const bool m_ignore_incoming_txs;
1122 bool m_initial_sync_finished{
false};
1128 mutable Mutex m_peer_mutex;
1135 std::map<NodeId, PeerRef> m_peer_map
GUARDED_BY(m_peer_mutex);
1144 const CNodeState *State(
NodeId pnode)
const
1149 std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
1156 m_last_block_inv_triggering_headers_sync
GUARDED_BY(g_msgproc_mutex){};
1164 std::map<BlockHash, std::pair<NodeId, bool>>
1174 std::atomic<std::chrono::seconds> m_block_stalling_timeout{
1177 bool AlreadyHaveTx(
const TxId &txid)
1179 !m_recent_confirmed_transactions_mutex);
1209 mutable Mutex m_recent_confirmed_transactions_mutex;
1211 GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};
1220 std::chrono::microseconds
1221 NextInvToInbounds(std::chrono::microseconds now,
1222 std::chrono::seconds average_interval);
1226 Mutex m_most_recent_block_mutex;
1227 std::shared_ptr<const CBlock>
1228 m_most_recent_block
GUARDED_BY(m_most_recent_block_mutex);
1229 std::shared_ptr<const CBlockHeaderAndShortTxIDs>
1230 m_most_recent_compact_block
GUARDED_BY(m_most_recent_block_mutex);
1236 Mutex m_headers_presync_mutex;
1247 using HeadersPresyncStats =
1248 std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
1250 std::map<NodeId, HeadersPresyncStats>
1251 m_headers_presync_stats
GUARDED_BY(m_headers_presync_mutex){};
1255 std::atomic_bool m_headers_presync_should_signal{
false};
1263 bool IsBlockRequested(
const BlockHash &hash)
1271 void RemoveBlockRequest(
const BlockHash &hash)
1280 bool BlockRequested(
const Config &config,
NodeId nodeid,
1282 std::list<QueuedBlock>::iterator **pit =
nullptr)
1291 void FindNextBlocksToDownload(
NodeId nodeid,
unsigned int count,
1300 std::atomic<
std::chrono::seconds> m_last_tip_update{0s};
1307 const std::chrono::seconds mempool_req,
1308 const std::chrono::seconds now)
1312 void ProcessGetData(
const Config &config,
CNode &pfrom, Peer &peer,
1313 const std::atomic<bool> &interruptMsgProc)
1315 peer.m_getdata_requests_mutex,
1321 const std::shared_ptr<const CBlock> &block,
1322 bool force_processing,
bool min_pow_checked);
1325 typedef std::map<TxId, CTransactionRef> MapRelay;
1332 std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>>
1341 void MaybeSetPeerAsAnnouncingHeaderAndIDs(
NodeId nodeid)
1363 std::vector<std::pair<TxHash, CTransactionRef>>
1371 void ProcessBlockAvailability(
NodeId nodeid)
1386 bool BlockRequestAllowed(const
CBlockIndex *pindex)
1388 bool AlreadyHaveBlock(const
BlockHash &block_hash)
1390 bool AlreadyHaveProof(const
avalanche::ProofId &proofid);
1391 void ProcessGetBlockData(const
Config &config,
CNode &pfrom, Peer &peer,
1414 bool PrepareBlockFilterRequest(
CNode &
node, Peer &peer,
1416 uint32_t start_height,
1418 uint32_t max_height_diff,
1459 uint32_t GetAvalancheVoteForBlock(const
BlockHash &hash) const
1468 uint32_t GetAvalancheVoteForTx(const
TxId &
id) const
1470 !m_recent_confirmed_transactions_mutex);
1479 bool SetupAddressRelay(const
CNode &
node, Peer &peer)
1482 void AddAddressKnown(Peer &peer, const
CAddress &addr)
1484 void PushAddress(Peer &peer, const
CAddress &addr,
1493 bool ReceivedAvalancheProof(
CNode &
node, Peer &peer,
1499 const
std::chrono::seconds now)
1502 bool isPreferredDownloadPeer(const
CNode &pfrom);
1505 const CNodeState *PeerManagerImpl::State(
NodeId pnode) const
1507 std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1508 if (it == m_node_states.end()) {
1515 CNodeState *PeerManagerImpl::State(
NodeId pnode)
1517 return const_cast<CNodeState *
>(std::as_const(*this).State(pnode));
1525 static bool IsAddrCompatible(
const Peer &peer,
const CAddress &addr) {
1529 void PeerManagerImpl::AddAddressKnown(Peer &peer,
const CAddress &addr) {
1530 assert(peer.m_addr_known);
1531 peer.m_addr_known->insert(addr.
GetKey());
1534 void PeerManagerImpl::PushAddress(Peer &peer,
const CAddress &addr,
1539 assert(peer.m_addr_known);
1540 if (addr.
IsValid() && !peer.m_addr_known->contains(addr.
GetKey()) &&
1541 IsAddrCompatible(peer, addr)) {
1543 peer.m_addrs_to_send[insecure_rand.
randrange(
1544 peer.m_addrs_to_send.size())] = addr;
1546 peer.m_addrs_to_send.push_back(addr);
1551 static void AddKnownTx(Peer &peer,
const TxId &txid) {
1552 auto tx_relay = peer.GetTxRelay();
1557 LOCK(tx_relay->m_tx_inventory_mutex);
1558 tx_relay->m_tx_inventory_known_filter.insert(txid);
1562 if (peer.m_proof_relay !=
nullptr) {
1563 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
1564 peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);
1568 bool PeerManagerImpl::isPreferredDownloadPeer(
const CNode &pfrom) {
1570 const CNodeState *state = State(pfrom.
GetId());
1571 return state && state->fPreferredDownload;
1574 static bool CanServeBlocks(
const Peer &peer) {
1582 static bool IsLimitedPeer(
const Peer &peer) {
1587 std::chrono::microseconds
1588 PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
1589 std::chrono::seconds average_interval) {
1590 if (m_next_inv_to_inbounds.load() < now) {
1597 return m_next_inv_to_inbounds;
1600 bool PeerManagerImpl::IsBlockRequested(
const BlockHash &hash) {
1601 return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end();
1604 void PeerManagerImpl::RemoveBlockRequest(
const BlockHash &hash) {
1605 auto it = mapBlocksInFlight.find(hash);
1607 if (it == mapBlocksInFlight.end()) {
1612 auto [node_id, list_it] = it->second;
1613 CNodeState *state = State(node_id);
1614 assert(state !=
nullptr);
1616 if (state->vBlocksInFlight.begin() == list_it) {
1619 state->m_downloading_since = std::max(
1620 state->m_downloading_since, GetTime<std::chrono::microseconds>());
1622 state->vBlocksInFlight.erase(list_it);
1624 state->nBlocksInFlight--;
1625 if (state->nBlocksInFlight == 0) {
1627 m_peers_downloading_from--;
1629 state->m_stalling_since = 0us;
1630 mapBlocksInFlight.erase(it);
1633 bool PeerManagerImpl::BlockRequested(
const Config &config,
NodeId nodeid,
1635 std::list<QueuedBlock>::iterator **pit) {
1638 CNodeState *state = State(nodeid);
1639 assert(state !=
nullptr);
1643 std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
1644 itInFlight = mapBlocksInFlight.find(hash);
1645 if (itInFlight != mapBlocksInFlight.end() &&
1646 itInFlight->second.first == nodeid) {
1648 *pit = &itInFlight->second.second;
1654 RemoveBlockRequest(hash);
1656 std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
1657 state->vBlocksInFlight.end(),
1658 {&block, std::unique_ptr<PartiallyDownloadedBlock>(
1659 pit ? new PartiallyDownloadedBlock(config, &m_mempool)
1661 state->nBlocksInFlight++;
1662 if (state->nBlocksInFlight == 1) {
1664 state->m_downloading_since = GetTime<std::chrono::microseconds>();
1665 m_peers_downloading_from++;
1668 itInFlight = mapBlocksInFlight
1669 .insert(std::make_pair(hash, std::make_pair(nodeid, it)))
1673 *pit = &itInFlight->second.second;
1679 void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(
NodeId nodeid) {
1685 if (m_ignore_incoming_txs) {
1689 CNodeState *nodestate = State(nodeid);
1694 if (!nodestate->m_provides_cmpctblocks) {
1697 int num_outbound_hb_peers = 0;
1698 for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
1699 it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
1700 if (*it == nodeid) {
1701 lNodesAnnouncingHeaderAndIDs.erase(it);
1702 lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
1705 CNodeState *state = State(*it);
1706 if (state !=
nullptr && !state->m_is_inbound) {
1707 ++num_outbound_hb_peers;
1710 if (nodestate->m_is_inbound) {
1713 if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
1714 num_outbound_hb_peers == 1) {
1715 CNodeState *remove_node =
1716 State(lNodesAnnouncingHeaderAndIDs.front());
1717 if (remove_node !=
nullptr && !remove_node->m_is_inbound) {
1720 std::swap(lNodesAnnouncingHeaderAndIDs.front(),
1721 *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
1728 if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
1732 lNodesAnnouncingHeaderAndIDs.front(), [
this](
CNode *pnodeStop) {
1733 m_connman.PushMessage(
1734 pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
1735 .Make(NetMsgType::SENDCMPCT,
1737 CMPCTBLOCKS_VERSION));
1740 pnodeStop->m_bip152_highbandwidth_to = false;
1743 lNodesAnnouncingHeaderAndIDs.pop_front();
1745 m_connman.PushMessage(pfrom,
1752 lNodesAnnouncingHeaderAndIDs.push_back(pfrom->
GetId());
1757 bool PeerManagerImpl::TipMayBeStale() {
1760 if (m_last_tip_update.load() == 0s) {
1761 m_last_tip_update = GetTime<std::chrono::seconds>();
1763 return m_last_tip_update.load() <
1764 GetTime<std::chrono::seconds>() -
1767 mapBlocksInFlight.empty();
1770 bool PeerManagerImpl::CanDirectFetch() {
1771 return m_chainman.ActiveChain().Tip()->Time() >
1773 m_chainparams.GetConsensus().PowTargetSpacing() * 20;
1776 static bool PeerHasHeader(CNodeState *state,
const CBlockIndex *pindex)
1778 if (state->pindexBestKnownBlock &&
1779 pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
1782 if (state->pindexBestHeaderSent &&
1783 pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
1789 void PeerManagerImpl::ProcessBlockAvailability(
NodeId nodeid) {
1790 CNodeState *state = State(nodeid);
1791 assert(state !=
nullptr);
1793 if (!state->hashLastUnknownBlock.IsNull()) {
1795 m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
1797 if (state->pindexBestKnownBlock ==
nullptr ||
1798 pindex->
nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1799 state->pindexBestKnownBlock = pindex;
1801 state->hashLastUnknownBlock.SetNull();
1806 void PeerManagerImpl::UpdateBlockAvailability(
NodeId nodeid,
1808 CNodeState *state = State(nodeid);
1809 assert(state !=
nullptr);
1811 ProcessBlockAvailability(nodeid);
1813 const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
1816 if (state->pindexBestKnownBlock ==
nullptr ||
1817 pindex->
nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1818 state->pindexBestKnownBlock = pindex;
1823 state->hashLastUnknownBlock = hash;
1827 void PeerManagerImpl::FindNextBlocksToDownload(
1829 std::vector<const CBlockIndex *> &vBlocks,
NodeId &nodeStaller) {
1834 vBlocks.reserve(vBlocks.size() +
count);
1835 CNodeState *state = State(nodeid);
1836 assert(state !=
nullptr);
1839 ProcessBlockAvailability(nodeid);
1841 if (state->pindexBestKnownBlock ==
nullptr ||
1842 state->pindexBestKnownBlock->nChainWork <
1843 m_chainman.ActiveChain().Tip()->nChainWork ||
1844 state->pindexBestKnownBlock->nChainWork <
1845 m_chainman.MinimumChainWork()) {
1850 if (state->pindexLastCommonBlock ==
nullptr) {
1853 state->pindexLastCommonBlock =
1855 .ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
1856 m_chainman.ActiveChain().Height())];
1862 state->pindexLastCommonBlock, state->pindexBestKnownBlock);
1863 if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
1867 std::vector<const CBlockIndex *> vToFetch;
1868 const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
1876 std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
1878 while (pindexWalk->
nHeight < nMaxHeight) {
1883 int nToFetch = std::min(nMaxHeight - pindexWalk->
nHeight,
1884 std::max<int>(
count - vBlocks.size(), 128));
1885 vToFetch.resize(nToFetch);
1886 pindexWalk = state->pindexBestKnownBlock->
GetAncestor(
1887 pindexWalk->
nHeight + nToFetch);
1888 vToFetch[nToFetch - 1] = pindexWalk;
1889 for (
unsigned int i = nToFetch - 1; i > 0; i--) {
1890 vToFetch[i - 1] = vToFetch[i]->
pprev;
1903 if (pindex->nStatus.hasData() ||
1904 m_chainman.ActiveChain().Contains(pindex)) {
1906 state->pindexLastCommonBlock = pindex;
1908 }
else if (!IsBlockRequested(pindex->
GetBlockHash())) {
1910 if (pindex->
nHeight > nWindowEnd) {
1912 if (vBlocks.size() == 0 && waitingfor != nodeid) {
1915 nodeStaller = waitingfor;
1919 vBlocks.push_back(pindex);
1920 if (vBlocks.size() ==
count) {
1923 }
else if (waitingfor == -1) {
1925 waitingfor = mapBlocksInFlight[pindex->
GetBlockHash()].first;
1933 template <
class InvId>
1937 return !
node.HasPermission(
1950 template <
class InvId>
1951 static std::chrono::microseconds
1955 std::chrono::microseconds current_time,
bool preferred) {
1956 auto delay = std::chrono::microseconds{0};
1968 return current_time + delay;
1971 void PeerManagerImpl::PushNodeVersion(
const Config &config,
CNode &pnode,
1973 uint64_t my_services{peer.m_our_services};
1974 const int64_t nTime{
count_seconds(GetTime<std::chrono::seconds>())};
1976 const int nNodeStartingHeight{m_best_height};
1987 const bool tx_relay = !m_ignore_incoming_txs && !pnode.
IsBlockOnlyConn() &&
1989 m_connman.PushMessage(
1996 nTime, your_services, addr_you, my_services,
1998 nNodeStartingHeight, tx_relay, extraEntropy));
2002 "send version message: version %d, blocks=%d, them=%s, "
2003 "txrelay=%d, peer=%d\n",
2008 "send version message: version %d, blocks=%d, "
2009 "txrelay=%d, peer=%d\n",
2014 void PeerManagerImpl::AddTxAnnouncement(
2016 std::chrono::microseconds current_time) {
2024 const bool preferred = isPreferredDownloadPeer(
node);
2026 current_time, preferred);
2028 m_txrequest.ReceivedInv(
node.GetId(), txid, preferred, reqtime);
2031 void PeerManagerImpl::AddProofAnnouncement(
2033 std::chrono::microseconds current_time,
bool preferred) {
2044 m_proofrequest.ReceivedInv(
node.GetId(), proofid, preferred, reqtime);
2047 void PeerManagerImpl::UpdateLastBlockAnnounceTime(
NodeId node,
2048 int64_t time_in_seconds) {
2050 CNodeState *state = State(
node);
2052 state->m_last_block_announcement = time_in_seconds;
2056 void PeerManagerImpl::InitializeNode(
const Config &config,
CNode &
node,
2061 m_node_states.emplace_hint(m_node_states.end(),
2062 std::piecewise_construct,
2063 std::forward_as_tuple(nodeid),
2064 std::forward_as_tuple(
node.IsInboundConn()));
2065 assert(m_txrequest.Count(nodeid) == 0);
2067 PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
2070 m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
2072 if (!
node.IsInboundConn()) {
2073 PushNodeVersion(config,
node, *peer);
2077 void PeerManagerImpl::ReattemptInitialBroadcast(
CScheduler &scheduler) {
2078 std::set<TxId> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
2080 for (
const TxId &txid : unbroadcast_txids) {
2082 if (m_mempool.exists(txid)) {
2083 RelayTransaction(txid);
2085 m_mempool.RemoveUnbroadcastTx(txid,
true);
2094 auto unbroadcasted_proofids =
2098 auto it = unbroadcasted_proofids.begin();
2099 while (it != unbroadcasted_proofids.end()) {
2102 if (!pm.isBoundToPeer(*it)) {
2103 pm.removeUnbroadcastProof(*it);
2104 it = unbroadcasted_proofids.erase(it);
2111 return unbroadcasted_proofids;
2115 for (
const auto &proofid : unbroadcasted_proofids) {
2116 RelayProof(proofid);
2123 const auto reattemptBroadcastInterval = 10min +
GetRandMillis(5min);
2124 scheduler.
scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2125 reattemptBroadcastInterval);
2128 void PeerManagerImpl::UpdateAvalancheStatistics()
const {
2129 m_connman.ForEachNode([](
CNode *pnode) {
2146 m_connman.ForNode(nodeid, [&](
CNode *pavanode) {
2155 void PeerManagerImpl::AvalanchePeriodicNetworking(
CScheduler &scheduler)
const {
2156 const auto now = GetTime<std::chrono::seconds>();
2157 std::vector<NodeId> avanode_ids;
2158 bool fQuorumEstablished;
2159 bool fShouldRequestMoreNodes;
2168 fQuorumEstablished =
g_avalanche->isQuorumEstablished();
2169 fShouldRequestMoreNodes =
2174 m_connman.ForEachNode([&](
CNode *pnode) {
2177 avanode_ids.push_back(pnode->GetId());
2180 PeerRef peer = GetPeerRef(pnode->
GetId());
2181 if (peer ==
nullptr) {
2185 if (peer->m_proof_relay &&
2186 now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
2188 peer->m_proof_relay->sharedProofs = {};
2192 if (avanode_ids.empty()) {
2200 for (
NodeId avanodeId : avanode_ids) {
2201 const bool sentGetavaaddr =
2202 m_connman.ForNode(avanodeId, [&](
CNode *pavanode) {
2204 m_connman.PushMessage(
2205 pavanode, CNetMsgMaker(pavanode->GetCommonVersion())
2206 .Make(NetMsgType::GETAVAADDR));
2207 PeerRef peer = GetPeerRef(avanodeId);
2208 WITH_LOCK(peer->m_addr_token_bucket_mutex,
2209 peer->m_addr_token_bucket += GetMaxAddrToSend());
2217 if (sentGetavaaddr && fQuorumEstablished && !fShouldRequestMoreNodes) {
2222 if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
2231 if (
g_avalanche->getAvaproofsNodeCounter() == 0) {
2232 avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
2235 for (
NodeId nodeid : avanode_ids) {
2237 m_connman.ForNode(nodeid, [&](
CNode *pavanode) {
2238 PeerRef peer = GetPeerRef(nodeid);
2239 if (peer->m_proof_relay) {
2240 m_connman.PushMessage(pavanode,
2244 peer->m_proof_relay->compactproofs_requested =
true;
2254 const auto avalanchePeriodicNetworkingInterval = 2min +
GetRandMillis(3min);
2255 scheduler.
scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2256 avalanchePeriodicNetworkingInterval);
2259 void PeerManagerImpl::FinalizeNode(
const Config &config,
const CNode &
node) {
2270 PeerRef peer = RemovePeer(nodeid);
2272 misbehavior =
WITH_LOCK(peer->m_misbehavior_mutex,
2273 return peer->m_misbehavior_score);
2275 m_peer_map.erase(nodeid);
2277 CNodeState *state = State(nodeid);
2278 assert(state !=
nullptr);
2280 if (state->fSyncStarted) {
2284 for (
const QueuedBlock &entry : state->vBlocksInFlight) {
2285 mapBlocksInFlight.erase(entry.pindex->GetBlockHash());
2288 m_txrequest.DisconnectedPeer(nodeid);
2289 m_num_preferred_download_peers -= state->fPreferredDownload;
2290 m_peers_downloading_from -= (state->nBlocksInFlight != 0);
2291 assert(m_peers_downloading_from >= 0);
2292 m_outbound_peers_with_protect_from_disconnect -=
2293 state->m_chain_sync.m_protect;
2294 assert(m_outbound_peers_with_protect_from_disconnect >= 0);
2296 m_node_states.erase(nodeid);
2298 if (m_node_states.empty()) {
2300 assert(mapBlocksInFlight.empty());
2301 assert(m_num_preferred_download_peers == 0);
2302 assert(m_peers_downloading_from == 0);
2303 assert(m_outbound_peers_with_protect_from_disconnect == 0);
2304 assert(m_txrequest.Size() == 0);
2309 if (
node.fSuccessfullyConnected && misbehavior == 0 &&
2310 !
node.IsBlockOnlyConn() && !
node.IsInboundConn()) {
2314 m_addrman.Connected(
node.addr);
2317 LOCK(m_headers_presync_mutex);
2318 m_headers_presync_stats.erase(nodeid);
2321 WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
2326 PeerRef PeerManagerImpl::GetPeerRef(
NodeId id)
const {
2328 auto it = m_peer_map.find(
id);
2329 return it != m_peer_map.end() ? it->second :
nullptr;
2332 PeerRef PeerManagerImpl::RemovePeer(
NodeId id) {
2335 auto it = m_peer_map.find(
id);
2336 if (it != m_peer_map.end()) {
2337 ret = std::move(it->second);
2338 m_peer_map.erase(it);
2343 bool PeerManagerImpl::GetNodeStateStats(
NodeId nodeid,
2347 const CNodeState *state = State(nodeid);
2348 if (state ==
nullptr) {
2352 ? state->pindexBestKnownBlock->nHeight
2355 ? state->pindexLastCommonBlock->nHeight
2357 for (
const QueuedBlock &queue : state->vBlocksInFlight) {
2364 PeerRef peer = GetPeerRef(nodeid);
2365 if (peer ==
nullptr) {
2377 auto ping_wait{0us};
2378 if ((0 != peer->m_ping_nonce_sent) &&
2379 (0 != peer->m_ping_start.load().count())) {
2381 GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
2384 if (
auto tx_relay = peer->GetTxRelay()) {
2386 return tx_relay->m_relay_txs);
2398 LOCK(peer->m_headers_sync_mutex);
2399 if (peer->m_headers_sync) {
2407 void PeerManagerImpl::AddToCompactExtraTransactions(
const CTransactionRef &tx) {
2410 if (max_extra_txn <= 0) {
2414 if (!vExtraTxnForCompact.size()) {
2415 vExtraTxnForCompact.resize(max_extra_txn);
2418 vExtraTxnForCompact[vExtraTxnForCompactIt] =
2419 std::make_pair(tx->GetHash(), tx);
2420 vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
2423 void PeerManagerImpl::Misbehaving(Peer &peer,
int howmuch,
2424 const std::string &message) {
2427 LOCK(peer.m_misbehavior_mutex);
2428 const int score_before{peer.m_misbehavior_score};
2429 peer.m_misbehavior_score += howmuch;
2430 const int score_now{peer.m_misbehavior_score};
2432 const std::string message_prefixed =
2433 message.empty() ?
"" : (
": " + message);
2434 std::string warning;
2438 warning =
" DISCOURAGE THRESHOLD EXCEEDED";
2439 peer.m_should_discourage =
true;
2443 score_before, score_now, warning, message_prefixed);
2446 bool PeerManagerImpl::MaybePunishNodeForBlock(
NodeId nodeid,
2448 bool via_compact_block,
2449 const std::string &message) {
2450 PeerRef peer{GetPeerRef(nodeid)};
2461 if (!via_compact_block) {
2463 Misbehaving(*peer, 100, message);
2470 CNodeState *node_state = State(nodeid);
2471 if (node_state ==
nullptr) {
2478 if (!via_compact_block && !node_state->m_is_inbound) {
2480 Misbehaving(*peer, 100, message);
2490 Misbehaving(*peer, 100, message);
2498 Misbehaving(*peer, 10, message);
2504 if (message !=
"") {
2510 bool PeerManagerImpl::MaybePunishNodeForTx(
NodeId nodeid,
2512 const std::string &message) {
2513 PeerRef peer{GetPeerRef(nodeid)};
2520 Misbehaving(*peer, 100, message);
2535 if (message !=
"") {
2541 bool PeerManagerImpl::BlockRequestAllowed(
const CBlockIndex *pindex) {
2543 if (m_chainman.ActiveChain().Contains(pindex)) {
2547 (m_chainman.m_best_header !=
nullptr) &&
2548 (m_chainman.m_best_header->GetBlockTime() - pindex->
GetBlockTime() <
2551 *m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,
2555 std::optional<std::string>
2556 PeerManagerImpl::FetchBlock(
const Config &config,
NodeId peer_id,
2558 if (m_chainman.m_blockman.LoadingBlocks()) {
2559 return "Loading blocks ...";
2564 CNodeState *state = State(peer_id);
2565 if (state ==
nullptr) {
2566 return "Peer does not exist";
2571 if (!BlockRequested(config, peer_id, block_index)) {
2572 return "Already requested from this peer";
2580 if (!m_connman.ForNode(peer_id, [
this, &invs](
CNode *
node) {
2581 const CNetMsgMaker msgMaker(node->GetCommonVersion());
2582 this->m_connman.PushMessage(
2583 node, msgMaker.Make(NetMsgType::GETDATA, invs));
2586 return "Node not fully connected";
2591 return std::nullopt;
2598 bool ignore_incoming_txs) {
2599 return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,
2600 pool, ignore_incoming_txs);
2606 : m_chainparams(chainman.GetParams()), m_connman(connman),
2607 m_addrman(addrman), m_banman(banman), m_chainman(chainman),
2608 m_mempool(pool), m_ignore_incoming_txs(ignore_incoming_txs) {}
2610 void PeerManagerImpl::StartScheduledTasks(
CScheduler &scheduler) {
2617 "peer eviction timer should be less than stale tip check timer");
2620 this->CheckForStaleTipAndEvictPeers();
2626 const auto reattemptBroadcastInterval = 10min +
GetRandMillis(5min);
2627 scheduler.
scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2628 reattemptBroadcastInterval);
2633 UpdateAvalancheStatistics();
2639 const auto avalanchePeriodicNetworkingInterval = 2min +
GetRandMillis(3min);
2640 scheduler.
scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2641 avalanchePeriodicNetworkingInterval);
2650 void PeerManagerImpl::BlockConnected(
2651 const std::shared_ptr<const CBlock> &pblock,
const CBlockIndex *pindex) {
2653 m_last_tip_update = GetTime<std::chrono::seconds>();
2656 LOCK(m_recent_confirmed_transactions_mutex);
2658 m_recent_confirmed_transactions.insert(ptx->GetId());
2663 for (
const auto &ptx : pblock->vtx) {
2664 m_txrequest.ForgetInvId(ptx->GetId());
2670 auto stalling_timeout = m_block_stalling_timeout.load();
2673 const auto new_timeout =
2674 std::max(std::chrono::duration_cast<std::chrono::seconds>(
2675 stalling_timeout * 0.85),
2677 if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout,
2685 void PeerManagerImpl::BlockDisconnected(
2686 const std::shared_ptr<const CBlock> &block,
const CBlockIndex *pindex) {
2695 LOCK(m_recent_confirmed_transactions_mutex);
2696 m_recent_confirmed_transactions.reset();
// Validation-interface callback: a block passed PoW checks and may become
// the new tip. Fast-announces it as a compact block to peers that
// requested high-bandwidth compact-block relay (BIP152-style).
2703 void PeerManagerImpl::NewPoWValidBlock(
2704 const CBlockIndex *pindex,
const std::shared_ptr<const CBlock> &pblock) {
2705 std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
2706 std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
// Only fast-announce at heights strictly above anything announced before.
2711 if (pindex->
nHeight <= m_highest_fast_announce) {
2714 m_highest_fast_announce = pindex->
nHeight;
// Deferred serialization: the message is built at most once, and only if
// some peer actually needs it.
2717 const std::shared_future<CSerializedNetMsg> lazy_ser{
2718 std::async(std::launch::deferred, [&] {
// Cache this block as the most recent one for serving later requests.
2723 LOCK(m_most_recent_block_mutex);
2724 m_most_recent_block_hash = hashBlock;
2725 m_most_recent_block = pblock;
2726 m_most_recent_compact_block = pcmpctblock;
2729 m_connman.ForEachNode(
2730 [
this, pindex, &lazy_ser, &hashBlock](
CNode *pnode)
2738 ProcessBlockAvailability(pnode->
GetId());
2739 CNodeState &state = *State(pnode->
GetId());
// Push only to peers that asked for HB compact blocks and that have the
// parent header but not this block's header yet.
2743 if (state.m_requested_hb_cmpctblocks &&
2744 !PeerHasHeader(&state, pindex) &&
2745 PeerHasHeader(&state, pindex->
pprev)) {
2747 "%s sending header-and-ids %s to peer=%d\n",
2748 "PeerManager::NewPoWValidBlock",
2749 hashBlock.ToString(), pnode->
GetId());
2752 m_connman.PushMessage(pnode, ser_cmpctblock.Copy());
2753 state.pindexBestHeaderSent = pindex;
// Validation-interface callback: the active tip changed. Queues the new
// block hashes (from the new tip back toward the fork point) for
// headers-based announcement to every peer.
2762 void PeerManagerImpl::UpdatedBlockTip(
const CBlockIndex *pindexNew,
2764 bool fInitialDownload) {
2765 SetBestHeight(pindexNew->
nHeight);
// Do not announce while still in initial block download.
2769 if (fInitialDownload) {
// Walk back from the new tip collecting hashes to announce.
2774 std::vector<BlockHash> vHashes;
2776 while (pindexToAnnounce != pindexFork) {
2778 pindexToAnnounce = pindexToAnnounce->
pprev;
2788 for (
auto &it : m_peer_map) {
2789 Peer &peer = *it.second;
2790 LOCK(peer.m_block_inv_mutex);
2792 peer.m_blocks_for_headers_relay.push_back(hash);
// Wake the message handler so announcements go out promptly.
2797 m_connman.WakeMessageHandler();
// Validation-interface callback: validation of a block finished. If the
// block is invalid and we know which peer sent it, consider punishing that
// peer; on success, possibly upgrade the source peer to compact-block
// announcement. Finally drop the block-source bookkeeping entry.
2804 void PeerManagerImpl::BlockChecked(
const CBlock &block,
2809 std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
2810 mapBlockSource.find(hash);
// NOTE(review): the meaning of the bool stored in mapBlockSource (negated
// when passed to MaybePunishNodeForBlock) is not visible in this extract —
// confirm against the mapBlockSource declaration.
2814 if (state.
IsInvalid() && it != mapBlockSource.end() &&
2815 State(it->second.first)) {
2816 MaybePunishNodeForBlock(it->second.first, state,
2817 !it->second.second);
2826 !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
2827 mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
2828 if (it != mapBlockSource.end()) {
2829 MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
2833 if (it != mapBlockSource.end()) {
2834 mapBlockSource.erase(it);
// Return true when this txid need not be fetched again: it is a known
// orphan, was recently confirmed, was recently rejected, or is already in
// the mempool. The recent-rejects filter is reset whenever the chain tip
// changes, since a new tip can change transaction validity.
2843 bool PeerManagerImpl::AlreadyHaveTx(
const TxId &txid) {
2844 if (m_chainman.ActiveChain().Tip()->GetBlockHash() !=
2845 hashRecentRejectsChainTip) {
2850 hashRecentRejectsChainTip =
2851 m_chainman.ActiveChain().Tip()->GetBlockHash();
2852 m_recent_rejects.reset();
2855 if (m_orphanage.
HaveTx(txid)) {
2860 LOCK(m_recent_confirmed_transactions_mutex);
2861 if (m_recent_confirmed_transactions.contains(txid)) {
2866 return m_recent_rejects.contains(txid) || m_mempool.exists(txid);
2869 bool PeerManagerImpl::AlreadyHaveBlock(
const BlockHash &block_hash) {
2870 return m_chainman.m_blockman.LookupBlockIndex(block_hash) !=
nullptr;
2877 if (localProof && localProof->getId() == proofid) {
// Mark every connected peer so a PING is sent to it on the next
// message-handler pass (the m_ping_queued flag is consumed elsewhere).
2886 void PeerManagerImpl::SendPings() {
2888 for (
auto &it : m_peer_map) {
2889 it.second->m_ping_queued =
true;
// Queue a txid for INV relay to every peer that has transaction relay
// enabled and does not already know the transaction.
2893 void PeerManagerImpl::RelayTransaction(
const TxId &txid) {
2895 for (
auto &it : m_peer_map) {
2896 Peer &peer = *it.second;
2897 auto tx_relay = peer.GetTxRelay();
2901 LOCK(tx_relay->m_tx_inventory_mutex);
// Skip peers whose known-inventory filter already contains this txid.
2902 if (!tx_relay->m_tx_inventory_known_filter.contains(txid)) {
2903 tx_relay->m_tx_inventory_to_send.insert(txid);
2910 for (
auto &it : m_peer_map) {
2911 Peer &peer = *it.second;
2913 if (!peer.m_proof_relay) {
2916 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
2917 if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
2919 peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);
// Relay an address to a small, hash-selected subset of eligible peers
// (excluding the originator). The selection key mixes the address hash
// with a coarse time bucket, so relay targets rotate over time and differ
// between nodes.
2924 void PeerManagerImpl::RelayAddress(
NodeId originator,
const CAddress &addr,
2940 const auto current_time{GetTime<std::chrono::seconds>()};
// Coarse time bucket folded into the hash key (divisor elided here).
2943 const uint64_t time_addr{
2944 (
static_cast<uint64_t
>(
count_seconds(current_time)) + hash_addr) /
// Two relay targets when the network is reachable (or on a hash coin
// flip), otherwise one.
2955 unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
2956 std::array<std::pair<uint64_t, Peer *>, 2> best{
2957 {{0,
nullptr}, {0,
nullptr}}};
2958 assert(nRelayNodes <= best.size());
// Keep the nRelayNodes peers with the highest per-peer hash keys,
// maintained as a tiny sorted array via std::copy shifting.
2962 for (
auto &[
id, peer] : m_peer_map) {
2963 if (peer->m_addr_relay_enabled &&
id != originator &&
2964 IsAddrCompatible(*peer, addr)) {
2966 for (
unsigned int i = 0; i < nRelayNodes; i++) {
2967 if (hashKey > best[i].first) {
2968 std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
2969 best.begin() + i + 1);
2970 best[i] = std::make_pair(hashKey, peer.get());
// A zero key means the slot was never filled — stop there.
2977 for (
unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
2978 PushAddress(*best[i].second, addr, insecure_rand);
2982 void PeerManagerImpl::ProcessGetBlockData(
const Config &config,
CNode &pfrom,
2983 Peer &peer,
const CInv &inv) {
2986 std::shared_ptr<const CBlock> a_recent_block;
2987 std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
2989 LOCK(m_most_recent_block_mutex);
2990 a_recent_block = m_most_recent_block;
2991 a_recent_compact_block = m_most_recent_compact_block;
2994 bool need_activate_chain =
false;
2998 m_chainman.m_blockman.LookupBlockIndex(hash);
3008 need_activate_chain =
true;
3012 if (need_activate_chain) {
3014 if (!m_chainman.ActiveChainstate().ActivateBestChain(state,
3022 const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
3026 if (!BlockRequestAllowed(pindex)) {
3028 "%s: ignoring request from peer=%i for old "
3029 "block that isn't in the main chain\n",
3030 __func__, pfrom.
GetId());
3036 if (m_connman.OutboundTargetReached(
true) &&
3037 (((m_chainman.m_best_header !=
nullptr) &&
3038 (m_chainman.m_best_header->GetBlockTime() - pindex->
GetBlockTime() >
3044 "historical block serving limit reached, disconnect peer=%d\n",
3056 (m_chainman.ActiveChain().Tip()->nHeight - pindex->
nHeight >
3059 "Ignore block request below NODE_NETWORK_LIMITED "
3060 "threshold, disconnect peer=%d\n",
3070 if (!pindex->nStatus.hasData()) {
3073 std::shared_ptr<const CBlock> pblock;
3074 if (a_recent_block && a_recent_block->GetHash() == pindex->
GetBlockHash()) {
3075 pblock = a_recent_block;
3078 std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
3080 m_chainparams.GetConsensus())) {
3081 assert(!
"cannot load block from disk");
3083 pblock = pblockRead;
3086 m_connman.PushMessage(&pfrom,
3089 bool sendMerkleBlock =
false;
3091 if (
auto tx_relay = peer.GetTxRelay()) {
3092 LOCK(tx_relay->m_bloom_filter_mutex);
3093 if (tx_relay->m_bloom_filter) {
3094 sendMerkleBlock =
true;
3095 merkleBlock =
CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
3098 if (sendMerkleBlock) {
3099 m_connman.PushMessage(
3110 typedef std::pair<size_t, uint256> PairType;
3112 m_connman.PushMessage(
3125 if (CanDirectFetch() &&
3128 if (a_recent_compact_block &&
3129 a_recent_compact_block->header.GetHash() ==
3131 m_connman.PushMessage(&pfrom,
3133 *a_recent_compact_block));
3136 m_connman.PushMessage(
3141 m_connman.PushMessage(
3147 LOCK(peer.m_block_inv_mutex);
3150 if (hash == peer.m_continuation_block) {
3154 std::vector<CInv> vInv;
3155 vInv.push_back(
CInv(
3156 MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash()));
3158 peer.m_continuation_block =
BlockHash();
// Find a transaction to serve for a getdata request. Mempool entries are
// served when old enough relative to the peer's last mempool request (or
// the other, elided, age condition); otherwise only transactions we
// recently announced to this specific peer are served, with mapRelay as a
// final fallback for txs that left the mempool.
3164 PeerManagerImpl::FindTxForGetData(
const Peer &peer,
const TxId &txid,
3165 const std::chrono::seconds mempool_req,
3166 const std::chrono::seconds now) {
3167 auto txinfo = m_mempool.info(txid);
3172 if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
3174 return std::move(txinfo.tx);
// Otherwise require that this tx was announced to this peer —
// presumably to avoid probing the mempool contents; confirm upstream.
3182 if (
Assume(peer.GetTxRelay())
3183 ->m_recently_announced_invs.contains(txid)) {
3186 return std::move(txinfo.tx);
3189 auto mi = mapRelay.find(txid);
3190 if (mi != mapRelay.end()) {
// Find an avalanche proof to serve for a getdata request. Some proofs are
// served unconditionally (the condition computing send_unconditionally is
// elided in this extract); other proofs are only served when they were
// recently announced to this peer.
3202 PeerManagerImpl::FindProofForGetData(
const Peer &peer,
3204 const std::chrono::seconds now) {
3207 bool send_unconditionally =
3233 if (send_unconditionally) {
3238 if (peer.m_proof_relay->m_recently_announced_proofs.contains(proofid)) {
3245 void PeerManagerImpl::ProcessGetData(
3247 const std::atomic<bool> &interruptMsgProc) {
3250 auto tx_relay = peer.GetTxRelay();
3252 std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
3253 std::vector<CInv> vNotFound;
3256 const auto now{GetTime<std::chrono::seconds>()};
3258 const auto mempool_req = tx_relay !=
nullptr
3259 ? tx_relay->m_last_mempool_req.load()
3260 : std::chrono::seconds::min();
3265 while (it != peer.m_getdata_requests.end()) {
3266 if (interruptMsgProc) {
3275 const CInv &inv = *it;
3277 if (it->IsMsgProof()) {
3279 auto proof = FindProofForGetData(peer, proofid, now);
3281 m_connman.PushMessage(
3287 vNotFound.push_back(inv);
3294 if (it->IsMsgTx()) {
3295 if (tx_relay ==
nullptr) {
3306 m_connman.PushMessage(
3308 m_mempool.RemoveUnbroadcastTx(txid);
3311 std::vector<TxId> parent_ids_to_add;
3314 auto txiter = m_mempool.GetIter(tx->GetId());
3316 auto &pentry = *txiter;
3318 (*pentry)->GetMemPoolParentsConst();
3319 parent_ids_to_add.reserve(parents.size());
3320 for (
const auto &parent : parents) {
3321 if (parent.get()->GetTime() >
3323 parent_ids_to_add.push_back(
3324 parent.get()->GetTx().GetId());
3329 for (
const TxId &parent_txid : parent_ids_to_add) {
3332 if (
WITH_LOCK(tx_relay->m_tx_inventory_mutex,
3333 return !tx_relay->m_tx_inventory_known_filter
3334 .contains(parent_txid))) {
3335 tx_relay->m_recently_announced_invs.insert(parent_txid);
3339 vNotFound.push_back(inv);
3352 if (it != peer.m_getdata_requests.end() && !pfrom.
fPauseSend) {
3353 const CInv &inv = *it++;
3355 ProcessGetBlockData(config, pfrom, peer, inv);
3361 peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
3363 if (!vNotFound.empty()) {
3376 m_connman.PushMessage(&pfrom,
// Serve a getblocktxn request: validate each requested tx index against
// the block, then reply with the requested transactions.
3381 void PeerManagerImpl::SendBlockTransactions(
3385 for (
size_t i = 0; i < req.
indices.size(); i++) {
// Out-of-range index is a protocol violation (misbehavior score 100).
3387 Misbehaving(peer, 100,
"getblocktxn with out-of-bounds tx indices");
3395 m_connman.PushMessage(
// Sanity-check a received headers message: each header must carry a valid
// proof of work (score 100 on failure) and the headers must form one
// continuous chain (lesser offence, score 20).
3399 bool PeerManagerImpl::CheckHeadersPoW(
const std::vector<CBlockHeader> &headers,
3404 Misbehaving(peer, 100,
"header with invalid proof of work");
3409 if (!CheckHeadersAreContinuous(headers)) {
3410 Misbehaving(peer, 20,
"non-continuous headers sequence");
3419 if (m_chainman.ActiveChain().Tip() !=
nullptr) {
3420 const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
3423 near_chaintip_work =
3427 return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
// Handle a short headers message whose first header does not connect to
// our block index: count the occurrence, send a getheaders from our best
// header, still record the peer's announced tip, and apply a misbehavior
// score periodically on repeat offences.
3442 void PeerManagerImpl::HandleFewUnconnectingHeaders(
3443 CNode &pfrom, Peer &peer,
const std::vector<CBlockHeader> &headers) {
3446 peer.m_num_unconnecting_headers_msgs++;
3450 if (MaybeSendGetHeaders(pfrom,
GetLocator(best_header), peer)) {
3453 "received header %s: missing prev block %s, sending getheaders "
3454 "(%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
3456 headers[0].hashPrevBlock.ToString(), best_header->nHeight,
3457 pfrom.
GetId(), peer.m_num_unconnecting_headers_msgs);
// Remember the announced tip so block download logic can use it later.
3464 UpdateBlockAvailability(pfrom.
GetId(), headers.back().GetHash()));
// Every Nth unconnecting message (modulus elided) costs score 20.
3468 if (peer.m_num_unconnecting_headers_msgs %
3471 Misbehaving(peer, 20,
3473 peer.m_num_unconnecting_headers_msgs));
// Return true when each header's hashPrevBlock equals the hash of the
// header preceding it in the vector (the first header is exempt while
// hashLastBlock is still null).
3477 bool PeerManagerImpl::CheckHeadersAreContinuous(
3478 const std::vector<CBlockHeader> &headers)
const {
3481 if (!hashLastBlock.
IsNull() && header.hashPrevBlock != hashLastBlock) {
3484 hashLastBlock = header.GetHash();
3489 bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(
3490 Peer &peer,
CNode &pfrom, std::vector<CBlockHeader> &headers) {
3491 if (peer.m_headers_sync) {
3492 auto result = peer.m_headers_sync->ProcessNextHeaders(
3494 if (result.request_more) {
3495 auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
3498 Assume(!locator.vHave.empty());
3499 if (!locator.vHave.empty()) {
3506 bool sent_getheaders =
3507 MaybeSendGetHeaders(pfrom, locator, peer);
3508 if (sent_getheaders) {
3510 "more getheaders (from %s) to peer=%d\n",
3511 locator.vHave.front().ToString(), pfrom.
GetId());
3514 "error sending next getheaders (from %s) to "
3515 "continue sync with peer=%d\n",
3516 locator.vHave.front().ToString(), pfrom.
GetId());
3522 peer.m_headers_sync.reset(
nullptr);
3527 LOCK(m_headers_presync_mutex);
3528 m_headers_presync_stats.erase(pfrom.
GetId());
3531 HeadersPresyncStats stats;
3532 stats.first = peer.m_headers_sync->GetPresyncWork();
3533 if (peer.m_headers_sync->GetState() ==
3535 stats.second = {peer.m_headers_sync->GetPresyncHeight(),
3536 peer.m_headers_sync->GetPresyncTime()};
3540 LOCK(m_headers_presync_mutex);
3541 m_headers_presync_stats[pfrom.
GetId()] = stats;
3543 m_headers_presync_stats.find(m_headers_presync_bestpeer);
3544 bool best_updated =
false;
3545 if (best_it == m_headers_presync_stats.end()) {
3550 const HeadersPresyncStats *stat_best{
nullptr};
3551 for (
const auto &[_peer, _stat] : m_headers_presync_stats) {
3552 if (!stat_best || _stat > *stat_best) {
3557 m_headers_presync_bestpeer = peer_best;
3558 best_updated = (peer_best == pfrom.
GetId());
3559 }
else if (best_it->first == pfrom.
GetId() ||
3560 stats > best_it->second) {
3563 m_headers_presync_bestpeer = pfrom.
GetId();
3564 best_updated =
true;
3566 if (best_updated && stats.second.has_value()) {
3569 m_headers_presync_should_signal =
true;
3573 if (result.success) {
3576 headers.swap(result.pow_validated_headers);
3579 return result.success;
// Decide whether a headers chain whose total work is below the anti-DoS
// threshold should be handled by the low-work headers-sync mechanism
// instead of direct processing. Returns true when the message was
// consumed by the sync logic; otherwise the chain is logged and ignored.
3587 bool PeerManagerImpl::TryLowWorkHeadersSync(
3589 std::vector<CBlockHeader> &headers) {
3596 arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
3600 if (total_work < minimum_chain_work) {
3614 LOCK(peer.m_headers_sync_mutex);
// Start a fresh headers-sync state for this peer (ctor args elided).
3615 peer.m_headers_sync.reset(
3617 chain_start_header, minimum_chain_work));
3621 return IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
3625 "Ignoring low-work chain (height=%u) from peer=%d\n",
3626 chain_start_header->
nHeight + headers.size(), pfrom.
GetId());
// Return true when the given block index entry is an ancestor of our best
// known header or is contained in the active chain.
// NOTE(review): the middle of the boolean condition (lines 3638-3639) is
// elided in this extract.
3635 bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(
const CBlockIndex *header) {
3636 return header !=
nullptr &&
3637 ((m_chainman.m_best_header !=
nullptr &&
3640 m_chainman.ActiveChain().Contains(header));
// Send a GETHEADERS with the given locator unless one was sent to this
// peer too recently (simple per-peer rate limit on getheaders).
3643 bool PeerManagerImpl::MaybeSendGetHeaders(
CNode &pfrom,
3652 if (current_time - peer.m_last_getheaders_timestamp >
3654 m_connman.PushMessage(
// Stamp the send time so the next attempt is rate limited.
3656 peer.m_last_getheaders_timestamp = current_time;
3668 void PeerManagerImpl::HeadersDirectFetchBlocks(
const Config &config,
3674 CNodeState *nodestate = State(pfrom.
GetId());
3677 m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->
nChainWork) {
3678 std::vector<const CBlockIndex *> vToFetch;
3682 while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) &&
3684 if (!pindexWalk->nStatus.hasData() &&
3687 vToFetch.push_back(pindexWalk);
3689 pindexWalk = pindexWalk->
pprev;
3695 if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
3700 std::vector<CInv> vGetData;
3703 if (nodestate->nBlocksInFlight >=
3709 BlockRequested(config, pfrom.
GetId(), *pindex);
3713 if (vGetData.size() > 1) {
3715 "Downloading blocks toward %s (%d) via headers "
3720 if (vGetData.size() > 0) {
3721 if (!m_ignore_incoming_txs &&
3722 nodestate->m_provides_cmpctblocks && vGetData.size() == 1 &&
3723 mapBlocksInFlight.size() == 1 &&
3729 m_connman.PushMessage(
3741 void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(
3743 bool received_new_header,
bool may_have_more_headers) {
3744 if (peer.m_num_unconnecting_headers_msgs > 0) {
3747 "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n",
3748 pfrom.
GetId(), peer.m_num_unconnecting_headers_msgs);
3750 peer.m_num_unconnecting_headers_msgs = 0;
3754 CNodeState *nodestate = State(pfrom.
GetId());
3763 if (received_new_header &&
3764 pindexLast->
nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
3765 nodestate->m_last_block_announcement =
GetTime();
3770 if (m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
3771 !may_have_more_headers) {
3774 if (nodestate->pindexBestKnownBlock &&
3775 nodestate->pindexBestKnownBlock->nChainWork <
3776 m_chainman.MinimumChainWork()) {
3786 LogPrintf(
"Disconnecting outbound peer %d -- headers "
3787 "chain has insufficient work\n",
3801 nodestate->pindexBestKnownBlock !=
nullptr) {
3802 if (m_outbound_peers_with_protect_from_disconnect <
3804 nodestate->pindexBestKnownBlock->nChainWork >=
3805 m_chainman.ActiveChain().Tip()->nChainWork &&
3806 !nodestate->m_chain_sync.m_protect) {
3809 nodestate->m_chain_sync.m_protect =
true;
3810 ++m_outbound_peers_with_protect_from_disconnect;
3815 void PeerManagerImpl::ProcessHeadersMessage(
const Config &config,
CNode &pfrom,
3817 std::vector<CBlockHeader> &&headers,
3818 bool via_compact_block) {
3819 size_t nCount = headers.size();
3827 LOCK(peer.m_headers_sync_mutex);
3828 if (peer.m_headers_sync) {
3829 peer.m_headers_sync.reset(
nullptr);
3830 LOCK(m_headers_presync_mutex);
3831 m_headers_presync_stats.erase(pfrom.
GetId());
3840 if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
3855 bool already_validated_work =
false;
3858 bool have_headers_sync =
false;
3860 LOCK(peer.m_headers_sync_mutex);
3862 already_validated_work =
3863 IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
3875 if (headers.empty()) {
3879 have_headers_sync = !!peer.m_headers_sync;
3885 headers[0].hashPrevBlock))};
3886 bool headers_connect_blockindex{chain_start_header !=
nullptr};
3888 if (!headers_connect_blockindex) {
3893 HandleFewUnconnectingHeaders(pfrom, peer, headers);
3895 Misbehaving(peer, 10,
"invalid header received");
3907 last_received_header =
3908 m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
3909 if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
3910 already_validated_work =
true;
3917 if (!already_validated_work &&
3918 TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
3930 bool received_new_header{last_received_header ==
nullptr};
3934 if (!m_chainman.ProcessNewBlockHeaders(headers,
true,
3935 state, &pindexLast)) {
3937 MaybePunishNodeForBlock(pfrom.
GetId(), state, via_compact_block,
3938 "invalid header received");
3948 if (MaybeSendGetHeaders(pfrom,
GetLocator(pindexLast), peer)) {
3951 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
3952 pindexLast->
nHeight, pfrom.
GetId(), peer.m_starting_height);
3956 UpdatePeerStateForReceivedHeaders(pfrom, peer, pindexLast,
3957 received_new_header,
3961 HeadersDirectFetchBlocks(config, pfrom, pindexLast);
// Drain the per-peer orphan work set: retry orphan transactions whose
// parents may now be available. Accepted orphans are relayed and erased;
// invalid ones are punished, added to recent-rejects, and erased.
3973 void PeerManagerImpl::ProcessOrphanTx(
const Config &config,
3974 std::set<TxId> &orphan_work_set) {
3977 while (!orphan_work_set.empty()) {
// Pop one txid from the work set.
3978 const TxId orphanTxId = *orphan_work_set.
begin();
3979 orphan_work_set.erase(orphan_work_set.begin());
3981 const auto [porphanTx, from_peer] = m_orphanage.
GetTx(orphanTxId);
3982 if (porphanTx ==
nullptr) {
3987 m_chainman.ProcessTransaction(porphanTx);
3992 RelayTransaction(orphanTxId);
3994 m_orphanage.
EraseTx(orphanTxId);
3999 " invalid orphan tx %s from peer=%d. %s\n",
// Punish the peer that originally provided the orphan (from_peer),
// not the peer whose message triggered this reprocessing.
4002 MaybePunishNodeForTx(from_peer, state);
// Remember the rejection so the tx is not requested again.
4009 m_recent_rejects.insert(orphanTxId);
4011 m_orphanage.
EraseTx(orphanTxId);
// Common validation for getcfilters / getcfheaders / getcfcheckpt
// requests: check the filter type is supported, resolve and validate the
// stop block, bound the requested height range, and look up the filter
// index. Each malformed request disconnects the peer.
4017 bool PeerManagerImpl::PrepareBlockFilterRequest(
4019 const BlockHash &stop_hash, uint32_t max_height_diff,
4021 const bool supported_filter_type =
4024 if (!supported_filter_type) {
4026 "peer %d requested unsupported block filter type: %d\n",
4027 node.GetId(),
static_cast<uint8_t
>(filter_type));
4028 node.fDisconnect =
true;
4034 stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
// Unknown stop block or one we refuse to serve: drop the peer.
4038 if (!stop_index || !BlockRequestAllowed(stop_index)) {
4041 node.fDisconnect =
true;
4046 uint32_t stop_height = stop_index->
nHeight;
// Inverted range is a protocol violation.
4047 if (start_height > stop_height) {
4050 "peer %d sent invalid getcfilters/getcfheaders with "
4052 "start height %d and stop height %d\n",
4053 node.GetId(), start_height, stop_height);
4054 node.fDisconnect =
true;
// Enforce the per-message cap on the number of filters/headers.
4057 if (stop_height - start_height >= max_height_diff) {
4059 "peer %d requested too many cfilters/cfheaders: %d / %d\n",
4060 node.GetId(), stop_height - start_height + 1, max_height_diff);
4061 node.fDisconnect =
true;
4066 if (!filter_index) {
// Handle a getcfilters request (BIP157): after common validation, read
// the matching compact block filters from the index and push one message
// per filter back to the requesting node.
4075 void PeerManagerImpl::ProcessGetCFilters(
CNode &
node, Peer &peer,
4077 uint8_t filter_type_ser;
4078 uint32_t start_height;
4081 vRecv >> filter_type_ser >> start_height >> stop_hash;
4088 if (!PrepareBlockFilterRequest(
node, peer, filter_type, start_height,
4094 std::vector<BlockFilter> filters;
// Index lookup failure is logged, not punished — it is a local issue.
4097 "Failed to find block filter in index: filter_type=%s, "
4098 "start_height=%d, stop_hash=%s\n",
4104 for (
const auto &filter : filters) {
4107 m_connman.PushMessage(&
node, std::move(msg));
// Handle a getcfheaders request (BIP157): after common validation, fetch
// the previous filter header (the anchor for the header chain) and the
// filter hashes for the requested range, then reply in one message.
4111 void PeerManagerImpl::ProcessGetCFHeaders(
CNode &
node, Peer &peer,
4113 uint8_t filter_type_ser;
4114 uint32_t start_height;
4117 vRecv >> filter_type_ser >> start_height >> stop_hash;
4124 if (!PrepareBlockFilterRequest(
node, peer, filter_type, start_height,
// The previous header only exists when the range starts above genesis.
4131 if (start_height > 0) {
4133 stop_index->
GetAncestor(
static_cast<int>(start_height - 1));
4136 "Failed to find block filter header in index: "
4137 "filter_type=%s, block_hash=%s\n",
4144 std::vector<uint256> filter_hashes;
4148 "Failed to find block filter hashes in index: filter_type=%s, "
4149 "start_height=%d, stop_hash=%s\n",
4158 stop_index->
GetBlockHash(), prev_header, filter_hashes);
4159 m_connman.PushMessage(&
node, std::move(msg));
// Handle a getcfcheckpt request (BIP157): collect filter headers at
// checkpoint intervals up to the stop block and reply in one message.
// No height-range limit applies (max_height_diff = uint32 max).
4162 void PeerManagerImpl::ProcessGetCFCheckPt(
CNode &
node, Peer &peer,
4164 uint8_t filter_type_ser;
4167 vRecv >> filter_type_ser >> stop_hash;
4174 if (!PrepareBlockFilterRequest(
4175 node, peer, filter_type, 0, stop_hash,
4176 std::numeric_limits<uint32_t>::max(),
4177 stop_index, filter_index)) {
// Fill the checkpoint vector from the highest checkpoint downward.
4185 for (
int i = headers.size() - 1; i >= 0; i--) {
4191 "Failed to find block filter header in index: "
4192 "filter_type=%s, block_hash=%s\n",
4202 m_connman.PushMessage(&
node, std::move(msg));
// Compute this node's avalanche vote for a block hash from local block
// index state: invalid status, parked-chain status, position relative to
// the fork with the active tip, and data availability.
// NOTE(review): the returned vote values and the return type are elided
// in this extract.
4217 PeerManagerImpl::GetAvalancheVoteForBlock(
const BlockHash &hash)
const {
4220 const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
4228 if (pindex->nStatus.isInvalid()) {
4233 if (pindex->nStatus.isOnParkedChain()) {
4237 const CBlockIndex *pindexTip = m_chainman.ActiveChain().Tip();
4241 if (pindex == pindexFork) {
4246 if (pindexFork != pindexTip) {
4251 if (!pindex->nStatus.hasData()) {
// Compute this node's avalanche vote for a transaction id, based on
// mempool membership, recent confirmation, recent rejection, or orphan
// status. The concrete vote values returned are elided in this extract.
4260 uint32_t PeerManagerImpl::GetAvalancheVoteForTx(
const TxId &
id)
const {
// In mempool or recently confirmed — known-good from our point of view.
4262 if (m_mempool.exists(
id) ||
4263 WITH_LOCK(m_recent_confirmed_transactions_mutex,
4264 return m_recent_confirmed_transactions.contains(
id))) {
4269 if (m_recent_rejects.contains(
id)) {
4274 if (m_orphanage.
HaveTx(
id)) {
4325 const std::shared_ptr<const CBlock> &block,
4326 bool force_processing,
4327 bool min_pow_checked) {
4328 bool new_block{
false};
4329 m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked,
4332 node.m_last_block_time = GetTime<std::chrono::seconds>();
4335 mapBlockSource.erase(block->GetHash());
4339 void PeerManagerImpl::ProcessMessage(
4340 const Config &config,
CNode &pfrom,
const std::string &msg_type,
4341 CDataStream &vRecv,
const std::chrono::microseconds time_received,
4342 const std::atomic<bool> &interruptMsgProc) {
4348 PeerRef peer = GetPeerRef(pfrom.
GetId());
4349 if (peer ==
nullptr) {
4356 "Avalanche is not initialized, ignoring %s message\n",
4370 Misbehaving(*peer, 1,
"redundant version message");
4376 uint64_t nNonce = 1;
4379 std::string cleanSubVer;
4380 int starting_height = -1;
4382 uint64_t nExtraEntropy = 1;
4384 vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
4392 m_addrman.SetServices(pfrom.
addr, nServices);
4397 "peer=%d does not offer the expected services "
4398 "(%08x offered, %08x expected); disconnecting\n",
4399 pfrom.
GetId(), nServices,
4409 "peer=%d does not offer the avalanche service; disconnecting\n",
4418 "peer=%d using obsolete version %i; disconnecting\n",
4419 pfrom.
GetId(), nVersion);
4424 if (!vRecv.
empty()) {
4433 if (!vRecv.
empty()) {
4434 std::string strSubVer;
4438 if (!vRecv.
empty()) {
4439 vRecv >> starting_height;
4441 if (!vRecv.
empty()) {
4444 if (!vRecv.
empty()) {
4445 vRecv >> nExtraEntropy;
4448 if (pfrom.
IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
4449 LogPrintf(
"connected to self at %s, disconnecting\n",
4462 PushNodeVersion(config, pfrom, *peer);
4466 const int greatest_common_version =
4480 peer->m_their_services = nServices;
4484 pfrom.cleanSubVer = cleanSubVer;
4486 peer->m_starting_height = starting_height;
4493 (fRelay || (peer->m_our_services &
NODE_BLOOM))) {
4494 auto *
const tx_relay = peer->SetTxRelay();
4496 LOCK(tx_relay->m_bloom_filter_mutex);
4498 tx_relay->m_relay_txs = fRelay;
4511 CNodeState *state = State(pfrom.
GetId());
4512 state->fPreferredDownload =
4516 m_num_preferred_download_peers += state->fPreferredDownload;
4520 if (!pfrom.
IsInboundConn() && SetupAddressRelay(pfrom, *peer)) {
4533 !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
4539 "ProcessMessages: advertising address %s\n",
4541 PushAddress(*peer, addr, insecure_rand);
4550 "ProcessMessages: advertising address %s\n",
4552 PushAddress(*peer, addr, insecure_rand);
4557 m_connman.PushMessage(&pfrom,
CNetMsgMaker(greatest_common_version)
4559 peer->m_getaddr_sent =
true;
4563 WITH_LOCK(peer->m_addr_token_bucket_mutex,
4582 m_addrman.Good(pfrom.
addr);
4585 std::string remoteAddr;
4591 "receive version message: [%s] %s: version %d, blocks=%d, "
4592 "us=%s, txrelay=%d, peer=%d%s\n",
4594 peer->m_starting_height, addrMe.ToString(), fRelay,
4595 pfrom.
GetId(), remoteAddr);
4597 int64_t currentTime =
GetTime();
4598 int64_t nTimeOffset = nTime - currentTime;
4600 if (nTime < int64_t(m_chainparams.GenesisBlock().nTime)) {
4603 Misbehaving(*peer, 20,
4604 "Ignoring invalid timestamp in version message");
4614 "feeler connection completed peer=%d; disconnecting\n",
4623 Misbehaving(*peer, 10,
"non-version message before version handshake");
4633 "ignoring redundant verack message from peer=%d\n",
4640 "New outbound peer connected: version: %d, blocks=%d, "
4642 pfrom.
nVersion.load(), peer->m_starting_height, pfrom.
GetId(),
4653 m_connman.PushMessage(
4664 AddKnownProof(*peer, localProof->getId());
4668 peer->m_proof_relay->m_recently_announced_proofs.insert(
4669 localProof->getId());
4680 Misbehaving(*peer, 10,
"non-verack message before version handshake");
4694 std::vector<CAddress> vAddr;
4698 if (!SetupAddressRelay(pfrom, *peer)) {
4707 strprintf(
"%s message size = %u", msg_type, vAddr.size()));
4712 std::vector<CAddress> vAddrOk;
4716 const auto current_time = GetTime<std::chrono::microseconds>();
4718 LOCK(peer->m_addr_token_bucket_mutex);
4721 const auto time_diff =
4722 std::max(current_time - peer->m_addr_token_timestamp, 0us);
4723 const double increment =
4725 peer->m_addr_token_bucket =
4726 std::min<double>(peer->m_addr_token_bucket + increment,
4730 peer->m_addr_token_timestamp = current_time;
4732 const bool rate_limited =
4734 uint64_t num_proc = 0;
4735 uint64_t num_rate_limit = 0;
4738 if (interruptMsgProc) {
4743 LOCK(peer->m_addr_token_bucket_mutex);
4745 if (peer->m_addr_token_bucket < 1.0) {
4751 peer->m_addr_token_bucket -= 1.0;
4764 addr.
nTime > current_a_time + 10min) {
4765 addr.
nTime = current_a_time - 5 * 24h;
4767 AddAddressKnown(*peer, addr);
4769 (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
4776 if (addr.
nTime > current_a_time - 10min && !peer->m_getaddr_sent &&
4779 RelayAddress(pfrom.
GetId(), addr, fReachable);
4783 vAddrOk.push_back(addr);
4786 peer->m_addr_processed += num_proc;
4787 peer->m_addr_rate_limited += num_rate_limit;
4789 "Received addr: %u addresses (%u processed, %u rate-limited) "
4791 vAddr.size(), num_proc, num_rate_limit, pfrom.
GetId());
4793 m_addrman.Add(vAddrOk, pfrom.
addr, 2h);
4794 if (vAddr.size() < 1000) {
4795 peer->m_getaddr_sent =
false;
4802 "addrfetch connection completed peer=%d; disconnecting\n",
4810 peer->m_wants_addrv2 =
true;
4815 peer->m_prefers_headers =
true;
4820 bool sendcmpct_hb{
false};
4821 uint64_t sendcmpct_version{0};
4822 vRecv >> sendcmpct_hb >> sendcmpct_version;
4829 CNodeState *nodestate = State(pfrom.
GetId());
4830 nodestate->m_provides_cmpctblocks =
true;
4831 nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
4840 std::vector<CInv> vInv;
4843 Misbehaving(*peer, 20,
4844 strprintf(
"inv message size = %u", vInv.size()));
4850 bool reject_tx_invs{m_ignore_incoming_txs || pfrom.
IsBlockOnlyConn()};
4855 reject_tx_invs =
false;
4858 const auto current_time{GetTime<std::chrono::microseconds>()};
4859 std::optional<BlockHash> best_block;
4861 auto logInv = [&](
const CInv &inv,
bool fAlreadyHave) {
4863 fAlreadyHave ?
"have" :
"new", pfrom.
GetId());
4866 for (
CInv &inv : vInv) {
4867 if (interruptMsgProc) {
4873 const bool fAlreadyHave = AlreadyHaveBlock(
BlockHash(inv.
hash));
4874 logInv(inv, fAlreadyHave);
4877 UpdateBlockAvailability(pfrom.
GetId(), hash);
4878 if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() &&
4879 !IsBlockRequested(hash)) {
4886 best_block = std::move(hash);
4894 const bool fAlreadyHave = AlreadyHaveProof(proofid);
4895 logInv(inv, fAlreadyHave);
4896 AddKnownProof(*peer, proofid);
4899 !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
4900 const bool preferred = isPreferredDownloadPeer(pfrom);
4902 LOCK(cs_proofrequest);
4903 AddProofAnnouncement(pfrom, proofid, current_time,
4912 const bool fAlreadyHave = AlreadyHaveTx(txid);
4913 logInv(inv, fAlreadyHave);
4915 AddKnownTx(*peer, txid);
4916 if (reject_tx_invs) {
4918 "transaction (%s) inv sent in violation of "
4919 "protocol, disconnecting peer=%d\n",
4923 }
else if (!fAlreadyHave && !m_chainman.ActiveChainstate()
4924 .IsInitialBlockDownload()) {
4925 AddTxAnnouncement(pfrom, txid, current_time);
4932 "Unknown inv type \"%s\" received from peer=%d\n",
4949 if (state.fSyncStarted ||
4950 (!peer->m_inv_triggered_getheaders_before_sync &&
4951 *best_block != m_last_block_inv_triggering_headers_sync)) {
4952 if (MaybeSendGetHeaders(
4953 pfrom,
GetLocator(m_chainman.m_best_header), *peer)) {
4955 m_chainman.m_best_header->nHeight,
4956 best_block->ToString(), pfrom.
GetId());
4958 if (!state.fSyncStarted) {
4959 peer->m_inv_triggered_getheaders_before_sync =
true;
4963 m_last_block_inv_triggering_headers_sync = *best_block;
4972 std::vector<CInv> vInv;
4975 Misbehaving(*peer, 20,
4976 strprintf(
"getdata message size = %u", vInv.size()));
4981 vInv.size(), pfrom.
GetId());
4983 if (vInv.size() > 0) {
4989 LOCK(peer->m_getdata_requests_mutex);
4990 peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
4991 vInv.begin(), vInv.end());
4992 ProcessGetData(config, pfrom, *peer, interruptMsgProc);
5001 vRecv >> locator >> hashStop;
5005 "getblocks locator size %lld > %d, disconnect peer=%d\n",
5019 std::shared_ptr<const CBlock> a_recent_block;
5021 LOCK(m_most_recent_block_mutex);
5022 a_recent_block = m_most_recent_block;
5025 if (!m_chainman.ActiveChainstate().ActivateBestChain(
5026 state, a_recent_block)) {
5036 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
5040 pindex = m_chainman.ActiveChain().Next(pindex);
5044 (pindex ? pindex->
nHeight : -1),
5047 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5056 const int nPrunedBlocksLikelyToHave =
5058 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
5059 if (m_chainman.m_blockman.IsPruneMode() &&
5060 (!pindex->nStatus.hasData() ||
5061 pindex->
nHeight <= m_chainman.ActiveChain().Tip()->nHeight -
5062 nPrunedBlocksLikelyToHave)) {
5065 " getblocks stopping, pruned or too old block at %d %s\n",
5070 peer->m_block_inv_mutex,
5071 peer->m_blocks_for_inv_relay.push_back(pindex->
GetBlockHash()));
5072 if (--nLimit <= 0) {
5078 peer->m_continuation_block = pindex->GetBlockHash();
5090 std::shared_ptr<const CBlock> recent_block;
5092 LOCK(m_most_recent_block_mutex);
5093 if (m_most_recent_block_hash == req.
blockhash) {
5094 recent_block = m_most_recent_block;
5099 SendBlockTransactions(pfrom, *peer, *recent_block, req);
5107 m_chainman.m_blockman.LookupBlockIndex(req.
blockhash);
5108 if (!pindex || !pindex->nStatus.hasData()) {
5111 "Peer %d sent us a getblocktxn for a block we don't have\n",
5120 m_chainparams.GetConsensus());
5123 SendBlockTransactions(pfrom, *peer, block, req);
5136 "Peer %d sent us a getblocktxn for a block > %i deep\n",
5141 WITH_LOCK(peer->m_getdata_requests_mutex,
5142 peer->m_getdata_requests.push_back(inv));
5151 vRecv >> locator >> hashStop;
5155 "getheaders locator size %lld > %d, disconnect peer=%d\n",
5161 if (m_chainman.m_blockman.LoadingBlocks()) {
5164 "Ignoring getheaders from peer=%d while importing/reindexing\n",
5178 if (m_chainman.ActiveTip() ==
nullptr ||
5179 (m_chainman.ActiveTip()->nChainWork <
5180 m_chainman.MinimumChainWork() &&
5183 "Ignoring getheaders from peer=%d because active chain "
5184 "has too little work; sending empty response\n",
5189 std::vector<CBlock>()));
5193 CNodeState *nodestate = State(pfrom.
GetId());
5197 pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
5202 if (!BlockRequestAllowed(pindex)) {
5204 "%s: ignoring request from peer=%i for old block "
5205 "header that isn't in the main chain\n",
5206 __func__, pfrom.
GetId());
5212 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
5214 pindex = m_chainman.ActiveChain().Next(pindex);
5220 std::vector<CBlock> vHeaders;
5223 (pindex ? pindex->
nHeight : -1),
5226 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5228 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
5245 nodestate->pindexBestHeaderSent =
5246 pindex ? pindex : m_chainman.ActiveChain().Tip();
5247 m_connman.PushMessage(&pfrom,
5256 if ((m_ignore_incoming_txs &&
5260 "transaction sent in violation of protocol peer=%d\n",
5270 AddKnownTx(*peer, txid);
5274 m_txrequest.ReceivedResponse(pfrom.
GetId(), txid);
5276 if (AlreadyHaveTx(txid)) {
5282 if (!m_mempool.exists(tx.
GetId())) {
5283 LogPrintf(
"Not relaying non-mempool transaction %s from "
5284 "forcerelay peer=%d\n",
5287 LogPrintf(
"Force relaying tx %s from peer=%d\n",
5289 RelayTransaction(tx.
GetId());
5301 m_txrequest.ForgetInvId(tx.
GetId());
5302 RelayTransaction(tx.
GetId());
5308 "AcceptToMemoryPool: peer=%d: accepted %s "
5309 "(poolsz %u txn, %u kB)\n",
5311 m_mempool.DynamicMemoryUsage() / 1000);
5315 ProcessOrphanTx(config, peer->m_orphan_work_set);
5319 bool fRejectedParents =
false;
5323 std::vector<TxId> unique_parents;
5324 unique_parents.reserve(tx.
vin.size());
5329 std::sort(unique_parents.begin(), unique_parents.end());
5330 unique_parents.erase(
5331 std::unique(unique_parents.begin(), unique_parents.end()),
5332 unique_parents.end());
5333 for (
const TxId &parent_txid : unique_parents) {
5334 if (m_recent_rejects.contains(parent_txid)) {
5335 fRejectedParents =
true;
5339 if (!fRejectedParents) {
5340 const auto current_time{GetTime<std::chrono::microseconds>()};
5342 for (
const TxId &parent_txid : unique_parents) {
5344 AddKnownTx(*peer, parent_txid);
5345 if (!AlreadyHaveTx(parent_txid)) {
5346 AddTxAnnouncement(pfrom, parent_txid, current_time);
5351 AddToCompactExtraTransactions(ptx);
5356 m_txrequest.ForgetInvId(tx.
GetId());
5360 unsigned int nMaxOrphanTx = (
unsigned int)std::max(
5364 unsigned int nEvicted = m_orphanage.
LimitOrphans(nMaxOrphanTx);
5367 "orphanage overflow, removed %u tx\n", nEvicted);
5371 "not keeping orphan with rejected parents %s\n",
5375 m_recent_rejects.insert(tx.
GetId());
5376 m_txrequest.ForgetInvId(tx.
GetId());
5379 m_recent_rejects.insert(tx.
GetId());
5380 m_txrequest.ForgetInvId(tx.
GetId());
5383 AddToCompactExtraTransactions(ptx);
5406 "%s from peer=%d was not accepted: %s\n",
5408 MaybePunishNodeForTx(pfrom.
GetId(), state);
5415 if (m_chainman.m_blockman.LoadingBlocks()) {
5417 "Unexpected cmpctblock message received from peer %d\n",
5424 vRecv >> cmpctblock;
5425 }
catch (std::ios_base::failure &e) {
5427 Misbehaving(*peer, 100,
"cmpctblock-bad-indexes");
5431 bool received_new_header =
false;
5437 m_chainman.m_blockman.LookupBlockIndex(
5442 if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
5443 MaybeSendGetHeaders(
5444 pfrom,
GetLocator(m_chainman.m_best_header), *peer);
5450 GetAntiDoSWorkThreshold()) {
5454 "Ignoring low-work compact block from peer %d\n",
5459 if (!m_chainman.m_blockman.LookupBlockIndex(
5461 received_new_header =
true;
5467 if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header},
5471 MaybePunishNodeForBlock(pfrom.
GetId(), state,
5473 "invalid header via cmpctblock");
5482 bool fProcessBLOCKTXN =
false;
5488 bool fRevertToHeaderProcessing =
false;
5492 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5493 bool fBlockReconstructed =
false;
5501 CNodeState *nodestate = State(pfrom.
GetId());
5505 if (received_new_header &&
5507 m_chainman.ActiveChain().Tip()->nChainWork) {
5508 nodestate->m_last_block_announcement =
GetTime();
5512 std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
5513 iterator blockInFlightIt =
5515 bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.
end();
5517 if (pindex->nStatus.hasData()) {
5523 m_chainman.ActiveChain()
5528 if (fAlreadyInFlight) {
5532 std::vector<CInv> vInv(1);
5534 m_connman.PushMessage(
5542 if (!fAlreadyInFlight && !CanDirectFetch()) {
5548 if (pindex->
nHeight <= m_chainman.ActiveChain().Height() + 2) {
5549 if ((!fAlreadyInFlight && nodestate->nBlocksInFlight <
5551 (fAlreadyInFlight &&
5552 blockInFlightIt->second.first == pfrom.
GetId())) {
5553 std::list<QueuedBlock>::iterator *queuedBlockIt =
nullptr;
5554 if (!BlockRequested(config, pfrom.
GetId(), *pindex,
5556 if (!(*queuedBlockIt)->partialBlock) {
5558 ->partialBlock.reset(
5565 "we were already syncing!\n");
5571 *(*queuedBlockIt)->partialBlock;
5573 partialBlock.
InitData(cmpctblock, vExtraTxnForCompact);
5578 Misbehaving(*peer, 100,
"invalid compact block");
5583 std::vector<CInv> vInv(1);
5585 m_connman.PushMessage(
5591 for (
size_t i = 0; i < cmpctblock.
BlockTxCount(); i++) {
5602 fProcessBLOCKTXN =
true;
5605 m_connman.PushMessage(
5616 tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
5621 std::vector<CTransactionRef> dummy;
5622 status = tempBlock.FillBlock(*pblock, dummy);
5624 fBlockReconstructed =
true;
5628 if (fAlreadyInFlight) {
5632 std::vector<CInv> vInv(1);
5634 m_connman.PushMessage(
5640 fRevertToHeaderProcessing =
true;
5645 if (fProcessBLOCKTXN) {
5647 blockTxnMsg, time_received, interruptMsgProc);
5650 if (fRevertToHeaderProcessing) {
5656 return ProcessHeadersMessage(config, pfrom, *peer,
5661 if (fBlockReconstructed) {
5666 mapBlockSource.emplace(pblock->GetHash(),
5667 std::make_pair(pfrom.
GetId(),
false));
5678 ProcessBlock(config, pfrom, pblock,
true,
5687 RemoveBlockRequest(pblock->GetHash());
5695 if (m_chainman.m_blockman.LoadingBlocks()) {
5697 "Unexpected blocktxn message received from peer %d\n",
5705 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5706 bool fBlockRead =
false;
5711 std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
5712 iterator it = mapBlocksInFlight.find(resp.
blockhash);
5713 if (it == mapBlocksInFlight.end() ||
5714 !it->second.second->partialBlock ||
5715 it->second.first != pfrom.
GetId()) {
5717 "Peer %d sent us block transactions for block "
5718 "we weren't expecting\n",
5724 *it->second.second->partialBlock;
5732 "invalid compact block/non-matching block transactions");
5736 std::vector<CInv> invs;
5738 m_connman.PushMessage(&pfrom,
5769 std::make_pair(pfrom.
GetId(),
false));
5780 ProcessBlock(config, pfrom, pblock,
true,
5788 if (m_chainman.m_blockman.LoadingBlocks()) {
5790 "Unexpected headers message received from peer %d\n",
5797 peer->m_last_getheaders_timestamp = {};
5799 std::vector<CBlockHeader> headers;
5805 Misbehaving(*peer, 20,
5806 strprintf(
"too-many-headers: headers message size = %u",
5810 headers.resize(nCount);
5811 for (
unsigned int n = 0; n < nCount; n++) {
5812 vRecv >> headers[n];
5817 ProcessHeadersMessage(config, pfrom, *peer, std::move(headers),
5823 if (m_headers_presync_should_signal.exchange(
false)) {
5824 HeadersPresyncStats stats;
5826 LOCK(m_headers_presync_mutex);
5828 m_headers_presync_stats.find(m_headers_presync_bestpeer);
5829 if (it != m_headers_presync_stats.end()) {
5834 m_chainman.ReportHeadersPresync(
5835 stats.first, stats.second->first, stats.second->second);
5844 if (m_chainman.m_blockman.LoadingBlocks()) {
5846 "Unexpected block message received from peer %d\n",
5851 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5855 pblock->GetHash().ToString(), pfrom.
GetId());
5861 bool forceProcessing =
5863 !m_chainman.ActiveChainstate().IsInitialBlockDownload();
5864 const BlockHash hash = pblock->GetHash();
5865 bool min_pow_checked =
false;
5870 forceProcessing = IsBlockRequested(hash);
5871 RemoveBlockRequest(hash);
5875 mapBlockSource.emplace(hash, std::make_pair(pfrom.
GetId(),
true));
5879 m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock);
5883 GetAntiDoSWorkThreshold()) {
5884 min_pow_checked =
true;
5887 ProcessBlock(config, pfrom, pblock, forceProcessing, min_pow_checked);
5894 if (pfrom.m_avalanche_pubkey.has_value()) {
5897 "Ignoring avahello from peer %d: already in our node set\n",
5903 vRecv >> delegation;
5910 if (!delegation.
verify(state, pubkey)) {
5911 Misbehaving(*peer, 100,
"invalid-delegation");
5914 pfrom.m_avalanche_pubkey = std::move(pubkey);
5917 sighasher << delegation.
getId();
5925 if (!(*pfrom.m_avalanche_pubkey)
5926 .VerifySchnorr(sighasher.GetHash(),
sig)) {
5927 Misbehaving(*peer, 100,
"invalid-avahello-signature");
5934 if (!AlreadyHaveProof(proofid)) {
5935 const bool preferred = isPreferredDownloadPeer(pfrom);
5936 LOCK(cs_proofrequest);
5937 AddProofAnnouncement(pfrom, proofid,
5938 GetTime<std::chrono::microseconds>(),
5955 m_connman.PushMessage(&pfrom,
5957 WITH_LOCK(peer->m_addr_token_bucket_mutex,
5960 if (peer->m_proof_relay &&
5961 !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
5962 m_connman.PushMessage(&pfrom,
5964 peer->m_proof_relay->compactproofs_requested =
true;
5972 const auto now = Now<SteadyMilliseconds>();
5973 const int64_t cooldown =
5979 if (now < last_poll + std::chrono::milliseconds(cooldown)) {
5981 "Ignoring repeated avapoll from peer %d: cooldown not "
5987 const bool quorum_established =
5997 strprintf(
"too-many-ava-poll: poll message size = %u", nCount));
6001 std::vector<avalanche::Vote> votes;
6002 votes.reserve(nCount);
6004 for (
unsigned int n = 0; n < nCount; n++) {
6012 if (!quorum_established) {
6013 votes.emplace_back(vote, inv.
hash);
6036 "poll inv type %d unknown from peer=%d\n",
6041 votes.emplace_back(vote, inv.
hash);
6063 if (!pfrom.m_avalanche_pubkey.has_value() ||
6064 !(*pfrom.m_avalanche_pubkey)
6065 .VerifySchnorr(verifier.GetHash(),
sig)) {
6066 Misbehaving(*peer, 100,
"invalid-ava-response-signature");
6071 auto now = GetTime<std::chrono::seconds>();
6073 std::vector<avalanche::VoteItemUpdate> updates;
6080 Misbehaving(*peer, banscore,
error);
6096 Misbehaving(*peer, 2,
error);
6108 auto logVoteUpdate = [](
const auto &voteUpdate,
6109 const std::string &voteItemTypeStr,
6110 const auto &voteItemId) {
6111 std::string voteOutcome;
6112 switch (voteUpdate.getStatus()) {
6114 voteOutcome =
"invalidated";
6117 voteOutcome =
"rejected";
6120 voteOutcome =
"accepted";
6123 voteOutcome =
"finalized";
6126 voteOutcome =
"stalled";
6134 voteItemTypeStr, voteItemId.ToString());
6137 bool shouldActivateBestChain =
false;
6142 for (
const auto &u : updates) {
6147 if (
auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
6151 logVoteUpdate(u,
"proof", proofid);
6153 auto rejectionMode =
6155 auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
6156 switch (u.getStatus()) {
6172 return pm.rejectProof(proofid,
6176 "ERROR: Failed to reject proof: %s\n",
6181 nextCooldownTimePoint +=
6183 "-avalanchepeerreplacementcooldown",
6190 avalanche::PeerManager::
6191 RegistrationMode::FORCE_ACCEPT);
6194 [&](const avalanche::Peer &peer) {
6195 pm.updateNextPossibleConflictTime(
6197 nextCooldownTimePoint);
6198 if (u.getStatus() ==
6199 avalanche::VoteStatus::
6201 pm.setFinalized(peer.peerid);
6209 "ERROR: Failed to accept proof: %s\n",
6216 if (
auto pitem = std::get_if<const CBlockIndex *>(&item)) {
6219 shouldActivateBestChain =
true;
6223 switch (u.getStatus()) {
6227 m_chainman.ActiveChainstate().ParkBlock(state, pindex);
6229 LogPrintf(
"ERROR: Database error: %s\n",
6236 m_chainman.ActiveChainstate().UnparkBlock(pindex);
6241 m_chainman.ActiveChainstate().UnparkBlock(pindex);
6243 m_chainman.ActiveChainstate().AvalancheFinalizeBlock(
6254 if (!fPreConsensus) {
6258 if (
auto pitem = std::get_if<const CTransactionRef>(&item)) {
6262 const TxId &txid = tx->GetId();
6263 logVoteUpdate(u,
"tx", txid);
6265 switch (u.getStatus()) {
6274 auto it = m_mempool.GetIter(txid);
6275 if (it.has_value()) {
6276 m_mempool.removeRecursive(
6286 auto it = m_mempool.GetIter(txid);
6287 if (!it.has_value()) {
6289 "Error: finalized tx (%s) is not in the "
6295 m_mempool.setAvalancheFinalized(**it);
6305 if (shouldActivateBestChain) {
6307 if (!m_chainman.ActiveChainstate().ActivateBestChain(state)) {
6319 ReceivedAvalancheProof(pfrom, *peer, proof);
6325 if (peer->m_proof_relay ==
nullptr) {
6329 peer->m_proof_relay->lastSharedProofsUpdate =
6330 GetTime<std::chrono::seconds>();
6332 peer->m_proof_relay->sharedProofs =
6338 peer->m_proof_relay->sharedProofs);
6339 m_connman.PushMessage(
6346 if (peer->m_proof_relay ==
nullptr) {
6351 if (!peer->m_proof_relay->compactproofs_requested) {
6355 peer->m_proof_relay->compactproofs_requested =
false;
6359 vRecv >> compactProofs;
6360 }
catch (std::ios_base::failure &e) {
6362 Misbehaving(*peer, 100,
"avaproofs-bad-indexes");
6367 std::set<uint32_t> prefilledIndexes;
6369 if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
6380 "Got an avaproofs message with no shortid (peer %d)\n",
6402 auto shortIdProcessor =
6406 if (shortIdProcessor.hasOutOfBoundIndex()) {
6409 Misbehaving(*peer, 100,
"avaproofs-bad-indexes");
6412 if (!shortIdProcessor.isEvenlyDistributed()) {
6417 size_t proofCount = 0;
6418 std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
6425 shortIdProcessor.matchKnownItem(shortid, peer.
proof);
6432 remoteProofsStatus.emplace_back(peer.
getProofId(),
6436 proofCount += added;
6445 for (
size_t i = 0; i < compactProofs.
size(); i++) {
6446 if (shortIdProcessor.getItem(i) ==
nullptr) {
6451 m_connman.PushMessage(&pfrom,
6463 return pfrom.m_avalanche_pubkey.has_value())) {
6466 for (
const auto &[proofid, present] : remoteProofsStatus) {
6476 if (peer->m_proof_relay ==
nullptr) {
6483 auto requestedIndiceIt = proofreq.
indices.begin();
6484 uint32_t treeIndice = 0;
6485 peer->m_proof_relay->sharedProofs.forEachLeaf([&](
const auto &proof) {
6486 if (requestedIndiceIt == proofreq.
indices.end()) {
6491 if (treeIndice++ == *requestedIndiceIt) {
6492 m_connman.PushMessage(
6494 requestedIndiceIt++;
6500 peer->m_proof_relay->sharedProofs = {};
6513 "Ignoring \"getaddr\" from %s connection. peer=%d\n",
6520 Assume(SetupAddressRelay(pfrom, *peer));
6524 if (peer->m_getaddr_recvd) {
6529 peer->m_getaddr_recvd =
true;
6531 peer->m_addrs_to_send.clear();
6532 std::vector<CAddress> vAddr;
6538 vAddr = m_connman.GetAddresses(pfrom, maxAddrToSend,
6542 for (
const CAddress &addr : vAddr) {
6543 PushAddress(*peer, addr, insecure_rand);
6549 auto now = GetTime<std::chrono::seconds>();
6554 "Ignoring repeated getavaaddr from peer %d\n",
6562 if (!SetupAddressRelay(pfrom, *peer)) {
6564 "Ignoring getavaaddr message from %s peer=%d\n",
6569 auto availabilityScoreComparator = [](
const CNode *lhs,
6572 double scoreRhs = rhs->getAvailabilityScore();
6574 if (scoreLhs != scoreRhs) {
6575 return scoreLhs > scoreRhs;
6584 std::set<
const CNode *, decltype(availabilityScoreComparator)> avaNodes(
6585 availabilityScoreComparator);
6586 m_connman.ForEachNode([&](
const CNode *pnode) {
6592 avaNodes.insert(pnode);
6594 avaNodes.erase(std::prev(avaNodes.end()));
6598 peer->m_addrs_to_send.clear();
6600 for (
const CNode *pnode : avaNodes) {
6601 PushAddress(*peer, pnode->
addr, insecure_rand);
6612 "mempool request with bloom filters disabled, "
6613 "disconnect peer=%d\n",
6620 if (m_connman.OutboundTargetReached(
false) &&
6624 "mempool request with bandwidth limit reached, "
6625 "disconnect peer=%d\n",
6632 if (
auto tx_relay = peer->GetTxRelay()) {
6633 LOCK(tx_relay->m_tx_inventory_mutex);
6634 tx_relay->m_send_mempool =
true;
6657 m_connman.PushMessage(&pfrom,
6664 const auto ping_end = time_received;
6667 bool bPingFinished =
false;
6668 std::string sProblem;
6670 if (nAvail >=
sizeof(nonce)) {
6675 if (peer->m_ping_nonce_sent != 0) {
6676 if (nonce == peer->m_ping_nonce_sent) {
6679 bPingFinished =
true;
6680 const auto ping_time = ping_end - peer->m_ping_start.load();
6681 if (ping_time.count() >= 0) {
6686 sProblem =
"Timing mishap";
6690 sProblem =
"Nonce mismatch";
6694 bPingFinished =
true;
6695 sProblem =
"Nonce zero";
6699 sProblem =
"Unsolicited pong without ping";
6704 bPingFinished =
true;
6705 sProblem =
"Short payload";
6708 if (!(sProblem.empty())) {
6710 "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
6711 pfrom.
GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
6714 if (bPingFinished) {
6715 peer->m_ping_nonce_sent = 0;
6723 "filterload received despite not offering bloom services "
6724 "from peer=%d; disconnecting\n",
6734 Misbehaving(*peer, 100,
"too-large bloom filter");
6735 }
else if (
auto tx_relay = peer->GetTxRelay()) {
6737 LOCK(tx_relay->m_bloom_filter_mutex);
6738 tx_relay->m_bloom_filter.reset(
new CBloomFilter(filter));
6739 tx_relay->m_relay_txs =
true;
6749 "filteradd received despite not offering bloom services "
6750 "from peer=%d; disconnecting\n",
6755 std::vector<uint8_t> vData;
6764 }
else if (
auto tx_relay = peer->GetTxRelay()) {
6765 LOCK(tx_relay->m_bloom_filter_mutex);
6766 if (tx_relay->m_bloom_filter) {
6767 tx_relay->m_bloom_filter->insert(vData);
6775 Misbehaving(*peer, 100,
"bad filteradd message");
6783 "filterclear received despite not offering bloom services "
6784 "from peer=%d; disconnecting\n",
6789 auto tx_relay = peer->GetTxRelay();
6795 LOCK(tx_relay->m_bloom_filter_mutex);
6796 tx_relay->m_bloom_filter =
nullptr;
6797 tx_relay->m_relay_txs =
true;
6806 vRecv >> newFeeFilter;
6808 if (
auto tx_relay = peer->GetTxRelay()) {
6809 tx_relay->m_fee_filter_received = newFeeFilter;
6818 ProcessGetCFilters(pfrom, *peer, vRecv);
6823 ProcessGetCFHeaders(pfrom, *peer, vRecv);
6828 ProcessGetCFCheckPt(pfrom, *peer, vRecv);
6833 std::vector<CInv> vInv;
6839 for (
CInv &inv : vInv) {
6845 m_txrequest.ReceivedResponse(pfrom.
GetId(),
TxId(inv.
hash));
6849 LOCK(cs_proofrequest);
6850 m_proofrequest.ReceivedResponse(
// Apply any pending discouragement to `peer`: if the peer's misbehavior
// handling has flagged it (m_should_discourage), discourage its address via
// the ban manager and disconnect it — unless it is a privileged peer
// (noban / manually connected), in which case only a warning is logged.
// NOTE(review): this excerpt is fragmentary (several original source lines
// are missing); comments describe only what the visible code shows.
6864 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(
CNode &pnode, Peer &peer) {
// Misbehavior state is guarded by the peer's dedicated mutex.
6866 LOCK(peer.m_misbehavior_mutex);
// Nothing pending — fast path out (early-return body not visible here).
6869 if (!peer.m_should_discourage) {
// Consume the flag so discouragement is applied at most once.
6873 peer.m_should_discourage =
false;
// Privileged peers are warned about, not punished.
6879 LogPrintf(
"Warning: not punishing noban peer %d!\n", peer.m_id);
6885 LogPrintf(
"Warning: not punishing manually connected peer %d!\n",
6894 "Warning: disconnecting but not discouraging %s peer %d!\n",
// Normal case: mark the address as discouraged, then drop the connection.
6905 m_banman->Discourage(pnode.
addr);
6907 m_connman.DisconnectNode(pnode.
addr);
// Process at most one queued network message from `pfrom`.
// Returns true (fMoreWork) when more messages remain in the peer's queue so
// the caller should schedule another pass. Before dequeuing a new message it
// services backlog work: pending getdata requests and the orphan-tx work set.
// NOTE(review): excerpt is fragmentary (original lines are missing between
// the visible ones); comments describe only what the visible code shows.
6911 bool PeerManagerImpl::ProcessMessages(
const Config &config,
CNode *pfrom,
6912 std::atomic<bool> &interruptMsgProc) {
6923 bool fMoreWork =
false;
// Look up the application-level Peer object; bail if already removed.
6925 PeerRef peer = GetPeerRef(pfrom->
GetId());
6926 if (peer ==
nullptr) {
// Drain any pending getdata requests before reading a new message.
6931 LOCK(peer->m_getdata_requests_mutex);
6932 if (!peer->m_getdata_requests.empty()) {
6933 ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
// Retry orphan transactions whose parents may have arrived.
6939 if (!peer->m_orphan_work_set.empty()) {
6940 ProcessOrphanTx(config, peer->m_orphan_work_set);
6951 LOCK(peer->m_getdata_requests_mutex);
6952 if (!peer->m_getdata_requests.empty()) {
6959 if (!peer->m_orphan_work_set.empty()) {
// Splice exactly one message off the receive queue; fMoreWork reflects
// whether anything is left for a later call.
6969 std::list<CNetMessage> msgs;
6972 if (pfrom->vProcessMsg.empty()) {
6976 msgs.splice(msgs.begin(), pfrom->vProcessMsg,
6977 pfrom->vProcessMsg.begin());
6981 fMoreWork = !pfrom->vProcessMsg.empty();
6987 msg.m_recv.size(), msg.m_recv.
data());
// Header sanity checks: bad network magic or bad checksum lead to
// discouragement + disconnect; a malformed header is handled separately.
6997 if (!msg.m_valid_netmagic) {
6999 "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
7004 m_banman->Discourage(pfrom->
addr);
7006 m_connman.DisconnectNode(pfrom->
addr);
7013 if (!msg.m_valid_header) {
7021 if (!msg.m_valid_checksum) {
7026 m_banman->Discourage(pfrom->
addr);
7028 m_connman.DisconnectNode(pfrom->
addr);
// Dispatch the validated message to the per-type handler.
7033 ProcessMessage(config, *pfrom, msg.
m_type, vRecv, msg.m_time,
7035 if (interruptMsgProc) {
7040 LOCK(peer->m_getdata_requests_mutex);
7041 if (!peer->m_getdata_requests.empty()) {
// Handler exceptions are caught and logged; they must not kill the
// message-processing thread.
7045 }
catch (
const std::exception &e) {
7048 e.what(),
typeid(e).name());
// Chain-sync eviction state machine for an outbound peer `pto`.
// If the peer's best known block has at least as much work as our tip, any
// pending timeout is cleared. Otherwise a benchmark header (our current tip)
// and a timeout are armed; when the timeout expires the peer is either sent
// one getheaders probe or, if already probed, disconnected for serving a
// stale chain.
// NOTE(review): excerpt is fragmentary; comments describe only what the
// visible code shows.
7057 void PeerManagerImpl::ConsiderEviction(
CNode &pto, Peer &peer,
7058 std::chrono::seconds time_in_seconds) {
7061 CNodeState &state = *State(pto.
GetId());
7065 state.fSyncStarted) {
// Case 1: peer has caught up to (or beyond) our tip — reset the machine.
7072 if (state.pindexBestKnownBlock !=
nullptr &&
7073 state.pindexBestKnownBlock->nChainWork >=
7074 m_chainman.ActiveChain().Tip()->nChainWork) {
7075 if (state.m_chain_sync.m_timeout != 0s) {
7076 state.m_chain_sync.m_timeout = 0s;
7077 state.m_chain_sync.m_work_header =
nullptr;
7078 state.m_chain_sync.m_sent_getheaders =
false;
// Case 2: no timeout armed yet, or the peer passed the previous benchmark
// header — (re)arm against the current tip.
7080 }
else if (state.m_chain_sync.m_timeout == 0s ||
7081 (state.m_chain_sync.m_work_header !=
nullptr &&
7082 state.pindexBestKnownBlock !=
nullptr &&
7083 state.pindexBestKnownBlock->nChainWork >=
7084 state.m_chain_sync.m_work_header->nChainWork)) {
7090 state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
7091 state.m_chain_sync.m_sent_getheaders =
false;
// Case 3: timeout expired.
7092 }
else if (state.m_chain_sync.m_timeout > 0s &&
7093 time_in_seconds > state.m_chain_sync.m_timeout) {
// Already probed once with getheaders and still behind: disconnect.
7098 if (state.m_chain_sync.m_sent_getheaders) {
7101 "Disconnecting outbound peer %d for old chain, best known "
7104 state.pindexBestKnownBlock !=
nullptr
7105 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
// A benchmark header must exist by the time we reach this branch.
7109 assert(state.m_chain_sync.m_work_header);
// First expiry: send one getheaders probe anchored just below the
// benchmark header, then extend the deadline.
7114 MaybeSendGetHeaders(
7115 pto,
GetLocator(state.m_chain_sync.m_work_header->pprev),
7119 "sending getheaders to outbound peer=%d to verify chain "
7120 "work (current best known block:%s, benchmark blockhash: "
7123 state.pindexBestKnownBlock !=
nullptr
7124 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
7126 state.m_chain_sync.m_work_header->GetBlockHash()
7128 state.m_chain_sync.m_sent_getheaders =
true;
7135 state.m_chain_sync.m_timeout =
// If we hold more outbound connections than our targets allow, pick the
// least useful peer and disconnect it. Two independent passes are visible:
// (1) extra block-relay-only peers — evict the youngest by last-block time,
// preferring the runner-up when the youngest recently delivered a block;
// (2) extra full outbound peers — evict the one with the oldest (and, on
// ties, highest NodeId) last block announcement, skipping protected peers.
// Peers with blocks in flight are spared.
// NOTE(review): excerpt is fragmentary; comments describe only what the
// visible code shows.
7142 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
// --- Pass 1: extra block-relay-only peers ---
7150 if (m_connman.GetExtraBlockRelayCount() > 0) {
7151 std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
7152 next_youngest_peer{-1, 0};
7154 m_connman.ForEachNode([&](
CNode *pnode) {
// NodeIds are assigned in connection order, so the largest id is
// the most recently connected peer.
7158 if (pnode->
GetId() > youngest_peer.first) {
7159 next_youngest_peer = youngest_peer;
7160 youngest_peer.first = pnode->GetId();
7161 youngest_peer.second = pnode->m_last_block_time;
7165 NodeId to_disconnect = youngest_peer.first;
// Prefer evicting the runner-up if the youngest peer delivered a
// block more recently than it did.
7166 if (youngest_peer.second > next_youngest_peer.second) {
7169 to_disconnect = next_youngest_peer.first;
7181 CNodeState *node_state = State(pnode->
GetId());
// Only evict when no blocks are in flight from this peer.
7182 if (node_state ==
nullptr ||
7184 node_state->nBlocksInFlight == 0)) {
7187 "disconnecting extra block-relay-only peer=%d "
7188 "(last block received at time %d)\n",
7195 "keeping block-relay-only peer=%d chosen for eviction "
7196 "(connect time: %d, blocks_in_flight: %d)\n",
7198 node_state->nBlocksInFlight);
// --- Pass 2: extra full outbound peers ---
7205 if (m_connman.GetExtraFullOutboundCount() <= 0) {
7214 int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
7225 CNodeState *state = State(pnode->
GetId());
7226 if (state ==
nullptr) {
// Peers protected by the chain-sync logic are never candidates.
7231 if (state->m_chain_sync.m_protect) {
// Candidate: oldest last-block-announcement wins; ties broken toward
// the higher (newer) NodeId.
7234 if (state->m_last_block_announcement < oldest_block_announcement ||
7235 (state->m_last_block_announcement == oldest_block_announcement &&
7236 pnode->
GetId() > worst_peer)) {
7237 worst_peer = pnode->
GetId();
7238 oldest_block_announcement = state->m_last_block_announcement;
7242 if (worst_peer == -1) {
7246 bool disconnected = m_connman.ForNode(
7254 CNodeState &state = *State(pnode->
GetId());
7256 state.nBlocksInFlight == 0) {
7258 "disconnecting extra outbound peer=%d (last block "
7259 "announcement received at time %d)\n",
7260 pnode->
GetId(), oldest_block_announcement);
7265 "keeping outbound peer=%d chosen for eviction "
7266 "(connect time: %d, blocks_in_flight: %d)\n",
7268 state.nBlocksInFlight);
// After evicting, stop actively seeking a replacement outbound peer.
7279 m_connman.SetTryNewOutboundPeer(
false);
// Periodic housekeeping: run extra-outbound-peer eviction, detect a stale
// tip (and, if stale, ask the connection manager to try one extra outbound
// peer, reverting the flag once the tip is fresh again), and — once initial
// sync allows direct fetch — start the extra block-relay peers, one time.
// NOTE(review): excerpt is fragmentary; comments describe only what the
// visible code shows.
7283 void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
7286 auto now{GetTime<std::chrono::seconds>()};
7288 EvictExtraOutboundPeers(now);
// Stale-tip check is rate-limited by m_stale_tip_check_time.
7290 if (now > m_stale_tip_check_time) {
7293 if (!m_chainman.m_blockman.LoadingBlocks() &&
7294 m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() &&
7296 LogPrintf(
"Potential stale tip detected, will try using extra "
7297 "outbound peer (last tip update: %d seconds ago)\n",
7299 m_connman.SetTryNewOutboundPeer(
true);
// Tip no longer stale: stop requesting the extra peer.
7300 }
else if (m_connman.GetTryNewOutboundPeer()) {
7301 m_connman.SetTryNewOutboundPeer(
false);
// One-shot transition when we can direct-fetch blocks.
7306 if (!m_initial_sync_finished && CanDirectFetch()) {
7307 m_connman.StartExtraBlockRelayPeers();
7308 m_initial_sync_finished =
true;
// Send a ping to `node_to` when one is due (queued explicitly, or the
// periodic interval elapsed with no ping outstanding). A nonzero random
// nonce is generated so the matching pong can be identified; the send time
// is recorded for RTT measurement. Also checks for ping timeout via the
// connection manager's inactivity rules (timeout handling not visible here).
// NOTE(review): excerpt is fragmentary; comments describe only what the
// visible code shows.
7312 void PeerManagerImpl::MaybeSendPing(
CNode &node_to, Peer &peer,
7313 std::chrono::microseconds now) {
7314 if (m_connman.ShouldRunInactivityChecks(
7315 node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
7316 peer.m_ping_nonce_sent &&
7328 bool pingSend =
false;
// An explicit ping was requested (e.g. by RPC).
7330 if (peer.m_ping_queued) {
// No ping outstanding and the ping interval has elapsed.
7335 if (peer.m_ping_nonce_sent == 0 &&
// Draw a nonzero nonce; zero is reserved to mean "no ping outstanding".
7344 nonce = GetRand<uint64_t>();
7345 }
while (nonce == 0);
7346 peer.m_ping_queued =
false;
// Record send time so the pong handler can compute the round trip.
7347 peer.m_ping_start = now;
7349 peer.m_ping_nonce_sent = nonce;
7350 m_connman.PushMessage(&node_to,
// Older-protocol path (branch condition not visible): no nonce tracked.
7355 peer.m_ping_nonce_sent = 0;
// Periodically flush the peer's queued addresses (m_addrs_to_send) as an
// "addr"/"addrv2" message. Also self-advertises our local address when
// listening and out of IBD. Addresses the peer already knows are filtered
// out via the per-peer rolling bloom filter before sending.
// NOTE(review): excerpt is fragmentary; comments describe only what the
// visible code shows.
7361 void PeerManagerImpl::MaybeSendAddr(
CNode &
node, Peer &peer,
7362 std::chrono::microseconds current_time) {
// Nothing to do for peers without address relay negotiated.
7364 if (!peer.m_addr_relay_enabled) {
7368 LOCK(peer.m_addr_send_times_mutex);
// Self-advertisement: only when listening, synced, and the local-addr
// timer has fired.
7369 if (
fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
7370 peer.m_next_local_addr_send < current_time) {
// Reset the known-addr filter so a re-advertisement isn't dropped.
7377 if (peer.m_next_local_addr_send != 0us) {
7378 peer.m_addr_known->reset();
7381 CAddress local_addr{*local_service, peer.m_our_services,
7384 PushAddress(peer, local_addr, insecure_rand);
// Regular addr trickle: wait until the next scheduled send time.
7391 if (current_time <= peer.m_next_addr_send) {
7395 peer.m_next_addr_send =
// Defensive clamp: the queue should never exceed max_addr_to_send.
7399 if (!
Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
7402 peer.m_addrs_to_send.resize(max_addr_to_send);
// Drop addresses the peer already knows; mark the rest as known.
7407 auto addr_already_known =
7410 bool ret = peer.m_addr_known->contains(addr.
GetKey());
7412 peer.m_addr_known->insert(addr.
GetKey());
7416 peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
7417 peer.m_addrs_to_send.end(),
7418 addr_already_known),
7419 peer.m_addrs_to_send.end());
7422 if (peer.m_addrs_to_send.empty()) {
// Pick message type per the peer's addrv2 preference (BIP155).
7426 const char *msg_type;
7428 if (peer.m_wants_addrv2) {
7435 m_connman.PushMessage(
7437 .
Make(make_flags, msg_type, peer.m_addrs_to_send));
7438 peer.m_addrs_to_send.clear();
// Release excess capacity after a burst of addresses.
7441 if (peer.m_addrs_to_send.capacity() > 40) {
7442 peer.m_addrs_to_send.shrink_to_fit();
// Send a one-time "sendheaders" announcement preference to the peer, but
// only once the peer's best known block carries more work than our minimum
// chain work (i.e. the peer looks like it is on a serious chain).
// NOTE(review): excerpt is fragmentary; comments describe only what the
// visible code shows.
7446 void PeerManagerImpl::MaybeSendSendHeaders(
CNode &
node, Peer &peer) {
// Guard ensures we send it at most once per peer.
7451 if (!peer.m_sent_sendheaders &&
7454 CNodeState &state = *State(
node.GetId());
7455 if (state.pindexBestKnownBlock !=
nullptr &&
7456 state.pindexBestKnownBlock->nChainWork >
7457 m_chainman.MinimumChainWork()) {
7464 peer.m_sent_sendheaders =
true;
// Periodically send a BIP133 "feefilter" message advertising the minimum fee
// rate we want announced to us, derived from the mempool min fee (rounded to
// limit fingerprinting, and floored at the min relay feerate). Skipped when
// we ignore incoming transactions. During IBD a MAX_FILTER is used (branch
// interior not fully visible). If the current filter drifts far from the
// last value sent, the next send is rescheduled sooner.
// NOTE(review): excerpt is fragmentary; comments describe only what the
// visible code shows.
7469 void PeerManagerImpl::MaybeSendFeefilter(
7470 CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
7471 if (m_ignore_incoming_txs) {
7488 Amount currentFilter = m_mempool.GetMinFee().GetFeePerK();
7492 if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
// Force an (almost) immediate refresh once we previously sent MAX_FILTER.
7498 if (peer.m_fee_filter_sent == MAX_FILTER) {
7501 peer.m_next_send_feefilter = 0us;
7504 if (current_time > peer.m_next_send_feefilter) {
// Round to a bucket to reduce peer fingerprinting via exact fee values.
7505 Amount filterToSend = g_filter_rounder.round(currentFilter);
7508 std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK());
// Only send when the value actually changed.
7509 if (filterToSend != peer.m_fee_filter_sent) {
7510 m_connman.PushMessage(
7513 peer.m_fee_filter_sent = filterToSend;
7515 peer.m_next_send_feefilter =
// If the live filter drifted outside [3/4, 4/3] of the last sent value,
// pull the next send time closer.
7522 peer.m_next_send_feefilter &&
7523 (currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
7524 currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
7525 peer.m_next_send_feefilter =
7526 current_time + GetRandomDuration<std::chrono::microseconds>(
// Comparator over iterators into a std::set<TxId>, used to heap-order
// transaction inventory before relay. Holds a non-owning CTxMemPool pointer;
// the comparison body is not visible in this excerpt (presumably it orders
// by mempool-derived priority — confirm against the full source).
7532 class CompareInvMempoolOrder {
7536 explicit CompareInvMempoolOrder(
CTxMemPool *_mempool) : mp(_mempool) {}
7538 bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
// Enable address relay for `peer` (idempotently). Block-relay-only
// connections never participate in address relay. On the first activation
// (atomic exchange returns false) the per-peer known-address rolling bloom
// filter is allocated.
// NOTE(review): excerpt is fragmentary; comments describe only what the
// visible code shows.
7548 bool PeerManagerImpl::SetupAddressRelay(
const CNode &
node, Peer &peer) {
7552 if (
node.IsBlockOnlyConn()) {
// exchange(true) returns the previous value: false means first activation.
7556 if (!peer.m_addr_relay_enabled.exchange(
true)) {
7559 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
7565 bool PeerManagerImpl::SendMessages(
const Config &config,
CNode *pto) {
7568 PeerRef peer = GetPeerRef(pto->
GetId());
7577 if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
7590 const auto current_time{GetTime<std::chrono::microseconds>()};
7595 "addrfetch connection timeout; disconnecting peer=%d\n",
7601 MaybeSendPing(*pto, *peer, current_time);
7608 bool sync_blocks_and_headers_from_peer =
false;
7610 MaybeSendAddr(*pto, *peer, current_time);
7612 MaybeSendSendHeaders(*pto, *peer);
7617 CNodeState &state = *State(pto->
GetId());
7620 if (m_chainman.m_best_header ==
nullptr) {
7621 m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
7627 if (state.fPreferredDownload) {
7628 sync_blocks_and_headers_from_peer =
true;
7639 if (m_num_preferred_download_peers == 0 ||
7640 mapBlocksInFlight.empty()) {
7641 sync_blocks_and_headers_from_peer =
true;
7645 if (!state.fSyncStarted && CanServeBlocks(*peer) &&
7646 !m_chainman.m_blockman.LoadingBlocks()) {
7649 if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
7651 const CBlockIndex *pindexStart = m_chainman.m_best_header;
7660 if (pindexStart->
pprev) {
7661 pindexStart = pindexStart->
pprev;
7663 if (MaybeSendGetHeaders(*pto,
GetLocator(pindexStart), *peer)) {
7666 "initial getheaders (%d) to peer=%d (startheight:%d)\n",
7668 peer->m_starting_height);
7670 state.fSyncStarted =
true;
7671 peer->m_headers_sync_timeout =
7676 std::chrono::microseconds{
7678 Ticks<std::chrono::seconds>(
7680 m_chainman.m_best_header->Time()) /
7697 LOCK(peer->m_block_inv_mutex);
7698 std::vector<CBlock> vHeaders;
7700 ((!peer->m_prefers_headers &&
7701 (!state.m_requested_hb_cmpctblocks ||
7702 peer->m_blocks_for_headers_relay.size() > 1)) ||
7703 peer->m_blocks_for_headers_relay.size() >
7708 ProcessBlockAvailability(pto->
GetId());
7710 if (!fRevertToInv) {
7711 bool fFoundStartingHeader =
false;
7715 for (
const BlockHash &hash : peer->m_blocks_for_headers_relay) {
7717 m_chainman.m_blockman.LookupBlockIndex(hash);
7719 if (m_chainman.ActiveChain()[pindex->
nHeight] != pindex) {
7721 fRevertToInv =
true;
7724 if (pBestIndex !=
nullptr && pindex->
pprev != pBestIndex) {
7735 fRevertToInv =
true;
7738 pBestIndex = pindex;
7739 if (fFoundStartingHeader) {
7742 }
else if (PeerHasHeader(&state, pindex)) {
7745 }
else if (pindex->
pprev ==
nullptr ||
7746 PeerHasHeader(&state, pindex->
pprev)) {
7749 fFoundStartingHeader =
true;
7754 fRevertToInv =
true;
7759 if (!fRevertToInv && !vHeaders.empty()) {
7760 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
7765 "%s sending header-and-ids %s to peer=%d\n",
7766 __func__, vHeaders.front().GetHash().ToString(),
7769 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
7771 LOCK(m_most_recent_block_mutex);
7772 if (m_most_recent_block_hash ==
7774 cached_cmpctblock_msg =
7776 *m_most_recent_compact_block);
7779 if (cached_cmpctblock_msg.has_value()) {
7780 m_connman.PushMessage(
7781 pto, std::move(cached_cmpctblock_msg.value()));
7788 m_connman.PushMessage(
7792 state.pindexBestHeaderSent = pBestIndex;
7793 }
else if (peer->m_prefers_headers) {
7794 if (vHeaders.size() > 1) {
7796 "%s: %u headers, range (%s, %s), to peer=%d\n",
7797 __func__, vHeaders.size(),
7798 vHeaders.front().GetHash().ToString(),
7799 vHeaders.back().GetHash().ToString(),
7803 "%s: sending header %s to peer=%d\n", __func__,
7804 vHeaders.front().GetHash().ToString(),
7807 m_connman.PushMessage(
7809 state.pindexBestHeaderSent = pBestIndex;
7811 fRevertToInv =
true;
7818 if (!peer->m_blocks_for_headers_relay.empty()) {
7820 peer->m_blocks_for_headers_relay.back();
7822 m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
7828 if (m_chainman.ActiveChain()[pindex->
nHeight] != pindex) {
7831 "Announcing block %s not on main chain (tip=%s)\n",
7833 m_chainman.ActiveChain()
7840 if (!PeerHasHeader(&state, pindex)) {
7841 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
7843 "%s: sending inv peer=%d hash=%s\n", __func__,
7848 peer->m_blocks_for_headers_relay.clear();
7855 std::vector<CInv> vInv;
7856 auto addInvAndMaybeFlush = [&](uint32_t type,
const uint256 &hash) {
7857 vInv.emplace_back(type, hash);
7859 m_connman.PushMessage(
7869 LOCK(peer->m_block_inv_mutex);
7871 vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
7877 for (
const BlockHash &hash : peer->m_blocks_for_inv_relay) {
7880 peer->m_blocks_for_inv_relay.clear();
7883 auto computeNextInvSendTime =
7884 [&](std::chrono::microseconds &next) ->
bool {
7887 if (next < current_time) {
7888 fSendTrickle =
true;
7890 next = NextInvToInbounds(
7895 next = current_time;
7899 return fSendTrickle;
7903 if (peer->m_proof_relay !=
nullptr) {
7904 LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
7906 if (computeNextInvSendTime(
7907 peer->m_proof_relay->m_next_inv_send_time)) {
7909 peer->m_proof_relay->m_proof_inventory_to_send.begin();
7911 peer->m_proof_relay->m_proof_inventory_to_send.end()) {
7914 it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
7917 if (peer->m_proof_relay->m_proof_inventory_known_filter
7918 .contains(proofid)) {
7922 peer->m_proof_relay->m_proof_inventory_known_filter.insert(
7925 peer->m_proof_relay->m_recently_announced_proofs.insert(
7931 if (
auto tx_relay = peer->GetTxRelay()) {
7932 LOCK(tx_relay->m_tx_inventory_mutex);
7934 const bool fSendTrickle =
7935 computeNextInvSendTime(tx_relay->m_next_inv_send_time);
7940 LOCK(tx_relay->m_bloom_filter_mutex);
7941 if (!tx_relay->m_relay_txs) {
7942 tx_relay->m_tx_inventory_to_send.clear();
7947 if (fSendTrickle && tx_relay->m_send_mempool) {
7948 auto vtxinfo = m_mempool.infoAll();
7949 tx_relay->m_send_mempool =
false;
7951 tx_relay->m_fee_filter_received.load()};
7953 LOCK(tx_relay->m_bloom_filter_mutex);
7955 for (
const auto &txinfo : vtxinfo) {
7956 const TxId &txid = txinfo.tx->GetId();
7957 tx_relay->m_tx_inventory_to_send.erase(txid);
7960 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
7963 if (tx_relay->m_bloom_filter &&
7964 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
7968 tx_relay->m_tx_inventory_known_filter.insert(txid);
7971 addInvAndMaybeFlush(
MSG_TX, txid);
7973 tx_relay->m_last_mempool_req =
7974 std::chrono::duration_cast<std::chrono::seconds>(
7981 std::vector<std::set<TxId>::iterator> vInvTx;
7982 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
7983 for (std::set<TxId>::iterator it =
7984 tx_relay->m_tx_inventory_to_send.begin();
7985 it != tx_relay->m_tx_inventory_to_send.end(); it++) {
7986 vInvTx.push_back(it);
7989 tx_relay->m_fee_filter_received.load()};
7994 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
7995 std::make_heap(vInvTx.begin(), vInvTx.end(),
7996 compareInvMempoolOrder);
8000 unsigned int nRelayedTransactions = 0;
8001 LOCK(tx_relay->m_bloom_filter_mutex);
8002 while (!vInvTx.empty() &&
8007 std::pop_heap(vInvTx.begin(), vInvTx.end(),
8008 compareInvMempoolOrder);
8009 std::set<TxId>::iterator it = vInvTx.back();
8011 const TxId txid = *it;
8013 tx_relay->m_tx_inventory_to_send.erase(it);
8015 if (tx_relay->m_tx_inventory_known_filter.contains(txid)) {
8019 auto txinfo = m_mempool.info(txid);
8025 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
8028 if (tx_relay->m_bloom_filter &&
8029 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
8034 tx_relay->m_recently_announced_invs.insert(txid);
8035 addInvAndMaybeFlush(
MSG_TX, txid);
8036 nRelayedTransactions++;
8039 while (!g_relay_expiration.empty() &&
8040 g_relay_expiration.front().first <
8042 mapRelay.erase(g_relay_expiration.front().second);
8043 g_relay_expiration.pop_front();
8046 auto ret = mapRelay.insert(
8047 std::make_pair(txid, std::move(txinfo.tx)));
8049 g_relay_expiration.push_back(std::make_pair(
8053 tx_relay->m_tx_inventory_known_filter.insert(txid);
8059 if (!vInv.empty()) {
8066 CNodeState &state = *State(pto->
GetId());
8069 auto stalling_timeout = m_block_stalling_timeout.load();
8070 if (state.m_stalling_since.count() &&
8071 state.m_stalling_since < current_time - stalling_timeout) {
8076 LogPrintf(
"Peer=%d is stalling block download, disconnecting\n",
8081 const auto new_timeout =
8083 if (stalling_timeout != new_timeout &&
8084 m_block_stalling_timeout.compare_exchange_strong(
8085 stalling_timeout, new_timeout)) {
8088 "Increased stalling timeout temporarily to %d seconds\n",
8100 if (state.vBlocksInFlight.size() > 0) {
8101 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
8102 int nOtherPeersWithValidatedDownloads =
8103 m_peers_downloading_from - 1;
8105 state.m_downloading_since +
8106 std::chrono::seconds{consensusParams.nPowTargetSpacing} *
8109 nOtherPeersWithValidatedDownloads)) {
8110 LogPrintf(
"Timeout downloading block %s from peer=%d, "
8112 queuedBlock.pindex->GetBlockHash().ToString(),
8120 if (state.fSyncStarted &&
8121 peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
8124 if (current_time > peer->m_headers_sync_timeout &&
8125 nSyncStarted == 1 &&
8126 (m_num_preferred_download_peers -
8127 state.fPreferredDownload >=
8136 LogPrintf(
"Timeout downloading headers from peer=%d, "
8142 LogPrintf(
"Timeout downloading headers from noban "
8143 "peer=%d, not disconnecting\n",
8149 state.fSyncStarted =
false;
8151 peer->m_headers_sync_timeout = 0us;
8157 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
8163 ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
8166 std::vector<CInv> vGetData;
8174 CNodeState &state = *State(pto->
GetId());
8176 if (CanServeBlocks(*peer) &&
8177 ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
8178 !m_chainman.ActiveChainstate().IsInitialBlockDownload()) &&
8180 std::vector<const CBlockIndex *> vToDownload;
8182 FindNextBlocksToDownload(pto->
GetId(),
8184 state.nBlocksInFlight,
8185 vToDownload, staller);
8188 BlockRequested(config, pto->
GetId(), *pindex);
8193 if (state.nBlocksInFlight == 0 && staller != -1) {
8194 if (State(staller)->m_stalling_since == 0us) {
8195 State(staller)->m_stalling_since = current_time;
8202 auto addGetDataAndMaybeFlush = [&](uint32_t type,
const uint256 &hash) {
8203 CInv inv(type, hash);
8206 vGetData.push_back(std::move(inv));
8208 m_connman.PushMessage(
8218 LOCK(cs_proofrequest);
8219 std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
8221 m_proofrequest.GetRequestable(pto->
GetId(), current_time, &expired);
8222 for (
const auto &entry : expired) {
8224 "timeout of inflight proof %s from peer=%d\n",
8225 entry.second.ToString(), entry.first);
8227 for (
const auto &proofid : requestable) {
8228 if (!AlreadyHaveProof(proofid)) {
8230 m_proofrequest.RequestedData(
8231 pto->
GetId(), proofid,
8238 m_proofrequest.ForgetInvId(proofid);
8248 std::vector<std::pair<NodeId, TxId>> expired;
8250 m_txrequest.GetRequestable(pto->
GetId(), current_time, &expired);
8251 for (
const auto &entry : expired) {
8253 entry.second.ToString(), entry.first);
8255 for (
const TxId &txid : requestable) {
8256 if (!AlreadyHaveTx(txid)) {
8257 addGetDataAndMaybeFlush(
MSG_TX, txid);
8258 m_txrequest.RequestedData(
8265 m_txrequest.ForgetInvId(txid);
8269 if (!vGetData.empty()) {
8270 m_connman.PushMessage(pto,
8275 MaybeSendFeefilter(*pto, *peer, current_time);
8279 bool PeerManagerImpl::ReceivedAvalancheProof(
CNode &
node, Peer &peer,
8281 assert(proof !=
nullptr);
8285 AddKnownProof(peer, proofid);
8287 if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
8297 return node.m_avalanche_pubkey.has_value());
8298 auto saveProofIfStaker = [isStaker](
const CNode &
node,
8300 const NodeId nodeid) ->
bool {
8312 LOCK(cs_proofrequest);
8313 m_proofrequest.ReceivedResponse(nodeid, proofid);
8315 if (AlreadyHaveProof(proofid)) {
8316 m_proofrequest.ForgetInvId(proofid);
8317 saveProofIfStaker(
node, proofid, nodeid);
8327 return pm.registerProof(proof, state);
8329 WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
8330 RelayProof(proofid);
8332 node.m_last_proof_time = GetTime<std::chrono::seconds>();
8335 nodeid, proofid.ToString());
8353 "Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
8354 state.
IsValid() ?
"not-worth-polling"
8356 nodeid, proofid.ToString());
8359 saveProofIfStaker(
node, proofid, nodeid);
bool MoneyRange(const Amount nValue)
static constexpr Amount MAX_MONEY
No amount larger than this (in satoshi) is valid.
bool isAvalancheEnabled(const ArgsManager &argsman)
static constexpr size_t AVALANCHE_DEFAULT_PEER_REPLACEMENT_COOLDOWN
Peer replacement cooldown time default value in seconds.
static constexpr bool DEFAULT_AVALANCHE_PRECONSENSUS
Default for -avalanchepreconsensus.
std::unique_ptr< avalanche::Processor > g_avalanche
Global avalanche instance.
static constexpr size_t AVALANCHE_DEFAULT_COOLDOWN
Avalanche default cooldown in milliseconds.
enum ReadStatus_t ReadStatus
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
@ CHAIN
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends,...
@ TRANSACTIONS
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid,...
@ SCRIPTS
Scripts & signatures ok.
@ TREE
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
arith_uint256 GetBlockProof(const CBlockIndex &block)
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
const CChainParams & Params()
Return the currently selected parameters.
#define Assert(val)
Identity function.
#define Assume(val)
Assume is the identity function.
Stochastic address manager.
int64_t GetIntArg(const std::string &strArg, int64_t nDefault) const
Return integer argument or default value.
bool GetBoolArg(const std::string &strArg, bool fDefault) const
Return boolean argument or default value.
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
std::vector< CTransactionRef > txn
std::vector< uint32_t > indices
A CService with information about it as peer.
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
NodeSeconds nTime
Always included in serialization, except in the network format on INIT_PROTO_VERSION.
size_t BlockTxCount() const
std::vector< CTransactionRef > vtx
The block chain is a tree shaped structure starting with the genesis block at the root,...
bool IsValid(enum BlockValidity nUpTo=BlockValidity::TRANSACTIONS) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
CBlockIndex * pprev
pointer to the index of the predecessor of this block
CBlockHeader GetBlockHeader() const
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
bool HaveTxsDownloaded() const
Check whether this block's and all previous blocks' transactions have been downloaded (and stored to ...
int64_t GetBlockTime() const
unsigned int nTx
Number of transactions in this block.
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
BlockHash GetBlockHash() const
int nHeight
height of the entry in the chain. The genesis block has height 0
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
bool IsWithinSizeConstraints() const
True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS (c...
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system.
Double ended buffer combining vector and stream-like interfaces.
Fee rate in satoshis per kilobyte: Amount / kB.
Reads data from an underlying stream, while hashing the read data.
A writer stream (for serialization) that computes a 256-bit hash.
Inv(ventory) message data.
bool IsMsgCmpctBlk() const
std::string ToString() const
bool IsMsgFilteredBlk() const
Used to create a Merkle proof (usually from a subset of transactions), which consists of a block head...
std::vector< std::pair< size_t, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
void SetIP(const CNetAddr &ip)
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Transport protocol agnostic message container.
CSerializedNetMsg Make(int nFlags, std::string msg_type, Args &&...args) const
Information about a peer.
RecursiveMutex cs_vProcessMsg
Mutex cs_avalanche_pubkey
bool IsFeelerConn() const
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
bool ExpectServicesFromConn() const
std::atomic< int > nVersion
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
bool IsInboundConn() const
bool HasPermission(NetPermissionFlags permission) const
std::atomic_bool fPauseRecv
bool IsOutboundOrBlockRelayConn() const
bool IsManualConn() const
std::atomic< int64_t > nTimeOffset
const std::string m_addr_name
std::string ConnectionTypeAsString() const
void SetCommonVersion(int greatest_common_version)
std::atomic< bool > m_bip152_highbandwidth_to
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer (their version message did not include fRelay=false...
std::atomic< bool > m_bip152_highbandwidth_from
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
std::atomic_bool fSuccessfullyConnected
bool IsAddrFetchConn() const
uint64_t GetLocalNonce() const
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
bool IsBlockOnlyConn() const
int GetCommonVersion() const
bool IsFullOutboundConn() const
uint64_t nRemoteHostNonce
Mutex m_subver_mutex
cleanSubVer is a sanitized string of the user agent byte array we read from the wire.
std::atomic_bool fPauseSend
std::chrono::seconds m_nextGetAvaAddr
uint64_t nRemoteExtraEntropy
uint64_t GetLocalExtraEntropy() const
SteadyMilliseconds m_last_poll
double getAvailabilityScore() const
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
void updateAvailabilityScore(double decayFactor)
The availability score is calculated using an exponentially weighted average.
std::atomic< std::chrono::seconds > m_avalanche_last_message_fault
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e.
std::atomic< int > m_avalanche_message_fault_counter
std::atomic< bool > m_avalanche_enabled
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e....
std::atomic_bool fDisconnect
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e....
void invsVoted(uint32_t count)
The node voted for count invs.
bool IsAvalancheOutboundConnection() const
const TxId & GetTxId() const
An encapsulated public key.
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
Simple class for background tasks that should be run periodically or once "after a while".
void scheduleEvery(Predicate p, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat p until it return false.
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
A combination of a network address (CNetAddr) and a (TCP) port.
std::string ToString() const
std::vector< uint8_t > GetKey() const
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data.
The basic transaction that is broadcasted on the network and contained in blocks.
const TxHash GetHash() const
const std::vector< CTxIn > vin
An input of a transaction.
std::set< std::reference_wrapper< const CTxMemPoolEntryRef >, CompareIteratorById > Parents
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
bool CompareTopologically(const TxId &txida, const TxId &txidb) const
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &block)
Notifies listeners that a block which builds directly on our current tip has been received and connec...
virtual void BlockChecked(const CBlock &, const BlockValidationState &)
Notifies listeners of a block validation result.
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
Notifies listeners when the block chain tip advances.
virtual void BlockConnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being connected.
virtual void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being disconnected.
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
virtual uint64_t GetMaxBlockSize() const =0
uint64_t randrange(uint64_t range) noexcept
Generate a random integer in the range [0..range).
size_t Count(NodeId peer) const
Count how many announcements a peer has (REQUESTED, CANDIDATE, and COMPLETED combined).
size_t CountInFlight(NodeId peer) const
Count how many REQUESTED announcements a peer has.
Interface for message handling.
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic< bool > &interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process protocol messages received from a given node.
virtual bool SendMessages(const Config &config, CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Send queued protocol messages to a given node.
virtual void InitializeNode(const Config &config, CNode &node, ServiceFlags our_services)=0
Initialize a peer (setup state, queue any initial messages)
virtual void FinalizeNode(const Config &config, const CNode &node)=0
Handle removal of a peer (clear state)
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< TxHash, CTransactionRef >> &extra_txn)
bool IsTxAvailable(size_t index) const
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, bool ignore_incoming_txs)
virtual void SendPings()=0
Send ping message to all peers.
virtual void ProcessMessage(const Config &config, CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process a single message from a peer.
virtual void StartScheduledTasks(CScheduler &scheduler)=0
Begin running background tasks, should only be called once.
virtual bool IgnoresIncomingTxs()=0
Whether this node ignores txs received over p2p.
virtual void UnitTestMisbehaving(const NodeId peer_id, const int howmuch)=0
Public for unit testing.
virtual std::optional< std::string > FetchBlock(const Config &config, NodeId peer_id, const CBlockIndex &block_index)=0
Attempt to manually fetch block from a given peer.
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const =0
Get statistics from node state.
virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)=0
This function is used for testing the stale tip eviction logic, see denialofservice_tests....
virtual void CheckForStaleTipAndEvictPeers()=0
Evict extra outbound peers.
static RCUPtr make(Args &&...args)
Construct a new object that is owned by the pointer.
A class to track orphan transactions (failed on TX_MISSING_INPUTS) Since we cannot distinguish orphan...
bool HaveTx(const TxId &txid) const LOCKS_EXCLUDED(g_cs_orphans)
Check if we already have an orphan transaction.
std::pair< CTransactionRef, NodeId > GetTx(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
Get an orphan transaction and its originating peer (Transaction ref will be nullptr if not found)
size_t Size() LOCKS_EXCLUDED(
Return how many entries exist in the orphange.
void EraseForBlock(const CBlock &block) LOCKS_EXCLUDED(g_cs_orphans)
Erase all orphans included in or invalidated by a new block.
void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
Erase all orphans announced by a peer (eg, after that peer disconnects)
bool AddTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
Add a new orphan transaction.
unsigned int LimitOrphans(unsigned int max_orphans) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
Limit the orphanage to the given maximum.
void AddChildrenToWorkSet(const CTransaction &tx, std::set< TxId > &orphan_work_set) const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
Add any orphans that list a particular tx as a parent into a peer's work set (ie orphans that may hav...
int EraseTx(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
Erase an orphan by txid.
std::string GetRejectReason() const
std::string ToString() const
256-bit unsigned big integer.
const std::vector< uint64_t > & getShortIDs() const
uint64_t getShortID(const ProofId &proofid) const
const std::vector< PrefilledProof > & getPrefilledProofs() const
ProofId getProofId() const
bool verify(DelegationState &state, CPubKey &auth) const
const LimitedProofId & getLimitedProofId() const
const DelegationId & getId() const
bool shouldRequestMoreNodes()
Returns true if we encountered a lack of node since the last call.
bool exists(const ProofId &proofid) const
bool forPeer(const ProofId &proofid, Callable &&func) const
bool addNode(NodeId nodeid, const ProofId &proofid)
Node API.
void removeUnbroadcastProof(const ProofId &proofid)
bool isBoundToPeer(const ProofId &proofid) const
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid, const bool present)
void forEachPeer(Callable &&func) const
void setInvalid(const ProofId &proofid)
bool isInvalid(const ProofId &proofid) const
bool isImmature(const ProofId &proofid) const
const ProofRadixTree & getShareableProofsSnapshot() const
void updateAvailabilityScores(const double decayFactor, Callable &&getNodeAvailabilityScore)
auto getUnbroadcastProofs() const
bool isInConflictingPool(const ProofId &proofid) const
std::vector< uint32_t > indices
std::string ToString() const
std::string GetHex() const
static const uint256 ZERO
@ BLOCK_CHECKPOINT
the block failed to meet one of our checkpoints
@ BLOCK_HEADER_LOW_WORK
the block header may be on a too-little-work chain
@ BLOCK_INVALID_HEADER
invalid proof of work or time too old
@ BLOCK_CACHED_INVALID
this block was cached as being invalid and we didn't store the reason why
@ BLOCK_CONSENSUS
invalid by consensus rules (excluding any below reasons)
@ BLOCK_MISSING_PREV
We don't have the previous block the checked one is built on.
@ BLOCK_INVALID_PREV
A block this one builds on is invalid.
@ BLOCK_MUTATED
the block's data didn't match the data committed to by the PoW
@ BLOCK_TIME_FUTURE
block timestamp was > 2 hours in the future (or our clock is bad)
@ BLOCK_RESULT_UNSET
initial value. Block has not yet been rejected
@ TX_MISSING_INPUTS
transaction was missing some of its inputs
@ TX_CHILD_BEFORE_PARENT
This tx outputs are already spent in the mempool.
@ TX_MEMPOOL_POLICY
violated mempool's fee/size/descendant/etc limits
@ TX_PREMATURE_SPEND
transaction spends a coinbase too early, or violates locktime/sequence locks
@ TX_DUPLICATE
Tx already in mempool or in the chain.
@ TX_INPUTS_NOT_STANDARD
inputs failed policy rules
@ TX_CONFLICT
Tx conflicts with another mempool tx, i.e.
@ TX_NOT_STANDARD
otherwise didn't meet our local policy rules
@ TX_NO_MEMPOOL
this node does not have a mempool so can't validate the transaction
@ TX_RESULT_UNSET
initial value. Tx has not yet been rejected
@ TX_CONSENSUS
invalid by consensus rules
static size_t RecursiveDynamicUsage(const CScript &script)
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
std::array< uint8_t, CPubKey::SCHNORR_SIZE > SchnorrSig
a Schnorr signature
#define LogPrint(category,...)
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
const char * AVAPROOFSREQ
Request for missing avalanche proofs after an avaproofs message has been processed.
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
const char * BLOCK
The block message transmits a single serialized block.
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter.
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message,...
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
const char * AVAPROOFS
The avaproofs message the proof short ids of all the valid proofs that we know.
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
const char * GETAVAPROOFS
The getavaproofs message requests an avaproofs message that provides the proof short ids of all the v...
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
const char * GETAVAADDR
The getavaaddr message requests an addr message from the receiving node, containing IP addresses of t...
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids".
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
const char * TX
The tx message transmits a single transaction.
const char * AVAHELLO
Contains a delegation and a signature.
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
const char * AVARESPONSE
Contains an avalanche::Response.
const char * GETDATA
The getdata message requests one or more data objects from another node.
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
const char * BLOCKTXN
Contains a BlockTransactions.
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks,...
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected.
const char * AVAPOLL
Contains an avalanche::Poll.
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
const char * AVAPROOF
Contains an avalanche::Proof.
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
ShortIdProcessor< PrefilledProof, ShortIdProcessorPrefilledProofAdapter, ProofRefCompare > ProofShortIdProcessor
std::variant< const ProofRef, const CBlockIndex *, const CTransactionRef > AnyVoteItem
RCUPtr< const Proof > ProofRef
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos, const Consensus::Params ¶ms)
Functions for disk access for blocks.
Implement std::hash so RCUPtr can be used as a key for maps or sets.
bool IsPeerAddrLocalGood(CNode *pnode)
CService GetLocalAddress(const CNetAddr &addrPeer)
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
std::function< void(const CAddress &addr, const std::string &msg_type, Span< const uint8_t > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
std::string userAgent(const Config &config)
bool IsReachable(enum Network net)
bool SeenLocal(const CService &addr)
vote for a local address
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
@ BypassProofRequestLimits
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
static constexpr auto GETAVAADDR_INTERVAL
Minimum time between 2 successives getavaaddr messages from the same peer.
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically relayed before uncondi...
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB
Maximum number of inventory items to send per transmission.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch?...
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay.
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS
Maximum number of unconnecting headers announcements before DoS score.
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict.
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
static constexpr auto RELAY_TX_CACHE_TIME
How long to cache transactions in mapRelay for normal relay.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
bool IsAvalancheMessageType(const std::string &msg_type)
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/behind headers chain.
static std::chrono::microseconds ComputeRequestTime(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams, std::chrono::microseconds current_time, bool preferred)
Compute the request time for this announcement, current time plus delays for:
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
static constexpr DataRequestParameters TX_REQUEST_PARAMS
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT
If no proof was requested from a compact proof message after this timeout expired,...
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY
The number of most recently announced transactions a peer can request.
static constexpr auto UNCONDITIONAL_RELAY_DELAY
How long a transaction has to be in the mempool before it can unconditionally be relayed (even when n...
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we're willing to process on average.
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
static constexpr DataRequestParameters PROOF_REQUEST_PARAMS
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
static uint32_t getAvalancheVoteForProof(const avalanche::ProofId &id)
Decide a response for an Avalanche poll about the given proof.
static bool TooManyAnnouncements(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams)
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
size_t GetMaxAddrToSend()
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message.
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN
Default number of orphan+recently-replaced txn to keep around for block reconstruction.
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS
Default for -maxorphantx, maximum number of orphan transactions kept in memory.
static const int DISCOURAGEMENT_THRESHOLD
Threshold for marking a node to be discouraged, e.g.
static constexpr int ADDRV2_FORMAT
A flag that is ORed into the protocol version to designate that addresses should be serialized in (un...
bool IsProxy(const CNetAddr &addr)
static constexpr Amount DEFAULT_MIN_RELAY_TX_FEE_PER_KB(1000 *SATOSHI)
Default for -minrelaytxfee, minimum relay fee for transactions.
std::shared_ptr< const CTransaction > CTransactionRef
static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL
Maximum item that can be polled at once.
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH
Maximum length of incoming protocol messages (Currently 2MB).
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services),...
@ MSG_CMPCT_BLOCK
Defined in BIP152.
ServiceFlags
nServices flags.
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB.
std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval)
Return a timestamp in the future sampled from an exponential distribution (https://en....
constexpr auto GetRandMillis
void Shuffle(I first, I last, R &&rng)
More efficient than using std::shuffle on a FastRandomContext.
reverse_range< T > reverse_iterate(T &x)
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
void Unserialize(Stream &, char)=delete
#define LIMITED_STRING(obj, n)
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(Span{std::forward< V >(v)}))
Like the Span constructor, but for (const) uint8_t member types only.
static const double AVALANCHE_STATISTICS_DECAY_FACTOR
Pre-computed decay factor for the avalanche statistics computation.
static constexpr std::chrono::minutes AVALANCHE_STATISTICS_REFRESH_PERIOD
Refresh period for the avalanche statistics computation.
std::string SanitizeString(const std::string &str, int rule)
Remove unsafe chars.
std::string ToString(const T &t)
Locale-independent version of std::to_string.
static constexpr Amount zero() noexcept
A BlockHash is a unique identifier for a block.
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
std::vector< BlockHash > vHave
std::chrono::microseconds m_ping_wait
Amount m_fee_filter_received
std::vector< int > vHeightInFlight
bool m_addr_relay_enabled
uint64_t m_addr_rate_limited
uint64_t m_addr_processed
ServiceFlags their_services
std::vector< uint8_t > data
Parameters that influence chain consensus.
int64_t nPowTargetSpacing
const std::chrono::seconds overloaded_peer_delay
How long to delay requesting data from overloaded peers (see max_peer_request_in_flight).
const size_t max_peer_announcements
Maximum number of inventories to consider for requesting, per peer.
const std::chrono::seconds nonpref_peer_delay
How long to delay requesting data from non-preferred peers.
const NetPermissionFlags bypass_request_limits_permissions
Permission flags a peer requires to bypass the request limits tracking limits and delay penalty.
const std::chrono::microseconds getdata_interval
How long to wait (in microseconds) before a data request from an additional peer.
const size_t max_peer_request_in_flight
Maximum number of in-flight data requests from a peer.
Validation result for a single transaction mempool acceptance.
const ResultType m_result_type
const TxValidationState m_state
@ VALID
Fully validated, valid.
static time_point now() noexcept
Return current system time or mocked time, if set.
std::chrono::time_point< NodeClock > time_point
This is a radix tree storing values identified by a unique key.
A TxId is the identifier of a transaction.
std::chrono::seconds registration_time
const ProofId & getProofId() const
#define AssertLockNotHeld(cs)
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
bool error(const char *fmt, const Args &...args)
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define LOCKS_EXCLUDED(...)
constexpr int64_t count_microseconds(std::chrono::microseconds t)
constexpr int64_t count_seconds(std::chrono::seconds t)
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
double CountSecondsDouble(SecondsDouble t)
Helper to count the seconds in any std::chrono::duration type.
NodeClock::time_point GetAdjustedTime()
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
NodeSeconds AdjustedTime()
#define TRACE6(context, event, a, b, c, d, e, f)
@ AVALANCHE
Removed by avalanche vote.
RecursiveMutex g_cs_orphans
Guards orphan transactions and extra txs for compact blocks.
arith_uint256 CalculateHeadersWork(const std::vector< CBlockHeader > &headers)
Return the sum of the work on a given set of headers.
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check with the proof of work on each blockheader matches the value in nBits.
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version
static const int PROTOCOL_VERSION
network protocol versioning
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.